author    Linus Torvalds <torvalds@linux-foundation.org>  2013-02-25 19:46:44 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-02-25 19:46:44 -0500
commit    fffddfd6c8e0c10c42c6e2cc54ba880fcc36ebbb (patch)
tree      71bc5e597124dbaf7550f1e089d675718b3ed5c0
parent    69086a78bdc973ec0b722be790b146e84ba8a8c4 (diff)
parent    be88298b0a3f771a4802f20c5e66af74bfd1dff1 (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm merge from Dave Airlie:
 "Highlights:
   - TI LCD controller KMS driver
   - TI OMAP KMS driver merged from staging
   - drop gma500 stub driver
   - the fbcon locking fixes
   - the vgacon dirty like zebra fix
   - open firmware videomode and hdmi common code helpers
   - major locking rework for kms object handling; pageflip/cursor won't
     block on polling anymore!
   - fbcon helper and prime helper cleanups
   - i915: all over the map, haswell power well enhancements, valleyview
     macro horrors cleaned up, killing lots of legacy GTT code
   - radeon: CS ioctl unification, deprecated UMS support, gpu reset
     rework, VM fixes
   - nouveau: reworked thermal code, external dp/tmds encoder support
     (anx9805), fences sleep instead of polling
   - exynos: all over the driver fixes"

Lovely conflict in radeon/evergreen_cs.c between commit de0babd60d8d
("drm/radeon: enforce use of radeon_get_ib_value when reading user cmd")
and the new changes that modified that evergreen_dma_cs_parse() function.

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (508 commits)
  drm/tilcdc: only build on arm
  drm/i915: Revert hdmi HDP pin checks
  drm/tegra: Add list of framebuffers to debugfs
  drm/tegra: Fix color expansion
  drm/tegra: Split DC_CMD_STATE_CONTROL register write
  drm/tegra: Implement page-flipping support
  drm/tegra: Implement VBLANK support
  drm/tegra: Implement .mode_set_base()
  drm/tegra: Add plane support
  drm/tegra: Remove bogus tegra_framebuffer structure
  drm: Add consistency check for page-flipping
  drm/radeon: Use generic HDMI infoframe helpers
  drm/tegra: Use generic HDMI infoframe helpers
  drm: Add EDID helper documentation
  drm: Add HDMI infoframe helpers
  video: Add generic HDMI infoframe helpers
  drm: Add some missing forward declarations
  drm: Move mode tables to drm_edid.c
  drm: Remove duplicate drm_mode_cea_vic()
  gma500: Fix n, m1 and m2 clock limits for sdvo and lvds
  ...
-rw-r--r--  Documentation/DocBook/drm.tmpl | 78
-rw-r--r--  Documentation/EDID/HOWTO.txt | 27
-rw-r--r--  Documentation/devicetree/bindings/drm/tilcdc/panel.txt | 59
-rw-r--r--  Documentation/devicetree/bindings/drm/tilcdc/slave.txt | 18
-rw-r--r--  Documentation/devicetree/bindings/drm/tilcdc/tfp410.txt | 21
-rw-r--r--  Documentation/devicetree/bindings/drm/tilcdc/tilcdc.txt | 21
-rw-r--r--  Documentation/devicetree/bindings/video/display-timing.txt | 109
-rw-r--r--  Documentation/thermal/nouveau_thermal | 81
-rw-r--r--  drivers/char/agp/intel-gtt.c | 128
-rw-r--r--  drivers/gpu/Makefile | 2
-rw-r--r--  drivers/gpu/drm/Kconfig | 8
-rw-r--r--  drivers/gpu/drm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 4
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_fb.c | 27
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 12
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_fbdev.c | 27
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_main.c | 12
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 816
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 843
-rw-r--r--  drivers/gpu/drm/drm_edid_modes.h | 774
-rw-r--r--  drivers/gpu/drm/drm_encoder_slave.c | 63
-rw-r--r--  drivers/gpu/drm/drm_fb_cma_helper.c | 95
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 310
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 1
-rw-r--r--  drivers/gpu/drm/drm_gem_cma_helper.c | 21
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 12
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 96
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 70
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 81
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 186
-rw-r--r--  drivers/gpu/drm/drm_usb.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 55
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 39
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 12
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 33
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.c | 12
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.h | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_iommu.h | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 1035
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 34
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 43
-rw-r--r--  drivers/gpu/drm/gma500/psb_device.c | 8
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 14
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_display.c | 12
-rw-r--r--  drivers/gpu/drm/i2c/Kconfig | 28
-rw-r--r--  drivers/gpu/drm/i2c/Makefile | 3
-rw-r--r--  drivers/gpu/drm/i2c/ch7006_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c | 906
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 254
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 94
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 131
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 475
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 516
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 12
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 333
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 645
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c | 305
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 33
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 370
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 436
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 540
-rw-r--r--  drivers/gpu/drm/i915/i915_ums.c | 503
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 46
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 79
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 975
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 374
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 41
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 55
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 108
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 103
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 250
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 95
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 113
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 67
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 46
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 4
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_fb.c | 28
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_main.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/Kconfig | 28
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 29
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/client.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/enum.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/event.c | 106
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/copy/nva3.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/base.c | 52
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/dport.c | 346
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/dport.h | 78
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv04.c | 33
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv50.c | 371
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv50.h | 37
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv84.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv94.c | 24
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nva0.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nva3.c | 24
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c | 309
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nve0.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c | 140
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c | 25
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c | 153
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c | 90
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/base.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c | 187
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c | 109
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c | 64
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv04.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv10.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv20.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv40.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv50.c | 53
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c | 33
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nve0.c | 44
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/software/nv50.c | 40
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/software/nvc0.c | 29
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/class.h | 44
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/client.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/device.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/enum.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/event.h | 36
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/object.h | 12
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/printk.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/disp.h | 27
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/fifo.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/software.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h | 11
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h | 16
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/xpio.h | 19
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bus.h | 41
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/gpio.h | 39
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/i2c.h | 127
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/therm.h | 37
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/timer.h | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/os.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c | 32
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/init.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/therm.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/xpio.c | 76
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c | 95
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c | 112
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c | 105
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c | 101
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/base.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nv04.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nv10.c | 25
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nv20.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nv30.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nv40.c | 50
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nv50.c | 51
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c | 43
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nve0.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c | 64
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/gpio/base.c | 140
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c | 40
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c | 45
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c | 131
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h | 17
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c | 279
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c | 154
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/base.c | 481
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c | 18
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c | 143
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c | 135
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c | 149
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h | 32
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c | 285
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c | 124
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/base.c | 218
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/fan.c | 244
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/fannil.c | 54
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c | 107
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c | 115
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/ic.c | 54
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c | 82
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c | 199
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c | 99
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c | 153
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/priv.h | 103
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/therm/temp.c | 162
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 130
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 24
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 96
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.h | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_debugfs.c | 64
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_debugfs.h | 22
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 95
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c | 297
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 60
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_encoder.h | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 26
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 103
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h | 42
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 103
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.h | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_pm.c | 233
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_prime.c | 173
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dfp.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_display.c | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_display.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fence.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_tv.c | 39
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fence.c | 118
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fence.h | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_fence.c | 149
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 307
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fence.c | 36
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_fence.c | 214
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fence.c | 186
-rw-r--r--  drivers/gpu/drm/omapdrm/Kconfig (renamed from drivers/staging/omapdrm/Kconfig) | 0
-rw-r--r--  drivers/gpu/drm/omapdrm/Makefile (renamed from drivers/staging/omapdrm/Makefile) | 0
-rw-r--r--  drivers/gpu/drm/omapdrm/TODO | 23
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_connector.c (renamed from drivers/staging/omapdrm/omap_connector.c) | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_crtc.c (renamed from drivers/staging/omapdrm/omap_crtc.c) | 14
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_debugfs.c (renamed from drivers/staging/omapdrm/omap_debugfs.c) | 18
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_dmm_priv.h (renamed from drivers/staging/omapdrm/omap_dmm_priv.h) | 0
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_dmm_tiler.c (renamed from drivers/staging/omapdrm/omap_dmm_tiler.c) | 0
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_dmm_tiler.h (renamed from drivers/staging/omapdrm/omap_dmm_tiler.h) | 0
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.c (renamed from drivers/staging/omapdrm/omap_drv.c) | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.h (renamed from drivers/staging/omapdrm/omap_drv.h) | 4
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_encoder.c (renamed from drivers/staging/omapdrm/omap_encoder.c) | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fb.c (renamed from drivers/staging/omapdrm/omap_fb.c) | 18
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fbdev.c (renamed from drivers/staging/omapdrm/omap_fbdev.c) | 34
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem.c (renamed from drivers/staging/omapdrm/omap_gem.c) | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c (renamed from drivers/staging/omapdrm/omap_gem_dmabuf.c) | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem_helpers.c (renamed from drivers/staging/omapdrm/omap_gem_helpers.c) | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_irq.c (renamed from drivers/staging/omapdrm/omap_irq.c) | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_plane.c (renamed from drivers/staging/omapdrm/omap_plane.c) | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/tcm-sita.c (renamed from drivers/staging/omapdrm/tcm-sita.c) | 0
-rw-r--r--  drivers/gpu/drm/omapdrm/tcm-sita.h (renamed from drivers/staging/omapdrm/tcm-sita.h) | 0
-rw-r--r--  drivers/gpu/drm/omapdrm/tcm.h (renamed from drivers/staging/omapdrm/tcm.h) | 0
-rw-r--r--  drivers/gpu/drm/radeon/Kconfig | 33
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 10
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 366
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 1149
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_hdmi.c | 85
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 54
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 339
-rw-r--r--  drivers/gpu/drm/radeon/nid.h | 27
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 224
-rw-r--r--  drivers/gpu/drm/radeon/r100_track.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/r100d.h | 11
-rw-r--r--  drivers/gpu/drm/radeon/r200.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 42
-rw-r--r--  drivers/gpu/drm/radeon/r300_cmdbuf.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r300d.h | 11
-rw-r--r--  drivers/gpu/drm/radeon/r500_reg.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 401
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit.c | 33
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 31
-rw-r--r--  drivers/gpu/drm/radeon/r600_cp.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 332
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 135
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 38
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 70
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 24
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atpx_handler.c | 73
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 176
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 91
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_family.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 27
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 60
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mem.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_prime.c | 170
-rw-r--r--  drivers/gpu/drm/radeon/radeon_reg.h | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 19
-rw-r--r--  drivers/gpu/drm/radeon/radeon_state.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/rv515d.h | 11
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/rv770d.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 509
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 30
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_drv.c | 4
-rw-r--r--  drivers/gpu/drm/tegra/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/tegra/dc.c | 585
-rw-r--r--  drivers/gpu/drm/tegra/dc.h | 14
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 103
-rw-r--r--  drivers/gpu/drm/tegra/drm.h | 43
-rw-r--r--  drivers/gpu/drm/tegra/fb.c | 4
-rw-r--r--  drivers/gpu/drm/tegra/hdmi.c | 226
-rw-r--r--  drivers/gpu/drm/tegra/hdmi.h | 189
-rw-r--r--  drivers/gpu/drm/tilcdc/Kconfig | 13
-rw-r--r--  drivers/gpu/drm/tilcdc/Makefile | 10
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 602
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_drv.c | 611
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_drv.h | 150
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_panel.c | 436
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_panel.h | 26
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_regs.h | 154
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_slave.c | 376
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_slave.h | 26
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_tfp410.c | 419
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_tfp410.h | 26
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 103
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c | 78
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.h | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c | 78
-rw-r--r--  drivers/gpu/drm/udl/udl_transfer.c | 46
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 38
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 87
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 4
-rw-r--r--  drivers/gpu/stub/Kconfig | 18
-rw-r--r--  drivers/gpu/stub/Makefile | 1
-rw-r--r--  drivers/gpu/stub/poulsbo.c | 64
-rw-r--r--  drivers/gpu/vga/vga_switcheroo.c | 3
-rw-r--r--  drivers/iommu/intel-iommu.c | 8
-rw-r--r--  drivers/staging/Kconfig | 2
-rw-r--r--  drivers/staging/Makefile | 1
-rw-r--r--  drivers/staging/omapdrm/TODO | 32
-rw-r--r--  drivers/tty/vt/vt.c | 136
-rw-r--r--  drivers/video/Kconfig | 26
-rw-r--r--  drivers/video/Makefile | 5
-rw-r--r--  drivers/video/console/fbcon.c | 58
-rw-r--r--  drivers/video/console/vgacon.c | 22
-rw-r--r--  drivers/video/display_timing.c | 24
-rw-r--r--  drivers/video/fbmem.c | 11
-rw-r--r--  drivers/video/fbmon.c | 94
-rw-r--r--  drivers/video/fbsysfs.c | 3
-rw-r--r--  drivers/video/hdmi.c | 308
-rw-r--r--  drivers/video/of_display_timing.c | 239
-rw-r--r--  drivers/video/of_videomode.c | 54
-rw-r--r--  drivers/video/via/hw.c | 6
-rw-r--r--  drivers/video/via/hw.h | 2
-rw-r--r--  drivers/video/via/lcd.c | 2
-rw-r--r--  drivers/video/via/share.h | 2
-rw-r--r--  drivers/video/via/via_modesetting.c | 8
-rw-r--r--  drivers/video/via/via_modesetting.h | 6
-rw-r--r--  drivers/video/videomode.c | 39
-rw-r--r--  include/drm/drmP.h | 34
-rw-r--r--  include/drm/drm_crtc.h | 38
-rw-r--r--  include/drm/drm_edid.h | 6
-rw-r--r--  include/drm/drm_encoder_slave.h | 20
-rw-r--r--  include/drm/drm_fb_cma_helper.h | 5
-rw-r--r--  include/drm/drm_fb_helper.h | 18
-rw-r--r--  include/drm/drm_gem_cma_helper.h | 4
-rw-r--r--  include/drm/drm_mm.h | 40
-rw-r--r--  include/drm/drm_pciids.h | 13
-rw-r--r--  include/drm/intel-gtt.h | 22
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h | 61
-rw-r--r--  include/linux/console.h | 2
-rw-r--r--  include/linux/fb.h | 8
-rw-r--r--  include/linux/hdmi.h | 231
-rw-r--r--  include/linux/vt_kern.h | 3
-rw-r--r--  include/uapi/drm/i915_drm.h | 20
-rw-r--r--  include/uapi/drm/omap_drm.h (renamed from drivers/staging/omapdrm/omap_drm.h) | 2
-rw-r--r--  include/video/display_timing.h | 124
-rw-r--r--  include/video/of_display_timing.h | 20
-rw-r--r--  include/video/of_videomode.h | 18
-rw-r--r--  include/video/videomode.h | 48
-rw-r--r--  kernel/printk.c | 9
398 files changed, 23479 insertions, 11509 deletions
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 4ee2304f82f9..f9df3b872c16 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -743,6 +743,10 @@ char *date;</synopsis>
         These two operations are mandatory for GEM drivers that support DRM
         PRIME.
       </para>
+      <sect4>
+        <title>DRM PRIME Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_prime.c PRIME Helpers
+      </sect4>
     </sect3>
     <sect3 id="drm-gem-objects-mapping">
       <title>GEM Objects Mapping</title>
@@ -978,10 +982,25 @@ int max_width, max_height;</synopsis>
         If the parameters are deemed valid, drivers then create, initialize and
         return an instance of struct <structname>drm_framebuffer</structname>.
         If desired the instance can be embedded in a larger driver-specific
-        structure. The new instance is initialized with a call to
-        <function>drm_framebuffer_init</function> which takes a pointer to DRM
-        frame buffer operations (struct
-        <structname>drm_framebuffer_funcs</structname>). Frame buffer operations are
+        structure. Drivers must fill its <structfield>width</structfield>,
+        <structfield>height</structfield>, <structfield>pitches</structfield>,
+        <structfield>offsets</structfield>, <structfield>depth</structfield>,
+        <structfield>bits_per_pixel</structfield> and
+        <structfield>pixel_format</structfield> fields from the values passed
+        through the <parameter>drm_mode_fb_cmd2</parameter> argument. They
+        should call the <function>drm_helper_mode_fill_fb_struct</function>
+        helper function to do so.
+      </para>
+
+      <para>
+        The initialization of the new framebuffer instance is finalized with a
+        call to <function>drm_framebuffer_init</function> which takes a pointer
+        to DRM frame buffer operations (struct
+        <structname>drm_framebuffer_funcs</structname>). Note that this function
+        publishes the framebuffer and so from this point on it can be accessed
+        concurrently from other threads. Hence it must be the last step in the
+        driver's framebuffer initialization sequence. Frame buffer operations
+        are
         <itemizedlist>
           <listitem>
             <synopsis>int (*create_handle)(struct drm_framebuffer *fb,
@@ -1022,16 +1041,16 @@ int max_width, max_height;</synopsis>
         </itemizedlist>
       </para>
       <para>
-        After initializing the <structname>drm_framebuffer</structname>
-        instance drivers must fill its <structfield>width</structfield>,
-        <structfield>height</structfield>, <structfield>pitches</structfield>,
-        <structfield>offsets</structfield>, <structfield>depth</structfield>,
-        <structfield>bits_per_pixel</structfield> and
-        <structfield>pixel_format</structfield> fields from the values passed
-        through the <parameter>drm_mode_fb_cmd2</parameter> argument. They
-        should call the <function>drm_helper_mode_fill_fb_struct</function>
-        helper function to do so.
-      </para>
+        The lifetime of a drm framebuffer is controlled with a reference count,
+        drivers can grab additional references with
+        <function>drm_framebuffer_reference</function> and drop them
+        again with <function>drm_framebuffer_unreference</function>. For
+        driver-private framebuffers for which the last reference is never
+        dropped (e.g. for the fbdev framebuffer when the struct
+        <structname>drm_framebuffer</structname> is embedded into the fbdev
+        helper struct) drivers can manually clean up a framebuffer at module
+        unload time with
+        <function>drm_framebuffer_unregister_private</function>.</para>
     </sect2>
     <sect2>
      <title>Output Polling</title>
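As a minimal sketch of the ordering the hunks above document (fill the fields
via drm_helper_mode_fill_fb_struct(), then publish with drm_framebuffer_init()
as the very last step), a driver's fb_create path might look like this; the
"foo" names are hypothetical and the 3.9-era helper signatures are assumed:

	struct foo_framebuffer {
		struct drm_framebuffer base;	/* embedded, as the text suggests */
		/* driver-private state would live here */
	};

	static void foo_fb_destroy(struct drm_framebuffer *fb);		/* hypothetical */
	static int foo_fb_create_handle(struct drm_framebuffer *fb,
					struct drm_file *file, unsigned int *handle);

	static const struct drm_framebuffer_funcs foo_fb_funcs = {
		.destroy = foo_fb_destroy,
		.create_handle = foo_fb_create_handle,
	};

	static struct drm_framebuffer *
	foo_fb_create(struct drm_device *dev, struct drm_file *file,
		      struct drm_mode_fb_cmd2 *mode_cmd)
	{
		struct foo_framebuffer *foo_fb;
		int ret;

		foo_fb = kzalloc(sizeof(*foo_fb), GFP_KERNEL);
		if (!foo_fb)
			return ERR_PTR(-ENOMEM);

		/* Copy width/height/pitches/offsets/pixel_format from userspace. */
		drm_helper_mode_fill_fb_struct(&foo_fb->base, mode_cmd);

		/* Publishes the fb to other threads: must come last. */
		ret = drm_framebuffer_init(dev, &foo_fb->base, &foo_fb_funcs);
		if (ret) {
			kfree(foo_fb);
			return ERR_PTR(ret);
		}
		return &foo_fb->base;
	}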
@@ -1043,6 +1062,22 @@ int max_width, max_height;</synopsis>
         operation.
       </para>
     </sect2>
+    <sect2>
+      <title>Locking</title>
+      <para>
+        Beside some lookup structures with their own locking (which is hidden
+        behind the interface functions) most of the modeset state is protected
+        by the <code>dev-&gt;mode_config.lock</code> mutex and additionally
+        per-crtc locks to allow cursor updates, pageflips and similar operations
+        to occur concurrently with background tasks like output detection.
+        Operations which cross domains like a full modeset always grab all
+        locks. Drivers therefore need to protect resources shared between crtcs
+        with additional locking. They also need to be careful to always grab
+        the relevant crtc locks if a modeset function touches crtc state, e.g.
+        for load detection (which only grabs the <code>mode_config.lock</code>
+        to allow concurrent screen updates on live crtcs).
+      </para>
+    </sect2>
   </sect1>

   <!-- Internals: kms initialization and cleanup -->
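A minimal sketch of the cross-crtc rule above, using the
drm_modeset_lock_all()/drm_modeset_unlock_all() helpers this merge introduces
(the ast resume hunk near the end of this diff uses the same pattern); the
wrapper name is hypothetical:

	static void foo_force_modes(struct drm_device *dev)
	{
		/* Takes dev->mode_config.lock plus every per-crtc lock. */
		drm_modeset_lock_all(dev);
		drm_helper_resume_force_mode(dev);
		drm_modeset_unlock_all(dev);
	}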
@@ -1126,6 +1161,12 @@ int max_width, max_height;</synopsis>
         any new rendering to the frame buffer until the page flip completes.
       </para>
       <para>
+        If a page flip can be successfully scheduled the driver must set the
+        <code>drm_crtc-&gt;fb</code> field to the new framebuffer pointed to
+        by <code>fb</code>. This is important so that the reference counting
+        on framebuffers stays balanced.
+      </para>
+      <para>
         If a page flip is already pending, the
         <methodname>page_flip</methodname> operation must return
         -<errorname>EBUSY</errorname>.
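A skeleton of that contract, assuming the 3.9-era three-argument page_flip
signature (the foo_* names are hypothetical, hardware-specific stand-ins):

	static int foo_crtc_page_flip(struct drm_crtc *crtc,
				      struct drm_framebuffer *fb,
				      struct drm_pending_vblank_event *event)
	{
		struct foo_crtc *foo_crtc = to_foo_crtc(crtc);	/* hypothetical */
		int ret;

		if (foo_crtc->flip_pending)
			return -EBUSY;

		ret = foo_schedule_flip(foo_crtc, fb, event);	/* hw specific */
		if (ret)
			return ret;

		/* Keeps fb refcounting balanced once the flip is scheduled. */
		crtc->fb = fb;
		return 0;
	}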
@@ -1609,6 +1650,10 @@ void intel_crt_init(struct drm_device *dev)
         make its properties available to applications.
       </para>
     </sect2>
+    <sect2>
+      <title>KMS API Functions</title>
+!Edrivers/gpu/drm/drm_crtc.c
+    </sect2>
   </sect1>

   <!-- Internals: kms helper functions -->
@@ -2104,6 +2149,7 @@ void intel_crt_init(struct drm_device *dev)
       <title>fbdev Helper Functions Reference</title>
 !Pdrivers/gpu/drm/drm_fb_helper.c fbdev helpers
 !Edrivers/gpu/drm/drm_fb_helper.c
+!Iinclude/drm/drm_fb_helper.h
     </sect2>
     <sect2>
       <title>Display Port Helper Functions Reference</title>
@@ -2111,6 +2157,10 @@ void intel_crt_init(struct drm_device *dev)
 !Iinclude/drm/drm_dp_helper.h
 !Edrivers/gpu/drm/drm_dp_helper.c
     </sect2>
+    <sect2>
+      <title>EDID Helper Functions Reference</title>
+!Edrivers/gpu/drm/drm_edid.c
+    </sect2>
   </sect1>

   <!-- Internals: vertical blanking -->
diff --git a/Documentation/EDID/HOWTO.txt b/Documentation/EDID/HOWTO.txt
index 75a9f2a0c43d..2d0a8f09475d 100644
--- a/Documentation/EDID/HOWTO.txt
+++ b/Documentation/EDID/HOWTO.txt
@@ -28,11 +28,30 @@ Makefile environment are given here.
 To create binary EDID and C source code files from the existing data
 material, simply type "make".

-If you want to create your own EDID file, copy the file 1024x768.S and
-replace the settings with your own data. The CRC value in the last line
+If you want to create your own EDID file, copy the file 1024x768.S,
+replace the settings with your own data and add a new target to the
+Makefile. Please note that the EDID data structure expects the timing
+values in a different way than the standard X11 format.
+
+X11:
+HTimings:  hdisp hsyncstart hsyncend htotal
+VTimings:  vdisp vsyncstart vsyncend vtotal
+
+EDID:
+#define XPIX hdisp
+#define XBLANK htotal-hdisp
+#define XOFFSET hsyncstart-hdisp
+#define XPULSE hsyncend-hsyncstart
+
+#define YPIX vdisp
+#define YBLANK vtotal-vdisp
+#define YOFFSET (63+(vsyncstart-vdisp))
+#define YPULSE (63+(vsyncend-vsyncstart))
+
+The CRC value in the last line
   #define CRC 0x55
-is a bit tricky. After a first version of the binary data set is
-created, it must be be checked with the "edid-decode" utility which will
+also is a bit tricky. After a first version of the binary data set is
+created, it must be checked with the "edid-decode" utility which will
 most probably complain about a wrong CRC. Fortunately, the utility also
 displays the correct CRC which must then be inserted into the source
 file. After the make procedure is repeated, the EDID data set is ready
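As a worked example of the conversion above, take the standard VESA
1024x768@60 modeline (65 MHz dot clock, horizontal 1024 1048 1184 1344,
vertical 768 771 777 806). The resulting defines, which should match the
shipped 1024x768.S, come out as:

	#define XPIX 1024
	#define XBLANK 320	/* 1344 - 1024 */
	#define XOFFSET 24	/* 1048 - 1024 */
	#define XPULSE 136	/* 1184 - 1048 */

	#define YPIX 768
	#define YBLANK 38	/* 806 - 768 */
	#define YOFFSET (63+3)	/* 63 + (771 - 768) */
	#define YPULSE (63+6)	/* 63 + (777 - 771) */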
diff --git a/Documentation/devicetree/bindings/drm/tilcdc/panel.txt b/Documentation/devicetree/bindings/drm/tilcdc/panel.txt
new file mode 100644
index 000000000000..9301c330d1a6
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/tilcdc/panel.txt
@@ -0,0 +1,59 @@
+Device-Tree bindings for tilcdc DRM generic panel output driver
+
+Required properties:
+ - compatible: value should be "ti,tilcdc,panel".
+ - panel-info: configuration info to configure LCDC correctly for the panel
+   - ac-bias: AC Bias Pin Frequency
+   - ac-bias-intrpt: AC Bias Pin Transitions per Interrupt
+   - dma-burst-sz: DMA burst size
+   - bpp: Bits per pixel
+   - fdd: FIFO DMA Request Delay
+   - sync-edge: Horizontal and Vertical Sync Edge: 0=rising 1=falling
+   - sync-ctrl: Horizontal and Vertical Sync Control: 0=ignore
+   - raster-order: Raster Data Order Select: 1=Most-to-least 0=Least-to-most
+   - fifo-th: DMA FIFO threshold
+ - display-timings: typical videomode of lcd panel. Multiple video modes
+   can be listed if the panel supports multiple timings, but the 'native-mode'
+   should be the preferred/default resolution. Refer to
+   Documentation/devicetree/bindings/video/display-timing.txt for display
+   timing binding details.
+
+Recommended properties:
+ - pinctrl-names, pinctrl-0: the pincontrol settings to configure
+   muxing properly for pins that connect to the LCD panel
+
+Example:
+
+	/* Settings for CDTech_S035Q01 / LCD3 cape: */
+	lcd3 {
+		compatible = "ti,tilcdc,panel";
+		pinctrl-names = "default";
+		pinctrl-0 = <&bone_lcd3_cape_lcd_pins>;
+		panel-info {
+			ac-bias = <255>;
+			ac-bias-intrpt = <0>;
+			dma-burst-sz = <16>;
+			bpp = <16>;
+			fdd = <0x80>;
+			sync-edge = <0>;
+			sync-ctrl = <1>;
+			raster-order = <0>;
+			fifo-th = <0>;
+		};
+		display-timings {
+			native-mode = <&timing0>;
+			timing0: 320x240 {
+				hactive = <320>;
+				vactive = <240>;
+				hback-porch = <21>;
+				hfront-porch = <58>;
+				hsync-len = <47>;
+				vback-porch = <11>;
+				vfront-porch = <23>;
+				vsync-len = <2>;
+				clock-frequency = <8000000>;
+				hsync-active = <0>;
+				vsync-active = <0>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/drm/tilcdc/slave.txt b/Documentation/devicetree/bindings/drm/tilcdc/slave.txt
new file mode 100644
index 000000000000..3d2c52460dca
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/tilcdc/slave.txt
@@ -0,0 +1,18 @@
+Device-Tree bindings for tilcdc DRM encoder slave output driver
+
+Required properties:
+ - compatible: value should be "ti,tilcdc,slave".
+ - i2c: the phandle for the i2c device the encoder slave is connected to
+
+Recommended properties:
+ - pinctrl-names, pinctrl-0: the pincontrol settings to configure
+   muxing properly for pins that connect to the encoder slave device
+
+Example:
+
+	hdmi {
+		compatible = "ti,tilcdc,slave";
+		i2c = <&i2c0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&nxp_hdmi_bonelt_pins>;
+	};
diff --git a/Documentation/devicetree/bindings/drm/tilcdc/tfp410.txt b/Documentation/devicetree/bindings/drm/tilcdc/tfp410.txt
new file mode 100644
index 000000000000..a58ae7756fc6
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/tilcdc/tfp410.txt
@@ -0,0 +1,21 @@
+Device-Tree bindings for tilcdc DRM TFP410 output driver
+
+Required properties:
+ - compatible: value should be "ti,tilcdc,tfp410".
+ - i2c: the phandle for the i2c device to use for DDC
+
+Recommended properties:
+ - pinctrl-names, pinctrl-0: the pincontrol settings to configure
+   muxing properly for pins that connect to TFP410 device
+ - powerdn-gpio: the powerdown GPIO, pulled low to power down the
+   TFP410 device (for DPMS_OFF)
+
+Example:
+
+	dvicape {
+		compatible = "ti,tilcdc,tfp410";
+		i2c = <&i2c2>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&bone_dvi_cape_dvi_00A1_pins>;
+		powerdn-gpio = <&gpio2 31 0>;
+	};
diff --git a/Documentation/devicetree/bindings/drm/tilcdc/tilcdc.txt b/Documentation/devicetree/bindings/drm/tilcdc/tilcdc.txt
new file mode 100644
index 000000000000..e5f130159ae1
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/tilcdc/tilcdc.txt
@@ -0,0 +1,21 @@
+Device-Tree bindings for tilcdc DRM driver
+
+Required properties:
+ - compatible: value should be "ti,am33xx-tilcdc".
+ - interrupts: the interrupt number
+ - reg: base address and size of the LCDC device
+
+Recommended properties:
+ - interrupt-parent: the phandle for the interrupt controller that
+   services interrupts for this device.
+ - ti,hwmods: Name of the hwmod associated to the LCDC
+
+Example:
+
+	fb: fb@4830e000 {
+		compatible = "ti,am33xx-tilcdc";
+		reg = <0x4830e000 0x1000>;
+		interrupt-parent = <&intc>;
+		interrupts = <36>;
+		ti,hwmods = "lcdc";
+	};
diff --git a/Documentation/devicetree/bindings/video/display-timing.txt b/Documentation/devicetree/bindings/video/display-timing.txt
new file mode 100644
index 000000000000..150038552bc3
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/display-timing.txt
@@ -0,0 +1,109 @@
+display-timing bindings
+=======================
+
+display-timings node
+--------------------
+
+required properties:
+ - none
+
+optional properties:
+ - native-mode: The native mode for the display, in case multiple modes are
+   provided. When omitted, assume the first node is the native.
+
+timing subnode
+--------------
+
+required properties:
+ - hactive, vactive: display resolution
+ - hfront-porch, hback-porch, hsync-len: horizontal display timing parameters
+   in pixels
+   vfront-porch, vback-porch, vsync-len: vertical display timing parameters in
+   lines
+ - clock-frequency: display clock in Hz
+
+optional properties:
+ - hsync-active: hsync pulse is active low/high/ignored
+ - vsync-active: vsync pulse is active low/high/ignored
+ - de-active: data-enable pulse is active low/high/ignored
+ - pixelclk-active: with
+   - active high = drive pixel data on rising edge/
+     sample data on falling edge
+   - active low = drive pixel data on falling edge/
+     sample data on rising edge
+   - ignored = ignored
+ - interlaced (bool): boolean to enable interlaced mode
+ - doublescan (bool): boolean to enable doublescan mode
+
+All the optional properties that are not bool follow the following logic:
+ <1>: high active
+ <0>: low active
+ omitted: not used on hardware
+
+There are different ways of describing the capabilities of a display. The
+devicetree representation corresponds to the one commonly found in datasheets
+for displays. If a display supports multiple signal timings, the native-mode
+can be specified.
+
+The parameters are defined as:
+
+  +----------+-------------------------------------+----------+-------+
+  |          |                ^                    |          |       |
+  |          |                |vback_porch         |          |       |
+  |          |                v                    |          |       |
+  +----------#######################################----------+-------+
+  |          #                ^                    #          |       |
+  |          #                |                    #          |       |
+  |  hback   #                |                    #  hfront  | hsync |
+  |  porch   #                |       hactive      #  porch   |  len  |
+  |<-------->#<-------+--------------------------->#<-------->|<----->|
+  |          #        |                            #          |       |
+  |          #        |vactive                     #          |       |
+  |          #        |                            #          |       |
+  |          #        v                            #          |       |
+  +----------#######################################----------+-------+
+  |          |                ^                    |          |       |
+  |          |                |vfront_porch        |          |       |
+  |          |                v                    |          |       |
+  +----------+-------------------------------------+----------+-------+
+  |          |                ^                    |          |       |
+  |          |                |vsync_len           |          |       |
+  |          |                v                    |          |       |
+  +----------+-------------------------------------+----------+-------+
+
+Example:
+
+	display-timings {
+		native-mode = <&timing0>;
+		timing0: 1080p24 {
+			/* 1920x1080p24 */
+			clock-frequency = <52000000>;
+			hactive = <1920>;
+			vactive = <1080>;
+			hfront-porch = <25>;
+			hback-porch = <25>;
+			hsync-len = <25>;
+			vback-porch = <2>;
+			vfront-porch = <2>;
+			vsync-len = <2>;
+			hsync-active = <1>;
+		};
+	};
+
+Every required property also supports the use of ranges, so the commonly used
+datasheet description with minimum, typical and maximum values can be used.
+
+Example:
+
+	timing1: timing {
+		/* 1920x1080p24 */
+		clock-frequency = <148500000>;
+		hactive = <1920>;
+		vactive = <1080>;
+		hsync-len = <0 44 60>;
+		hfront-porch = <80 88 95>;
+		hback-porch = <100 148 160>;
+		vfront-porch = <0 4 6>;
+		vback-porch = <0 36 50>;
+		vsync-len = <0 5 6>;
+	};
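For completeness, a hedged sketch of how a driver might consume this binding
through the of_videomode helper added elsewhere in this merge; the signature
and the OF_USE_NATIVE_MODE selector are assumed from
include/video/of_videomode.h, and the foo_* function is hypothetical:

	#include <video/of_videomode.h>
	#include <video/videomode.h>

	static int foo_parse_panel_timing(struct device_node *np)
	{
		struct videomode vm;
		int ret;

		/* OF_USE_NATIVE_MODE picks the 'native-mode' timing node. */
		ret = of_get_videomode(np, &vm, OF_USE_NATIVE_MODE);
		if (ret < 0)
			return ret;

		pr_info("panel: %ux%u, pixel clock %lu Hz\n",
			vm.hactive, vm.vactive, vm.pixelclock);
		return 0;
	}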
diff --git a/Documentation/thermal/nouveau_thermal b/Documentation/thermal/nouveau_thermal
new file mode 100644
index 000000000000..efceb7828f54
--- /dev/null
+++ b/Documentation/thermal/nouveau_thermal
@@ -0,0 +1,81 @@
+Kernel driver nouveau
+=====================
+
+Supported chips:
+* NV43+
+
+Authors: Martin Peres (mupuf) <martin.peres@labri.fr>
+
+Description
+-----------
+
+This driver allows you to read the GPU core temperature, drive the GPU fan and
+set temperature alarms.
+
+Currently, due to the absence of an in-kernel API to access HWMON drivers,
+Nouveau cannot access any of the i2c external monitoring chips it may find. If
+you have one of those, temperature and/or fan management through Nouveau's
+HWMON interface is likely not to work. This document may then not cover your
+situation entirely.
+
+Temperature management
+----------------------
+
+Temperature is exposed as a read-only HWMON attribute, temp1_input.
+
+In order to protect the GPU from overheating, Nouveau supports 4 configurable
+temperature thresholds:
+
+ * Fan_boost: Fan speed is set to 100% when reaching this temperature;
+ * Downclock: The GPU will be downclocked to reduce its power dissipation;
+ * Critical: The GPU is put on hold to further lower power dissipation;
+ * Shutdown: Shut the computer down to protect your GPU.
+
+WARNING: Some of these thresholds may not be used by Nouveau depending
+on your chipset.
+
+The default value for these thresholds comes from the GPU's vbios. These
+thresholds can be configured thanks to the following HWMON attributes:
+
+ * Fan_boost: temp1_auto_point1_temp and temp1_auto_point1_temp_hyst;
+ * Downclock: temp1_max and temp1_max_hyst;
+ * Critical: temp1_crit and temp1_crit_hyst;
+ * Shutdown: temp1_emergency and temp1_emergency_hyst.
+
+NOTE: Remember that the values are stored as milli degrees Celsius. Don't
+forget to multiply!
+
+Fan management
+--------------
+
+Not all cards have a drivable fan. If you do, then the following HWMON
+attributes should be available:
+
+ * pwm1_enable: Current fan management mode (NONE, MANUAL or AUTO);
+ * pwm1: Current PWM value (power percentage);
+ * pwm1_min: The minimum PWM speed allowed;
+ * pwm1_max: The maximum PWM speed allowed (bypassed when hitting Fan_boost);
+
+You may also have the following attribute:
+
+ * fan1_input: Speed in RPM of your fan.
+
+Your fan can be driven in different modes:
+
+ * 0: The fan is left untouched;
+ * 1: The fan can be driven manually (use pwm1 to change the speed);
+ * 2: The fan is driven automatically depending on the temperature.
+
+NOTE: Be sure to use manual mode if you want to drive the fan speed manually.
+
+NOTE2: Not all fan management modes may be supported on all chipsets. We are
+working on it.
+
+Bug reports
+-----------
+
+Thermal management on Nouveau is new and may not work on all cards. If you have
+inquiries, please ping mupuf on IRC (#nouveau, freenode).
+
+Bug reports should be filed on Freedesktop's bug tracker. Please follow
+http://nouveau.freedesktop.org/wiki/Bugs
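A small userspace sketch of the millidegree convention noted above; the hwmon
index ("hwmon0") is an assumption and varies per system:

	#include <stdio.h>

	int main(void)
	{
		const char *path = "/sys/class/hwmon/hwmon0/temp1_input";
		FILE *f = fopen(path, "r");
		long mdeg;

		if (!f || fscanf(f, "%ld", &mdeg) != 1) {
			perror(path);
			return 1;
		}
		fclose(f);
		/* The attribute is in milli degrees Celsius. */
		printf("GPU core: %ld.%03ld degC\n", mdeg / 1000, mdeg % 1000);
		return 0;
	}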
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index dbd901e94ea6..b8e2014cb9cb 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -60,7 +60,6 @@ struct intel_gtt_driver {
60}; 60};
61 61
62static struct _intel_private { 62static struct _intel_private {
63 struct intel_gtt base;
64 const struct intel_gtt_driver *driver; 63 const struct intel_gtt_driver *driver;
65 struct pci_dev *pcidev; /* device one */ 64 struct pci_dev *pcidev; /* device one */
66 struct pci_dev *bridge_dev; 65 struct pci_dev *bridge_dev;
@@ -75,7 +74,18 @@ static struct _intel_private {
75 struct resource ifp_resource; 74 struct resource ifp_resource;
76 int resource_valid; 75 int resource_valid;
77 struct page *scratch_page; 76 struct page *scratch_page;
77 phys_addr_t scratch_page_dma;
78 int refcount; 78 int refcount;
79 /* Whether i915 needs to use the dmar apis or not. */
80 unsigned int needs_dmar : 1;
81 phys_addr_t gma_bus_addr;
82 /* Size of memory reserved for graphics by the BIOS */
83 unsigned int stolen_size;
84 /* Total number of gtt entries. */
85 unsigned int gtt_total_entries;
86 /* Part of the gtt that is mappable by the cpu, for those chips where
87 * this is not the full gtt. */
88 unsigned int gtt_mappable_entries;
79} intel_private; 89} intel_private;
80 90
81#define INTEL_GTT_GEN intel_private.driver->gen 91#define INTEL_GTT_GEN intel_private.driver->gen
@@ -291,15 +301,15 @@ static int intel_gtt_setup_scratch_page(void)
291 get_page(page); 301 get_page(page);
292 set_pages_uc(page, 1); 302 set_pages_uc(page, 1);
293 303
294 if (intel_private.base.needs_dmar) { 304 if (intel_private.needs_dmar) {
295 dma_addr = pci_map_page(intel_private.pcidev, page, 0, 305 dma_addr = pci_map_page(intel_private.pcidev, page, 0,
296 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 306 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
297 if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) 307 if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
298 return -EINVAL; 308 return -EINVAL;
299 309
300 intel_private.base.scratch_page_dma = dma_addr; 310 intel_private.scratch_page_dma = dma_addr;
301 } else 311 } else
302 intel_private.base.scratch_page_dma = page_to_phys(page); 312 intel_private.scratch_page_dma = page_to_phys(page);
303 313
304 intel_private.scratch_page = page; 314 intel_private.scratch_page = page;
305 315
@@ -506,7 +516,7 @@ static unsigned int intel_gtt_total_entries(void)
506 /* On previous hardware, the GTT size was just what was 516 /* On previous hardware, the GTT size was just what was
507 * required to map the aperture. 517 * required to map the aperture.
508 */ 518 */
509 return intel_private.base.gtt_mappable_entries; 519 return intel_private.gtt_mappable_entries;
510 } 520 }
511} 521}
512 522
@@ -546,7 +556,7 @@ static unsigned int intel_gtt_mappable_entries(void)
546static void intel_gtt_teardown_scratch_page(void) 556static void intel_gtt_teardown_scratch_page(void)
547{ 557{
548 set_pages_wb(intel_private.scratch_page, 1); 558 set_pages_wb(intel_private.scratch_page, 1);
549 pci_unmap_page(intel_private.pcidev, intel_private.base.scratch_page_dma, 559 pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
550 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 560 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
551 put_page(intel_private.scratch_page); 561 put_page(intel_private.scratch_page);
552 __free_page(intel_private.scratch_page); 562 __free_page(intel_private.scratch_page);
@@ -562,6 +572,40 @@ static void intel_gtt_cleanup(void)
562 intel_gtt_teardown_scratch_page(); 572 intel_gtt_teardown_scratch_page();
563} 573}
564 574
575/* Certain Gen5 chipsets require require idling the GPU before
576 * unmapping anything from the GTT when VT-d is enabled.
577 */
578static inline int needs_ilk_vtd_wa(void)
579{
580#ifdef CONFIG_INTEL_IOMMU
581 const unsigned short gpu_devid = intel_private.pcidev->device;
582
583 /* Query intel_iommu to see if we need the workaround. Presumably that
584 * was loaded first.
585 */
586 if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
587 gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
588 intel_iommu_gfx_mapped)
589 return 1;
590#endif
591 return 0;
592}
593
594static bool intel_gtt_can_wc(void)
595{
596 if (INTEL_GTT_GEN <= 2)
597 return false;
598
599 if (INTEL_GTT_GEN >= 6)
600 return false;
601
602 /* Reports of major corruption with ILK vt'd enabled */
603 if (needs_ilk_vtd_wa())
604 return false;
605
606 return true;
607}
608
565static int intel_gtt_init(void) 609static int intel_gtt_init(void)
566{ 610{
567 u32 gma_addr; 611 u32 gma_addr;
@@ -572,8 +616,8 @@ static int intel_gtt_init(void)
572 if (ret != 0) 616 if (ret != 0)
573 return ret; 617 return ret;
574 618
575 intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries(); 619 intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
576 intel_private.base.gtt_total_entries = intel_gtt_total_entries(); 620 intel_private.gtt_total_entries = intel_gtt_total_entries();
577 621
578 /* save the PGETBL reg for resume */ 622 /* save the PGETBL reg for resume */
579 intel_private.PGETBL_save = 623 intel_private.PGETBL_save =
@@ -585,13 +629,13 @@ static int intel_gtt_init(void)
585 629
586 dev_info(&intel_private.bridge_dev->dev, 630 dev_info(&intel_private.bridge_dev->dev,
587 "detected gtt size: %dK total, %dK mappable\n", 631 "detected gtt size: %dK total, %dK mappable\n",
588 intel_private.base.gtt_total_entries * 4, 632 intel_private.gtt_total_entries * 4,
589 intel_private.base.gtt_mappable_entries * 4); 633 intel_private.gtt_mappable_entries * 4);
590 634
591 gtt_map_size = intel_private.base.gtt_total_entries * 4; 635 gtt_map_size = intel_private.gtt_total_entries * 4;
592 636
593 intel_private.gtt = NULL; 637 intel_private.gtt = NULL;
594 if (INTEL_GTT_GEN < 6 && INTEL_GTT_GEN > 2) 638 if (intel_gtt_can_wc())
595 intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr, 639 intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr,
596 gtt_map_size); 640 gtt_map_size);
597 if (intel_private.gtt == NULL) 641 if (intel_private.gtt == NULL)
@@ -602,13 +646,12 @@ static int intel_gtt_init(void)
602 iounmap(intel_private.registers); 646 iounmap(intel_private.registers);
603 return -ENOMEM; 647 return -ENOMEM;
604 } 648 }
605 intel_private.base.gtt = intel_private.gtt;
606 649
607 global_cache_flush(); /* FIXME: ? */ 650 global_cache_flush(); /* FIXME: ? */
608 651
609 intel_private.base.stolen_size = intel_gtt_stolen_size(); 652 intel_private.stolen_size = intel_gtt_stolen_size();
610 653
611 intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2; 654 intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
612 655
613 ret = intel_gtt_setup_scratch_page(); 656 ret = intel_gtt_setup_scratch_page();
614 if (ret != 0) { 657 if (ret != 0) {
@@ -623,7 +666,7 @@ static int intel_gtt_init(void)
623 pci_read_config_dword(intel_private.pcidev, I915_GMADDR, 666 pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
624 &gma_addr); 667 &gma_addr);
625 668
626 intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK); 669 intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
627 670
628 return 0; 671 return 0;
629} 672}
@@ -634,8 +677,7 @@ static int intel_fake_agp_fetch_size(void)
634 unsigned int aper_size; 677 unsigned int aper_size;
635 int i; 678 int i;
636 679
637 aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT) 680 aper_size = (intel_private.gtt_mappable_entries << PAGE_SHIFT) / MB(1);
638 / MB(1);
639 681
640 for (i = 0; i < num_sizes; i++) { 682 for (i = 0; i < num_sizes; i++) {
641 if (aper_size == intel_fake_agp_sizes[i].size) { 683 if (aper_size == intel_fake_agp_sizes[i].size) {
@@ -779,7 +821,7 @@ static int intel_fake_agp_configure(void)
779 return -EIO; 821 return -EIO;
780 822
781 intel_private.clear_fake_agp = true; 823 intel_private.clear_fake_agp = true;
782 agp_bridge->gart_bus_addr = intel_private.base.gma_bus_addr; 824 agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
783 825
784 return 0; 826 return 0;
785} 827}
@@ -841,12 +883,9 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
841{ 883{
842 int ret = -EINVAL; 884 int ret = -EINVAL;
843 885
844 if (intel_private.base.do_idle_maps)
845 return -ENODEV;
846
847 if (intel_private.clear_fake_agp) { 886 if (intel_private.clear_fake_agp) {
848 int start = intel_private.base.stolen_size / PAGE_SIZE; 887 int start = intel_private.stolen_size / PAGE_SIZE;
849 int end = intel_private.base.gtt_mappable_entries; 888 int end = intel_private.gtt_mappable_entries;
850 intel_gtt_clear_range(start, end - start); 889 intel_gtt_clear_range(start, end - start);
851 intel_private.clear_fake_agp = false; 890 intel_private.clear_fake_agp = false;
852 } 891 }
@@ -857,7 +896,7 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
857 if (mem->page_count == 0) 896 if (mem->page_count == 0)
858 goto out; 897 goto out;
859 898
860 if (pg_start + mem->page_count > intel_private.base.gtt_total_entries) 899 if (pg_start + mem->page_count > intel_private.gtt_total_entries)
861 goto out_err; 900 goto out_err;
862 901
863 if (type != mem->type) 902 if (type != mem->type)
@@ -869,7 +908,7 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
869 if (!mem->is_flushed) 908 if (!mem->is_flushed)
870 global_cache_flush(); 909 global_cache_flush();
871 910
872 if (intel_private.base.needs_dmar) { 911 if (intel_private.needs_dmar) {
873 struct sg_table st; 912 struct sg_table st;
874 913
875 ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st); 914 ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
@@ -895,7 +934,7 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
895 unsigned int i; 934 unsigned int i;
896 935
897 for (i = first_entry; i < (first_entry + num_entries); i++) { 936 for (i = first_entry; i < (first_entry + num_entries); i++) {
898 intel_private.driver->write_entry(intel_private.base.scratch_page_dma, 937 intel_private.driver->write_entry(intel_private.scratch_page_dma,
899 i, 0); 938 i, 0);
900 } 939 }
901 readl(intel_private.gtt+i-1); 940 readl(intel_private.gtt+i-1);
@@ -908,12 +947,9 @@ static int intel_fake_agp_remove_entries(struct agp_memory *mem,
908 if (mem->page_count == 0) 947 if (mem->page_count == 0)
909 return 0; 948 return 0;
910 949
911 if (intel_private.base.do_idle_maps)
912 return -ENODEV;
913
914 intel_gtt_clear_range(pg_start, mem->page_count); 950 intel_gtt_clear_range(pg_start, mem->page_count);
915 951
916 if (intel_private.base.needs_dmar) { 952 if (intel_private.needs_dmar) {
917 intel_gtt_unmap_memory(mem->sg_list, mem->num_sg); 953 intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
918 mem->sg_list = NULL; 954 mem->sg_list = NULL;
919 mem->num_sg = 0; 955 mem->num_sg = 0;
@@ -1070,25 +1106,6 @@ static void i965_write_entry(dma_addr_t addr,
1070 writel(addr | pte_flags, intel_private.gtt + entry); 1106 writel(addr | pte_flags, intel_private.gtt + entry);
1071} 1107}
1072 1108
1073/* Certain Gen5 chipsets require require idling the GPU before
1074 * unmapping anything from the GTT when VT-d is enabled.
1075 */
1076static inline int needs_idle_maps(void)
1077{
1078#ifdef CONFIG_INTEL_IOMMU
1079 const unsigned short gpu_devid = intel_private.pcidev->device;
1080
1081 /* Query intel_iommu to see if we need the workaround. Presumably that
1082 * was loaded first.
1083 */
1084 if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
1085 gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
1086 intel_iommu_gfx_mapped)
1087 return 1;
1088#endif
1089 return 0;
1090}
1091
1092static int i9xx_setup(void) 1109static int i9xx_setup(void)
1093{ 1110{
1094 u32 reg_addr, gtt_addr; 1111 u32 reg_addr, gtt_addr;
@@ -1116,9 +1133,6 @@ static int i9xx_setup(void)
1116 break; 1133 break;
1117 } 1134 }
1118 1135
1119 if (needs_idle_maps())
1120 intel_private.base.do_idle_maps = 1;
1121
1122 intel_i9xx_setup_flush(); 1136 intel_i9xx_setup_flush();
1123 1137
1124 return 0; 1138 return 0;
@@ -1390,9 +1404,13 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
1390} 1404}
1391EXPORT_SYMBOL(intel_gmch_probe); 1405EXPORT_SYMBOL(intel_gmch_probe);
1392 1406
1393struct intel_gtt *intel_gtt_get(void) 1407void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
1408 phys_addr_t *mappable_base, unsigned long *mappable_end)
1394{ 1409{
1395 return &intel_private.base; 1410 *gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
1411 *stolen_size = intel_private.stolen_size;
1412 *mappable_base = intel_private.gma_bus_addr;
1413 *mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT;
1396} 1414}
1397EXPORT_SYMBOL(intel_gtt_get); 1415EXPORT_SYMBOL(intel_gtt_get);
1398 1416
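
For illustration only, not part of the patch: the hunk above turns intel_gtt_get() from a function returning a pointer to the private struct intel_gtt into one that fills four out-parameters. A minimal sketch of how a caller might consume the new signature; the function name and the pr_info line are invented, and the header path is assumed to be this era's include/drm/intel-gtt.h.

	#include <linux/printk.h>
	#include <drm/intel-gtt.h>	/* assumed location of intel_gtt_get() */

	static void example_probe_gtt(void)
	{
		size_t gtt_total, stolen_size;
		phys_addr_t mappable_base;
		unsigned long mappable_end;

		intel_gtt_get(&gtt_total, &stolen_size,
			      &mappable_base, &mappable_end);

		/* Sizes arrive in bytes (entry counts were shifted by
		 * PAGE_SHIFT above), so an allocator can use them directly. */
		pr_info("GTT: %zu bytes, %zu stolen, mappable base %pa\n",
			gtt_total, stolen_size, &mappable_base);
	}
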
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index cc9277885dd0..30879df3daea 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1 +1 @@
obj-y += drm/ vga/ stub/ obj-y += drm/ vga/
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 983201b450f1..1e82882da9de 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -7,6 +7,7 @@
7menuconfig DRM 7menuconfig DRM
8 tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)" 8 tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
9 depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU 9 depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU
10 select HDMI
10 select I2C 11 select I2C
11 select I2C_ALGOBIT 12 select I2C_ALGOBIT
12 select DMA_SHARED_BUFFER 13 select DMA_SHARED_BUFFER
@@ -69,6 +70,8 @@ config DRM_KMS_CMA_HELPER
69 help 70 help
70 Choose this if you need the KMS CMA helper functions 71 Choose this if you need the KMS CMA helper functions
71 72
73source "drivers/gpu/drm/i2c/Kconfig"
74
72config DRM_TDFX 75config DRM_TDFX
73 tristate "3dfx Banshee/Voodoo3+" 76 tristate "3dfx Banshee/Voodoo3+"
74 depends on DRM && PCI 77 depends on DRM && PCI
@@ -96,6 +99,7 @@ config DRM_RADEON
96 select DRM_TTM 99 select DRM_TTM
97 select POWER_SUPPLY 100 select POWER_SUPPLY
98 select HWMON 101 select HWMON
102 select BACKLIGHT_CLASS_DEVICE
99 help 103 help
100 Choose this option if you have an ATI Radeon graphics card. There 104 Choose this option if you have an ATI Radeon graphics card. There
101 are both PCI and AGP versions. You don't need to choose this to 105 are both PCI and AGP versions. You don't need to choose this to
@@ -212,3 +216,7 @@ source "drivers/gpu/drm/cirrus/Kconfig"
212source "drivers/gpu/drm/shmobile/Kconfig" 216source "drivers/gpu/drm/shmobile/Kconfig"
213 217
214source "drivers/gpu/drm/tegra/Kconfig" 218source "drivers/gpu/drm/tegra/Kconfig"
219
220source "drivers/gpu/drm/omapdrm/Kconfig"
221
222source "drivers/gpu/drm/tilcdc/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 6f58c81cfcbc..0d59b24f8d23 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -50,4 +50,6 @@ obj-$(CONFIG_DRM_UDL) += udl/
50obj-$(CONFIG_DRM_AST) += ast/ 50obj-$(CONFIG_DRM_AST) += ast/
51obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/ 51obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
52obj-$(CONFIG_DRM_TEGRA) += tegra/ 52obj-$(CONFIG_DRM_TEGRA) += tegra/
53obj-$(CONFIG_DRM_OMAP) += omapdrm/
54obj-$(CONFIG_DRM_TILCDC) += tilcdc/
53obj-y += i2c/ 55obj-y += i2c/
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 2d2c2f8d6dc6..df0d0a08097a 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -94,9 +94,9 @@ static int ast_drm_thaw(struct drm_device *dev)
94 ast_post_gpu(dev); 94 ast_post_gpu(dev);
95 95
96 drm_mode_config_reset(dev); 96 drm_mode_config_reset(dev);
97 mutex_lock(&dev->mode_config.mutex); 97 drm_modeset_lock_all(dev);
98 drm_helper_resume_force_mode(dev); 98 drm_helper_resume_force_mode(dev);
99 mutex_unlock(&dev->mode_config.mutex); 99 drm_modeset_unlock_all(dev);
100 100
101 console_lock(); 101 console_lock();
102 ast_fbdev_set_suspend(dev, 0); 102 ast_fbdev_set_suspend(dev, 0);
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 5ccf984f063a..528429252f0f 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -98,6 +98,8 @@ struct ast_private {
98 98
99 struct drm_gem_object *cursor_cache; 99 struct drm_gem_object *cursor_cache;
100 uint64_t cursor_cache_gpu_addr; 100 uint64_t cursor_cache_gpu_addr;
101 /* Access to this cache is protected by the crtc->mutex of the only crtc
102 * we have. */
101 struct ttm_bo_kmap_obj cache_kmap; 103 struct ttm_bo_kmap_obj cache_kmap;
102 int next_cursor; 104 int next_cursor;
103}; 105};
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index d9ec77959dff..34931fe7d2c5 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -40,6 +40,7 @@
40#include <drm/drmP.h> 40#include <drm/drmP.h>
41#include <drm/drm_crtc.h> 41#include <drm/drm_crtc.h>
42#include <drm/drm_fb_helper.h> 42#include <drm/drm_fb_helper.h>
43#include <drm/drm_crtc_helper.h>
43#include "ast_drv.h" 44#include "ast_drv.h"
44 45
45static void ast_dirty_update(struct ast_fbdev *afbdev, 46static void ast_dirty_update(struct ast_fbdev *afbdev,
@@ -145,9 +146,10 @@ static int astfb_create_object(struct ast_fbdev *afbdev,
145 return ret; 146 return ret;
146} 147}
147 148
148static int astfb_create(struct ast_fbdev *afbdev, 149static int astfb_create(struct drm_fb_helper *helper,
149 struct drm_fb_helper_surface_size *sizes) 150 struct drm_fb_helper_surface_size *sizes)
150{ 151{
152 struct ast_fbdev *afbdev = (struct ast_fbdev *)helper;
151 struct drm_device *dev = afbdev->helper.dev; 153 struct drm_device *dev = afbdev->helper.dev;
152 struct drm_mode_fb_cmd2 mode_cmd; 154 struct drm_mode_fb_cmd2 mode_cmd;
153 struct drm_framebuffer *fb; 155 struct drm_framebuffer *fb;
@@ -248,26 +250,10 @@ static void ast_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
248 *blue = ast_crtc->lut_b[regno] << 8; 250 *blue = ast_crtc->lut_b[regno] << 8;
249} 251}
250 252
251static int ast_find_or_create_single(struct drm_fb_helper *helper,
252 struct drm_fb_helper_surface_size *sizes)
253{
254 struct ast_fbdev *afbdev = (struct ast_fbdev *)helper;
255 int new_fb = 0;
256 int ret;
257
258 if (!helper->fb) {
259 ret = astfb_create(afbdev, sizes);
260 if (ret)
261 return ret;
262 new_fb = 1;
263 }
264 return new_fb;
265}
266
267static struct drm_fb_helper_funcs ast_fb_helper_funcs = { 253static struct drm_fb_helper_funcs ast_fb_helper_funcs = {
268 .gamma_set = ast_fb_gamma_set, 254 .gamma_set = ast_fb_gamma_set,
269 .gamma_get = ast_fb_gamma_get, 255 .gamma_get = ast_fb_gamma_get,
270 .fb_probe = ast_find_or_create_single, 256 .fb_probe = astfb_create,
271}; 257};
272 258
273static void ast_fbdev_destroy(struct drm_device *dev, 259static void ast_fbdev_destroy(struct drm_device *dev,
@@ -290,6 +276,7 @@ static void ast_fbdev_destroy(struct drm_device *dev,
290 drm_fb_helper_fini(&afbdev->helper); 276 drm_fb_helper_fini(&afbdev->helper);
291 277
292 vfree(afbdev->sysram); 278 vfree(afbdev->sysram);
279 drm_framebuffer_unregister_private(&afb->base);
293 drm_framebuffer_cleanup(&afb->base); 280 drm_framebuffer_cleanup(&afb->base);
294} 281}
295 282
@@ -313,6 +300,10 @@ int ast_fbdev_init(struct drm_device *dev)
313 } 300 }
314 301
315 drm_fb_helper_single_add_all_connectors(&afbdev->helper); 302 drm_fb_helper_single_add_all_connectors(&afbdev->helper);
303
304 /* disable all the possible outputs/crtcs before entering KMS mode */
305 drm_helper_disable_unused_functions(dev);
306
316 drm_fb_helper_initial_config(&afbdev->helper, 32); 307 drm_fb_helper_initial_config(&afbdev->helper, 32);
317 return 0; 308 return 0;
318} 309}
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index f668e6cc0f7a..f60fd7bd1183 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -246,16 +246,8 @@ static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
246 kfree(fb); 246 kfree(fb);
247} 247}
248 248
249static int ast_user_framebuffer_create_handle(struct drm_framebuffer *fb,
250 struct drm_file *file,
251 unsigned int *handle)
252{
253 return -EINVAL;
254}
255
256static const struct drm_framebuffer_funcs ast_fb_funcs = { 249static const struct drm_framebuffer_funcs ast_fb_funcs = {
257 .destroy = ast_user_framebuffer_destroy, 250 .destroy = ast_user_framebuffer_destroy,
258 .create_handle = ast_user_framebuffer_create_handle,
259}; 251};
260 252
261 253
@@ -266,13 +258,13 @@ int ast_framebuffer_init(struct drm_device *dev,
266{ 258{
267 int ret; 259 int ret;
268 260
261 drm_helper_mode_fill_fb_struct(&ast_fb->base, mode_cmd);
262 ast_fb->obj = obj;
269 ret = drm_framebuffer_init(dev, &ast_fb->base, &ast_fb_funcs); 263 ret = drm_framebuffer_init(dev, &ast_fb->base, &ast_fb_funcs);
270 if (ret) { 264 if (ret) {
271 DRM_ERROR("framebuffer init failed %d\n", ret); 265 DRM_ERROR("framebuffer init failed %d\n", ret);
272 return ret; 266 return ret;
273 } 267 }
274 drm_helper_mode_fill_fb_struct(&ast_fb->base, mode_cmd);
275 ast_fb->obj = obj;
276 return 0; 268 return 0;
277} 269}
278 270
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 6c6b4c87d309..e25afccaf85b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -11,6 +11,7 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <drm/drmP.h> 12#include <drm/drmP.h>
13#include <drm/drm_fb_helper.h> 13#include <drm/drm_fb_helper.h>
14#include <drm/drm_crtc_helper.h>
14 15
15#include <linux/fb.h> 16#include <linux/fb.h>
16 17
@@ -120,9 +121,10 @@ static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
120 return ret; 121 return ret;
121} 122}
122 123
123static int cirrusfb_create(struct cirrus_fbdev *gfbdev, 124static int cirrusfb_create(struct drm_fb_helper *helper,
124 struct drm_fb_helper_surface_size *sizes) 125 struct drm_fb_helper_surface_size *sizes)
125{ 126{
127 struct cirrus_fbdev *gfbdev = (struct cirrus_fbdev *)helper;
126 struct drm_device *dev = gfbdev->helper.dev; 128 struct drm_device *dev = gfbdev->helper.dev;
127 struct cirrus_device *cdev = gfbdev->helper.dev->dev_private; 129 struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
128 struct fb_info *info; 130 struct fb_info *info;
@@ -219,23 +221,6 @@ out_iounmap:
219 return ret; 221 return ret;
220} 222}
221 223
222static int cirrus_fb_find_or_create_single(struct drm_fb_helper *helper,
223 struct drm_fb_helper_surface_size
224 *sizes)
225{
226 struct cirrus_fbdev *gfbdev = (struct cirrus_fbdev *)helper;
227 int new_fb = 0;
228 int ret;
229
230 if (!helper->fb) {
231 ret = cirrusfb_create(gfbdev, sizes);
232 if (ret)
233 return ret;
234 new_fb = 1;
235 }
236 return new_fb;
237}
238
239static int cirrus_fbdev_destroy(struct drm_device *dev, 224static int cirrus_fbdev_destroy(struct drm_device *dev,
240 struct cirrus_fbdev *gfbdev) 225 struct cirrus_fbdev *gfbdev)
241{ 226{
@@ -258,6 +243,7 @@ static int cirrus_fbdev_destroy(struct drm_device *dev,
258 243
259 vfree(gfbdev->sysram); 244 vfree(gfbdev->sysram);
260 drm_fb_helper_fini(&gfbdev->helper); 245 drm_fb_helper_fini(&gfbdev->helper);
246 drm_framebuffer_unregister_private(&gfb->base);
261 drm_framebuffer_cleanup(&gfb->base); 247 drm_framebuffer_cleanup(&gfb->base);
262 248
263 return 0; 249 return 0;
@@ -266,7 +252,7 @@ static int cirrus_fbdev_destroy(struct drm_device *dev,
266static struct drm_fb_helper_funcs cirrus_fb_helper_funcs = { 252static struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
267 .gamma_set = cirrus_crtc_fb_gamma_set, 253 .gamma_set = cirrus_crtc_fb_gamma_set,
268 .gamma_get = cirrus_crtc_fb_gamma_get, 254 .gamma_get = cirrus_crtc_fb_gamma_get,
269 .fb_probe = cirrus_fb_find_or_create_single, 255 .fb_probe = cirrusfb_create,
270}; 256};
271 257
272int cirrus_fbdev_init(struct cirrus_device *cdev) 258int cirrus_fbdev_init(struct cirrus_device *cdev)
@@ -290,6 +276,9 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)
290 return ret; 276 return ret;
291 } 277 }
292 drm_fb_helper_single_add_all_connectors(&gfbdev->helper); 278 drm_fb_helper_single_add_all_connectors(&gfbdev->helper);
279
280 /* disable all the possible outputs/crtcs before entering KMS mode */
281 drm_helper_disable_unused_functions(cdev->dev);
293 drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel); 282 drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel);
294 283
295 return 0; 284 return 0;
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 6a9b12e88d46..35cbae827771 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -23,16 +23,8 @@ static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
23 kfree(fb); 23 kfree(fb);
24} 24}
25 25
26static int cirrus_user_framebuffer_create_handle(struct drm_framebuffer *fb,
27 struct drm_file *file_priv,
28 unsigned int *handle)
29{
30 return 0;
31}
32
33static const struct drm_framebuffer_funcs cirrus_fb_funcs = { 26static const struct drm_framebuffer_funcs cirrus_fb_funcs = {
34 .destroy = cirrus_user_framebuffer_destroy, 27 .destroy = cirrus_user_framebuffer_destroy,
35 .create_handle = cirrus_user_framebuffer_create_handle,
36}; 28};
37 29
38int cirrus_framebuffer_init(struct drm_device *dev, 30int cirrus_framebuffer_init(struct drm_device *dev,
@@ -42,13 +34,13 @@ int cirrus_framebuffer_init(struct drm_device *dev,
42{ 34{
43 int ret; 35 int ret;
44 36
37 drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
38 gfb->obj = obj;
45 ret = drm_framebuffer_init(dev, &gfb->base, &cirrus_fb_funcs); 39 ret = drm_framebuffer_init(dev, &gfb->base, &cirrus_fb_funcs);
46 if (ret) { 40 if (ret) {
47 DRM_ERROR("drm_framebuffer_init failed: %d\n", ret); 41 DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
48 return ret; 42 return ret;
49 } 43 }
50 drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
51 gfb->obj = obj;
52 return 0; 44 return 0;
53} 45}
54 46
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index f2d667b8bee2..3bdf2a650d9c 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -37,6 +37,54 @@
37#include <drm/drm_edid.h> 37#include <drm/drm_edid.h>
38#include <drm/drm_fourcc.h> 38#include <drm/drm_fourcc.h>
39 39
40/**
41 * drm_modeset_lock_all - take all modeset locks
42 * @dev: drm device
43 *
44 * This function takes all modeset locks, suitable where a more fine-grained
45 * scheme isn't (yet) implemented.
46 */
47void drm_modeset_lock_all(struct drm_device *dev)
48{
49 struct drm_crtc *crtc;
50
51 mutex_lock(&dev->mode_config.mutex);
52
53 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
54 mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex);
55}
56EXPORT_SYMBOL(drm_modeset_lock_all);
57
58/**
59 * drm_modeset_unlock_all - drop all modeset locks
60 * @dev: device
61 */
62void drm_modeset_unlock_all(struct drm_device *dev)
63{
64 struct drm_crtc *crtc;
65
66 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
67 mutex_unlock(&crtc->mutex);
68
69 mutex_unlock(&dev->mode_config.mutex);
70}
71EXPORT_SYMBOL(drm_modeset_unlock_all);
72
73/**
74 * drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
75 * @dev: device
76 */
77void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
78{
79 struct drm_crtc *crtc;
80
81 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
82 WARN_ON(!mutex_is_locked(&crtc->mutex));
83
84 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
85}
86EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
87
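
For illustration only: the coarse-grained locking pattern these three helpers establish, replacing the open-coded mode_config.mutex pairs throughout this series - the ast resume path further down is converted exactly like this. The function name is invented.

	static void example_resume_modeset(struct drm_device *dev)
	{
		/* Grabs mode_config.mutex plus every crtc->mutex. */
		drm_modeset_lock_all(dev);

		/* Code touching global modeset state may now assert: */
		drm_warn_on_modeset_not_all_locked(dev);

		drm_helper_resume_force_mode(dev);

		drm_modeset_unlock_all(dev);
	}
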
40/* Avoid boilerplate. I'm tired of typing. */ 88/* Avoid boilerplate. I'm tired of typing. */
41#define DRM_ENUM_NAME_FN(fnname, list) \ 89#define DRM_ENUM_NAME_FN(fnname, list) \
42 char *fnname(int val) \ 90 char *fnname(int val) \
@@ -203,12 +251,10 @@ char *drm_get_connector_status_name(enum drm_connector_status status)
203} 251}
204 252
205/** 253/**
206 * drm_mode_object_get - allocate a new identifier 254 * drm_mode_object_get - allocate a new modeset identifier
207 * @dev: DRM device 255 * @dev: DRM device
208 * @ptr: object pointer, used to generate unique ID 256 * @obj: object pointer, used to generate unique ID
209 * @type: object type 257 * @obj_type: object type
210 *
211 * LOCKING:
212 * 258 *
213 * Create a unique identifier based on @ptr in @dev's identifier space. Used 259 * Create a unique identifier based on @ptr in @dev's identifier space. Used
214 * for tracking modes, CRTCs and connectors. 260 * for tracking modes, CRTCs and connectors.
@@ -231,24 +277,27 @@ again:
231 277
232 mutex_lock(&dev->mode_config.idr_mutex); 278 mutex_lock(&dev->mode_config.idr_mutex);
233 ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id); 279 ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id);
280
281 if (!ret) {
282 /*
283 * Set up the object linking under the protection of the idr
284 * lock so that other users can't see inconsistent state.
285 */
286 obj->id = new_id;
287 obj->type = obj_type;
288 }
234 mutex_unlock(&dev->mode_config.idr_mutex); 289 mutex_unlock(&dev->mode_config.idr_mutex);
290
235 if (ret == -EAGAIN) 291 if (ret == -EAGAIN)
236 goto again; 292 goto again;
237 else if (ret)
238 return ret;
239 293
240 obj->id = new_id; 294 return ret;
241 obj->type = obj_type;
242 return 0;
243} 295}
244 296
245/** 297/**
246 * drm_mode_object_put - free an identifier 298 * drm_mode_object_put - free a modeset identifier
247 * @dev: DRM device 299 * @dev: DRM device
248 * @id: ID to free 300 * @object: object to free
249 *
250 * LOCKING:
251 * Caller must hold DRM mode_config lock.
252 * 301 *
253 * Free @id from @dev's unique identifier pool. 302 * Free @id from @dev's unique identifier pool.
254 */ 303 */
@@ -260,11 +309,24 @@ static void drm_mode_object_put(struct drm_device *dev,
260 mutex_unlock(&dev->mode_config.idr_mutex); 309 mutex_unlock(&dev->mode_config.idr_mutex);
261} 310}
262 311
312/**
313 * drm_mode_object_find - look up a drm object with static lifetime
314 * @dev: drm device
315 * @id: id of the mode object
316 * @type: type of the mode object
317 *
318 * Note that framebuffers cannot be looked up with this function - since those
319 * are reference counted, they need special treatment.
320 */
263struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, 321struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
264 uint32_t id, uint32_t type) 322 uint32_t id, uint32_t type)
265{ 323{
266 struct drm_mode_object *obj = NULL; 324 struct drm_mode_object *obj = NULL;
267 325
326 /* Framebuffers are reference counted and need their own lookup
327 * function.*/
328 WARN_ON(type == DRM_MODE_OBJECT_FB);
329
268 mutex_lock(&dev->mode_config.idr_mutex); 330 mutex_lock(&dev->mode_config.idr_mutex);
269 obj = idr_find(&dev->mode_config.crtc_idr, id); 331 obj = idr_find(&dev->mode_config.crtc_idr, id);
270 if (!obj || (obj->type != type) || (obj->id != id)) 332 if (!obj || (obj->type != type) || (obj->id != id))
@@ -278,13 +340,18 @@ EXPORT_SYMBOL(drm_mode_object_find);
278/** 340/**
279 * drm_framebuffer_init - initialize a framebuffer 341 * drm_framebuffer_init - initialize a framebuffer
280 * @dev: DRM device 342 * @dev: DRM device
281 * 343 * @fb: framebuffer to be initialized
282 * LOCKING: 344 * @funcs: ... with these functions
283 * Caller must hold mode config lock.
284 * 345 *
285 * Allocates an ID for the framebuffer's parent mode object, sets its mode 346 * Allocates an ID for the framebuffer's parent mode object, sets its mode
286 * functions & device file and adds it to the master fd list. 347 * functions & device file and adds it to the master fd list.
287 * 348 *
349 * IMPORTANT:
350 * This function publishes the fb and makes it available for concurrent access
351 * by other users, which means that by this point the fb _must_ be fully set up -
352 * since all the fb attributes are invariant over its lifetime, no further
353 * locking but only correct reference counting is required.
354 *
288 * RETURNS: 355 * RETURNS:
289 * Zero on success, error code on failure. 356 * Zero on success, error code on failure.
290 */ 357 */
@@ -293,16 +360,23 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
293{ 360{
294 int ret; 361 int ret;
295 362
363 mutex_lock(&dev->mode_config.fb_lock);
296 kref_init(&fb->refcount); 364 kref_init(&fb->refcount);
365 INIT_LIST_HEAD(&fb->filp_head);
366 fb->dev = dev;
367 fb->funcs = funcs;
297 368
298 ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB); 369 ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
299 if (ret) 370 if (ret)
300 return ret; 371 goto out;
372
373 /* Grab the idr reference. */
374 drm_framebuffer_reference(fb);
301 375
302 fb->dev = dev;
303 fb->funcs = funcs;
304 dev->mode_config.num_fb++; 376 dev->mode_config.num_fb++;
305 list_add(&fb->head, &dev->mode_config.fb_list); 377 list_add(&fb->head, &dev->mode_config.fb_list);
378out:
379 mutex_unlock(&dev->mode_config.fb_lock);
306 380
307 return 0; 381 return 0;
308} 382}
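
For illustration only: because drm_framebuffer_init() now publishes the fb for concurrent lookup, every attribute must be filled in before the call - the ast and cirrus hunks below reorder drm_helper_mode_fill_fb_struct() for exactly this reason. The wrapper struct and funcs table here are invented placeholders for a driver's own.

	struct example_framebuffer {		/* hypothetical wrapper */
		struct drm_framebuffer base;
		struct drm_gem_object *obj;
	};

	static const struct drm_framebuffer_funcs example_fb_funcs = {
		.destroy = drm_framebuffer_cleanup,	/* placeholder only */
	};

	static int example_framebuffer_init(struct drm_device *dev,
					    struct example_framebuffer *efb,
					    struct drm_mode_fb_cmd2 *mode_cmd,
					    struct drm_gem_object *obj)
	{
		/* 1: fully set up the fb while it is still private ... */
		drm_helper_mode_fill_fb_struct(&efb->base, mode_cmd);
		efb->obj = obj;

		/* 2: ... then publish; from here on others may look it up. */
		return drm_framebuffer_init(dev, &efb->base, &example_fb_funcs);
	}
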
@@ -315,23 +389,63 @@ static void drm_framebuffer_free(struct kref *kref)
315 fb->funcs->destroy(fb); 389 fb->funcs->destroy(fb);
316} 390}
317 391
392static struct drm_framebuffer *__drm_framebuffer_lookup(struct drm_device *dev,
393 uint32_t id)
394{
395 struct drm_mode_object *obj = NULL;
396 struct drm_framebuffer *fb;
397
398 mutex_lock(&dev->mode_config.idr_mutex);
399 obj = idr_find(&dev->mode_config.crtc_idr, id);
400 if (!obj || (obj->type != DRM_MODE_OBJECT_FB) || (obj->id != id))
401 fb = NULL;
402 else
403 fb = obj_to_fb(obj);
404 mutex_unlock(&dev->mode_config.idr_mutex);
405
406 return fb;
407}
408
409/**
410 * drm_framebuffer_lookup - look up a drm framebuffer and grab a reference
411 * @dev: drm device
412 * @id: id of the fb object
413 *
414 * If successful, this grabs an additional reference to the framebuffer -
415 * callers need to make sure to eventually unreference the returned framebuffer
416 * again.
417 */
418struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
419 uint32_t id)
420{
421 struct drm_framebuffer *fb;
422
423 mutex_lock(&dev->mode_config.fb_lock);
424 fb = __drm_framebuffer_lookup(dev, id);
425 if (fb)
426 kref_get(&fb->refcount);
427 mutex_unlock(&dev->mode_config.fb_lock);
428
429 return fb;
430}
431EXPORT_SYMBOL(drm_framebuffer_lookup);
432
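
For illustration only: drm_framebuffer_lookup() hands back its own reference, so every successful lookup must be paired with drm_framebuffer_unreference() - the drm_mode_setplane() rework below follows this exact pattern. The helper name is invented.

	static int example_inspect_fb(struct drm_device *dev, uint32_t fb_id)
	{
		struct drm_framebuffer *fb;

		fb = drm_framebuffer_lookup(dev, fb_id);	/* takes a ref */
		if (!fb)
			return -ENOENT;

		/* fb attributes are invariant over its lifetime, so they can
		 * be inspected here without holding any modeset lock. */

		drm_framebuffer_unreference(fb);		/* drop our ref */
		return 0;
	}
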
318/** 433/**
319 * drm_framebuffer_unreference - unref a framebuffer 434 * drm_framebuffer_unreference - unref a framebuffer
435 * @fb: framebuffer to unref
320 * 436 *
321 * LOCKING: 437 * This function decrements the fb's refcount and frees it if it drops to zero.
322 * Caller must hold mode config lock.
323 */ 438 */
324void drm_framebuffer_unreference(struct drm_framebuffer *fb) 439void drm_framebuffer_unreference(struct drm_framebuffer *fb)
325{ 440{
326 struct drm_device *dev = fb->dev;
327 DRM_DEBUG("FB ID: %d\n", fb->base.id); 441 DRM_DEBUG("FB ID: %d\n", fb->base.id);
328 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
329 kref_put(&fb->refcount, drm_framebuffer_free); 442 kref_put(&fb->refcount, drm_framebuffer_free);
330} 443}
331EXPORT_SYMBOL(drm_framebuffer_unreference); 444EXPORT_SYMBOL(drm_framebuffer_unreference);
332 445
333/** 446/**
334 * drm_framebuffer_reference - incr the fb refcnt 447 * drm_framebuffer_reference - incr the fb refcnt
448 * @fb: framebuffer
335 */ 449 */
336void drm_framebuffer_reference(struct drm_framebuffer *fb) 450void drm_framebuffer_reference(struct drm_framebuffer *fb)
337{ 451{
@@ -340,29 +454,74 @@ void drm_framebuffer_reference(struct drm_framebuffer *fb)
340} 454}
341EXPORT_SYMBOL(drm_framebuffer_reference); 455EXPORT_SYMBOL(drm_framebuffer_reference);
342 456
457static void drm_framebuffer_free_bug(struct kref *kref)
458{
459 BUG();
460}
461
462static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
463{
464 DRM_DEBUG("FB ID: %d\n", fb->base.id);
465 kref_put(&fb->refcount, drm_framebuffer_free_bug);
466}
467
468/* dev->mode_config.fb_lock must be held! */
469static void __drm_framebuffer_unregister(struct drm_device *dev,
470 struct drm_framebuffer *fb)
471{
472 mutex_lock(&dev->mode_config.idr_mutex);
473 idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
474 mutex_unlock(&dev->mode_config.idr_mutex);
475
476 fb->base.id = 0;
477
478 __drm_framebuffer_unreference(fb);
479}
480
481/**
482 * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
483 * @fb: fb to unregister
484 *
485 * Drivers need to call this when cleaning up driver-private framebuffers, e.g.
486 * those used for fbdev. Note that the caller must hold a reference of its own,
487 * i.e. the object may not be destroyed through this call (since it'll lead to a
488 * locking inversion).
489 */
490void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
491{
492 struct drm_device *dev = fb->dev;
493
494 mutex_lock(&dev->mode_config.fb_lock);
495 /* Mark fb as reaped and drop idr ref. */
496 __drm_framebuffer_unregister(dev, fb);
497 mutex_unlock(&dev->mode_config.fb_lock);
498}
499EXPORT_SYMBOL(drm_framebuffer_unregister_private);
500
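
For illustration only: the teardown ordering this helper enables for driver-private framebuffers, condensed from the ast_fbdev_destroy()/cirrus_fbdev_destroy() hunks below. The function name is invented.

	static void example_fbdev_fb_fini(struct drm_framebuffer *fb)
	{
		/* The caller still owns a reference, so this cannot be the
		 * final unref - exactly the constraint documented above. */
		drm_framebuffer_unregister_private(fb);

		drm_framebuffer_cleanup(fb);

		/* The driver drops its final reference (or frees the
		 * embedding structure) afterwards. */
	}
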
343/** 501/**
344 * drm_framebuffer_cleanup - remove a framebuffer object 502 * drm_framebuffer_cleanup - remove a framebuffer object
345 * @fb: framebuffer to remove 503 * @fb: framebuffer to remove
346 * 504 *
347 * LOCKING: 505 * Cleanup references to a user-created framebuffer. This function is intended
348 * Caller must hold mode config lock. 506 * to be used from the driver's ->destroy callback.
507 *
508 * Note that this function does not remove the fb from active usage - if it is
509 * still used anywhere, hilarity can ensue since userspace could call getfb on
510 * the id and get back -EINVAL. Obviously no concern at driver unload time.
349 * 511 *
350 * Scans all the CRTCs in @dev's mode_config. If they're using @fb, removes 512 * Also, the framebuffer will not be removed from the lookup idr - for
351 * it, setting it to NULL. 513 * user-created framebuffers this will happen in the rmfb ioctl. For
514 * driver-private objects (e.g. for fbdev) drivers need to explicitly call
515 * drm_framebuffer_unregister_private.
352 */ 516 */
353void drm_framebuffer_cleanup(struct drm_framebuffer *fb) 517void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
354{ 518{
355 struct drm_device *dev = fb->dev; 519 struct drm_device *dev = fb->dev;
356 /* 520
357 * This could be moved to drm_framebuffer_remove(), but for 521 mutex_lock(&dev->mode_config.fb_lock);
358 * debugging is nice to keep around the list of fb's that are
359 * no longer associated w/ a drm_file but are not unreferenced
360 * yet. (i915 and omapdrm have debugfs files which will show
361 * this.)
362 */
363 drm_mode_object_put(dev, &fb->base);
364 list_del(&fb->head); 522 list_del(&fb->head);
365 dev->mode_config.num_fb--; 523 dev->mode_config.num_fb--;
524 mutex_unlock(&dev->mode_config.fb_lock);
366} 525}
367EXPORT_SYMBOL(drm_framebuffer_cleanup); 526EXPORT_SYMBOL(drm_framebuffer_cleanup);
368 527
@@ -370,11 +529,13 @@ EXPORT_SYMBOL(drm_framebuffer_cleanup);
370 * drm_framebuffer_remove - remove and unreference a framebuffer object 529 * drm_framebuffer_remove - remove and unreference a framebuffer object
371 * @fb: framebuffer to remove 530 * @fb: framebuffer to remove
372 * 531 *
373 * LOCKING:
374 * Caller must hold mode config lock.
375 *
376 * Scans all the CRTCs and planes in @dev's mode_config. If they're 532 * Scans all the CRTCs and planes in @dev's mode_config. If they're
377 * using @fb, removes it, setting it to NULL. 533 * using @fb, removes it, setting it to NULL. Then drops the reference to the
534 * passed-in framebuffer. Might take the modeset locks.
535 *
536 * Note that this function optimizes the cleanup away if the caller holds the
537 * last reference to the framebuffer. It is also guaranteed to not take the
538 * modeset locks in this case.
378 */ 539 */
379void drm_framebuffer_remove(struct drm_framebuffer *fb) 540void drm_framebuffer_remove(struct drm_framebuffer *fb)
380{ 541{
@@ -384,33 +545,53 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
384 struct drm_mode_set set; 545 struct drm_mode_set set;
385 int ret; 546 int ret;
386 547
387 /* remove from any CRTC */ 548 WARN_ON(!list_empty(&fb->filp_head));
388 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 549
389 if (crtc->fb == fb) { 550 /*
390 /* should turn off the crtc */ 551 * drm ABI mandates that we remove any deleted framebuffers from active
391 memset(&set, 0, sizeof(struct drm_mode_set)); 552 * usage. But since most sane clients only remove framebuffers they no
392 set.crtc = crtc; 553 * longer need, try to optimize this away.
393 set.fb = NULL; 554 *
394 ret = crtc->funcs->set_config(&set); 555 * Since we're holding a reference ourselves, observing a refcount of 1
395 if (ret) 556 * means that we're the last holder and can skip it. Also, the refcount
396 DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc); 557 * can never increase from 1 again, so we don't need any barriers or
558 * locks.
559 *
560 * Note that userspace could try to race with us and instate a new
561 * usage _after_ we've cleared all current ones. The end result will be an
562 * in-use fb with fb-id == 0. Userspace is allowed to shoot itself in the
563 * foot in this manner.
564 */
565 if (atomic_read(&fb->refcount.refcount) > 1) {
566 drm_modeset_lock_all(dev);
567 /* remove from any CRTC */
568 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
569 if (crtc->fb == fb) {
570 /* should turn off the crtc */
571 memset(&set, 0, sizeof(struct drm_mode_set));
572 set.crtc = crtc;
573 set.fb = NULL;
574 ret = drm_mode_set_config_internal(&set);
575 if (ret)
576 DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
577 }
397 } 578 }
398 }
399 579
400 list_for_each_entry(plane, &dev->mode_config.plane_list, head) { 580 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
401 if (plane->fb == fb) { 581 if (plane->fb == fb) {
402 /* should turn off the crtc */ 582 /* should turn off the crtc */
403 ret = plane->funcs->disable_plane(plane); 583 ret = plane->funcs->disable_plane(plane);
404 if (ret) 584 if (ret)
405 DRM_ERROR("failed to disable plane with busy fb\n"); 585 DRM_ERROR("failed to disable plane with busy fb\n");
406 /* disconnect the plane from the fb and crtc: */ 586 /* disconnect the plane from the fb and crtc: */
407 plane->fb = NULL; 587 __drm_framebuffer_unreference(plane->fb);
408 plane->crtc = NULL; 588 plane->fb = NULL;
589 plane->crtc = NULL;
590 }
409 } 591 }
592 drm_modeset_unlock_all(dev);
410 } 593 }
411 594
412 list_del(&fb->filp_head);
413
414 drm_framebuffer_unreference(fb); 595 drm_framebuffer_unreference(fb);
415} 596}
416EXPORT_SYMBOL(drm_framebuffer_remove); 597EXPORT_SYMBOL(drm_framebuffer_remove);
@@ -421,9 +602,6 @@ EXPORT_SYMBOL(drm_framebuffer_remove);
421 * @crtc: CRTC object to init 602 * @crtc: CRTC object to init
422 * @funcs: callbacks for the new CRTC 603 * @funcs: callbacks for the new CRTC
423 * 604 *
424 * LOCKING:
425 * Takes mode_config lock.
426 *
427 * Inits a new object created as base part of a driver crtc object. 605 * Inits a new object created as base part of a driver crtc object.
428 * 606 *
429 * RETURNS: 607 * RETURNS:
@@ -438,7 +616,9 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
438 crtc->funcs = funcs; 616 crtc->funcs = funcs;
439 crtc->invert_dimensions = false; 617 crtc->invert_dimensions = false;
440 618
441 mutex_lock(&dev->mode_config.mutex); 619 drm_modeset_lock_all(dev);
620 mutex_init(&crtc->mutex);
621 mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex);
442 622
443 ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC); 623 ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
444 if (ret) 624 if (ret)
@@ -450,7 +630,7 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
450 dev->mode_config.num_crtc++; 630 dev->mode_config.num_crtc++;
451 631
452 out: 632 out:
453 mutex_unlock(&dev->mode_config.mutex); 633 drm_modeset_unlock_all(dev);
454 634
455 return ret; 635 return ret;
456} 636}
@@ -460,9 +640,6 @@ EXPORT_SYMBOL(drm_crtc_init);
460 * drm_crtc_cleanup - Cleans up the core crtc usage. 640 * drm_crtc_cleanup - Cleans up the core crtc usage.
461 * @crtc: CRTC to cleanup 641 * @crtc: CRTC to cleanup
462 * 642 *
463 * LOCKING:
464 * Caller must hold mode config lock.
465 *
466 * Cleanup @crtc. Removes from drm modesetting space 643 * Cleanup @crtc. Removes from drm modesetting space
467 * does NOT free object, caller does that. 644 * does NOT free object, caller does that.
468 */ 645 */
@@ -484,9 +661,6 @@ EXPORT_SYMBOL(drm_crtc_cleanup);
484 * @connector: connector the new mode 661 * @connector: connector the new mode
485 * @mode: mode data 662 * @mode: mode data
486 * 663 *
487 * LOCKING:
488 * Caller must hold mode config lock.
489 *
490 * Add @mode to @connector's mode list for later use. 664 * Add @mode to @connector's mode list for later use.
491 */ 665 */
492void drm_mode_probed_add(struct drm_connector *connector, 666void drm_mode_probed_add(struct drm_connector *connector,
@@ -501,9 +675,6 @@ EXPORT_SYMBOL(drm_mode_probed_add);
501 * @connector: connector list to modify 675 * @connector: connector list to modify
502 * @mode: mode to remove 676 * @mode: mode to remove
503 * 677 *
504 * LOCKING:
505 * Caller must hold mode config lock.
506 *
507 * Remove @mode from @connector's mode list, then free it. 678 * Remove @mode from @connector's mode list, then free it.
508 */ 679 */
509void drm_mode_remove(struct drm_connector *connector, 680void drm_mode_remove(struct drm_connector *connector,
@@ -519,10 +690,7 @@ EXPORT_SYMBOL(drm_mode_remove);
519 * @dev: DRM device 690 * @dev: DRM device
520 * @connector: the connector to init 691 * @connector: the connector to init
521 * @funcs: callbacks for this connector 692 * @funcs: callbacks for this connector
522 * @name: user visible name of the connector 693 * @connector_type: user visible type of the connector
523 *
524 * LOCKING:
525 * Takes mode config lock.
526 * 694 *
527 * Initialises a preallocated connector. Connectors should be 695 * Initialises a preallocated connector. Connectors should be
528 * subclassed as part of driver connector objects. 696 * subclassed as part of driver connector objects.
@@ -537,7 +705,7 @@ int drm_connector_init(struct drm_device *dev,
537{ 705{
538 int ret; 706 int ret;
539 707
540 mutex_lock(&dev->mode_config.mutex); 708 drm_modeset_lock_all(dev);
541 709
542 ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR); 710 ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
543 if (ret) 711 if (ret)
@@ -567,7 +735,7 @@ int drm_connector_init(struct drm_device *dev,
567 dev->mode_config.dpms_property, 0); 735 dev->mode_config.dpms_property, 0);
568 736
569 out: 737 out:
570 mutex_unlock(&dev->mode_config.mutex); 738 drm_modeset_unlock_all(dev);
571 739
572 return ret; 740 return ret;
573} 741}
@@ -577,9 +745,6 @@ EXPORT_SYMBOL(drm_connector_init);
577 * drm_connector_cleanup - cleans up an initialised connector 745 * drm_connector_cleanup - cleans up an initialised connector
578 * @connector: connector to cleanup 746 * @connector: connector to cleanup
579 * 747 *
580 * LOCKING:
581 * Takes mode config lock.
582 *
583 * Cleans up the connector but doesn't free the object. 748 * Cleans up the connector but doesn't free the object.
584 */ 749 */
585void drm_connector_cleanup(struct drm_connector *connector) 750void drm_connector_cleanup(struct drm_connector *connector)
@@ -596,11 +761,9 @@ void drm_connector_cleanup(struct drm_connector *connector)
596 list_for_each_entry_safe(mode, t, &connector->user_modes, head) 761 list_for_each_entry_safe(mode, t, &connector->user_modes, head)
597 drm_mode_remove(connector, mode); 762 drm_mode_remove(connector, mode);
598 763
599 mutex_lock(&dev->mode_config.mutex);
600 drm_mode_object_put(dev, &connector->base); 764 drm_mode_object_put(dev, &connector->base);
601 list_del(&connector->head); 765 list_del(&connector->head);
602 dev->mode_config.num_connector--; 766 dev->mode_config.num_connector--;
603 mutex_unlock(&dev->mode_config.mutex);
604} 767}
605EXPORT_SYMBOL(drm_connector_cleanup); 768EXPORT_SYMBOL(drm_connector_cleanup);
606 769
@@ -622,7 +785,7 @@ int drm_encoder_init(struct drm_device *dev,
622{ 785{
623 int ret; 786 int ret;
624 787
625 mutex_lock(&dev->mode_config.mutex); 788 drm_modeset_lock_all(dev);
626 789
627 ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER); 790 ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
628 if (ret) 791 if (ret)
@@ -636,7 +799,7 @@ int drm_encoder_init(struct drm_device *dev,
636 dev->mode_config.num_encoder++; 799 dev->mode_config.num_encoder++;
637 800
638 out: 801 out:
639 mutex_unlock(&dev->mode_config.mutex); 802 drm_modeset_unlock_all(dev);
640 803
641 return ret; 804 return ret;
642} 805}
@@ -645,11 +808,11 @@ EXPORT_SYMBOL(drm_encoder_init);
645void drm_encoder_cleanup(struct drm_encoder *encoder) 808void drm_encoder_cleanup(struct drm_encoder *encoder)
646{ 809{
647 struct drm_device *dev = encoder->dev; 810 struct drm_device *dev = encoder->dev;
648 mutex_lock(&dev->mode_config.mutex); 811 drm_modeset_lock_all(dev);
649 drm_mode_object_put(dev, &encoder->base); 812 drm_mode_object_put(dev, &encoder->base);
650 list_del(&encoder->head); 813 list_del(&encoder->head);
651 dev->mode_config.num_encoder--; 814 dev->mode_config.num_encoder--;
652 mutex_unlock(&dev->mode_config.mutex); 815 drm_modeset_unlock_all(dev);
653} 816}
654EXPORT_SYMBOL(drm_encoder_cleanup); 817EXPORT_SYMBOL(drm_encoder_cleanup);
655 818
@@ -661,7 +824,7 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
661{ 824{
662 int ret; 825 int ret;
663 826
664 mutex_lock(&dev->mode_config.mutex); 827 drm_modeset_lock_all(dev);
665 828
666 ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE); 829 ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
667 if (ret) 830 if (ret)
@@ -695,7 +858,7 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
695 } 858 }
696 859
697 out: 860 out:
698 mutex_unlock(&dev->mode_config.mutex); 861 drm_modeset_unlock_all(dev);
699 862
700 return ret; 863 return ret;
701} 864}
@@ -705,7 +868,7 @@ void drm_plane_cleanup(struct drm_plane *plane)
705{ 868{
706 struct drm_device *dev = plane->dev; 869 struct drm_device *dev = plane->dev;
707 870
708 mutex_lock(&dev->mode_config.mutex); 871 drm_modeset_lock_all(dev);
709 kfree(plane->format_types); 872 kfree(plane->format_types);
710 drm_mode_object_put(dev, &plane->base); 873 drm_mode_object_put(dev, &plane->base);
711 /* if not added to a list, it must be a private plane */ 874 /* if not added to a list, it must be a private plane */
@@ -713,7 +876,7 @@ void drm_plane_cleanup(struct drm_plane *plane)
713 list_del(&plane->head); 876 list_del(&plane->head);
714 dev->mode_config.num_plane--; 877 dev->mode_config.num_plane--;
715 } 878 }
716 mutex_unlock(&dev->mode_config.mutex); 879 drm_modeset_unlock_all(dev);
717} 880}
718EXPORT_SYMBOL(drm_plane_cleanup); 881EXPORT_SYMBOL(drm_plane_cleanup);
719 882
@@ -721,9 +884,6 @@ EXPORT_SYMBOL(drm_plane_cleanup);
721 * drm_mode_create - create a new display mode 884 * drm_mode_create - create a new display mode
722 * @dev: DRM device 885 * @dev: DRM device
723 * 886 *
724 * LOCKING:
725 * Caller must hold DRM mode_config lock.
726 *
727 * Create a new drm_display_mode, give it an ID, and return it. 887 * Create a new drm_display_mode, give it an ID, and return it.
728 * 888 *
729 * RETURNS: 889 * RETURNS:
@@ -751,9 +911,6 @@ EXPORT_SYMBOL(drm_mode_create);
751 * @dev: DRM device 911 * @dev: DRM device
752 * @mode: mode to remove 912 * @mode: mode to remove
753 * 913 *
754 * LOCKING:
755 * Caller must hold mode config lock.
756 *
757 * Free @mode's unique identifier, then free it. 914 * Free @mode's unique identifier, then free it.
758 */ 915 */
759void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode) 916void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
@@ -978,16 +1135,19 @@ EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
978 * drm_mode_config_init - initialize DRM mode_configuration structure 1135 * drm_mode_config_init - initialize DRM mode_configuration structure
979 * @dev: DRM device 1136 * @dev: DRM device
980 * 1137 *
981 * LOCKING:
982 * None, should happen single threaded at init time.
983 *
984 * Initialize @dev's mode_config structure, used for tracking the graphics 1138 * Initialize @dev's mode_config structure, used for tracking the graphics
985 * configuration of @dev. 1139 * configuration of @dev.
1140 *
1141 * Since this initializes the modeset locks, no locking is possible, which is no
1142 * problem since this should happen single-threaded at init time. It is the
1143 * driver's problem to ensure this guarantee.
1144 *
986 */ 1145 */
987void drm_mode_config_init(struct drm_device *dev) 1146void drm_mode_config_init(struct drm_device *dev)
988{ 1147{
989 mutex_init(&dev->mode_config.mutex); 1148 mutex_init(&dev->mode_config.mutex);
990 mutex_init(&dev->mode_config.idr_mutex); 1149 mutex_init(&dev->mode_config.idr_mutex);
1150 mutex_init(&dev->mode_config.fb_lock);
991 INIT_LIST_HEAD(&dev->mode_config.fb_list); 1151 INIT_LIST_HEAD(&dev->mode_config.fb_list);
992 INIT_LIST_HEAD(&dev->mode_config.crtc_list); 1152 INIT_LIST_HEAD(&dev->mode_config.crtc_list);
993 INIT_LIST_HEAD(&dev->mode_config.connector_list); 1153 INIT_LIST_HEAD(&dev->mode_config.connector_list);
@@ -997,9 +1157,9 @@ void drm_mode_config_init(struct drm_device *dev)
997 INIT_LIST_HEAD(&dev->mode_config.plane_list); 1157 INIT_LIST_HEAD(&dev->mode_config.plane_list);
998 idr_init(&dev->mode_config.crtc_idr); 1158 idr_init(&dev->mode_config.crtc_idr);
999 1159
1000 mutex_lock(&dev->mode_config.mutex); 1160 drm_modeset_lock_all(dev);
1001 drm_mode_create_standard_connector_properties(dev); 1161 drm_mode_create_standard_connector_properties(dev);
1002 mutex_unlock(&dev->mode_config.mutex); 1162 drm_modeset_unlock_all(dev);
1003 1163
1004 /* Just to be sure */ 1164 /* Just to be sure */
1005 dev->mode_config.num_fb = 0; 1165 dev->mode_config.num_fb = 0;
@@ -1057,12 +1217,13 @@ EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
1057 * drm_mode_config_cleanup - free up DRM mode_config info 1217 * drm_mode_config_cleanup - free up DRM mode_config info
1058 * @dev: DRM device 1218 * @dev: DRM device
1059 * 1219 *
1060 * LOCKING:
1061 * Caller must hold mode config lock.
1062 *
1063 * Free up all the connectors and CRTCs associated with this DRM device, then 1220 * Free up all the connectors and CRTCs associated with this DRM device, then
1064 * free up the framebuffers and associated buffer objects. 1221 * free up the framebuffers and associated buffer objects.
1065 * 1222 *
1223 * Note that since this /should/ happen single-threaded at driver/device
1224 * teardown time, no locking is required. It's the driver's job to ensure that
1225 * this guarantee actually holds true.
1226 *
1066 * FIXME: cleanup any dangling user buffer objects too 1227 * FIXME: cleanup any dangling user buffer objects too
1067 */ 1228 */
1068void drm_mode_config_cleanup(struct drm_device *dev) 1229void drm_mode_config_cleanup(struct drm_device *dev)
@@ -1089,6 +1250,15 @@ void drm_mode_config_cleanup(struct drm_device *dev)
1089 drm_property_destroy(dev, property); 1250 drm_property_destroy(dev, property);
1090 } 1251 }
1091 1252
1253 /*
1254 * Single-threaded teardown context, so it's not required to grab the
1255 * fb_lock to protect against concurrent fb_list access. On the contrary, it
1256 * would actually deadlock with the drm_framebuffer_cleanup function.
1257 *
1258 * Also, if there are any framebuffers left, that's a driver leak now,
1259 * so politely WARN about this.
1260 */
1261 WARN_ON(!list_empty(&dev->mode_config.fb_list));
1092 list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) { 1262 list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
1093 drm_framebuffer_remove(fb); 1263 drm_framebuffer_remove(fb);
1094 } 1264 }
@@ -1112,9 +1282,6 @@ EXPORT_SYMBOL(drm_mode_config_cleanup);
1112 * @out: drm_mode_modeinfo struct to return to the user 1282 * @out: drm_mode_modeinfo struct to return to the user
1113 * @in: drm_display_mode to use 1283 * @in: drm_display_mode to use
1114 * 1284 *
1115 * LOCKING:
1116 * None.
1117 *
1118 * Convert a drm_display_mode into a drm_mode_modeinfo structure to return to 1285 * Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
1119 * the user. 1286 * the user.
1120 */ 1287 */
@@ -1151,9 +1318,6 @@ static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
1151 * @out: drm_display_mode to return to the user 1318 * @out: drm_display_mode to return to the user
1152 * @in: drm_mode_modeinfo to use 1319 * @in: drm_mode_modeinfo to use
1153 * 1320 *
1154 * LOCKING:
1155 * None.
1156 *
1157 * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to 1321 * Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
1158 * the caller. 1322 * the caller.
1159 * 1323 *
@@ -1188,13 +1352,9 @@ static int drm_crtc_convert_umode(struct drm_display_mode *out,
1188 1352
1189/** 1353/**
1190 * drm_mode_getresources - get graphics configuration 1354 * drm_mode_getresources - get graphics configuration
1191 * @inode: inode from the ioctl 1355 * @dev: drm device for the ioctl
1192 * @filp: file * from the ioctl 1356 * @data: data pointer for the ioctl
1193 * @cmd: cmd from ioctl 1357 * @file_priv: drm file for the ioctl call
1194 * @arg: arg from ioctl
1195 *
1196 * LOCKING:
1197 * Takes mode config lock.
1198 * 1358 *
1199 * Construct a set of configuration description structures and return 1359 * Construct a set of configuration description structures and return
1200 * them to the user, including CRTC, connector and framebuffer configuration. 1360 * them to the user, including CRTC, connector and framebuffer configuration.
@@ -1228,8 +1388,8 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
1228 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 1388 if (!drm_core_check_feature(dev, DRIVER_MODESET))
1229 return -EINVAL; 1389 return -EINVAL;
1230 1390
1231 mutex_lock(&dev->mode_config.mutex);
1232 1391
1392 mutex_lock(&file_priv->fbs_lock);
1233 /* 1393 /*
1234 * For the non-control nodes we need to limit the list of resources 1394 * For the non-control nodes we need to limit the list of resources
1235 * by IDs in the group list for this node 1395 * by IDs in the group list for this node
@@ -1237,6 +1397,23 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
1237 list_for_each(lh, &file_priv->fbs) 1397 list_for_each(lh, &file_priv->fbs)
1238 fb_count++; 1398 fb_count++;
1239 1399
1400 /* handle this in 4 parts */
1401 /* FBs */
1402 if (card_res->count_fbs >= fb_count) {
1403 copied = 0;
1404 fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
1405 list_for_each_entry(fb, &file_priv->fbs, filp_head) {
1406 if (put_user(fb->base.id, fb_id + copied)) {
1407 mutex_unlock(&file_priv->fbs_lock);
1408 return -EFAULT;
1409 }
1410 copied++;
1411 }
1412 }
1413 card_res->count_fbs = fb_count;
1414 mutex_unlock(&file_priv->fbs_lock);
1415
1416 drm_modeset_lock_all(dev);
1240 mode_group = &file_priv->master->minor->mode_group; 1417 mode_group = &file_priv->master->minor->mode_group;
1241 if (file_priv->master->minor->type == DRM_MINOR_CONTROL) { 1418 if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
1242 1419
@@ -1260,21 +1437,6 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
1260 card_res->max_width = dev->mode_config.max_width; 1437 card_res->max_width = dev->mode_config.max_width;
1261 card_res->min_width = dev->mode_config.min_width; 1438 card_res->min_width = dev->mode_config.min_width;
1262 1439
1263 /* handle this in 4 parts */
1264 /* FBs */
1265 if (card_res->count_fbs >= fb_count) {
1266 copied = 0;
1267 fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
1268 list_for_each_entry(fb, &file_priv->fbs, filp_head) {
1269 if (put_user(fb->base.id, fb_id + copied)) {
1270 ret = -EFAULT;
1271 goto out;
1272 }
1273 copied++;
1274 }
1275 }
1276 card_res->count_fbs = fb_count;
1277
1278 /* CRTCs */ 1440 /* CRTCs */
1279 if (card_res->count_crtcs >= crtc_count) { 1441 if (card_res->count_crtcs >= crtc_count) {
1280 copied = 0; 1442 copied = 0;
@@ -1370,19 +1532,15 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
1370 card_res->count_connectors, card_res->count_encoders); 1532 card_res->count_connectors, card_res->count_encoders);
1371 1533
1372out: 1534out:
1373 mutex_unlock(&dev->mode_config.mutex); 1535 drm_modeset_unlock_all(dev);
1374 return ret; 1536 return ret;
1375} 1537}
1376 1538
1377/** 1539/**
1378 * drm_mode_getcrtc - get CRTC configuration 1540 * drm_mode_getcrtc - get CRTC configuration
1379 * @inode: inode from the ioctl 1541 * @dev: drm device for the ioctl
1380 * @filp: file * from the ioctl 1542 * @data: data pointer for the ioctl
1381 * @cmd: cmd from ioctl 1543 * @file_priv: drm file for the ioctl call
1382 * @arg: arg from ioctl
1383 *
1384 * LOCKING:
1385 * Takes mode config lock.
1386 * 1544 *
1387 * Construct a CRTC configuration structure to return to the user. 1545 * Construct a CRTC configuration structure to return to the user.
1388 * 1546 *
@@ -1402,7 +1560,7 @@ int drm_mode_getcrtc(struct drm_device *dev,
1402 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 1560 if (!drm_core_check_feature(dev, DRIVER_MODESET))
1403 return -EINVAL; 1561 return -EINVAL;
1404 1562
1405 mutex_lock(&dev->mode_config.mutex); 1563 drm_modeset_lock_all(dev);
1406 1564
1407 obj = drm_mode_object_find(dev, crtc_resp->crtc_id, 1565 obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
1408 DRM_MODE_OBJECT_CRTC); 1566 DRM_MODE_OBJECT_CRTC);
@@ -1430,19 +1588,15 @@ int drm_mode_getcrtc(struct drm_device *dev,
1430 } 1588 }
1431 1589
1432out: 1590out:
1433 mutex_unlock(&dev->mode_config.mutex); 1591 drm_modeset_unlock_all(dev);
1434 return ret; 1592 return ret;
1435} 1593}
1436 1594
1437/** 1595/**
1438 * drm_mode_getconnector - get connector configuration 1596 * drm_mode_getconnector - get connector configuration
1439 * @inode: inode from the ioctl 1597 * @dev: drm device for the ioctl
1440 * @filp: file * from the ioctl 1598 * @data: data pointer for the ioctl
1441 * @cmd: cmd from ioctl 1599 * @file_priv: drm file for the ioctl call
1442 * @arg: arg from ioctl
1443 *
1444 * LOCKING:
1445 * Takes mode config lock.
1446 * 1600 *
1447 * Construct a connector configuration structure to return to the user. 1601 * Construct a connector configuration structure to return to the user.
1448 * 1602 *
@@ -1575,6 +1729,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
1575 1729
1576out: 1730out:
1577 mutex_unlock(&dev->mode_config.mutex); 1731 mutex_unlock(&dev->mode_config.mutex);
1732
1578 return ret; 1733 return ret;
1579} 1734}
1580 1735
@@ -1589,7 +1744,7 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
1589 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 1744 if (!drm_core_check_feature(dev, DRIVER_MODESET))
1590 return -EINVAL; 1745 return -EINVAL;
1591 1746
1592 mutex_lock(&dev->mode_config.mutex); 1747 drm_modeset_lock_all(dev);
1593 obj = drm_mode_object_find(dev, enc_resp->encoder_id, 1748 obj = drm_mode_object_find(dev, enc_resp->encoder_id,
1594 DRM_MODE_OBJECT_ENCODER); 1749 DRM_MODE_OBJECT_ENCODER);
1595 if (!obj) { 1750 if (!obj) {
@@ -1608,7 +1763,7 @@ int drm_mode_getencoder(struct drm_device *dev, void *data,
1608 enc_resp->possible_clones = encoder->possible_clones; 1763 enc_resp->possible_clones = encoder->possible_clones;
1609 1764
1610out: 1765out:
1611 mutex_unlock(&dev->mode_config.mutex); 1766 drm_modeset_unlock_all(dev);
1612 return ret; 1767 return ret;
1613} 1768}
1614 1769
@@ -1618,9 +1773,6 @@ out:
1618 * @data: ioctl data 1773 * @data: ioctl data
1619 * @file_priv: DRM file info 1774 * @file_priv: DRM file info
1620 * 1775 *
1621 * LOCKING:
1622 * Takes mode config lock.
1623 *
1624 * Return a plane count and set of IDs. 1776 * Return a plane count and set of IDs.
1625 */ 1777 */
1626int drm_mode_getplane_res(struct drm_device *dev, void *data, 1778int drm_mode_getplane_res(struct drm_device *dev, void *data,
@@ -1635,7 +1787,7 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
1635 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 1787 if (!drm_core_check_feature(dev, DRIVER_MODESET))
1636 return -EINVAL; 1788 return -EINVAL;
1637 1789
1638 mutex_lock(&dev->mode_config.mutex); 1790 drm_modeset_lock_all(dev);
1639 config = &dev->mode_config; 1791 config = &dev->mode_config;
1640 1792
1641 /* 1793 /*
@@ -1657,7 +1809,7 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
1657 plane_resp->count_planes = config->num_plane; 1809 plane_resp->count_planes = config->num_plane;
1658 1810
1659out: 1811out:
1660 mutex_unlock(&dev->mode_config.mutex); 1812 drm_modeset_unlock_all(dev);
1661 return ret; 1813 return ret;
1662} 1814}
1663 1815
@@ -1667,9 +1819,6 @@ out:
1667 * @data: ioctl data 1819 * @data: ioctl data
1668 * @file_priv: DRM file info 1820 * @file_priv: DRM file info
1669 * 1821 *
1670 * LOCKING:
1671 * Takes mode config lock.
1672 *
1673 * Return plane info, including formats supported, gamma size, any 1822 * Return plane info, including formats supported, gamma size, any
1674 * current fb, etc. 1823 * current fb, etc.
1675 */ 1824 */
@@ -1685,7 +1834,7 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
1685 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 1834 if (!drm_core_check_feature(dev, DRIVER_MODESET))
1686 return -EINVAL; 1835 return -EINVAL;
1687 1836
1688 mutex_lock(&dev->mode_config.mutex); 1837 drm_modeset_lock_all(dev);
1689 obj = drm_mode_object_find(dev, plane_resp->plane_id, 1838 obj = drm_mode_object_find(dev, plane_resp->plane_id,
1690 DRM_MODE_OBJECT_PLANE); 1839 DRM_MODE_OBJECT_PLANE);
1691 if (!obj) { 1840 if (!obj) {
@@ -1725,7 +1874,7 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
1725 plane_resp->count_format_types = plane->format_count; 1874 plane_resp->count_format_types = plane->format_count;
1726 1875
1727out: 1876out:
1728 mutex_unlock(&dev->mode_config.mutex); 1877 drm_modeset_unlock_all(dev);
1729 return ret; 1878 return ret;
1730} 1879}
1731 1880
@@ -1733,10 +1882,7 @@ out:
1733 * drm_mode_setplane - set up or tear down a plane 1882 * drm_mode_setplane - set up or tear down a plane
1734 * @dev: DRM device 1883 * @dev: DRM device
1735 * @data: ioctl data 1884 * @data: ioctl data
1736 * @file_prive: DRM file info 1885 * @file_priv: DRM file info
1737 *
1738 * LOCKING:
1739 * Takes mode config lock.
1740 * 1886 *
1741 * Set plane info, including placement, fb, scaling, and other factors. 1887 * Set plane info, including placement, fb, scaling, and other factors.
1742 * Or pass a NULL fb to disable. 1888 * Or pass a NULL fb to disable.
@@ -1748,7 +1894,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
1748 struct drm_mode_object *obj; 1894 struct drm_mode_object *obj;
1749 struct drm_plane *plane; 1895 struct drm_plane *plane;
1750 struct drm_crtc *crtc; 1896 struct drm_crtc *crtc;
1751 struct drm_framebuffer *fb; 1897 struct drm_framebuffer *fb = NULL, *old_fb = NULL;
1752 int ret = 0; 1898 int ret = 0;
1753 unsigned int fb_width, fb_height; 1899 unsigned int fb_width, fb_height;
1754 int i; 1900 int i;
@@ -1756,8 +1902,6 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
1756 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 1902 if (!drm_core_check_feature(dev, DRIVER_MODESET))
1757 return -EINVAL; 1903 return -EINVAL;
1758 1904
1759 mutex_lock(&dev->mode_config.mutex);
1760
1761 /* 1905 /*
1762 * First, find the plane, crtc, and fb objects. If not available, 1906 * First, find the plane, crtc, and fb objects. If not available,
1763 * we don't bother to call the driver. 1907 * we don't bother to call the driver.
@@ -1767,16 +1911,18 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
1767 if (!obj) { 1911 if (!obj) {
1768 DRM_DEBUG_KMS("Unknown plane ID %d\n", 1912 DRM_DEBUG_KMS("Unknown plane ID %d\n",
1769 plane_req->plane_id); 1913 plane_req->plane_id);
1770 ret = -ENOENT; 1914 return -ENOENT;
1771 goto out;
1772 } 1915 }
1773 plane = obj_to_plane(obj); 1916 plane = obj_to_plane(obj);
1774 1917
1775 /* No fb means shut it down */ 1918 /* No fb means shut it down */
1776 if (!plane_req->fb_id) { 1919 if (!plane_req->fb_id) {
1920 drm_modeset_lock_all(dev);
1921 old_fb = plane->fb;
1777 plane->funcs->disable_plane(plane); 1922 plane->funcs->disable_plane(plane);
1778 plane->crtc = NULL; 1923 plane->crtc = NULL;
1779 plane->fb = NULL; 1924 plane->fb = NULL;
1925 drm_modeset_unlock_all(dev);
1780 goto out; 1926 goto out;
1781 } 1927 }
1782 1928
@@ -1790,15 +1936,13 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
1790 } 1936 }
1791 crtc = obj_to_crtc(obj); 1937 crtc = obj_to_crtc(obj);
1792 1938
1793 obj = drm_mode_object_find(dev, plane_req->fb_id, 1939 fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
1794 DRM_MODE_OBJECT_FB); 1940 if (!fb) {
1795 if (!obj) {
1796 DRM_DEBUG_KMS("Unknown framebuffer ID %d\n", 1941 DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
1797 plane_req->fb_id); 1942 plane_req->fb_id);
1798 ret = -ENOENT; 1943 ret = -ENOENT;
1799 goto out; 1944 goto out;
1800 } 1945 }
1801 fb = obj_to_fb(obj);
1802 1946
1803 /* Check whether this plane supports the fb pixel format. */ 1947 /* Check whether this plane supports the fb pixel format. */
1804 for (i = 0; i < plane->format_count; i++) 1948 for (i = 0; i < plane->format_count; i++)
@@ -1844,31 +1988,62 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
1844 goto out; 1988 goto out;
1845 } 1989 }
1846 1990
1991 drm_modeset_lock_all(dev);
1847 ret = plane->funcs->update_plane(plane, crtc, fb, 1992 ret = plane->funcs->update_plane(plane, crtc, fb,
1848 plane_req->crtc_x, plane_req->crtc_y, 1993 plane_req->crtc_x, plane_req->crtc_y,
1849 plane_req->crtc_w, plane_req->crtc_h, 1994 plane_req->crtc_w, plane_req->crtc_h,
1850 plane_req->src_x, plane_req->src_y, 1995 plane_req->src_x, plane_req->src_y,
1851 plane_req->src_w, plane_req->src_h); 1996 plane_req->src_w, plane_req->src_h);
1852 if (!ret) { 1997 if (!ret) {
1998 old_fb = plane->fb;
1853 plane->crtc = crtc; 1999 plane->crtc = crtc;
1854 plane->fb = fb; 2000 plane->fb = fb;
2001 fb = NULL;
1855 } 2002 }
2003 drm_modeset_unlock_all(dev);
1856 2004
1857out: 2005out:
1858 mutex_unlock(&dev->mode_config.mutex); 2006 if (fb)
2007 drm_framebuffer_unreference(fb);
2008 if (old_fb)
2009 drm_framebuffer_unreference(old_fb);
1859 2010
1860 return ret; 2011 return ret;
1861} 2012}
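
The exit path above is the reference idiom this patch uses for framebuffers: drm_framebuffer_lookup() returns the object with a reference held, that reference is handed over to plane->fb on success by clearing the local pointer, and whatever is still non-NULL at out: gets dropped. Reduced to a skeleton (obj and obj_set_fb() are hypothetical, not from this patch):

	struct drm_framebuffer *fb, *old_fb = NULL;
	int ret;

	fb = drm_framebuffer_lookup(dev, fb_id);	/* takes a reference */
	if (!fb)
		return -ENOENT;

	ret = obj_set_fb(obj, fb);			/* hypothetical setter */
	if (!ret) {
		old_fb = obj->fb;			/* displaced fb */
		obj->fb = fb;
		fb = NULL;		/* reference now owned by obj->fb */
	}

	if (fb)
		drm_framebuffer_unreference(fb);	/* failure: drop lookup ref */
	if (old_fb)
		drm_framebuffer_unreference(old_fb);	/* success: drop old ref */
	return ret;
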
1862 2013
1863/** 2014/**
1864 * drm_mode_setcrtc - set CRTC configuration 2015 * drm_mode_set_config_internal - helper to call ->set_config
1865 * @inode: inode from the ioctl 2016 * @set: modeset config to set
1866 * @filp: file * from the ioctl
1867 * @cmd: cmd from ioctl
1868 * @arg: arg from ioctl
1869 * 2017 *
1870 * LOCKING: 2018 * This is a little helper to wrap internal calls to the ->set_config driver
1871 * Takes mode config lock. 2019 * interface. The only thing it adds is the correct refcounting dance.
2020 */
2021int drm_mode_set_config_internal(struct drm_mode_set *set)
2022{
2023 struct drm_crtc *crtc = set->crtc;
2024 struct drm_framebuffer *fb, *old_fb;
2025 int ret;
2026
2027 old_fb = crtc->fb;
2028 fb = set->fb;
2029
2030 ret = crtc->funcs->set_config(set);
2031 if (ret == 0) {
2032 if (old_fb)
2033 drm_framebuffer_unreference(old_fb);
2034 if (fb)
2035 drm_framebuffer_reference(fb);
2036 }
2037
2038 return ret;
2039}
2040EXPORT_SYMBOL(drm_mode_set_config_internal);
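
In-kernel callers (the fbdev helper restoring a mode, drivers doing load-time setup) are expected to go through this helper rather than calling crtc->funcs->set_config() directly, so the framebuffer refcounts stay balanced. A minimal sketch of a caller, assuming the modeset locks are already held:

	static int restore_mode(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_display_mode *mode,
				struct drm_connector **connectors,
				int num_connectors)
	{
		struct drm_mode_set set = {
			.crtc = crtc,
			.fb = fb,
			.mode = mode,
			.connectors = connectors,
			.num_connectors = num_connectors,
		};

		/* refs the new fb, unrefs the displaced one on success */
		return drm_mode_set_config_internal(&set);
	}
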
2041
2042/**
2043 * drm_mode_setcrtc - set CRTC configuration
2044 * @dev: drm device for the ioctl
2045 * @data: data pointer for the ioctl
2046 * @file_priv: drm file for the ioctl call
1872 * 2047 *
1873 * Build a new CRTC configuration based on user request. 2048 * Build a new CRTC configuration based on user request.
1874 * 2049 *
@@ -1899,7 +2074,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
1899 if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX) 2074 if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
1900 return -ERANGE; 2075 return -ERANGE;
1901 2076
1902 mutex_lock(&dev->mode_config.mutex); 2077 drm_modeset_lock_all(dev);
1903 obj = drm_mode_object_find(dev, crtc_req->crtc_id, 2078 obj = drm_mode_object_find(dev, crtc_req->crtc_id,
1904 DRM_MODE_OBJECT_CRTC); 2079 DRM_MODE_OBJECT_CRTC);
1905 if (!obj) { 2080 if (!obj) {
@@ -1921,16 +2096,16 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
1921 goto out; 2096 goto out;
1922 } 2097 }
1923 fb = crtc->fb; 2098 fb = crtc->fb;
2099 /* Make refcounting symmetric with the lookup path. */
2100 drm_framebuffer_reference(fb);
1924 } else { 2101 } else {
1925 obj = drm_mode_object_find(dev, crtc_req->fb_id, 2102 fb = drm_framebuffer_lookup(dev, crtc_req->fb_id);
1926 DRM_MODE_OBJECT_FB); 2103 if (!fb) {
1927 if (!obj) {
1928 DRM_DEBUG_KMS("Unknown FB ID%d\n", 2104 DRM_DEBUG_KMS("Unknown FB ID%d\n",
1929 crtc_req->fb_id); 2105 crtc_req->fb_id);
1930 ret = -EINVAL; 2106 ret = -EINVAL;
1931 goto out; 2107 goto out;
1932 } 2108 }
1933 fb = obj_to_fb(obj);
1934 } 2109 }
1935 2110
1936 mode = drm_mode_create(dev); 2111 mode = drm_mode_create(dev);
@@ -2027,12 +2202,15 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
2027 set.connectors = connector_set; 2202 set.connectors = connector_set;
2028 set.num_connectors = crtc_req->count_connectors; 2203 set.num_connectors = crtc_req->count_connectors;
2029 set.fb = fb; 2204 set.fb = fb;
2030 ret = crtc->funcs->set_config(&set); 2205 ret = drm_mode_set_config_internal(&set);
2031 2206
2032out: 2207out:
2208 if (fb)
2209 drm_framebuffer_unreference(fb);
2210
2033 kfree(connector_set); 2211 kfree(connector_set);
2034 drm_mode_destroy(dev, mode); 2212 drm_mode_destroy(dev, mode);
2035 mutex_unlock(&dev->mode_config.mutex); 2213 drm_modeset_unlock_all(dev);
2036 return ret; 2214 return ret;
2037} 2215}
2038 2216
@@ -2050,15 +2228,14 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
2050 if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags)) 2228 if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
2051 return -EINVAL; 2229 return -EINVAL;
2052 2230
2053 mutex_lock(&dev->mode_config.mutex);
2054 obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC); 2231 obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
2055 if (!obj) { 2232 if (!obj) {
2056 DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id); 2233 DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
2057 ret = -EINVAL; 2234 return -EINVAL;
2058 goto out;
2059 } 2235 }
2060 crtc = obj_to_crtc(obj); 2236 crtc = obj_to_crtc(obj);
2061 2237
2238 mutex_lock(&crtc->mutex);
2062 if (req->flags & DRM_MODE_CURSOR_BO) { 2239 if (req->flags & DRM_MODE_CURSOR_BO) {
2063 if (!crtc->funcs->cursor_set) { 2240 if (!crtc->funcs->cursor_set) {
2064 ret = -ENXIO; 2241 ret = -ENXIO;
@@ -2078,7 +2255,8 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
2078 } 2255 }
2079 } 2256 }
2080out: 2257out:
2081 mutex_unlock(&dev->mode_config.mutex); 2258 mutex_unlock(&crtc->mutex);
2259
2082 return ret; 2260 return ret;
2083} 2261}
2084 2262
@@ -2089,7 +2267,7 @@ uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
2089 2267
2090 switch (bpp) { 2268 switch (bpp) {
2091 case 8: 2269 case 8:
2092 fmt = DRM_FORMAT_RGB332; 2270 fmt = DRM_FORMAT_C8;
2093 break; 2271 break;
2094 case 16: 2272 case 16:
2095 if (depth == 15) 2273 if (depth == 15)
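
drm_mode_legacy_fb_format() is how the legacy ADDFB ioctl translates a bpp/depth pair into a FourCC for the common framebuffer path, so after this change an 8 bpp request is treated as color-indexed instead of RGB 3:3:2. For instance:

	struct drm_mode_fb_cmd2 r = {};

	/* was DRM_FORMAT_RGB332, is now DRM_FORMAT_C8 */
	r.pixel_format = drm_mode_legacy_fb_format(8, 8);
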
@@ -2120,13 +2298,9 @@ EXPORT_SYMBOL(drm_mode_legacy_fb_format);
2120 2298
2121/** 2299/**
2122 * drm_mode_addfb - add an FB to the graphics configuration 2300 * drm_mode_addfb - add an FB to the graphics configuration
2123 * @inode: inode from the ioctl 2301 * @dev: drm device for the ioctl
2124 * @filp: file * from the ioctl 2302 * @data: data pointer for the ioctl
2125 * @cmd: cmd from ioctl 2303 * @file_priv: drm file for the ioctl call
2126 * @arg: arg from ioctl
2127 *
2128 * LOCKING:
2129 * Takes mode config lock.
2130 * 2304 *
2131 * Add a new FB to the specified CRTC, given a user request. 2305 * Add a new FB to the specified CRTC, given a user request.
2132 * 2306 *
@@ -2161,24 +2335,19 @@ int drm_mode_addfb(struct drm_device *dev,
2161 if ((config->min_height > r.height) || (r.height > config->max_height)) 2335 if ((config->min_height > r.height) || (r.height > config->max_height))
2162 return -EINVAL; 2336 return -EINVAL;
2163 2337
2164 mutex_lock(&dev->mode_config.mutex);
2165
2166 /* TODO check buffer is sufficiently large */
2167 /* TODO setup destructor callback */
2168
2169 fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r); 2338 fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
2170 if (IS_ERR(fb)) { 2339 if (IS_ERR(fb)) {
2171 DRM_DEBUG_KMS("could not create framebuffer\n"); 2340 DRM_DEBUG_KMS("could not create framebuffer\n");
2172 ret = PTR_ERR(fb); 2341 drm_modeset_unlock_all(dev);
2173 goto out; 2342 return PTR_ERR(fb);
2174 } 2343 }
2175 2344
2345 mutex_lock(&file_priv->fbs_lock);
2176 or->fb_id = fb->base.id; 2346 or->fb_id = fb->base.id;
2177 list_add(&fb->filp_head, &file_priv->fbs); 2347 list_add(&fb->filp_head, &file_priv->fbs);
2178 DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); 2348 DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
2349 mutex_unlock(&file_priv->fbs_lock);
2179 2350
2180out:
2181 mutex_unlock(&dev->mode_config.mutex);
2182 return ret; 2351 return ret;
2183} 2352}
2184 2353
@@ -2304,13 +2473,9 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
2304 2473
2305/** 2474/**
2306 * drm_mode_addfb2 - add an FB to the graphics configuration 2475 * drm_mode_addfb2 - add an FB to the graphics configuration
2307 * @inode: inode from the ioctl 2476 * @dev: drm device for the ioctl
2308 * @filp: file * from the ioctl 2477 * @data: data pointer for the ioctl
2309 * @cmd: cmd from ioctl 2478 * @file_priv: drm file for the ioctl call
2310 * @arg: arg from ioctl
2311 *
2312 * LOCKING:
2313 * Takes mode config lock.
2314 * 2479 *
2315 * Add a new FB to the specified CRTC, given a user request with format. 2480 * Add a new FB to the specified CRTC, given a user request with format.
2316 * 2481 *
@@ -2350,33 +2515,28 @@ int drm_mode_addfb2(struct drm_device *dev,
2350 if (ret) 2515 if (ret)
2351 return ret; 2516 return ret;
2352 2517
2353 mutex_lock(&dev->mode_config.mutex);
2354
2355 fb = dev->mode_config.funcs->fb_create(dev, file_priv, r); 2518 fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
2356 if (IS_ERR(fb)) { 2519 if (IS_ERR(fb)) {
2357 DRM_DEBUG_KMS("could not create framebuffer\n"); 2520 DRM_DEBUG_KMS("could not create framebuffer\n");
2358 ret = PTR_ERR(fb); 2521 drm_modeset_unlock_all(dev);
2359 goto out; 2522 return PTR_ERR(fb);
2360 } 2523 }
2361 2524
2525 mutex_lock(&file_priv->fbs_lock);
2362 r->fb_id = fb->base.id; 2526 r->fb_id = fb->base.id;
2363 list_add(&fb->filp_head, &file_priv->fbs); 2527 list_add(&fb->filp_head, &file_priv->fbs);
2364 DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); 2528 DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
2529 mutex_unlock(&file_priv->fbs_lock);
2530
2365 2531
2366out:
2367 mutex_unlock(&dev->mode_config.mutex);
2368 return ret; 2532 return ret;
2369} 2533}
2370 2534
2371/** 2535/**
2372 * drm_mode_rmfb - remove an FB from the configuration 2536 * drm_mode_rmfb - remove an FB from the configuration
2373 * @inode: inode from the ioctl 2537 * @dev: drm device for the ioctl
2374 * @filp: file * from the ioctl 2538 * @data: data pointer for the ioctl
2375 * @cmd: cmd from ioctl 2539 * @file_priv: drm file for the ioctl call
2376 * @arg: arg from ioctl
2377 *
2378 * LOCKING:
2379 * Takes mode config lock.
2380 * 2540 *
2381 * Remove the FB specified by the user. 2541 * Remove the FB specified by the user.
2382 * 2542 *
@@ -2388,50 +2548,49 @@ out:
2388int drm_mode_rmfb(struct drm_device *dev, 2548int drm_mode_rmfb(struct drm_device *dev,
2389 void *data, struct drm_file *file_priv) 2549 void *data, struct drm_file *file_priv)
2390{ 2550{
2391 struct drm_mode_object *obj;
2392 struct drm_framebuffer *fb = NULL; 2551 struct drm_framebuffer *fb = NULL;
2393 struct drm_framebuffer *fbl = NULL; 2552 struct drm_framebuffer *fbl = NULL;
2394 uint32_t *id = data; 2553 uint32_t *id = data;
2395 int ret = 0;
2396 int found = 0; 2554 int found = 0;
2397 2555
2398 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 2556 if (!drm_core_check_feature(dev, DRIVER_MODESET))
2399 return -EINVAL; 2557 return -EINVAL;
2400 2558
2401 mutex_lock(&dev->mode_config.mutex); 2559 mutex_lock(&file_priv->fbs_lock);
2402 obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB); 2560 mutex_lock(&dev->mode_config.fb_lock);
2403 /* TODO check that we really get a framebuffer back. */ 2561 fb = __drm_framebuffer_lookup(dev, *id);
2404 if (!obj) { 2562 if (!fb)
2405 ret = -EINVAL; 2563 goto fail_lookup;
2406 goto out;
2407 }
2408 fb = obj_to_fb(obj);
2409 2564
2410 list_for_each_entry(fbl, &file_priv->fbs, filp_head) 2565 list_for_each_entry(fbl, &file_priv->fbs, filp_head)
2411 if (fb == fbl) 2566 if (fb == fbl)
2412 found = 1; 2567 found = 1;
2568 if (!found)
2569 goto fail_lookup;
2413 2570
2414 if (!found) { 2571 /* Mark fb as reaped, we still have a ref from fpriv->fbs. */
2415 ret = -EINVAL; 2572 __drm_framebuffer_unregister(dev, fb);
2416 goto out; 2573
2417 } 2574 list_del_init(&fb->filp_head);
2575 mutex_unlock(&dev->mode_config.fb_lock);
2576 mutex_unlock(&file_priv->fbs_lock);
2418 2577
2419 drm_framebuffer_remove(fb); 2578 drm_framebuffer_remove(fb);
2420 2579
2421out: 2580 return 0;
2422 mutex_unlock(&dev->mode_config.mutex); 2581
2423 return ret; 2582fail_lookup:
2583 mutex_unlock(&dev->mode_config.fb_lock);
2584 mutex_unlock(&file_priv->fbs_lock);
2585
2586 return -EINVAL;
2424} 2587}
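
Note the lock nesting the reworked removal path establishes: file_priv->fbs_lock is taken outside dev->mode_config.fb_lock, and the lookup uses the __drm_framebuffer_lookup() variant, which takes no reference and is only safe while fb_lock is held. drm_fb_release() below follows the same ordering. In outline:

	mutex_lock(&file_priv->fbs_lock);	/* outer: per-file fb list */
	mutex_lock(&dev->mode_config.fb_lock);	/* inner: fb idr */

	fb = __drm_framebuffer_lookup(dev, id);	/* no reference taken */
	/* validate, __drm_framebuffer_unregister(), list_del_init() */

	mutex_unlock(&dev->mode_config.fb_lock);
	mutex_unlock(&file_priv->fbs_lock);
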
2425 2588
2426/** 2589/**
2427 * drm_mode_getfb - get FB info 2590 * drm_mode_getfb - get FB info
2428 * @inode: inode from the ioctl 2591 * @dev: drm device for the ioctl
2429 * @filp: file * from the ioctl 2592 * @data: data pointer for the ioctl
2430 * @cmd: cmd from ioctl 2593 * @file_priv: drm file for the ioctl call
2431 * @arg: arg from ioctl
2432 *
2433 * LOCKING:
2434 * Takes mode config lock.
2435 * 2594 *
2436 * Lookup the FB given its ID and return info about it. 2595 * Lookup the FB given its ID and return info about it.
2437 * 2596 *
@@ -2444,30 +2603,28 @@ int drm_mode_getfb(struct drm_device *dev,
2444 void *data, struct drm_file *file_priv) 2603 void *data, struct drm_file *file_priv)
2445{ 2604{
2446 struct drm_mode_fb_cmd *r = data; 2605 struct drm_mode_fb_cmd *r = data;
2447 struct drm_mode_object *obj;
2448 struct drm_framebuffer *fb; 2606 struct drm_framebuffer *fb;
2449 int ret = 0; 2607 int ret;
2450 2608
2451 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 2609 if (!drm_core_check_feature(dev, DRIVER_MODESET))
2452 return -EINVAL; 2610 return -EINVAL;
2453 2611
2454 mutex_lock(&dev->mode_config.mutex); 2612 fb = drm_framebuffer_lookup(dev, r->fb_id);
2455 obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); 2613 if (!fb)
2456 if (!obj) { 2614 return -EINVAL;
2457 ret = -EINVAL;
2458 goto out;
2459 }
2460 fb = obj_to_fb(obj);
2461 2615
2462 r->height = fb->height; 2616 r->height = fb->height;
2463 r->width = fb->width; 2617 r->width = fb->width;
2464 r->depth = fb->depth; 2618 r->depth = fb->depth;
2465 r->bpp = fb->bits_per_pixel; 2619 r->bpp = fb->bits_per_pixel;
2466 r->pitch = fb->pitches[0]; 2620 r->pitch = fb->pitches[0];
2467 fb->funcs->create_handle(fb, file_priv, &r->handle); 2621 if (fb->funcs->create_handle)
2622 ret = fb->funcs->create_handle(fb, file_priv, &r->handle);
2623 else
2624 ret = -ENODEV;
2625
2626 drm_framebuffer_unreference(fb);
2468 2627
2469out:
2470 mutex_unlock(&dev->mode_config.mutex);
2471 return ret; 2628 return ret;
2472} 2629}
2473 2630
@@ -2477,7 +2634,6 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
2477 struct drm_clip_rect __user *clips_ptr; 2634 struct drm_clip_rect __user *clips_ptr;
2478 struct drm_clip_rect *clips = NULL; 2635 struct drm_clip_rect *clips = NULL;
2479 struct drm_mode_fb_dirty_cmd *r = data; 2636 struct drm_mode_fb_dirty_cmd *r = data;
2480 struct drm_mode_object *obj;
2481 struct drm_framebuffer *fb; 2637 struct drm_framebuffer *fb;
2482 unsigned flags; 2638 unsigned flags;
2483 int num_clips; 2639 int num_clips;
@@ -2486,13 +2642,9 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
2486 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 2642 if (!drm_core_check_feature(dev, DRIVER_MODESET))
2487 return -EINVAL; 2643 return -EINVAL;
2488 2644
2489 mutex_lock(&dev->mode_config.mutex); 2645 fb = drm_framebuffer_lookup(dev, r->fb_id);
2490 obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); 2646 if (!fb)
2491 if (!obj) { 2647 return -EINVAL;
2492 ret = -EINVAL;
2493 goto out_err1;
2494 }
2495 fb = obj_to_fb(obj);
2496 2648
2497 num_clips = r->num_clips; 2649 num_clips = r->num_clips;
2498 clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr; 2650 clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
@@ -2530,27 +2682,26 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
2530 } 2682 }
2531 2683
2532 if (fb->funcs->dirty) { 2684 if (fb->funcs->dirty) {
2685 drm_modeset_lock_all(dev);
2533 ret = fb->funcs->dirty(fb, file_priv, flags, r->color, 2686 ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
2534 clips, num_clips); 2687 clips, num_clips);
2688 drm_modeset_unlock_all(dev);
2535 } else { 2689 } else {
2536 ret = -ENOSYS; 2690 ret = -ENOSYS;
2537 goto out_err2;
2538 } 2691 }
2539 2692
2540out_err2: 2693out_err2:
2541 kfree(clips); 2694 kfree(clips);
2542out_err1: 2695out_err1:
2543 mutex_unlock(&dev->mode_config.mutex); 2696 drm_framebuffer_unreference(fb);
2697
2544 return ret; 2698 return ret;
2545} 2699}
2546 2700
2547 2701
2548/** 2702/**
2549 * drm_fb_release - remove and free the FBs on this file 2703 * drm_fb_release - remove and free the FBs on this file
2550 * @filp: file * from the ioctl 2704 * @priv: drm file for the ioctl
2551 *
2552 * LOCKING:
2553 * Takes mode config lock.
2554 * 2705 *
2555 * Destroy all the FBs associated with @filp. 2706 * Destroy all the FBs associated with @filp.
2556 * 2707 *
@@ -2564,11 +2715,20 @@ void drm_fb_release(struct drm_file *priv)
2564 struct drm_device *dev = priv->minor->dev; 2715 struct drm_device *dev = priv->minor->dev;
2565 struct drm_framebuffer *fb, *tfb; 2716 struct drm_framebuffer *fb, *tfb;
2566 2717
2567 mutex_lock(&dev->mode_config.mutex); 2718 mutex_lock(&priv->fbs_lock);
2568 list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) { 2719 list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
2720
2721 mutex_lock(&dev->mode_config.fb_lock);
2722 /* Mark fb as reaped, we still have a ref from fpriv->fbs. */
2723 __drm_framebuffer_unregister(dev, fb);
2724 mutex_unlock(&dev->mode_config.fb_lock);
2725
2726 list_del_init(&fb->filp_head);
2727
2728 /* This will also drop the fpriv->fbs reference. */
2569 drm_framebuffer_remove(fb); 2729 drm_framebuffer_remove(fb);
2570 } 2730 }
2571 mutex_unlock(&dev->mode_config.mutex); 2731 mutex_unlock(&priv->fbs_lock);
2572} 2732}
2573 2733
2574/** 2734/**
@@ -2660,10 +2820,9 @@ EXPORT_SYMBOL(drm_mode_detachmode_crtc);
2660 2820
2661/** 2821/**
2662 * drm_fb_attachmode - Attach a user mode to a connector 2822 * drm_fb_attachmode - Attach a user mode to a connector
2663 * @inode: inode from the ioctl 2823 * @dev: drm device for the ioctl
2664 * @filp: file * from the ioctl 2824 * @data: data pointer for the ioctl
2665 * @cmd: cmd from ioctl 2825 * @file_priv: drm file for the ioctl call
2666 * @arg: arg from ioctl
2667 * 2826 *
2668 * This attaches a user specified mode to a connector. 2827 * This attaches a user specified mode to a connector.
2669 * Called by the user via ioctl. 2828 * Called by the user via ioctl.
@@ -2684,7 +2843,7 @@ int drm_mode_attachmode_ioctl(struct drm_device *dev,
2684 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 2843 if (!drm_core_check_feature(dev, DRIVER_MODESET))
2685 return -EINVAL; 2844 return -EINVAL;
2686 2845
2687 mutex_lock(&dev->mode_config.mutex); 2846 drm_modeset_lock_all(dev);
2688 2847
2689 obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR); 2848 obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
2690 if (!obj) { 2849 if (!obj) {
@@ -2708,17 +2867,16 @@ int drm_mode_attachmode_ioctl(struct drm_device *dev,
2708 2867
2709 drm_mode_attachmode(dev, connector, mode); 2868 drm_mode_attachmode(dev, connector, mode);
2710out: 2869out:
2711 mutex_unlock(&dev->mode_config.mutex); 2870 drm_modeset_unlock_all(dev);
2712 return ret; 2871 return ret;
2713} 2872}
2714 2873
2715 2874
2716/** 2875/**
2717 * drm_fb_detachmode - Detach a user specified mode from a connector 2876 * drm_fb_detachmode - Detach a user specified mode from a connector
2718 * @inode: inode from the ioctl 2877 * @dev: drm device for the ioctl
2719 * @filp: file * from the ioctl 2878 * @data: data pointer for the ioctl
2720 * @cmd: cmd from ioctl 2879 * @file_priv: drm file for the ioctl call
2721 * @arg: arg from ioctl
2722 * 2880 *
2723 * Called by the user via ioctl. 2881 * Called by the user via ioctl.
2724 * 2882 *
@@ -2738,7 +2896,7 @@ int drm_mode_detachmode_ioctl(struct drm_device *dev,
2738 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 2896 if (!drm_core_check_feature(dev, DRIVER_MODESET))
2739 return -EINVAL; 2897 return -EINVAL;
2740 2898
2741 mutex_lock(&dev->mode_config.mutex); 2899 drm_modeset_lock_all(dev);
2742 2900
2743 obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR); 2901 obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
2744 if (!obj) { 2902 if (!obj) {
@@ -2755,7 +2913,7 @@ int drm_mode_detachmode_ioctl(struct drm_device *dev,
2755 2913
2756 ret = drm_mode_detachmode(dev, connector, &mode); 2914 ret = drm_mode_detachmode(dev, connector, &mode);
2757out: 2915out:
2758 mutex_unlock(&dev->mode_config.mutex); 2916 drm_modeset_unlock_all(dev);
2759 return ret; 2917 return ret;
2760} 2918}
2761 2919
@@ -3001,7 +3159,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
3001 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 3159 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3002 return -EINVAL; 3160 return -EINVAL;
3003 3161
3004 mutex_lock(&dev->mode_config.mutex); 3162 drm_modeset_lock_all(dev);
3005 obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY); 3163 obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
3006 if (!obj) { 3164 if (!obj) {
3007 ret = -EINVAL; 3165 ret = -EINVAL;
@@ -3079,7 +3237,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
3079 out_resp->count_enum_blobs = blob_count; 3237 out_resp->count_enum_blobs = blob_count;
3080 } 3238 }
3081done: 3239done:
3082 mutex_unlock(&dev->mode_config.mutex); 3240 drm_modeset_unlock_all(dev);
3083 return ret; 3241 return ret;
3084} 3242}
3085 3243
@@ -3130,7 +3288,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
3130 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 3288 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3131 return -EINVAL; 3289 return -EINVAL;
3132 3290
3133 mutex_lock(&dev->mode_config.mutex); 3291 drm_modeset_lock_all(dev);
3134 obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB); 3292 obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
3135 if (!obj) { 3293 if (!obj) {
3136 ret = -EINVAL; 3294 ret = -EINVAL;
@@ -3148,7 +3306,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
3148 out_resp->length = blob->length; 3306 out_resp->length = blob->length;
3149 3307
3150done: 3308done:
3151 mutex_unlock(&dev->mode_config.mutex); 3309 drm_modeset_unlock_all(dev);
3152 return ret; 3310 return ret;
3153} 3311}
3154 3312
@@ -3290,7 +3448,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
3290 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 3448 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3291 return -EINVAL; 3449 return -EINVAL;
3292 3450
3293 mutex_lock(&dev->mode_config.mutex); 3451 drm_modeset_lock_all(dev);
3294 3452
3295 obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type); 3453 obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
3296 if (!obj) { 3454 if (!obj) {
@@ -3327,7 +3485,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
3327 } 3485 }
3328 arg->count_props = props_count; 3486 arg->count_props = props_count;
3329out: 3487out:
3330 mutex_unlock(&dev->mode_config.mutex); 3488 drm_modeset_unlock_all(dev);
3331 return ret; 3489 return ret;
3332} 3490}
3333 3491
@@ -3344,7 +3502,7 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
3344 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 3502 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3345 return -EINVAL; 3503 return -EINVAL;
3346 3504
3347 mutex_lock(&dev->mode_config.mutex); 3505 drm_modeset_lock_all(dev);
3348 3506
3349 arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type); 3507 arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
3350 if (!arg_obj) 3508 if (!arg_obj)
@@ -3382,7 +3540,7 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
3382 } 3540 }
3383 3541
3384out: 3542out:
3385 mutex_unlock(&dev->mode_config.mutex); 3543 drm_modeset_unlock_all(dev);
3386 return ret; 3544 return ret;
3387} 3545}
3388 3546
@@ -3444,7 +3602,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
3444 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 3602 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3445 return -EINVAL; 3603 return -EINVAL;
3446 3604
3447 mutex_lock(&dev->mode_config.mutex); 3605 drm_modeset_lock_all(dev);
3448 obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); 3606 obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
3449 if (!obj) { 3607 if (!obj) {
3450 ret = -EINVAL; 3608 ret = -EINVAL;
@@ -3485,7 +3643,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
3485 crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size); 3643 crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
3486 3644
3487out: 3645out:
3488 mutex_unlock(&dev->mode_config.mutex); 3646 drm_modeset_unlock_all(dev);
3489 return ret; 3647 return ret;
3490 3648
3491} 3649}
@@ -3503,7 +3661,7 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
3503 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 3661 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3504 return -EINVAL; 3662 return -EINVAL;
3505 3663
3506 mutex_lock(&dev->mode_config.mutex); 3664 drm_modeset_lock_all(dev);
3507 obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); 3665 obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
3508 if (!obj) { 3666 if (!obj) {
3509 ret = -EINVAL; 3667 ret = -EINVAL;
@@ -3536,7 +3694,7 @@ int drm_mode_gamma_get_ioctl(struct drm_device *dev,
3536 goto out; 3694 goto out;
3537 } 3695 }
3538out: 3696out:
3539 mutex_unlock(&dev->mode_config.mutex); 3697 drm_modeset_unlock_all(dev);
3540 return ret; 3698 return ret;
3541} 3699}
3542 3700
@@ -3546,7 +3704,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
3546 struct drm_mode_crtc_page_flip *page_flip = data; 3704 struct drm_mode_crtc_page_flip *page_flip = data;
3547 struct drm_mode_object *obj; 3705 struct drm_mode_object *obj;
3548 struct drm_crtc *crtc; 3706 struct drm_crtc *crtc;
3549 struct drm_framebuffer *fb; 3707 struct drm_framebuffer *fb = NULL, *old_fb = NULL;
3550 struct drm_pending_vblank_event *e = NULL; 3708 struct drm_pending_vblank_event *e = NULL;
3551 unsigned long flags; 3709 unsigned long flags;
3552 int hdisplay, vdisplay; 3710 int hdisplay, vdisplay;
@@ -3556,12 +3714,12 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
3556 page_flip->reserved != 0) 3714 page_flip->reserved != 0)
3557 return -EINVAL; 3715 return -EINVAL;
3558 3716
3559 mutex_lock(&dev->mode_config.mutex);
3560 obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC); 3717 obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
3561 if (!obj) 3718 if (!obj)
3562 goto out; 3719 return -EINVAL;
3563 crtc = obj_to_crtc(obj); 3720 crtc = obj_to_crtc(obj);
3564 3721
3722 mutex_lock(&crtc->mutex);
3565 if (crtc->fb == NULL) { 3723 if (crtc->fb == NULL) {
3566 /* The framebuffer is currently unbound, presumably 3724 /* The framebuffer is currently unbound, presumably
3567 * due to a hotplug event, that userspace has not 3725 * due to a hotplug event, that userspace has not
@@ -3574,10 +3732,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
3574 if (crtc->funcs->page_flip == NULL) 3732 if (crtc->funcs->page_flip == NULL)
3575 goto out; 3733 goto out;
3576 3734
3577 obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB); 3735 fb = drm_framebuffer_lookup(dev, page_flip->fb_id);
3578 if (!obj) 3736 if (!fb)
3579 goto out; 3737 goto out;
3580 fb = obj_to_fb(obj);
3581 3738
3582 hdisplay = crtc->mode.hdisplay; 3739 hdisplay = crtc->mode.hdisplay;
3583 vdisplay = crtc->mode.vdisplay; 3740 vdisplay = crtc->mode.vdisplay;
@@ -3623,6 +3780,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
3623 (void (*) (struct drm_pending_event *)) kfree; 3780 (void (*) (struct drm_pending_event *)) kfree;
3624 } 3781 }
3625 3782
3783 old_fb = crtc->fb;
3626 ret = crtc->funcs->page_flip(crtc, fb, e); 3784 ret = crtc->funcs->page_flip(crtc, fb, e);
3627 if (ret) { 3785 if (ret) {
3628 if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) { 3786 if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
@@ -3631,10 +3789,27 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
3631 spin_unlock_irqrestore(&dev->event_lock, flags); 3789 spin_unlock_irqrestore(&dev->event_lock, flags);
3632 kfree(e); 3790 kfree(e);
3633 } 3791 }
3792 /* Keep the old fb, don't unref it. */
3793 old_fb = NULL;
3794 } else {
3795 /*
3796 * Warn if the driver hasn't properly updated the crtc->fb
3797 * field to reflect that the new framebuffer is now used.
3798 * Failing to do so will screw with the reference counting
3799 * on framebuffers.
3800 */
3801 WARN_ON(crtc->fb != fb);
3802 /* Unref only the old framebuffer. */
3803 fb = NULL;
3634 } 3804 }
3635 3805
3636out: 3806out:
3637 mutex_unlock(&dev->mode_config.mutex); 3807 if (fb)
3808 drm_framebuffer_unreference(fb);
3809 if (old_fb)
3810 drm_framebuffer_unreference(old_fb);
3811 mutex_unlock(&crtc->mutex);
3812
3638 return ret; 3813 return ret;
3639} 3814}
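
The WARN_ON added above documents a contract for drivers: a successful page_flip hook must itself repoint crtc->fb at the new framebuffer, since the core now uses that field for the refcount handover. Schematically, for a hypothetical driver:

	static int foo_crtc_page_flip(struct drm_crtc *crtc,
				      struct drm_framebuffer *fb,
				      struct drm_pending_vblank_event *event)
	{
		/* ... queue the flip in hardware, arm the vblank event ... */

		crtc->fb = fb;	/* what WARN_ON(crtc->fb != fb) checks */
		return 0;
	}
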
3640 3815
@@ -3702,6 +3877,7 @@ void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
3702 int *bpp) 3877 int *bpp)
3703{ 3878{
3704 switch (format) { 3879 switch (format) {
3880 case DRM_FORMAT_C8:
3705 case DRM_FORMAT_RGB332: 3881 case DRM_FORMAT_RGB332:
3706 case DRM_FORMAT_BGR233: 3882 case DRM_FORMAT_BGR233:
3707 *depth = 8; 3883 *depth = 8;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 5a3770fbd770..c194f4e680ad 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -29,11 +29,11 @@
29 */ 29 */
30#include <linux/kernel.h> 30#include <linux/kernel.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/hdmi.h>
32#include <linux/i2c.h> 33#include <linux/i2c.h>
33#include <linux/module.h> 34#include <linux/module.h>
34#include <drm/drmP.h> 35#include <drm/drmP.h>
35#include <drm/drm_edid.h> 36#include <drm/drm_edid.h>
36#include "drm_edid_modes.h"
37 37
38#define version_greater(edid, maj, min) \ 38#define version_greater(edid, maj, min) \
39 (((edid)->version > (maj)) || \ 39 (((edid)->version > (maj)) || \
@@ -87,9 +87,6 @@ static struct edid_quirk {
87 int product_id; 87 int product_id;
88 u32 quirks; 88 u32 quirks;
89} edid_quirk_list[] = { 89} edid_quirk_list[] = {
90 /* ASUS VW222S */
91 { "ACI", 0x22a2, EDID_QUIRK_FORCE_REDUCED_BLANKING },
92
93 /* Acer AL1706 */ 90 /* Acer AL1706 */
94 { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 }, 91 { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
95 /* Acer F51 */ 92 /* Acer F51 */
@@ -130,6 +127,746 @@ static struct edid_quirk {
130 { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING }, 127 { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
131}; 128};
132 129
130/*
131 * Autogenerated from the DMT spec.
132 * This table is copied from xfree86/modes/xf86EdidModes.c.
133 */
134static const struct drm_display_mode drm_dmt_modes[] = {
135 /* 640x350@85Hz */
136 { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
137 736, 832, 0, 350, 382, 385, 445, 0,
138 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
139 /* 640x400@85Hz */
140 { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
141 736, 832, 0, 400, 401, 404, 445, 0,
142 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
143 /* 720x400@85Hz */
144 { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756,
145 828, 936, 0, 400, 401, 404, 446, 0,
146 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
147 /* 640x480@60Hz */
148 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
149 752, 800, 0, 480, 489, 492, 525, 0,
150 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
151 /* 640x480@72Hz */
152 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
153 704, 832, 0, 480, 489, 492, 520, 0,
154 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
155 /* 640x480@75Hz */
156 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
157 720, 840, 0, 480, 481, 484, 500, 0,
158 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
159 /* 640x480@85Hz */
160 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696,
161 752, 832, 0, 480, 481, 484, 509, 0,
162 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
163 /* 800x600@56Hz */
164 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
165 896, 1024, 0, 600, 601, 603, 625, 0,
166 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
167 /* 800x600@60Hz */
168 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
169 968, 1056, 0, 600, 601, 605, 628, 0,
170 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
171 /* 800x600@72Hz */
172 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
173 976, 1040, 0, 600, 637, 643, 666, 0,
174 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
175 /* 800x600@75Hz */
176 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
177 896, 1056, 0, 600, 601, 604, 625, 0,
178 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
179 /* 800x600@85Hz */
180 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
181 896, 1048, 0, 600, 601, 604, 631, 0,
182 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
183 /* 800x600@120Hz RB */
184 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848,
185 880, 960, 0, 600, 603, 607, 636, 0,
186 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
187 /* 848x480@60Hz */
188 { DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
189 976, 1088, 0, 480, 486, 494, 517, 0,
190 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
191 /* 1024x768@43Hz, interlace */
192 { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
193 1208, 1264, 0, 768, 768, 772, 817, 0,
194 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
195 DRM_MODE_FLAG_INTERLACE) },
196 /* 1024x768@60Hz */
197 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
198 1184, 1344, 0, 768, 771, 777, 806, 0,
199 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
200 /* 1024x768@70Hz */
201 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
202 1184, 1328, 0, 768, 771, 777, 806, 0,
203 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
204 /* 1024x768@75Hz */
205 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
206 1136, 1312, 0, 768, 769, 772, 800, 0,
207 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
208 /* 1024x768@85Hz */
209 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
210 1168, 1376, 0, 768, 769, 772, 808, 0,
211 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
212 /* 1024x768@120Hz RB */
213 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072,
214 1104, 1184, 0, 768, 771, 775, 813, 0,
215 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
216 /* 1152x864@75Hz */
217 { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
218 1344, 1600, 0, 864, 865, 868, 900, 0,
219 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
220 /* 1280x768@60Hz RB */
221 { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328,
222 1360, 1440, 0, 768, 771, 778, 790, 0,
223 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
224 /* 1280x768@60Hz */
225 { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
226 1472, 1664, 0, 768, 771, 778, 798, 0,
227 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
228 /* 1280x768@75Hz */
229 { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360,
230 1488, 1696, 0, 768, 771, 778, 805, 0,
231 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
232 /* 1280x768@85Hz */
233 { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
234 1496, 1712, 0, 768, 771, 778, 809, 0,
235 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
236 /* 1280x768@120Hz RB */
237 { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328,
238 1360, 1440, 0, 768, 771, 778, 813, 0,
239 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
240 /* 1280x800@60Hz RB */
241 { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328,
242 1360, 1440, 0, 800, 803, 809, 823, 0,
243 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
244 /* 1280x800@60Hz */
245 { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
246 1480, 1680, 0, 800, 803, 809, 831, 0,
247 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
248 /* 1280x800@75Hz */
249 { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360,
250 1488, 1696, 0, 800, 803, 809, 838, 0,
251 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
252 /* 1280x800@85Hz */
253 { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
254 1496, 1712, 0, 800, 803, 809, 843, 0,
255 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
256 /* 1280x800@120Hz RB */
257 { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328,
258 1360, 1440, 0, 800, 803, 809, 847, 0,
259 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
260 /* 1280x960@60Hz */
261 { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
262 1488, 1800, 0, 960, 961, 964, 1000, 0,
263 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
264 /* 1280x960@85Hz */
265 { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
266 1504, 1728, 0, 960, 961, 964, 1011, 0,
267 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
268 /* 1280x960@120Hz RB */
269 { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328,
270 1360, 1440, 0, 960, 963, 967, 1017, 0,
271 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
272 /* 1280x1024@60Hz */
273 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
274 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
275 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
276 /* 1280x1024@75Hz */
277 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
278 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
279 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
280 /* 1280x1024@85Hz */
281 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
282 1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
283 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
284 /* 1280x1024@120Hz RB */
285 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328,
286 1360, 1440, 0, 1024, 1027, 1034, 1084, 0,
287 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
288 /* 1360x768@60Hz */
289 { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
290 1536, 1792, 0, 768, 771, 777, 795, 0,
291 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
292 /* 1360x768@120Hz RB */
293 { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408,
294 1440, 1520, 0, 768, 771, 776, 813, 0,
295 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
296 /* 1400x1050@60Hz RB */
297 { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448,
298 1480, 1560, 0, 1050, 1053, 1057, 1080, 0,
299 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
300 /* 1400x1050@60Hz */
301 { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
302 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
303 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
304 /* 1400x1050@75Hz */
305 { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
306 1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
307 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
308 /* 1400x1050@85Hz */
309 { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
310 1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
311 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
312 /* 1400x1050@120Hz RB */
313 { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448,
314 1480, 1560, 0, 1050, 1053, 1057, 1112, 0,
315 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
316 /* 1440x900@60Hz RB */
317 { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488,
318 1520, 1600, 0, 900, 903, 909, 926, 0,
319 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
320 /* 1440x900@60Hz */
321 { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
322 1672, 1904, 0, 900, 903, 909, 934, 0,
323 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
324 /* 1440x900@75Hz */
325 { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536,
326 1688, 1936, 0, 900, 903, 909, 942, 0,
327 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
328 /* 1440x900@85Hz */
329 { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
330 1696, 1952, 0, 900, 903, 909, 948, 0,
331 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
332 /* 1440x900@120Hz RB */
333 { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488,
334 1520, 1600, 0, 900, 903, 909, 953, 0,
335 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
336 /* 1600x1200@60Hz */
337 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
338 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
339 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
340 /* 1600x1200@65Hz */
341 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664,
342 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
343 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
344 /* 1600x1200@70Hz */
345 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664,
346 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
347 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
348 /* 1600x1200@75Hz */
349 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
350 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
351 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
352 /* 1600x1200@85Hz */
353 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
354 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
355 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
356 /* 1600x1200@120Hz RB */
357 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648,
358 1680, 1760, 0, 1200, 1203, 1207, 1271, 0,
359 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
360 /* 1680x1050@60Hz RB */
361 { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728,
362 1760, 1840, 0, 1050, 1053, 1059, 1080, 0,
363 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
364 /* 1680x1050@60Hz */
365 { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
366 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
367 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
368 /* 1680x1050@75Hz */
369 { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800,
370 1976, 2272, 0, 1050, 1053, 1059, 1099, 0,
371 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
372 /* 1680x1050@85Hz */
373 { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
374 1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
375 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
376 /* 1680x1050@120Hz RB */
377 { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728,
378 1760, 1840, 0, 1050, 1053, 1059, 1112, 0,
379 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
380 /* 1792x1344@60Hz */
381 { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
382 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
383 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
384 /* 1792x1344@75Hz */
385 { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
386 2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
387 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
388 /* 1792x1344@120Hz RB */
389 { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840,
390 1872, 1952, 0, 1344, 1347, 1351, 1423, 0,
391 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
392 /* 1856x1392@60Hz */
393 { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
394 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
395 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
396 /* 1856x1392@75Hz */
397 { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
398 2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
399 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
400 /* 1856x1392@120Hz RB */
401 { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904,
402 1936, 2016, 0, 1392, 1395, 1399, 1474, 0,
403 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
404 /* 1920x1200@60Hz RB */
405 { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968,
406 2000, 2080, 0, 1200, 1203, 1209, 1235, 0,
407 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
408 /* 1920x1200@60Hz */
409 { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
410 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
411 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
412 /* 1920x1200@75Hz */
413 { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056,
414 2264, 2608, 0, 1200, 1203, 1209, 1255, 0,
415 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
416 /* 1920x1200@85Hz */
417 { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
418 2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
419 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
420 /* 1920x1200@120Hz RB */
421 { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968,
422 2000, 2080, 0, 1200, 1203, 1209, 1271, 0,
423 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
424 /* 1920x1440@60Hz */
425 { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
426 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
427 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
428 /* 1920x1440@75Hz */
429 { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
430 2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
431 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
432 /* 1920x1440@120Hz RB */
433 { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968,
434 2000, 2080, 0, 1440, 1443, 1447, 1525, 0,
435 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
436 /* 2560x1600@60Hz RB */
437 { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608,
438 2640, 2720, 0, 1600, 1603, 1609, 1646, 0,
439 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
440 /* 2560x1600@60Hz */
441 { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
442 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
443 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
444 /* 2560x1600@75Hz */
445 { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768,
446 3048, 3536, 0, 1600, 1603, 1609, 1672, 0,
447 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
448 /* 2560x1600@85Hz */
449 { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
450 3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
451 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
452 /* 2560x1600@120Hz RB */
453 { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 2608,
454 2640, 2720, 0, 1600, 1603, 1609, 1694, 0,
455 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
456};
457
458static const struct drm_display_mode edid_est_modes[] = {
459 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
460 968, 1056, 0, 600, 601, 605, 628, 0,
461 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
462 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
463 896, 1024, 0, 600, 601, 603, 625, 0,
464 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
465 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
466 720, 840, 0, 480, 481, 484, 500, 0,
467 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
468 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
469 704, 832, 0, 480, 489, 491, 520, 0,
470 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
471 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
472 768, 864, 0, 480, 483, 486, 525, 0,
473 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
474 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
475 752, 800, 0, 480, 490, 492, 525, 0,
476 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
477 { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
478 846, 900, 0, 400, 421, 423, 449, 0,
479 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
480 { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
481 846, 900, 0, 400, 412, 414, 449, 0,
482 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
483 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
484 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
485 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
486 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
487 1136, 1312, 0, 768, 769, 772, 800, 0,
488 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
489 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
490 1184, 1328, 0, 768, 771, 777, 806, 0,
491 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
492 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
493 1184, 1344, 0, 768, 771, 777, 806, 0,
494 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
495 { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
496 1208, 1264, 0, 768, 768, 776, 817, 0,
497 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
498 { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
499 928, 1152, 0, 624, 625, 628, 667, 0,
500 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
501 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
502 896, 1056, 0, 600, 601, 604, 625, 0,
503 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
504 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
505 976, 1040, 0, 600, 637, 643, 666, 0,
506 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
507 { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
508 1344, 1600, 0, 864, 865, 868, 900, 0,
509 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
510};
511
512struct minimode {
513 short w;
514 short h;
515 short r;
516 short rb;
517};
518
519static const struct minimode est3_modes[] = {
520 /* byte 6 */
521 { 640, 350, 85, 0 },
522 { 640, 400, 85, 0 },
523 { 720, 400, 85, 0 },
524 { 640, 480, 85, 0 },
525 { 848, 480, 60, 0 },
526 { 800, 600, 85, 0 },
527 { 1024, 768, 85, 0 },
528 { 1152, 864, 75, 0 },
529 /* byte 7 */
530 { 1280, 768, 60, 1 },
531 { 1280, 768, 60, 0 },
532 { 1280, 768, 75, 0 },
533 { 1280, 768, 85, 0 },
534 { 1280, 960, 60, 0 },
535 { 1280, 960, 85, 0 },
536 { 1280, 1024, 60, 0 },
537 { 1280, 1024, 85, 0 },
538 /* byte 8 */
539 { 1360, 768, 60, 0 },
540 { 1440, 900, 60, 1 },
541 { 1440, 900, 60, 0 },
542 { 1440, 900, 75, 0 },
543 { 1440, 900, 85, 0 },
544 { 1400, 1050, 60, 1 },
545 { 1400, 1050, 60, 0 },
546 { 1400, 1050, 75, 0 },
547 /* byte 9 */
548 { 1400, 1050, 85, 0 },
549 { 1680, 1050, 60, 1 },
550 { 1680, 1050, 60, 0 },
551 { 1680, 1050, 75, 0 },
552 { 1680, 1050, 85, 0 },
553 { 1600, 1200, 60, 0 },
554 { 1600, 1200, 65, 0 },
555 { 1600, 1200, 70, 0 },
556 /* byte 10 */
557 { 1600, 1200, 75, 0 },
558 { 1600, 1200, 85, 0 },
559 { 1792, 1344, 60, 0 },
560 { 1792, 1344, 85, 0 },
561 { 1856, 1392, 60, 0 },
562 { 1856, 1392, 75, 0 },
563 { 1920, 1200, 60, 1 },
564 { 1920, 1200, 60, 0 },
565 /* byte 11 */
566 { 1920, 1200, 75, 0 },
567 { 1920, 1200, 85, 0 },
568 { 1920, 1440, 60, 0 },
569 { 1920, 1440, 75, 0 },
570};
571
572static const struct minimode extra_modes[] = {
573 { 1024, 576, 60, 0 },
574 { 1366, 768, 60, 0 },
575 { 1600, 900, 60, 0 },
576 { 1680, 945, 60, 0 },
577 { 1920, 1080, 60, 0 },
578 { 2048, 1152, 60, 0 },
579 { 2048, 1536, 60, 0 },
580};
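
A minimode is shorthand for width x height @ refresh plus a reduced-blanking flag; the EST III and extra-mode bytes are expanded by matching these shorthands against full timings such as the DMT table above. A sketch of such a match (minimode_to_dmt() is a hypothetical helper; the in-tree lookup is equivalent in spirit):

	static const struct drm_display_mode *
	minimode_to_dmt(const struct minimode *m)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) {
			const struct drm_display_mode *mode = &drm_dmt_modes[i];

			if (mode->hdisplay == m->w &&
			    mode->vdisplay == m->h &&
			    drm_mode_vrefresh(mode) == m->r)
				return mode;	/* ignores m->rb for brevity */
		}

		return NULL;
	}
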
581
582/*
583 * Probably taken from CEA-861 spec.
584 * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
585 */
586static const struct drm_display_mode edid_cea_modes[] = {
587 /* 1 - 640x480@60Hz */
588 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
589 752, 800, 0, 480, 490, 492, 525, 0,
590 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
591 /* 2 - 720x480@60Hz */
592 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
593 798, 858, 0, 480, 489, 495, 525, 0,
594 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
595 /* 3 - 720x480@60Hz */
596 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
597 798, 858, 0, 480, 489, 495, 525, 0,
598 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
599 /* 4 - 1280x720@60Hz */
600 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
601 1430, 1650, 0, 720, 725, 730, 750, 0,
602 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
603 /* 5 - 1920x1080i@60Hz */
604 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
605 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
606 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
607 DRM_MODE_FLAG_INTERLACE) },
608 /* 6 - 1440x480i@60Hz */
609 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
610 1602, 1716, 0, 480, 488, 494, 525, 0,
611 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
612 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
613 /* 7 - 1440x480i@60Hz */
614 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
615 1602, 1716, 0, 480, 488, 494, 525, 0,
616 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
617 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
618 /* 8 - 1440x240@60Hz */
619 { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
620 1602, 1716, 0, 240, 244, 247, 262, 0,
621 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
622 DRM_MODE_FLAG_DBLCLK) },
623 /* 9 - 1440x240@60Hz */
624 { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
625 1602, 1716, 0, 240, 244, 247, 262, 0,
626 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
627 DRM_MODE_FLAG_DBLCLK) },
628 /* 10 - 2880x480i@60Hz */
629 { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
630 3204, 3432, 0, 480, 488, 494, 525, 0,
631 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
632 DRM_MODE_FLAG_INTERLACE) },
633 /* 11 - 2880x480i@60Hz */
634 { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
635 3204, 3432, 0, 480, 488, 494, 525, 0,
636 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
637 DRM_MODE_FLAG_INTERLACE) },
638 /* 12 - 2880x240@60Hz */
639 { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
640 3204, 3432, 0, 240, 244, 247, 262, 0,
641 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
642 /* 13 - 2880x240@60Hz */
643 { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
644 3204, 3432, 0, 240, 244, 247, 262, 0,
645 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
646 /* 14 - 1440x480@60Hz */
647 { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
648 1596, 1716, 0, 480, 489, 495, 525, 0,
649 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
650 /* 15 - 1440x480@60Hz */
651 { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
652 1596, 1716, 0, 480, 489, 495, 525, 0,
653 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
654 /* 16 - 1920x1080@60Hz */
655 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
656 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
657 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
658 /* 17 - 720x576@50Hz */
659 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
660 796, 864, 0, 576, 581, 586, 625, 0,
661 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
662 /* 18 - 720x576@50Hz */
663 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
664 796, 864, 0, 576, 581, 586, 625, 0,
665 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
666 /* 19 - 1280x720@50Hz */
667 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
668 1760, 1980, 0, 720, 725, 730, 750, 0,
669 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
670 /* 20 - 1920x1080i@50Hz */
671 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
672 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
673 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
674 DRM_MODE_FLAG_INTERLACE) },
675 /* 21 - 1440x576i@50Hz */
676 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
677 1590, 1728, 0, 576, 580, 586, 625, 0,
678 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
679 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
680 /* 22 - 1440x576i@50Hz */
681 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
682 1590, 1728, 0, 576, 580, 586, 625, 0,
683 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
684 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
685 /* 23 - 1440x288@50Hz */
686 { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
687 1590, 1728, 0, 288, 290, 293, 312, 0,
688 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
689 DRM_MODE_FLAG_DBLCLK) },
690 /* 24 - 1440x288@50Hz */
691 { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
692 1590, 1728, 0, 288, 290, 293, 312, 0,
693 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
694 DRM_MODE_FLAG_DBLCLK) },
695 /* 25 - 2880x576i@50Hz */
696 { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
697 3180, 3456, 0, 576, 580, 586, 625, 0,
698 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
699 DRM_MODE_FLAG_INTERLACE) },
700 /* 26 - 2880x576i@50Hz */
701 { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
702 3180, 3456, 0, 576, 580, 586, 625, 0,
703 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
704 DRM_MODE_FLAG_INTERLACE) },
705 /* 27 - 2880x288@50Hz */
706 { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
707 3180, 3456, 0, 288, 290, 293, 312, 0,
708 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
709 /* 28 - 2880x288@50Hz */
710 { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
711 3180, 3456, 0, 288, 290, 293, 312, 0,
712 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
713 /* 29 - 1440x576@50Hz */
714 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
715 1592, 1728, 0, 576, 581, 586, 625, 0,
716 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
717 /* 30 - 1440x576@50Hz */
718 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
719 1592, 1728, 0, 576, 581, 586, 625, 0,
720 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
721 /* 31 - 1920x1080@50Hz */
722 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
723 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
724 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
725 /* 32 - 1920x1080@24Hz */
726 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
727 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
728 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
729 /* 33 - 1920x1080@25Hz */
730 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
731 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
732 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
733 /* 34 - 1920x1080@30Hz */
734 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
735 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
736 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
737 /* 35 - 2880x480@60Hz */
738 { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
739 3192, 3432, 0, 480, 489, 495, 525, 0,
740 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
741 /* 36 - 2880x480@60Hz */
742 { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
743 3192, 3432, 0, 480, 489, 495, 525, 0,
744 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
745 /* 37 - 2880x576@50Hz */
746 { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
747 3184, 3456, 0, 576, 581, 586, 625, 0,
748 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
749 /* 38 - 2880x576@50Hz */
750 { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
751 3184, 3456, 0, 576, 581, 586, 625, 0,
752 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
753 /* 39 - 1920x1080i@50Hz */
754 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
755 2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
756 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
757 DRM_MODE_FLAG_INTERLACE) },
758 /* 40 - 1920x1080i@100Hz */
759 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
760 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
761 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
762 DRM_MODE_FLAG_INTERLACE) },
763 /* 41 - 1280x720@100Hz */
764 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
765 1760, 1980, 0, 720, 725, 730, 750, 0,
766 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
767 /* 42 - 720x576@100Hz */
768 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
769 796, 864, 0, 576, 581, 586, 625, 0,
770 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
771 /* 43 - 720x576@100Hz */
772 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
773 796, 864, 0, 576, 581, 586, 625, 0,
774 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
775 /* 44 - 1440x576i@100Hz */
776 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
777 1590, 1728, 0, 576, 580, 586, 625, 0,
778 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
779 DRM_MODE_FLAG_DBLCLK) },
780 /* 45 - 1440x576i@100Hz */
781 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
782 1590, 1728, 0, 576, 580, 586, 625, 0,
783 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
784 DRM_MODE_FLAG_DBLCLK) },
785 /* 46 - 1920x1080i@120Hz */
786 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
787 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
788 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
789 DRM_MODE_FLAG_INTERLACE) },
790 /* 47 - 1280x720@120Hz */
791 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
792 1430, 1650, 0, 720, 725, 730, 750, 0,
793 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
794 /* 48 - 720x480@120Hz */
795 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
796 798, 858, 0, 480, 489, 495, 525, 0,
797 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
798 /* 49 - 720x480@120Hz */
799 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
800 798, 858, 0, 480, 489, 495, 525, 0,
801 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
802 /* 50 - 1440x480i@120Hz */
803 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
804 1602, 1716, 0, 480, 488, 494, 525, 0,
805 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
806 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
807 /* 51 - 1440x480i@120Hz */
808 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
809 1602, 1716, 0, 480, 488, 494, 525, 0,
810 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
811 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
812 /* 52 - 720x576@200Hz */
813 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
814 796, 864, 0, 576, 581, 586, 625, 0,
815 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
816 /* 53 - 720x576@200Hz */
817 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
818 796, 864, 0, 576, 581, 586, 625, 0,
819 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
820 /* 54 - 1440x576i@200Hz */
821 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
822 1590, 1728, 0, 576, 580, 586, 625, 0,
823 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
824 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
825 /* 55 - 1440x576i@200Hz */
826 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
827 1590, 1728, 0, 576, 580, 586, 625, 0,
828 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
829 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
830 /* 56 - 720x480@240Hz */
831 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
832 798, 858, 0, 480, 489, 495, 525, 0,
833 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
834 /* 57 - 720x480@240Hz */
835 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
836 798, 858, 0, 480, 489, 495, 525, 0,
837 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
838 /* 58 - 1440x480i@240Hz */
839 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
840 1602, 1716, 0, 480, 488, 494, 525, 0,
841 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
842 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
843 /* 59 - 1440x480i@240Hz */
844 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
845 1602, 1716, 0, 480, 488, 494, 525, 0,
846 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
847 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
848 /* 60 - 1280x720@24Hz */
849 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
850 3080, 3300, 0, 720, 725, 730, 750, 0,
851 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
852 /* 61 - 1280x720@25Hz */
853 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
854 3740, 3960, 0, 720, 725, 730, 750, 0,
855 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
856 /* 62 - 1280x720@30Hz */
857 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
858 3080, 3300, 0, 720, 725, 730, 750, 0,
859 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
860 /* 63 - 1920x1080@120Hz */
861 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
862 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
863 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
864 /* 64 - 1920x1080@100Hz */
865 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
866 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
867 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
868};
869
 /*** DDC fetch and block validation ***/

 static const u8 edid_header[] = {
@@ -357,10 +1094,14 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 			break;
 		}
 	}
-	if (i == 4)
+
+	if (i == 4 && print_bad_edid) {
 		dev_warn(connector->dev->dev,
 			 "%s: Ignoring invalid EDID block %d.\n",
 			 drm_get_connector_name(connector), j);
+
+		connector->bad_edid_counter++;
+	}
 	}

 	if (valid_extensions != block[0x7e]) {
@@ -541,7 +1282,7 @@ struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
 {
 	int i;

-	for (i = 0; i < drm_num_dmt_modes; i++) {
+	for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) {
 		const struct drm_display_mode *ptr = &drm_dmt_modes[i];
 		if (hsize != ptr->hdisplay)
 			continue;
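This hunk and the three that follow make the same mechanical change: the hand-maintained count constants (drm_num_dmt_modes, num_extra_modes, drm_num_cea_modes) are replaced by ARRAY_SIZE() on the table itself, so the loop bound can never drift out of sync with the table contents. A minimal standalone sketch of the idiom follows; the struct and table are illustrative stand-ins, not the kernel's, and only the ARRAY_SIZE() pattern itself is what the patch relies on.

#include <stdio.h>

/* Simplified form of the kernel's ARRAY_SIZE() from <linux/kernel.h>. */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

struct mode { int w, h; };

static const struct mode modes[] = {
	{  640,  480 },
	{ 1280,  720 },
	{ 1920, 1080 },
};

int main(void)
{
	size_t i;

	/* Adding or removing a table entry updates the bound automatically. */
	for (i = 0; i < ARRAY_SIZE(modes); i++)
		printf("%dx%d\n", modes[i].w, modes[i].h);
	return 0;
}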
@@ -1082,7 +1823,7 @@ drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
 	struct drm_display_mode *newmode;
 	struct drm_device *dev = connector->dev;

-	for (i = 0; i < drm_num_dmt_modes; i++) {
+	for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) {
 		if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
 		    valid_inferred_mode(connector, drm_dmt_modes + i)) {
 			newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
@@ -1117,7 +1858,7 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
 	struct drm_display_mode *newmode;
 	struct drm_device *dev = connector->dev;

-	for (i = 0; i < num_extra_modes; i++) {
+	for (i = 0; i < ARRAY_SIZE(extra_modes); i++) {
 		const struct minimode *m = &extra_modes[i];
 		newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0);
 		if (!newmode)
@@ -1146,7 +1887,7 @@ drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
 	struct drm_device *dev = connector->dev;
 	bool rb = drm_monitor_supports_rb(edid);

-	for (i = 0; i < num_extra_modes; i++) {
+	for (i = 0; i < ARRAY_SIZE(extra_modes); i++) {
 		const struct minimode *m = &extra_modes[i];
 		newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0);
 		if (!newmode)
@@ -1483,9 +2224,11 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
 #define VIDEO_BLOCK     0x02
 #define VENDOR_BLOCK    0x03
 #define SPEAKER_BLOCK   0x04
+#define VIDEO_CAPABILITY_BLOCK	0x07
 #define EDID_BASIC_AUDIO	(1 << 6)
 #define EDID_CEA_YCRCB444	(1 << 5)
 #define EDID_CEA_YCRCB422	(1 << 4)
+#define EDID_CEA_VCDB_QS	(1 << 6)

 /**
  * Search EDID for CEA extension block.
@@ -1513,16 +2256,19 @@ u8 *drm_find_cea_extension(struct edid *edid)
 }
 EXPORT_SYMBOL(drm_find_cea_extension);

-/*
- * Looks for a CEA mode matching given drm_display_mode.
- * Returns its CEA Video ID code, or 0 if not found.
+/**
+ * drm_match_cea_mode - look for a CEA mode matching given mode
+ * @to_match: display mode
+ *
+ * Returns the CEA Video ID (VIC) of the mode or 0 if it isn't a CEA-861
+ * mode.
  */
-u8 drm_match_cea_mode(struct drm_display_mode *to_match)
+u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
 {
 	struct drm_display_mode *cea_mode;
 	u8 mode;

-	for (mode = 0; mode < drm_num_cea_modes; mode++) {
+	for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) {
 		cea_mode = (struct drm_display_mode *)&edid_cea_modes[mode];

 		if (drm_mode_equal(to_match, cea_mode))
@@ -1542,7 +2288,7 @@ do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
 
 	for (mode = db; mode < db + len; mode++) {
 		cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */
-		if (cea_mode < drm_num_cea_modes) {
+		if (cea_mode < ARRAY_SIZE(edid_cea_modes)) {
 			struct drm_display_mode *newmode;
 			newmode = drm_mode_duplicate(dev,
 						     &edid_cea_modes[cea_mode]);
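Since drm_match_cea_mode() is a plain linear scan over edid_cea_modes, it doubles as a cheap "is this a CEA-861 mode?" predicate besides feeding the AVI infoframe code below. A hedged sketch of that use; the wrapper is hypothetical, only drm_match_cea_mode() itself comes from this patch.

/* Hypothetical helper: a non-zero VIC means the mode matched an
 * edid_cea_modes entry. */
static bool hypothetical_mode_is_cea(const struct drm_display_mode *mode)
{
	return drm_match_cea_mode(mode) != 0;
}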
@@ -1902,6 +2648,37 @@ end:
 EXPORT_SYMBOL(drm_detect_monitor_audio);

 /**
+ * drm_rgb_quant_range_selectable - is RGB quantization range selectable?
+ *
+ * Check whether the monitor reports the RGB quantization range selection
+ * as supported. The AVI infoframe can then be used to inform the monitor
+ * which quantization range (full or limited) is used.
+ */
+bool drm_rgb_quant_range_selectable(struct edid *edid)
+{
+	u8 *edid_ext;
+	int i, start, end;
+
+	edid_ext = drm_find_cea_extension(edid);
+	if (!edid_ext)
+		return false;
+
+	if (cea_db_offsets(edid_ext, &start, &end))
+		return false;
+
+	for_each_cea_db(edid_ext, i, start, end) {
+		if (cea_db_tag(&edid_ext[i]) == VIDEO_CAPABILITY_BLOCK &&
+		    cea_db_payload_len(&edid_ext[i]) == 2) {
+			DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", edid_ext[i + 2]);
+			return edid_ext[i + 2] & EDID_CEA_VCDB_QS;
+		}
+	}
+
+	return false;
+}
+EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
+
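drm_rgb_quant_range_selectable() only reports capability; the caller still decides which range to signal. A hedged sketch of that decision, assuming the quantization_range field and enum from the new <linux/hdmi.h> infoframe helpers; the function itself and its wants_full_range flag are hypothetical.

static void hypothetical_set_avi_quant_range(struct hdmi_avi_infoframe *frame,
					     struct edid *edid,
					     bool wants_full_range)
{
	if (drm_rgb_quant_range_selectable(edid))
		/* Sink honours the AVI Q bits: pick a range explicitly. */
		frame->quantization_range = wants_full_range ?
			HDMI_QUANTIZATION_RANGE_FULL :
			HDMI_QUANTIZATION_RANGE_LIMITED;
	else
		/* Sink cannot switch: signal the default CEA behaviour. */
		frame->quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
}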
+/**
  * drm_add_display_info - pull display info out if present
  * @edid: EDID data
  * @info: display info (attached to connector)
@@ -2020,7 +2797,8 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
 	num_modes += add_cvt_modes(connector, edid);
 	num_modes += add_standard_modes(connector, edid);
 	num_modes += add_established_modes(connector, edid);
-	num_modes += add_inferred_modes(connector, edid);
+	if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
+		num_modes += add_inferred_modes(connector, edid);
 	num_modes += add_cea_modes(connector, edid);

 	if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
@@ -2081,20 +2859,33 @@ int drm_add_modes_noedid(struct drm_connector *connector,
 EXPORT_SYMBOL(drm_add_modes_noedid);

 /**
- * drm_mode_cea_vic - return the CEA-861 VIC of a given mode
- * @mode: mode
+ * drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
+ *                                              data from a DRM display mode
+ * @frame: HDMI AVI infoframe
+ * @mode: DRM display mode
  *
- * RETURNS:
- * The VIC number, 0 in case it's not a CEA-861 mode.
+ * Returns 0 on success or a negative error code on failure.
  */
-uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode)
+int
+drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
+					 const struct drm_display_mode *mode)
 {
-	uint8_t i;
+	int err;
+
+	if (!frame || !mode)
+		return -EINVAL;
+
+	err = hdmi_avi_infoframe_init(frame);
+	if (err < 0)
+		return err;
+
+	frame->video_code = drm_match_cea_mode(mode);
+	if (!frame->video_code)
+		return 0;

-	for (i = 0; i < drm_num_cea_modes; i++)
-		if (drm_mode_equal(mode, &edid_cea_modes[i]))
-			return i + 1;
+	frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
+	frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;

 	return 0;
 }
-EXPORT_SYMBOL(drm_mode_cea_vic);
+EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
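The intended call sequence pairs this helper with the generic packing code from the new <linux/hdmi.h>: fill the frame from the current mode, optionally adjust fields, then pack it for the hardware. A hedged sketch; the buffer size covers the 4-byte infoframe header plus the 13-byte AVI payload, but the surrounding function and the final register write are hypothetical.

static int hypothetical_send_avi_infoframe(const struct drm_display_mode *mode)
{
	struct hdmi_avi_infoframe frame;
	u8 buffer[17];	/* 4-byte header + 13-byte AVI payload */
	ssize_t len;
	int err;

	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
	if (err < 0)
		return err;

	len = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
	if (len < 0)
		return len;

	/* hand buffer/len to the encoder's infoframe registers here */
	return 0;
}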
diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h
deleted file mode 100644
index 5dbf7d2557b4..000000000000
--- a/drivers/gpu/drm/drm_edid_modes.h
+++ /dev/null
@@ -1,774 +0,0 @@
1/*
2 * Copyright (c) 2007-2008 Intel Corporation
3 * Jesse Barnes <jesse.barnes@intel.com>
4 * Copyright 2010 Red Hat, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sub license,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 */
25
26#include <linux/kernel.h>
27#include <drm/drmP.h>
28#include <drm/drm_edid.h>
29
30/*
31 * Autogenerated from the DMT spec.
32 * This table is copied from xfree86/modes/xf86EdidModes.c.
33 */
34static const struct drm_display_mode drm_dmt_modes[] = {
35 /* 640x350@85Hz */
36 { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
37 736, 832, 0, 350, 382, 385, 445, 0,
38 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
39 /* 640x400@85Hz */
40 { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
41 736, 832, 0, 400, 401, 404, 445, 0,
42 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
43 /* 720x400@85Hz */
44 { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756,
45 828, 936, 0, 400, 401, 404, 446, 0,
46 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
47 /* 640x480@60Hz */
48 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
49 752, 800, 0, 480, 489, 492, 525, 0,
50 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
51 /* 640x480@72Hz */
52 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
53 704, 832, 0, 480, 489, 492, 520, 0,
54 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
55 /* 640x480@75Hz */
56 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
57 720, 840, 0, 480, 481, 484, 500, 0,
58 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
59 /* 640x480@85Hz */
60 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696,
61 752, 832, 0, 480, 481, 484, 509, 0,
62 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
63 /* 800x600@56Hz */
64 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
65 896, 1024, 0, 600, 601, 603, 625, 0,
66 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
67 /* 800x600@60Hz */
68 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
69 968, 1056, 0, 600, 601, 605, 628, 0,
70 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
71 /* 800x600@72Hz */
72 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
73 976, 1040, 0, 600, 637, 643, 666, 0,
74 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
75 /* 800x600@75Hz */
76 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
77 896, 1056, 0, 600, 601, 604, 625, 0,
78 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
79 /* 800x600@85Hz */
80 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
81 896, 1048, 0, 600, 601, 604, 631, 0,
82 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
83 /* 800x600@120Hz RB */
84 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848,
85 880, 960, 0, 600, 603, 607, 636, 0,
86 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
87 /* 848x480@60Hz */
88 { DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
89 976, 1088, 0, 480, 486, 494, 517, 0,
90 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
91 /* 1024x768@43Hz, interlace */
92 { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
93 1208, 1264, 0, 768, 768, 772, 817, 0,
94 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
95 DRM_MODE_FLAG_INTERLACE) },
96 /* 1024x768@60Hz */
97 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
98 1184, 1344, 0, 768, 771, 777, 806, 0,
99 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
100 /* 1024x768@70Hz */
101 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
102 1184, 1328, 0, 768, 771, 777, 806, 0,
103 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
104 /* 1024x768@75Hz */
105 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
106 1136, 1312, 0, 768, 769, 772, 800, 0,
107 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
108 /* 1024x768@85Hz */
109 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
110 1168, 1376, 0, 768, 769, 772, 808, 0,
111 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
112 /* 1024x768@120Hz RB */
113 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072,
114 1104, 1184, 0, 768, 771, 775, 813, 0,
115 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
116 /* 1152x864@75Hz */
117 { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
118 1344, 1600, 0, 864, 865, 868, 900, 0,
119 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
120 /* 1280x768@60Hz RB */
121 { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328,
122 1360, 1440, 0, 768, 771, 778, 790, 0,
123 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
124 /* 1280x768@60Hz */
125 { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
126 1472, 1664, 0, 768, 771, 778, 798, 0,
127 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
128 /* 1280x768@75Hz */
129 { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360,
130 1488, 1696, 0, 768, 771, 778, 805, 0,
131 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
132 /* 1280x768@85Hz */
133 { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
134 1496, 1712, 0, 768, 771, 778, 809, 0,
135 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
136 /* 1280x768@120Hz RB */
137 { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328,
138 1360, 1440, 0, 768, 771, 778, 813, 0,
139 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
140 /* 1280x800@60Hz RB */
141 { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328,
142 1360, 1440, 0, 800, 803, 809, 823, 0,
143 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
144 /* 1280x800@60Hz */
145 { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
146 1480, 1680, 0, 800, 803, 809, 831, 0,
147 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
148 /* 1280x800@75Hz */
149 { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360,
150 1488, 1696, 0, 800, 803, 809, 838, 0,
151 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
152 /* 1280x800@85Hz */
153 { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
154 1496, 1712, 0, 800, 803, 809, 843, 0,
155 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
156 /* 1280x800@120Hz RB */
157 { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328,
158 1360, 1440, 0, 800, 803, 809, 847, 0,
159 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
160 /* 1280x960@60Hz */
161 { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
162 1488, 1800, 0, 960, 961, 964, 1000, 0,
163 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
164 /* 1280x960@85Hz */
165 { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
166 1504, 1728, 0, 960, 961, 964, 1011, 0,
167 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
168 /* 1280x960@120Hz RB */
169 { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328,
170 1360, 1440, 0, 960, 963, 967, 1017, 0,
171 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
172 /* 1280x1024@60Hz */
173 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
174 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
175 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
176 /* 1280x1024@75Hz */
177 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
178 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
179 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
180 /* 1280x1024@85Hz */
181 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
182 1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
183 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
184 /* 1280x1024@120Hz RB */
185 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328,
186 1360, 1440, 0, 1024, 1027, 1034, 1084, 0,
187 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
188 /* 1360x768@60Hz */
189 { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
190 1536, 1792, 0, 768, 771, 777, 795, 0,
191 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
192 /* 1360x768@120Hz RB */
193 { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408,
194 1440, 1520, 0, 768, 771, 776, 813, 0,
195 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
196 /* 1400x1050@60Hz RB */
197 { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448,
198 1480, 1560, 0, 1050, 1053, 1057, 1080, 0,
199 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
200 /* 1400x1050@60Hz */
201 { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
202 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
203 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
204 /* 1400x1050@75Hz */
205 { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
206 1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
207 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
208 /* 1400x1050@85Hz */
209 { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
210 1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
211 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
212 /* 1400x1050@120Hz RB */
213 { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448,
214 1480, 1560, 0, 1050, 1053, 1057, 1112, 0,
215 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
216 /* 1440x900@60Hz RB */
217 { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488,
218 1520, 1600, 0, 900, 903, 909, 926, 0,
219 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
220 /* 1440x900@60Hz */
221 { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
222 1672, 1904, 0, 900, 903, 909, 934, 0,
223 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
224 /* 1440x900@75Hz */
225 { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536,
226 1688, 1936, 0, 900, 903, 909, 942, 0,
227 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
228 /* 1440x900@85Hz */
229 { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
230 1696, 1952, 0, 900, 903, 909, 948, 0,
231 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
232 /* 1440x900@120Hz RB */
233 { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488,
234 1520, 1600, 0, 900, 903, 909, 953, 0,
235 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
236 /* 1600x1200@60Hz */
237 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
238 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
239 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
240 /* 1600x1200@65Hz */
241 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664,
242 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
243 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
244 /* 1600x1200@70Hz */
245 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664,
246 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
247 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
248 /* 1600x1200@75Hz */
249 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
250 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
251 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
252 /* 1600x1200@85Hz */
253 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
254 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
255 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
256 /* 1600x1200@120Hz RB */
257 { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648,
258 1680, 1760, 0, 1200, 1203, 1207, 1271, 0,
259 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
260 /* 1680x1050@60Hz RB */
261 { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728,
262 1760, 1840, 0, 1050, 1053, 1059, 1080, 0,
263 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
264 /* 1680x1050@60Hz */
265 { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
266 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
267 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
268 /* 1680x1050@75Hz */
269 { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800,
270 1976, 2272, 0, 1050, 1053, 1059, 1099, 0,
271 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
272 /* 1680x1050@85Hz */
273 { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
274 1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
275 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
276 /* 1680x1050@120Hz RB */
277 { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728,
278 1760, 1840, 0, 1050, 1053, 1059, 1112, 0,
279 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
280 /* 1792x1344@60Hz */
281 { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
282 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
283 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
284 /* 1792x1344@75Hz */
285 { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
286 2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
287 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
288 /* 1792x1344@120Hz RB */
289 { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840,
290 1872, 1952, 0, 1344, 1347, 1351, 1423, 0,
291 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
292 /* 1856x1392@60Hz */
293 { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
294 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
295 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
296 /* 1856x1392@75Hz */
297 { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
298 2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
299 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
300 /* 1856x1392@120Hz RB */
301 { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904,
302 1936, 2016, 0, 1392, 1395, 1399, 1474, 0,
303 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
304 /* 1920x1200@60Hz RB */
305 { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968,
306 2000, 2080, 0, 1200, 1203, 1209, 1235, 0,
307 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
308 /* 1920x1200@60Hz */
309 { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
310 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
311 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
312 /* 1920x1200@75Hz */
313 { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056,
314 2264, 2608, 0, 1200, 1203, 1209, 1255, 0,
315 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
316 /* 1920x1200@85Hz */
317 { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
318 2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
319 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
320 /* 1920x1200@120Hz RB */
321 { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968,
322 2000, 2080, 0, 1200, 1203, 1209, 1271, 0,
323 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
324 /* 1920x1440@60Hz */
325 { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
326 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
327 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
328 /* 1920x1440@75Hz */
329 { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
330 2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
331 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
332 /* 1920x1440@120Hz RB */
333 { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968,
334 2000, 2080, 0, 1440, 1443, 1447, 1525, 0,
335 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
336 /* 2560x1600@60Hz RB */
337 { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608,
338 2640, 2720, 0, 1600, 1603, 1609, 1646, 0,
339 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
340 /* 2560x1600@60Hz */
341 { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
342 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
343 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
344 /* 2560x1600@75HZ */
345 { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768,
346 3048, 3536, 0, 1600, 1603, 1609, 1672, 0,
347 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
348 /* 2560x1600@85HZ */
349 { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
350 3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
351 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
352 /* 2560x1600@120Hz RB */
353 { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 2608,
354 2640, 2720, 0, 1600, 1603, 1609, 1694, 0,
355 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
356
357};
358static const int drm_num_dmt_modes =
359 sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
360
361static const struct drm_display_mode edid_est_modes[] = {
362 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
363 968, 1056, 0, 600, 601, 605, 628, 0,
364 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
365 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
366 896, 1024, 0, 600, 601, 603, 625, 0,
367 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
368 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
369 720, 840, 0, 480, 481, 484, 500, 0,
370 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
371 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
372 704, 832, 0, 480, 489, 491, 520, 0,
373 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
374 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
375 768, 864, 0, 480, 483, 486, 525, 0,
376 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
377 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
378 752, 800, 0, 480, 490, 492, 525, 0,
379 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
380 { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
381 846, 900, 0, 400, 421, 423, 449, 0,
382 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
383 { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
384 846, 900, 0, 400, 412, 414, 449, 0,
385 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
386 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
387 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
388 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
389 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
390 1136, 1312, 0, 768, 769, 772, 800, 0,
391 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
392 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
393 1184, 1328, 0, 768, 771, 777, 806, 0,
394 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
395 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
396 1184, 1344, 0, 768, 771, 777, 806, 0,
397 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
398 { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
399 1208, 1264, 0, 768, 768, 776, 817, 0,
400 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
401 { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
402 928, 1152, 0, 624, 625, 628, 667, 0,
403 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
404 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
405 896, 1056, 0, 600, 601, 604, 625, 0,
406 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
407 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
408 976, 1040, 0, 600, 637, 643, 666, 0,
409 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
410 { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
411 1344, 1600, 0, 864, 865, 868, 900, 0,
412 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
413};
414
415struct minimode {
416 short w;
417 short h;
418 short r;
419 short rb;
420};
421
422static const struct minimode est3_modes[] = {
423 /* byte 6 */
424 { 640, 350, 85, 0 },
425 { 640, 400, 85, 0 },
426 { 720, 400, 85, 0 },
427 { 640, 480, 85, 0 },
428 { 848, 480, 60, 0 },
429 { 800, 600, 85, 0 },
430 { 1024, 768, 85, 0 },
431 { 1152, 864, 75, 0 },
432 /* byte 7 */
433 { 1280, 768, 60, 1 },
434 { 1280, 768, 60, 0 },
435 { 1280, 768, 75, 0 },
436 { 1280, 768, 85, 0 },
437 { 1280, 960, 60, 0 },
438 { 1280, 960, 85, 0 },
439 { 1280, 1024, 60, 0 },
440 { 1280, 1024, 85, 0 },
441 /* byte 8 */
442 { 1360, 768, 60, 0 },
443 { 1440, 900, 60, 1 },
444 { 1440, 900, 60, 0 },
445 { 1440, 900, 75, 0 },
446 { 1440, 900, 85, 0 },
447 { 1400, 1050, 60, 1 },
448 { 1400, 1050, 60, 0 },
449 { 1400, 1050, 75, 0 },
450 /* byte 9 */
451 { 1400, 1050, 85, 0 },
452 { 1680, 1050, 60, 1 },
453 { 1680, 1050, 60, 0 },
454 { 1680, 1050, 75, 0 },
455 { 1680, 1050, 85, 0 },
456 { 1600, 1200, 60, 0 },
457 { 1600, 1200, 65, 0 },
458 { 1600, 1200, 70, 0 },
459 /* byte 10 */
460 { 1600, 1200, 75, 0 },
461 { 1600, 1200, 85, 0 },
462 { 1792, 1344, 60, 0 },
463 { 1792, 1344, 85, 0 },
464 { 1856, 1392, 60, 0 },
465 { 1856, 1392, 75, 0 },
466 { 1920, 1200, 60, 1 },
467 { 1920, 1200, 60, 0 },
468 /* byte 11 */
469 { 1920, 1200, 75, 0 },
470 { 1920, 1200, 85, 0 },
471 { 1920, 1440, 60, 0 },
472 { 1920, 1440, 75, 0 },
473};
474static const int num_est3_modes = ARRAY_SIZE(est3_modes);
475
476static const struct minimode extra_modes[] = {
477 { 1024, 576, 60, 0 },
478 { 1366, 768, 60, 0 },
479 { 1600, 900, 60, 0 },
480 { 1680, 945, 60, 0 },
481 { 1920, 1080, 60, 0 },
482 { 2048, 1152, 60, 0 },
483 { 2048, 1536, 60, 0 },
484};
485static const int num_extra_modes = ARRAY_SIZE(extra_modes);
486
487/*
488 * Probably taken from CEA-861 spec.
489 * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
490 */
491static const struct drm_display_mode edid_cea_modes[] = {
492 /* 1 - 640x480@60Hz */
493 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
494 752, 800, 0, 480, 490, 492, 525, 0,
495 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
496 /* 2 - 720x480@60Hz */
497 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
498 798, 858, 0, 480, 489, 495, 525, 0,
499 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
500 /* 3 - 720x480@60Hz */
501 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
502 798, 858, 0, 480, 489, 495, 525, 0,
503 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
504 /* 4 - 1280x720@60Hz */
505 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
506 1430, 1650, 0, 720, 725, 730, 750, 0,
507 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
508 /* 5 - 1920x1080i@60Hz */
509 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
510 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
511 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
512 DRM_MODE_FLAG_INTERLACE) },
513 /* 6 - 1440x480i@60Hz */
514 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
515 1602, 1716, 0, 480, 488, 494, 525, 0,
516 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
517 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
518 /* 7 - 1440x480i@60Hz */
519 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
520 1602, 1716, 0, 480, 488, 494, 525, 0,
521 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
522 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
523 /* 8 - 1440x240@60Hz */
524 { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
525 1602, 1716, 0, 240, 244, 247, 262, 0,
526 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
527 DRM_MODE_FLAG_DBLCLK) },
528 /* 9 - 1440x240@60Hz */
529 { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
530 1602, 1716, 0, 240, 244, 247, 262, 0,
531 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
532 DRM_MODE_FLAG_DBLCLK) },
533 /* 10 - 2880x480i@60Hz */
534 { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
535 3204, 3432, 0, 480, 488, 494, 525, 0,
536 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
537 DRM_MODE_FLAG_INTERLACE) },
538 /* 11 - 2880x480i@60Hz */
539 { DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
540 3204, 3432, 0, 480, 488, 494, 525, 0,
541 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
542 DRM_MODE_FLAG_INTERLACE) },
543 /* 12 - 2880x240@60Hz */
544 { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
545 3204, 3432, 0, 240, 244, 247, 262, 0,
546 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
547 /* 13 - 2880x240@60Hz */
548 { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
549 3204, 3432, 0, 240, 244, 247, 262, 0,
550 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
551 /* 14 - 1440x480@60Hz */
552 { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
553 1596, 1716, 0, 480, 489, 495, 525, 0,
554 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
555 /* 15 - 1440x480@60Hz */
556 { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
557 1596, 1716, 0, 480, 489, 495, 525, 0,
558 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
559 /* 16 - 1920x1080@60Hz */
560 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
561 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
562 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
563 /* 17 - 720x576@50Hz */
564 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
565 796, 864, 0, 576, 581, 586, 625, 0,
566 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
567 /* 18 - 720x576@50Hz */
568 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
569 796, 864, 0, 576, 581, 586, 625, 0,
570 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
571 /* 19 - 1280x720@50Hz */
572 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
573 1760, 1980, 0, 720, 725, 730, 750, 0,
574 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
575 /* 20 - 1920x1080i@50Hz */
576 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
577 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
578 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
579 DRM_MODE_FLAG_INTERLACE) },
580 /* 21 - 1440x576i@50Hz */
581 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
582 1590, 1728, 0, 576, 580, 586, 625, 0,
583 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
584 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
585 /* 22 - 1440x576i@50Hz */
586 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
587 1590, 1728, 0, 576, 580, 586, 625, 0,
588 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
589 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
590 /* 23 - 1440x288@50Hz */
591 { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
592 1590, 1728, 0, 288, 290, 293, 312, 0,
593 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
594 DRM_MODE_FLAG_DBLCLK) },
595 /* 24 - 1440x288@50Hz */
596 { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
597 1590, 1728, 0, 288, 290, 293, 312, 0,
598 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
599 DRM_MODE_FLAG_DBLCLK) },
600 /* 25 - 2880x576i@50Hz */
601 { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
602 3180, 3456, 0, 576, 580, 586, 625, 0,
603 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
604 DRM_MODE_FLAG_INTERLACE) },
605 /* 26 - 2880x576i@50Hz */
606 { DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
607 3180, 3456, 0, 576, 580, 586, 625, 0,
608 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
609 DRM_MODE_FLAG_INTERLACE) },
610 /* 27 - 2880x288@50Hz */
611 { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
612 3180, 3456, 0, 288, 290, 293, 312, 0,
613 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
614 /* 28 - 2880x288@50Hz */
615 { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
616 3180, 3456, 0, 288, 290, 293, 312, 0,
617 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
618 /* 29 - 1440x576@50Hz */
619 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
620 1592, 1728, 0, 576, 581, 586, 625, 0,
621 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
622 /* 30 - 1440x576@50Hz */
623 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
624 1592, 1728, 0, 576, 581, 586, 625, 0,
625 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
626 /* 31 - 1920x1080@50Hz */
627 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
628 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
629 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
630 /* 32 - 1920x1080@24Hz */
631 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
632 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
633 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
634 /* 33 - 1920x1080@25Hz */
635 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
636 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
637 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
638 /* 34 - 1920x1080@30Hz */
639 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
640 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
641 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
642 /* 35 - 2880x480@60Hz */
643 { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
644 3192, 3432, 0, 480, 489, 495, 525, 0,
645 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
646 /* 36 - 2880x480@60Hz */
647 { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
648 3192, 3432, 0, 480, 489, 495, 525, 0,
649 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
650 /* 37 - 2880x576@50Hz */
651 { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
652 3184, 3456, 0, 576, 581, 586, 625, 0,
653 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
654 /* 38 - 2880x576@50Hz */
655 { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
656 3184, 3456, 0, 576, 581, 586, 625, 0,
657 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
658 /* 39 - 1920x1080i@50Hz */
659 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
660 2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
661 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
662 DRM_MODE_FLAG_INTERLACE) },
663 /* 40 - 1920x1080i@100Hz */
664 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
665 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
666 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
667 DRM_MODE_FLAG_INTERLACE) },
668 /* 41 - 1280x720@100Hz */
669 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
670 1760, 1980, 0, 720, 725, 730, 750, 0,
671 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
672 /* 42 - 720x576@100Hz */
673 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
674 796, 864, 0, 576, 581, 586, 625, 0,
675 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
676 /* 43 - 720x576@100Hz */
677 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
678 796, 864, 0, 576, 581, 586, 625, 0,
679 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
680 /* 44 - 1440x576i@100Hz */
681 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
682 1590, 1728, 0, 576, 580, 586, 625, 0,
683 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
684 DRM_MODE_FLAG_DBLCLK) },
685 /* 45 - 1440x576i@100Hz */
686 { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
687 1590, 1728, 0, 576, 580, 586, 625, 0,
688 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
689 DRM_MODE_FLAG_DBLCLK) },
690 /* 46 - 1920x1080i@120Hz */
691 { DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
692 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
693 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
694 DRM_MODE_FLAG_INTERLACE) },
695 /* 47 - 1280x720@120Hz */
696 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
697 1430, 1650, 0, 720, 725, 730, 750, 0,
698 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
699 /* 48 - 720x480@120Hz */
700 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
701 798, 858, 0, 480, 489, 495, 525, 0,
702 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
703 /* 49 - 720x480@120Hz */
704 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
705 798, 858, 0, 480, 489, 495, 525, 0,
706 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
707 /* 50 - 1440x480i@120Hz */
708 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
709 1602, 1716, 0, 480, 488, 494, 525, 0,
710 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
711 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
712 /* 51 - 1440x480i@120Hz */
713 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
714 1602, 1716, 0, 480, 488, 494, 525, 0,
715 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
716 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
717 /* 52 - 720x576@200Hz */
718 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
719 796, 864, 0, 576, 581, 586, 625, 0,
720 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
721 /* 53 - 720x576@200Hz */
722 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
723 796, 864, 0, 576, 581, 586, 625, 0,
724 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
725 /* 54 - 1440x576i@200Hz */
726 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
727 1590, 1728, 0, 576, 580, 586, 625, 0,
728 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
729 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
730 /* 55 - 1440x576i@200Hz */
731 { DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
732 1590, 1728, 0, 576, 580, 586, 625, 0,
733 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
734 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
735 /* 56 - 720x480@240Hz */
736 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
737 798, 858, 0, 480, 489, 495, 525, 0,
738 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
739 /* 57 - 720x480@240Hz */
740 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
741 798, 858, 0, 480, 489, 495, 525, 0,
742 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
743 /* 58 - 1440x480i@240 */
744 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
745 1602, 1716, 0, 480, 488, 494, 525, 0,
746 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
747 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
748 /* 59 - 1440x480i@240 */
749 { DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
750 1602, 1716, 0, 480, 488, 494, 525, 0,
751 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
752 DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
753 /* 60 - 1280x720@24Hz */
754 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
755 3080, 3300, 0, 720, 725, 730, 750, 0,
756 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
757 /* 61 - 1280x720@25Hz */
758 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
759 3740, 3960, 0, 720, 725, 730, 750, 0,
760 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
761 /* 62 - 1280x720@30Hz */
762 { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
763 3080, 3300, 0, 720, 725, 730, 750, 0,
764 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
765 /* 63 - 1920x1080@120Hz */
766 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
767 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
768 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
769 /* 64 - 1920x1080@100Hz */
770 { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
771 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
772 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
773};
774static const int drm_num_cea_modes = ARRAY_SIZE(edid_cea_modes);
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
index 63e733408b6d..48c52f7df4e6 100644
--- a/drivers/gpu/drm/drm_encoder_slave.c
+++ b/drivers/gpu/drm/drm_encoder_slave.c
@@ -123,3 +123,66 @@ void drm_i2c_encoder_destroy(struct drm_encoder *drm_encoder)
 	module_put(module);
 }
 EXPORT_SYMBOL(drm_i2c_encoder_destroy);
+
+/*
+ * Wrapper functions which can be plugged in to drm_encoder_helper_funcs:
+ */
+
+static inline struct drm_encoder_slave_funcs *
+get_slave_funcs(struct drm_encoder *enc)
+{
+	return to_encoder_slave(enc)->slave_funcs;
+}
+
+void drm_i2c_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+	get_slave_funcs(encoder)->dpms(encoder, mode);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_dpms);
+
+bool drm_i2c_encoder_mode_fixup(struct drm_encoder *encoder,
+		const struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	return get_slave_funcs(encoder)->mode_fixup(encoder, mode, adjusted_mode);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_mode_fixup);
+
+void drm_i2c_encoder_prepare(struct drm_encoder *encoder)
+{
+	drm_i2c_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_prepare);
+
+void drm_i2c_encoder_commit(struct drm_encoder *encoder)
+{
+	drm_i2c_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_commit);
+
+void drm_i2c_encoder_mode_set(struct drm_encoder *encoder,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	get_slave_funcs(encoder)->mode_set(encoder, mode, adjusted_mode);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_mode_set);
+
+enum drm_connector_status drm_i2c_encoder_detect(struct drm_encoder *encoder,
+	    struct drm_connector *connector)
+{
+	return get_slave_funcs(encoder)->detect(encoder, connector);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_detect);
+
+void drm_i2c_encoder_save(struct drm_encoder *encoder)
+{
+	get_slave_funcs(encoder)->save(encoder);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_save);
+
+void drm_i2c_encoder_restore(struct drm_encoder *encoder)
+{
+	get_slave_funcs(encoder)->restore(encoder);
+}
+EXPORT_SYMBOL(drm_i2c_encoder_restore);
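The point of these wrappers is that an encoder driver can now populate its drm_encoder_helper_funcs with them directly instead of writing its own forwarding stubs. A hedged sketch of that wiring; the struct name is illustrative, the fields are the standard helper hooks of this era.

static const struct drm_encoder_helper_funcs hypothetical_encoder_helpers = {
	.dpms		= drm_i2c_encoder_dpms,
	.mode_fixup	= drm_i2c_encoder_mode_fixup,
	.prepare	= drm_i2c_encoder_prepare,
	.commit		= drm_i2c_encoder_commit,
	.mode_set	= drm_i2c_encoder_mode_set,
	.detect		= drm_i2c_encoder_detect,
	.save		= drm_i2c_encoder_save,
	.restore	= drm_i2c_encoder_restore,
};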
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index fd9d0af4d536..0b5af7d0edb1 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -85,6 +85,11 @@ static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
 	if (!fb_cma)
 		return ERR_PTR(-ENOMEM);

+	drm_helper_mode_fill_fb_struct(&fb_cma->fb, mode_cmd);
+
+	for (i = 0; i < num_planes; i++)
+		fb_cma->obj[i] = obj[i];
+
 	ret = drm_framebuffer_init(dev, &fb_cma->fb, &drm_fb_cma_funcs);
 	if (ret) {
 		dev_err(dev->dev, "Failed to initialize framebuffer: %d\n", ret);
@@ -92,11 +97,6 @@ static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
 		return ERR_PTR(ret);
 	}

-	drm_helper_mode_fill_fb_struct(&fb_cma->fb, mode_cmd);
-
-	for (i = 0; i < num_planes; i++)
-		fb_cma->obj[i] = obj[i];
-
 	return fb_cma;
 }
 
@@ -180,6 +180,59 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
180} 180}
181EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj); 181EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
182 182
183#ifdef CONFIG_DEBUG_FS
184/**
185 * drm_fb_cma_describe() - Helper to dump information about a single
186 * CMA framebuffer object
187 */
188void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
189{
190 struct drm_fb_cma *fb_cma = to_fb_cma(fb);
191 int i, n = drm_format_num_planes(fb->pixel_format);
192
193 seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
194 (char *)&fb->pixel_format);
195
196 for (i = 0; i < n; i++) {
197 seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
198 i, fb->offsets[i], fb->pitches[i]);
199 drm_gem_cma_describe(fb_cma->obj[i], m);
200 }
201}
202EXPORT_SYMBOL_GPL(drm_fb_cma_describe);
203
204/**
205 * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects
206 * in debugfs.
207 */
208int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
209{
210 struct drm_info_node *node = (struct drm_info_node *) m->private;
211 struct drm_device *dev = node->minor->dev;
212 struct drm_framebuffer *fb;
213 int ret;
214
215 ret = mutex_lock_interruptible(&dev->mode_config.mutex);
216 if (ret)
217 return ret;
218
219 ret = mutex_lock_interruptible(&dev->struct_mutex);
220 if (ret) {
221 mutex_unlock(&dev->mode_config.mutex);
222 return ret;
223 }
224
225 list_for_each_entry(fb, &dev->mode_config.fb_list, head)
226 drm_fb_cma_describe(fb, m);
227
228 mutex_unlock(&dev->struct_mutex);
229 mutex_unlock(&dev->mode_config.mutex);
230
231 return 0;
232}
233EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show);
234#endif
235
183static struct fb_ops drm_fbdev_cma_ops = { 236static struct fb_ops drm_fbdev_cma_ops = {
184 .owner = THIS_MODULE, 237 .owner = THIS_MODULE,
185 .fb_fillrect = sys_fillrect, 238 .fb_fillrect = sys_fillrect,
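
drm_fb_cma_debugfs_show() is written against the drm_info_list machinery, so a driver can expose it with a single table entry. A sketch, assuming the usual debugfs_init hook; the "fb" file name and function names are arbitrary:

#ifdef CONFIG_DEBUG_FS
static struct drm_info_list my_fb_debugfs_list[] = {
        { "fb", drm_fb_cma_debugfs_show, 0 },
};

static int my_debugfs_init(struct drm_minor *minor)
{
        return drm_debugfs_create_files(my_fb_debugfs_list,
                                        ARRAY_SIZE(my_fb_debugfs_list),
                                        minor->debugfs_root, minor);
}
#endif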
@@ -266,6 +319,7 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
266 return 0; 319 return 0;
267 320
268err_drm_fb_cma_destroy: 321err_drm_fb_cma_destroy:
322 drm_framebuffer_unregister_private(fb);
269 drm_fb_cma_destroy(fb); 323 drm_fb_cma_destroy(fb);
270err_framebuffer_release: 324err_framebuffer_release:
271 framebuffer_release(fbi); 325 framebuffer_release(fbi);
@@ -274,23 +328,8 @@ err_drm_gem_cma_free_object:
274 return ret; 328 return ret;
275} 329}
276 330
277static int drm_fbdev_cma_probe(struct drm_fb_helper *helper,
278 struct drm_fb_helper_surface_size *sizes)
279{
280 int ret = 0;
281
282 if (!helper->fb) {
283 ret = drm_fbdev_cma_create(helper, sizes);
284 if (ret < 0)
285 return ret;
286 ret = 1;
287 }
288
289 return ret;
290}
291
292static struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = { 331static struct drm_fb_helper_funcs drm_fb_cma_helper_funcs = {
293 .fb_probe = drm_fbdev_cma_probe, 332 .fb_probe = drm_fbdev_cma_create,
294}; 333};
295 334
296/** 335/**
@@ -332,6 +371,9 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
332 371
333 } 372 }
334 373
374 /* disable all the possible outputs/crtcs before entering KMS mode */
375 drm_helper_disable_unused_functions(dev);
376
335 ret = drm_fb_helper_initial_config(helper, preferred_bpp); 377 ret = drm_fb_helper_initial_config(helper, preferred_bpp);
336 if (ret < 0) { 378 if (ret < 0) {
 337 dev_err(dev->dev, "Failed to set initial hw configuration.\n"); 379 dev_err(dev->dev, "Failed to set initial hw configuration.\n");
@@ -370,8 +412,10 @@ void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
370 framebuffer_release(info); 412 framebuffer_release(info);
371 } 413 }
372 414
373 if (fbdev_cma->fb) 415 if (fbdev_cma->fb) {
416 drm_framebuffer_unregister_private(&fbdev_cma->fb->fb);
374 drm_fb_cma_destroy(&fbdev_cma->fb->fb); 417 drm_fb_cma_destroy(&fbdev_cma->fb->fb);
418 }
375 419
376 drm_fb_helper_fini(&fbdev_cma->fb_helper); 420 drm_fb_helper_fini(&fbdev_cma->fb_helper);
377 kfree(fbdev_cma); 421 kfree(fbdev_cma);
@@ -386,8 +430,13 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_fini);
386 */ 430 */
387void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma) 431void drm_fbdev_cma_restore_mode(struct drm_fbdev_cma *fbdev_cma)
388{ 432{
389 if (fbdev_cma) 433 if (fbdev_cma) {
434 struct drm_device *dev = fbdev_cma->fb_helper.dev;
435
436 drm_modeset_lock_all(dev);
390 drm_fb_helper_restore_fbdev_mode(&fbdev_cma->fb_helper); 437 drm_fb_helper_restore_fbdev_mode(&fbdev_cma->fb_helper);
438 drm_modeset_unlock_all(dev);
439 }
391} 440}
392EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode); 441EXPORT_SYMBOL_GPL(drm_fbdev_cma_restore_mode);
393 442
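
Taken together, the CMA fbdev helper now expects roughly the following driver glue. A sketch, assuming a driver-private pointer holds the returned handle (priv is hypothetical):

/* at load time */
priv->fbdev = drm_fbdev_cma_init(dev, 32 /* preferred bpp */,
                                 dev->mode_config.num_crtc,
                                 dev->mode_config.num_connector);
if (IS_ERR(priv->fbdev))
        return PTR_ERR(priv->fbdev);

/* from ->lastclose: the wrapper now takes the modeset locks itself */
drm_fbdev_cma_restore_mode(priv->fbdev);

/* at unload time */
drm_fbdev_cma_fini(priv->fbdev);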
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 954d175bd7fa..59d6b9bf204b 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -52,9 +52,36 @@ static LIST_HEAD(kernel_fb_helper_list);
 52 * mode setting driver. They can be used mostly independently of the crtc 52 * mode setting driver. They can be used mostly independently of the crtc
53 * helper functions used by many drivers to implement the kernel mode setting 53 * helper functions used by many drivers to implement the kernel mode setting
54 * interfaces. 54 * interfaces.
55 *
56 * Initialization is done as a three-step process with drm_fb_helper_init(),
57 * drm_fb_helper_single_add_all_connectors() and drm_fb_helper_initial_config().
 58 * Drivers with fancier requirements than the default behaviour can override the
59 * second step with their own code. Teardown is done with drm_fb_helper_fini().
60 *
61 * At runtime drivers should restore the fbdev console by calling
62 * drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They
 63 * should also notify the fb helper code of updates to the output
64 * configuration by calling drm_fb_helper_hotplug_event(). For easier
65 * integration with the output polling code in drm_crtc_helper.c the modeset
 66 * code provides a ->output_poll_changed callback.
67 *
68 * All other functions exported by the fb helper library can be used to
69 * implement the fbdev driver interface by the driver.
55 */ 70 */
56 71
57/* simple single crtc case helper function */ 72/**
73 * drm_fb_helper_single_add_all_connectors() - add all connectors to fbdev
74 * emulation helper
75 * @fb_helper: fbdev initialized with drm_fb_helper_init
76 *
 77 * This function adds all the available connectors for use with the given
78 * fb_helper. This is a separate step to allow drivers to freely assign
79 * connectors to the fbdev, e.g. if some are reserved for special purposes or
80 * not adequate to be used for the fbcon.
81 *
82 * Since this is part of the initial setup before the fbdev is published, no
83 * locking is required.
84 */
58int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) 85int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
59{ 86{
60 struct drm_device *dev = fb_helper->dev; 87 struct drm_device *dev = fb_helper->dev;
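
A sketch of the documented three-step setup as a driver would perform it at init time, assuming an fb_helper embedded in a driver-private struct and a driver-provided funcs table (both names hypothetical):

int my_fbdev_init(struct drm_device *dev, struct drm_fb_helper *helper)
{
        int ret;

        helper->funcs = &my_fb_helper_funcs;    /* must be set before initial_config */

        ret = drm_fb_helper_init(dev, helper,
                                 dev->mode_config.num_crtc,
                                 4 /* max connectors */);
        if (ret)
                return ret;

        /* step two: the default connector setup, which drivers may open-code */
        drm_fb_helper_single_add_all_connectors(helper);

        /* step three: probe outputs, pick a config and register the fbdev */
        drm_fb_helper_initial_config(helper, 32 /* preferred bpp */);

        return 0;
}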
@@ -163,6 +190,10 @@ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
163 crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size); 190 crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
164} 191}
165 192
193/**
194 * drm_fb_helper_debug_enter - implementation for ->fb_debug_enter
195 * @info: fbdev registered by the helper
196 */
166int drm_fb_helper_debug_enter(struct fb_info *info) 197int drm_fb_helper_debug_enter(struct fb_info *info)
167{ 198{
168 struct drm_fb_helper *helper = info->par; 199 struct drm_fb_helper *helper = info->par;
@@ -208,6 +239,10 @@ static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
208 return NULL; 239 return NULL;
209} 240}
210 241
242/**
243 * drm_fb_helper_debug_leave - implementation for ->fb_debug_leave
244 * @info: fbdev registered by the helper
245 */
211int drm_fb_helper_debug_leave(struct fb_info *info) 246int drm_fb_helper_debug_leave(struct fb_info *info)
212{ 247{
213 struct drm_fb_helper *helper = info->par; 248 struct drm_fb_helper *helper = info->par;
@@ -239,13 +274,24 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
239} 274}
240EXPORT_SYMBOL(drm_fb_helper_debug_leave); 275EXPORT_SYMBOL(drm_fb_helper_debug_leave);
241 276
277/**
278 * drm_fb_helper_restore_fbdev_mode - restore fbdev configuration
279 * @fb_helper: fbcon to restore
280 *
 281 * This should be called from the driver's drm ->lastclose callback
282 * when implementing an fbcon on top of kms using this helper. This ensures that
283 * the user isn't greeted with a black screen when e.g. X dies.
284 */
242bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper) 285bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
243{ 286{
244 bool error = false; 287 bool error = false;
245 int i, ret; 288 int i, ret;
289
290 drm_warn_on_modeset_not_all_locked(fb_helper->dev);
291
246 for (i = 0; i < fb_helper->crtc_count; i++) { 292 for (i = 0; i < fb_helper->crtc_count; i++) {
247 struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set; 293 struct drm_mode_set *mode_set = &fb_helper->crtc_info[i].mode_set;
248 ret = mode_set->crtc->funcs->set_config(mode_set); 294 ret = drm_mode_set_config_internal(mode_set);
249 if (ret) 295 if (ret)
250 error = true; 296 error = true;
251 } 297 }
@@ -253,6 +299,10 @@ bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
253} 299}
254EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode); 300EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode);
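
With the drm_warn_on_modeset_not_all_locked() check added above, direct users of drm_fb_helper_restore_fbdev_mode() must hold all modeset locks themselves. A sketch of a ->lastclose callback under that rule (the private struct and accessor are hypothetical):

static void my_lastclose(struct drm_device *dev)
{
        struct my_private *priv = dev->dev_private;

        drm_modeset_lock_all(dev);
        drm_fb_helper_restore_fbdev_mode(&priv->fb_helper);
        drm_modeset_unlock_all(dev);
}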
255 301
302/*
 303 * restore fbcon display for all kms drivers using this helper, used for sysrq
304 * and panic handling.
305 */
256static bool drm_fb_helper_force_kernel_mode(void) 306static bool drm_fb_helper_force_kernel_mode(void)
257{ 307{
258 bool ret, error = false; 308 bool ret, error = false;
@@ -272,7 +322,7 @@ static bool drm_fb_helper_force_kernel_mode(void)
272 return error; 322 return error;
273} 323}
274 324
 275int drm_fb_helper_panic(struct notifier_block *n, unsigned long unused, 325static int drm_fb_helper_panic(struct notifier_block *n, unsigned long unused,
276 void *panic_str) 326 void *panic_str)
277{ 327{
278 /* 328 /*
@@ -285,30 +335,36 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
285 pr_err("panic occurred, switching back to text console\n"); 335 pr_err("panic occurred, switching back to text console\n");
286 return drm_fb_helper_force_kernel_mode(); 336 return drm_fb_helper_force_kernel_mode();
287} 337}
288EXPORT_SYMBOL(drm_fb_helper_panic);
289 338
290static struct notifier_block paniced = { 339static struct notifier_block paniced = {
291 .notifier_call = drm_fb_helper_panic, 340 .notifier_call = drm_fb_helper_panic,
292}; 341};
293 342
294/** 343static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
295 * drm_fb_helper_restore - restore the framebuffer console (kernel) config
296 *
297 * Restore's the kernel's fbcon mode, used for lastclose & panic paths.
298 */
299void drm_fb_helper_restore(void)
300{ 344{
301 bool ret; 345 struct drm_device *dev = fb_helper->dev;
302 ret = drm_fb_helper_force_kernel_mode(); 346 struct drm_crtc *crtc;
303 if (ret == true) 347 int bound = 0, crtcs_bound = 0;
304 DRM_ERROR("Failed to restore crtc configuration\n"); 348
349 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
350 if (crtc->fb)
351 crtcs_bound++;
352 if (crtc->fb == fb_helper->fb)
353 bound++;
354 }
355
356 if (bound < crtcs_bound)
357 return false;
358 return true;
305} 359}
306EXPORT_SYMBOL(drm_fb_helper_restore);
307 360
308#ifdef CONFIG_MAGIC_SYSRQ 361#ifdef CONFIG_MAGIC_SYSRQ
309static void drm_fb_helper_restore_work_fn(struct work_struct *ignored) 362static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
310{ 363{
311 drm_fb_helper_restore(); 364 bool ret;
365 ret = drm_fb_helper_force_kernel_mode();
366 if (ret == true)
367 DRM_ERROR("Failed to restore crtc configuration\n");
312} 368}
313static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn); 369static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
314 370
@@ -335,9 +391,22 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
335 int i, j; 391 int i, j;
336 392
337 /* 393 /*
394 * fbdev->blank can be called from irq context in case of a panic.
395 * Since we already have our own special panic handler which will
396 * restore the fbdev console mode completely, just bail out early.
397 */
398 if (oops_in_progress)
399 return;
400
401 /*
338 * For each CRTC in this fb, turn the connectors on/off. 402 * For each CRTC in this fb, turn the connectors on/off.
339 */ 403 */
340 mutex_lock(&dev->mode_config.mutex); 404 drm_modeset_lock_all(dev);
405 if (!drm_fb_helper_is_bound(fb_helper)) {
406 drm_modeset_unlock_all(dev);
407 return;
408 }
409
341 for (i = 0; i < fb_helper->crtc_count; i++) { 410 for (i = 0; i < fb_helper->crtc_count; i++) {
342 crtc = fb_helper->crtc_info[i].mode_set.crtc; 411 crtc = fb_helper->crtc_info[i].mode_set.crtc;
343 412
@@ -352,9 +421,14 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
352 dev->mode_config.dpms_property, dpms_mode); 421 dev->mode_config.dpms_property, dpms_mode);
353 } 422 }
354 } 423 }
355 mutex_unlock(&dev->mode_config.mutex); 424 drm_modeset_unlock_all(dev);
356} 425}
357 426
427/**
428 * drm_fb_helper_blank - implementation for ->fb_blank
429 * @blank: desired blanking state
430 * @info: fbdev registered by the helper
431 */
358int drm_fb_helper_blank(int blank, struct fb_info *info) 432int drm_fb_helper_blank(int blank, struct fb_info *info)
359{ 433{
360 switch (blank) { 434 switch (blank) {
@@ -398,6 +472,24 @@ static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
398 kfree(helper->crtc_info); 472 kfree(helper->crtc_info);
399} 473}
400 474
475/**
476 * drm_fb_helper_init - initialize a drm_fb_helper structure
477 * @dev: drm device
478 * @fb_helper: driver-allocated fbdev helper structure to initialize
479 * @crtc_count: maximum number of crtcs to support in this fbdev emulation
480 * @max_conn_count: max connector count
481 *
482 * This allocates the structures for the fbdev helper with the given limits.
483 * Note that this won't yet touch the hardware (through the driver interfaces)
484 * nor register the fbdev. This is only done in drm_fb_helper_initial_config()
 485 * to allow driver writers more control over the exact init sequence.
486 *
487 * Drivers must set fb_helper->funcs before calling
488 * drm_fb_helper_initial_config().
489 *
490 * RETURNS:
491 * Zero if everything went ok, nonzero otherwise.
492 */
401int drm_fb_helper_init(struct drm_device *dev, 493int drm_fb_helper_init(struct drm_device *dev,
402 struct drm_fb_helper *fb_helper, 494 struct drm_fb_helper *fb_helper,
403 int crtc_count, int max_conn_count) 495 int crtc_count, int max_conn_count)
@@ -526,6 +618,11 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
526 return 0; 618 return 0;
527} 619}
528 620
621/**
622 * drm_fb_helper_setcmap - implementation for ->fb_setcmap
623 * @cmap: cmap to set
624 * @info: fbdev registered by the helper
625 */
529int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) 626int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
530{ 627{
531 struct drm_fb_helper *fb_helper = info->par; 628 struct drm_fb_helper *fb_helper = info->par;
@@ -565,6 +662,11 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
565} 662}
566EXPORT_SYMBOL(drm_fb_helper_setcmap); 663EXPORT_SYMBOL(drm_fb_helper_setcmap);
567 664
665/**
666 * drm_fb_helper_check_var - implementation for ->fb_check_var
667 * @var: screeninfo to check
668 * @info: fbdev registered by the helper
669 */
568int drm_fb_helper_check_var(struct fb_var_screeninfo *var, 670int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
569 struct fb_info *info) 671 struct fb_info *info)
570{ 672{
@@ -657,13 +759,19 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
657} 759}
658EXPORT_SYMBOL(drm_fb_helper_check_var); 760EXPORT_SYMBOL(drm_fb_helper_check_var);
659 761
660/* this will let fbcon do the mode init */ 762/**
763 * drm_fb_helper_set_par - implementation for ->fb_set_par
764 * @info: fbdev registered by the helper
765 *
766 * This will let fbcon do the mode init and is called at initialization time by
767 * the fbdev core when registering the driver, and later on through the hotplug
768 * callback.
769 */
661int drm_fb_helper_set_par(struct fb_info *info) 770int drm_fb_helper_set_par(struct fb_info *info)
662{ 771{
663 struct drm_fb_helper *fb_helper = info->par; 772 struct drm_fb_helper *fb_helper = info->par;
664 struct drm_device *dev = fb_helper->dev; 773 struct drm_device *dev = fb_helper->dev;
665 struct fb_var_screeninfo *var = &info->var; 774 struct fb_var_screeninfo *var = &info->var;
666 struct drm_crtc *crtc;
667 int ret; 775 int ret;
668 int i; 776 int i;
669 777
@@ -672,16 +780,15 @@ int drm_fb_helper_set_par(struct fb_info *info)
672 return -EINVAL; 780 return -EINVAL;
673 } 781 }
674 782
675 mutex_lock(&dev->mode_config.mutex); 783 drm_modeset_lock_all(dev);
676 for (i = 0; i < fb_helper->crtc_count; i++) { 784 for (i = 0; i < fb_helper->crtc_count; i++) {
677 crtc = fb_helper->crtc_info[i].mode_set.crtc; 785 ret = drm_mode_set_config_internal(&fb_helper->crtc_info[i].mode_set);
678 ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
679 if (ret) { 786 if (ret) {
680 mutex_unlock(&dev->mode_config.mutex); 787 drm_modeset_unlock_all(dev);
681 return ret; 788 return ret;
682 } 789 }
683 } 790 }
684 mutex_unlock(&dev->mode_config.mutex); 791 drm_modeset_unlock_all(dev);
685 792
686 if (fb_helper->delayed_hotplug) { 793 if (fb_helper->delayed_hotplug) {
687 fb_helper->delayed_hotplug = false; 794 fb_helper->delayed_hotplug = false;
@@ -691,6 +798,11 @@ int drm_fb_helper_set_par(struct fb_info *info)
691} 798}
692EXPORT_SYMBOL(drm_fb_helper_set_par); 799EXPORT_SYMBOL(drm_fb_helper_set_par);
693 800
801/**
802 * drm_fb_helper_pan_display - implementation for ->fb_pan_display
803 * @var: updated screen information
804 * @info: fbdev registered by the helper
805 */
694int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, 806int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
695 struct fb_info *info) 807 struct fb_info *info)
696{ 808{
@@ -701,7 +813,12 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
701 int ret = 0; 813 int ret = 0;
702 int i; 814 int i;
703 815
704 mutex_lock(&dev->mode_config.mutex); 816 drm_modeset_lock_all(dev);
817 if (!drm_fb_helper_is_bound(fb_helper)) {
818 drm_modeset_unlock_all(dev);
819 return -EBUSY;
820 }
821
705 for (i = 0; i < fb_helper->crtc_count; i++) { 822 for (i = 0; i < fb_helper->crtc_count; i++) {
706 crtc = fb_helper->crtc_info[i].mode_set.crtc; 823 crtc = fb_helper->crtc_info[i].mode_set.crtc;
707 824
@@ -711,22 +828,27 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
711 modeset->y = var->yoffset; 828 modeset->y = var->yoffset;
712 829
713 if (modeset->num_connectors) { 830 if (modeset->num_connectors) {
714 ret = crtc->funcs->set_config(modeset); 831 ret = drm_mode_set_config_internal(modeset);
715 if (!ret) { 832 if (!ret) {
716 info->var.xoffset = var->xoffset; 833 info->var.xoffset = var->xoffset;
717 info->var.yoffset = var->yoffset; 834 info->var.yoffset = var->yoffset;
718 } 835 }
719 } 836 }
720 } 837 }
721 mutex_unlock(&dev->mode_config.mutex); 838 drm_modeset_unlock_all(dev);
722 return ret; 839 return ret;
723} 840}
724EXPORT_SYMBOL(drm_fb_helper_pan_display); 841EXPORT_SYMBOL(drm_fb_helper_pan_display);
725 842
726int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, 843/*
727 int preferred_bpp) 844 * Allocates the backing storage and sets up the fbdev info structure through
845 * the ->fb_probe callback and then registers the fbdev and sets up the panic
846 * notifier.
847 */
848static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
849 int preferred_bpp)
728{ 850{
729 int new_fb = 0; 851 int ret = 0;
730 int crtc_count = 0; 852 int crtc_count = 0;
731 int i; 853 int i;
732 struct fb_info *info; 854 struct fb_info *info;
@@ -804,27 +926,30 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
804 } 926 }
805 927
806 /* push down into drivers */ 928 /* push down into drivers */
807 new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes); 929 ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
808 if (new_fb < 0) 930 if (ret < 0)
809 return new_fb; 931 return ret;
810 932
811 info = fb_helper->fbdev; 933 info = fb_helper->fbdev;
812 934
813 /* set the fb pointer */ 935 /*
936 * Set the fb pointer - usually drm_setup_crtcs does this for hotplug
937 * events, but at init time drm_setup_crtcs needs to be called before
938 * the fb is allocated (since we need to figure out the desired size of
939 * the fb before we can allocate it ...). Hence we need to fix things up
940 * here again.
941 */
814 for (i = 0; i < fb_helper->crtc_count; i++) 942 for (i = 0; i < fb_helper->crtc_count; i++)
815 fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb; 943 if (fb_helper->crtc_info[i].mode_set.num_connectors)
944 fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
816 945
817 if (new_fb) {
818 info->var.pixclock = 0;
819 if (register_framebuffer(info) < 0)
820 return -EINVAL;
821 946
822 dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n", 947 info->var.pixclock = 0;
823 info->node, info->fix.id); 948 if (register_framebuffer(info) < 0)
949 return -EINVAL;
824 950
825 } else { 951 dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
826 drm_fb_helper_set_par(info); 952 info->node, info->fix.id);
827 }
828 953
829 /* Switch back to kernel console on panic */ 954 /* Switch back to kernel console on panic */
830 /* multi card linked list maybe */ 955 /* multi card linked list maybe */
@@ -834,13 +959,25 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
834 &paniced); 959 &paniced);
835 register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); 960 register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
836 } 961 }
837 if (new_fb) 962
838 list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list); 963 list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
839 964
840 return 0; 965 return 0;
841} 966}
842EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
843 967
968/**
969 * drm_fb_helper_fill_fix - initializes fixed fbdev information
970 * @info: fbdev registered by the helper
971 * @pitch: desired pitch
972 * @depth: desired depth
973 *
 974 * Helper to fill in the fixed fbdev information useful for non-accelerated
 975 * fbdev emulation. Drivers which support acceleration methods that impose
976 * additional constraints need to set up their own limits.
977 *
978 * Drivers should call this (or their equivalent setup code) from their
979 * ->fb_probe callback.
980 */
844void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, 981void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
845 uint32_t depth) 982 uint32_t depth)
846{ 983{
@@ -861,6 +998,20 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
861} 998}
862EXPORT_SYMBOL(drm_fb_helper_fill_fix); 999EXPORT_SYMBOL(drm_fb_helper_fill_fix);
863 1000
1001/**
 1002 * drm_fb_helper_fill_var - initializes variable fbdev information
1003 * @info: fbdev instance to set up
1004 * @fb_helper: fb helper instance to use as template
1005 * @fb_width: desired fb width
1006 * @fb_height: desired fb height
1007 *
1008 * Sets up the variable fbdev metainformation from the given fb helper instance
1009 * and the drm framebuffer allocated in fb_helper->fb.
1010 *
1011 * Drivers should call this (or their equivalent setup code) from their
1012 * ->fb_probe callback after having allocated the fbdev backing
1013 * storage framebuffer.
1014 */
864void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, 1015void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
865 uint32_t fb_width, uint32_t fb_height) 1016 uint32_t fb_width, uint32_t fb_height)
866{ 1017{
@@ -1284,6 +1435,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
1284 for (i = 0; i < fb_helper->crtc_count; i++) { 1435 for (i = 0; i < fb_helper->crtc_count; i++) {
1285 modeset = &fb_helper->crtc_info[i].mode_set; 1436 modeset = &fb_helper->crtc_info[i].mode_set;
1286 modeset->num_connectors = 0; 1437 modeset->num_connectors = 0;
1438 modeset->fb = NULL;
1287 } 1439 }
1288 1440
1289 for (i = 0; i < fb_helper->connector_count; i++) { 1441 for (i = 0; i < fb_helper->connector_count; i++) {
@@ -1300,9 +1452,21 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
1300 modeset->mode = drm_mode_duplicate(dev, 1452 modeset->mode = drm_mode_duplicate(dev,
1301 fb_crtc->desired_mode); 1453 fb_crtc->desired_mode);
1302 modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector; 1454 modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
1455 modeset->fb = fb_helper->fb;
1303 } 1456 }
1304 } 1457 }
1305 1458
1459 /* Clear out any old modes if there are no more connected outputs. */
1460 for (i = 0; i < fb_helper->crtc_count; i++) {
1461 modeset = &fb_helper->crtc_info[i].mode_set;
1462 if (modeset->num_connectors == 0) {
1463 BUG_ON(modeset->fb);
1464 BUG_ON(modeset->num_connectors);
1465 if (modeset->mode)
1466 drm_mode_destroy(dev, modeset->mode);
1467 modeset->mode = NULL;
1468 }
1469 }
1306out: 1470out:
1307 kfree(crtcs); 1471 kfree(crtcs);
1308 kfree(modes); 1472 kfree(modes);
@@ -1310,18 +1474,23 @@ out:
1310} 1474}
1311 1475
1312/** 1476/**
1313 * drm_helper_initial_config - setup a sane initial connector configuration 1477 * drm_fb_helper_initial_config - setup a sane initial connector configuration
1314 * @fb_helper: fb_helper device struct 1478 * @fb_helper: fb_helper device struct
1315 * @bpp_sel: bpp value to use for the framebuffer configuration 1479 * @bpp_sel: bpp value to use for the framebuffer configuration
1316 * 1480 *
1317 * LOCKING:
1318 * Called at init time by the driver to set up the @fb_helper initial
1319 * configuration, must take the mode config lock.
1320 *
1321 * Scans the CRTCs and connectors and tries to put together an initial setup. 1481 * Scans the CRTCs and connectors and tries to put together an initial setup.
1322 * At the moment, this is a cloned configuration across all heads with 1482 * At the moment, this is a cloned configuration across all heads with
1323 * a new framebuffer object as the backing store. 1483 * a new framebuffer object as the backing store.
1324 * 1484 *
1485 * Note that this also registers the fbdev and so allows userspace to call into
1486 * the driver through the fbdev interfaces.
1487 *
1488 * This function will call down into the ->fb_probe callback to let
1489 * the driver allocate and initialize the fbdev info structure and the drm
1490 * framebuffer used to back the fbdev. drm_fb_helper_fill_var() and
 1491 * drm_fb_helper_fill_fix() are provided as helpers to set up simple default
1492 * values for the fbdev info structure.
1493 *
1325 * RETURNS: 1494 * RETURNS:
1326 * Zero if everything went ok, nonzero otherwise. 1495 * Zero if everything went ok, nonzero otherwise.
1327 */ 1496 */
@@ -1330,9 +1499,6 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
1330 struct drm_device *dev = fb_helper->dev; 1499 struct drm_device *dev = fb_helper->dev;
1331 int count = 0; 1500 int count = 0;
1332 1501
1333 /* disable all the possible outputs/crtcs before entering KMS mode */
1334 drm_helper_disable_unused_functions(fb_helper->dev);
1335
1336 drm_fb_helper_parse_command_line(fb_helper); 1502 drm_fb_helper_parse_command_line(fb_helper);
1337 1503
1338 count = drm_fb_helper_probe_connector_modes(fb_helper, 1504 count = drm_fb_helper_probe_connector_modes(fb_helper,
@@ -1355,12 +1521,17 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
1355 * probing all the outputs attached to the fb 1521 * probing all the outputs attached to the fb
1356 * @fb_helper: the drm_fb_helper 1522 * @fb_helper: the drm_fb_helper
1357 * 1523 *
1358 * LOCKING:
1359 * Called at runtime, must take mode config lock.
1360 *
1361 * Scan the connectors attached to the fb_helper and try to put together a 1524 * Scan the connectors attached to the fb_helper and try to put together a
 1362 * setup after notification of a change in output configuration. 1525 * setup after notification of a change in output configuration.
1363 * 1526 *
1527 * Called at runtime, takes the mode config locks to be able to check/change the
1528 * modeset configuration. Must be run from process context (which usually means
1529 * either the output polling work or a work item launched from the driver's
1530 * hotplug interrupt).
1531 *
1532 * Note that the driver must ensure that this is only called _after_ the fb has
1533 * been fully set up, i.e. after the call to drm_fb_helper_initial_config.
1534 *
1364 * RETURNS: 1535 * RETURNS:
1365 * 0 on success and a non-zero error code otherwise. 1536 * 0 on success and a non-zero error code otherwise.
1366 */ 1537 */
@@ -1369,23 +1540,14 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
1369 struct drm_device *dev = fb_helper->dev; 1540 struct drm_device *dev = fb_helper->dev;
1370 int count = 0; 1541 int count = 0;
1371 u32 max_width, max_height, bpp_sel; 1542 u32 max_width, max_height, bpp_sel;
1372 int bound = 0, crtcs_bound = 0;
1373 struct drm_crtc *crtc;
1374 1543
1375 if (!fb_helper->fb) 1544 if (!fb_helper->fb)
1376 return 0; 1545 return 0;
1377 1546
1378 mutex_lock(&dev->mode_config.mutex); 1547 drm_modeset_lock_all(dev);
1379 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1548 if (!drm_fb_helper_is_bound(fb_helper)) {
1380 if (crtc->fb)
1381 crtcs_bound++;
1382 if (crtc->fb == fb_helper->fb)
1383 bound++;
1384 }
1385
1386 if (bound < crtcs_bound) {
1387 fb_helper->delayed_hotplug = true; 1549 fb_helper->delayed_hotplug = true;
1388 mutex_unlock(&dev->mode_config.mutex); 1550 drm_modeset_unlock_all(dev);
1389 return 0; 1551 return 0;
1390 } 1552 }
1391 DRM_DEBUG_KMS("\n"); 1553 DRM_DEBUG_KMS("\n");
@@ -1397,9 +1559,11 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
1397 count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, 1559 count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
1398 max_height); 1560 max_height);
1399 drm_setup_crtcs(fb_helper); 1561 drm_setup_crtcs(fb_helper);
1400 mutex_unlock(&dev->mode_config.mutex); 1562 drm_modeset_unlock_all(dev);
1401 1563
1402 return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); 1564 drm_fb_helper_set_par(fb_helper->fbdev);
1565
1566 return 0;
1403} 1567}
1404EXPORT_SYMBOL(drm_fb_helper_hotplug_event); 1568EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
1405 1569
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 133b4132983e..13fdcd10a605 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -276,6 +276,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
276 276
277 INIT_LIST_HEAD(&priv->lhead); 277 INIT_LIST_HEAD(&priv->lhead);
278 INIT_LIST_HEAD(&priv->fbs); 278 INIT_LIST_HEAD(&priv->fbs);
279 mutex_init(&priv->fbs_lock);
279 INIT_LIST_HEAD(&priv->event_list); 280 INIT_LIST_HEAD(&priv->event_list);
280 init_waitqueue_head(&priv->event_wait); 281 init_waitqueue_head(&priv->event_wait);
281 priv->event_space = 4096; /* set aside 4k for event buffer */ 282 priv->event_space = 4096; /* set aside 4k for event buffer */
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 1aa8fee1e865..0a7e011509bd 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -249,3 +249,24 @@ int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
249 return drm_gem_handle_delete(file_priv, handle); 249 return drm_gem_handle_delete(file_priv, handle);
250} 250}
251EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_destroy); 251EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_destroy);
252
253#ifdef CONFIG_DEBUG_FS
254void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m)
255{
256 struct drm_gem_object *obj = &cma_obj->base;
257 struct drm_device *dev = obj->dev;
258 uint64_t off = 0;
259
260 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
261
262 if (obj->map_list.map)
263 off = (uint64_t)obj->map_list.hash.key;
264
265 seq_printf(m, "%2d (%2d) %08llx %08Zx %p %d",
266 obj->name, obj->refcount.refcount.counter,
267 off, cma_obj->paddr, cma_obj->vaddr, obj->size);
268
269 seq_printf(m, "\n");
270}
271EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
272#endif
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 19c01ca3cc76..a6a8643a6a77 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -505,6 +505,7 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)
505 505
506 /* Valid dotclock? */ 506 /* Valid dotclock? */
507 if (dotclock > 0) { 507 if (dotclock > 0) {
508 int frame_size;
508 /* Convert scanline length in pixels and video dot clock to 509 /* Convert scanline length in pixels and video dot clock to
509 * line duration, frame duration and pixel duration in 510 * line duration, frame duration and pixel duration in
510 * nanoseconds: 511 * nanoseconds:
@@ -512,7 +513,10 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)
512 pixeldur_ns = (s64) div64_u64(1000000000, dotclock); 513 pixeldur_ns = (s64) div64_u64(1000000000, dotclock);
513 linedur_ns = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal * 514 linedur_ns = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal *
514 1000000000), dotclock); 515 1000000000), dotclock);
515 framedur_ns = (s64) crtc->hwmode.crtc_vtotal * linedur_ns; 516 frame_size = crtc->hwmode.crtc_htotal *
517 crtc->hwmode.crtc_vtotal;
518 framedur_ns = (s64) div64_u64((u64) frame_size * 1000000000,
519 dotclock);
516 } else 520 } else
517 DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n", 521 DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
518 crtc->base.id); 522 crtc->base.id);
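
The framedur_ns change is a rounding fix: the old code scaled the already-truncated line duration by vtotal, multiplying the truncation error, while the new code divides the full frame size by the dotclock once. A worked example for the common 1920x1080@60 CEA timing:

/* htotal = 2200, vtotal = 1125, dotclock = 148500000 Hz
 *
 * old: linedur_ns  = 2200 * 1000000000 / 148500000  = 14814    (truncated)
 *      framedur_ns = 1125 * 14814                   = 16665750 ns
 * new: framedur_ns = 2200 * 1125 * 1000000000 / 148500000
 *                                                   = 16666666 ns
 *
 * i.e. the per-line truncation used to cost ~917 ns per frame, which
 * matters for the vblank timestamps these constants feed into. */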
@@ -863,6 +867,7 @@ void drm_send_vblank_event(struct drm_device *dev, int crtc,
863 867
864 now = get_drm_timestamp(); 868 now = get_drm_timestamp();
865 } 869 }
870 e->pipe = crtc;
866 send_vblank_event(dev, e, seq, &now); 871 send_vblank_event(dev, e, seq, &now);
867} 872}
868EXPORT_SYMBOL(drm_send_vblank_event); 873EXPORT_SYMBOL(drm_send_vblank_event);
@@ -1218,8 +1223,9 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
1218 int ret; 1223 int ret;
1219 unsigned int flags, seq, crtc, high_crtc; 1224 unsigned int flags, seq, crtc, high_crtc;
1220 1225
1221 if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled)) 1226 if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
1222 return -EINVAL; 1227 if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled))
1228 return -EINVAL;
1223 1229
1224 if (vblwait->request.type & _DRM_VBLANK_SIGNAL) 1230 if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
1225 return -EINVAL; 1231 return -EINVAL;
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 2aa331499f81..db1e2d6f90d7 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -102,20 +102,6 @@ int drm_mm_pre_get(struct drm_mm *mm)
102} 102}
103EXPORT_SYMBOL(drm_mm_pre_get); 103EXPORT_SYMBOL(drm_mm_pre_get);
104 104
105static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
106{
107 return hole_node->start + hole_node->size;
108}
109
110static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
111{
112 struct drm_mm_node *next_node =
113 list_entry(hole_node->node_list.next, struct drm_mm_node,
114 node_list);
115
116 return next_node->start;
117}
118
119static void drm_mm_insert_helper(struct drm_mm_node *hole_node, 105static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
120 struct drm_mm_node *node, 106 struct drm_mm_node *node,
121 unsigned long size, unsigned alignment, 107 unsigned long size, unsigned alignment,
@@ -127,7 +113,7 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
127 unsigned long adj_start = hole_start; 113 unsigned long adj_start = hole_start;
128 unsigned long adj_end = hole_end; 114 unsigned long adj_end = hole_end;
129 115
130 BUG_ON(!hole_node->hole_follows || node->allocated); 116 BUG_ON(node->allocated);
131 117
132 if (mm->color_adjust) 118 if (mm->color_adjust)
133 mm->color_adjust(hole_node, color, &adj_start, &adj_end); 119 mm->color_adjust(hole_node, color, &adj_start, &adj_end);
@@ -155,12 +141,57 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
155 BUG_ON(node->start + node->size > adj_end); 141 BUG_ON(node->start + node->size > adj_end);
156 142
157 node->hole_follows = 0; 143 node->hole_follows = 0;
158 if (node->start + node->size < hole_end) { 144 if (__drm_mm_hole_node_start(node) < hole_end) {
159 list_add(&node->hole_stack, &mm->hole_stack); 145 list_add(&node->hole_stack, &mm->hole_stack);
160 node->hole_follows = 1; 146 node->hole_follows = 1;
161 } 147 }
162} 148}
163 149
150struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
151 unsigned long start,
152 unsigned long size,
153 bool atomic)
154{
155 struct drm_mm_node *hole, *node;
156 unsigned long end = start + size;
157 unsigned long hole_start;
158 unsigned long hole_end;
159
160 drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
161 if (hole_start > start || hole_end < end)
162 continue;
163
164 node = drm_mm_kmalloc(mm, atomic);
165 if (unlikely(node == NULL))
166 return NULL;
167
168 node->start = start;
169 node->size = size;
170 node->mm = mm;
171 node->allocated = 1;
172
173 INIT_LIST_HEAD(&node->hole_stack);
174 list_add(&node->node_list, &hole->node_list);
175
176 if (start == hole_start) {
177 hole->hole_follows = 0;
178 list_del_init(&hole->hole_stack);
179 }
180
181 node->hole_follows = 0;
182 if (end != hole_end) {
183 list_add(&node->hole_stack, &mm->hole_stack);
184 node->hole_follows = 1;
185 }
186
187 return node;
188 }
189
190 WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
191 return NULL;
192}
193EXPORT_SYMBOL(drm_mm_create_block);
194
164struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node, 195struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
165 unsigned long size, 196 unsigned long size,
166 unsigned alignment, 197 unsigned alignment,
@@ -253,7 +284,7 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
253 BUG_ON(node->start + node->size > end); 284 BUG_ON(node->start + node->size > end);
254 285
255 node->hole_follows = 0; 286 node->hole_follows = 0;
256 if (node->start + node->size < hole_end) { 287 if (__drm_mm_hole_node_start(node) < hole_end) {
257 list_add(&node->hole_stack, &mm->hole_stack); 288 list_add(&node->hole_stack, &mm->hole_stack);
258 node->hole_follows = 1; 289 node->hole_follows = 1;
259 } 290 }
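
drm_mm_create_block() gives drivers a way to claim a caller-chosen range instead of letting the allocator pick one, which is what pre-allocated or firmware-placed buffers need. A sketch (the manager pointer and addresses are hypothetical):

struct drm_mm_node *node;

/* pin down [0x100000, 0x180000), e.g. where firmware left a framebuffer */
node = drm_mm_create_block(mm, 0x100000 /* start */,
                           0x80000 /* size */, false /* no atomic alloc */);
if (!node)
        return -ENOSPC; /* no free hole covers the requested range */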
@@ -327,12 +358,13 @@ void drm_mm_remove_node(struct drm_mm_node *node)
327 list_entry(node->node_list.prev, struct drm_mm_node, node_list); 358 list_entry(node->node_list.prev, struct drm_mm_node, node_list);
328 359
329 if (node->hole_follows) { 360 if (node->hole_follows) {
330 BUG_ON(drm_mm_hole_node_start(node) 361 BUG_ON(__drm_mm_hole_node_start(node) ==
331 == drm_mm_hole_node_end(node)); 362 __drm_mm_hole_node_end(node));
332 list_del(&node->hole_stack); 363 list_del(&node->hole_stack);
333 } else 364 } else
334 BUG_ON(drm_mm_hole_node_start(node) 365 BUG_ON(__drm_mm_hole_node_start(node) !=
335 != drm_mm_hole_node_end(node)); 366 __drm_mm_hole_node_end(node));
367
336 368
337 if (!prev_node->hole_follows) { 369 if (!prev_node->hole_follows) {
338 prev_node->hole_follows = 1; 370 prev_node->hole_follows = 1;
@@ -390,6 +422,8 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
390{ 422{
391 struct drm_mm_node *entry; 423 struct drm_mm_node *entry;
392 struct drm_mm_node *best; 424 struct drm_mm_node *best;
425 unsigned long adj_start;
426 unsigned long adj_end;
393 unsigned long best_size; 427 unsigned long best_size;
394 428
395 BUG_ON(mm->scanned_blocks); 429 BUG_ON(mm->scanned_blocks);
@@ -397,17 +431,13 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
397 best = NULL; 431 best = NULL;
398 best_size = ~0UL; 432 best_size = ~0UL;
399 433
400 list_for_each_entry(entry, &mm->hole_stack, hole_stack) { 434 drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
401 unsigned long adj_start = drm_mm_hole_node_start(entry);
402 unsigned long adj_end = drm_mm_hole_node_end(entry);
403
404 if (mm->color_adjust) { 435 if (mm->color_adjust) {
405 mm->color_adjust(entry, color, &adj_start, &adj_end); 436 mm->color_adjust(entry, color, &adj_start, &adj_end);
406 if (adj_end <= adj_start) 437 if (adj_end <= adj_start)
407 continue; 438 continue;
408 } 439 }
409 440
410 BUG_ON(!entry->hole_follows);
411 if (!check_free_hole(adj_start, adj_end, size, alignment)) 441 if (!check_free_hole(adj_start, adj_end, size, alignment))
412 continue; 442 continue;
413 443
@@ -434,6 +464,8 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
434{ 464{
435 struct drm_mm_node *entry; 465 struct drm_mm_node *entry;
436 struct drm_mm_node *best; 466 struct drm_mm_node *best;
467 unsigned long adj_start;
468 unsigned long adj_end;
437 unsigned long best_size; 469 unsigned long best_size;
438 470
439 BUG_ON(mm->scanned_blocks); 471 BUG_ON(mm->scanned_blocks);
@@ -441,13 +473,11 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
441 best = NULL; 473 best = NULL;
442 best_size = ~0UL; 474 best_size = ~0UL;
443 475
444 list_for_each_entry(entry, &mm->hole_stack, hole_stack) { 476 drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
445 unsigned long adj_start = drm_mm_hole_node_start(entry) < start ? 477 if (adj_start < start)
446 start : drm_mm_hole_node_start(entry); 478 adj_start = start;
447 unsigned long adj_end = drm_mm_hole_node_end(entry) > end ? 479 if (adj_end > end)
448 end : drm_mm_hole_node_end(entry); 480 adj_end = end;
449
450 BUG_ON(!entry->hole_follows);
451 481
452 if (mm->color_adjust) { 482 if (mm->color_adjust) {
453 mm->color_adjust(entry, color, &adj_start, &adj_end); 483 mm->color_adjust(entry, color, &adj_start, &adj_end);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index d8da30e90db5..04fa6f1808d1 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -35,6 +35,8 @@
35#include <linux/export.h> 35#include <linux/export.h>
36#include <drm/drmP.h> 36#include <drm/drmP.h>
37#include <drm/drm_crtc.h> 37#include <drm/drm_crtc.h>
38#include <video/of_videomode.h>
39#include <video/videomode.h>
38 40
39/** 41/**
40 * drm_mode_debug_printmodeline - debug print a mode 42 * drm_mode_debug_printmodeline - debug print a mode
@@ -504,6 +506,74 @@ drm_gtf_mode(struct drm_device *dev, int hdisplay, int vdisplay, int vrefresh,
504} 506}
505EXPORT_SYMBOL(drm_gtf_mode); 507EXPORT_SYMBOL(drm_gtf_mode);
506 508
509#if IS_ENABLED(CONFIG_VIDEOMODE)
510int drm_display_mode_from_videomode(const struct videomode *vm,
511 struct drm_display_mode *dmode)
512{
513 dmode->hdisplay = vm->hactive;
514 dmode->hsync_start = dmode->hdisplay + vm->hfront_porch;
515 dmode->hsync_end = dmode->hsync_start + vm->hsync_len;
516 dmode->htotal = dmode->hsync_end + vm->hback_porch;
517
518 dmode->vdisplay = vm->vactive;
519 dmode->vsync_start = dmode->vdisplay + vm->vfront_porch;
520 dmode->vsync_end = dmode->vsync_start + vm->vsync_len;
521 dmode->vtotal = dmode->vsync_end + vm->vback_porch;
522
523 dmode->clock = vm->pixelclock / 1000;
524
525 dmode->flags = 0;
526 if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH)
527 dmode->flags |= DRM_MODE_FLAG_PHSYNC;
528 else if (vm->dmt_flags & VESA_DMT_HSYNC_LOW)
529 dmode->flags |= DRM_MODE_FLAG_NHSYNC;
530 if (vm->dmt_flags & VESA_DMT_VSYNC_HIGH)
531 dmode->flags |= DRM_MODE_FLAG_PVSYNC;
532 else if (vm->dmt_flags & VESA_DMT_VSYNC_LOW)
533 dmode->flags |= DRM_MODE_FLAG_NVSYNC;
534 if (vm->data_flags & DISPLAY_FLAGS_INTERLACED)
535 dmode->flags |= DRM_MODE_FLAG_INTERLACE;
536 if (vm->data_flags & DISPLAY_FLAGS_DOUBLESCAN)
537 dmode->flags |= DRM_MODE_FLAG_DBLSCAN;
538 drm_mode_set_name(dmode);
539
540 return 0;
541}
542EXPORT_SYMBOL_GPL(drm_display_mode_from_videomode);
543#endif
544
545#if IS_ENABLED(CONFIG_OF_VIDEOMODE)
546/**
547 * of_get_drm_display_mode - get a drm_display_mode from devicetree
548 * @np: device_node with the timing specification
549 * @dmode: will be set to the return value
550 * @index: index into the list of display timings in devicetree
551 *
 552 * This function is expensive and should only be used if only one mode is to be
 553 * read from DT. To get multiple modes, start with of_get_display_timings and
554 * work with that instead.
555 */
556int of_get_drm_display_mode(struct device_node *np,
557 struct drm_display_mode *dmode, int index)
558{
559 struct videomode vm;
560 int ret;
561
562 ret = of_get_videomode(np, &vm, index);
563 if (ret)
564 return ret;
565
566 drm_display_mode_from_videomode(&vm, dmode);
567
568 pr_debug("%s: got %dx%d display mode from %s\n",
569 of_node_full_name(np), vm.hactive, vm.vactive, np->name);
570 drm_mode_debug_printmodeline(dmode);
571
572 return 0;
573}
574EXPORT_SYMBOL_GPL(of_get_drm_display_mode);
575#endif
576
507/** 577/**
508 * drm_mode_set_name - set the name on a mode 578 * drm_mode_set_name - set the name on a mode
509 * @mode: name will be set in this mode 579 * @mode: name will be set in this mode
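
A sketch of of_get_drm_display_mode() in use, e.g. from a connector's ->get_modes hook in a DT-based driver (everything except the drm_ and of_ helpers is hypothetical):

struct drm_display_mode *mode;
int ret;

mode = drm_mode_create(dev);
if (!mode)
        return 0;

ret = of_get_drm_display_mode(np, mode, 0 /* first timing in DT */);
if (ret) {
        drm_mode_destroy(dev, mode);
        return 0;
}

drm_mode_probed_add(connector, mode);
return 1;       /* one mode added */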
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 754bc96e10c7..bd719e936e13 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -439,78 +439,67 @@ int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
439 return 0; 439 return 0;
440} 440}
441 441
442#else
443
444int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
445{
446 return -1;
447}
448
449#endif
450
451EXPORT_SYMBOL(drm_pci_init);
452
453/*@}*/
454void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
455{
456 struct drm_device *dev, *tmp;
457 DRM_DEBUG("\n");
458
459 if (driver->driver_features & DRIVER_MODESET) {
460 pci_unregister_driver(pdriver);
461 } else {
462 list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
463 drm_put_dev(dev);
464 }
465 DRM_INFO("Module unloaded\n");
466}
467EXPORT_SYMBOL(drm_pci_exit);
468
469int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask) 442int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
470{ 443{
471 struct pci_dev *root; 444 struct pci_dev *root;
472 int pos; 445 u32 lnkcap, lnkcap2;
473 u32 lnkcap = 0, lnkcap2 = 0;
474 446
475 *mask = 0; 447 *mask = 0;
476 if (!dev->pdev) 448 if (!dev->pdev)
477 return -EINVAL; 449 return -EINVAL;
478 450
479 if (!pci_is_pcie(dev->pdev))
480 return -EINVAL;
481
482 root = dev->pdev->bus->self; 451 root = dev->pdev->bus->self;
483 452
484 pos = pci_pcie_cap(root);
485 if (!pos)
486 return -EINVAL;
487
 488 /* we've been informed that VIA and ServerWorks don't make the cut */ 453 /* we've been informed that VIA and ServerWorks don't make the cut */
489 if (root->vendor == PCI_VENDOR_ID_VIA || 454 if (root->vendor == PCI_VENDOR_ID_VIA ||
490 root->vendor == PCI_VENDOR_ID_SERVERWORKS) 455 root->vendor == PCI_VENDOR_ID_SERVERWORKS)
491 return -EINVAL; 456 return -EINVAL;
492 457
493 pci_read_config_dword(root, pos + PCI_EXP_LNKCAP, &lnkcap); 458 pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
494 pci_read_config_dword(root, pos + PCI_EXP_LNKCAP2, &lnkcap2); 459 pcie_capability_read_dword(root, PCI_EXP_LNKCAP2, &lnkcap2);
495 460
496 lnkcap &= PCI_EXP_LNKCAP_SLS; 461 if (lnkcap2) { /* PCIe r3.0-compliant */
497 lnkcap2 &= 0xfe;
498
499 if (lnkcap2) { /* PCIE GEN 3.0 */
500 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) 462 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
501 *mask |= DRM_PCIE_SPEED_25; 463 *mask |= DRM_PCIE_SPEED_25;
502 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) 464 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
503 *mask |= DRM_PCIE_SPEED_50; 465 *mask |= DRM_PCIE_SPEED_50;
504 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) 466 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
505 *mask |= DRM_PCIE_SPEED_80; 467 *mask |= DRM_PCIE_SPEED_80;
506 } else { 468 } else { /* pre-r3.0 */
507 if (lnkcap & 1) 469 if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
508 *mask |= DRM_PCIE_SPEED_25; 470 *mask |= DRM_PCIE_SPEED_25;
509 if (lnkcap & 2) 471 if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
510 *mask |= DRM_PCIE_SPEED_50; 472 *mask |= (DRM_PCIE_SPEED_25 | DRM_PCIE_SPEED_50);
511 } 473 }
512 474
513 DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", root->vendor, root->device, lnkcap, lnkcap2); 475 DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", root->vendor, root->device, lnkcap, lnkcap2);
514 return 0; 476 return 0;
515} 477}
516EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask); 478EXPORT_SYMBOL(drm_pcie_get_speed_cap_mask);
479
480#else
481
482int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver)
483{
484 return -1;
485}
486
487#endif
488
489EXPORT_SYMBOL(drm_pci_init);
490
491/*@}*/
492void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
493{
494 struct drm_device *dev, *tmp;
495 DRM_DEBUG("\n");
496
497 if (driver->driver_features & DRIVER_MODESET) {
498 pci_unregister_driver(pdriver);
499 } else {
500 list_for_each_entry_safe(dev, tmp, &driver->device_list, driver_item)
501 drm_put_dev(dev);
502 }
503 DRM_INFO("Module unloaded\n");
504}
505EXPORT_SYMBOL(drm_pci_exit);
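
Callers of drm_pcie_get_speed_cap_mask() are unchanged by the rewrite. A sketch of the usual pattern, modelled on how a driver would gate its link-speed setup (the comments stand in for driver-specific code):

u32 mask;

if (drm_pcie_get_speed_cap_mask(dev, &mask) == 0) {
        if (mask & DRM_PCIE_SPEED_80) {
                /* PCIe r3.0 device: may train the link to 8.0 GT/s */
        } else if (mask & DRM_PCIE_SPEED_50) {
                /* gen2-capable: 5.0 GT/s is the ceiling */
        }
}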
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 7f125738f44e..366910ddcfcb 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -53,7 +53,8 @@
53 * Self-importing: if userspace is using PRIME as a replacement for flink 53 * Self-importing: if userspace is using PRIME as a replacement for flink
54 * then it will get a fd->handle request for a GEM object that it created. 54 * then it will get a fd->handle request for a GEM object that it created.
55 * Drivers should detect this situation and return back the gem object 55 * Drivers should detect this situation and return back the gem object
56 * from the dma-buf private. 56 * from the dma-buf private. Prime will do this automatically for drivers that
57 * use the drm_gem_prime_{import,export} helpers.
57 */ 58 */
58 59
59struct drm_prime_member { 60struct drm_prime_member {
@@ -62,6 +63,137 @@ struct drm_prime_member {
62 uint32_t handle; 63 uint32_t handle;
63}; 64};
64 65
66static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
67 enum dma_data_direction dir)
68{
69 struct drm_gem_object *obj = attach->dmabuf->priv;
70 struct sg_table *sgt;
71
72 mutex_lock(&obj->dev->struct_mutex);
73
74 sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
75
76 if (!IS_ERR_OR_NULL(sgt))
77 dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
78
79 mutex_unlock(&obj->dev->struct_mutex);
80 return sgt;
81}
82
83static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
84 struct sg_table *sgt, enum dma_data_direction dir)
85{
86 dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
87 sg_free_table(sgt);
88 kfree(sgt);
89}
90
91static void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
92{
93 struct drm_gem_object *obj = dma_buf->priv;
94
95 if (obj->export_dma_buf == dma_buf) {
96 /* drop the reference on the export fd holds */
97 obj->export_dma_buf = NULL;
98 drm_gem_object_unreference_unlocked(obj);
99 }
100}
101
102static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
103{
104 struct drm_gem_object *obj = dma_buf->priv;
105 struct drm_device *dev = obj->dev;
106
107 return dev->driver->gem_prime_vmap(obj);
108}
109
110static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
111{
112 struct drm_gem_object *obj = dma_buf->priv;
113 struct drm_device *dev = obj->dev;
114
115 dev->driver->gem_prime_vunmap(obj, vaddr);
116}
117
118static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
119 unsigned long page_num)
120{
121 return NULL;
122}
123
124static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
125 unsigned long page_num, void *addr)
126{
127
128}
129static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
130 unsigned long page_num)
131{
132 return NULL;
133}
134
135static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
136 unsigned long page_num, void *addr)
137{
138
139}
140
141static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
142 struct vm_area_struct *vma)
143{
144 return -EINVAL;
145}
146
147static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
148 .map_dma_buf = drm_gem_map_dma_buf,
149 .unmap_dma_buf = drm_gem_unmap_dma_buf,
150 .release = drm_gem_dmabuf_release,
151 .kmap = drm_gem_dmabuf_kmap,
152 .kmap_atomic = drm_gem_dmabuf_kmap_atomic,
153 .kunmap = drm_gem_dmabuf_kunmap,
154 .kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
155 .mmap = drm_gem_dmabuf_mmap,
156 .vmap = drm_gem_dmabuf_vmap,
157 .vunmap = drm_gem_dmabuf_vunmap,
158};
159
160/**
161 * DOC: PRIME Helpers
162 *
163 * Drivers can implement @gem_prime_export and @gem_prime_import in terms of
164 * simpler APIs by using the helper functions @drm_gem_prime_export and
165 * @drm_gem_prime_import. These functions implement dma-buf support in terms of
166 * five lower-level driver callbacks:
167 *
168 * Export callbacks:
169 *
170 * - @gem_prime_pin (optional): prepare a GEM object for exporting
171 *
172 * - @gem_prime_get_sg_table: provide a scatter/gather table of pinned pages
173 *
174 * - @gem_prime_vmap: vmap a buffer exported by your driver
175 *
176 * - @gem_prime_vunmap: vunmap a buffer exported by your driver
177 *
178 * Import callback:
179 *
180 * - @gem_prime_import_sg_table (import): produce a GEM object from another
181 * driver's scatter/gather table
182 */
183
184struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
185 struct drm_gem_object *obj, int flags)
186{
187 if (dev->driver->gem_prime_pin) {
188 int ret = dev->driver->gem_prime_pin(obj);
189 if (ret)
190 return ERR_PTR(ret);
191 }
192 return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size,
193 0600);
194}
195EXPORT_SYMBOL(drm_gem_prime_export);
196
65int drm_gem_prime_handle_to_fd(struct drm_device *dev, 197int drm_gem_prime_handle_to_fd(struct drm_device *dev,
66 struct drm_file *file_priv, uint32_t handle, uint32_t flags, 198 struct drm_file *file_priv, uint32_t handle, uint32_t flags,
67 int *prime_fd) 199 int *prime_fd)
@@ -117,6 +249,58 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
117} 249}
118EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); 250EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
119 251
252struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
253 struct dma_buf *dma_buf)
254{
255 struct dma_buf_attachment *attach;
256 struct sg_table *sgt;
257 struct drm_gem_object *obj;
258 int ret;
259
260 if (!dev->driver->gem_prime_import_sg_table)
261 return ERR_PTR(-EINVAL);
262
263 if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
264 obj = dma_buf->priv;
265 if (obj->dev == dev) {
266 /*
 267 * Importing dmabuf exported from our own gem increases
268 * refcount on gem itself instead of f_count of dmabuf.
269 */
270 drm_gem_object_reference(obj);
271 dma_buf_put(dma_buf);
272 return obj;
273 }
274 }
275
276 attach = dma_buf_attach(dma_buf, dev->dev);
277 if (IS_ERR(attach))
278 return ERR_PTR(PTR_ERR(attach));
279
280 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
281 if (IS_ERR_OR_NULL(sgt)) {
282 ret = PTR_ERR(sgt);
283 goto fail_detach;
284 }
285
286 obj = dev->driver->gem_prime_import_sg_table(dev, dma_buf->size, sgt);
287 if (IS_ERR(obj)) {
288 ret = PTR_ERR(obj);
289 goto fail_unmap;
290 }
291
292 obj->import_attach = attach;
293
294 return obj;
295
296fail_unmap:
297 dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
298fail_detach:
299 dma_buf_detach(dma_buf, attach);
300 return ERR_PTR(ret);
301}
302EXPORT_SYMBOL(drm_gem_prime_import);
303
120int drm_gem_prime_fd_to_handle(struct drm_device *dev, 304int drm_gem_prime_fd_to_handle(struct drm_device *dev,
121 struct drm_file *file_priv, int prime_fd, uint32_t *handle) 305 struct drm_file *file_priv, int prime_fd, uint32_t *handle)
122{ 306{
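
With export and import now available as library code, a driver opts in by pointing the PRIME hooks in its drm_driver at the helpers and supplying the five low-level callbacks the DOC comment lists. A sketch (the my_gem_* callbacks are hypothetical driver code):

static struct drm_driver my_driver = {
        .driver_features        = DRIVER_GEM | DRIVER_PRIME,
        /* generic PRIME ioctl plumbing */
        .prime_handle_to_fd     = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle     = drm_gem_prime_fd_to_handle,
        /* the helpers added here */
        .gem_prime_export       = drm_gem_prime_export,
        .gem_prime_import       = drm_gem_prime_import,
        /* driver-specific backends the helpers call into */
        .gem_prime_get_sg_table = my_gem_get_sg_table,
        .gem_prime_import_sg_table = my_gem_import_sg_table,
        .gem_prime_vmap         = my_gem_vmap,
        .gem_prime_vunmap       = my_gem_vunmap,
        /* ... */
};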
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
index 3cec30611417..34a156f0c336 100644
--- a/drivers/gpu/drm/drm_usb.c
+++ b/drivers/gpu/drm/drm_usb.c
@@ -18,7 +18,7 @@ int drm_get_usb_dev(struct usb_interface *interface,
18 18
19 usbdev = interface_to_usbdev(interface); 19 usbdev = interface_to_usbdev(interface);
20 dev->usbdev = usbdev; 20 dev->usbdev = usbdev;
21 dev->dev = &usbdev->dev; 21 dev->dev = &interface->dev;
22 22
23 mutex_lock(&drm_global_mutex); 23 mutex_lock(&drm_global_mutex);
24 24
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 294c0513f587..0e04f4ea441f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -99,6 +99,10 @@ static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
99 99
100 DRM_DEBUG_KMS("%s\n", __FILE__); 100 DRM_DEBUG_KMS("%s\n", __FILE__);
101 101
102 /* This fb should have only one gem object. */
103 if (WARN_ON(exynos_fb->buf_cnt != 1))
104 return -EINVAL;
105
102 return drm_gem_handle_create(file_priv, 106 return drm_gem_handle_create(file_priv,
103 &exynos_fb->exynos_gem_obj[0]->base, handle); 107 &exynos_fb->exynos_gem_obj[0]->base, handle);
104} 108}
@@ -217,23 +221,25 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
217 struct drm_mode_fb_cmd2 *mode_cmd) 221 struct drm_mode_fb_cmd2 *mode_cmd)
218{ 222{
219 struct drm_gem_object *obj; 223 struct drm_gem_object *obj;
224 struct exynos_drm_gem_obj *exynos_gem_obj;
220 struct exynos_drm_fb *exynos_fb; 225 struct exynos_drm_fb *exynos_fb;
221 int i, ret; 226 int i, ret;
222 227
223 DRM_DEBUG_KMS("%s\n", __FILE__); 228 DRM_DEBUG_KMS("%s\n", __FILE__);
224 229
225 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
226 if (!obj) {
227 DRM_ERROR("failed to lookup gem object\n");
228 return ERR_PTR(-ENOENT);
229 }
230
231 exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); 230 exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
232 if (!exynos_fb) { 231 if (!exynos_fb) {
233 DRM_ERROR("failed to allocate exynos drm framebuffer\n"); 232 DRM_ERROR("failed to allocate exynos drm framebuffer\n");
234 return ERR_PTR(-ENOMEM); 233 return ERR_PTR(-ENOMEM);
235 } 234 }
236 235
236 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
237 if (!obj) {
238 DRM_ERROR("failed to lookup gem object\n");
239 ret = -ENOENT;
240 goto err_free;
241 }
242
237 drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd); 243 drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
238 exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj); 244 exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
239 exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd); 245 exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd);
@@ -241,43 +247,44 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
241 DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt); 247 DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
242 248
243 for (i = 1; i < exynos_fb->buf_cnt; i++) { 249 for (i = 1; i < exynos_fb->buf_cnt; i++) {
244 struct exynos_drm_gem_obj *exynos_gem_obj;
245 int ret;
246
247 obj = drm_gem_object_lookup(dev, file_priv, 250 obj = drm_gem_object_lookup(dev, file_priv,
248 mode_cmd->handles[i]); 251 mode_cmd->handles[i]);
249 if (!obj) { 252 if (!obj) {
250 DRM_ERROR("failed to lookup gem object\n"); 253 DRM_ERROR("failed to lookup gem object\n");
251 kfree(exynos_fb); 254 ret = -ENOENT;
252 return ERR_PTR(-ENOENT); 255 exynos_fb->buf_cnt = i;
256 goto err_unreference;
253 } 257 }
254 258
255 exynos_gem_obj = to_exynos_gem_obj(obj); 259 exynos_gem_obj = to_exynos_gem_obj(obj);
260 exynos_fb->exynos_gem_obj[i] = exynos_gem_obj;
256 261
257 ret = check_fb_gem_memory_type(dev, exynos_gem_obj); 262 ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
258 if (ret < 0) { 263 if (ret < 0) {
259 DRM_ERROR("cannot use this gem memory type for fb.\n"); 264 DRM_ERROR("cannot use this gem memory type for fb.\n");
260 kfree(exynos_fb); 265 goto err_unreference;
261 return ERR_PTR(ret);
262 } 266 }
263
264 exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
265 } 267 }
266 268
267 ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs); 269 ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
268 if (ret) { 270 if (ret) {
269 for (i = 0; i < exynos_fb->buf_cnt; i++) { 271 DRM_ERROR("failed to init framebuffer.\n");
270 struct exynos_drm_gem_obj *gem_obj; 272 goto err_unreference;
271
272 gem_obj = exynos_fb->exynos_gem_obj[i];
273 drm_gem_object_unreference_unlocked(&gem_obj->base);
274 }
275
276 kfree(exynos_fb);
277 return ERR_PTR(ret);
278 } 273 }
279 274
280 return &exynos_fb->fb; 275 return &exynos_fb->fb;
276
277err_unreference:
278 for (i = 0; i < exynos_fb->buf_cnt; i++) {
279 struct drm_gem_object *obj;
280
281 obj = &exynos_fb->exynos_gem_obj[i]->base;
282 if (obj)
283 drm_gem_object_unreference_unlocked(obj);
284 }
285err_free:
286 kfree(exynos_fb);
287 return ERR_PTR(ret);
281} 288}
282 289
283struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb, 290struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
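The rework above converts exynos_user_fb_create() to the usual kernel single-exit unwind idiom: allocate first, take references as you go, and on failure jump to a label that releases exactly what has been taken so far. The shape of the pattern, reduced to a generic sketch with hypothetical helpers:

	fb = kzalloc(sizeof(*fb), GFP_KERNEL);
	if (!fb)
		return ERR_PTR(-ENOMEM);

	ret = grab_references(fb);		/* hypothetical */
	if (ret)
		goto err_free;			/* nothing referenced yet */

	ret = register_everything(fb);		/* hypothetical */
	if (ret)
		goto err_unreference;		/* undo grab_references() */

	return &fb->base;

err_unreference:
	drop_references(fb);			/* hypothetical */
err_free:
	kfree(fb);
	return ERR_PTR(ret);

Note how the fix also moves the GEM lookup after the allocation, so the very first failure can take the plain err_free path.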
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 71f867340a88..68f0045f86b8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -226,36 +226,8 @@ out:
226 return ret; 226 return ret;
227} 227}
228 228
229static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper,
230 struct drm_fb_helper_surface_size *sizes)
231{
232 int ret = 0;
233
234 DRM_DEBUG_KMS("%s\n", __FILE__);
235
236 /*
237 * !helper->fb means that this function is called for the first time;
238 * after that, helper->fb is reused for clone mode.
239 */
240 if (!helper->fb) {
241 ret = exynos_drm_fbdev_create(helper, sizes);
242 if (ret < 0) {
243 DRM_ERROR("failed to create fbdev.\n");
244 return ret;
245 }
246
247 /*
248 * fb_helper expects a value greater than 0 on success,
249 * because register_framebuffer() should then be called.
250 */
251 ret = 1;
252 }
253
254 return ret;
255}
256
257static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = { 229static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
258 .fb_probe = exynos_drm_fbdev_probe, 230 .fb_probe = exynos_drm_fbdev_create,
259}; 231};
260 232
261int exynos_drm_fbdev_init(struct drm_device *dev) 233int exynos_drm_fbdev_init(struct drm_device *dev)
@@ -295,6 +267,9 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
295 267
296 } 268 }
297 269
270 /* disable all the possible outputs/crtcs before entering KMS mode */
271 drm_helper_disable_unused_functions(dev);
272
298 ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP); 273 ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
299 if (ret < 0) { 274 if (ret < 0) {
300 DRM_ERROR("failed to set up hw configuration.\n"); 275 DRM_ERROR("failed to set up hw configuration.\n");
@@ -326,8 +301,10 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
326 /* release drm framebuffer and real buffer */ 301 /* release drm framebuffer and real buffer */
327 if (fb_helper->fb && fb_helper->fb->funcs) { 302 if (fb_helper->fb && fb_helper->fb->funcs) {
328 fb = fb_helper->fb; 303 fb = fb_helper->fb;
329 if (fb) 304 if (fb) {
305 drm_framebuffer_unregister_private(fb);
330 drm_framebuffer_remove(fb); 306 drm_framebuffer_remove(fb);
307 }
331 } 308 }
332 309
333 /* release linux framebuffer */ 310 /* release linux framebuffer */
@@ -374,5 +351,7 @@ void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
374 if (!private || !private->fb_helper) 351 if (!private || !private->fb_helper)
375 return; 352 return;
376 353
354 drm_modeset_lock_all(dev);
377 drm_fb_helper_restore_fbdev_mode(private->fb_helper); 355 drm_fb_helper_restore_fbdev_mode(private->fb_helper);
356 drm_modeset_unlock_all(dev);
378} 357}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index fb2f81b8063d..3b0da0378acf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -19,6 +19,7 @@
19#include <linux/workqueue.h> 19#include <linux/workqueue.h>
20#include <linux/dma-mapping.h> 20#include <linux/dma-mapping.h>
21#include <linux/dma-attrs.h> 21#include <linux/dma-attrs.h>
22#include <linux/of.h>
22 23
23#include <drm/drmP.h> 24#include <drm/drmP.h>
24#include <drm/exynos_drm.h> 25#include <drm/exynos_drm.h>
@@ -429,7 +430,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
429 430
430 g2d_userptr->pages = pages; 431 g2d_userptr->pages = pages;
431 432
432 sgt = kzalloc(sizeof *sgt, GFP_KERNEL); 433 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
433 if (!sgt) { 434 if (!sgt) {
434 DRM_ERROR("failed to allocate sg table.\n"); 435 DRM_ERROR("failed to allocate sg table.\n");
435 ret = -ENOMEM; 436 ret = -ENOMEM;
@@ -1239,6 +1240,14 @@ static int g2d_resume(struct device *dev)
1239 1240
1240static SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume); 1241static SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
1241 1242
1243#ifdef CONFIG_OF
1244static const struct of_device_id exynos_g2d_match[] = {
1245 { .compatible = "samsung,exynos5250-g2d" },
1246 {},
1247};
1248MODULE_DEVICE_TABLE(of, exynos_g2d_match);
1249#endif
1250
1242struct platform_driver g2d_driver = { 1251struct platform_driver g2d_driver = {
1243 .probe = g2d_probe, 1252 .probe = g2d_probe,
1244 .remove = g2d_remove, 1253 .remove = g2d_remove,
@@ -1246,5 +1255,6 @@ struct platform_driver g2d_driver = {
1246 .name = "s5p-g2d", 1255 .name = "s5p-g2d",
1247 .owner = THIS_MODULE, 1256 .owner = THIS_MODULE,
1248 .pm = &g2d_pm_ops, 1257 .pm = &g2d_pm_ops,
1258 .of_match_table = of_match_ptr(exynos_g2d_match),
1249 }, 1259 },
1250}; 1260};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 473180776528..67e17ce112b6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -329,17 +329,11 @@ static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
329{ 329{
330 struct drm_file *file_priv; 330 struct drm_file *file_priv;
331 331
332 mutex_lock(&drm_dev->struct_mutex);
333
334 /* find current process's drm_file from filelist. */ 332 /* find current process's drm_file from filelist. */
335 list_for_each_entry(file_priv, &drm_dev->filelist, lhead) { 333 list_for_each_entry(file_priv, &drm_dev->filelist, lhead)
336 if (file_priv->filp == filp) { 334 if (file_priv->filp == filp)
337 mutex_unlock(&drm_dev->struct_mutex);
338 return file_priv; 335 return file_priv;
339 }
340 }
341 336
342 mutex_unlock(&drm_dev->struct_mutex);
343 WARN_ON(1); 337 WARN_ON(1);
344 338
345 return ERR_PTR(-EFAULT); 339 return ERR_PTR(-EFAULT);
@@ -400,9 +394,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
400 */ 394 */
401 drm_gem_object_reference(obj); 395 drm_gem_object_reference(obj);
402 396
403 mutex_lock(&drm_dev->struct_mutex);
404 drm_vm_open_locked(drm_dev, vma); 397 drm_vm_open_locked(drm_dev, vma);
405 mutex_unlock(&drm_dev->struct_mutex);
406 398
407 return 0; 399 return 0;
408} 400}
@@ -432,6 +424,16 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
432 } 424 }
433 425
434 /* 426 /*
427 * We have to use the gem object and its fops for the specific
428 * mmapper, but vm_mmap() can deliver only a filp. So we change
429 * filp->f_op and filp->private_data temporarily and restore them
430 * afterwards. It is important to hold the lock until the settings
431 * are restored, to prevent others from misusing filp->f_op or
432 * filp->private_data.
433 */
434 mutex_lock(&dev->struct_mutex);
435
436 /*
435 * Set the specific mmapper's fops; it will be restored by 437 * Set the specific mmapper's fops; it will be restored by
436 * exynos_drm_gem_mmap_buffer to dev->driver->fops. 438 * exynos_drm_gem_mmap_buffer to dev->driver->fops.
437 * This is used to call the specific mapper temporarily. 439 * This is used to call the specific mapper temporarily.
@@ -448,13 +450,20 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
448 addr = vm_mmap(file_priv->filp, 0, args->size, 450 addr = vm_mmap(file_priv->filp, 0, args->size,
449 PROT_READ | PROT_WRITE, MAP_SHARED, 0); 451 PROT_READ | PROT_WRITE, MAP_SHARED, 0);
450 452
451 drm_gem_object_unreference_unlocked(obj); 453 drm_gem_object_unreference(obj);
452 454
453 if (IS_ERR((void *)addr)) { 455 if (IS_ERR((void *)addr)) {
454 file_priv->filp->private_data = file_priv; 456 /* check filp->f_op, filp->private_data are restored */
457 if (file_priv->filp->f_op == &exynos_drm_gem_fops) {
458 file_priv->filp->f_op = fops_get(dev->driver->fops);
459 file_priv->filp->private_data = file_priv;
460 }
461 mutex_unlock(&dev->struct_mutex);
455 return PTR_ERR((void *)addr); 462 return PTR_ERR((void *)addr);
456 } 463 }
457 464
465 mutex_unlock(&dev->struct_mutex);
466
458 args->mapped = addr; 467 args->mapped = addr;
459 468
460 DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped); 469 DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);
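The new comment in exynos_drm_gem_mmap_ioctl() describes a small protocol worth spelling out: vm_mmap() only takes a filp, so the ioctl temporarily points filp->f_op at the GEM-specific fops, lets the temporary ->mmap() restore the original fops, and holds dev->struct_mutex across the whole window so no other thread sees the swapped state. Condensed sketch (names as in this patch; error handling trimmed):

	mutex_lock(&dev->struct_mutex);

	/* route the next mmap() of this filp to the GEM-specific handler */
	filp->f_op = &exynos_drm_gem_fops;
	filp->private_data = obj;

	addr = vm_mmap(filp, 0, size, PROT_READ | PROT_WRITE, MAP_SHARED, 0);

	/* the temporary ->mmap() restores f_op/private_data itself on
	 * success; if it never ran, undo the swap before unlocking */
	if (filp->f_op == &exynos_drm_gem_fops) {
		filp->f_op = fops_get(dev->driver->fops);
		filp->private_data = file_priv;
	}

	mutex_unlock(&dev->struct_mutex);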
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index 28644539b305..7c27df03c9ff 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -124,9 +124,21 @@ static struct edid *drm_hdmi_get_edid(struct device *dev,
124static int drm_hdmi_check_timing(struct device *dev, void *timing) 124static int drm_hdmi_check_timing(struct device *dev, void *timing)
125{ 125{
126 struct drm_hdmi_context *ctx = to_context(dev); 126 struct drm_hdmi_context *ctx = to_context(dev);
127 int ret = 0;
127 128
128 DRM_DEBUG_KMS("%s\n", __FILE__); 129 DRM_DEBUG_KMS("%s\n", __FILE__);
129 130
131 /*
132 * Both the mixer and the hdmi block should be able to handle the
133 * requested mode. If either of the two fails, report the mode as bad.
134 */
135
136 if (mixer_ops && mixer_ops->check_timing)
137 ret = mixer_ops->check_timing(ctx->mixer_ctx->ctx, timing);
138
139 if (ret)
140 return ret;
141
130 if (hdmi_ops && hdmi_ops->check_timing) 142 if (hdmi_ops && hdmi_ops->check_timing)
131 return hdmi_ops->check_timing(ctx->hdmi_ctx->ctx, timing); 143 return hdmi_ops->check_timing(ctx->hdmi_ctx->ctx, timing);
132 144
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index d80516fc9ed7..b7faa3662307 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -32,7 +32,7 @@ struct exynos_hdmi_ops {
32 bool (*is_connected)(void *ctx); 32 bool (*is_connected)(void *ctx);
33 struct edid *(*get_edid)(void *ctx, 33 struct edid *(*get_edid)(void *ctx,
34 struct drm_connector *connector); 34 struct drm_connector *connector);
35 int (*check_timing)(void *ctx, void *timing); 35 int (*check_timing)(void *ctx, struct fb_videomode *timing);
36 int (*power_on)(void *ctx, int mode); 36 int (*power_on)(void *ctx, int mode);
37 37
38 /* manager */ 38 /* manager */
@@ -58,6 +58,9 @@ struct exynos_mixer_ops {
58 void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay); 58 void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
59 void (*win_commit)(void *ctx, int zpos); 59 void (*win_commit)(void *ctx, int zpos);
60 void (*win_disable)(void *ctx, int zpos); 60 void (*win_disable)(void *ctx, int zpos);
61
62 /* display */
63 int (*check_timing)(void *ctx, struct fb_videomode *timing);
61}; 64};
62 65
63void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx); 66void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index 53b7deea8ab7..598e60f57d4b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -14,7 +14,7 @@
14 14
15#define EXYNOS_DEV_ADDR_START 0x20000000 15#define EXYNOS_DEV_ADDR_START 0x20000000
16#define EXYNOS_DEV_ADDR_SIZE 0x40000000 16#define EXYNOS_DEV_ADDR_SIZE 0x40000000
17#define EXYNOS_DEV_ADDR_ORDER 0x4 17#define EXYNOS_DEV_ADDR_ORDER 0x0
18 18
19#ifdef CONFIG_DRM_EXYNOS_IOMMU 19#ifdef CONFIG_DRM_EXYNOS_IOMMU
20 20
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 233247505ff8..2c5f266154ad 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -87,6 +87,73 @@ struct hdmi_resources {
87 int regul_count; 87 int regul_count;
88}; 88};
89 89
90struct hdmi_tg_regs {
91 u8 cmd[1];
92 u8 h_fsz[2];
93 u8 hact_st[2];
94 u8 hact_sz[2];
95 u8 v_fsz[2];
96 u8 vsync[2];
97 u8 vsync2[2];
98 u8 vact_st[2];
99 u8 vact_sz[2];
100 u8 field_chg[2];
101 u8 vact_st2[2];
102 u8 vact_st3[2];
103 u8 vact_st4[2];
104 u8 vsync_top_hdmi[2];
105 u8 vsync_bot_hdmi[2];
106 u8 field_top_hdmi[2];
107 u8 field_bot_hdmi[2];
108 u8 tg_3d[1];
109};
110
111struct hdmi_core_regs {
112 u8 h_blank[2];
113 u8 v2_blank[2];
114 u8 v1_blank[2];
115 u8 v_line[2];
116 u8 h_line[2];
117 u8 hsync_pol[1];
118 u8 vsync_pol[1];
119 u8 int_pro_mode[1];
120 u8 v_blank_f0[2];
121 u8 v_blank_f1[2];
122 u8 h_sync_start[2];
123 u8 h_sync_end[2];
124 u8 v_sync_line_bef_2[2];
125 u8 v_sync_line_bef_1[2];
126 u8 v_sync_line_aft_2[2];
127 u8 v_sync_line_aft_1[2];
128 u8 v_sync_line_aft_pxl_2[2];
129 u8 v_sync_line_aft_pxl_1[2];
130 u8 v_blank_f2[2]; /* for 3D mode */
131 u8 v_blank_f3[2]; /* for 3D mode */
132 u8 v_blank_f4[2]; /* for 3D mode */
133 u8 v_blank_f5[2]; /* for 3D mode */
134 u8 v_sync_line_aft_3[2];
135 u8 v_sync_line_aft_4[2];
136 u8 v_sync_line_aft_5[2];
137 u8 v_sync_line_aft_6[2];
138 u8 v_sync_line_aft_pxl_3[2];
139 u8 v_sync_line_aft_pxl_4[2];
140 u8 v_sync_line_aft_pxl_5[2];
141 u8 v_sync_line_aft_pxl_6[2];
142 u8 vact_space_1[2];
143 u8 vact_space_2[2];
144 u8 vact_space_3[2];
145 u8 vact_space_4[2];
146 u8 vact_space_5[2];
147 u8 vact_space_6[2];
148};
149
150struct hdmi_v14_conf {
151 int pixel_clock;
152 struct hdmi_core_regs core;
153 struct hdmi_tg_regs tg;
154 int cea_video_id;
155};
156
90struct hdmi_context { 157struct hdmi_context {
91 struct device *dev; 158 struct device *dev;
92 struct drm_device *drm_dev; 159 struct drm_device *drm_dev;
@@ -104,6 +171,7 @@ struct hdmi_context {
104 171
105 /* current hdmiphy conf index */ 172 /* current hdmiphy conf index */
106 int cur_conf; 173 int cur_conf;
174 struct hdmi_v14_conf mode_conf;
107 175
108 struct hdmi_resources res; 176 struct hdmi_resources res;
109 177
@@ -392,586 +460,132 @@ static const struct hdmi_v13_conf hdmi_v13_confs[] = {
392}; 460};
393 461
394/* HDMI Version 1.4 */ 462/* HDMI Version 1.4 */
395static const u8 hdmiphy_conf27_027[32] = { 463struct hdmiphy_config {
396 0x01, 0xd1, 0x2d, 0x72, 0x40, 0x64, 0x12, 0x08, 464 int pixel_clock;
397 0x43, 0xa0, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80, 465 u8 conf[32];
398 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
399 0x54, 0xe3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
400};
401
402static const u8 hdmiphy_conf74_176[32] = {
403 0x01, 0xd1, 0x1f, 0x10, 0x40, 0x5b, 0xef, 0x08,
404 0x81, 0xa0, 0xb9, 0xd8, 0x45, 0xa0, 0xac, 0x80,
405 0x5a, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
406 0x54, 0xa6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
407};
408
409static const u8 hdmiphy_conf74_25[32] = {
410 0x01, 0xd1, 0x1f, 0x10, 0x40, 0x40, 0xf8, 0x08,
411 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
412 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
413 0x54, 0xa5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
414};
415
416static const u8 hdmiphy_conf148_5[32] = {
417 0x01, 0xd1, 0x1f, 0x00, 0x40, 0x40, 0xf8, 0x08,
418 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
419 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
420 0x54, 0x4b, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00,
421};
422
423struct hdmi_tg_regs {
424 u8 cmd;
425 u8 h_fsz_l;
426 u8 h_fsz_h;
427 u8 hact_st_l;
428 u8 hact_st_h;
429 u8 hact_sz_l;
430 u8 hact_sz_h;
431 u8 v_fsz_l;
432 u8 v_fsz_h;
433 u8 vsync_l;
434 u8 vsync_h;
435 u8 vsync2_l;
436 u8 vsync2_h;
437 u8 vact_st_l;
438 u8 vact_st_h;
439 u8 vact_sz_l;
440 u8 vact_sz_h;
441 u8 field_chg_l;
442 u8 field_chg_h;
443 u8 vact_st2_l;
444 u8 vact_st2_h;
445 u8 vact_st3_l;
446 u8 vact_st3_h;
447 u8 vact_st4_l;
448 u8 vact_st4_h;
449 u8 vsync_top_hdmi_l;
450 u8 vsync_top_hdmi_h;
451 u8 vsync_bot_hdmi_l;
452 u8 vsync_bot_hdmi_h;
453 u8 field_top_hdmi_l;
454 u8 field_top_hdmi_h;
455 u8 field_bot_hdmi_l;
456 u8 field_bot_hdmi_h;
457 u8 tg_3d;
458};
459
460struct hdmi_core_regs {
461 u8 h_blank[2];
462 u8 v2_blank[2];
463 u8 v1_blank[2];
464 u8 v_line[2];
465 u8 h_line[2];
466 u8 hsync_pol[1];
467 u8 vsync_pol[1];
468 u8 int_pro_mode[1];
469 u8 v_blank_f0[2];
470 u8 v_blank_f1[2];
471 u8 h_sync_start[2];
472 u8 h_sync_end[2];
473 u8 v_sync_line_bef_2[2];
474 u8 v_sync_line_bef_1[2];
475 u8 v_sync_line_aft_2[2];
476 u8 v_sync_line_aft_1[2];
477 u8 v_sync_line_aft_pxl_2[2];
478 u8 v_sync_line_aft_pxl_1[2];
479 u8 v_blank_f2[2]; /* for 3D mode */
480 u8 v_blank_f3[2]; /* for 3D mode */
481 u8 v_blank_f4[2]; /* for 3D mode */
482 u8 v_blank_f5[2]; /* for 3D mode */
483 u8 v_sync_line_aft_3[2];
484 u8 v_sync_line_aft_4[2];
485 u8 v_sync_line_aft_5[2];
486 u8 v_sync_line_aft_6[2];
487 u8 v_sync_line_aft_pxl_3[2];
488 u8 v_sync_line_aft_pxl_4[2];
489 u8 v_sync_line_aft_pxl_5[2];
490 u8 v_sync_line_aft_pxl_6[2];
491 u8 vact_space_1[2];
492 u8 vact_space_2[2];
493 u8 vact_space_3[2];
494 u8 vact_space_4[2];
495 u8 vact_space_5[2];
496 u8 vact_space_6[2];
497};
498
499struct hdmi_preset_conf {
500 struct hdmi_core_regs core;
501 struct hdmi_tg_regs tg;
502};
503
504struct hdmi_conf {
505 int width;
506 int height;
507 int vrefresh;
508 bool interlace;
509 int cea_video_id;
510 const u8 *hdmiphy_data;
511 const struct hdmi_preset_conf *conf;
512};
513
514static const struct hdmi_preset_conf hdmi_conf_480p60 = {
515 .core = {
516 .h_blank = {0x8a, 0x00},
517 .v2_blank = {0x0d, 0x02},
518 .v1_blank = {0x2d, 0x00},
519 .v_line = {0x0d, 0x02},
520 .h_line = {0x5a, 0x03},
521 .hsync_pol = {0x01},
522 .vsync_pol = {0x01},
523 .int_pro_mode = {0x00},
524 .v_blank_f0 = {0xff, 0xff},
525 .v_blank_f1 = {0xff, 0xff},
526 .h_sync_start = {0x0e, 0x00},
527 .h_sync_end = {0x4c, 0x00},
528 .v_sync_line_bef_2 = {0x0f, 0x00},
529 .v_sync_line_bef_1 = {0x09, 0x00},
530 .v_sync_line_aft_2 = {0xff, 0xff},
531 .v_sync_line_aft_1 = {0xff, 0xff},
532 .v_sync_line_aft_pxl_2 = {0xff, 0xff},
533 .v_sync_line_aft_pxl_1 = {0xff, 0xff},
534 .v_blank_f2 = {0xff, 0xff},
535 .v_blank_f3 = {0xff, 0xff},
536 .v_blank_f4 = {0xff, 0xff},
537 .v_blank_f5 = {0xff, 0xff},
538 .v_sync_line_aft_3 = {0xff, 0xff},
539 .v_sync_line_aft_4 = {0xff, 0xff},
540 .v_sync_line_aft_5 = {0xff, 0xff},
541 .v_sync_line_aft_6 = {0xff, 0xff},
542 .v_sync_line_aft_pxl_3 = {0xff, 0xff},
543 .v_sync_line_aft_pxl_4 = {0xff, 0xff},
544 .v_sync_line_aft_pxl_5 = {0xff, 0xff},
545 .v_sync_line_aft_pxl_6 = {0xff, 0xff},
546 .vact_space_1 = {0xff, 0xff},
547 .vact_space_2 = {0xff, 0xff},
548 .vact_space_3 = {0xff, 0xff},
549 .vact_space_4 = {0xff, 0xff},
550 .vact_space_5 = {0xff, 0xff},
551 .vact_space_6 = {0xff, 0xff},
552 /* other don't care */
553 },
554 .tg = {
555 0x00, /* cmd */
556 0x5a, 0x03, /* h_fsz */
557 0x8a, 0x00, 0xd0, 0x02, /* hact */
558 0x0d, 0x02, /* v_fsz */
559 0x01, 0x00, 0x33, 0x02, /* vsync */
560 0x2d, 0x00, 0xe0, 0x01, /* vact */
561 0x33, 0x02, /* field_chg */
562 0x48, 0x02, /* vact_st2 */
563 0x00, 0x00, /* vact_st3 */
564 0x00, 0x00, /* vact_st4 */
565 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
566 0x01, 0x00, 0x33, 0x02, /* field top/bot */
567 0x00, /* 3d FP */
568 },
569}; 466};
570 467
571static const struct hdmi_preset_conf hdmi_conf_720p50 = { 468/* list of all required phy config settings */
572 .core = { 469static const struct hdmiphy_config hdmiphy_v14_configs[] = {
573 .h_blank = {0xbc, 0x02}, 470 {
574 .v2_blank = {0xee, 0x02}, 471 .pixel_clock = 25200000,
575 .v1_blank = {0x1e, 0x00}, 472 .conf = {
576 .v_line = {0xee, 0x02}, 473 0x01, 0x51, 0x2A, 0x75, 0x40, 0x01, 0x00, 0x08,
577 .h_line = {0xbc, 0x07}, 474 0x82, 0x80, 0xfc, 0xd8, 0x45, 0xa0, 0xac, 0x80,
578 .hsync_pol = {0x00}, 475 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
579 .vsync_pol = {0x00}, 476 0x54, 0xf4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
580 .int_pro_mode = {0x00}, 477 },
581 .v_blank_f0 = {0xff, 0xff},
582 .v_blank_f1 = {0xff, 0xff},
583 .h_sync_start = {0xb6, 0x01},
584 .h_sync_end = {0xde, 0x01},
585 .v_sync_line_bef_2 = {0x0a, 0x00},
586 .v_sync_line_bef_1 = {0x05, 0x00},
587 .v_sync_line_aft_2 = {0xff, 0xff},
588 .v_sync_line_aft_1 = {0xff, 0xff},
589 .v_sync_line_aft_pxl_2 = {0xff, 0xff},
590 .v_sync_line_aft_pxl_1 = {0xff, 0xff},
591 .v_blank_f2 = {0xff, 0xff},
592 .v_blank_f3 = {0xff, 0xff},
593 .v_blank_f4 = {0xff, 0xff},
594 .v_blank_f5 = {0xff, 0xff},
595 .v_sync_line_aft_3 = {0xff, 0xff},
596 .v_sync_line_aft_4 = {0xff, 0xff},
597 .v_sync_line_aft_5 = {0xff, 0xff},
598 .v_sync_line_aft_6 = {0xff, 0xff},
599 .v_sync_line_aft_pxl_3 = {0xff, 0xff},
600 .v_sync_line_aft_pxl_4 = {0xff, 0xff},
601 .v_sync_line_aft_pxl_5 = {0xff, 0xff},
602 .v_sync_line_aft_pxl_6 = {0xff, 0xff},
603 .vact_space_1 = {0xff, 0xff},
604 .vact_space_2 = {0xff, 0xff},
605 .vact_space_3 = {0xff, 0xff},
606 .vact_space_4 = {0xff, 0xff},
607 .vact_space_5 = {0xff, 0xff},
608 .vact_space_6 = {0xff, 0xff},
609 /* other don't care */
610 },
611 .tg = {
612 0x00, /* cmd */
613 0xbc, 0x07, /* h_fsz */
614 0xbc, 0x02, 0x00, 0x05, /* hact */
615 0xee, 0x02, /* v_fsz */
616 0x01, 0x00, 0x33, 0x02, /* vsync */
617 0x1e, 0x00, 0xd0, 0x02, /* vact */
618 0x33, 0x02, /* field_chg */
619 0x48, 0x02, /* vact_st2 */
620 0x00, 0x00, /* vact_st3 */
621 0x00, 0x00, /* vact_st4 */
622 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
623 0x01, 0x00, 0x33, 0x02, /* field top/bot */
624 0x00, /* 3d FP */
625 }, 478 },
626}; 479 {
627 480 .pixel_clock = 27000000,
628static const struct hdmi_preset_conf hdmi_conf_720p60 = { 481 .conf = {
629 .core = { 482 0x01, 0xd1, 0x22, 0x51, 0x40, 0x08, 0xfc, 0x20,
630 .h_blank = {0x72, 0x01}, 483 0x98, 0xa0, 0xcb, 0xd8, 0x45, 0xa0, 0xac, 0x80,
631 .v2_blank = {0xee, 0x02}, 484 0x06, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
632 .v1_blank = {0x1e, 0x00}, 485 0x54, 0xe4, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
633 .v_line = {0xee, 0x02}, 486 },
634 .h_line = {0x72, 0x06},
635 .hsync_pol = {0x00},
636 .vsync_pol = {0x00},
637 .int_pro_mode = {0x00},
638 .v_blank_f0 = {0xff, 0xff},
639 .v_blank_f1 = {0xff, 0xff},
640 .h_sync_start = {0x6c, 0x00},
641 .h_sync_end = {0x94, 0x00},
642 .v_sync_line_bef_2 = {0x0a, 0x00},
643 .v_sync_line_bef_1 = {0x05, 0x00},
644 .v_sync_line_aft_2 = {0xff, 0xff},
645 .v_sync_line_aft_1 = {0xff, 0xff},
646 .v_sync_line_aft_pxl_2 = {0xff, 0xff},
647 .v_sync_line_aft_pxl_1 = {0xff, 0xff},
648 .v_blank_f2 = {0xff, 0xff},
649 .v_blank_f3 = {0xff, 0xff},
650 .v_blank_f4 = {0xff, 0xff},
651 .v_blank_f5 = {0xff, 0xff},
652 .v_sync_line_aft_3 = {0xff, 0xff},
653 .v_sync_line_aft_4 = {0xff, 0xff},
654 .v_sync_line_aft_5 = {0xff, 0xff},
655 .v_sync_line_aft_6 = {0xff, 0xff},
656 .v_sync_line_aft_pxl_3 = {0xff, 0xff},
657 .v_sync_line_aft_pxl_4 = {0xff, 0xff},
658 .v_sync_line_aft_pxl_5 = {0xff, 0xff},
659 .v_sync_line_aft_pxl_6 = {0xff, 0xff},
660 .vact_space_1 = {0xff, 0xff},
661 .vact_space_2 = {0xff, 0xff},
662 .vact_space_3 = {0xff, 0xff},
663 .vact_space_4 = {0xff, 0xff},
664 .vact_space_5 = {0xff, 0xff},
665 .vact_space_6 = {0xff, 0xff},
666 /* other don't care */
667 }, 487 },
668 .tg = { 488 {
669 0x00, /* cmd */ 489 .pixel_clock = 27027000,
670 0x72, 0x06, /* h_fsz */ 490 .conf = {
671 0x72, 0x01, 0x00, 0x05, /* hact */ 491 0x01, 0xd1, 0x2d, 0x72, 0x40, 0x64, 0x12, 0x08,
672 0xee, 0x02, /* v_fsz */ 492 0x43, 0xa0, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
673 0x01, 0x00, 0x33, 0x02, /* vsync */ 493 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
674 0x1e, 0x00, 0xd0, 0x02, /* vact */ 494 0x54, 0xe3, 0x24, 0x00, 0x00, 0x00, 0x01, 0x00,
675 0x33, 0x02, /* field_chg */ 495 },
676 0x48, 0x02, /* vact_st2 */
677 0x00, 0x00, /* vact_st3 */
678 0x00, 0x00, /* vact_st4 */
679 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
680 0x01, 0x00, 0x33, 0x02, /* field top/bot */
681 0x00, /* 3d FP */
682 }, 496 },
683}; 497 {
684 498 .pixel_clock = 36000000,
685static const struct hdmi_preset_conf hdmi_conf_1080i50 = { 499 .conf = {
686 .core = { 500 0x01, 0x51, 0x2d, 0x55, 0x40, 0x01, 0x00, 0x08,
687 .h_blank = {0xd0, 0x02}, 501 0x82, 0x80, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
688 .v2_blank = {0x32, 0x02}, 502 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
689 .v1_blank = {0x16, 0x00}, 503 0x54, 0xab, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
690 .v_line = {0x65, 0x04}, 504 },
691 .h_line = {0x50, 0x0a},
692 .hsync_pol = {0x00},
693 .vsync_pol = {0x00},
694 .int_pro_mode = {0x01},
695 .v_blank_f0 = {0x49, 0x02},
696 .v_blank_f1 = {0x65, 0x04},
697 .h_sync_start = {0x0e, 0x02},
698 .h_sync_end = {0x3a, 0x02},
699 .v_sync_line_bef_2 = {0x07, 0x00},
700 .v_sync_line_bef_1 = {0x02, 0x00},
701 .v_sync_line_aft_2 = {0x39, 0x02},
702 .v_sync_line_aft_1 = {0x34, 0x02},
703 .v_sync_line_aft_pxl_2 = {0x38, 0x07},
704 .v_sync_line_aft_pxl_1 = {0x38, 0x07},
705 .v_blank_f2 = {0xff, 0xff},
706 .v_blank_f3 = {0xff, 0xff},
707 .v_blank_f4 = {0xff, 0xff},
708 .v_blank_f5 = {0xff, 0xff},
709 .v_sync_line_aft_3 = {0xff, 0xff},
710 .v_sync_line_aft_4 = {0xff, 0xff},
711 .v_sync_line_aft_5 = {0xff, 0xff},
712 .v_sync_line_aft_6 = {0xff, 0xff},
713 .v_sync_line_aft_pxl_3 = {0xff, 0xff},
714 .v_sync_line_aft_pxl_4 = {0xff, 0xff},
715 .v_sync_line_aft_pxl_5 = {0xff, 0xff},
716 .v_sync_line_aft_pxl_6 = {0xff, 0xff},
717 .vact_space_1 = {0xff, 0xff},
718 .vact_space_2 = {0xff, 0xff},
719 .vact_space_3 = {0xff, 0xff},
720 .vact_space_4 = {0xff, 0xff},
721 .vact_space_5 = {0xff, 0xff},
722 .vact_space_6 = {0xff, 0xff},
723 /* other don't care */
724 }, 505 },
725 .tg = { 506 {
726 0x00, /* cmd */ 507 .pixel_clock = 40000000,
727 0x50, 0x0a, /* h_fsz */ 508 .conf = {
728 0xd0, 0x02, 0x80, 0x07, /* hact */ 509 0x01, 0x51, 0x32, 0x55, 0x40, 0x01, 0x00, 0x08,
729 0x65, 0x04, /* v_fsz */ 510 0x82, 0x80, 0x2c, 0xd9, 0x45, 0xa0, 0xac, 0x80,
730 0x01, 0x00, 0x33, 0x02, /* vsync */ 511 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
731 0x16, 0x00, 0x1c, 0x02, /* vact */ 512 0x54, 0x9a, 0x24, 0x00, 0x00, 0x00, 0x01, 0x80,
732 0x33, 0x02, /* field_chg */ 513 },
733 0x49, 0x02, /* vact_st2 */
734 0x00, 0x00, /* vact_st3 */
735 0x00, 0x00, /* vact_st4 */
736 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
737 0x01, 0x00, 0x33, 0x02, /* field top/bot */
738 0x00, /* 3d FP */
739 }, 514 },
740}; 515 {
741 516 .pixel_clock = 65000000,
742static const struct hdmi_preset_conf hdmi_conf_1080i60 = { 517 .conf = {
743 .core = { 518 0x01, 0xd1, 0x36, 0x34, 0x40, 0x1e, 0x0a, 0x08,
744 .h_blank = {0x18, 0x01}, 519 0x82, 0xa0, 0x45, 0xd9, 0x45, 0xa0, 0xac, 0x80,
745 .v2_blank = {0x32, 0x02}, 520 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
746 .v1_blank = {0x16, 0x00}, 521 0x54, 0xbd, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
747 .v_line = {0x65, 0x04}, 522 },
748 .h_line = {0x98, 0x08},
749 .hsync_pol = {0x00},
750 .vsync_pol = {0x00},
751 .int_pro_mode = {0x01},
752 .v_blank_f0 = {0x49, 0x02},
753 .v_blank_f1 = {0x65, 0x04},
754 .h_sync_start = {0x56, 0x00},
755 .h_sync_end = {0x82, 0x00},
756 .v_sync_line_bef_2 = {0x07, 0x00},
757 .v_sync_line_bef_1 = {0x02, 0x00},
758 .v_sync_line_aft_2 = {0x39, 0x02},
759 .v_sync_line_aft_1 = {0x34, 0x02},
760 .v_sync_line_aft_pxl_2 = {0xa4, 0x04},
761 .v_sync_line_aft_pxl_1 = {0xa4, 0x04},
762 .v_blank_f2 = {0xff, 0xff},
763 .v_blank_f3 = {0xff, 0xff},
764 .v_blank_f4 = {0xff, 0xff},
765 .v_blank_f5 = {0xff, 0xff},
766 .v_sync_line_aft_3 = {0xff, 0xff},
767 .v_sync_line_aft_4 = {0xff, 0xff},
768 .v_sync_line_aft_5 = {0xff, 0xff},
769 .v_sync_line_aft_6 = {0xff, 0xff},
770 .v_sync_line_aft_pxl_3 = {0xff, 0xff},
771 .v_sync_line_aft_pxl_4 = {0xff, 0xff},
772 .v_sync_line_aft_pxl_5 = {0xff, 0xff},
773 .v_sync_line_aft_pxl_6 = {0xff, 0xff},
774 .vact_space_1 = {0xff, 0xff},
775 .vact_space_2 = {0xff, 0xff},
776 .vact_space_3 = {0xff, 0xff},
777 .vact_space_4 = {0xff, 0xff},
778 .vact_space_5 = {0xff, 0xff},
779 .vact_space_6 = {0xff, 0xff},
780 /* other don't care */
781 }, 523 },
782 .tg = { 524 {
783 0x00, /* cmd */ 525 .pixel_clock = 74176000,
784 0x98, 0x08, /* h_fsz */ 526 .conf = {
785 0x18, 0x01, 0x80, 0x07, /* hact */ 527 0x01, 0xd1, 0x3e, 0x35, 0x40, 0x5b, 0xde, 0x08,
786 0x65, 0x04, /* v_fsz */ 528 0x82, 0xa0, 0x73, 0xd9, 0x45, 0xa0, 0xac, 0x80,
787 0x01, 0x00, 0x33, 0x02, /* vsync */ 529 0x56, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
788 0x16, 0x00, 0x1c, 0x02, /* vact */ 530 0x54, 0xa6, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
789 0x33, 0x02, /* field_chg */ 531 },
790 0x49, 0x02, /* vact_st2 */
791 0x00, 0x00, /* vact_st3 */
792 0x00, 0x00, /* vact_st4 */
793 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
794 0x01, 0x00, 0x33, 0x02, /* field top/bot */
795 0x00, /* 3d FP */
796 }, 532 },
797}; 533 {
798 534 .pixel_clock = 74250000,
799static const struct hdmi_preset_conf hdmi_conf_1080p30 = { 535 .conf = {
800 .core = { 536 0x01, 0xd1, 0x1f, 0x10, 0x40, 0x40, 0xf8, 0x08,
801 .h_blank = {0x18, 0x01}, 537 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
802 .v2_blank = {0x65, 0x04}, 538 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
803 .v1_blank = {0x2d, 0x00}, 539 0x54, 0xa5, 0x24, 0x01, 0x00, 0x00, 0x01, 0x00,
804 .v_line = {0x65, 0x04}, 540 },
805 .h_line = {0x98, 0x08},
806 .hsync_pol = {0x00},
807 .vsync_pol = {0x00},
808 .int_pro_mode = {0x00},
809 .v_blank_f0 = {0xff, 0xff},
810 .v_blank_f1 = {0xff, 0xff},
811 .h_sync_start = {0x56, 0x00},
812 .h_sync_end = {0x82, 0x00},
813 .v_sync_line_bef_2 = {0x09, 0x00},
814 .v_sync_line_bef_1 = {0x04, 0x00},
815 .v_sync_line_aft_2 = {0xff, 0xff},
816 .v_sync_line_aft_1 = {0xff, 0xff},
817 .v_sync_line_aft_pxl_2 = {0xff, 0xff},
818 .v_sync_line_aft_pxl_1 = {0xff, 0xff},
819 .v_blank_f2 = {0xff, 0xff},
820 .v_blank_f3 = {0xff, 0xff},
821 .v_blank_f4 = {0xff, 0xff},
822 .v_blank_f5 = {0xff, 0xff},
823 .v_sync_line_aft_3 = {0xff, 0xff},
824 .v_sync_line_aft_4 = {0xff, 0xff},
825 .v_sync_line_aft_5 = {0xff, 0xff},
826 .v_sync_line_aft_6 = {0xff, 0xff},
827 .v_sync_line_aft_pxl_3 = {0xff, 0xff},
828 .v_sync_line_aft_pxl_4 = {0xff, 0xff},
829 .v_sync_line_aft_pxl_5 = {0xff, 0xff},
830 .v_sync_line_aft_pxl_6 = {0xff, 0xff},
831 .vact_space_1 = {0xff, 0xff},
832 .vact_space_2 = {0xff, 0xff},
833 .vact_space_3 = {0xff, 0xff},
834 .vact_space_4 = {0xff, 0xff},
835 .vact_space_5 = {0xff, 0xff},
836 .vact_space_6 = {0xff, 0xff},
837 /* other don't care */
838 }, 541 },
839 .tg = { 542 {
840 0x00, /* cmd */ 543 .pixel_clock = 83500000,
841 0x98, 0x08, /* h_fsz */ 544 .conf = {
842 0x18, 0x01, 0x80, 0x07, /* hact */ 545 0x01, 0xd1, 0x23, 0x11, 0x40, 0x0c, 0xfb, 0x08,
843 0x65, 0x04, /* v_fsz */ 546 0x85, 0xa0, 0xd1, 0xd8, 0x45, 0xa0, 0xac, 0x80,
844 0x01, 0x00, 0x33, 0x02, /* vsync */ 547 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
845 0x2d, 0x00, 0x38, 0x04, /* vact */ 548 0x54, 0x93, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
846 0x33, 0x02, /* field_chg */ 549 },
847 0x48, 0x02, /* vact_st2 */
848 0x00, 0x00, /* vact_st3 */
849 0x00, 0x00, /* vact_st4 */
850 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
851 0x01, 0x00, 0x33, 0x02, /* field top/bot */
852 0x00, /* 3d FP */
853 }, 550 },
854}; 551 {
855 552 .pixel_clock = 106500000,
856static const struct hdmi_preset_conf hdmi_conf_1080p50 = { 553 .conf = {
857 .core = { 554 0x01, 0xd1, 0x2c, 0x12, 0x40, 0x0c, 0x09, 0x08,
858 .h_blank = {0xd0, 0x02}, 555 0x84, 0xa0, 0x0a, 0xd9, 0x45, 0xa0, 0xac, 0x80,
859 .v2_blank = {0x65, 0x04}, 556 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
860 .v1_blank = {0x2d, 0x00}, 557 0x54, 0x73, 0x24, 0x01, 0x00, 0x00, 0x01, 0x80,
861 .v_line = {0x65, 0x04}, 558 },
862 .h_line = {0x50, 0x0a},
863 .hsync_pol = {0x00},
864 .vsync_pol = {0x00},
865 .int_pro_mode = {0x00},
866 .v_blank_f0 = {0xff, 0xff},
867 .v_blank_f1 = {0xff, 0xff},
868 .h_sync_start = {0x0e, 0x02},
869 .h_sync_end = {0x3a, 0x02},
870 .v_sync_line_bef_2 = {0x09, 0x00},
871 .v_sync_line_bef_1 = {0x04, 0x00},
872 .v_sync_line_aft_2 = {0xff, 0xff},
873 .v_sync_line_aft_1 = {0xff, 0xff},
874 .v_sync_line_aft_pxl_2 = {0xff, 0xff},
875 .v_sync_line_aft_pxl_1 = {0xff, 0xff},
876 .v_blank_f2 = {0xff, 0xff},
877 .v_blank_f3 = {0xff, 0xff},
878 .v_blank_f4 = {0xff, 0xff},
879 .v_blank_f5 = {0xff, 0xff},
880 .v_sync_line_aft_3 = {0xff, 0xff},
881 .v_sync_line_aft_4 = {0xff, 0xff},
882 .v_sync_line_aft_5 = {0xff, 0xff},
883 .v_sync_line_aft_6 = {0xff, 0xff},
884 .v_sync_line_aft_pxl_3 = {0xff, 0xff},
885 .v_sync_line_aft_pxl_4 = {0xff, 0xff},
886 .v_sync_line_aft_pxl_5 = {0xff, 0xff},
887 .v_sync_line_aft_pxl_6 = {0xff, 0xff},
888 .vact_space_1 = {0xff, 0xff},
889 .vact_space_2 = {0xff, 0xff},
890 .vact_space_3 = {0xff, 0xff},
891 .vact_space_4 = {0xff, 0xff},
892 .vact_space_5 = {0xff, 0xff},
893 .vact_space_6 = {0xff, 0xff},
894 /* other don't care */
895 }, 559 },
896 .tg = { 560 {
897 0x00, /* cmd */ 561 .pixel_clock = 108000000,
898 0x50, 0x0a, /* h_fsz */ 562 .conf = {
899 0xd0, 0x02, 0x80, 0x07, /* hact */ 563 0x01, 0x51, 0x2d, 0x15, 0x40, 0x01, 0x00, 0x08,
900 0x65, 0x04, /* v_fsz */ 564 0x82, 0x80, 0x0e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
901 0x01, 0x00, 0x33, 0x02, /* vsync */ 565 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
902 0x2d, 0x00, 0x38, 0x04, /* vact */ 566 0x54, 0xc7, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
903 0x33, 0x02, /* field_chg */ 567 },
904 0x48, 0x02, /* vact_st2 */
905 0x00, 0x00, /* vact_st3 */
906 0x00, 0x00, /* vact_st4 */
907 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
908 0x01, 0x00, 0x33, 0x02, /* field top/bot */
909 0x00, /* 3d FP */
910 }, 568 },
911}; 569 {
912 570 .pixel_clock = 146250000,
913static const struct hdmi_preset_conf hdmi_conf_1080p60 = { 571 .conf = {
914 .core = { 572 0x01, 0xd1, 0x3d, 0x15, 0x40, 0x18, 0xfd, 0x08,
915 .h_blank = {0x18, 0x01}, 573 0x83, 0xa0, 0x6e, 0xd9, 0x45, 0xa0, 0xac, 0x80,
916 .v2_blank = {0x65, 0x04}, 574 0x08, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
917 .v1_blank = {0x2d, 0x00}, 575 0x54, 0x50, 0x25, 0x03, 0x00, 0x00, 0x01, 0x80,
918 .v_line = {0x65, 0x04}, 576 },
919 .h_line = {0x98, 0x08},
920 .hsync_pol = {0x00},
921 .vsync_pol = {0x00},
922 .int_pro_mode = {0x00},
923 .v_blank_f0 = {0xff, 0xff},
924 .v_blank_f1 = {0xff, 0xff},
925 .h_sync_start = {0x56, 0x00},
926 .h_sync_end = {0x82, 0x00},
927 .v_sync_line_bef_2 = {0x09, 0x00},
928 .v_sync_line_bef_1 = {0x04, 0x00},
929 .v_sync_line_aft_2 = {0xff, 0xff},
930 .v_sync_line_aft_1 = {0xff, 0xff},
931 .v_sync_line_aft_pxl_2 = {0xff, 0xff},
932 .v_sync_line_aft_pxl_1 = {0xff, 0xff},
933 .v_blank_f2 = {0xff, 0xff},
934 .v_blank_f3 = {0xff, 0xff},
935 .v_blank_f4 = {0xff, 0xff},
936 .v_blank_f5 = {0xff, 0xff},
937 .v_sync_line_aft_3 = {0xff, 0xff},
938 .v_sync_line_aft_4 = {0xff, 0xff},
939 .v_sync_line_aft_5 = {0xff, 0xff},
940 .v_sync_line_aft_6 = {0xff, 0xff},
941 .v_sync_line_aft_pxl_3 = {0xff, 0xff},
942 .v_sync_line_aft_pxl_4 = {0xff, 0xff},
943 .v_sync_line_aft_pxl_5 = {0xff, 0xff},
944 .v_sync_line_aft_pxl_6 = {0xff, 0xff},
945 /* other don't care */
946 }, 577 },
947 .tg = { 578 {
948 0x00, /* cmd */ 579 .pixel_clock = 148500000,
949 0x98, 0x08, /* h_fsz */ 580 .conf = {
950 0x18, 0x01, 0x80, 0x07, /* hact */ 581 0x01, 0xd1, 0x1f, 0x00, 0x40, 0x40, 0xf8, 0x08,
951 0x65, 0x04, /* v_fsz */ 582 0x81, 0xa0, 0xba, 0xd8, 0x45, 0xa0, 0xac, 0x80,
952 0x01, 0x00, 0x33, 0x02, /* vsync */ 583 0x3c, 0x80, 0x11, 0x04, 0x02, 0x22, 0x44, 0x86,
953 0x2d, 0x00, 0x38, 0x04, /* vact */ 584 0x54, 0x4b, 0x25, 0x03, 0x00, 0x00, 0x01, 0x00,
954 0x33, 0x02, /* field_chg */ 585 },
955 0x48, 0x02, /* vact_st2 */
956 0x00, 0x00, /* vact_st3 */
957 0x00, 0x00, /* vact_st4 */
958 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
959 0x01, 0x00, 0x33, 0x02, /* field top/bot */
960 0x00, /* 3d FP */
961 }, 586 },
962}; 587};
963 588
964static const struct hdmi_conf hdmi_confs[] = {
965 { 720, 480, 60, false, 3, hdmiphy_conf27_027, &hdmi_conf_480p60 },
966 { 1280, 720, 50, false, 19, hdmiphy_conf74_25, &hdmi_conf_720p50 },
967 { 1280, 720, 60, false, 4, hdmiphy_conf74_25, &hdmi_conf_720p60 },
968 { 1920, 1080, 50, true, 20, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
969 { 1920, 1080, 60, true, 5, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
970 { 1920, 1080, 30, false, 34, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
971 { 1920, 1080, 50, false, 31, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
972 { 1920, 1080, 60, false, 16, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
973};
974
975struct hdmi_infoframe { 589struct hdmi_infoframe {
976 enum HDMI_PACKET_TYPE type; 590 enum HDMI_PACKET_TYPE type;
977 u8 ver; 591 u8 ver;
@@ -1275,31 +889,6 @@ static int hdmi_v13_conf_index(struct drm_display_mode *mode)
1275 return -EINVAL; 889 return -EINVAL;
1276} 890}
1277 891
1278static int hdmi_v14_conf_index(struct drm_display_mode *mode)
1279{
1280 int i;
1281
1282 for (i = 0; i < ARRAY_SIZE(hdmi_confs); ++i)
1283 if (hdmi_confs[i].width == mode->hdisplay &&
1284 hdmi_confs[i].height == mode->vdisplay &&
1285 hdmi_confs[i].vrefresh == mode->vrefresh &&
1286 hdmi_confs[i].interlace ==
1287 ((mode->flags & DRM_MODE_FLAG_INTERLACE) ?
1288 true : false))
1289 return i;
1290
1291 return -EINVAL;
1292}
1293
1294static int hdmi_conf_index(struct hdmi_context *hdata,
1295 struct drm_display_mode *mode)
1296{
1297 if (hdata->type == HDMI_TYPE13)
1298 return hdmi_v13_conf_index(mode);
1299
1300 return hdmi_v14_conf_index(mode);
1301}
1302
1303static u8 hdmi_chksum(struct hdmi_context *hdata, 892static u8 hdmi_chksum(struct hdmi_context *hdata,
1304 u32 start, u8 len, u32 hdr_sum) 893 u32 start, u8 len, u32 hdr_sum)
1305{ 894{
@@ -1357,7 +946,7 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
1357 if (hdata->type == HDMI_TYPE13) 946 if (hdata->type == HDMI_TYPE13)
1358 vic = hdmi_v13_confs[hdata->cur_conf].cea_video_id; 947 vic = hdmi_v13_confs[hdata->cur_conf].cea_video_id;
1359 else 948 else
1360 vic = hdmi_confs[hdata->cur_conf].cea_video_id; 949 vic = hdata->mode_conf.cea_video_id;
1361 950
1362 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic); 951 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
1363 952
@@ -1434,44 +1023,51 @@ static int hdmi_v13_check_timing(struct fb_videomode *check_timing)
1434 return -EINVAL; 1023 return -EINVAL;
1435} 1024}
1436 1025
1026static int hdmi_v14_find_phy_conf(int pixel_clock)
1027{
1028 int i;
1029
1030 for (i = 0; i < ARRAY_SIZE(hdmiphy_v14_configs); i++) {
1031 if (hdmiphy_v14_configs[i].pixel_clock == pixel_clock)
1032 return i;
1033 }
1034
1035 DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock);
1036 return -EINVAL;
1037}
1038
1437static int hdmi_v14_check_timing(struct fb_videomode *check_timing) 1039static int hdmi_v14_check_timing(struct fb_videomode *check_timing)
1438{ 1040{
1439 int i; 1041 int i;
1440 1042
1441 DRM_DEBUG_KMS("valid mode : xres=%d, yres=%d, refresh=%d, intl=%d\n", 1043 DRM_DEBUG_KMS("mode: xres=%d, yres=%d, refresh=%d, clock=%d, intl=%d\n",
1442 check_timing->xres, check_timing->yres, 1044 check_timing->xres, check_timing->yres,
1443 check_timing->refresh, (check_timing->vmode & 1045 check_timing->refresh, check_timing->pixclock,
1444 FB_VMODE_INTERLACED) ? true : false); 1046 (check_timing->vmode & FB_VMODE_INTERLACED) ?
1047 true : false);
1445 1048
1446 for (i = 0; i < ARRAY_SIZE(hdmi_confs); i++) 1049 for (i = 0; i < ARRAY_SIZE(hdmiphy_v14_configs); i++)
1447 if (hdmi_confs[i].width == check_timing->xres && 1050 if (hdmiphy_v14_configs[i].pixel_clock ==
1448 hdmi_confs[i].height == check_timing->yres && 1051 check_timing->pixclock)
1449 hdmi_confs[i].vrefresh == check_timing->refresh && 1052 return 0;
1450 hdmi_confs[i].interlace ==
1451 ((check_timing->vmode & FB_VMODE_INTERLACED) ?
1452 true : false))
1453 return 0;
1454
1455 /* TODO */
1456 1053
1457 return -EINVAL; 1054 return -EINVAL;
1458} 1055}
1459 1056
1460static int hdmi_check_timing(void *ctx, void *timing) 1057static int hdmi_check_timing(void *ctx, struct fb_videomode *timing)
1461{ 1058{
1462 struct hdmi_context *hdata = ctx; 1059 struct hdmi_context *hdata = ctx;
1463 struct fb_videomode *check_timing = timing;
1464 1060
1465 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 1061 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1466 1062
1467 DRM_DEBUG_KMS("[%d]x[%d] [%d]Hz [%x]\n", check_timing->xres, 1063 DRM_DEBUG_KMS("[%d]x[%d] [%d]Hz [%x]\n", timing->xres,
1468 check_timing->yres, check_timing->refresh, 1064 timing->yres, timing->refresh,
1469 check_timing->vmode); 1065 timing->vmode);
1470 1066
1471 if (hdata->type == HDMI_TYPE13) 1067 if (hdata->type == HDMI_TYPE13)
1472 return hdmi_v13_check_timing(check_timing); 1068 return hdmi_v13_check_timing(timing);
1473 else 1069 else
1474 return hdmi_v14_check_timing(check_timing); 1070 return hdmi_v14_check_timing(timing);
1475} 1071}
1476 1072
1477static void hdmi_set_acr(u32 freq, u8 *acr) 1073static void hdmi_set_acr(u32 freq, u8 *acr)
@@ -1795,9 +1391,8 @@ static void hdmi_v13_timing_apply(struct hdmi_context *hdata)
1795 1391
1796static void hdmi_v14_timing_apply(struct hdmi_context *hdata) 1392static void hdmi_v14_timing_apply(struct hdmi_context *hdata)
1797{ 1393{
1798 const struct hdmi_preset_conf *conf = hdmi_confs[hdata->cur_conf].conf; 1394 struct hdmi_core_regs *core = &hdata->mode_conf.core;
1799 const struct hdmi_core_regs *core = &conf->core; 1395 struct hdmi_tg_regs *tg = &hdata->mode_conf.tg;
1800 const struct hdmi_tg_regs *tg = &conf->tg;
1801 int tries; 1396 int tries;
1802 1397
1803 /* setting core registers */ 1398 /* setting core registers */
@@ -1900,39 +1495,39 @@ static void hdmi_v14_timing_apply(struct hdmi_context *hdata)
1900 hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_1, core->vact_space_6[1]); 1495 hdmi_reg_writeb(hdata, HDMI_VACT_SPACE_6_1, core->vact_space_6[1]);
1901 1496
1902 /* Timing generator registers */ 1497 /* Timing generator registers */
1903 hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz_l); 1498 hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz[0]);
1904 hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz_h); 1499 hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz[1]);
1905 hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st_l); 1500 hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st[0]);
1906 hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st_h); 1501 hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st[1]);
1907 hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz_l); 1502 hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz[0]);
1908 hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz_h); 1503 hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz[1]);
1909 hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz_l); 1504 hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz[0]);
1910 hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz_h); 1505 hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz[1]);
1911 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync_l); 1506 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync[0]);
1912 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync_h); 1507 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync[1]);
1913 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2_l); 1508 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2[0]);
1914 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2_h); 1509 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2[1]);
1915 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st_l); 1510 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st[0]);
1916 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st_h); 1511 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st[1]);
1917 hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz_l); 1512 hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz[0]);
1918 hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz_h); 1513 hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz[1]);
1919 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg_l); 1514 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg[0]);
1920 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg_h); 1515 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg[1]);
1921 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2_l); 1516 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2[0]);
1922 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2_h); 1517 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2[1]);
1923 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_L, tg->vact_st3_l); 1518 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_L, tg->vact_st3[0]);
1924 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_H, tg->vact_st3_h); 1519 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST3_H, tg->vact_st3[1]);
1925 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_L, tg->vact_st4_l); 1520 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_L, tg->vact_st4[0]);
1926 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_H, tg->vact_st4_h); 1521 hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST4_H, tg->vact_st4[1]);
1927 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi_l); 1522 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi[0]);
1928 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi_h); 1523 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi[1]);
1929 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi_l); 1524 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi[0]);
1930 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi_h); 1525 hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi[1]);
1931 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi_l); 1526 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi[0]);
1932 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi_h); 1527 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi[1]);
1933 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi_l); 1528 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi[0]);
1934 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi_h); 1529 hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi[1]);
1935 hdmi_reg_writeb(hdata, HDMI_TG_3D, tg->tg_3d); 1530 hdmi_reg_writeb(hdata, HDMI_TG_3D, tg->tg_3d[0]);
1936 1531
1937 /* waiting for HDMIPHY's PLL to get to steady state */ 1532 /* waiting for HDMIPHY's PLL to get to steady state */
1938 for (tries = 100; tries; --tries) { 1533 for (tries = 100; tries; --tries) {
@@ -2029,10 +1624,17 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
2029 } 1624 }
2030 1625
2031 /* pixel clock */ 1626 /* pixel clock */
2032 if (hdata->type == HDMI_TYPE13) 1627 if (hdata->type == HDMI_TYPE13) {
2033 hdmiphy_data = hdmi_v13_confs[hdata->cur_conf].hdmiphy_data; 1628 hdmiphy_data = hdmi_v13_confs[hdata->cur_conf].hdmiphy_data;
2034 else 1629 } else {
2035 hdmiphy_data = hdmi_confs[hdata->cur_conf].hdmiphy_data; 1630 i = hdmi_v14_find_phy_conf(hdata->mode_conf.pixel_clock);
1631 if (i < 0) {
1632 DRM_ERROR("failed to find hdmiphy conf\n");
1633 return;
1634 }
1635
1636 hdmiphy_data = hdmiphy_v14_configs[i].conf;
1637 }
2036 1638
2037 memcpy(buffer, hdmiphy_data, 32); 1639 memcpy(buffer, hdmiphy_data, 32);
2038 ret = i2c_master_send(hdata->hdmiphy_port, buffer, 32); 1640 ret = i2c_master_send(hdata->hdmiphy_port, buffer, 32);
@@ -2100,7 +1702,7 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
2100 if (hdata->type == HDMI_TYPE13) 1702 if (hdata->type == HDMI_TYPE13)
2101 index = hdmi_v13_conf_index(adjusted_mode); 1703 index = hdmi_v13_conf_index(adjusted_mode);
2102 else 1704 else
2103 index = hdmi_v14_conf_index(adjusted_mode); 1705 index = hdmi_v14_find_phy_conf(adjusted_mode->clock * 1000);
2104 1706
2105 /* just return if user desired mode exists. */ 1707 /* just return if user desired mode exists. */
2106 if (index >= 0) 1708 if (index >= 0)
@@ -2114,7 +1716,7 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
2114 if (hdata->type == HDMI_TYPE13) 1716 if (hdata->type == HDMI_TYPE13)
2115 index = hdmi_v13_conf_index(m); 1717 index = hdmi_v13_conf_index(m);
2116 else 1718 else
2117 index = hdmi_v14_conf_index(m); 1719 index = hdmi_v14_find_phy_conf(m->clock * 1000);
2118 1720
2119 if (index >= 0) { 1721 if (index >= 0) {
2120 struct drm_mode_object base; 1722 struct drm_mode_object base;
@@ -2123,6 +1725,9 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
2123 DRM_INFO("desired mode doesn't exist so\n"); 1725 DRM_INFO("desired mode doesn't exist so\n");
2124 DRM_INFO("use the most suitable mode among modes.\n"); 1726 DRM_INFO("use the most suitable mode among modes.\n");
2125 1727
1728 DRM_DEBUG_KMS("Adjusted Mode: [%d]x[%d] [%d]Hz\n",
1729 m->hdisplay, m->vdisplay, m->vrefresh);
1730
2126 /* preserve display mode header while copying. */ 1731 /* preserve display mode header while copying. */
2127 head = adjusted_mode->head; 1732 head = adjusted_mode->head;
2128 base = adjusted_mode->base; 1733 base = adjusted_mode->base;
@@ -2134,6 +1739,122 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
2134 } 1739 }
2135} 1740}
2136 1741
1742static void hdmi_set_reg(u8 *reg_pair, int num_bytes, u32 value)
1743{
1744 int i;
1745 BUG_ON(num_bytes > 4);
1746 for (i = 0; i < num_bytes; i++)
1747 reg_pair[i] = (value >> (8 * i)) & 0xff;
1748}
1749
1750static void hdmi_v14_mode_set(struct hdmi_context *hdata,
1751 struct drm_display_mode *m)
1752{
1753 struct hdmi_core_regs *core = &hdata->mode_conf.core;
1754 struct hdmi_tg_regs *tg = &hdata->mode_conf.tg;
1755
1756 hdata->mode_conf.cea_video_id = drm_match_cea_mode(m);
1757
1758 hdata->mode_conf.pixel_clock = m->clock * 1000;
1759 hdmi_set_reg(core->h_blank, 2, m->htotal - m->hdisplay);
1760 hdmi_set_reg(core->v_line, 2, m->vtotal);
1761 hdmi_set_reg(core->h_line, 2, m->htotal);
1762 hdmi_set_reg(core->hsync_pol, 1,
1763 (m->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0);
1764 hdmi_set_reg(core->vsync_pol, 1,
1765 (m->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0);
1766 hdmi_set_reg(core->int_pro_mode, 1,
1767 (m->flags & DRM_MODE_FLAG_INTERLACE) ? 1 : 0);
1768
1769 /*
1770 * Quirk of the exynos 5 HDMI IP design: hsync_start and
1771 * hsync_end must be programmed 2 pixels less than the
1772 * calculated values.
1773 */
1774
1775 /* The following values and calculations differ between mode types */
1776 if (m->flags & DRM_MODE_FLAG_INTERLACE) {
1777 /* Interlaced Mode */
1778 hdmi_set_reg(core->v_sync_line_bef_2, 2,
1779 (m->vsync_end - m->vdisplay) / 2);
1780 hdmi_set_reg(core->v_sync_line_bef_1, 2,
1781 (m->vsync_start - m->vdisplay) / 2);
1782 hdmi_set_reg(core->v2_blank, 2, m->vtotal / 2);
1783 hdmi_set_reg(core->v1_blank, 2, (m->vtotal - m->vdisplay) / 2);
1784 hdmi_set_reg(core->v_blank_f0, 2, (m->vtotal +
1785 ((m->vsync_end - m->vsync_start) * 4) + 5) / 2);
1786 hdmi_set_reg(core->v_blank_f1, 2, m->vtotal);
1787 hdmi_set_reg(core->v_sync_line_aft_2, 2, (m->vtotal / 2) + 7);
1788 hdmi_set_reg(core->v_sync_line_aft_1, 2, (m->vtotal / 2) + 2);
1789 hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2,
1790 (m->htotal / 2) + (m->hsync_start - m->hdisplay));
1791 hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2,
1792 (m->htotal / 2) + (m->hsync_start - m->hdisplay));
1793 hdmi_set_reg(tg->vact_st, 2, (m->vtotal - m->vdisplay) / 2);
1794 hdmi_set_reg(tg->vact_sz, 2, m->vdisplay / 2);
1795 hdmi_set_reg(tg->vact_st2, 2, 0x249);/* Reset value + 1*/
1796 hdmi_set_reg(tg->vact_st3, 2, 0x0);
1797 hdmi_set_reg(tg->vact_st4, 2, 0x0);
1798 } else {
1799 /* Progressive Mode */
1800 hdmi_set_reg(core->v_sync_line_bef_2, 2,
1801 m->vsync_end - m->vdisplay);
1802 hdmi_set_reg(core->v_sync_line_bef_1, 2,
1803 m->vsync_start - m->vdisplay);
1804 hdmi_set_reg(core->v2_blank, 2, m->vtotal);
1805 hdmi_set_reg(core->v1_blank, 2, m->vtotal - m->vdisplay);
1806 hdmi_set_reg(core->v_blank_f0, 2, 0xffff);
1807 hdmi_set_reg(core->v_blank_f1, 2, 0xffff);
1808 hdmi_set_reg(core->v_sync_line_aft_2, 2, 0xffff);
1809 hdmi_set_reg(core->v_sync_line_aft_1, 2, 0xffff);
1810 hdmi_set_reg(core->v_sync_line_aft_pxl_2, 2, 0xffff);
1811 hdmi_set_reg(core->v_sync_line_aft_pxl_1, 2, 0xffff);
1812 hdmi_set_reg(tg->vact_st, 2, m->vtotal - m->vdisplay);
1813 hdmi_set_reg(tg->vact_sz, 2, m->vdisplay);
1814 hdmi_set_reg(tg->vact_st2, 2, 0x248); /* Reset value */
1815 hdmi_set_reg(tg->vact_st3, 2, 0x47b); /* Reset value */
1816 hdmi_set_reg(tg->vact_st4, 2, 0x6ae); /* Reset value */
1817 }
1818
1819 /* The following values and calculations are the same irrespective of mode type */
1820 hdmi_set_reg(core->h_sync_start, 2, m->hsync_start - m->hdisplay - 2);
1821 hdmi_set_reg(core->h_sync_end, 2, m->hsync_end - m->hdisplay - 2);
1822 hdmi_set_reg(core->vact_space_1, 2, 0xffff);
1823 hdmi_set_reg(core->vact_space_2, 2, 0xffff);
1824 hdmi_set_reg(core->vact_space_3, 2, 0xffff);
1825 hdmi_set_reg(core->vact_space_4, 2, 0xffff);
1826 hdmi_set_reg(core->vact_space_5, 2, 0xffff);
1827 hdmi_set_reg(core->vact_space_6, 2, 0xffff);
1828 hdmi_set_reg(core->v_blank_f2, 2, 0xffff);
1829 hdmi_set_reg(core->v_blank_f3, 2, 0xffff);
1830 hdmi_set_reg(core->v_blank_f4, 2, 0xffff);
1831 hdmi_set_reg(core->v_blank_f5, 2, 0xffff);
1832 hdmi_set_reg(core->v_sync_line_aft_3, 2, 0xffff);
1833 hdmi_set_reg(core->v_sync_line_aft_4, 2, 0xffff);
1834 hdmi_set_reg(core->v_sync_line_aft_5, 2, 0xffff);
1835 hdmi_set_reg(core->v_sync_line_aft_6, 2, 0xffff);
1836 hdmi_set_reg(core->v_sync_line_aft_pxl_3, 2, 0xffff);
1837 hdmi_set_reg(core->v_sync_line_aft_pxl_4, 2, 0xffff);
1838 hdmi_set_reg(core->v_sync_line_aft_pxl_5, 2, 0xffff);
1839 hdmi_set_reg(core->v_sync_line_aft_pxl_6, 2, 0xffff);
1840
1841 /* Timing generator registers */
1842 hdmi_set_reg(tg->cmd, 1, 0x0);
1843 hdmi_set_reg(tg->h_fsz, 2, m->htotal);
1844 hdmi_set_reg(tg->hact_st, 2, m->htotal - m->hdisplay);
1845 hdmi_set_reg(tg->hact_sz, 2, m->hdisplay);
1846 hdmi_set_reg(tg->v_fsz, 2, m->vtotal);
1847 hdmi_set_reg(tg->vsync, 2, 0x1);
1848 hdmi_set_reg(tg->vsync2, 2, 0x233); /* Reset value */
1849 hdmi_set_reg(tg->field_chg, 2, 0x233); /* Reset value */
1850 hdmi_set_reg(tg->vsync_top_hdmi, 2, 0x1); /* Reset value */
1851 hdmi_set_reg(tg->vsync_bot_hdmi, 2, 0x233); /* Reset value */
1852 hdmi_set_reg(tg->field_top_hdmi, 2, 0x1); /* Reset value */
1853 hdmi_set_reg(tg->field_bot_hdmi, 2, 0x233); /* Reset value */
1854 hdmi_set_reg(tg->tg_3d, 1, 0x0);
1855
1856}
1857
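
The interlaced branch above halves the vertical counts that DRM stores as full-frame values, while the progressive branch programs them unchanged. Below is a minimal standalone sketch of that arithmetic; the struct and the 1080i-like numbers are illustrative stand-ins, not the kernel's struct drm_display_mode.

    /* Sketch of the vertical-blanking math in the v14 mode-set code above;
     * compiles with any C compiler, no kernel headers involved. */
    #include <stdio.h>

    struct mode { int vdisplay, vsync_start, vsync_end, vtotal, flags; };
    #define FLAG_INTERLACE 1

    int main(void)
    {
        struct mode m = { 1080, 1084, 1094, 1125, FLAG_INTERLACE }; /* 1080i-like */

        if (m.flags & FLAG_INTERLACE)
            printf("v1_blank=%d v2_blank=%d vact_st=%d vact_sz=%d\n",
                   (m.vtotal - m.vdisplay) / 2, m.vtotal / 2,
                   (m.vtotal - m.vdisplay) / 2, m.vdisplay / 2);
        else
            printf("v1_blank=%d v2_blank=%d vact_st=%d vact_sz=%d\n",
                   m.vtotal - m.vdisplay, m.vtotal,
                   m.vtotal - m.vdisplay, m.vdisplay);
        return 0;
    }
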
2137static void hdmi_mode_set(void *ctx, void *mode) 1858static void hdmi_mode_set(void *ctx, void *mode)
2138{ 1859{
2139 struct hdmi_context *hdata = ctx; 1860 struct hdmi_context *hdata = ctx;
@@ -2141,11 +1862,15 @@ static void hdmi_mode_set(void *ctx, void *mode)
2141 1862
2142 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 1863 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2143 1864
2144 conf_idx = hdmi_conf_index(hdata, mode); 1865 if (hdata->type == HDMI_TYPE13) {
2145 if (conf_idx >= 0) 1866 conf_idx = hdmi_v13_conf_index(mode);
2146 hdata->cur_conf = conf_idx; 1867 if (conf_idx >= 0)
2147 else 1868 hdata->cur_conf = conf_idx;
2148 DRM_DEBUG_KMS("not supported mode\n"); 1869 else
1870 DRM_DEBUG_KMS("not supported mode\n");
1871 } else {
1872 hdmi_v14_mode_set(hdata, mode);
1873 }
2149} 1874}
2150 1875
2151static void hdmi_get_max_resol(void *ctx, unsigned int *width, 1876static void hdmi_get_max_resol(void *ctx, unsigned int *width,
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index c414584bfbae..e919aba29b3d 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -284,13 +284,13 @@ static void mixer_cfg_scan(struct mixer_context *ctx, unsigned int height)
284 MXR_CFG_SCAN_PROGRASSIVE); 284 MXR_CFG_SCAN_PROGRASSIVE);
285 285
286 /* choosing between proper HD and SD mode */ 286 /* choosing between proper HD and SD mode */
287 if (height == 480) 287 if (height <= 480)
288 val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD; 288 val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
289 else if (height == 576) 289 else if (height <= 576)
290 val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD; 290 val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
291 else if (height == 720) 291 else if (height <= 720)
292 val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD; 292 val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
293 else if (height == 1080) 293 else if (height <= 1080)
294 val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD; 294 val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
295 else 295 else
296 val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD; 296 val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
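
The hunk above relaxes the scan-mode selection from exact height matches to ranges, so heights that fall between the CEA classes pick the nearest SD/HD configuration instead of always hitting the fallback. A small sketch of the resulting mapping (illustrative only, not the driver code):

    #include <stdio.h>

    /* Mirrors the <= chain introduced in mixer_cfg_scan() above. */
    static const char *scan_mode(unsigned int height)
    {
        if (height <= 480)  return "NTSC SD";
        if (height <= 576)  return "PAL SD";
        if (height <= 720)  return "HD 720";
        if (height <= 1080) return "HD 1080";
        return "HD 720 (fallback)";
    }

    int main(void)
    {
        unsigned int h[] = { 480, 600, 768, 1080 };
        for (int i = 0; i < 4; i++)
            printf("%u -> %s\n", h[i], scan_mode(h[i]));
        return 0;
    }
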
@@ -818,6 +818,29 @@ static void mixer_win_disable(void *ctx, int win)
818 mixer_ctx->win_data[win].enabled = false; 818 mixer_ctx->win_data[win].enabled = false;
819} 819}
820 820
821int mixer_check_timing(void *ctx, struct fb_videomode *timing)
822{
823 struct mixer_context *mixer_ctx = ctx;
824 u32 w, h;
825
826 w = timing->xres;
827 h = timing->yres;
828
829 DRM_DEBUG_KMS("%s : xres=%d, yres=%d, refresh=%d, intl=%d\n",
830 __func__, timing->xres, timing->yres,
831 timing->refresh, (timing->vmode &
832 FB_VMODE_INTERLACED) ? true : false);
833
834 if (mixer_ctx->mxr_ver == MXR_VER_0_0_0_16)
835 return 0;
836
837 if ((w >= 464 && w <= 720 && h >= 261 && h <= 576) ||
838 (w >= 1024 && w <= 1280 && h >= 576 && h <= 720) ||
839 (w >= 1664 && w <= 1920 && h >= 936 && h <= 1080))
840 return 0;
841
842 return -EINVAL;
843}
821static void mixer_wait_for_vblank(void *ctx) 844static void mixer_wait_for_vblank(void *ctx)
822{ 845{
823 struct mixer_context *mixer_ctx = ctx; 846 struct mixer_context *mixer_ctx = ctx;
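
The new mixer_check_timing() hook accepts a timing only if it falls inside one of three width/height windows (SD, 720-class, 1080-class); the MXR_VER_0_0_0_16 revision accepts everything. A standalone restatement of the window test, tried against a few sample resolutions:

    #include <stdio.h>

    /* Same three acceptance windows as mixer_check_timing() above. */
    static int timing_ok(unsigned int w, unsigned int h)
    {
        return (w >= 464  && w <= 720  && h >= 261 && h <= 576)  ||
               (w >= 1024 && w <= 1280 && h >= 576 && h <= 720)  ||
               (w >= 1664 && w <= 1920 && h >= 936 && h <= 1080);
    }

    int main(void)
    {
        printf("720x576:   %s\n", timing_ok(720, 576)   ? "ok" : "rejected");
        printf("800x600:   %s\n", timing_ok(800, 600)   ? "ok" : "rejected");
        printf("1920x1080: %s\n", timing_ok(1920, 1080) ? "ok" : "rejected");
        return 0;
    }
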
@@ -955,6 +978,9 @@ static struct exynos_mixer_ops mixer_ops = {
955 .win_mode_set = mixer_win_mode_set, 978 .win_mode_set = mixer_win_mode_set,
956 .win_commit = mixer_win_commit, 979 .win_commit = mixer_win_commit,
957 .win_disable = mixer_win_disable, 980 .win_disable = mixer_win_disable,
981
982 /* display */
983 .check_timing = mixer_check_timing,
958}; 984};
959 985
960static irqreturn_t mixer_irq_handler(int irq, void *arg) 986static irqreturn_t mixer_irq_handler(int irq, void *arg)
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index afded54dbb10..2590cac84257 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -260,13 +260,13 @@ static int psb_framebuffer_init(struct drm_device *dev,
260 default: 260 default:
261 return -EINVAL; 261 return -EINVAL;
262 } 262 }
263 drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
264 fb->gtt = gt;
263 ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs); 265 ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
264 if (ret) { 266 if (ret) {
265 dev_err(dev->dev, "framebuffer init failed: %d\n", ret); 267 dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
266 return ret; 268 return ret;
267 } 269 }
268 drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
269 fb->gtt = gt;
270 return 0; 270 return 0;
271} 271}
272 272
@@ -545,9 +545,7 @@ static int psbfb_probe(struct drm_fb_helper *helper,
545 struct psb_fbdev *psb_fbdev = (struct psb_fbdev *)helper; 545 struct psb_fbdev *psb_fbdev = (struct psb_fbdev *)helper;
546 struct drm_device *dev = psb_fbdev->psb_fb_helper.dev; 546 struct drm_device *dev = psb_fbdev->psb_fb_helper.dev;
547 struct drm_psb_private *dev_priv = dev->dev_private; 547 struct drm_psb_private *dev_priv = dev->dev_private;
548 int new_fb = 0;
549 int bytespp; 548 int bytespp;
550 int ret;
551 549
552 bytespp = sizes->surface_bpp / 8; 550 bytespp = sizes->surface_bpp / 8;
553 if (bytespp == 3) /* no 24bit packed */ 551 if (bytespp == 3) /* no 24bit packed */
@@ -562,13 +560,7 @@ static int psbfb_probe(struct drm_fb_helper *helper,
562 sizes->surface_depth = 16; 560 sizes->surface_depth = 16;
563 } 561 }
564 562
565 if (!helper->fb) { 563 return psbfb_create(psb_fbdev, sizes);
566 ret = psbfb_create(psb_fbdev, sizes);
567 if (ret)
568 return ret;
569 new_fb = 1;
570 }
571 return new_fb;
572} 564}
573 565
574static struct drm_fb_helper_funcs psb_fb_helper_funcs = { 566static struct drm_fb_helper_funcs psb_fb_helper_funcs = {
@@ -590,6 +582,7 @@ static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
590 framebuffer_release(info); 582 framebuffer_release(info);
591 } 583 }
592 drm_fb_helper_fini(&fbdev->psb_fb_helper); 584 drm_fb_helper_fini(&fbdev->psb_fb_helper);
585 drm_framebuffer_unregister_private(&psbfb->base);
593 drm_framebuffer_cleanup(&psbfb->base); 586 drm_framebuffer_cleanup(&psbfb->base);
594 587
595 if (psbfb->gtt) 588 if (psbfb->gtt)
@@ -615,6 +608,10 @@ int psb_fbdev_init(struct drm_device *dev)
615 INTELFB_CONN_LIMIT); 608 INTELFB_CONN_LIMIT);
616 609
617 drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper); 610 drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
611
612 /* disable all the possible outputs/crtcs before entering KMS mode */
613 drm_helper_disable_unused_functions(dev);
614
618 drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32); 615 drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
619 return 0; 616 return 0;
620} 617}
@@ -668,30 +665,6 @@ static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
668{ 665{
669 struct psb_framebuffer *psbfb = to_psb_fb(fb); 666 struct psb_framebuffer *psbfb = to_psb_fb(fb);
670 struct gtt_range *r = psbfb->gtt; 667 struct gtt_range *r = psbfb->gtt;
671 struct drm_device *dev = fb->dev;
672 struct drm_psb_private *dev_priv = dev->dev_private;
673 struct psb_fbdev *fbdev = dev_priv->fbdev;
674 struct drm_crtc *crtc;
675 int reset = 0;
676
677 /* Should never get stolen memory for a user fb */
678 WARN_ON(r->stolen);
679
680 /* Check if we are erroneously live */
681 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
682 if (crtc->fb == fb)
683 reset = 1;
684
685 if (reset)
686 /*
687 * Now force a sane response before we permit the DRM CRTC
688 * layer to do stupid things like blank the display. Instead
689 * we reset this framebuffer as if the user had forced a reset.
690 * We must do this before the cleanup so that the DRM layer
691 * doesn't get a chance to stick its oar in where it isn't
692 * wanted.
693 */
694 drm_fb_helper_restore_fbdev_mode(&fbdev->psb_fb_helper);
695 668
696 /* Let DRM do its clean up */ 669 /* Let DRM do its clean up */
697 drm_framebuffer_cleanup(fb); 670 drm_framebuffer_cleanup(fb);
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index b58c4701c4e8..f6f534b4197e 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -194,7 +194,7 @@ static int psb_save_display_registers(struct drm_device *dev)
194 regs->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT); 194 regs->saveCHICKENBIT = PSB_RVDC32(DSPCHICKENBIT);
195 195
196 /* Save crtc and output state */ 196 /* Save crtc and output state */
197 mutex_lock(&dev->mode_config.mutex); 197 drm_modeset_lock_all(dev);
198 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 198 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
199 if (drm_helper_crtc_in_use(crtc)) 199 if (drm_helper_crtc_in_use(crtc))
200 crtc->funcs->save(crtc); 200 crtc->funcs->save(crtc);
@@ -204,7 +204,7 @@ static int psb_save_display_registers(struct drm_device *dev)
204 if (connector->funcs->save) 204 if (connector->funcs->save)
205 connector->funcs->save(connector); 205 connector->funcs->save(connector);
206 206
207 mutex_unlock(&dev->mode_config.mutex); 207 drm_modeset_unlock_all(dev);
208 return 0; 208 return 0;
209} 209}
210 210
@@ -234,7 +234,7 @@ static int psb_restore_display_registers(struct drm_device *dev)
234 /*make sure VGA plane is off. it initializes to on after reset!*/ 234 /*make sure VGA plane is off. it initializes to on after reset!*/
235 PSB_WVDC32(0x80000000, VGACNTRL); 235 PSB_WVDC32(0x80000000, VGACNTRL);
236 236
237 mutex_lock(&dev->mode_config.mutex); 237 drm_modeset_lock_all(dev);
238 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 238 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
239 if (drm_helper_crtc_in_use(crtc)) 239 if (drm_helper_crtc_in_use(crtc))
240 crtc->funcs->restore(crtc); 240 crtc->funcs->restore(crtc);
@@ -243,7 +243,7 @@ static int psb_restore_display_registers(struct drm_device *dev)
243 if (connector->funcs->restore) 243 if (connector->funcs->restore)
244 connector->funcs->restore(connector); 244 connector->funcs->restore(connector);
245 245
246 mutex_unlock(&dev->mode_config.mutex); 246 drm_modeset_unlock_all(dev);
247 return 0; 247 return 0;
248} 248}
249 249
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index dd1fbfa7e467..111e3df9c5de 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -149,6 +149,16 @@ static struct drm_ioctl_desc psb_ioctls[] = {
149 149
150static void psb_lastclose(struct drm_device *dev) 150static void psb_lastclose(struct drm_device *dev)
151{ 151{
152 int ret;
153 struct drm_psb_private *dev_priv = dev->dev_private;
154 struct psb_fbdev *fbdev = dev_priv->fbdev;
155
156 drm_modeset_lock_all(dev);
157 ret = drm_fb_helper_restore_fbdev_mode(&fbdev->psb_fb_helper);
158 if (ret)
159 DRM_DEBUG("failed to restore crtc mode\n");
160 drm_modeset_unlock_all(dev);
161
152 return; 162 return;
153} 163}
154 164
@@ -476,7 +486,7 @@ static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
476 case PSB_MODE_OPERATION_MODE_VALID: 486 case PSB_MODE_OPERATION_MODE_VALID:
477 umode = &arg->mode; 487 umode = &arg->mode;
478 488
479 mutex_lock(&dev->mode_config.mutex); 489 drm_modeset_lock_all(dev);
480 490
481 obj = drm_mode_object_find(dev, obj_id, 491 obj = drm_mode_object_find(dev, obj_id,
482 DRM_MODE_OBJECT_CONNECTOR); 492 DRM_MODE_OBJECT_CONNECTOR);
@@ -525,7 +535,7 @@ static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
525 if (mode) 535 if (mode)
526 drm_mode_destroy(dev, mode); 536 drm_mode_destroy(dev, mode);
527mode_op_out: 537mode_op_out:
528 mutex_unlock(&dev->mode_config.mutex); 538 drm_modeset_unlock_all(dev);
529 return ret; 539 return ret;
530 540
531 default: 541 default:
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 8033526bb53b..9edb1902a096 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -85,14 +85,14 @@ struct psb_intel_limit_t {
85#define I9XX_DOT_MAX 400000 85#define I9XX_DOT_MAX 400000
86#define I9XX_VCO_MIN 1400000 86#define I9XX_VCO_MIN 1400000
87#define I9XX_VCO_MAX 2800000 87#define I9XX_VCO_MAX 2800000
88#define I9XX_N_MIN 3 88#define I9XX_N_MIN 1
89#define I9XX_N_MAX 8 89#define I9XX_N_MAX 6
90#define I9XX_M_MIN 70 90#define I9XX_M_MIN 70
91#define I9XX_M_MAX 120 91#define I9XX_M_MAX 120
92#define I9XX_M1_MIN 10 92#define I9XX_M1_MIN 8
93#define I9XX_M1_MAX 20 93#define I9XX_M1_MAX 18
94#define I9XX_M2_MIN 5 94#define I9XX_M2_MIN 3
95#define I9XX_M2_MAX 9 95#define I9XX_M2_MAX 7
96#define I9XX_P_SDVO_DAC_MIN 5 96#define I9XX_P_SDVO_DAC_MIN 5
97#define I9XX_P_SDVO_DAC_MAX 80 97#define I9XX_P_SDVO_DAC_MAX 80
98#define I9XX_P_LVDS_MIN 7 98#define I9XX_P_LVDS_MIN 7
diff --git a/drivers/gpu/drm/i2c/Kconfig b/drivers/gpu/drm/i2c/Kconfig
new file mode 100644
index 000000000000..4d341db462a2
--- /dev/null
+++ b/drivers/gpu/drm/i2c/Kconfig
@@ -0,0 +1,28 @@
1menu "I2C encoder or helper chips"
2 depends on DRM && DRM_KMS_HELPER && I2C
3
4config DRM_I2C_CH7006
5 tristate "Chrontel ch7006 TV encoder"
6 default m if DRM_NOUVEAU
7 help
8 Support for Chrontel ch7006 and similar TV encoders, found
9 on some nVidia video cards.
10
11 This driver is currently only useful if you're also using
12 the nouveau driver.
13
14config DRM_I2C_SIL164
15 tristate "Silicon Image sil164 TMDS transmitter"
16 default m if DRM_NOUVEAU
17 help
18 Support for sil164 and similar single-link (or dual-link
19 when used in pairs) TMDS transmitters, used in some nVidia
20 video cards.
21
22config DRM_I2C_NXP_TDA998X
23 tristate "NXP Semiconductors TDA998X HDMI encoder"
24 default m if DRM_TILCDC
25 help
26 Support for NXP Semiconductors TDA998X HDMI encoders.
27
28endmenu
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
index 92862563e7ee..43aa33baebed 100644
--- a/drivers/gpu/drm/i2c/Makefile
+++ b/drivers/gpu/drm/i2c/Makefile
@@ -5,3 +5,6 @@ obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
5 5
6sil164-y := sil164_drv.o 6sil164-y := sil164_drv.o
7obj-$(CONFIG_DRM_I2C_SIL164) += sil164.o 7obj-$(CONFIG_DRM_I2C_SIL164) += sil164.o
8
9tda998x-y := tda998x_drv.o
10obj-$(CONFIG_DRM_I2C_NXP_TDA998X) += tda998x.o
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index b865d0728e28..51fa32392029 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -364,7 +364,7 @@ static int ch7006_encoder_set_property(struct drm_encoder *encoder,
364 .crtc = crtc, 364 .crtc = crtc,
365 }; 365 };
366 366
367 crtc->funcs->set_config(&modeset); 367 drm_mode_set_config_internal(&modeset);
368 } 368 }
369 } 369 }
370 370
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
new file mode 100644
index 000000000000..e68b58a1aaf9
--- /dev/null
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -0,0 +1,906 @@
1/*
2 * Copyright (C) 2012 Texas Instruments
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18
19
20#include <linux/module.h>
21
22#include <drm/drmP.h>
23#include <drm/drm_crtc_helper.h>
24#include <drm/drm_encoder_slave.h>
25#include <drm/drm_edid.h>
26
27
28#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
29
30struct tda998x_priv {
31 struct i2c_client *cec;
32 uint16_t rev;
33 uint8_t current_page;
34 int dpms;
35};
36
37#define to_tda998x_priv(x) ((struct tda998x_priv *)to_encoder_slave(x)->slave_priv)
38
39/* The TDA9988 series of devices use a paged register scheme. To simplify
40 * things we encode the page # in the upper bits of the register #. To read or
41 * write a given register, we need to make sure the CURPAGE register is set
42 * appropriately, which implies reads/writes are not atomic. Fun!
43 */
44
45#define REG(page, addr) (((page) << 8) | (addr))
46#define REG2ADDR(reg) ((reg) & 0xff)
47#define REG2PAGE(reg) (((reg) >> 8) & 0xff)
48
49#define REG_CURPAGE 0xff /* write */
50
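
REG() packs the page number into bits 8-15 of a 16-bit register token, and REG2PAGE()/REG2ADDR() recover the two halves; set_page() further down then rewrites CURPAGE only when the page actually changes. A quick standalone demonstration of the encoding, using REG_PLL_SERIAL_2 (page 02h, offset 01h) as the example:

    #include <stdio.h>
    #include <stdint.h>

    /* Same macros as above, repeated outside the driver for illustration. */
    #define REG(page, addr) (((page) << 8) | (addr))
    #define REG2ADDR(reg)   ((reg) & 0xff)
    #define REG2PAGE(reg)   (((reg) >> 8) & 0xff)

    int main(void)
    {
        uint16_t r = REG(0x02, 0x01); /* REG_PLL_SERIAL_2 */
        printf("reg=0x%04x page=0x%02x addr=0x%02x\n",
               r, REG2PAGE(r), REG2ADDR(r));
        return 0;
    }
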
51
52/* Page 00h: General Control */
53#define REG_VERSION_LSB REG(0x00, 0x00) /* read */
54#define REG_MAIN_CNTRL0 REG(0x00, 0x01) /* read/write */
55# define MAIN_CNTRL0_SR (1 << 0)
56# define MAIN_CNTRL0_DECS (1 << 1)
57# define MAIN_CNTRL0_DEHS (1 << 2)
58# define MAIN_CNTRL0_CECS (1 << 3)
59# define MAIN_CNTRL0_CEHS (1 << 4)
60# define MAIN_CNTRL0_SCALER (1 << 7)
61#define REG_VERSION_MSB REG(0x00, 0x02) /* read */
62#define REG_SOFTRESET REG(0x00, 0x0a) /* write */
63# define SOFTRESET_AUDIO (1 << 0)
64# define SOFTRESET_I2C_MASTER (1 << 1)
65#define REG_DDC_DISABLE REG(0x00, 0x0b) /* read/write */
66#define REG_CCLK_ON REG(0x00, 0x0c) /* read/write */
67#define REG_I2C_MASTER REG(0x00, 0x0d) /* read/write */
68# define I2C_MASTER_DIS_MM (1 << 0)
69# define I2C_MASTER_DIS_FILT (1 << 1)
70# define I2C_MASTER_APP_STRT_LAT (1 << 2)
71#define REG_INT_FLAGS_0 REG(0x00, 0x0f) /* read/write */
72#define REG_INT_FLAGS_1 REG(0x00, 0x10) /* read/write */
73#define REG_INT_FLAGS_2 REG(0x00, 0x11) /* read/write */
74# define INT_FLAGS_2_EDID_BLK_RD (1 << 1)
75#define REG_ENA_VP_0 REG(0x00, 0x18) /* read/write */
76#define REG_ENA_VP_1 REG(0x00, 0x19) /* read/write */
77#define REG_ENA_VP_2 REG(0x00, 0x1a) /* read/write */
78#define REG_ENA_AP REG(0x00, 0x1e) /* read/write */
79#define REG_VIP_CNTRL_0 REG(0x00, 0x20) /* write */
80# define VIP_CNTRL_0_MIRR_A (1 << 7)
81# define VIP_CNTRL_0_SWAP_A(x) (((x) & 7) << 4)
82# define VIP_CNTRL_0_MIRR_B (1 << 3)
83# define VIP_CNTRL_0_SWAP_B(x) (((x) & 7) << 0)
84#define REG_VIP_CNTRL_1 REG(0x00, 0x21) /* write */
85# define VIP_CNTRL_1_MIRR_C (1 << 7)
86# define VIP_CNTRL_1_SWAP_C(x) (((x) & 7) << 4)
87# define VIP_CNTRL_1_MIRR_D (1 << 3)
88# define VIP_CNTRL_1_SWAP_D(x) (((x) & 7) << 0)
89#define REG_VIP_CNTRL_2 REG(0x00, 0x22) /* write */
90# define VIP_CNTRL_2_MIRR_E (1 << 7)
91# define VIP_CNTRL_2_SWAP_E(x) (((x) & 7) << 4)
92# define VIP_CNTRL_2_MIRR_F (1 << 3)
93# define VIP_CNTRL_2_SWAP_F(x) (((x) & 7) << 0)
94#define REG_VIP_CNTRL_3 REG(0x00, 0x23) /* write */
95# define VIP_CNTRL_3_X_TGL (1 << 0)
96# define VIP_CNTRL_3_H_TGL (1 << 1)
97# define VIP_CNTRL_3_V_TGL (1 << 2)
98# define VIP_CNTRL_3_EMB (1 << 3)
99# define VIP_CNTRL_3_SYNC_DE (1 << 4)
100# define VIP_CNTRL_3_SYNC_HS (1 << 5)
101# define VIP_CNTRL_3_DE_INT (1 << 6)
102# define VIP_CNTRL_3_EDGE (1 << 7)
103#define REG_VIP_CNTRL_4 REG(0x00, 0x24) /* write */
104# define VIP_CNTRL_4_BLC(x) (((x) & 3) << 0)
105# define VIP_CNTRL_4_BLANKIT(x) (((x) & 3) << 2)
106# define VIP_CNTRL_4_CCIR656 (1 << 4)
107# define VIP_CNTRL_4_656_ALT (1 << 5)
108# define VIP_CNTRL_4_TST_656 (1 << 6)
109# define VIP_CNTRL_4_TST_PAT (1 << 7)
110#define REG_VIP_CNTRL_5 REG(0x00, 0x25) /* write */
111# define VIP_CNTRL_5_CKCASE (1 << 0)
112# define VIP_CNTRL_5_SP_CNT(x) (((x) & 3) << 1)
113#define REG_MAT_CONTRL REG(0x00, 0x80) /* write */
114# define MAT_CONTRL_MAT_SC(x) (((x) & 3) << 0)
115# define MAT_CONTRL_MAT_BP (1 << 2)
116#define REG_VIDFORMAT REG(0x00, 0xa0) /* write */
117#define REG_REFPIX_MSB REG(0x00, 0xa1) /* write */
118#define REG_REFPIX_LSB REG(0x00, 0xa2) /* write */
119#define REG_REFLINE_MSB REG(0x00, 0xa3) /* write */
120#define REG_REFLINE_LSB REG(0x00, 0xa4) /* write */
121#define REG_NPIX_MSB REG(0x00, 0xa5) /* write */
122#define REG_NPIX_LSB REG(0x00, 0xa6) /* write */
123#define REG_NLINE_MSB REG(0x00, 0xa7) /* write */
124#define REG_NLINE_LSB REG(0x00, 0xa8) /* write */
125#define REG_VS_LINE_STRT_1_MSB REG(0x00, 0xa9) /* write */
126#define REG_VS_LINE_STRT_1_LSB REG(0x00, 0xaa) /* write */
127#define REG_VS_PIX_STRT_1_MSB REG(0x00, 0xab) /* write */
128#define REG_VS_PIX_STRT_1_LSB REG(0x00, 0xac) /* write */
129#define REG_VS_LINE_END_1_MSB REG(0x00, 0xad) /* write */
130#define REG_VS_LINE_END_1_LSB REG(0x00, 0xae) /* write */
131#define REG_VS_PIX_END_1_MSB REG(0x00, 0xaf) /* write */
132#define REG_VS_PIX_END_1_LSB REG(0x00, 0xb0) /* write */
133#define REG_VS_PIX_STRT_2_MSB REG(0x00, 0xb3) /* write */
134#define REG_VS_PIX_STRT_2_LSB REG(0x00, 0xb4) /* write */
135#define REG_VS_PIX_END_2_MSB REG(0x00, 0xb7) /* write */
136#define REG_VS_PIX_END_2_LSB REG(0x00, 0xb8) /* write */
137#define REG_HS_PIX_START_MSB REG(0x00, 0xb9) /* write */
138#define REG_HS_PIX_START_LSB REG(0x00, 0xba) /* write */
139#define REG_HS_PIX_STOP_MSB REG(0x00, 0xbb) /* write */
140#define REG_HS_PIX_STOP_LSB REG(0x00, 0xbc) /* write */
141#define REG_VWIN_START_1_MSB REG(0x00, 0xbd) /* write */
142#define REG_VWIN_START_1_LSB REG(0x00, 0xbe) /* write */
143#define REG_VWIN_END_1_MSB REG(0x00, 0xbf) /* write */
144#define REG_VWIN_END_1_LSB REG(0x00, 0xc0) /* write */
145#define REG_DE_START_MSB REG(0x00, 0xc5) /* write */
146#define REG_DE_START_LSB REG(0x00, 0xc6) /* write */
147#define REG_DE_STOP_MSB REG(0x00, 0xc7) /* write */
148#define REG_DE_STOP_LSB REG(0x00, 0xc8) /* write */
149#define REG_TBG_CNTRL_0 REG(0x00, 0xca) /* write */
150# define TBG_CNTRL_0_FRAME_DIS (1 << 5)
151# define TBG_CNTRL_0_SYNC_MTHD (1 << 6)
152# define TBG_CNTRL_0_SYNC_ONCE (1 << 7)
153#define REG_TBG_CNTRL_1 REG(0x00, 0xcb) /* write */
154# define TBG_CNTRL_1_VH_TGL_0 (1 << 0)
155# define TBG_CNTRL_1_VH_TGL_1 (1 << 1)
156# define TBG_CNTRL_1_VH_TGL_2 (1 << 2)
157# define TBG_CNTRL_1_VHX_EXT_DE (1 << 3)
158# define TBG_CNTRL_1_VHX_EXT_HS (1 << 4)
159# define TBG_CNTRL_1_VHX_EXT_VS (1 << 5)
160# define TBG_CNTRL_1_DWIN_DIS (1 << 6)
161#define REG_ENABLE_SPACE REG(0x00, 0xd6) /* write */
162#define REG_HVF_CNTRL_0 REG(0x00, 0xe4) /* write */
163# define HVF_CNTRL_0_SM (1 << 7)
164# define HVF_CNTRL_0_RWB (1 << 6)
165# define HVF_CNTRL_0_PREFIL(x) (((x) & 3) << 2)
166# define HVF_CNTRL_0_INTPOL(x) (((x) & 3) << 0)
167#define REG_HVF_CNTRL_1 REG(0x00, 0xe5) /* write */
168# define HVF_CNTRL_1_FOR (1 << 0)
169# define HVF_CNTRL_1_YUVBLK (1 << 1)
170# define HVF_CNTRL_1_VQR(x) (((x) & 3) << 2)
171# define HVF_CNTRL_1_PAD(x) (((x) & 3) << 4)
172# define HVF_CNTRL_1_SEMI_PLANAR (1 << 6)
173#define REG_RPT_CNTRL REG(0x00, 0xf0) /* write */
174
175
176/* Page 02h: PLL settings */
177#define REG_PLL_SERIAL_1 REG(0x02, 0x00) /* read/write */
178# define PLL_SERIAL_1_SRL_FDN (1 << 0)
179# define PLL_SERIAL_1_SRL_IZ(x) (((x) & 3) << 1)
180# define PLL_SERIAL_1_SRL_MAN_IZ (1 << 6)
181#define REG_PLL_SERIAL_2 REG(0x02, 0x01) /* read/write */
182# define PLL_SERIAL_2_SRL_NOSC(x) (((x) & 3) << 0)
183# define PLL_SERIAL_2_SRL_PR(x) (((x) & 0xf) << 4)
184#define REG_PLL_SERIAL_3 REG(0x02, 0x02) /* read/write */
185# define PLL_SERIAL_3_SRL_CCIR (1 << 0)
186# define PLL_SERIAL_3_SRL_DE (1 << 2)
187# define PLL_SERIAL_3_SRL_PXIN_SEL (1 << 4)
188#define REG_SERIALIZER REG(0x02, 0x03) /* read/write */
189#define REG_BUFFER_OUT REG(0x02, 0x04) /* read/write */
190#define REG_PLL_SCG1 REG(0x02, 0x05) /* read/write */
191#define REG_PLL_SCG2 REG(0x02, 0x06) /* read/write */
192#define REG_PLL_SCGN1 REG(0x02, 0x07) /* read/write */
193#define REG_PLL_SCGN2 REG(0x02, 0x08) /* read/write */
194#define REG_PLL_SCGR1 REG(0x02, 0x09) /* read/write */
195#define REG_PLL_SCGR2 REG(0x02, 0x0a) /* read/write */
196#define REG_AUDIO_DIV REG(0x02, 0x0e) /* read/write */
197#define REG_SEL_CLK REG(0x02, 0x11) /* read/write */
198# define SEL_CLK_SEL_CLK1 (1 << 0)
199# define SEL_CLK_SEL_VRF_CLK(x) (((x) & 3) << 1)
200# define SEL_CLK_ENA_SC_CLK (1 << 3)
201#define REG_ANA_GENERAL REG(0x02, 0x12) /* read/write */
202
203
204/* Page 09h: EDID Control */
205#define REG_EDID_DATA_0 REG(0x09, 0x00) /* read */
206/* next 127 successive registers are the EDID block */
207#define REG_EDID_CTRL REG(0x09, 0xfa) /* read/write */
208#define REG_DDC_ADDR REG(0x09, 0xfb) /* read/write */
209#define REG_DDC_OFFS REG(0x09, 0xfc) /* read/write */
210#define REG_DDC_SEGM_ADDR REG(0x09, 0xfd) /* read/write */
211#define REG_DDC_SEGM REG(0x09, 0xfe) /* read/write */
212
213
214/* Page 10h: information frames and packets */
215
216
217/* Page 11h: audio settings and content info packets */
218#define REG_AIP_CNTRL_0 REG(0x11, 0x00) /* read/write */
219# define AIP_CNTRL_0_RST_FIFO (1 << 0)
220# define AIP_CNTRL_0_SWAP (1 << 1)
221# define AIP_CNTRL_0_LAYOUT (1 << 2)
222# define AIP_CNTRL_0_ACR_MAN (1 << 5)
223# define AIP_CNTRL_0_RST_CTS (1 << 6)
224#define REG_ENC_CNTRL REG(0x11, 0x0d) /* read/write */
225# define ENC_CNTRL_RST_ENC (1 << 0)
226# define ENC_CNTRL_RST_SEL (1 << 1)
227# define ENC_CNTRL_CTL_CODE(x) (((x) & 3) << 2)
228
229
230/* Page 12h: HDCP and OTP */
231#define REG_TX3 REG(0x12, 0x9a) /* read/write */
232#define REG_TX33 REG(0x12, 0xb8) /* read/write */
233# define TX33_HDMI (1 << 1)
234
235
236/* Page 13h: Gamut related metadata packets */
237
238
239
240/* CEC registers: (not paged)
241 */
242#define REG_CEC_FRO_IM_CLK_CTRL 0xfb /* read/write */
243# define CEC_FRO_IM_CLK_CTRL_GHOST_DIS (1 << 7)
244# define CEC_FRO_IM_CLK_CTRL_ENA_OTP (1 << 6)
245# define CEC_FRO_IM_CLK_CTRL_IMCLK_SEL (1 << 1)
246# define CEC_FRO_IM_CLK_CTRL_FRO_DIV (1 << 0)
247#define REG_CEC_RXSHPDLEV 0xfe /* read */
248# define CEC_RXSHPDLEV_RXSENS (1 << 0)
249# define CEC_RXSHPDLEV_HPD (1 << 1)
250
251#define REG_CEC_ENAMODS 0xff /* read/write */
252# define CEC_ENAMODS_DIS_FRO (1 << 6)
253# define CEC_ENAMODS_DIS_CCLK (1 << 5)
254# define CEC_ENAMODS_EN_RXSENS (1 << 2)
255# define CEC_ENAMODS_EN_HDMI (1 << 1)
256# define CEC_ENAMODS_EN_CEC (1 << 0)
257
258
259/* Device versions: */
260#define TDA9989N2 0x0101
261#define TDA19989 0x0201
262#define TDA19989N2 0x0202
263#define TDA19988 0x0301
264
265static void
266cec_write(struct drm_encoder *encoder, uint16_t addr, uint8_t val)
267{
268 struct i2c_client *client = to_tda998x_priv(encoder)->cec;
269 uint8_t buf[] = {addr, val};
270 int ret;
271
272 ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
273 if (ret < 0)
274 dev_err(&client->dev, "Error %d writing to cec:0x%x\n", ret, addr);
275}
276
277static uint8_t
278cec_read(struct drm_encoder *encoder, uint8_t addr)
279{
280 struct i2c_client *client = to_tda998x_priv(encoder)->cec;
281 uint8_t val;
282 int ret;
283
284 ret = i2c_master_send(client, &addr, sizeof(addr));
285 if (ret < 0)
286 goto fail;
287
288 ret = i2c_master_recv(client, &val, sizeof(val));
289 if (ret < 0)
290 goto fail;
291
292 return val;
293
294fail:
295 dev_err(&client->dev, "Error %d reading from cec:0x%x\n", ret, addr);
296 return 0;
297}
298
299static void
300set_page(struct drm_encoder *encoder, uint16_t reg)
301{
302 struct tda998x_priv *priv = to_tda998x_priv(encoder);
303
304 if (REG2PAGE(reg) != priv->current_page) {
305 struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
306 uint8_t buf[] = {
307 REG_CURPAGE, REG2PAGE(reg)
308 };
309 int ret = i2c_master_send(client, buf, sizeof(buf));
310 if (ret < 0)
311 dev_err(&client->dev, "Error %d writing to REG_CURPAGE\n", ret);
312
313 priv->current_page = REG2PAGE(reg);
314 }
315}
316
317static int
318reg_read_range(struct drm_encoder *encoder, uint16_t reg, char *buf, int cnt)
319{
320 struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
321 uint8_t addr = REG2ADDR(reg);
322 int ret;
323
324 set_page(encoder, reg);
325
326 ret = i2c_master_send(client, &addr, sizeof(addr));
327 if (ret < 0)
328 goto fail;
329
330 ret = i2c_master_recv(client, buf, cnt);
331 if (ret < 0)
332 goto fail;
333
334 return ret;
335
336fail:
337 dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg);
338 return ret;
339}
340
341static uint8_t
342reg_read(struct drm_encoder *encoder, uint16_t reg)
343{
344 uint8_t val = 0;
345 reg_read_range(encoder, reg, &val, sizeof(val));
346 return val;
347}
348
349static void
350reg_write(struct drm_encoder *encoder, uint16_t reg, uint8_t val)
351{
352 struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
353 uint8_t buf[] = {REG2ADDR(reg), val};
354 int ret;
355
356 set_page(encoder, reg);
357
358 ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
359 if (ret < 0)
360 dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
361}
362
363static void
364reg_write16(struct drm_encoder *encoder, uint16_t reg, uint16_t val)
365{
366 struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
367 uint8_t buf[] = {REG2ADDR(reg), val >> 8, val};
368 int ret;
369
370 set_page(encoder, reg);
371
372 ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
373 if (ret < 0)
374 dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
375}
376
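
reg_write16() sends the register address followed by the value MSB-first, so the named register and the one after it receive the high and low bytes respectively (hence the paired _MSB/_LSB defines above). A sketch of the resulting I2C payload; no bus access, and the address and value are illustrative:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t addr = 0xa5;     /* e.g. the NPIX register offset */
        uint16_t val = 1920 - 1; /* hdisplay - 1 */
        uint8_t buf[] = { addr, val >> 8, val & 0xff };

        printf("i2c payload: %02x %02x %02x\n", buf[0], buf[1], buf[2]);
        return 0;
    }
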
377static void
378reg_set(struct drm_encoder *encoder, uint16_t reg, uint8_t val)
379{
380 reg_write(encoder, reg, reg_read(encoder, reg) | val);
381}
382
383static void
384reg_clear(struct drm_encoder *encoder, uint16_t reg, uint8_t val)
385{
386 reg_write(encoder, reg, reg_read(encoder, reg) & ~val);
387}
388
389static void
390tda998x_reset(struct drm_encoder *encoder)
391{
392 /* reset audio and i2c master: */
393 reg_set(encoder, REG_SOFTRESET, SOFTRESET_AUDIO | SOFTRESET_I2C_MASTER);
394 msleep(50);
395 reg_clear(encoder, REG_SOFTRESET, SOFTRESET_AUDIO | SOFTRESET_I2C_MASTER);
396 msleep(50);
397
398 /* reset transmitter: */
399 reg_set(encoder, REG_MAIN_CNTRL0, MAIN_CNTRL0_SR);
400 reg_clear(encoder, REG_MAIN_CNTRL0, MAIN_CNTRL0_SR);
401
402 /* PLL registers common configuration */
403 reg_write(encoder, REG_PLL_SERIAL_1, 0x00);
404 reg_write(encoder, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(1));
405 reg_write(encoder, REG_PLL_SERIAL_3, 0x00);
406 reg_write(encoder, REG_SERIALIZER, 0x00);
407 reg_write(encoder, REG_BUFFER_OUT, 0x00);
408 reg_write(encoder, REG_PLL_SCG1, 0x00);
409 reg_write(encoder, REG_AUDIO_DIV, 0x03);
410 reg_write(encoder, REG_SEL_CLK, SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK);
411 reg_write(encoder, REG_PLL_SCGN1, 0xfa);
412 reg_write(encoder, REG_PLL_SCGN2, 0x00);
413 reg_write(encoder, REG_PLL_SCGR1, 0x5b);
414 reg_write(encoder, REG_PLL_SCGR2, 0x00);
415 reg_write(encoder, REG_PLL_SCG2, 0x10);
416}
417
418/* DRM encoder functions */
419
420static void
421tda998x_encoder_set_config(struct drm_encoder *encoder, void *params)
422{
423}
424
425static void
426tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
427{
428 struct tda998x_priv *priv = to_tda998x_priv(encoder);
429
430 /* we only care about on or off: */
431 if (mode != DRM_MODE_DPMS_ON)
432 mode = DRM_MODE_DPMS_OFF;
433
434 if (mode == priv->dpms)
435 return;
436
437 switch (mode) {
438 case DRM_MODE_DPMS_ON:
439 /* enable audio and video ports */
440 reg_write(encoder, REG_ENA_AP, 0xff);
441 reg_write(encoder, REG_ENA_VP_0, 0xff);
442 reg_write(encoder, REG_ENA_VP_1, 0xff);
443 reg_write(encoder, REG_ENA_VP_2, 0xff);
444 /* set muxing after enabling ports: */
445 reg_write(encoder, REG_VIP_CNTRL_0,
446 VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3));
447 reg_write(encoder, REG_VIP_CNTRL_1,
448 VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1));
449 reg_write(encoder, REG_VIP_CNTRL_2,
450 VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5));
451 break;
452 case DRM_MODE_DPMS_OFF:
453 /* disable audio and video ports */
454 reg_write(encoder, REG_ENA_AP, 0x00);
455 reg_write(encoder, REG_ENA_VP_0, 0x00);
456 reg_write(encoder, REG_ENA_VP_1, 0x00);
457 reg_write(encoder, REG_ENA_VP_2, 0x00);
458 break;
459 }
460
461 priv->dpms = mode;
462}
463
464static void
465tda998x_encoder_save(struct drm_encoder *encoder)
466{
467 DBG("");
468}
469
470static void
471tda998x_encoder_restore(struct drm_encoder *encoder)
472{
473 DBG("");
474}
475
476static bool
477tda998x_encoder_mode_fixup(struct drm_encoder *encoder,
478 const struct drm_display_mode *mode,
479 struct drm_display_mode *adjusted_mode)
480{
481 return true;
482}
483
484static int
485tda998x_encoder_mode_valid(struct drm_encoder *encoder,
486 struct drm_display_mode *mode)
487{
488 return MODE_OK;
489}
490
491static void
492tda998x_encoder_mode_set(struct drm_encoder *encoder,
493 struct drm_display_mode *mode,
494 struct drm_display_mode *adjusted_mode)
495{
496 struct tda998x_priv *priv = to_tda998x_priv(encoder);
497 uint16_t hs_start, hs_end, line_start, line_end;
498 uint16_t vwin_start, vwin_end, de_start, de_end;
499 uint16_t ref_pix, ref_line, pix_start2;
500 uint8_t reg, div, rep;
501
502 hs_start = mode->hsync_start - mode->hdisplay;
503 hs_end = mode->hsync_end - mode->hdisplay;
504 line_start = 1;
505 line_end = 1 + mode->vsync_end - mode->vsync_start;
506 vwin_start = mode->vtotal - mode->vsync_start;
507 vwin_end = vwin_start + mode->vdisplay;
508 de_start = mode->htotal - mode->hdisplay;
509 de_end = mode->htotal;
510
511 pix_start2 = 0;
512 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
513 pix_start2 = (mode->htotal / 2) + hs_start;
514
515 /* TODO how is this value calculated? It is 2 for all common
516 * formats in the tables in the out-of-tree NXP driver (assuming
517 * I've properly deciphered their byzantine table system)
518 */
519 ref_line = 2;
520
521 /* this might change for other color formats from the CRTC: */
522 ref_pix = 3 + hs_start;
523
524 div = 148500 / mode->clock;
525
526 DBG("clock=%d, div=%u", mode->clock, div);
527 DBG("hs_start=%u, hs_end=%u, line_start=%u, line_end=%u",
528 hs_start, hs_end, line_start, line_end);
529 DBG("vwin_start=%u, vwin_end=%u, de_start=%u, de_end=%u",
530 vwin_start, vwin_end, de_start, de_end);
531 DBG("ref_line=%u, ref_pix=%u, pix_start2=%u",
532 ref_line, ref_pix, pix_start2);
533
534 /* mute the audio FIFO: */
535 reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
536
537 /* set HDMI HDCP mode off: */
538 reg_set(encoder, REG_TBG_CNTRL_1, TBG_CNTRL_1_DWIN_DIS);
539 reg_clear(encoder, REG_TX33, TX33_HDMI);
540
541 reg_write(encoder, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(0));
542 /* no pre-filter or interpolator: */
543 reg_write(encoder, REG_HVF_CNTRL_0, HVF_CNTRL_0_PREFIL(0) |
544 HVF_CNTRL_0_INTPOL(0));
545 reg_write(encoder, REG_VIP_CNTRL_5, VIP_CNTRL_5_SP_CNT(0));
546 reg_write(encoder, REG_VIP_CNTRL_4, VIP_CNTRL_4_BLANKIT(0) |
547 VIP_CNTRL_4_BLC(0));
548 reg_clear(encoder, REG_PLL_SERIAL_3, PLL_SERIAL_3_SRL_CCIR);
549
550 reg_clear(encoder, REG_PLL_SERIAL_1, PLL_SERIAL_1_SRL_MAN_IZ);
551 reg_clear(encoder, REG_PLL_SERIAL_3, PLL_SERIAL_3_SRL_DE);
552 reg_write(encoder, REG_SERIALIZER, 0);
553 reg_write(encoder, REG_HVF_CNTRL_1, HVF_CNTRL_1_VQR(0));
554
555 /* TODO enable pixel repeat for pixel rates less than 25Msamp/s */
556 rep = 0;
557 reg_write(encoder, REG_RPT_CNTRL, 0);
558 reg_write(encoder, REG_SEL_CLK, SEL_CLK_SEL_VRF_CLK(0) |
559 SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK);
560
561 reg_write(encoder, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(div) |
562 PLL_SERIAL_2_SRL_PR(rep));
563
564 reg_write16(encoder, REG_VS_PIX_STRT_2_MSB, pix_start2);
565 reg_write16(encoder, REG_VS_PIX_END_2_MSB, pix_start2);
566
567 /* set color matrix bypass flag: */
568 reg_set(encoder, REG_MAT_CONTRL, MAT_CONTRL_MAT_BP);
569
570 /* set BIAS tmds value: */
571 reg_write(encoder, REG_ANA_GENERAL, 0x09);
572
573 reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_MTHD);
574
575 reg_write(encoder, REG_VIP_CNTRL_3, 0);
576 reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_SYNC_HS);
577 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
578 reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_V_TGL);
579
580 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
581 reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_H_TGL);
582
583 reg_write(encoder, REG_VIDFORMAT, 0x00);
584 reg_write16(encoder, REG_NPIX_MSB, mode->hdisplay - 1);
585 reg_write16(encoder, REG_NLINE_MSB, mode->vdisplay - 1);
586 reg_write16(encoder, REG_VS_LINE_STRT_1_MSB, line_start);
587 reg_write16(encoder, REG_VS_LINE_END_1_MSB, line_end);
588 reg_write16(encoder, REG_VS_PIX_STRT_1_MSB, hs_start);
589 reg_write16(encoder, REG_VS_PIX_END_1_MSB, hs_start);
590 reg_write16(encoder, REG_HS_PIX_START_MSB, hs_start);
591 reg_write16(encoder, REG_HS_PIX_STOP_MSB, hs_end);
592 reg_write16(encoder, REG_VWIN_START_1_MSB, vwin_start);
593 reg_write16(encoder, REG_VWIN_END_1_MSB, vwin_end);
594 reg_write16(encoder, REG_DE_START_MSB, de_start);
595 reg_write16(encoder, REG_DE_STOP_MSB, de_end);
596
597 if (priv->rev == TDA19988) {
598 /* let incoming pixels fill the active space (if any) */
599 reg_write(encoder, REG_ENABLE_SPACE, 0x01);
600 }
601
602 reg_write16(encoder, REG_REFPIX_MSB, ref_pix);
603 reg_write16(encoder, REG_REFLINE_MSB, ref_line);
604
605 reg = TBG_CNTRL_1_VHX_EXT_DE |
606 TBG_CNTRL_1_VHX_EXT_HS |
607 TBG_CNTRL_1_VHX_EXT_VS |
608 TBG_CNTRL_1_DWIN_DIS | /* HDCP off */
609 TBG_CNTRL_1_VH_TGL_2;
610 if (mode->flags & (DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC))
611 reg |= TBG_CNTRL_1_VH_TGL_0;
612 reg_set(encoder, REG_TBG_CNTRL_1, reg);
613
614 /* must be last register set: */
615 reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_ONCE);
616}
617
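
For a concrete feel of the window math in tda998x_encoder_mode_set(), the standard CEA 1920x1080@60 mode line (148.5 MHz pixel clock) works out as in this standalone sketch; the timing numbers come from the CEA-861 mode table, not from this patch:

    #include <stdio.h>

    int main(void)
    {
        int hdisplay = 1920, hsync_start = 2008, hsync_end = 2052, htotal = 2200;
        int vdisplay = 1080, vsync_start = 1084, vsync_end = 1089, vtotal = 1125;
        int clock = 148500; /* kHz */

        int hs_start   = hsync_start - hdisplay;      /* 88 */
        int hs_end     = hsync_end - hdisplay;        /* 132 */
        int line_end   = 1 + vsync_end - vsync_start; /* 6 */
        int vwin_start = vtotal - vsync_start;        /* 41 */
        int vwin_end   = vwin_start + vdisplay;       /* 1121 */
        int de_start   = htotal - hdisplay;           /* 280 */
        int div        = 148500 / clock;              /* 1 */

        printf("hs=%d..%d lines=1..%d vwin=%d..%d de=%d..%d div=%d\n",
               hs_start, hs_end, line_end, vwin_start, vwin_end,
               de_start, htotal, div);
        return 0;
    }
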
618static enum drm_connector_status
619tda998x_encoder_detect(struct drm_encoder *encoder,
620 struct drm_connector *connector)
621{
622 uint8_t val = cec_read(encoder, REG_CEC_RXSHPDLEV);
623 return (val & CEC_RXSHPDLEV_HPD) ? connector_status_connected :
624 connector_status_disconnected;
625}
626
627static int
628read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
629{
630 uint8_t offset, segptr;
631 int ret, i;
632
633 /* enable EDID read irq: */
634 reg_set(encoder, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
635
636 offset = (blk & 1) ? 128 : 0;
637 segptr = blk / 2;
638
639 reg_write(encoder, REG_DDC_ADDR, 0xa0);
640 reg_write(encoder, REG_DDC_OFFS, offset);
641 reg_write(encoder, REG_DDC_SEGM_ADDR, 0x60);
642 reg_write(encoder, REG_DDC_SEGM, segptr);
643
644 /* enable reading EDID: */
645 reg_write(encoder, REG_EDID_CTRL, 0x1);
646
647 /* flag must be cleared by sw: */
648 reg_write(encoder, REG_EDID_CTRL, 0x0);
649
650 /* wait for block read to complete: */
651 for (i = 100; i > 0; i--) {
652 uint8_t val = reg_read(encoder, REG_INT_FLAGS_2);
653 if (val & INT_FLAGS_2_EDID_BLK_RD)
654 break;
655 msleep(1);
656 }
657
658 if (i == 0)
659 return -ETIMEDOUT;
660
661 ret = reg_read_range(encoder, REG_EDID_DATA_0, buf, EDID_LENGTH);
662 if (ret != EDID_LENGTH) {
663 dev_err(encoder->dev->dev, "failed to read edid block %d: %d",
664 blk, ret);
665 return ret;
666 }
667
668 reg_clear(encoder, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
669
670 return 0;
671}
672
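
read_edid_block() maps an EDID block number onto a DDC segment pointer and byte offset: two 128-byte blocks live in each segment, and odd blocks start at offset 128. A tiny sketch of the mapping:

    #include <stdio.h>

    int main(void)
    {
        for (int blk = 0; blk < 4; blk++)
            printf("block %d -> segment %d, offset %d\n",
                   blk, blk / 2, (blk & 1) ? 128 : 0);
        return 0;
    }
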
673static uint8_t *
674do_get_edid(struct drm_encoder *encoder)
675{
676 int j = 0, valid_extensions = 0;
677 uint8_t *block, *new;
678 bool print_bad_edid = drm_debug & DRM_UT_KMS;
679
680 if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
681 return NULL;
682
683 /* base block fetch */
684 if (read_edid_block(encoder, block, 0))
685 goto fail;
686
687 if (!drm_edid_block_valid(block, 0, print_bad_edid))
688 goto fail;
689
690 /* if there are no extensions, we're done */
691 if (block[0x7e] == 0)
692 return block;
693
694 new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
695 if (!new)
696 goto fail;
697 block = new;
698
699 for (j = 1; j <= block[0x7e]; j++) {
700 uint8_t *ext_block = block + (valid_extensions + 1) * EDID_LENGTH;
701 if (read_edid_block(encoder, ext_block, j))
702 goto fail;
703
704 if (!drm_edid_block_valid(ext_block, j, print_bad_edid))
705 goto fail;
706
707 valid_extensions++;
708 }
709
710 if (valid_extensions != block[0x7e]) {
711 block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
712 block[0x7e] = valid_extensions;
713 new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
714 if (!new)
715 goto fail;
716 block = new;
717 }
718
719 return block;
720
721fail:
722 dev_warn(encoder->dev->dev, "failed to read EDID\n");
723 kfree(block);
724 return NULL;
725}
726
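
The checksum adjustment in do_get_edid() works because all 128 bytes of the base EDID block must sum to 0 mod 256, and byte 0x7e (the extension count) is part of that sum; lowering the count therefore requires raising the checksum byte by the same amount. A self-contained sketch of that invariant:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t blk[128] = { 0 };
        uint8_t sum = 0;
        int i, valid_extensions = 1; /* one of two extensions failed */

        blk[0x7e] = 2;               /* base block claims two extensions */
        for (i = 0; i < 127; i++)
            sum += blk[i];
        blk[127] = (uint8_t)(0x100 - sum); /* valid checksum */

        blk[127] += blk[0x7e] - valid_extensions; /* the fix-up above */
        blk[0x7e] = valid_extensions;

        for (sum = 0, i = 0; i < 128; i++)
            sum += blk[i];
        printf("checksum still valid: %s\n", sum == 0 ? "yes" : "no");
        return 0;
    }
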
727static int
728tda998x_encoder_get_modes(struct drm_encoder *encoder,
729 struct drm_connector *connector)
730{
731 struct edid *edid = (struct edid *)do_get_edid(encoder);
732 int n = 0;
733
734 if (edid) {
735 drm_mode_connector_update_edid_property(connector, edid);
736 n = drm_add_edid_modes(connector, edid);
737 kfree(edid);
738 }
739
740 return n;
741}
742
743static int
744tda998x_encoder_create_resources(struct drm_encoder *encoder,
745 struct drm_connector *connector)
746{
747 DBG("");
748 return 0;
749}
750
751static int
752tda998x_encoder_set_property(struct drm_encoder *encoder,
753 struct drm_connector *connector,
754 struct drm_property *property,
755 uint64_t val)
756{
757 DBG("");
758 return 0;
759}
760
761static void
762tda998x_encoder_destroy(struct drm_encoder *encoder)
763{
764 struct tda998x_priv *priv = to_tda998x_priv(encoder);
765 drm_i2c_encoder_destroy(encoder);
766 kfree(priv);
767}
768
769static struct drm_encoder_slave_funcs tda998x_encoder_funcs = {
770 .set_config = tda998x_encoder_set_config,
771 .destroy = tda998x_encoder_destroy,
772 .dpms = tda998x_encoder_dpms,
773 .save = tda998x_encoder_save,
774 .restore = tda998x_encoder_restore,
775 .mode_fixup = tda998x_encoder_mode_fixup,
776 .mode_valid = tda998x_encoder_mode_valid,
777 .mode_set = tda998x_encoder_mode_set,
778 .detect = tda998x_encoder_detect,
779 .get_modes = tda998x_encoder_get_modes,
780 .create_resources = tda998x_encoder_create_resources,
781 .set_property = tda998x_encoder_set_property,
782};
783
784/* I2C driver functions */
785
786static int
787tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id)
788{
789 return 0;
790}
791
792static int
793tda998x_remove(struct i2c_client *client)
794{
795 return 0;
796}
797
798static int
799tda998x_encoder_init(struct i2c_client *client,
800 struct drm_device *dev,
801 struct drm_encoder_slave *encoder_slave)
802{
803 struct drm_encoder *encoder = &encoder_slave->base;
804 struct tda998x_priv *priv;
805
806 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
807 if (!priv)
808 return -ENOMEM;
809
810 priv->current_page = 0;
811 priv->cec = i2c_new_dummy(client->adapter, 0x34);
812 priv->dpms = DRM_MODE_DPMS_OFF;
813
814 encoder_slave->slave_priv = priv;
815 encoder_slave->slave_funcs = &tda998x_encoder_funcs;
816
817 /* wake up the device: */
818 cec_write(encoder, REG_CEC_ENAMODS,
819 CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);
820
821 tda998x_reset(encoder);
822
823 /* read version: */
824 priv->rev = reg_read(encoder, REG_VERSION_LSB) |
825 reg_read(encoder, REG_VERSION_MSB) << 8;
826
827 /* mask off feature bits: */
828 priv->rev &= ~0x30; /* not-hdcp and not-scaler bits */
829
830 switch (priv->rev) {
831 case TDA9989N2: dev_info(dev->dev, "found TDA9989 n2"); break;
832 case TDA19989: dev_info(dev->dev, "found TDA19989"); break;
833 case TDA19989N2: dev_info(dev->dev, "found TDA19989 n2"); break;
834 case TDA19988: dev_info(dev->dev, "found TDA19988"); break;
835 default:
836 DBG("found unsupported device: %04x", priv->rev);
837 goto fail;
838 }
839
840 /* after reset, enable DDC: */
841 reg_write(encoder, REG_DDC_DISABLE, 0x00);
842
843 /* set clock on DDC channel: */
844 reg_write(encoder, REG_TX3, 39);
845
846 /* if necessary, disable multi-master: */
847 if (priv->rev == TDA19989)
848 reg_set(encoder, REG_I2C_MASTER, I2C_MASTER_DIS_MM);
849
850 cec_write(encoder, REG_CEC_FRO_IM_CLK_CTRL,
851 CEC_FRO_IM_CLK_CTRL_GHOST_DIS | CEC_FRO_IM_CLK_CTRL_IMCLK_SEL);
852
853 return 0;
854
855fail:
856 /* if encoder_init fails, the encoder slave is never registered,
857 * so cleanup here:
858 */
859 if (priv->cec)
860 i2c_unregister_device(priv->cec);
861 kfree(priv);
862 encoder_slave->slave_priv = NULL;
863 encoder_slave->slave_funcs = NULL;
864 return -ENXIO;
865}
866
867static struct i2c_device_id tda998x_ids[] = {
868 { "tda998x", 0 },
869 { }
870};
871MODULE_DEVICE_TABLE(i2c, tda998x_ids);
872
873static struct drm_i2c_encoder_driver tda998x_driver = {
874 .i2c_driver = {
875 .probe = tda998x_probe,
876 .remove = tda998x_remove,
877 .driver = {
878 .name = "tda998x",
879 },
880 .id_table = tda998x_ids,
881 },
882 .encoder_init = tda998x_encoder_init,
883};
884
885/* Module initialization */
886
887static int __init
888tda998x_init(void)
889{
890 DBG("");
891 return drm_i2c_encoder_register(THIS_MODULE, &tda998x_driver);
892}
893
894static void __exit
895tda998x_exit(void)
896{
897 DBG("");
898 drm_i2c_encoder_unregister(&tda998x_driver);
899}
900
901MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
902MODULE_DESCRIPTION("NXP Semiconductors TDA998X HDMI Encoder");
903MODULE_LICENSE("GPL");
904
905module_init(tda998x_init);
906module_exit(tda998x_exit);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0f2c5493242b..91f3ac6cef35 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -16,6 +16,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
16 i915_gem_tiling.o \ 16 i915_gem_tiling.o \
17 i915_sysfs.o \ 17 i915_sysfs.o \
18 i915_trace_points.o \ 18 i915_trace_points.o \
19 i915_ums.o \
19 intel_display.o \ 20 intel_display.o \
20 intel_crt.o \ 21 intel_crt.o \
21 intel_lvds.o \ 22 intel_lvds.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 32158d21c632..aae31489c893 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -103,7 +103,7 @@ static const char *cache_level_str(int type)
103static void 103static void
104describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 104describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
105{ 105{
106 seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d %d%s%s%s", 106 seq_printf(m, "%p: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
107 &obj->base, 107 &obj->base,
108 get_pin_flag(obj), 108 get_pin_flag(obj),
109 get_tiling_flag(obj), 109 get_tiling_flag(obj),
@@ -125,6 +125,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
125 if (obj->gtt_space != NULL) 125 if (obj->gtt_space != NULL)
126 seq_printf(m, " (gtt offset: %08x, size: %08x)", 126 seq_printf(m, " (gtt offset: %08x, size: %08x)",
127 obj->gtt_offset, (unsigned int)obj->gtt_space->size); 127 obj->gtt_offset, (unsigned int)obj->gtt_space->size);
128 if (obj->stolen)
129 seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
128 if (obj->pin_mappable || obj->fault_mappable) { 130 if (obj->pin_mappable || obj->fault_mappable) {
129 char s[3], *t = s; 131 char s[3], *t = s;
130 if (obj->pin_mappable) 132 if (obj->pin_mappable)
@@ -257,8 +259,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
257 seq_printf(m, "%u fault mappable objects, %zu bytes\n", 259 seq_printf(m, "%u fault mappable objects, %zu bytes\n",
258 count, size); 260 count, size);
259 261
260 seq_printf(m, "%zu [%zu] gtt total\n", 262 seq_printf(m, "%zu [%lu] gtt total\n",
261 dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total); 263 dev_priv->gtt.total,
264 dev_priv->gtt.mappable_end - dev_priv->gtt.start);
262 265
263 mutex_unlock(&dev->struct_mutex); 266 mutex_unlock(&dev->struct_mutex);
264 267
@@ -388,7 +391,7 @@ static void i915_ring_seqno_info(struct seq_file *m,
388 struct intel_ring_buffer *ring) 391 struct intel_ring_buffer *ring)
389{ 392{
390 if (ring->get_seqno) { 393 if (ring->get_seqno) {
391 seq_printf(m, "Current sequence (%s): %d\n", 394 seq_printf(m, "Current sequence (%s): %u\n",
392 ring->name, ring->get_seqno(ring, false)); 395 ring->name, ring->get_seqno(ring, false));
393 } 396 }
394} 397}
@@ -545,11 +548,11 @@ static int i915_hws_info(struct seq_file *m, void *data)
545 struct drm_device *dev = node->minor->dev; 548 struct drm_device *dev = node->minor->dev;
546 drm_i915_private_t *dev_priv = dev->dev_private; 549 drm_i915_private_t *dev_priv = dev->dev_private;
547 struct intel_ring_buffer *ring; 550 struct intel_ring_buffer *ring;
548 const volatile u32 __iomem *hws; 551 const u32 *hws;
549 int i; 552 int i;
550 553
551 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; 554 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
552 hws = (volatile u32 __iomem *)ring->status_page.page_addr; 555 hws = ring->status_page.page_addr;
553 if (hws == NULL) 556 if (hws == NULL)
554 return 0; 557 return 0;
555 558
@@ -609,7 +612,7 @@ static void print_error_buffers(struct seq_file *m,
609 seq_printf(m, "%s [%d]:\n", name, count); 612 seq_printf(m, "%s [%d]:\n", name, count);
610 613
611 while (count--) { 614 while (count--) {
612 seq_printf(m, " %08x %8u %04x %04x %x %x%s%s%s%s%s%s%s", 615 seq_printf(m, " %08x %8u %02x %02x %x %x%s%s%s%s%s%s%s",
613 err->gtt_offset, 616 err->gtt_offset,
614 err->size, 617 err->size,
615 err->read_domains, 618 err->read_domains,
@@ -691,7 +694,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
691 694
692 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, 695 seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
693 error->time.tv_usec); 696 error->time.tv_usec);
694 seq_printf(m, "Kernel: " UTS_RELEASE); 697 seq_printf(m, "Kernel: " UTS_RELEASE "\n");
695 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); 698 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
696 seq_printf(m, "EIR: 0x%08x\n", error->eir); 699 seq_printf(m, "EIR: 0x%08x\n", error->eir);
697 seq_printf(m, "IER: 0x%08x\n", error->ier); 700 seq_printf(m, "IER: 0x%08x\n", error->ier);
@@ -816,11 +819,11 @@ static int i915_error_state_open(struct inode *inode, struct file *file)
816 819
817 error_priv->dev = dev; 820 error_priv->dev = dev;
818 821
819 spin_lock_irqsave(&dev_priv->error_lock, flags); 822 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
820 error_priv->error = dev_priv->first_error; 823 error_priv->error = dev_priv->gpu_error.first_error;
821 if (error_priv->error) 824 if (error_priv->error)
822 kref_get(&error_priv->error->ref); 825 kref_get(&error_priv->error->ref);
823 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 826 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
824 827
825 return single_open(file, i915_error_state, error_priv); 828 return single_open(file, i915_error_state, error_priv);
826} 829}
@@ -846,6 +849,77 @@ static const struct file_operations i915_error_state_fops = {
846 .release = i915_error_state_release, 849 .release = i915_error_state_release,
847}; 850};
848 851
852static ssize_t
853i915_next_seqno_read(struct file *filp,
854 char __user *ubuf,
855 size_t max,
856 loff_t *ppos)
857{
858 struct drm_device *dev = filp->private_data;
859 drm_i915_private_t *dev_priv = dev->dev_private;
860 char buf[80];
861 int len;
862 int ret;
863
864 ret = mutex_lock_interruptible(&dev->struct_mutex);
865 if (ret)
866 return ret;
867
868 len = snprintf(buf, sizeof(buf),
869 "next_seqno : 0x%x\n",
870 dev_priv->next_seqno);
871
872 mutex_unlock(&dev->struct_mutex);
873
874 if (len > sizeof(buf))
875 len = sizeof(buf);
876
877 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
878}
879
880static ssize_t
881i915_next_seqno_write(struct file *filp,
882 const char __user *ubuf,
883 size_t cnt,
884 loff_t *ppos)
885{
886 struct drm_device *dev = filp->private_data;
887 char buf[20];
888 u32 val = 1;
889 int ret;
890
891 if (cnt > 0) {
892 if (cnt > sizeof(buf) - 1)
893 return -EINVAL;
894
895 if (copy_from_user(buf, ubuf, cnt))
896 return -EFAULT;
897 buf[cnt] = 0;
898
899 ret = kstrtouint(buf, 0, &val);
900 if (ret < 0)
901 return ret;
902 }
903
904 ret = mutex_lock_interruptible(&dev->struct_mutex);
905 if (ret)
906 return ret;
907
908 ret = i915_gem_set_seqno(dev, val);
909
910 mutex_unlock(&dev->struct_mutex);
911
912 return ret ?: cnt;
913}
914
915static const struct file_operations i915_next_seqno_fops = {
916 .owner = THIS_MODULE,
917 .open = simple_open,
918 .read = i915_next_seqno_read,
919 .write = i915_next_seqno_write,
920 .llseek = default_llseek,
921};
922
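
Since i915_next_seqno_write() parses its input with kstrtouint(..., 0, ...), both decimal and 0x-prefixed hex are accepted. A hypothetical user-space poke at the file; the debugfs path is a guess at the usual dri layout, not something this patch establishes:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /* path assumed; adjust to the actual DRM minor */
        int fd = open("/sys/kernel/debug/dri/0/i915_next_seqno", O_WRONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (write(fd, "0x1000", 6) < 0)
            perror("write");
        close(fd);
        return 0;
    }
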
849static int i915_rstdby_delays(struct seq_file *m, void *unused) 923static int i915_rstdby_delays(struct seq_file *m, void *unused)
850{ 924{
851 struct drm_info_node *node = (struct drm_info_node *) m->private; 925 struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -888,7 +962,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
888 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 962 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
889 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); 963 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
890 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 964 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
891 u32 rpstat; 965 u32 rpstat, cagf;
892 u32 rpupei, rpcurup, rpprevup; 966 u32 rpupei, rpcurup, rpprevup;
893 u32 rpdownei, rpcurdown, rpprevdown; 967 u32 rpdownei, rpcurdown, rpprevdown;
894 int max_freq; 968 int max_freq;
@@ -907,6 +981,11 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
907 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); 981 rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
908 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); 982 rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
909 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); 983 rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
984 if (IS_HASWELL(dev))
985 cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
986 else
987 cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
988 cagf *= GT_FREQUENCY_MULTIPLIER;
910 989
911 gen6_gt_force_wake_put(dev_priv); 990 gen6_gt_force_wake_put(dev_priv);
912 mutex_unlock(&dev->struct_mutex); 991 mutex_unlock(&dev->struct_mutex);
@@ -919,8 +998,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
919 gt_perf_status & 0xff); 998 gt_perf_status & 0xff);
920 seq_printf(m, "Render p-state limit: %d\n", 999 seq_printf(m, "Render p-state limit: %d\n",
921 rp_state_limits & 0xff); 1000 rp_state_limits & 0xff);
922 seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >> 1001 seq_printf(m, "CAGF: %dMHz\n", cagf);
923 GEN6_CAGF_SHIFT) * GT_FREQUENCY_MULTIPLIER);
924 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & 1002 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
925 GEN6_CURICONT_MASK); 1003 GEN6_CURICONT_MASK);
926 seq_printf(m, "RP CUR UP: %dus\n", rpcurup & 1004 seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
@@ -1372,28 +1450,31 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1372 ifbdev = dev_priv->fbdev; 1450 ifbdev = dev_priv->fbdev;
1373 fb = to_intel_framebuffer(ifbdev->helper.fb); 1451 fb = to_intel_framebuffer(ifbdev->helper.fb);
1374 1452
1375 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ", 1453 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
1376 fb->base.width, 1454 fb->base.width,
1377 fb->base.height, 1455 fb->base.height,
1378 fb->base.depth, 1456 fb->base.depth,
1379 fb->base.bits_per_pixel); 1457 fb->base.bits_per_pixel,
1458 atomic_read(&fb->base.refcount.refcount));
1380 describe_obj(m, fb->obj); 1459 describe_obj(m, fb->obj);
1381 seq_printf(m, "\n"); 1460 seq_printf(m, "\n");
1461 mutex_unlock(&dev->mode_config.mutex);
1382 1462
1463 mutex_lock(&dev->mode_config.fb_lock);
1383 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { 1464 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
1384 if (&fb->base == ifbdev->helper.fb) 1465 if (&fb->base == ifbdev->helper.fb)
1385 continue; 1466 continue;
1386 1467
1387 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ", 1468 seq_printf(m, "user size: %d x %d, depth %d, %d bpp, refcount %d, obj ",
1388 fb->base.width, 1469 fb->base.width,
1389 fb->base.height, 1470 fb->base.height,
1390 fb->base.depth, 1471 fb->base.depth,
1391 fb->base.bits_per_pixel); 1472 fb->base.bits_per_pixel,
1473 atomic_read(&fb->base.refcount.refcount));
1392 describe_obj(m, fb->obj); 1474 describe_obj(m, fb->obj);
1393 seq_printf(m, "\n"); 1475 seq_printf(m, "\n");
1394 } 1476 }
1395 1477 mutex_unlock(&dev->mode_config.fb_lock);
1396 mutex_unlock(&dev->mode_config.mutex);
1397 1478
1398 return 0; 1479 return 0;
1399} 1480}
@@ -1403,7 +1484,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
1403 struct drm_info_node *node = (struct drm_info_node *) m->private; 1484 struct drm_info_node *node = (struct drm_info_node *) m->private;
1404 struct drm_device *dev = node->minor->dev; 1485 struct drm_device *dev = node->minor->dev;
1405 drm_i915_private_t *dev_priv = dev->dev_private; 1486 drm_i915_private_t *dev_priv = dev->dev_private;
1406 int ret; 1487 struct intel_ring_buffer *ring;
1488 int ret, i;
1407 1489
1408 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1490 ret = mutex_lock_interruptible(&dev->mode_config.mutex);
1409 if (ret) 1491 if (ret)
@@ -1421,6 +1503,14 @@ static int i915_context_status(struct seq_file *m, void *unused)
1421 seq_printf(m, "\n"); 1503 seq_printf(m, "\n");
1422 } 1504 }
1423 1505
1506 for_each_ring(ring, dev_priv, i) {
1507 if (ring->default_context) {
1508 seq_printf(m, "HW default context %s ring ", ring->name);
1509 describe_obj(m, ring->default_context->obj);
1510 seq_printf(m, "\n");
1511 }
1512 }
1513
1424 mutex_unlock(&dev->mode_config.mutex); 1514 mutex_unlock(&dev->mode_config.mutex);
1425 1515
1426 return 0; 1516 return 0;
@@ -1556,7 +1646,7 @@ static int i915_dpio_info(struct seq_file *m, void *data)
1556 return 0; 1646 return 0;
1557 } 1647 }
1558 1648
1559 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 1649 ret = mutex_lock_interruptible(&dev_priv->dpio_lock);
1560 if (ret) 1650 if (ret)
1561 return ret; 1651 return ret;
1562 1652
@@ -1585,7 +1675,7 @@ static int i915_dpio_info(struct seq_file *m, void *data)
1585 seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n", 1675 seq_printf(m, "DPIO_FASTCLK_DISABLE: 0x%08x\n",
1586 intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE)); 1676 intel_dpio_read(dev_priv, DPIO_FASTCLK_DISABLE));
1587 1677
1588 mutex_unlock(&dev->mode_config.mutex); 1678 mutex_unlock(&dev_priv->dpio_lock);
1589 1679
1590 return 0; 1680 return 0;
1591} 1681}
@@ -1603,7 +1693,7 @@ i915_wedged_read(struct file *filp,
1603 1693
1604 len = snprintf(buf, sizeof(buf), 1694 len = snprintf(buf, sizeof(buf),
1605 "wedged : %d\n", 1695 "wedged : %d\n",
1606 atomic_read(&dev_priv->mm.wedged)); 1696 atomic_read(&dev_priv->gpu_error.reset_counter));
1607 1697
1608 if (len > sizeof(buf)) 1698 if (len > sizeof(buf))
1609 len = sizeof(buf); 1699 len = sizeof(buf);
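
These debugfs read helpers all follow the same pattern: format into a small stack buffer, clamp, then hand off to simple_read_from_buffer(). The clamp exists because snprintf() returns the length the output would have had, which can exceed the buffer size when truncated. A standalone illustration (plain C, not driver code):

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
    	char buf[8];
    	int len = snprintf(buf, sizeof(buf), "wedged : %d\n", 123456);

    	assert(len > (int)sizeof(buf)); /* would-be length: 16 */
    	if (len > (int)sizeof(buf))
    		len = sizeof(buf);      /* clamp before copying out */
    	return 0;
    }
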
@@ -1658,7 +1748,7 @@ i915_ring_stop_read(struct file *filp,
1658 int len; 1748 int len;
1659 1749
1660 len = snprintf(buf, sizeof(buf), 1750 len = snprintf(buf, sizeof(buf),
1661 "0x%08x\n", dev_priv->stop_rings); 1751 "0x%08x\n", dev_priv->gpu_error.stop_rings);
1662 1752
1663 if (len > sizeof(buf)) 1753 if (len > sizeof(buf))
1664 len = sizeof(buf); 1754 len = sizeof(buf);
@@ -1694,7 +1784,7 @@ i915_ring_stop_write(struct file *filp,
1694 if (ret) 1784 if (ret)
1695 return ret; 1785 return ret;
1696 1786
1697 dev_priv->stop_rings = val; 1787 dev_priv->gpu_error.stop_rings = val;
1698 mutex_unlock(&dev->struct_mutex); 1788 mutex_unlock(&dev->struct_mutex);
1699 1789
1700 return cnt; 1790 return cnt;
@@ -1708,6 +1798,102 @@ static const struct file_operations i915_ring_stop_fops = {
1708 .llseek = default_llseek, 1798 .llseek = default_llseek,
1709}; 1799};
1710 1800
1801#define DROP_UNBOUND 0x1
1802#define DROP_BOUND 0x2
1803#define DROP_RETIRE 0x4
1804#define DROP_ACTIVE 0x8
1805#define DROP_ALL (DROP_UNBOUND | \
1806 DROP_BOUND | \
1807 DROP_RETIRE | \
1808 DROP_ACTIVE)
1809static ssize_t
1810i915_drop_caches_read(struct file *filp,
1811 char __user *ubuf,
1812 size_t max,
1813 loff_t *ppos)
1814{
1815 char buf[20];
1816 int len;
1817
1818 len = snprintf(buf, sizeof(buf), "0x%08x\n", DROP_ALL);
1819 if (len > sizeof(buf))
1820 len = sizeof(buf);
1821
1822 return simple_read_from_buffer(ubuf, max, ppos, buf, len);
1823}
1824
1825static ssize_t
1826i915_drop_caches_write(struct file *filp,
1827 const char __user *ubuf,
1828 size_t cnt,
1829 loff_t *ppos)
1830{
1831 struct drm_device *dev = filp->private_data;
1832 struct drm_i915_private *dev_priv = dev->dev_private;
1833 struct drm_i915_gem_object *obj, *next;
1834 char buf[20];
1835 int val = 0, ret;
1836
1837 if (cnt > 0) {
1838 if (cnt > sizeof(buf) - 1)
1839 return -EINVAL;
1840
1841 if (copy_from_user(buf, ubuf, cnt))
1842 return -EFAULT;
1843 buf[cnt] = 0;
1844
1845 val = simple_strtoul(buf, NULL, 0);
1846 }
1847
1848 DRM_DEBUG_DRIVER("Dropping caches: 0x%08x\n", val);
1849
1850 /* No need to check and wait for gpu resets; only libdrm auto-restarts
1851 * ioctls that return -EAGAIN. */

1852 ret = mutex_lock_interruptible(&dev->struct_mutex);
1853 if (ret)
1854 return ret;
1855
1856 if (val & DROP_ACTIVE) {
1857 ret = i915_gpu_idle(dev);
1858 if (ret)
1859 goto unlock;
1860 }
1861
1862 if (val & (DROP_RETIRE | DROP_ACTIVE))
1863 i915_gem_retire_requests(dev);
1864
1865 if (val & DROP_BOUND) {
1866 list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list)
1867 if (obj->pin_count == 0) {
1868 ret = i915_gem_object_unbind(obj);
1869 if (ret)
1870 goto unlock;
1871 }
1872 }
1873
1874 if (val & DROP_UNBOUND) {
1875 list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
1876 if (obj->pages_pin_count == 0) {
1877 ret = i915_gem_object_put_pages(obj);
1878 if (ret)
1879 goto unlock;
1880 }
1881 }
1882
1883unlock:
1884 mutex_unlock(&dev->struct_mutex);
1885
1886 return ret ?: cnt;
1887}
1888
1889static const struct file_operations i915_drop_caches_fops = {
1890 .owner = THIS_MODULE,
1891 .open = simple_open,
1892 .read = i915_drop_caches_read,
1893 .write = i915_drop_caches_write,
1894 .llseek = default_llseek,
1895};
1896
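
The new i915_gem_drop_caches file takes a bitmask of the DROP_* flags defined above, so userspace (typically a test harness) can exercise the unbind and shrinker paths deterministically. A hypothetical user-space sketch, assuming debugfs is mounted at /sys/kernel/debug and the device is card 0:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
    	/* 0xf = DROP_UNBOUND | DROP_BOUND | DROP_RETIRE | DROP_ACTIVE */
    	int fd = open("/sys/kernel/debug/dri/0/i915_gem_drop_caches",
    		      O_WRONLY);

    	if (fd < 0) {
    		perror("open");
    		return 1;
    	}
    	if (write(fd, "0xf", 3) != 3)
    		perror("write");
    	close(fd);
    	return 0;
    }
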
1711static ssize_t 1897static ssize_t
1712i915_max_freq_read(struct file *filp, 1898i915_max_freq_read(struct file *filp,
1713 char __user *ubuf, 1899 char __user *ubuf,
@@ -2105,11 +2291,23 @@ int i915_debugfs_init(struct drm_minor *minor)
2105 return ret; 2291 return ret;
2106 2292
2107 ret = i915_debugfs_create(minor->debugfs_root, minor, 2293 ret = i915_debugfs_create(minor->debugfs_root, minor,
2294 "i915_gem_drop_caches",
2295 &i915_drop_caches_fops);
2296 if (ret)
2297 return ret;
2298
2299 ret = i915_debugfs_create(minor->debugfs_root, minor,
2108 "i915_error_state", 2300 "i915_error_state",
2109 &i915_error_state_fops); 2301 &i915_error_state_fops);
2110 if (ret) 2302 if (ret)
2111 return ret; 2303 return ret;
2112 2304
2305 ret = i915_debugfs_create(minor->debugfs_root, minor,
2306 "i915_next_seqno",
2307 &i915_next_seqno_fops);
2308 if (ret)
2309 return ret;
2310
2113 return drm_debugfs_create_files(i915_debugfs_list, 2311 return drm_debugfs_create_files(i915_debugfs_list,
2114 I915_DEBUGFS_ENTRIES, 2312 I915_DEBUGFS_ENTRIES,
2115 minor->debugfs_root, minor); 2313 minor->debugfs_root, minor);
@@ -2129,10 +2327,14 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
2129 1, minor); 2327 1, minor);
2130 drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops, 2328 drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
2131 1, minor); 2329 1, minor);
2330 drm_debugfs_remove_files((struct drm_info_list *) &i915_drop_caches_fops,
2331 1, minor);
2132 drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops, 2332 drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
2133 1, minor); 2333 1, minor);
2134 drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops, 2334 drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
2135 1, minor); 2335 1, minor);
2336 drm_debugfs_remove_files((struct drm_info_list *) &i915_next_seqno_fops,
2337 1, minor);
2136} 2338}
2137 2339
2138#endif /* CONFIG_DEBUG_FS */ 2340#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 99daa896105d..4fa6beb14c77 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -992,6 +992,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
992 case I915_PARAM_HAS_PINNED_BATCHES: 992 case I915_PARAM_HAS_PINNED_BATCHES:
993 value = 1; 993 value = 1;
994 break; 994 break;
995 case I915_PARAM_HAS_EXEC_NO_RELOC:
996 value = 1;
997 break;
998 case I915_PARAM_HAS_EXEC_HANDLE_LUT:
999 value = 1;
1000 break;
995 default: 1001 default:
996 DRM_DEBUG_DRIVER("Unknown parameter %d\n", 1002 DRM_DEBUG_DRIVER("Unknown parameter %d\n",
997 param->param); 1003 param->param);
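
The two new GETPARAM cases let userspace (libdrm) probe for the NO_RELOC and HANDLE_LUT execbuffer fast paths before relying on them. A hedged user-space sketch of such a probe, where fd is an already-open DRM device file descriptor:

    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    static int i915_has_param(int fd, int param)
    {
    	int value = 0;
    	drm_i915_getparam_t gp = { .param = param, .value = &value };

    	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
    		return 0; /* older kernel: parameter unknown */
    	return value;
    }

    /* e.g. i915_has_param(fd, I915_PARAM_HAS_EXEC_NO_RELOC) */
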
@@ -1070,7 +1076,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
1070 ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12); 1076 ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
1071 1077
1072 dev_priv->dri1.gfx_hws_cpu_addr = 1078 dev_priv->dri1.gfx_hws_cpu_addr =
1073 ioremap_wc(dev_priv->mm.gtt_base_addr + hws->addr, 4096); 1079 ioremap_wc(dev_priv->gtt.mappable_base + hws->addr, 4096);
1074 if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) { 1080 if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
1075 i915_dma_cleanup(dev); 1081 i915_dma_cleanup(dev);
1076 ring->status_page.gfx_addr = 0; 1082 ring->status_page.gfx_addr = 0;
@@ -1297,19 +1303,21 @@ static int i915_load_modeset_init(struct drm_device *dev)
1297 if (ret) 1303 if (ret)
1298 goto cleanup_vga_switcheroo; 1304 goto cleanup_vga_switcheroo;
1299 1305
1306 ret = drm_irq_install(dev);
1307 if (ret)
1308 goto cleanup_gem_stolen;
1309
1310 /* Important: The output setup functions called by modeset_init need
1311 * working irqs for e.g. gmbus and dp aux transfers. */
1300 intel_modeset_init(dev); 1312 intel_modeset_init(dev);
1301 1313
1302 ret = i915_gem_init(dev); 1314 ret = i915_gem_init(dev);
1303 if (ret) 1315 if (ret)
1304 goto cleanup_gem_stolen; 1316 goto cleanup_irq;
1305
1306 intel_modeset_gem_init(dev);
1307 1317
1308 INIT_WORK(&dev_priv->console_resume_work, intel_console_resume); 1318 INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
1309 1319
1310 ret = drm_irq_install(dev); 1320 intel_modeset_gem_init(dev);
1311 if (ret)
1312 goto cleanup_gem;
1313 1321
1314 /* Always safe in the mode setting case. */ 1322 /* Always safe in the mode setting case. */
1315 /* FIXME: do pre/post-mode set stuff in core KMS code */ 1323 /* FIXME: do pre/post-mode set stuff in core KMS code */
@@ -1317,7 +1325,25 @@ static int i915_load_modeset_init(struct drm_device *dev)
1317 1325
1318 ret = intel_fbdev_init(dev); 1326 ret = intel_fbdev_init(dev);
1319 if (ret) 1327 if (ret)
1320 goto cleanup_irq; 1328 goto cleanup_gem;
1329
1330 /* Only enable hotplug handling once the fbdev is fully set up. */
1331 intel_hpd_init(dev);
1332
1333 /*
1334 * Some ports require correctly set-up hpd registers for detection to
1335 * work properly (leading to ghost connected connector status), e.g. VGA
1336 * on gm45. Hence we can only set up the initial fbdev config after hpd
1337 * irqs are fully enabled. Now we should scan for the initial config
1338 * only once hotplug handling is enabled, but due to screwed-up locking
1339 * around kms/fbdev init we can't protect the fbdev initial config
1340 * scanning against hotplug events. Hence do this first and ignore the
1341 * tiny window where we will lose hotplug notifications.
1342 */
1343 intel_fbdev_initial_config(dev);
1344
1345 /* Only enable hotplug handling once the fbdev is fully set up. */
1346 dev_priv->enable_hotplug_processing = true;
1321 1347
1322 drm_kms_helper_poll_init(dev); 1348 drm_kms_helper_poll_init(dev);
1323 1349
@@ -1326,13 +1352,13 @@ static int i915_load_modeset_init(struct drm_device *dev)
1326 1352
1327 return 0; 1353 return 0;
1328 1354
1329cleanup_irq:
1330 drm_irq_uninstall(dev);
1331cleanup_gem: 1355cleanup_gem:
1332 mutex_lock(&dev->struct_mutex); 1356 mutex_lock(&dev->struct_mutex);
1333 i915_gem_cleanup_ringbuffer(dev); 1357 i915_gem_cleanup_ringbuffer(dev);
1334 mutex_unlock(&dev->struct_mutex); 1358 mutex_unlock(&dev->struct_mutex);
1335 i915_gem_cleanup_aliasing_ppgtt(dev); 1359 i915_gem_cleanup_aliasing_ppgtt(dev);
1360cleanup_irq:
1361 drm_irq_uninstall(dev);
1336cleanup_gem_stolen: 1362cleanup_gem_stolen:
1337 i915_gem_cleanup_stolen(dev); 1363 i915_gem_cleanup_stolen(dev);
1338cleanup_vga_switcheroo: 1364cleanup_vga_switcheroo:
@@ -1400,9 +1426,9 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
1400 if (!ap) 1426 if (!ap)
1401 return; 1427 return;
1402 1428
1403 ap->ranges[0].base = dev_priv->mm.gtt->gma_bus_addr; 1429 ap->ranges[0].base = dev_priv->gtt.mappable_base;
1404 ap->ranges[0].size = 1430 ap->ranges[0].size = dev_priv->gtt.mappable_end - dev_priv->gtt.start;
1405 dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; 1431
1406 primary = 1432 primary =
1407 pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; 1433 pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
1408 1434
@@ -1516,18 +1542,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1516 goto put_gmch; 1542 goto put_gmch;
1517 } 1543 }
1518 1544
1519 aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; 1545 aperture_size = dev_priv->gtt.mappable_end;
1520 dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr;
1521 1546
1522 dev_priv->mm.gtt_mapping = 1547 dev_priv->gtt.mappable =
1523 io_mapping_create_wc(dev_priv->mm.gtt_base_addr, 1548 io_mapping_create_wc(dev_priv->gtt.mappable_base,
1524 aperture_size); 1549 aperture_size);
1525 if (dev_priv->mm.gtt_mapping == NULL) { 1550 if (dev_priv->gtt.mappable == NULL) {
1526 ret = -EIO; 1551 ret = -EIO;
1527 goto out_rmmap; 1552 goto out_rmmap;
1528 } 1553 }
1529 1554
1530 i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr, 1555 i915_mtrr_setup(dev_priv, dev_priv->gtt.mappable_base,
1531 aperture_size); 1556 aperture_size);
1532 1557
1533 /* The i915 workqueue is primarily used for batched retirement of 1558 /* The i915 workqueue is primarily used for batched retirement of
@@ -1580,11 +1605,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1580 pci_enable_msi(dev->pdev); 1605 pci_enable_msi(dev->pdev);
1581 1606
1582 spin_lock_init(&dev_priv->irq_lock); 1607 spin_lock_init(&dev_priv->irq_lock);
1583 spin_lock_init(&dev_priv->error_lock); 1608 spin_lock_init(&dev_priv->gpu_error.lock);
1584 spin_lock_init(&dev_priv->rps.lock); 1609 spin_lock_init(&dev_priv->rps.lock);
1585 spin_lock_init(&dev_priv->dpio_lock); 1610 mutex_init(&dev_priv->dpio_lock);
1586 1611
1587 mutex_init(&dev_priv->rps.hw_lock); 1612 mutex_init(&dev_priv->rps.hw_lock);
1613 mutex_init(&dev_priv->modeset_restore_lock);
1588 1614
1589 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 1615 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
1590 dev_priv->num_pipe = 3; 1616 dev_priv->num_pipe = 3;
@@ -1614,9 +1640,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1614 intel_opregion_init(dev); 1640 intel_opregion_init(dev);
1615 acpi_video_register(); 1641 acpi_video_register();
1616 1642
1617 setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
1618 (unsigned long) dev);
1619
1620 if (IS_GEN5(dev)) 1643 if (IS_GEN5(dev))
1621 intel_gpu_ips_init(dev_priv); 1644 intel_gpu_ips_init(dev_priv);
1622 1645
@@ -1635,15 +1658,15 @@ out_gem_unload:
1635out_mtrrfree: 1658out_mtrrfree:
1636 if (dev_priv->mm.gtt_mtrr >= 0) { 1659 if (dev_priv->mm.gtt_mtrr >= 0) {
1637 mtrr_del(dev_priv->mm.gtt_mtrr, 1660 mtrr_del(dev_priv->mm.gtt_mtrr,
1638 dev_priv->mm.gtt_base_addr, 1661 dev_priv->gtt.mappable_base,
1639 aperture_size); 1662 aperture_size);
1640 dev_priv->mm.gtt_mtrr = -1; 1663 dev_priv->mm.gtt_mtrr = -1;
1641 } 1664 }
1642 io_mapping_free(dev_priv->mm.gtt_mapping); 1665 io_mapping_free(dev_priv->gtt.mappable);
1643out_rmmap: 1666out_rmmap:
1644 pci_iounmap(dev->pdev, dev_priv->regs); 1667 pci_iounmap(dev->pdev, dev_priv->regs);
1645put_gmch: 1668put_gmch:
1646 i915_gem_gtt_fini(dev); 1669 dev_priv->gtt.gtt_remove(dev);
1647put_bridge: 1670put_bridge:
1648 pci_dev_put(dev_priv->bridge_dev); 1671 pci_dev_put(dev_priv->bridge_dev);
1649free_priv: 1672free_priv:
@@ -1673,11 +1696,11 @@ int i915_driver_unload(struct drm_device *dev)
1673 /* Cancel the retire work handler, which should be idle now. */ 1696 /* Cancel the retire work handler, which should be idle now. */
1674 cancel_delayed_work_sync(&dev_priv->mm.retire_work); 1697 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
1675 1698
1676 io_mapping_free(dev_priv->mm.gtt_mapping); 1699 io_mapping_free(dev_priv->gtt.mappable);
1677 if (dev_priv->mm.gtt_mtrr >= 0) { 1700 if (dev_priv->mm.gtt_mtrr >= 0) {
1678 mtrr_del(dev_priv->mm.gtt_mtrr, 1701 mtrr_del(dev_priv->mm.gtt_mtrr,
1679 dev_priv->mm.gtt_base_addr, 1702 dev_priv->gtt.mappable_base,
1680 dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE); 1703 dev_priv->gtt.mappable_end);
1681 dev_priv->mm.gtt_mtrr = -1; 1704 dev_priv->mm.gtt_mtrr = -1;
1682 } 1705 }
1683 1706
@@ -1703,8 +1726,8 @@ int i915_driver_unload(struct drm_device *dev)
1703 } 1726 }
1704 1727
1705 /* Free error state after interrupts are fully disabled. */ 1728 /* Free error state after interrupts are fully disabled. */
1706 del_timer_sync(&dev_priv->hangcheck_timer); 1729 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
1707 cancel_work_sync(&dev_priv->error_work); 1730 cancel_work_sync(&dev_priv->gpu_error.work);
1708 i915_destroy_error_state(dev); 1731 i915_destroy_error_state(dev);
1709 1732
1710 if (dev->pdev->msi_enabled) 1733 if (dev->pdev->msi_enabled)
@@ -1723,9 +1746,6 @@ int i915_driver_unload(struct drm_device *dev)
1723 mutex_unlock(&dev->struct_mutex); 1746 mutex_unlock(&dev->struct_mutex);
1724 i915_gem_cleanup_aliasing_ppgtt(dev); 1747 i915_gem_cleanup_aliasing_ppgtt(dev);
1725 i915_gem_cleanup_stolen(dev); 1748 i915_gem_cleanup_stolen(dev);
1726 drm_mm_takedown(&dev_priv->mm.stolen);
1727
1728 intel_cleanup_overlay(dev);
1729 1749
1730 if (!I915_NEED_GFX_HWS(dev)) 1750 if (!I915_NEED_GFX_HWS(dev))
1731 i915_free_hws(dev); 1751 i915_free_hws(dev);
@@ -1738,6 +1758,10 @@ int i915_driver_unload(struct drm_device *dev)
1738 intel_teardown_mchbar(dev); 1758 intel_teardown_mchbar(dev);
1739 1759
1740 destroy_workqueue(dev_priv->wq); 1760 destroy_workqueue(dev_priv->wq);
1761 pm_qos_remove_request(&dev_priv->pm_qos);
1762
1763 if (dev_priv->slab)
1764 kmem_cache_destroy(dev_priv->slab);
1741 1765
1742 pci_dev_put(dev_priv->bridge_dev); 1766 pci_dev_put(dev_priv->bridge_dev);
1743 kfree(dev->dev_private); 1767 kfree(dev->dev_private);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 117265840b1f..c5b8c81b9440 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -276,6 +276,7 @@ static const struct intel_device_info intel_valleyview_m_info = {
276 .has_bsd_ring = 1, 276 .has_bsd_ring = 1,
277 .has_blt_ring = 1, 277 .has_blt_ring = 1,
278 .is_valleyview = 1, 278 .is_valleyview = 1,
279 .display_mmio_offset = VLV_DISPLAY_BASE,
279}; 280};
280 281
281static const struct intel_device_info intel_valleyview_d_info = { 282static const struct intel_device_info intel_valleyview_d_info = {
@@ -285,6 +286,7 @@ static const struct intel_device_info intel_valleyview_d_info = {
285 .has_bsd_ring = 1, 286 .has_bsd_ring = 1,
286 .has_blt_ring = 1, 287 .has_blt_ring = 1,
287 .is_valleyview = 1, 288 .is_valleyview = 1,
289 .display_mmio_offset = VLV_DISPLAY_BASE,
288}; 290};
289 291
290static const struct intel_device_info intel_haswell_d_info = { 292static const struct intel_device_info intel_haswell_d_info = {
@@ -468,6 +470,13 @@ static int i915_drm_freeze(struct drm_device *dev)
468{ 470{
469 struct drm_i915_private *dev_priv = dev->dev_private; 471 struct drm_i915_private *dev_priv = dev->dev_private;
470 472
473 /* ignore lid events during suspend */
474 mutex_lock(&dev_priv->modeset_restore_lock);
475 dev_priv->modeset_restore = MODESET_SUSPENDED;
476 mutex_unlock(&dev_priv->modeset_restore_lock);
477
478 intel_set_power_well(dev, true);
479
471 drm_kms_helper_poll_disable(dev); 480 drm_kms_helper_poll_disable(dev);
472 481
473 pci_save_state(dev->pdev); 482 pci_save_state(dev->pdev);
@@ -492,9 +501,6 @@ static int i915_drm_freeze(struct drm_device *dev)
492 501
493 intel_opregion_fini(dev); 502 intel_opregion_fini(dev);
494 503
495 /* Modeset on resume, not lid events */
496 dev_priv->modeset_on_lid = 0;
497
498 console_lock(); 504 console_lock();
499 intel_fbdev_set_suspend(dev, 1); 505 intel_fbdev_set_suspend(dev, 1);
500 console_unlock(); 506 console_unlock();
@@ -565,12 +571,11 @@ static int __i915_drm_thaw(struct drm_device *dev)
565 intel_modeset_init_hw(dev); 571 intel_modeset_init_hw(dev);
566 intel_modeset_setup_hw_state(dev, false); 572 intel_modeset_setup_hw_state(dev, false);
567 drm_irq_install(dev); 573 drm_irq_install(dev);
574 intel_hpd_init(dev);
568 } 575 }
569 576
570 intel_opregion_init(dev); 577 intel_opregion_init(dev);
571 578
572 dev_priv->modeset_on_lid = 0;
573
574 /* 579 /*
575 * The console lock can be pretty contended on resume due 580
576 * to all the printk activity. Try to keep it out of the hot 581 * to all the printk activity. Try to keep it out of the hot
@@ -583,6 +588,9 @@ static int __i915_drm_thaw(struct drm_device *dev)
583 schedule_work(&dev_priv->console_resume_work); 588 schedule_work(&dev_priv->console_resume_work);
584 } 589 }
585 590
591 mutex_lock(&dev_priv->modeset_restore_lock);
592 dev_priv->modeset_restore = MODESET_DONE;
593 mutex_unlock(&dev_priv->modeset_restore_lock);
586 return error; 594 return error;
587} 595}
588 596
@@ -778,9 +786,9 @@ int intel_gpu_reset(struct drm_device *dev)
778 } 786 }
779 787
780 /* Also reset the gpu hangman. */ 788 /* Also reset the gpu hangman. */
781 if (dev_priv->stop_rings) { 789 if (dev_priv->gpu_error.stop_rings) {
782 DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n"); 790 DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
783 dev_priv->stop_rings = 0; 791 dev_priv->gpu_error.stop_rings = 0;
784 if (ret == -ENODEV) { 792 if (ret == -ENODEV) {
785 DRM_ERROR("Reset not implemented, but ignoring " 793 DRM_ERROR("Reset not implemented, but ignoring "
786 "error for simulated gpu hangs\n"); 794 "error for simulated gpu hangs\n");
@@ -819,12 +827,12 @@ int i915_reset(struct drm_device *dev)
819 i915_gem_reset(dev); 827 i915_gem_reset(dev);
820 828
821 ret = -ENODEV; 829 ret = -ENODEV;
822 if (get_seconds() - dev_priv->last_gpu_reset < 5) 830 if (get_seconds() - dev_priv->gpu_error.last_reset < 5)
823 DRM_ERROR("GPU hanging too fast, declaring wedged!\n"); 831 DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
824 else 832 else
825 ret = intel_gpu_reset(dev); 833 ret = intel_gpu_reset(dev);
826 834
827 dev_priv->last_gpu_reset = get_seconds(); 835 dev_priv->gpu_error.last_reset = get_seconds();
828 if (ret) { 836 if (ret) {
829 DRM_ERROR("Failed to reset chip.\n"); 837 DRM_ERROR("Failed to reset chip.\n");
830 mutex_unlock(&dev->struct_mutex); 838 mutex_unlock(&dev->struct_mutex);
@@ -870,6 +878,7 @@ int i915_reset(struct drm_device *dev)
870 878
871 drm_irq_uninstall(dev); 879 drm_irq_uninstall(dev);
872 drm_irq_install(dev); 880 drm_irq_install(dev);
881 intel_hpd_init(dev);
873 } else { 882 } else {
874 mutex_unlock(&dev->struct_mutex); 883 mutex_unlock(&dev->struct_mutex);
875 } 884 }
@@ -1113,102 +1122,6 @@ MODULE_LICENSE("GPL and additional rights");
1113 ((HAS_FORCE_WAKE((dev_priv)->dev)) && \ 1122 ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
1114 ((reg) < 0x40000) && \ 1123 ((reg) < 0x40000) && \
1115 ((reg) != FORCEWAKE)) 1124 ((reg) != FORCEWAKE))
1116
1117static bool IS_DISPLAYREG(u32 reg)
1118{
1119 /*
1120 * This should make it easier to transition modules over to the
1121 * new register block scheme, since we can do it incrementally.
1122 */
1123 if (reg >= VLV_DISPLAY_BASE)
1124 return false;
1125
1126 if (reg >= RENDER_RING_BASE &&
1127 reg < RENDER_RING_BASE + 0xff)
1128 return false;
1129 if (reg >= GEN6_BSD_RING_BASE &&
1130 reg < GEN6_BSD_RING_BASE + 0xff)
1131 return false;
1132 if (reg >= BLT_RING_BASE &&
1133 reg < BLT_RING_BASE + 0xff)
1134 return false;
1135
1136 if (reg == PGTBL_ER)
1137 return false;
1138
1139 if (reg >= IPEIR_I965 &&
1140 reg < HWSTAM)
1141 return false;
1142
1143 if (reg == MI_MODE)
1144 return false;
1145
1146 if (reg == GFX_MODE_GEN7)
1147 return false;
1148
1149 if (reg == RENDER_HWS_PGA_GEN7 ||
1150 reg == BSD_HWS_PGA_GEN7 ||
1151 reg == BLT_HWS_PGA_GEN7)
1152 return false;
1153
1154 if (reg == GEN6_BSD_SLEEP_PSMI_CONTROL ||
1155 reg == GEN6_BSD_RNCID)
1156 return false;
1157
1158 if (reg == GEN6_BLITTER_ECOSKPD)
1159 return false;
1160
1161 if (reg >= 0x4000c &&
1162 reg <= 0x4002c)
1163 return false;
1164
1165 if (reg >= 0x4f000 &&
1166 reg <= 0x4f08f)
1167 return false;
1168
1169 if (reg >= 0x4f100 &&
1170 reg <= 0x4f11f)
1171 return false;
1172
1173 if (reg >= VLV_MASTER_IER &&
1174 reg <= GEN6_PMIER)
1175 return false;
1176
1177 if (reg >= FENCE_REG_SANDYBRIDGE_0 &&
1178 reg < (FENCE_REG_SANDYBRIDGE_0 + (16*8)))
1179 return false;
1180
1181 if (reg >= VLV_IIR_RW &&
1182 reg <= VLV_ISR)
1183 return false;
1184
1185 if (reg == FORCEWAKE_VLV ||
1186 reg == FORCEWAKE_ACK_VLV)
1187 return false;
1188
1189 if (reg == GEN6_GDRST)
1190 return false;
1191
1192 switch (reg) {
1193 case _3D_CHICKEN3:
1194 case IVB_CHICKEN3:
1195 case GEN7_COMMON_SLICE_CHICKEN1:
1196 case GEN7_L3CNTLREG1:
1197 case GEN7_L3_CHICKEN_MODE_REGISTER:
1198 case GEN7_ROW_CHICKEN2:
1199 case GEN7_L3SQCREG4:
1200 case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG:
1201 case GEN7_HALF_SLICE_CHICKEN1:
1202 case GEN6_MBCTL:
1203 case GEN6_UCGCTL2:
1204 return false;
1205 default:
1206 break;
1207 }
1208
1209 return true;
1210}
1211
1212static void 1125static void
1213ilk_dummy_write(struct drm_i915_private *dev_priv) 1126ilk_dummy_write(struct drm_i915_private *dev_priv)
1214{ 1127{
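
The entire IS_DISPLAYREG() table above becomes unnecessary because the per-platform display offset now lives in intel_device_info (see the .display_mmio_offset = VLV_DISPLAY_BASE hunks in the device tables earlier in this diff), so display register definitions can fold the base in once instead of translating addresses on every read/write. Illustrative shape only; the exact macros in i915_reg.h may differ:

    /* Hypothetical register definition under the new scheme: the
     * per-device base is 0 everywhere except Valleyview (0x180000). */
    #define _DISPLAY_REG(offset) \
    	(dev_priv->info->display_mmio_offset + (offset))

    #define SKETCH_PIPEACONF _DISPLAY_REG(0x70008)
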
@@ -1232,8 +1145,6 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
1232 if (dev_priv->forcewake_count == 0) \ 1145 if (dev_priv->forcewake_count == 0) \
1233 dev_priv->gt.force_wake_put(dev_priv); \ 1146 dev_priv->gt.force_wake_put(dev_priv); \
1234 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \ 1147 spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
1235 } else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
1236 val = read##y(dev_priv->regs + reg + 0x180000); \
1237 } else { \ 1148 } else { \
1238 val = read##y(dev_priv->regs + reg); \ 1149 val = read##y(dev_priv->regs + reg); \
1239 } \ 1150 } \
@@ -1260,11 +1171,7 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
1260 DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \ 1171 DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
1261 I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \ 1172 I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
1262 } \ 1173 } \
1263 if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \ 1174 write##y(val, dev_priv->regs + reg); \
1264 write##y(val, dev_priv->regs + reg + 0x180000); \
1265 } else { \
1266 write##y(val, dev_priv->regs + reg); \
1267 } \
1268 if (unlikely(__fifo_ret)) { \ 1175 if (unlikely(__fifo_ret)) { \
1269 gen6_gt_check_fifodbg(dev_priv); \ 1176 gen6_gt_check_fifodbg(dev_priv); \
1270 } \ 1177 } \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 12ab3bdea54d..e95337c97459 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -30,6 +30,8 @@
30#ifndef _I915_DRV_H_ 30#ifndef _I915_DRV_H_
31#define _I915_DRV_H_ 31#define _I915_DRV_H_
32 32
33#include <uapi/drm/i915_drm.h>
34
33#include "i915_reg.h" 35#include "i915_reg.h"
34#include "intel_bios.h" 36#include "intel_bios.h"
35#include "intel_ringbuffer.h" 37#include "intel_ringbuffer.h"
@@ -40,6 +42,7 @@
40#include <linux/backlight.h> 42#include <linux/backlight.h>
41#include <linux/intel-iommu.h> 43#include <linux/intel-iommu.h>
42#include <linux/kref.h> 44#include <linux/kref.h>
45#include <linux/pm_qos.h>
43 46
44/* General customization: 47/* General customization:
45 */ 48 */
@@ -83,7 +86,12 @@ enum port {
83}; 86};
84#define port_name(p) ((p) + 'A') 87#define port_name(p) ((p) + 'A')
85 88
86#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) 89#define I915_GEM_GPU_DOMAINS \
90 (I915_GEM_DOMAIN_RENDER | \
91 I915_GEM_DOMAIN_SAMPLER | \
92 I915_GEM_DOMAIN_COMMAND | \
93 I915_GEM_DOMAIN_INSTRUCTION | \
94 I915_GEM_DOMAIN_VERTEX)
87 95
88#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++) 96#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
89 97
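
The rewrite of I915_GEM_GPU_DOMAINS matters because the old complement form ~(CPU | GTT) silently set every undefined bit as well. A quick worked check that the explicit OR equals the old value restricted to the flag bits actually defined in uapi/drm/i915_drm.h (bit values 0x1 through 0x40 assumed from that header):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
    	uint32_t cpu = 0x1, render = 0x2, sampler = 0x4, command = 0x8,
    		 instruction = 0x10, vertex = 0x20, gtt = 0x40;
    	uint32_t all_defined = cpu | render | sampler | command |
    			       instruction | vertex | gtt;
    	uint32_t explicit_gpu = render | sampler | command |
    				instruction | vertex;

    	/* old form: ~(CPU | GTT), which also set undefined bits */
    	assert(explicit_gpu == (~(cpu | gtt) & all_defined)); /* 0x3e */
    	return 0;
    }
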
@@ -101,6 +109,19 @@ struct intel_pch_pll {
101}; 109};
102#define I915_NUM_PLLS 2 110#define I915_NUM_PLLS 2
103 111
112/* Used by dp and fdi links */
113struct intel_link_m_n {
114 uint32_t tu;
115 uint32_t gmch_m;
116 uint32_t gmch_n;
117 uint32_t link_m;
118 uint32_t link_n;
119};
120
121void intel_link_compute_m_n(int bpp, int nlanes,
122 int pixel_clock, int link_clock,
123 struct intel_link_m_n *m_n);
124
104struct intel_ddi_plls { 125struct intel_ddi_plls {
105 int spll_refcount; 126 int spll_refcount;
106 int wrpll1_refcount; 127 int wrpll1_refcount;
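
intel_link_m_n carries the M/N divider pairs that DP and FDI use to relate pixel clock to link clock: the data (gmch) pair encodes bpp * pixel_clock against nlanes * 8 * link_clock, the link pair encodes pixel_clock against link_clock, and the transfer unit is fixed at 64. A hedged sketch of what intel_link_compute_m_n() produces; the real helper also reduces each pair to fit the register width:

    static void sketch_link_m_n(int bpp, int nlanes, int pixel_clock,
    			    int link_clock, struct intel_link_m_n *m_n)
    {
    	m_n->tu = 64;
    	m_n->gmch_m = bpp * pixel_clock;
    	m_n->gmch_n = link_clock * nlanes * 8;
    	m_n->link_m = pixel_clock;
    	m_n->link_n = link_clock;
    }
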
@@ -279,6 +300,7 @@ struct drm_i915_display_funcs {
279 struct drm_i915_gem_object *obj); 300 struct drm_i915_gem_object *obj);
280 int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb, 301 int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
281 int x, int y); 302 int x, int y);
303 void (*hpd_irq_setup)(struct drm_device *dev);
282 /* clock updates for mode set */ 304 /* clock updates for mode set */
283 /* cursor updates */ 305 /* cursor updates */
284 /* render clock increase/decrease */ 306 /* render clock increase/decrease */
@@ -318,6 +340,7 @@ struct drm_i915_gt_funcs {
318 DEV_INFO_FLAG(has_llc) 340 DEV_INFO_FLAG(has_llc)
319 341
320struct intel_device_info { 342struct intel_device_info {
343 u32 display_mmio_offset;
321 u8 gen; 344 u8 gen;
322 u8 is_mobile:1; 345 u8 is_mobile:1;
323 u8 is_i85x:1; 346 u8 is_i85x:1;
@@ -345,6 +368,50 @@ struct intel_device_info {
345 u8 has_llc:1; 368 u8 has_llc:1;
346}; 369};
347 370
371enum i915_cache_level {
372 I915_CACHE_NONE = 0,
373 I915_CACHE_LLC,
374 I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
375};
376
377/* The Graphics Translation Table is the way in which GEN hardware translates a
378 * Graphics Virtual Address into a Physical Address. In addition to the normal
379 * collateral associated with any va->pa translations GEN hardware also has a
380 * portion of the GTT which can be mapped by the CPU and remain both coherent
381 * and correct (in cases like swizzling). That region is referred to as GMADR in
382 * the spec.
383 */
384struct i915_gtt {
385 unsigned long start; /* Start offset of used GTT */
386 size_t total; /* Total size GTT can map */
387 size_t stolen_size; /* Total size of stolen memory */
388
389 unsigned long mappable_end; /* End offset that we can CPU map */
390 struct io_mapping *mappable; /* Mapping to our CPU mappable region */
391 phys_addr_t mappable_base; /* PA of our GMADR */
392
393 /** "Graphics Stolen Memory" holds the global PTEs */
394 void __iomem *gsm;
395
396 bool do_idle_maps;
397 dma_addr_t scratch_page_dma;
398 struct page *scratch_page;
399
400 /* global gtt ops */
401 int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
402 size_t *stolen, phys_addr_t *mappable_base,
403 unsigned long *mappable_end);
404 void (*gtt_remove)(struct drm_device *dev);
405 void (*gtt_clear_range)(struct drm_device *dev,
406 unsigned int first_entry,
407 unsigned int num_entries);
408 void (*gtt_insert_entries)(struct drm_device *dev,
409 struct sg_table *st,
410 unsigned int pg_start,
411 enum i915_cache_level cache_level);
412};
413#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
414
348#define I915_PPGTT_PD_ENTRIES 512 415#define I915_PPGTT_PD_ENTRIES 512
349#define I915_PPGTT_PT_ENTRIES 1024 416#define I915_PPGTT_PT_ENTRIES 1024
350struct i915_hw_ppgtt { 417struct i915_hw_ppgtt {
@@ -354,6 +421,16 @@ struct i915_hw_ppgtt {
354 uint32_t pd_offset; 421 uint32_t pd_offset;
355 dma_addr_t *pt_dma_addr; 422 dma_addr_t *pt_dma_addr;
356 dma_addr_t scratch_page_dma_addr; 423 dma_addr_t scratch_page_dma_addr;
424
425 /* pte functions, mirroring the interface of the global gtt. */
426 void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
427 unsigned int first_entry,
428 unsigned int num_entries);
429 void (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
430 struct sg_table *st,
431 unsigned int pg_start,
432 enum i915_cache_level cache_level);
433 void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
357}; 434};
358 435
359 436
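
Both the global GTT and the aliasing PPGTT now expose the same small insert/clear vtable, which lets the higher-level bind/unbind code stop caring which page-table flavour it is touching. A minimal sketch of a caller using the function pointers declared above (the helper name is illustrative):

    static void sketch_insert(struct drm_i915_private *dev_priv,
    			  struct sg_table *pages, unsigned int first_pte,
    			  enum i915_cache_level level, bool use_ppgtt)
    {
    	if (use_ppgtt) {
    		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

    		ppgtt->insert_entries(ppgtt, pages, first_pte, level);
    	} else {
    		dev_priv->gtt.gtt_insert_entries(dev_priv->dev, pages,
    						 first_pte, level);
    	}
    }
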
@@ -580,6 +657,9 @@ struct intel_gen6_power_mgmt {
580 struct mutex hw_lock; 657 struct mutex hw_lock;
581}; 658};
582 659
660/* defined intel_pm.c */
661extern spinlock_t mchdev_lock;
662
583struct intel_ilk_power_mgmt { 663struct intel_ilk_power_mgmt {
584 u8 cur_delay; 664 u8 cur_delay;
585 u8 min_delay; 665 u8 min_delay;
@@ -620,8 +700,162 @@ struct intel_l3_parity {
620 struct work_struct error_work; 700 struct work_struct error_work;
621}; 701};
622 702
703struct i915_gem_mm {
704 /** Memory allocator for GTT stolen memory */
705 struct drm_mm stolen;
706 /** Memory allocator for GTT */
707 struct drm_mm gtt_space;
708 /** List of all objects in gtt_space. Used to restore gtt
709 * mappings on resume */
710 struct list_head bound_list;
711 /**
712 * List of objects which are not bound to the GTT (thus
713 * are idle and not used by the GPU) but still have
714 * (presumably uncached) pages still attached.
715 */
716 struct list_head unbound_list;
717
718 /** Usable portion of the GTT for GEM */
719 unsigned long stolen_base; /* limited to low memory (32-bit) */
720
721 int gtt_mtrr;
722
723 /** PPGTT used for aliasing the PPGTT with the GTT */
724 struct i915_hw_ppgtt *aliasing_ppgtt;
725
726 struct shrinker inactive_shrinker;
727 bool shrinker_no_lock_stealing;
728
729 /**
730 * List of objects currently involved in rendering.
731 *
732 * Includes buffers having the contents of their GPU caches
733 * flushed, not necessarily primitives. last_rendering_seqno
734 * represents when the rendering involved will be completed.
735 *
736 * A reference is held on the buffer while on this list.
737 */
738 struct list_head active_list;
739
740 /**
741 * LRU list of objects which are not in the ringbuffer and
742 * are ready to unbind, but are still in the GTT.
743 *
744 * last_rendering_seqno is 0 while an object is in this list.
745 *
746 * A reference is not held on the buffer while on this list,
747 * as merely being GTT-bound shouldn't prevent its being
748 * freed, and we'll pull it off the list in the free path.
749 */
750 struct list_head inactive_list;
751
752 /** LRU list of objects with fence regs on them. */
753 struct list_head fence_list;
754
755 /**
756 * We leave the user IRQ off as much as possible,
757 * but this means that requests will finish and never
758 * be retired once the system goes idle. Set a timer to
759 * fire periodically while the ring is running. When it
760 * fires, go retire requests.
761 */
762 struct delayed_work retire_work;
763
764 /**
765 * Are we in a non-interruptible section of code like
766 * modesetting?
767 */
768 bool interruptible;
769
770 /**
771 * Flag if the X Server, and thus DRM, is not currently in
772 * control of the device.
773 *
774 * This is set between LeaveVT and EnterVT. It needs to be
775 * replaced with a semaphore. It also needs to be
776 * transitioned away from for kernel modesetting.
777 */
778 int suspended;
779
780 /** Bit 6 swizzling required for X tiling */
781 uint32_t bit_6_swizzle_x;
782 /** Bit 6 swizzling required for Y tiling */
783 uint32_t bit_6_swizzle_y;
784
785 /* storage for physical objects */
786 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
787
788 /* accounting, useful for userland debugging */
789 size_t object_memory;
790 u32 object_count;
791};
792
793struct i915_gpu_error {
794 /* For hangcheck timer */
795#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
796#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
797 struct timer_list hangcheck_timer;
798 int hangcheck_count;
799 uint32_t last_acthd[I915_NUM_RINGS];
800 uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
801
802 /* For reset and error_state handling. */
803 spinlock_t lock;
804 /* Protected by the above dev_priv->gpu_error.lock. */
805 struct drm_i915_error_state *first_error;
806 struct work_struct work;
807
808 unsigned long last_reset;
809
810 /**
811 * State variable and reset counter controlling the reset flow
812 *
813 * Upper bits are for the reset counter. This counter is used by the
814 * wait_seqno code to notice, race-free, that a reset event happened and
815 * that it needs to restart the entire ioctl (since most likely the
816 * seqno it waited for won't ever signal anytime soon).
817 *
818 * This is important for lock-free wait paths, where no contended lock
819 * naturally enforces the correct ordering between the bail-out of the
820 * waiter and the gpu reset work code.
821 *
822 * Lowest bit controls the reset state machine: Set means a reset is in
823 * progress. This state will (presuming we don't have any bugs) decay
824 * into either unset (successful reset) or the special WEDGED value (hw
825 * terminally sour). All waiters on the reset_queue will be woken when
826 * that happens.
827 */
828 atomic_t reset_counter;
829
830 /**
831 * Special values/flags for reset_counter
832 *
833 * Note that the code relies on
834 * I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
835 * being true.
836 */
837#define I915_RESET_IN_PROGRESS_FLAG 1
838#define I915_WEDGED 0xffffffff
839
840 /**
841 * Waitqueue to signal when the reset has completed. Used by clients
842 * that wait for dev_priv->mm.wedged to settle.
843 */
844 wait_queue_head_t reset_queue;
845
846 /* For gpu hang simulation. */
847 unsigned int stop_rings;
848};
849
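
The reset_counter encoding described above supports a lock-free bail-out: a waiter samples the counter before sleeping, and afterwards any change in value (or the dedicated WEDGED encoding) tells it the seqno may never signal. A sketch of that check, illustrating the scheme rather than quoting the driver's wait_seqno code:

    static int sketch_reset_bailout(struct i915_gpu_error *error,
    				unsigned int pre)
    {
    	unsigned int now = atomic_read(&error->reset_counter);

    	if (now == I915_WEDGED)
    		return -EIO;	/* hw terminally sour, give up */
    	if (now != pre)
    		return -EAGAIN;	/* reset seen, restart the ioctl */
    	return 0;		/* safe to keep waiting */
    }
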
850enum modeset_restore {
851 MODESET_ON_LID_OPEN,
852 MODESET_DONE,
853 MODESET_SUSPENDED,
854};
855
623typedef struct drm_i915_private { 856typedef struct drm_i915_private {
624 struct drm_device *dev; 857 struct drm_device *dev;
858 struct kmem_cache *slab;
625 859
626 const struct intel_device_info *info; 860 const struct intel_device_info *info;
627 861
@@ -636,10 +870,11 @@ typedef struct drm_i915_private {
636 /** forcewake_count is protected by gt_lock */ 870 /** forcewake_count is protected by gt_lock */
637 unsigned forcewake_count; 871 unsigned forcewake_count;
638 /** gt_lock is also taken in irq contexts. */ 872 /** gt_lock is also taken in irq contexts. */
639 struct spinlock gt_lock; 873 spinlock_t gt_lock;
640 874
641 struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; 875 struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
642 876
877
643 /** gmbus_mutex protects against concurrent usage of the single hw gmbus 878 /** gmbus_mutex protects against concurrent usage of the single hw gmbus
644 * controller on different i2c buses. */ 879 * controller on different i2c buses. */
645 struct mutex gmbus_mutex; 880 struct mutex gmbus_mutex;
@@ -649,9 +884,11 @@ typedef struct drm_i915_private {
649 */ 884 */
650 uint32_t gpio_mmio_base; 885 uint32_t gpio_mmio_base;
651 886
887 wait_queue_head_t gmbus_wait_queue;
888
652 struct pci_dev *bridge_dev; 889 struct pci_dev *bridge_dev;
653 struct intel_ring_buffer ring[I915_NUM_RINGS]; 890 struct intel_ring_buffer ring[I915_NUM_RINGS];
654 uint32_t next_seqno; 891 uint32_t last_seqno, next_seqno;
655 892
656 drm_dma_handle_t *status_page_dmah; 893 drm_dma_handle_t *status_page_dmah;
657 struct resource mch_res; 894 struct resource mch_res;
@@ -661,31 +898,24 @@ typedef struct drm_i915_private {
661 /* protects the irq masks */ 898 /* protects the irq masks */
662 spinlock_t irq_lock; 899 spinlock_t irq_lock;
663 900
901 /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
902 struct pm_qos_request pm_qos;
903
664 /* DPIO indirect register protection */ 904 /* DPIO indirect register protection */
665 spinlock_t dpio_lock; 905 struct mutex dpio_lock;
666 906
667 /** Cached value of IMR to avoid reads in updating the bitfield */ 907 /** Cached value of IMR to avoid reads in updating the bitfield */
668 u32 pipestat[2]; 908 u32 pipestat[2];
669 u32 irq_mask; 909 u32 irq_mask;
670 u32 gt_irq_mask; 910 u32 gt_irq_mask;
671 u32 pch_irq_mask;
672 911
673 u32 hotplug_supported_mask; 912 u32 hotplug_supported_mask;
674 struct work_struct hotplug_work; 913 struct work_struct hotplug_work;
914 bool enable_hotplug_processing;
675 915
676 int num_pipe; 916 int num_pipe;
677 int num_pch_pll; 917 int num_pch_pll;
678 918
679 /* For hangcheck timer */
680#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
681#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
682 struct timer_list hangcheck_timer;
683 int hangcheck_count;
684 uint32_t last_acthd[I915_NUM_RINGS];
685 uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
686
687 unsigned int stop_rings;
688
689 unsigned long cfb_size; 919 unsigned long cfb_size;
690 unsigned int cfb_fb; 920 unsigned int cfb_fb;
691 enum plane cfb_plane; 921 enum plane cfb_plane;
@@ -696,7 +926,7 @@ typedef struct drm_i915_private {
696 926
697 /* overlay */ 927 /* overlay */
698 struct intel_overlay *overlay; 928 struct intel_overlay *overlay;
699 bool sprite_scaling_enabled; 929 unsigned int sprite_scaling_enabled;
700 930
701 /* LVDS info */ 931 /* LVDS info */
702 int backlight_level; /* restore backlight to this value */ 932 int backlight_level; /* restore backlight to this value */
@@ -713,7 +943,6 @@ typedef struct drm_i915_private {
713 unsigned int display_clock_mode:1; 943 unsigned int display_clock_mode:1;
714 int lvds_ssc_freq; 944 int lvds_ssc_freq;
715 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ 945 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
716 unsigned int lvds_val; /* used for checking LVDS channel mode */
717 struct { 946 struct {
718 int rate; 947 int rate;
719 int lanes; 948 int lanes;
@@ -734,11 +963,6 @@ typedef struct drm_i915_private {
734 963
735 unsigned int fsb_freq, mem_freq, is_ddr3; 964 unsigned int fsb_freq, mem_freq, is_ddr3;
736 965
737 spinlock_t error_lock;
738 /* Protected by dev->error_lock. */
739 struct drm_i915_error_state *first_error;
740 struct work_struct error_work;
741 struct completion error_completion;
742 struct workqueue_struct *wq; 966 struct workqueue_struct *wq;
743 967
744 /* Display functions */ 968 /* Display functions */
@@ -750,115 +974,12 @@ typedef struct drm_i915_private {
750 974
751 unsigned long quirks; 975 unsigned long quirks;
752 976
753 /* Register state */ 977 enum modeset_restore modeset_restore;
754 bool modeset_on_lid; 978 struct mutex modeset_restore_lock;
755 979
756 struct { 980 struct i915_gtt gtt;
757 /** Bridge to intel-gtt-ko */ 981
758 struct intel_gtt *gtt; 982 struct i915_gem_mm mm;
759 /** Memory allocator for GTT stolen memory */
760 struct drm_mm stolen;
761 /** Memory allocator for GTT */
762 struct drm_mm gtt_space;
763 /** List of all objects in gtt_space. Used to restore gtt
764 * mappings on resume */
765 struct list_head bound_list;
766 /**
767 * List of objects which are not bound to the GTT (thus
768 * are idle and not used by the GPU) but still have
769 * (presumably uncached) pages still attached.
770 */
771 struct list_head unbound_list;
772
773 /** Usable portion of the GTT for GEM */
774 unsigned long gtt_start;
775 unsigned long gtt_mappable_end;
776 unsigned long gtt_end;
777
778 struct io_mapping *gtt_mapping;
779 phys_addr_t gtt_base_addr;
780 int gtt_mtrr;
781
782 /** PPGTT used for aliasing the PPGTT with the GTT */
783 struct i915_hw_ppgtt *aliasing_ppgtt;
784
785 struct shrinker inactive_shrinker;
786 bool shrinker_no_lock_stealing;
787
788 /**
789 * List of objects currently involved in rendering.
790 *
791 * Includes buffers having the contents of their GPU caches
792 * flushed, not necessarily primitives. last_rendering_seqno
793 * represents when the rendering involved will be completed.
794 *
795 * A reference is held on the buffer while on this list.
796 */
797 struct list_head active_list;
798
799 /**
800 * LRU list of objects which are not in the ringbuffer and
801 * are ready to unbind, but are still in the GTT.
802 *
803 * last_rendering_seqno is 0 while an object is in this list.
804 *
805 * A reference is not held on the buffer while on this list,
806 * as merely being GTT-bound shouldn't prevent its being
807 * freed, and we'll pull it off the list in the free path.
808 */
809 struct list_head inactive_list;
810
811 /** LRU list of objects with fence regs on them. */
812 struct list_head fence_list;
813
814 /**
815 * We leave the user IRQ off as much as possible,
816 * but this means that requests will finish and never
817 * be retired once the system goes idle. Set a timer to
818 * fire periodically while the ring is running. When it
819 * fires, go retire requests.
820 */
821 struct delayed_work retire_work;
822
823 /**
824 * Are we in a non-interruptible section of code like
825 * modesetting?
826 */
827 bool interruptible;
828
829 /**
830 * Flag if the X Server, and thus DRM, is not currently in
831 * control of the device.
832 *
833 * This is set between LeaveVT and EnterVT. It needs to be
834 * replaced with a semaphore. It also needs to be
835 * transitioned away from for kernel modesetting.
836 */
837 int suspended;
838
839 /**
840 * Flag if the hardware appears to be wedged.
841 *
842 * This is set when attempts to idle the device timeout.
843 * It prevents command submission from occurring and makes
844 * every pending request fail
845 */
846 atomic_t wedged;
847
848 /** Bit 6 swizzling required for X tiling */
849 uint32_t bit_6_swizzle_x;
850 /** Bit 6 swizzling required for Y tiling */
851 uint32_t bit_6_swizzle_y;
852
853 /* storage for physical objects */
854 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
855
856 /* accounting, useful for userland debugging */
857 size_t gtt_total;
858 size_t mappable_gtt_total;
859 size_t object_memory;
860 u32 object_count;
861 } mm;
862 983
863 /* Kernel Modesetting */ 984 /* Kernel Modesetting */
864 985
@@ -900,7 +1021,7 @@ typedef struct drm_i915_private {
900 struct drm_mm_node *compressed_fb; 1021 struct drm_mm_node *compressed_fb;
901 struct drm_mm_node *compressed_llb; 1022 struct drm_mm_node *compressed_llb;
902 1023
903 unsigned long last_gpu_reset; 1024 struct i915_gpu_error gpu_error;
904 1025
905 /* list of fbdev register on this device */ 1026 /* list of fbdev register on this device */
906 struct intel_fbdev *fbdev; 1027 struct intel_fbdev *fbdev;
@@ -919,7 +1040,7 @@ typedef struct drm_i915_private {
919 bool hw_contexts_disabled; 1040 bool hw_contexts_disabled;
920 uint32_t hw_context_size; 1041 uint32_t hw_context_size;
921 1042
922 bool fdi_rx_polarity_reversed; 1043 u32 fdi_rx_config;
923 1044
924 struct i915_suspend_saved_registers regfile; 1045 struct i915_suspend_saved_registers regfile;
925 1046
@@ -940,11 +1061,7 @@ enum hdmi_force_audio {
940 HDMI_AUDIO_ON, /* force turn on HDMI audio */ 1061 HDMI_AUDIO_ON, /* force turn on HDMI audio */
941}; 1062};
942 1063
943enum i915_cache_level { 1064#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)
944 I915_CACHE_NONE = 0,
945 I915_CACHE_LLC,
946 I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
947};
948 1065
949struct drm_i915_gem_object_ops { 1066struct drm_i915_gem_object_ops {
950 /* Interface between the GEM object and its backing storage. 1067 /* Interface between the GEM object and its backing storage.
@@ -971,6 +1088,8 @@ struct drm_i915_gem_object {
971 1088
972 /** Current space allocated to this object in the GTT, if any. */ 1089 /** Current space allocated to this object in the GTT, if any. */
973 struct drm_mm_node *gtt_space; 1090 struct drm_mm_node *gtt_space;
1091 /** Stolen memory for this object, instead of being backed by shmem. */
1092 struct drm_mm_node *stolen;
974 struct list_head gtt_list; 1093 struct list_head gtt_list;
975 1094
976 /** This object's place on the active/inactive lists */ 1095 /** This object's place on the active/inactive lists */
@@ -1096,13 +1215,6 @@ struct drm_i915_gem_object {
1096 1215
1097 /** for phy allocated objects */ 1216 /** for phy allocated objects */
1098 struct drm_i915_gem_phys_object *phys_obj; 1217 struct drm_i915_gem_phys_object *phys_obj;
1099
1100 /**
1101 * Number of crtcs where this object is currently the fb, but
1102 * will be page flipped away on the next vblank. When it
1103 * reaches 0, dev_priv->pending_flip_queue will be woken up.
1104 */
1105 atomic_t pending_flip;
1106}; 1218};
1107#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base) 1219#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
1108 1220
@@ -1141,7 +1253,7 @@ struct drm_i915_gem_request {
1141 1253
1142struct drm_i915_file_private { 1254struct drm_i915_file_private {
1143 struct { 1255 struct {
1144 struct spinlock lock; 1256 spinlock_t lock;
1145 struct list_head request_list; 1257 struct list_head request_list;
1146 } mm; 1258 } mm;
1147 struct idr context_idr; 1259 struct idr context_idr;
@@ -1227,6 +1339,8 @@ struct drm_i915_file_private {
1227 1339
1228#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) 1340#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
1229 1341
1342#define HAS_DDI(dev) (IS_HASWELL(dev))
1343
1230#define INTEL_PCH_DEVICE_ID_MASK 0xff00 1344#define INTEL_PCH_DEVICE_ID_MASK 0xff00
1231#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 1345#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
1232#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 1346#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
@@ -1323,6 +1437,7 @@ void i915_hangcheck_elapsed(unsigned long data);
1323void i915_handle_error(struct drm_device *dev, bool wedged); 1437void i915_handle_error(struct drm_device *dev, bool wedged);
1324 1438
1325extern void intel_irq_init(struct drm_device *dev); 1439extern void intel_irq_init(struct drm_device *dev);
1440extern void intel_hpd_init(struct drm_device *dev);
1326extern void intel_gt_init(struct drm_device *dev); 1441extern void intel_gt_init(struct drm_device *dev);
1327extern void intel_gt_reset(struct drm_device *dev); 1442extern void intel_gt_reset(struct drm_device *dev);
1328 1443
@@ -1391,18 +1506,22 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
1391int i915_gem_wait_ioctl(struct drm_device *dev, void *data, 1506int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
1392 struct drm_file *file_priv); 1507 struct drm_file *file_priv);
1393void i915_gem_load(struct drm_device *dev); 1508void i915_gem_load(struct drm_device *dev);
1509void *i915_gem_object_alloc(struct drm_device *dev);
1510void i915_gem_object_free(struct drm_i915_gem_object *obj);
1394int i915_gem_init_object(struct drm_gem_object *obj); 1511int i915_gem_init_object(struct drm_gem_object *obj);
1395void i915_gem_object_init(struct drm_i915_gem_object *obj, 1512void i915_gem_object_init(struct drm_i915_gem_object *obj,
1396 const struct drm_i915_gem_object_ops *ops); 1513 const struct drm_i915_gem_object_ops *ops);
1397struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 1514struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
1398 size_t size); 1515 size_t size);
1399void i915_gem_free_object(struct drm_gem_object *obj); 1516void i915_gem_free_object(struct drm_gem_object *obj);
1517
1400int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, 1518int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
1401 uint32_t alignment, 1519 uint32_t alignment,
1402 bool map_and_fenceable, 1520 bool map_and_fenceable,
1403 bool nonblocking); 1521 bool nonblocking);
1404void i915_gem_object_unpin(struct drm_i915_gem_object *obj); 1522void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
1405int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); 1523int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
1524int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
1406void i915_gem_release_mmap(struct drm_i915_gem_object *obj); 1525void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
1407void i915_gem_lastclose(struct drm_device *dev); 1526void i915_gem_lastclose(struct drm_device *dev);
1408 1527
@@ -1454,8 +1573,8 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1454 return (int32_t)(seq1 - seq2) >= 0; 1573 return (int32_t)(seq1 - seq2) >= 0;
1455} 1574}
1456 1575
1457extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); 1576int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
1458 1577int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
1459int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj); 1578int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
1460int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); 1579int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
1461 1580
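
The seqno interface gains i915_gem_set_seqno() for wrap handling, while the comparison in i915_seqno_passed() (shown in the hunk header context) stays correct across a 32-bit wrap because it looks only at the signed difference. A standalone worked example:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
    	uint32_t before_wrap = 0xfffffff0u;
    	uint32_t after_wrap  = 0x00000010u;

    	/* signed difference is small and positive across the wrap */
    	assert((int32_t)(after_wrap - before_wrap) >= 0);
    	assert((int32_t)(before_wrap - after_wrap) < 0);
    	return 0;
    }
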
@@ -1481,8 +1600,18 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
1481 1600
1482void i915_gem_retire_requests(struct drm_device *dev); 1601void i915_gem_retire_requests(struct drm_device *dev);
1483void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring); 1602void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
1484int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv, 1603int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
1485 bool interruptible); 1604 bool interruptible);
1605static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
1606{
1607 return unlikely(atomic_read(&error->reset_counter)
1608 & I915_RESET_IN_PROGRESS_FLAG);
1609}
1610
1611static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
1612{
1613 return atomic_read(&error->reset_counter) == I915_WEDGED;
1614}
1486 1615
1487void i915_gem_reset(struct drm_device *dev); 1616void i915_gem_reset(struct drm_device *dev);
1488void i915_gem_clflush_object(struct drm_i915_gem_object *obj); 1617void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
@@ -1523,9 +1652,10 @@ void i915_gem_free_all_phys_object(struct drm_device *dev);
1523void i915_gem_release(struct drm_device *dev, struct drm_file *file); 1652void i915_gem_release(struct drm_device *dev, struct drm_file *file);
1524 1653
1525uint32_t 1654uint32_t
1526i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, 1655i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
1527 uint32_t size, 1656uint32_t
1528 int tiling_mode); 1657i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1658 int tiling_mode, bool fenced);
1529 1659
1530int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, 1660int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
1531 enum i915_cache_level cache_level); 1661 enum i915_cache_level cache_level);
@@ -1548,7 +1678,6 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
1548 struct drm_file *file); 1678 struct drm_file *file);
1549 1679
1550/* i915_gem_gtt.c */ 1680/* i915_gem_gtt.c */
1551int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
1552void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev); 1681void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
1553void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, 1682void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
1554 struct drm_i915_gem_object *obj, 1683 struct drm_i915_gem_object *obj,
@@ -1562,12 +1691,10 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
1562 enum i915_cache_level cache_level); 1691 enum i915_cache_level cache_level);
1563void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); 1692void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
1564void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj); 1693void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
1565void i915_gem_init_global_gtt(struct drm_device *dev, 1694void i915_gem_init_global_gtt(struct drm_device *dev);
1566 unsigned long start, 1695void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
1567 unsigned long mappable_end, 1696 unsigned long mappable_end, unsigned long end);
1568 unsigned long end);
1569int i915_gem_gtt_init(struct drm_device *dev); 1697int i915_gem_gtt_init(struct drm_device *dev);
1570void i915_gem_gtt_fini(struct drm_device *dev);
1571static inline void i915_gem_chipset_flush(struct drm_device *dev) 1698static inline void i915_gem_chipset_flush(struct drm_device *dev)
1572{ 1699{
1573 if (INTEL_INFO(dev)->gen < 6) 1700 if (INTEL_INFO(dev)->gen < 6)
@@ -1585,9 +1712,22 @@ int i915_gem_evict_everything(struct drm_device *dev);
1585 1712
1586/* i915_gem_stolen.c */ 1713/* i915_gem_stolen.c */
1587int i915_gem_init_stolen(struct drm_device *dev); 1714int i915_gem_init_stolen(struct drm_device *dev);
1715int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
1716void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
1588void i915_gem_cleanup_stolen(struct drm_device *dev); 1717void i915_gem_cleanup_stolen(struct drm_device *dev);
1718struct drm_i915_gem_object *
1719i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
1720void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
1589 1721
1590/* i915_gem_tiling.c */ 1722/* i915_gem_tiling.c */
1723static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
1724{
1725 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
1726
1727 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
1728 obj->tiling_mode != I915_TILING_NONE;
1729}
1730
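This helper only gates the slow path; the swizzle itself (for the 9_10_17 mode) XORs address bit 6 with higher address bits, which is why objects need fixups when shmem pages come back at a different bit-17 address. A sketch of that formula, as implied by the mode name rather than taken from this patch:

/* Sketch, assuming the 9_10_17 mode means physical address bit 6 is
 * XORed with bits 9, 10 and 17 (assumption, not code from this diff). */
static inline unsigned long example_swizzle_addr(unsigned long addr)
{
	unsigned long bit6 = ((addr >> 6) ^ (addr >> 9) ^
			      (addr >> 10) ^ (addr >> 17)) & 1;
	return (addr & ~(1UL << 6)) | (bit6 << 6);
}
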
1591void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 1731void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
1592void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); 1732void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
1593void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); 1733void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
@@ -1613,9 +1753,9 @@ void i915_debugfs_cleanup(struct drm_minor *minor);
1613extern int i915_save_state(struct drm_device *dev); 1753extern int i915_save_state(struct drm_device *dev);
1614extern int i915_restore_state(struct drm_device *dev); 1754extern int i915_restore_state(struct drm_device *dev);
1615 1755
1616/* i915_suspend.c */ 1756/* i915_ums.c */
1617extern int i915_save_state(struct drm_device *dev); 1757void i915_save_display_reg(struct drm_device *dev);
1618extern int i915_restore_state(struct drm_device *dev); 1758void i915_restore_display_reg(struct drm_device *dev);
1619 1759
1620/* i915_sysfs.c */ 1760/* i915_sysfs.c */
1621void i915_setup_sysfs(struct drm_device *dev_priv); 1761void i915_setup_sysfs(struct drm_device *dev_priv);
@@ -1672,6 +1812,7 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
1672extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); 1812extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
1673extern void intel_modeset_setup_hw_state(struct drm_device *dev, 1813extern void intel_modeset_setup_hw_state(struct drm_device *dev,
1674 bool force_restore); 1814 bool force_restore);
1815extern void i915_redisable_vga(struct drm_device *dev);
1675extern bool intel_fbc_enabled(struct drm_device *dev); 1816extern bool intel_fbc_enabled(struct drm_device *dev);
1676extern void intel_disable_fbc(struct drm_device *dev); 1817extern void intel_disable_fbc(struct drm_device *dev);
1677extern bool ironlake_set_drps(struct drm_device *dev, u8 val); 1818extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
@@ -1744,5 +1885,19 @@ __i915_write(64, q)
1744#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) 1885#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
1745#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) 1886#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
1746 1887
1888/* "Broadcast RGB" property */
1889#define INTEL_BROADCAST_RGB_AUTO 0
1890#define INTEL_BROADCAST_RGB_FULL 1
1891#define INTEL_BROADCAST_RGB_LIMITED 2
1892
1893static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
1894{
1895 if (HAS_PCH_SPLIT(dev))
1896 return CPU_VGACNTRL;
1897 else if (IS_VALLEYVIEW(dev))
1898 return VLV_VGACNTRL;
1899 else
1900 return VGACNTRL;
1901}
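
A hedged usage sketch: the helper lets VGA plane code stay platform-agnostic. VGA_DISP_DISABLE is assumed to be the existing register bit for turning the plane off:

	/* Sketch only: pick the per-platform VGA control register. */
	u32 vga_reg = i915_vgacntrl_reg(dev);

	I915_WRITE(vga_reg, I915_READ(vga_reg) | VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);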
1747 1902
1748#endif 1903#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8febea6daa08..8413ffced815 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -87,47 +87,43 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
87} 87}
88 88
89static int 89static int
90i915_gem_wait_for_error(struct drm_device *dev) 90i915_gem_wait_for_error(struct i915_gpu_error *error)
91{ 91{
92 struct drm_i915_private *dev_priv = dev->dev_private;
93 struct completion *x = &dev_priv->error_completion;
94 unsigned long flags;
95 int ret; 92 int ret;
96 93
97 if (!atomic_read(&dev_priv->mm.wedged)) 94#define EXIT_COND (!i915_reset_in_progress(error))
95 if (EXIT_COND)
98 return 0; 96 return 0;
99 97
98 /* GPU is already declared terminally dead, give up. */
99 if (i915_terminally_wedged(error))
100 return -EIO;
101
100 /* 102 /*
101 * Only wait 10 seconds for the gpu reset to complete to avoid hanging 103 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
 102 * userspace. If it takes that long, something really bad is going on, and 104 * userspace. If it takes that long, something really bad is going on, and
103 * we should simply try to bail out and fail as gracefully as possible. 105 * we should simply try to bail out and fail as gracefully as possible.
104 */ 106 */
105 ret = wait_for_completion_interruptible_timeout(x, 10*HZ); 107 ret = wait_event_interruptible_timeout(error->reset_queue,
108 EXIT_COND,
109 10*HZ);
106 if (ret == 0) { 110 if (ret == 0) {
107 DRM_ERROR("Timed out waiting for the gpu reset to complete\n"); 111 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
108 return -EIO; 112 return -EIO;
109 } else if (ret < 0) { 113 } else if (ret < 0) {
110 return ret; 114 return ret;
111 } 115 }
116#undef EXIT_COND
112 117
113 if (atomic_read(&dev_priv->mm.wedged)) {
114 /* GPU is hung, bump the completion count to account for
115 * the token we just consumed so that we never hit zero and
116 * end up waiting upon a subsequent completion event that
117 * will never happen.
118 */
119 spin_lock_irqsave(&x->wait.lock, flags);
120 x->done++;
121 spin_unlock_irqrestore(&x->wait.lock, flags);
122 }
123 return 0; 118 return 0;
124} 119}
125 120
126int i915_mutex_lock_interruptible(struct drm_device *dev) 121int i915_mutex_lock_interruptible(struct drm_device *dev)
127{ 122{
123 struct drm_i915_private *dev_priv = dev->dev_private;
128 int ret; 124 int ret;
129 125
130 ret = i915_gem_wait_for_error(dev); 126 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
131 if (ret) 127 if (ret)
132 return ret; 128 return ret;
133 129
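The completion-based wait is replaced by wait_event_interruptible_timeout() on the new reset_queue, with the condition hidden behind a scoped EXIT_COND macro. A self-contained sketch of the same three-way return handling (queue and done_flag are hypothetical names):

/* Sketch: wait_event_interruptible_timeout() return convention.
 * > 0: condition true, remaining jiffies; 0: timed out; < 0: signal. */
#define EXIT_COND (done_flag)
	ret = wait_event_interruptible_timeout(queue, EXIT_COND, 10 * HZ);
	if (ret == 0)
		return -EIO;		/* timed out waiting */
	else if (ret < 0)
		return ret;		/* -ERESTARTSYS from a signal */
#undef EXIT_COND
	return 0;			/* condition became true in time */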
@@ -149,6 +145,7 @@ int
149i915_gem_init_ioctl(struct drm_device *dev, void *data, 145i915_gem_init_ioctl(struct drm_device *dev, void *data,
150 struct drm_file *file) 146 struct drm_file *file)
151{ 147{
148 struct drm_i915_private *dev_priv = dev->dev_private;
152 struct drm_i915_gem_init *args = data; 149 struct drm_i915_gem_init *args = data;
153 150
154 if (drm_core_check_feature(dev, DRIVER_MODESET)) 151 if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -163,8 +160,9 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
163 return -ENODEV; 160 return -ENODEV;
164 161
165 mutex_lock(&dev->struct_mutex); 162 mutex_lock(&dev->struct_mutex);
166 i915_gem_init_global_gtt(dev, args->gtt_start, 163 i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
167 args->gtt_end, args->gtt_end); 164 args->gtt_end);
165 dev_priv->gtt.mappable_end = args->gtt_end;
168 mutex_unlock(&dev->struct_mutex); 166 mutex_unlock(&dev->struct_mutex);
169 167
170 return 0; 168 return 0;
@@ -186,12 +184,24 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
186 pinned += obj->gtt_space->size; 184 pinned += obj->gtt_space->size;
187 mutex_unlock(&dev->struct_mutex); 185 mutex_unlock(&dev->struct_mutex);
188 186
189 args->aper_size = dev_priv->mm.gtt_total; 187 args->aper_size = dev_priv->gtt.total;
190 args->aper_available_size = args->aper_size - pinned; 188 args->aper_available_size = args->aper_size - pinned;
191 189
192 return 0; 190 return 0;
193} 191}
194 192
193void *i915_gem_object_alloc(struct drm_device *dev)
194{
195 struct drm_i915_private *dev_priv = dev->dev_private;
196 return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO);
197}
198
199void i915_gem_object_free(struct drm_i915_gem_object *obj)
200{
201 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
202 kmem_cache_free(dev_priv->slab, obj);
203}
204
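GEM objects now come from a dedicated slab (created with kmem_cache_create() in i915_gem_load() further down in this patch) instead of plain kzalloc(). A sketch of the lifecycle these two helpers implement:

	/* Sketch: allocate, use and free one object through the new cache.
	 * GFP_KERNEL | __GFP_ZERO preserves the old kzalloc() zeroing. */
	struct drm_i915_gem_object *obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return -ENOMEM;
	/* ... drm_gem_object_init(), etc ... */
	i915_gem_object_free(obj);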
195static int 205static int
196i915_gem_create(struct drm_file *file, 206i915_gem_create(struct drm_file *file,
197 struct drm_device *dev, 207 struct drm_device *dev,
@@ -215,7 +225,7 @@ i915_gem_create(struct drm_file *file,
215 if (ret) { 225 if (ret) {
216 drm_gem_object_release(&obj->base); 226 drm_gem_object_release(&obj->base);
217 i915_gem_info_remove_obj(dev->dev_private, obj->base.size); 227 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
218 kfree(obj); 228 i915_gem_object_free(obj);
219 return ret; 229 return ret;
220 } 230 }
221 231
@@ -259,14 +269,6 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
259 args->size, &args->handle); 269 args->size, &args->handle);
260} 270}
261 271
262static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
263{
264 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
265
266 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
267 obj->tiling_mode != I915_TILING_NONE;
268}
269
270static inline int 272static inline int
271__copy_to_user_swizzled(char __user *cpu_vaddr, 273__copy_to_user_swizzled(char __user *cpu_vaddr,
272 const char *gpu_vaddr, int gpu_offset, 274 const char *gpu_vaddr, int gpu_offset,
@@ -407,7 +409,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
407 loff_t offset; 409 loff_t offset;
408 int shmem_page_offset, page_length, ret = 0; 410 int shmem_page_offset, page_length, ret = 0;
409 int obj_do_bit17_swizzling, page_do_bit17_swizzling; 411 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
410 int hit_slowpath = 0;
411 int prefaulted = 0; 412 int prefaulted = 0;
412 int needs_clflush = 0; 413 int needs_clflush = 0;
413 struct scatterlist *sg; 414 struct scatterlist *sg;
@@ -469,7 +470,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
469 if (ret == 0) 470 if (ret == 0)
470 goto next_page; 471 goto next_page;
471 472
472 hit_slowpath = 1;
473 mutex_unlock(&dev->struct_mutex); 473 mutex_unlock(&dev->struct_mutex);
474 474
475 if (!prefaulted) { 475 if (!prefaulted) {
@@ -502,12 +502,6 @@ next_page:
502out: 502out:
503 i915_gem_object_unpin_pages(obj); 503 i915_gem_object_unpin_pages(obj);
504 504
505 if (hit_slowpath) {
506 /* Fixup: Kill any reinstated backing storage pages */
507 if (obj->madv == __I915_MADV_PURGED)
508 i915_gem_object_truncate(obj);
509 }
510
511 return ret; 505 return ret;
512} 506}
513 507
@@ -641,7 +635,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
641 * source page isn't available. Return the error and we'll 635 * source page isn't available. Return the error and we'll
642 * retry in the slow path. 636 * retry in the slow path.
643 */ 637 */
644 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base, 638 if (fast_user_write(dev_priv->gtt.mappable, page_base,
645 page_offset, user_data, page_length)) { 639 page_offset, user_data, page_length)) {
646 ret = -EFAULT; 640 ret = -EFAULT;
647 goto out_unpin; 641 goto out_unpin;
@@ -838,12 +832,13 @@ out:
838 i915_gem_object_unpin_pages(obj); 832 i915_gem_object_unpin_pages(obj);
839 833
840 if (hit_slowpath) { 834 if (hit_slowpath) {
841 /* Fixup: Kill any reinstated backing storage pages */ 835 /*
842 if (obj->madv == __I915_MADV_PURGED) 836 * Fixup: Flush cpu caches in case we didn't flush the dirty
843 i915_gem_object_truncate(obj); 837 * cachelines in-line while writing and the object moved
844 /* and flush dirty cachelines in case the object isn't in the cpu write 838 * out of the cpu write domain while we've dropped the lock.
845 * domain anymore. */ 839 */
846 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) { 840 if (!needs_clflush_after &&
841 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
847 i915_gem_clflush_object(obj); 842 i915_gem_clflush_object(obj);
848 i915_gem_chipset_flush(dev); 843 i915_gem_chipset_flush(dev);
849 } 844 }
@@ -940,26 +935,17 @@ unlock:
940} 935}
941 936
942int 937int
943i915_gem_check_wedge(struct drm_i915_private *dev_priv, 938i915_gem_check_wedge(struct i915_gpu_error *error,
944 bool interruptible) 939 bool interruptible)
945{ 940{
946 if (atomic_read(&dev_priv->mm.wedged)) { 941 if (i915_reset_in_progress(error)) {
947 struct completion *x = &dev_priv->error_completion;
948 bool recovery_complete;
949 unsigned long flags;
950
951 /* Give the error handler a chance to run. */
952 spin_lock_irqsave(&x->wait.lock, flags);
953 recovery_complete = x->done > 0;
954 spin_unlock_irqrestore(&x->wait.lock, flags);
955
956 /* Non-interruptible callers can't handle -EAGAIN, hence return 942 /* Non-interruptible callers can't handle -EAGAIN, hence return
957 * -EIO unconditionally for these. */ 943 * -EIO unconditionally for these. */
958 if (!interruptible) 944 if (!interruptible)
959 return -EIO; 945 return -EIO;
960 946
961 /* Recovery complete, but still wedged means reset failure. */ 947 /* Recovery complete, but the reset failed ... */
962 if (recovery_complete) 948 if (i915_terminally_wedged(error))
963 return -EIO; 949 return -EIO;
964 950
965 return -EAGAIN; 951 return -EAGAIN;
@@ -990,13 +976,22 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
990 * __wait_seqno - wait until execution of seqno has finished 976 * __wait_seqno - wait until execution of seqno has finished
991 * @ring: the ring expected to report seqno 977 * @ring: the ring expected to report seqno
992 * @seqno: duh! 978 * @seqno: duh!
979 * @reset_counter: reset sequence associated with the given seqno
993 * @interruptible: do an interruptible wait (normally yes) 980 * @interruptible: do an interruptible wait (normally yes)
994 * @timeout: in - how long to wait (NULL forever); out - how much time remaining 981 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
995 * 982 *
983 * Note: It is of utmost importance that the passed in seqno and reset_counter
 984 * values have been read by the caller in an SMP-safe manner. Where read-side
 985 * locks are involved, it is sufficient to read the reset_counter before
 986 * unlocking the lock that protects the seqno. For lockless tricks, the
 987 * reset_counter _must_ be read before the seqno, and an appropriate smp_rmb
 988 * must be inserted.
989 *
 996 * Returns 0 if the seqno was found within the allotted time. Else returns the 990 * Returns 0 if the seqno was found within the allotted time. Else returns the
997 * errno with remaining time filled in timeout argument. 991 * errno with remaining time filled in timeout argument.
998 */ 992 */
999static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, 993static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
994 unsigned reset_counter,
1000 bool interruptible, struct timespec *timeout) 995 bool interruptible, struct timespec *timeout)
1001{ 996{
1002 drm_i915_private_t *dev_priv = ring->dev->dev_private; 997 drm_i915_private_t *dev_priv = ring->dev->dev_private;
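The read-side rule from the comment above, as it appears later in this patch (the nonblocking wait and the wait ioctl): cache the counter while still holding the lock that protects the seqno, then drop the lock before sleeping:

	/* Sketch of the locked read pattern __wait_seqno() relies on. */
	mutex_lock(&dev->struct_mutex);
	seqno = obj->last_read_seqno;
	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	mutex_unlock(&dev->struct_mutex);

	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);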
@@ -1026,7 +1021,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1026 1021
1027#define EXIT_COND \ 1022#define EXIT_COND \
1028 (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \ 1023 (i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
1029 atomic_read(&dev_priv->mm.wedged)) 1024 i915_reset_in_progress(&dev_priv->gpu_error) || \
1025 reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1030 do { 1026 do {
1031 if (interruptible) 1027 if (interruptible)
1032 end = wait_event_interruptible_timeout(ring->irq_queue, 1028 end = wait_event_interruptible_timeout(ring->irq_queue,
@@ -1036,7 +1032,14 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1036 end = wait_event_timeout(ring->irq_queue, EXIT_COND, 1032 end = wait_event_timeout(ring->irq_queue, EXIT_COND,
1037 timeout_jiffies); 1033 timeout_jiffies);
1038 1034
1039 ret = i915_gem_check_wedge(dev_priv, interruptible); 1035 /* We need to check whether any gpu reset happened in between
1036 * the caller grabbing the seqno and now ... */
1037 if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
1038 end = -EAGAIN;
1039
 1040 /* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly
 1041 * gone. */
1042 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1040 if (ret) 1043 if (ret)
1041 end = ret; 1044 end = ret;
1042 } while (end == 0 && wait_forever); 1045 } while (end == 0 && wait_forever);
@@ -1082,7 +1085,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1082 BUG_ON(!mutex_is_locked(&dev->struct_mutex)); 1085 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1083 BUG_ON(seqno == 0); 1086 BUG_ON(seqno == 0);
1084 1087
1085 ret = i915_gem_check_wedge(dev_priv, interruptible); 1088 ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
1086 if (ret) 1089 if (ret)
1087 return ret; 1090 return ret;
1088 1091
@@ -1090,7 +1093,9 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
1090 if (ret) 1093 if (ret)
1091 return ret; 1094 return ret;
1092 1095
1093 return __wait_seqno(ring, seqno, interruptible, NULL); 1096 return __wait_seqno(ring, seqno,
1097 atomic_read(&dev_priv->gpu_error.reset_counter),
1098 interruptible, NULL);
1094} 1099}
1095 1100
1096/** 1101/**
@@ -1137,6 +1142,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1137 struct drm_device *dev = obj->base.dev; 1142 struct drm_device *dev = obj->base.dev;
1138 struct drm_i915_private *dev_priv = dev->dev_private; 1143 struct drm_i915_private *dev_priv = dev->dev_private;
1139 struct intel_ring_buffer *ring = obj->ring; 1144 struct intel_ring_buffer *ring = obj->ring;
1145 unsigned reset_counter;
1140 u32 seqno; 1146 u32 seqno;
1141 int ret; 1147 int ret;
1142 1148
@@ -1147,7 +1153,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1147 if (seqno == 0) 1153 if (seqno == 0)
1148 return 0; 1154 return 0;
1149 1155
1150 ret = i915_gem_check_wedge(dev_priv, true); 1156 ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
1151 if (ret) 1157 if (ret)
1152 return ret; 1158 return ret;
1153 1159
@@ -1155,8 +1161,9 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1155 if (ret) 1161 if (ret)
1156 return ret; 1162 return ret;
1157 1163
1164 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1158 mutex_unlock(&dev->struct_mutex); 1165 mutex_unlock(&dev->struct_mutex);
1159 ret = __wait_seqno(ring, seqno, true, NULL); 1166 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
1160 mutex_lock(&dev->struct_mutex); 1167 mutex_lock(&dev->struct_mutex);
1161 1168
1162 i915_gem_retire_requests_ring(ring); 1169 i915_gem_retire_requests_ring(ring);
@@ -1344,6 +1351,12 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1344 1351
1345 trace_i915_gem_object_fault(obj, page_offset, true, write); 1352 trace_i915_gem_object_fault(obj, page_offset, true, write);
1346 1353
1354 /* Access to snoopable pages through the GTT is incoherent. */
1355 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1356 ret = -EINVAL;
1357 goto unlock;
1358 }
1359
1347 /* Now bind it into the GTT if needed */ 1360 /* Now bind it into the GTT if needed */
1348 ret = i915_gem_object_pin(obj, 0, true, false); 1361 ret = i915_gem_object_pin(obj, 0, true, false);
1349 if (ret) 1362 if (ret)
@@ -1359,7 +1372,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1359 1372
1360 obj->fault_mappable = true; 1373 obj->fault_mappable = true;
1361 1374
1362 pfn = ((dev_priv->mm.gtt_base_addr + obj->gtt_offset) >> PAGE_SHIFT) + 1375 pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) +
1363 page_offset; 1376 page_offset;
1364 1377
1365 /* Finally, remap it using the new GTT offset */ 1378 /* Finally, remap it using the new GTT offset */
@@ -1374,7 +1387,7 @@ out:
1374 /* If this -EIO is due to a gpu hang, give the reset code a 1387 /* If this -EIO is due to a gpu hang, give the reset code a
1375 * chance to clean up the mess. Otherwise return the proper 1388 * chance to clean up the mess. Otherwise return the proper
1376 * SIGBUS. */ 1389 * SIGBUS. */
1377 if (!atomic_read(&dev_priv->mm.wedged)) 1390 if (i915_terminally_wedged(&dev_priv->gpu_error))
1378 return VM_FAULT_SIGBUS; 1391 return VM_FAULT_SIGBUS;
1379 case -EAGAIN: 1392 case -EAGAIN:
1380 /* Give the error handler a chance to run and move the 1393 /* Give the error handler a chance to run and move the
@@ -1432,7 +1445,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1432 obj->fault_mappable = false; 1445 obj->fault_mappable = false;
1433} 1446}
1434 1447
1435static uint32_t 1448uint32_t
1436i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) 1449i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1437{ 1450{
1438 uint32_t gtt_size; 1451 uint32_t gtt_size;
@@ -1460,16 +1473,15 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1460 * Return the required GTT alignment for an object, taking into account 1473 * Return the required GTT alignment for an object, taking into account
1461 * potential fence register mapping. 1474 * potential fence register mapping.
1462 */ 1475 */
1463static uint32_t 1476uint32_t
1464i915_gem_get_gtt_alignment(struct drm_device *dev, 1477i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1465 uint32_t size, 1478 int tiling_mode, bool fenced)
1466 int tiling_mode)
1467{ 1479{
1468 /* 1480 /*
1469 * Minimum alignment is 4k (GTT page size), but might be greater 1481 * Minimum alignment is 4k (GTT page size), but might be greater
1470 * if a fence register is needed for the object. 1482 * if a fence register is needed for the object.
1471 */ 1483 */
1472 if (INTEL_INFO(dev)->gen >= 4 || 1484 if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
1473 tiling_mode == I915_TILING_NONE) 1485 tiling_mode == I915_TILING_NONE)
1474 return 4096; 1486 return 4096;
1475 1487
@@ -1480,35 +1492,6 @@ i915_gem_get_gtt_alignment(struct drm_device *dev,
1480 return i915_gem_get_gtt_size(dev, size, tiling_mode); 1492 return i915_gem_get_gtt_size(dev, size, tiling_mode);
1481} 1493}
1482 1494
1483/**
1484 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1485 * unfenced object
1486 * @dev: the device
1487 * @size: size of the object
1488 * @tiling_mode: tiling mode of the object
1489 *
1490 * Return the required GTT alignment for an object, only taking into account
1491 * unfenced tiled surface requirements.
1492 */
1493uint32_t
1494i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
1495 uint32_t size,
1496 int tiling_mode)
1497{
1498 /*
1499 * Minimum alignment is 4k (GTT page size) for sane hw.
1500 */
1501 if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
1502 tiling_mode == I915_TILING_NONE)
1503 return 4096;
1504
1505 /* Previous hardware however needs to be aligned to a power-of-two
1506 * tile height. The simplest method for determining this is to reuse
1507 * the power-of-tile object size.
1508 */
1509 return i915_gem_get_gtt_size(dev, size, tiling_mode);
1510}
1511
1512static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) 1495static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1513{ 1496{
1514 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 1497 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
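With the unfenced variant folded in, both alignments now come from one helper, distinguished by the new bool argument; this is exactly how the bind path later in this patch calls it:

	fence_alignment = i915_gem_get_gtt_alignment(dev, obj->base.size,
						     obj->tiling_mode, true);
	unfenced_alignment = i915_gem_get_gtt_alignment(dev, obj->base.size,
							obj->tiling_mode, false);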
@@ -1571,7 +1554,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
1571 goto unlock; 1554 goto unlock;
1572 } 1555 }
1573 1556
1574 if (obj->base.size > dev_priv->mm.gtt_mappable_end) { 1557 if (obj->base.size > dev_priv->gtt.mappable_end) {
1575 ret = -E2BIG; 1558 ret = -E2BIG;
1576 goto out; 1559 goto out;
1577 } 1560 }
@@ -1689,7 +1672,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1689 kfree(obj->pages); 1672 kfree(obj->pages);
1690} 1673}
1691 1674
1692static int 1675int
1693i915_gem_object_put_pages(struct drm_i915_gem_object *obj) 1676i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1694{ 1677{
1695 const struct drm_i915_gem_object_ops *ops = obj->ops; 1678 const struct drm_i915_gem_object_ops *ops = obj->ops;
@@ -1862,6 +1845,11 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1862 if (obj->pages) 1845 if (obj->pages)
1863 return 0; 1846 return 0;
1864 1847
1848 if (obj->madv != I915_MADV_WILLNEED) {
1849 DRM_ERROR("Attempting to obtain a purgeable object\n");
1850 return -EINVAL;
1851 }
1852
1865 BUG_ON(obj->pages_pin_count); 1853 BUG_ON(obj->pages_pin_count);
1866 1854
1867 ret = ops->get_pages(obj); 1855 ret = ops->get_pages(obj);
@@ -1918,9 +1906,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1918 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); 1906 BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
1919 BUG_ON(!obj->active); 1907 BUG_ON(!obj->active);
1920 1908
1921 if (obj->pin_count) /* are we a framebuffer? */
1922 intel_mark_fb_idle(obj);
1923
1924 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); 1909 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1925 1910
1926 list_del_init(&obj->ring_list); 1911 list_del_init(&obj->ring_list);
@@ -1940,30 +1925,24 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1940} 1925}
1941 1926
1942static int 1927static int
1943i915_gem_handle_seqno_wrap(struct drm_device *dev) 1928i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
1944{ 1929{
1945 struct drm_i915_private *dev_priv = dev->dev_private; 1930 struct drm_i915_private *dev_priv = dev->dev_private;
1946 struct intel_ring_buffer *ring; 1931 struct intel_ring_buffer *ring;
1947 int ret, i, j; 1932 int ret, i, j;
1948 1933
1949 /* The hardware uses various monotonic 32-bit counters, if we 1934 /* Carefully retire all requests without writing to the rings */
1950 * detect that they will wraparound we need to idle the GPU
1951 * and reset those counters.
1952 */
1953 ret = 0;
1954 for_each_ring(ring, dev_priv, i) { 1935 for_each_ring(ring, dev_priv, i) {
1955 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++) 1936 ret = intel_ring_idle(ring);
1956 ret |= ring->sync_seqno[j] != 0; 1937 if (ret)
1938 return ret;
1957 } 1939 }
1958 if (ret == 0)
1959 return ret;
1960
1961 ret = i915_gpu_idle(dev);
1962 if (ret)
1963 return ret;
1964
1965 i915_gem_retire_requests(dev); 1940 i915_gem_retire_requests(dev);
1941
1942 /* Finally reset hw state */
1966 for_each_ring(ring, dev_priv, i) { 1943 for_each_ring(ring, dev_priv, i) {
1944 intel_ring_init_seqno(ring, seqno);
1945
1967 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++) 1946 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
1968 ring->sync_seqno[j] = 0; 1947 ring->sync_seqno[j] = 0;
1969 } 1948 }
@@ -1971,6 +1950,32 @@ i915_gem_handle_seqno_wrap(struct drm_device *dev)
1971 return 0; 1950 return 0;
1972} 1951}
1973 1952
1953int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
1954{
1955 struct drm_i915_private *dev_priv = dev->dev_private;
1956 int ret;
1957
1958 if (seqno == 0)
1959 return -EINVAL;
1960
 1961 /* The HWS page seqno needs to be set to one less than what we
 1962 * will inject to the ring
 1963 */
1964 ret = i915_gem_init_seqno(dev, seqno - 1);
1965 if (ret)
1966 return ret;
1967
1968 /* Carefully set the last_seqno value so that wrap
1969 * detection still works
1970 */
1971 dev_priv->next_seqno = seqno;
1972 dev_priv->last_seqno = seqno - 1;
1973 if (dev_priv->last_seqno == 0)
1974 dev_priv->last_seqno--;
1975
1976 return 0;
1977}
1978
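A worked illustration of the last_seqno fixup (the value 1 is chosen for the example): requesting seqno 1 would leave last_seqno at the reserved value 0, so the decrement wraps it instead:

	/* i915_gem_set_seqno(dev, 1):
	 *   next_seqno = 1;
	 *   last_seqno = 0;   -- but 0 is reserved for "no seqno", so ...
	 *   last_seqno--;     -- u32 wraps to 0xffffffff
	 * and wrap detection in i915_gem_get_seqno() keeps working. */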
1974int 1979int
1975i915_gem_get_seqno(struct drm_device *dev, u32 *seqno) 1980i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
1976{ 1981{
@@ -1978,14 +1983,14 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
1978 1983
1979 /* reserve 0 for non-seqno */ 1984 /* reserve 0 for non-seqno */
1980 if (dev_priv->next_seqno == 0) { 1985 if (dev_priv->next_seqno == 0) {
1981 int ret = i915_gem_handle_seqno_wrap(dev); 1986 int ret = i915_gem_init_seqno(dev, 0);
1982 if (ret) 1987 if (ret)
1983 return ret; 1988 return ret;
1984 1989
1985 dev_priv->next_seqno = 1; 1990 dev_priv->next_seqno = 1;
1986 } 1991 }
1987 1992
1988 *seqno = dev_priv->next_seqno++; 1993 *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
1989 return 0; 1994 return 0;
1990} 1995}
1991 1996
@@ -2052,7 +2057,7 @@ i915_add_request(struct intel_ring_buffer *ring,
2052 2057
2053 if (!dev_priv->mm.suspended) { 2058 if (!dev_priv->mm.suspended) {
2054 if (i915_enable_hangcheck) { 2059 if (i915_enable_hangcheck) {
2055 mod_timer(&dev_priv->hangcheck_timer, 2060 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2056 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 2061 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
2057 } 2062 }
2058 if (was_empty) { 2063 if (was_empty) {
@@ -2317,10 +2322,12 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2317int 2322int
2318i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) 2323i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2319{ 2324{
2325 drm_i915_private_t *dev_priv = dev->dev_private;
2320 struct drm_i915_gem_wait *args = data; 2326 struct drm_i915_gem_wait *args = data;
2321 struct drm_i915_gem_object *obj; 2327 struct drm_i915_gem_object *obj;
2322 struct intel_ring_buffer *ring = NULL; 2328 struct intel_ring_buffer *ring = NULL;
2323 struct timespec timeout_stack, *timeout = NULL; 2329 struct timespec timeout_stack, *timeout = NULL;
2330 unsigned reset_counter;
2324 u32 seqno = 0; 2331 u32 seqno = 0;
2325 int ret = 0; 2332 int ret = 0;
2326 2333
@@ -2361,9 +2368,10 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2361 } 2368 }
2362 2369
2363 drm_gem_object_unreference(&obj->base); 2370 drm_gem_object_unreference(&obj->base);
2371 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2364 mutex_unlock(&dev->struct_mutex); 2372 mutex_unlock(&dev->struct_mutex);
2365 2373
2366 ret = __wait_seqno(ring, seqno, true, timeout); 2374 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
2367 if (timeout) { 2375 if (timeout) {
2368 WARN_ON(!timespec_valid(timeout)); 2376 WARN_ON(!timespec_valid(timeout));
2369 args->timeout_ns = timespec_to_ns(timeout); 2377 args->timeout_ns = timespec_to_ns(timeout);
@@ -2427,15 +2435,15 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2427{ 2435{
2428 u32 old_write_domain, old_read_domains; 2436 u32 old_write_domain, old_read_domains;
2429 2437
2430 /* Act a barrier for all accesses through the GTT */
2431 mb();
2432
2433 /* Force a pagefault for domain tracking on next user access */ 2438 /* Force a pagefault for domain tracking on next user access */
2434 i915_gem_release_mmap(obj); 2439 i915_gem_release_mmap(obj);
2435 2440
2436 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) 2441 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2437 return; 2442 return;
2438 2443
2444 /* Wait for any direct GTT access to complete */
2445 mb();
2446
2439 old_read_domains = obj->base.read_domains; 2447 old_read_domains = obj->base.read_domains;
2440 old_write_domain = obj->base.write_domain; 2448 old_write_domain = obj->base.write_domain;
2441 2449
@@ -2454,7 +2462,7 @@ int
2454i915_gem_object_unbind(struct drm_i915_gem_object *obj) 2462i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2455{ 2463{
2456 drm_i915_private_t *dev_priv = obj->base.dev->dev_private; 2464 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
2457 int ret = 0; 2465 int ret;
2458 2466
2459 if (obj->gtt_space == NULL) 2467 if (obj->gtt_space == NULL)
2460 return 0; 2468 return 0;
@@ -2521,52 +2529,38 @@ int i915_gpu_idle(struct drm_device *dev)
2521 return 0; 2529 return 0;
2522} 2530}
2523 2531
2524static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
2525 struct drm_i915_gem_object *obj)
2526{
2527 drm_i915_private_t *dev_priv = dev->dev_private;
2528 uint64_t val;
2529
2530 if (obj) {
2531 u32 size = obj->gtt_space->size;
2532
2533 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2534 0xfffff000) << 32;
2535 val |= obj->gtt_offset & 0xfffff000;
2536 val |= (uint64_t)((obj->stride / 128) - 1) <<
2537 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2538
2539 if (obj->tiling_mode == I915_TILING_Y)
2540 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2541 val |= I965_FENCE_REG_VALID;
2542 } else
2543 val = 0;
2544
2545 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
2546 POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
2547}
2548
2549static void i965_write_fence_reg(struct drm_device *dev, int reg, 2532static void i965_write_fence_reg(struct drm_device *dev, int reg,
2550 struct drm_i915_gem_object *obj) 2533 struct drm_i915_gem_object *obj)
2551{ 2534{
2552 drm_i915_private_t *dev_priv = dev->dev_private; 2535 drm_i915_private_t *dev_priv = dev->dev_private;
2536 int fence_reg;
2537 int fence_pitch_shift;
2553 uint64_t val; 2538 uint64_t val;
2554 2539
2540 if (INTEL_INFO(dev)->gen >= 6) {
2541 fence_reg = FENCE_REG_SANDYBRIDGE_0;
2542 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
2543 } else {
2544 fence_reg = FENCE_REG_965_0;
2545 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
2546 }
2547
2555 if (obj) { 2548 if (obj) {
2556 u32 size = obj->gtt_space->size; 2549 u32 size = obj->gtt_space->size;
2557 2550
2558 val = (uint64_t)((obj->gtt_offset + size - 4096) & 2551 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2559 0xfffff000) << 32; 2552 0xfffff000) << 32;
2560 val |= obj->gtt_offset & 0xfffff000; 2553 val |= obj->gtt_offset & 0xfffff000;
2561 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; 2554 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
2562 if (obj->tiling_mode == I915_TILING_Y) 2555 if (obj->tiling_mode == I915_TILING_Y)
2563 val |= 1 << I965_FENCE_TILING_Y_SHIFT; 2556 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2564 val |= I965_FENCE_REG_VALID; 2557 val |= I965_FENCE_REG_VALID;
2565 } else 2558 } else
2566 val = 0; 2559 val = 0;
2567 2560
2568 I915_WRITE64(FENCE_REG_965_0 + reg * 8, val); 2561 fence_reg += reg * 8;
2569 POSTING_READ(FENCE_REG_965_0 + reg * 8); 2562 I915_WRITE64(fence_reg, val);
2563 POSTING_READ(fence_reg);
2570} 2564}
2571 2565
2572static void i915_write_fence_reg(struct drm_device *dev, int reg, 2566static void i915_write_fence_reg(struct drm_device *dev, int reg,
@@ -2645,18 +2639,37 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
2645 POSTING_READ(FENCE_REG_830_0 + reg * 4); 2639 POSTING_READ(FENCE_REG_830_0 + reg * 4);
2646} 2640}
2647 2641
2642static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
2643{
2644 return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
2645}
2646
2648static void i915_gem_write_fence(struct drm_device *dev, int reg, 2647static void i915_gem_write_fence(struct drm_device *dev, int reg,
2649 struct drm_i915_gem_object *obj) 2648 struct drm_i915_gem_object *obj)
2650{ 2649{
2650 struct drm_i915_private *dev_priv = dev->dev_private;
2651
2652 /* Ensure that all CPU reads are completed before installing a fence
2653 * and all writes before removing the fence.
2654 */
2655 if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
2656 mb();
2657
2651 switch (INTEL_INFO(dev)->gen) { 2658 switch (INTEL_INFO(dev)->gen) {
2652 case 7: 2659 case 7:
2653 case 6: sandybridge_write_fence_reg(dev, reg, obj); break; 2660 case 6:
2654 case 5: 2661 case 5:
2655 case 4: i965_write_fence_reg(dev, reg, obj); break; 2662 case 4: i965_write_fence_reg(dev, reg, obj); break;
2656 case 3: i915_write_fence_reg(dev, reg, obj); break; 2663 case 3: i915_write_fence_reg(dev, reg, obj); break;
2657 case 2: i830_write_fence_reg(dev, reg, obj); break; 2664 case 2: i830_write_fence_reg(dev, reg, obj); break;
2658 default: break; 2665 default: BUG();
2659 } 2666 }
2667
2668 /* And similarly be paranoid that no direct access to this region
2669 * is reordered to before the fence is installed.
2670 */
2671 if (i915_gem_object_needs_mb(obj))
2672 mb();
2660} 2673}
2661 2674
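The barriers that used to live in the flush path now bracket the register write itself, and only fire when an object on either side of the transition is readable through the GTT. A condensed sketch of the ordering contract (write_fence_register is a hypothetical stand-in for the per-gen writers above):

	/* Sketch: order CPU GTT access against the fence update. */
	if (i915_gem_object_needs_mb(old_obj))	/* object being unfenced */
		mb();			/* drain its reads/writes first */
	write_fence_register(dev, reg, new_obj);	/* hypothetical */
	if (i915_gem_object_needs_mb(new_obj))
		mb();			/* no access reordered before install */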
2662static inline int fence_number(struct drm_i915_private *dev_priv, 2675static inline int fence_number(struct drm_i915_private *dev_priv,
@@ -2686,7 +2699,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
2686} 2699}
2687 2700
2688static int 2701static int
2689i915_gem_object_flush_fence(struct drm_i915_gem_object *obj) 2702i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
2690{ 2703{
2691 if (obj->last_fenced_seqno) { 2704 if (obj->last_fenced_seqno) {
2692 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno); 2705 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
@@ -2696,12 +2709,6 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
2696 obj->last_fenced_seqno = 0; 2709 obj->last_fenced_seqno = 0;
2697 } 2710 }
2698 2711
2699 /* Ensure that all CPU reads are completed before installing a fence
2700 * and all writes before removing the fence.
2701 */
2702 if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
2703 mb();
2704
2705 obj->fenced_gpu_access = false; 2712 obj->fenced_gpu_access = false;
2706 return 0; 2713 return 0;
2707} 2714}
@@ -2712,7 +2719,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2712 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 2719 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2713 int ret; 2720 int ret;
2714 2721
2715 ret = i915_gem_object_flush_fence(obj); 2722 ret = i915_gem_object_wait_fence(obj);
2716 if (ret) 2723 if (ret)
2717 return ret; 2724 return ret;
2718 2725
@@ -2786,7 +2793,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
2786 * will need to serialise the write to the associated fence register? 2793 * will need to serialise the write to the associated fence register?
2787 */ 2794 */
2788 if (obj->fence_dirty) { 2795 if (obj->fence_dirty) {
2789 ret = i915_gem_object_flush_fence(obj); 2796 ret = i915_gem_object_wait_fence(obj);
2790 if (ret) 2797 if (ret)
2791 return ret; 2798 return ret;
2792 } 2799 }
@@ -2807,7 +2814,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
2807 if (reg->obj) { 2814 if (reg->obj) {
2808 struct drm_i915_gem_object *old = reg->obj; 2815 struct drm_i915_gem_object *old = reg->obj;
2809 2816
2810 ret = i915_gem_object_flush_fence(old); 2817 ret = i915_gem_object_wait_fence(old);
2811 if (ret) 2818 if (ret)
2812 return ret; 2819 return ret;
2813 2820
@@ -2830,7 +2837,7 @@ static bool i915_gem_valid_gtt_space(struct drm_device *dev,
2830 2837
2831 /* On non-LLC machines we have to be careful when putting differing 2838 /* On non-LLC machines we have to be careful when putting differing
2832 * types of snoopable memory together to avoid the prefetcher 2839 * types of snoopable memory together to avoid the prefetcher
2833 * crossing memory domains and dieing. 2840 * crossing memory domains and dying.
2834 */ 2841 */
2835 if (HAS_LLC(dev)) 2842 if (HAS_LLC(dev))
2836 return true; 2843 return true;
@@ -2908,21 +2915,16 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2908 bool mappable, fenceable; 2915 bool mappable, fenceable;
2909 int ret; 2916 int ret;
2910 2917
2911 if (obj->madv != I915_MADV_WILLNEED) {
2912 DRM_ERROR("Attempting to bind a purgeable object\n");
2913 return -EINVAL;
2914 }
2915
2916 fence_size = i915_gem_get_gtt_size(dev, 2918 fence_size = i915_gem_get_gtt_size(dev,
2917 obj->base.size, 2919 obj->base.size,
2918 obj->tiling_mode); 2920 obj->tiling_mode);
2919 fence_alignment = i915_gem_get_gtt_alignment(dev, 2921 fence_alignment = i915_gem_get_gtt_alignment(dev,
2920 obj->base.size, 2922 obj->base.size,
2921 obj->tiling_mode); 2923 obj->tiling_mode, true);
2922 unfenced_alignment = 2924 unfenced_alignment =
2923 i915_gem_get_unfenced_gtt_alignment(dev, 2925 i915_gem_get_gtt_alignment(dev,
2924 obj->base.size, 2926 obj->base.size,
2925 obj->tiling_mode); 2927 obj->tiling_mode, false);
2926 2928
2927 if (alignment == 0) 2929 if (alignment == 0)
2928 alignment = map_and_fenceable ? fence_alignment : 2930 alignment = map_and_fenceable ? fence_alignment :
@@ -2938,7 +2940,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2938 * before evicting everything in a vain attempt to find space. 2940 * before evicting everything in a vain attempt to find space.
2939 */ 2941 */
2940 if (obj->base.size > 2942 if (obj->base.size >
2941 (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) { 2943 (map_and_fenceable ? dev_priv->gtt.mappable_end : dev_priv->gtt.total)) {
2942 DRM_ERROR("Attempting to bind an object larger than the aperture\n"); 2944 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2943 return -E2BIG; 2945 return -E2BIG;
2944 } 2946 }
@@ -2959,7 +2961,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2959 if (map_and_fenceable) 2961 if (map_and_fenceable)
2960 ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node, 2962 ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
2961 size, alignment, obj->cache_level, 2963 size, alignment, obj->cache_level,
2962 0, dev_priv->mm.gtt_mappable_end); 2964 0, dev_priv->gtt.mappable_end);
2963 else 2965 else
2964 ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node, 2966 ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
2965 size, alignment, obj->cache_level); 2967 size, alignment, obj->cache_level);
@@ -2999,7 +3001,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2999 (node->start & (fence_alignment - 1)) == 0; 3001 (node->start & (fence_alignment - 1)) == 0;
3000 3002
3001 mappable = 3003 mappable =
3002 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end; 3004 obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
3003 3005
3004 obj->map_and_fenceable = mappable && fenceable; 3006 obj->map_and_fenceable = mappable && fenceable;
3005 3007
@@ -3019,6 +3021,13 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
3019 if (obj->pages == NULL) 3021 if (obj->pages == NULL)
3020 return; 3022 return;
3021 3023
3024 /*
3025 * Stolen memory is always coherent with the GPU as it is explicitly
3026 * marked as wc by the system, or the system is cache-coherent.
3027 */
3028 if (obj->stolen)
3029 return;
3030
3022 /* If the GPU is snooping the contents of the CPU cache, 3031 /* If the GPU is snooping the contents of the CPU cache,
3023 * we do not need to manually clear the CPU cache lines. However, 3032 * we do not need to manually clear the CPU cache lines. However,
3024 * the caches are only snooped when the render cache is 3033 * the caches are only snooped when the render cache is
@@ -3107,6 +3116,13 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3107 3116
3108 i915_gem_object_flush_cpu_write_domain(obj); 3117 i915_gem_object_flush_cpu_write_domain(obj);
3109 3118
3119 /* Serialise direct access to this object with the barriers for
3120 * coherent writes from the GPU, by effectively invalidating the
3121 * GTT domain upon first access.
3122 */
3123 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3124 mb();
3125
3110 old_write_domain = obj->base.write_domain; 3126 old_write_domain = obj->base.write_domain;
3111 old_read_domains = obj->base.read_domains; 3127 old_read_domains = obj->base.read_domains;
3112 3128
@@ -3413,11 +3429,17 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3413 unsigned long recent_enough = jiffies - msecs_to_jiffies(20); 3429 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3414 struct drm_i915_gem_request *request; 3430 struct drm_i915_gem_request *request;
3415 struct intel_ring_buffer *ring = NULL; 3431 struct intel_ring_buffer *ring = NULL;
3432 unsigned reset_counter;
3416 u32 seqno = 0; 3433 u32 seqno = 0;
3417 int ret; 3434 int ret;
3418 3435
3419 if (atomic_read(&dev_priv->mm.wedged)) 3436 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3420 return -EIO; 3437 if (ret)
3438 return ret;
3439
3440 ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
3441 if (ret)
3442 return ret;
3421 3443
3422 spin_lock(&file_priv->mm.lock); 3444 spin_lock(&file_priv->mm.lock);
3423 list_for_each_entry(request, &file_priv->mm.request_list, client_list) { 3445 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
@@ -3427,12 +3449,13 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3427 ring = request->ring; 3449 ring = request->ring;
3428 seqno = request->seqno; 3450 seqno = request->seqno;
3429 } 3451 }
3452 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
3430 spin_unlock(&file_priv->mm.lock); 3453 spin_unlock(&file_priv->mm.lock);
3431 3454
3432 if (seqno == 0) 3455 if (seqno == 0)
3433 return 0; 3456 return 0;
3434 3457
3435 ret = __wait_seqno(ring, seqno, true, NULL); 3458 ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
3436 if (ret == 0) 3459 if (ret == 0)
3437 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); 3460 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3438 3461
@@ -3706,14 +3729,14 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3706{ 3729{
3707 struct drm_i915_gem_object *obj; 3730 struct drm_i915_gem_object *obj;
3708 struct address_space *mapping; 3731 struct address_space *mapping;
3709 u32 mask; 3732 gfp_t mask;
3710 3733
3711 obj = kzalloc(sizeof(*obj), GFP_KERNEL); 3734 obj = i915_gem_object_alloc(dev);
3712 if (obj == NULL) 3735 if (obj == NULL)
3713 return NULL; 3736 return NULL;
3714 3737
3715 if (drm_gem_object_init(dev, &obj->base, size) != 0) { 3738 if (drm_gem_object_init(dev, &obj->base, size) != 0) {
3716 kfree(obj); 3739 i915_gem_object_free(obj);
3717 return NULL; 3740 return NULL;
3718 } 3741 }
3719 3742
@@ -3785,6 +3808,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
3785 obj->pages_pin_count = 0; 3808 obj->pages_pin_count = 0;
3786 i915_gem_object_put_pages(obj); 3809 i915_gem_object_put_pages(obj);
3787 i915_gem_object_free_mmap_offset(obj); 3810 i915_gem_object_free_mmap_offset(obj);
3811 i915_gem_object_release_stolen(obj);
3788 3812
3789 BUG_ON(obj->pages); 3813 BUG_ON(obj->pages);
3790 3814
@@ -3795,7 +3819,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
3795 i915_gem_info_remove_obj(dev_priv, obj->base.size); 3819 i915_gem_info_remove_obj(dev_priv, obj->base.size);
3796 3820
3797 kfree(obj->bit_17); 3821 kfree(obj->bit_17);
3798 kfree(obj); 3822 i915_gem_object_free(obj);
3799} 3823}
3800 3824
3801int 3825int
@@ -3829,7 +3853,7 @@ i915_gem_idle(struct drm_device *dev)
3829 * And not confound mm.suspended! 3853 * And not confound mm.suspended!
3830 */ 3854 */
3831 dev_priv->mm.suspended = 1; 3855 dev_priv->mm.suspended = 1;
3832 del_timer_sync(&dev_priv->hangcheck_timer); 3856 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
3833 3857
3834 i915_kernel_lost_context(dev); 3858 i915_kernel_lost_context(dev);
3835 i915_gem_cleanup_ringbuffer(dev); 3859 i915_gem_cleanup_ringbuffer(dev);
@@ -3848,7 +3872,7 @@ void i915_gem_l3_remap(struct drm_device *dev)
3848 u32 misccpctl; 3872 u32 misccpctl;
3849 int i; 3873 int i;
3850 3874
3851 if (!IS_IVYBRIDGE(dev)) 3875 if (!HAS_L3_GPU_CACHE(dev))
3852 return; 3876 return;
3853 3877
3854 if (!dev_priv->l3_parity.remap_info) 3878 if (!dev_priv->l3_parity.remap_info)
@@ -3891,8 +3915,10 @@ void i915_gem_init_swizzling(struct drm_device *dev)
3891 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL); 3915 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
3892 if (IS_GEN6(dev)) 3916 if (IS_GEN6(dev))
3893 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB)); 3917 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
3894 else 3918 else if (IS_GEN7(dev))
3895 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); 3919 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
3920 else
3921 BUG();
3896} 3922}
3897 3923
3898static bool 3924static bool
@@ -3911,22 +3937,11 @@ intel_enable_blt(struct drm_device *dev)
3911 return true; 3937 return true;
3912} 3938}
3913 3939
3914int 3940static int i915_gem_init_rings(struct drm_device *dev)
3915i915_gem_init_hw(struct drm_device *dev)
3916{ 3941{
3917 drm_i915_private_t *dev_priv = dev->dev_private; 3942 struct drm_i915_private *dev_priv = dev->dev_private;
3918 int ret; 3943 int ret;
3919 3944
3920 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
3921 return -EIO;
3922
3923 if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
3924 I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
3925
3926 i915_gem_l3_remap(dev);
3927
3928 i915_gem_init_swizzling(dev);
3929
3930 ret = intel_init_render_ring_buffer(dev); 3945 ret = intel_init_render_ring_buffer(dev);
3931 if (ret) 3946 if (ret)
3932 return ret; 3947 return ret;
@@ -3943,76 +3958,59 @@ i915_gem_init_hw(struct drm_device *dev)
3943 goto cleanup_bsd_ring; 3958 goto cleanup_bsd_ring;
3944 } 3959 }
3945 3960
3946 dev_priv->next_seqno = 1; 3961 ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
3947 3962 if (ret)
3948 /* 3963 goto cleanup_blt_ring;
3949 * XXX: There was some w/a described somewhere suggesting loading
3950 * contexts before PPGTT.
3951 */
3952 i915_gem_context_init(dev);
3953 i915_gem_init_ppgtt(dev);
3954 3964
3955 return 0; 3965 return 0;
3956 3966
3967cleanup_blt_ring:
3968 intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
3957cleanup_bsd_ring: 3969cleanup_bsd_ring:
3958 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]); 3970 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
3959cleanup_render_ring: 3971cleanup_render_ring:
3960 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]); 3972 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
3973
3961 return ret; 3974 return ret;
3962} 3975}
3963 3976
3964static bool 3977int
3965intel_enable_ppgtt(struct drm_device *dev) 3978i915_gem_init_hw(struct drm_device *dev)
3966{ 3979{
3967 if (i915_enable_ppgtt >= 0) 3980 drm_i915_private_t *dev_priv = dev->dev_private;
3968 return i915_enable_ppgtt; 3981 int ret;
3969 3982
3970#ifdef CONFIG_INTEL_IOMMU 3983 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
3971 /* Disable ppgtt on SNB if VT-d is on. */ 3984 return -EIO;
3972 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
3973 return false;
3974#endif
3975 3985
3976 return true; 3986 if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
3987 I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
3988
3989 i915_gem_l3_remap(dev);
3990
3991 i915_gem_init_swizzling(dev);
3992
3993 ret = i915_gem_init_rings(dev);
3994 if (ret)
3995 return ret;
3996
3997 /*
3998 * XXX: There was some w/a described somewhere suggesting loading
3999 * contexts before PPGTT.
4000 */
4001 i915_gem_context_init(dev);
4002 i915_gem_init_ppgtt(dev);
4003
4004 return 0;
3977} 4005}
3978 4006
3979int i915_gem_init(struct drm_device *dev) 4007int i915_gem_init(struct drm_device *dev)
3980{ 4008{
3981 struct drm_i915_private *dev_priv = dev->dev_private; 4009 struct drm_i915_private *dev_priv = dev->dev_private;
3982 unsigned long gtt_size, mappable_size;
3983 int ret; 4010 int ret;
3984 4011
3985 gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
3986 mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
3987
3988 mutex_lock(&dev->struct_mutex); 4012 mutex_lock(&dev->struct_mutex);
3989 if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) { 4013 i915_gem_init_global_gtt(dev);
3990 /* PPGTT pdes are stolen from global gtt ptes, so shrink the
3991 * aperture accordingly when using aliasing ppgtt. */
3992 gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
3993
3994 i915_gem_init_global_gtt(dev, 0, mappable_size, gtt_size);
3995
3996 ret = i915_gem_init_aliasing_ppgtt(dev);
3997 if (ret) {
3998 mutex_unlock(&dev->struct_mutex);
3999 return ret;
4000 }
4001 } else {
4002 /* Let GEM Manage all of the aperture.
4003 *
4004 * However, leave one page at the end still bound to the scratch
4005 * page. There are a number of places where the hardware
4006 * apparently prefetches past the end of the object, and we've
4007 * seen multiple hangs with the GPU head pointer stuck in a
4008 * batchbuffer bound at the last page of the aperture. One page
4009 * should be enough to keep any prefetching inside of the
4010 * aperture.
4011 */
4012 i915_gem_init_global_gtt(dev, 0, mappable_size,
4013 gtt_size);
4014 }
4015
4016 ret = i915_gem_init_hw(dev); 4014 ret = i915_gem_init_hw(dev);
4017 mutex_unlock(&dev->struct_mutex); 4015 mutex_unlock(&dev->struct_mutex);
4018 if (ret) { 4016 if (ret) {
@@ -4047,9 +4045,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4047 if (drm_core_check_feature(dev, DRIVER_MODESET)) 4045 if (drm_core_check_feature(dev, DRIVER_MODESET))
4048 return 0; 4046 return 0;
4049 4047
4050 if (atomic_read(&dev_priv->mm.wedged)) { 4048 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
4051 DRM_ERROR("Reenabling wedged hardware, good luck\n"); 4049 DRM_ERROR("Reenabling wedged hardware, good luck\n");
4052 atomic_set(&dev_priv->mm.wedged, 0); 4050 atomic_set(&dev_priv->gpu_error.reset_counter, 0);
4053 } 4051 }
4054 4052
4055 mutex_lock(&dev->struct_mutex); 4053 mutex_lock(&dev->struct_mutex);
@@ -4113,8 +4111,14 @@ init_ring_lists(struct intel_ring_buffer *ring)
4113void 4111void
4114i915_gem_load(struct drm_device *dev) 4112i915_gem_load(struct drm_device *dev)
4115{ 4113{
4116 int i;
4117 drm_i915_private_t *dev_priv = dev->dev_private; 4114 drm_i915_private_t *dev_priv = dev->dev_private;
4115 int i;
4116
4117 dev_priv->slab =
4118 kmem_cache_create("i915_gem_object",
4119 sizeof(struct drm_i915_gem_object), 0,
4120 SLAB_HWCACHE_ALIGN,
4121 NULL);
4118 4122
4119 INIT_LIST_HEAD(&dev_priv->mm.active_list); 4123 INIT_LIST_HEAD(&dev_priv->mm.active_list);
4120 INIT_LIST_HEAD(&dev_priv->mm.inactive_list); 4124 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
@@ -4127,7 +4131,7 @@ i915_gem_load(struct drm_device *dev)
4127 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); 4131 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4128 INIT_DELAYED_WORK(&dev_priv->mm.retire_work, 4132 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4129 i915_gem_retire_work_handler); 4133 i915_gem_retire_work_handler);
4130 init_completion(&dev_priv->error_completion); 4134 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4131 4135
4132 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ 4136 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4133 if (IS_GEN3(dev)) { 4137 if (IS_GEN3(dev)) {
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a3f06bcad551..21177d9df423 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -126,13 +126,8 @@ static int get_context_size(struct drm_device *dev)
126 126
127static void do_destroy(struct i915_hw_context *ctx) 127static void do_destroy(struct i915_hw_context *ctx)
128{ 128{
129 struct drm_device *dev = ctx->obj->base.dev;
130 struct drm_i915_private *dev_priv = dev->dev_private;
131
132 if (ctx->file_priv) 129 if (ctx->file_priv)
133 idr_remove(&ctx->file_priv->context_idr, ctx->id); 130 idr_remove(&ctx->file_priv->context_idr, ctx->id);
134 else
135 BUG_ON(ctx != dev_priv->ring[RCS].default_context);
136 131
137 drm_gem_object_unreference(&ctx->obj->base); 132 drm_gem_object_unreference(&ctx->obj->base);
138 kfree(ctx); 133 kfree(ctx);
@@ -242,7 +237,6 @@ err_destroy:
242void i915_gem_context_init(struct drm_device *dev) 237void i915_gem_context_init(struct drm_device *dev)
243{ 238{
244 struct drm_i915_private *dev_priv = dev->dev_private; 239 struct drm_i915_private *dev_priv = dev->dev_private;
245 uint32_t ctx_size;
246 240
247 if (!HAS_HW_CONTEXTS(dev)) { 241 if (!HAS_HW_CONTEXTS(dev)) {
248 dev_priv->hw_contexts_disabled = true; 242 dev_priv->hw_contexts_disabled = true;
@@ -254,11 +248,9 @@ void i915_gem_context_init(struct drm_device *dev)
254 dev_priv->ring[RCS].default_context) 248 dev_priv->ring[RCS].default_context)
255 return; 249 return;
256 250
257 ctx_size = get_context_size(dev); 251 dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
258 dev_priv->hw_context_size = get_context_size(dev);
259 dev_priv->hw_context_size = round_up(dev_priv->hw_context_size, 4096);
260 252
261 if (ctx_size <= 0 || ctx_size > (1<<20)) { 253 if (dev_priv->hw_context_size > (1<<20)) {
262 dev_priv->hw_contexts_disabled = true; 254 dev_priv->hw_contexts_disabled = true;
263 return; 255 return;
264 } 256 }
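The context-size hunk above collapses the temporary into a single round_up() call and drops the signed <= 0 test. For a power-of-two boundary the kernel's round_up() reduces to a mask; a small self-contained sketch (check_context_size() is illustrative, not the driver's code):

    /* Equivalent of round_up(x, align) for power-of-two align. */
    static unsigned int round_up_pow2(unsigned int x, unsigned int align)
    {
            return (x + align - 1) & ~(align - 1);
    }

    /* Mirrors the hunk: round to a page, then bound at 1MB. */
    static int check_context_size(unsigned int raw_size)
    {
            unsigned int size = round_up_pow2(raw_size, 4096);

            if (size == 0 || size > (1 << 20))
                    return -EINVAL;
            return 0;
    }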
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index abeaafef6d7e..6a5af6828624 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -281,8 +281,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
281 if (IS_ERR(attach)) 281 if (IS_ERR(attach))
282 return ERR_CAST(attach); 282 return ERR_CAST(attach);
283 283
284 284 obj = i915_gem_object_alloc(dev);
285 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
286 if (obj == NULL) { 285 if (obj == NULL) {
287 ret = -ENOMEM; 286 ret = -ENOMEM;
288 goto fail_detach; 287 goto fail_detach;
@@ -290,7 +289,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
290 289
291 ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size); 290 ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
292 if (ret) { 291 if (ret) {
293 kfree(obj); 292 i915_gem_object_free(obj);
294 goto fail_detach; 293 goto fail_detach;
295 } 294 }
296 295
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 776a3225184c..c86d5d9356fd 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -80,7 +80,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
80 if (mappable) 80 if (mappable)
81 drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, 81 drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
82 min_size, alignment, cache_level, 82 min_size, alignment, cache_level,
83 0, dev_priv->mm.gtt_mappable_end); 83 0, dev_priv->gtt.mappable_end);
84 else 84 else
85 drm_mm_init_scan(&dev_priv->mm.gtt_space, 85 drm_mm_init_scan(&dev_priv->mm.gtt_space,
86 min_size, alignment, cache_level); 86 min_size, alignment, cache_level);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 26d08bb58218..2f2daebd0eef 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -34,61 +34,133 @@
34#include <linux/dma_remapping.h> 34#include <linux/dma_remapping.h>
35 35
36struct eb_objects { 36struct eb_objects {
37 struct list_head objects;
37 int and; 38 int and;
38 struct hlist_head buckets[0]; 39 union {
40 struct drm_i915_gem_object *lut[0];
41 struct hlist_head buckets[0];
42 };
39}; 43};
40 44
41static struct eb_objects * 45static struct eb_objects *
42eb_create(int size) 46eb_create(struct drm_i915_gem_execbuffer2 *args)
43{ 47{
44 struct eb_objects *eb; 48 struct eb_objects *eb = NULL;
45 int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; 49
46 BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head))); 50 if (args->flags & I915_EXEC_HANDLE_LUT) {
47 while (count > size) 51 int size = args->buffer_count;
48 count >>= 1; 52 size *= sizeof(struct drm_i915_gem_object *);
49 eb = kzalloc(count*sizeof(struct hlist_head) + 53 size += sizeof(struct eb_objects);
50 sizeof(struct eb_objects), 54 eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
51 GFP_KERNEL); 55 }
52 if (eb == NULL) 56
53 return eb; 57 if (eb == NULL) {
54 58 int size = args->buffer_count;
55 eb->and = count - 1; 59 int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
60 BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
61 while (count > 2*size)
62 count >>= 1;
63 eb = kzalloc(count*sizeof(struct hlist_head) +
64 sizeof(struct eb_objects),
65 GFP_TEMPORARY);
66 if (eb == NULL)
67 return eb;
68
69 eb->and = count - 1;
70 } else
71 eb->and = -args->buffer_count;
72
73 INIT_LIST_HEAD(&eb->objects);
56 return eb; 74 return eb;
57} 75}
58 76
59static void 77static void
60eb_reset(struct eb_objects *eb) 78eb_reset(struct eb_objects *eb)
61{ 79{
62 memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head)); 80 if (eb->and >= 0)
81 memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
63} 82}
64 83
65static void 84static int
66eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj) 85eb_lookup_objects(struct eb_objects *eb,
86 struct drm_i915_gem_exec_object2 *exec,
87 const struct drm_i915_gem_execbuffer2 *args,
88 struct drm_file *file)
67{ 89{
68 hlist_add_head(&obj->exec_node, 90 int i;
69 &eb->buckets[obj->exec_handle & eb->and]); 91
92 spin_lock(&file->table_lock);
93 for (i = 0; i < args->buffer_count; i++) {
94 struct drm_i915_gem_object *obj;
95
96 obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
97 if (obj == NULL) {
98 spin_unlock(&file->table_lock);
99 DRM_DEBUG("Invalid object handle %d at index %d\n",
100 exec[i].handle, i);
101 return -ENOENT;
102 }
103
104 if (!list_empty(&obj->exec_list)) {
105 spin_unlock(&file->table_lock);
106 DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
107 obj, exec[i].handle, i);
108 return -EINVAL;
109 }
110
111 drm_gem_object_reference(&obj->base);
112 list_add_tail(&obj->exec_list, &eb->objects);
113
114 obj->exec_entry = &exec[i];
115 if (eb->and < 0) {
116 eb->lut[i] = obj;
117 } else {
118 uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
119 obj->exec_handle = handle;
120 hlist_add_head(&obj->exec_node,
121 &eb->buckets[handle & eb->and]);
122 }
123 }
124 spin_unlock(&file->table_lock);
125
126 return 0;
70} 127}
71 128
72static struct drm_i915_gem_object * 129static struct drm_i915_gem_object *
73eb_get_object(struct eb_objects *eb, unsigned long handle) 130eb_get_object(struct eb_objects *eb, unsigned long handle)
74{ 131{
75 struct hlist_head *head; 132 if (eb->and < 0) {
76 struct hlist_node *node; 133 if (handle >= -eb->and)
77 struct drm_i915_gem_object *obj; 134 return NULL;
135 return eb->lut[handle];
136 } else {
137 struct hlist_head *head;
138 struct hlist_node *node;
78 139
79 head = &eb->buckets[handle & eb->and]; 140 head = &eb->buckets[handle & eb->and];
80 hlist_for_each(node, head) { 141 hlist_for_each(node, head) {
81 obj = hlist_entry(node, struct drm_i915_gem_object, exec_node); 142 struct drm_i915_gem_object *obj;
82 if (obj->exec_handle == handle)
83 return obj;
84 }
85 143
86 return NULL; 144 obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
145 if (obj->exec_handle == handle)
146 return obj;
147 }
148 return NULL;
149 }
87} 150}
88 151
89static void 152static void
90eb_destroy(struct eb_objects *eb) 153eb_destroy(struct eb_objects *eb)
91{ 154{
155 while (!list_empty(&eb->objects)) {
156 struct drm_i915_gem_object *obj;
157
158 obj = list_first_entry(&eb->objects,
159 struct drm_i915_gem_object,
160 exec_list);
161 list_del_init(&obj->exec_list);
162 drm_gem_object_unreference(&obj->base);
163 }
92 kfree(eb); 164 kfree(eb);
93} 165}
94 166
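The eb_objects rework above overlays two lookup schemes on one allocation: with I915_EXEC_HANDLE_LUT userspace promises dense, index-like handles, so a flat array indexed by handle suffices; otherwise handles hash into power-of-two buckets. The sign of 'and' selects the mode (negative: minus the LUT size; non-negative: the hash mask). A self-contained sketch of the same trick, with void pointers standing in for GEM objects:

    #include <linux/list.h>

    struct lu_entry {
            struct hlist_node node;
            unsigned long handle;
            void *payload;
    };

    struct lookup {
            int and;        /* >= 0: hash mask; < 0: minus the LUT size */
            union {
                    void *lut[0];                   /* dense handles */
                    struct hlist_head buckets[0];   /* sparse handles */
            };
    };

    static void *lookup_get(struct lookup *l, unsigned long handle)
    {
            if (l->and < 0) {
                    /* LUT mode: the handle is a direct array index. */
                    if (handle >= (unsigned long)-l->and)
                            return NULL;
                    return l->lut[handle];
            } else {
                    /* Hash mode: mask into a bucket and walk it. */
                    struct hlist_node *node;

                    hlist_for_each(node, &l->buckets[handle & l->and]) {
                            struct lu_entry *e;

                            e = hlist_entry(node, struct lu_entry, node);
                            if (e->handle == handle)
                                    return e->payload;
                    }
                    return NULL;
            }
    }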
@@ -150,17 +222,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
150 reloc->write_domain); 222 reloc->write_domain);
151 return ret; 223 return ret;
152 } 224 }
153 if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
154 reloc->write_domain != target_obj->pending_write_domain)) {
155 DRM_DEBUG("Write domain conflict: "
156 "obj %p target %d offset %d "
157 "new %08x old %08x\n",
158 obj, reloc->target_handle,
159 (int) reloc->offset,
160 reloc->write_domain,
161 target_obj->pending_write_domain);
162 return ret;
163 }
164 225
165 target_obj->pending_read_domains |= reloc->read_domains; 226 target_obj->pending_read_domains |= reloc->read_domains;
166 target_obj->pending_write_domain |= reloc->write_domain; 227 target_obj->pending_write_domain |= reloc->write_domain;
@@ -220,7 +281,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
220 281
221 /* Map the page containing the relocation we're going to perform. */ 282 /* Map the page containing the relocation we're going to perform. */
222 reloc->offset += obj->gtt_offset; 283 reloc->offset += obj->gtt_offset;
223 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, 284 reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
224 reloc->offset & PAGE_MASK); 285 reloc->offset & PAGE_MASK);
225 reloc_entry = (uint32_t __iomem *) 286 reloc_entry = (uint32_t __iomem *)
226 (reloc_page + (reloc->offset & ~PAGE_MASK)); 287 (reloc_page + (reloc->offset & ~PAGE_MASK));
@@ -299,8 +360,7 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
299 360
300static int 361static int
301i915_gem_execbuffer_relocate(struct drm_device *dev, 362i915_gem_execbuffer_relocate(struct drm_device *dev,
302 struct eb_objects *eb, 363 struct eb_objects *eb)
303 struct list_head *objects)
304{ 364{
305 struct drm_i915_gem_object *obj; 365 struct drm_i915_gem_object *obj;
306 int ret = 0; 366 int ret = 0;
@@ -313,7 +373,7 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
313 * lockdep complains vehemently. 373 * lockdep complains vehemently.
314 */ 374 */
315 pagefault_disable(); 375 pagefault_disable();
316 list_for_each_entry(obj, objects, exec_list) { 376 list_for_each_entry(obj, &eb->objects, exec_list) {
317 ret = i915_gem_execbuffer_relocate_object(obj, eb); 377 ret = i915_gem_execbuffer_relocate_object(obj, eb);
318 if (ret) 378 if (ret)
319 break; 379 break;
@@ -335,7 +395,8 @@ need_reloc_mappable(struct drm_i915_gem_object *obj)
335 395
336static int 396static int
337i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, 397i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
338 struct intel_ring_buffer *ring) 398 struct intel_ring_buffer *ring,
399 bool *need_reloc)
339{ 400{
340 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 401 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
341 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; 402 struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
@@ -376,7 +437,20 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
376 obj->has_aliasing_ppgtt_mapping = 1; 437 obj->has_aliasing_ppgtt_mapping = 1;
377 } 438 }
378 439
379 entry->offset = obj->gtt_offset; 440 if (entry->offset != obj->gtt_offset) {
441 entry->offset = obj->gtt_offset;
442 *need_reloc = true;
443 }
444
445 if (entry->flags & EXEC_OBJECT_WRITE) {
446 obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
447 obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
448 }
449
450 if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
451 !obj->has_global_gtt_mapping)
452 i915_gem_gtt_bind_object(obj, obj->cache_level);
453
380 return 0; 454 return 0;
381} 455}
382 456
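The reserve path now reports back through need_reloc: with I915_EXEC_NO_RELOC userspace supplies presumed offsets, and relocation processing can be skipped entirely unless some object failed to land where userspace expected. A minimal sketch of that decision, with an illustrative entry type:

    #include <linux/types.h>

    struct exec_entry {
            u64 offset;     /* userspace's presumed GTT offset */
    };

    /* True if any object moved from its presumed offset, i.e. the
     * NO_RELOC fast path cannot be taken this round. */
    static bool reserve_all(struct exec_entry *entries,
                            const u64 *actual, int count)
    {
            bool need_relocs = false;
            int i;

            for (i = 0; i < count; i++) {
                    if (entries[i].offset != actual[i]) {
                            entries[i].offset = actual[i];
                            need_relocs = true;
                    }
            }
            return need_relocs;
    }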
@@ -402,7 +476,8 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
402static int 476static int
403i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, 477i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
404 struct drm_file *file, 478 struct drm_file *file,
405 struct list_head *objects) 479 struct list_head *objects,
480 bool *need_relocs)
406{ 481{
407 struct drm_i915_gem_object *obj; 482 struct drm_i915_gem_object *obj;
408 struct list_head ordered_objects; 483 struct list_head ordered_objects;
@@ -430,7 +505,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
430 else 505 else
431 list_move_tail(&obj->exec_list, &ordered_objects); 506 list_move_tail(&obj->exec_list, &ordered_objects);
432 507
433 obj->base.pending_read_domains = 0; 508 obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
434 obj->base.pending_write_domain = 0; 509 obj->base.pending_write_domain = 0;
435 obj->pending_fenced_gpu_access = false; 510 obj->pending_fenced_gpu_access = false;
436 } 511 }
@@ -470,7 +545,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
470 (need_mappable && !obj->map_and_fenceable)) 545 (need_mappable && !obj->map_and_fenceable))
471 ret = i915_gem_object_unbind(obj); 546 ret = i915_gem_object_unbind(obj);
472 else 547 else
473 ret = i915_gem_execbuffer_reserve_object(obj, ring); 548 ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
474 if (ret) 549 if (ret)
475 goto err; 550 goto err;
476 } 551 }
@@ -480,7 +555,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
480 if (obj->gtt_space) 555 if (obj->gtt_space)
481 continue; 556 continue;
482 557
483 ret = i915_gem_execbuffer_reserve_object(obj, ring); 558 ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
484 if (ret) 559 if (ret)
485 goto err; 560 goto err;
486 } 561 }
@@ -500,21 +575,22 @@ err: /* Decrement pin count for bound objects */
500 575
501static int 576static int
502i915_gem_execbuffer_relocate_slow(struct drm_device *dev, 577i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
578 struct drm_i915_gem_execbuffer2 *args,
503 struct drm_file *file, 579 struct drm_file *file,
504 struct intel_ring_buffer *ring, 580 struct intel_ring_buffer *ring,
505 struct list_head *objects,
506 struct eb_objects *eb, 581 struct eb_objects *eb,
507 struct drm_i915_gem_exec_object2 *exec, 582 struct drm_i915_gem_exec_object2 *exec)
508 int count)
509{ 583{
510 struct drm_i915_gem_relocation_entry *reloc; 584 struct drm_i915_gem_relocation_entry *reloc;
511 struct drm_i915_gem_object *obj; 585 struct drm_i915_gem_object *obj;
586 bool need_relocs;
512 int *reloc_offset; 587 int *reloc_offset;
513 int i, total, ret; 588 int i, total, ret;
589 int count = args->buffer_count;
514 590
515 /* We may process another execbuffer during the unlock... */ 591 /* We may process another execbuffer during the unlock... */
516 while (!list_empty(objects)) { 592 while (!list_empty(&eb->objects)) {
517 obj = list_first_entry(objects, 593 obj = list_first_entry(&eb->objects,
518 struct drm_i915_gem_object, 594 struct drm_i915_gem_object,
519 exec_list); 595 exec_list);
520 list_del_init(&obj->exec_list); 596 list_del_init(&obj->exec_list);
@@ -582,27 +658,16 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
582 658
583 /* reacquire the objects */ 659 /* reacquire the objects */
584 eb_reset(eb); 660 eb_reset(eb);
585 for (i = 0; i < count; i++) { 661 ret = eb_lookup_objects(eb, exec, args, file);
586 obj = to_intel_bo(drm_gem_object_lookup(dev, file, 662 if (ret)
587 exec[i].handle)); 663 goto err;
588 if (&obj->base == NULL) {
589 DRM_DEBUG("Invalid object handle %d at index %d\n",
590 exec[i].handle, i);
591 ret = -ENOENT;
592 goto err;
593 }
594
595 list_add_tail(&obj->exec_list, objects);
596 obj->exec_handle = exec[i].handle;
597 obj->exec_entry = &exec[i];
598 eb_add_object(eb, obj);
599 }
600 664
601 ret = i915_gem_execbuffer_reserve(ring, file, objects); 665 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
666 ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs);
602 if (ret) 667 if (ret)
603 goto err; 668 goto err;
604 669
605 list_for_each_entry(obj, objects, exec_list) { 670 list_for_each_entry(obj, &eb->objects, exec_list) {
606 int offset = obj->exec_entry - exec; 671 int offset = obj->exec_entry - exec;
607 ret = i915_gem_execbuffer_relocate_object_slow(obj, eb, 672 ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
608 reloc + reloc_offset[offset]); 673 reloc + reloc_offset[offset]);
@@ -623,44 +688,11 @@ err:
623} 688}
624 689
625static int 690static int
626i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
627{
628 u32 plane, flip_mask;
629 int ret;
630
631 /* Check for any pending flips. As we only maintain a flip queue depth
632 * of 1, we can simply insert a WAIT for the next display flip prior
633 * to executing the batch and avoid stalling the CPU.
634 */
635
636 for (plane = 0; flips >> plane; plane++) {
637 if (((flips >> plane) & 1) == 0)
638 continue;
639
640 if (plane)
641 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
642 else
643 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
644
645 ret = intel_ring_begin(ring, 2);
646 if (ret)
647 return ret;
648
649 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
650 intel_ring_emit(ring, MI_NOOP);
651 intel_ring_advance(ring);
652 }
653
654 return 0;
655}
656
657static int
658i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, 691i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
659 struct list_head *objects) 692 struct list_head *objects)
660{ 693{
661 struct drm_i915_gem_object *obj; 694 struct drm_i915_gem_object *obj;
662 uint32_t flush_domains = 0; 695 uint32_t flush_domains = 0;
663 uint32_t flips = 0;
664 int ret; 696 int ret;
665 697
666 list_for_each_entry(obj, objects, exec_list) { 698 list_for_each_entry(obj, objects, exec_list) {
@@ -671,18 +703,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
671 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) 703 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
672 i915_gem_clflush_object(obj); 704 i915_gem_clflush_object(obj);
673 705
674 if (obj->base.pending_write_domain)
675 flips |= atomic_read(&obj->pending_flip);
676
677 flush_domains |= obj->base.write_domain; 706 flush_domains |= obj->base.write_domain;
678 } 707 }
679 708
680 if (flips) {
681 ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
682 if (ret)
683 return ret;
684 }
685
686 if (flush_domains & I915_GEM_DOMAIN_CPU) 709 if (flush_domains & I915_GEM_DOMAIN_CPU)
687 i915_gem_chipset_flush(ring->dev); 710 i915_gem_chipset_flush(ring->dev);
688 711
@@ -698,6 +721,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
698static bool 721static bool
699i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) 722i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
700{ 723{
724 if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
725 return false;
726
701 return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0; 727 return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
702} 728}
703 729
@@ -711,6 +737,9 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
711 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; 737 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
712 int length; /* limited by fault_in_pages_readable() */ 738 int length; /* limited by fault_in_pages_readable() */
713 739
740 if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
741 return -EINVAL;
742
714 /* First check for malicious input causing overflow */ 743 /* First check for malicious input causing overflow */
715 if (exec[i].relocation_count > 744 if (exec[i].relocation_count >
716 INT_MAX / sizeof(struct drm_i915_gem_relocation_entry)) 745 INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
@@ -718,9 +747,6 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
718 747
719 length = exec[i].relocation_count * 748 length = exec[i].relocation_count *
720 sizeof(struct drm_i915_gem_relocation_entry); 749 sizeof(struct drm_i915_gem_relocation_entry);
721 if (!access_ok(VERIFY_READ, ptr, length))
722 return -EFAULT;
723
724 /* we may also need to update the presumed offsets */ 750 /* we may also need to update the presumed offsets */
725 if (!access_ok(VERIFY_WRITE, ptr, length)) 751 if (!access_ok(VERIFY_WRITE, ptr, length))
726 return -EFAULT; 752 return -EFAULT;
@@ -742,8 +768,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
742 u32 old_read = obj->base.read_domains; 768 u32 old_read = obj->base.read_domains;
743 u32 old_write = obj->base.write_domain; 769 u32 old_write = obj->base.write_domain;
744 770
745 obj->base.read_domains = obj->base.pending_read_domains;
746 obj->base.write_domain = obj->base.pending_write_domain; 771 obj->base.write_domain = obj->base.pending_write_domain;
772 if (obj->base.write_domain == 0)
773 obj->base.pending_read_domains |= obj->base.read_domains;
774 obj->base.read_domains = obj->base.pending_read_domains;
747 obj->fenced_gpu_access = obj->pending_fenced_gpu_access; 775 obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
748 776
749 i915_gem_object_move_to_active(obj, ring); 777 i915_gem_object_move_to_active(obj, ring);
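The move_to_active hunk changes the domain handoff: the pending write domain is taken as-is, but when the batch declared no write, the object's previous read domains are folded back in rather than being clobbered by the default-initialized pending set. The rule, restated as a small sketch:

    #include <linux/types.h>

    struct domains {
            u32 read_domains, write_domain;
            u32 pending_read_domains, pending_write_domain;
    };

    static void move_to_active_domains(struct domains *d)
    {
            d->write_domain = d->pending_write_domain;
            if (d->write_domain == 0)
                    /* No writer this batch: old read domains stay valid. */
                    d->pending_read_domains |= d->read_domains;
            d->read_domains = d->pending_read_domains;
    }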
@@ -802,21 +830,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
802 struct drm_i915_gem_exec_object2 *exec) 830 struct drm_i915_gem_exec_object2 *exec)
803{ 831{
804 drm_i915_private_t *dev_priv = dev->dev_private; 832 drm_i915_private_t *dev_priv = dev->dev_private;
805 struct list_head objects;
806 struct eb_objects *eb; 833 struct eb_objects *eb;
807 struct drm_i915_gem_object *batch_obj; 834 struct drm_i915_gem_object *batch_obj;
808 struct drm_clip_rect *cliprects = NULL; 835 struct drm_clip_rect *cliprects = NULL;
809 struct intel_ring_buffer *ring; 836 struct intel_ring_buffer *ring;
810 u32 ctx_id = i915_execbuffer2_get_context_id(*args); 837 u32 ctx_id = i915_execbuffer2_get_context_id(*args);
811 u32 exec_start, exec_len; 838 u32 exec_start, exec_len;
812 u32 mask; 839 u32 mask, flags;
813 u32 flags;
814 int ret, mode, i; 840 int ret, mode, i;
841 bool need_relocs;
815 842
816 if (!i915_gem_check_execbuffer(args)) { 843 if (!i915_gem_check_execbuffer(args))
817 DRM_DEBUG("execbuf with invalid offset/length\n");
818 return -EINVAL; 844 return -EINVAL;
819 }
820 845
821 ret = validate_exec_list(exec, args->buffer_count); 846 ret = validate_exec_list(exec, args->buffer_count);
822 if (ret) 847 if (ret)
@@ -937,7 +962,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
937 goto pre_mutex_err; 962 goto pre_mutex_err;
938 } 963 }
939 964
940 eb = eb_create(args->buffer_count); 965 eb = eb_create(args);
941 if (eb == NULL) { 966 if (eb == NULL) {
942 mutex_unlock(&dev->struct_mutex); 967 mutex_unlock(&dev->struct_mutex);
943 ret = -ENOMEM; 968 ret = -ENOMEM;
@@ -945,51 +970,28 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
945 } 970 }
946 971
947 /* Look up object handles */ 972 /* Look up object handles */
948 INIT_LIST_HEAD(&objects); 973 ret = eb_lookup_objects(eb, exec, args, file);
949 for (i = 0; i < args->buffer_count; i++) { 974 if (ret)
950 struct drm_i915_gem_object *obj; 975 goto err;
951
952 obj = to_intel_bo(drm_gem_object_lookup(dev, file,
953 exec[i].handle));
954 if (&obj->base == NULL) {
955 DRM_DEBUG("Invalid object handle %d at index %d\n",
956 exec[i].handle, i);
957 /* prevent error path from reading uninitialized data */
958 ret = -ENOENT;
959 goto err;
960 }
961
962 if (!list_empty(&obj->exec_list)) {
963 DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
964 obj, exec[i].handle, i);
965 ret = -EINVAL;
966 goto err;
967 }
968
969 list_add_tail(&obj->exec_list, &objects);
970 obj->exec_handle = exec[i].handle;
971 obj->exec_entry = &exec[i];
972 eb_add_object(eb, obj);
973 }
974 976
975 /* take note of the batch buffer before we might reorder the lists */ 977 /* take note of the batch buffer before we might reorder the lists */
976 batch_obj = list_entry(objects.prev, 978 batch_obj = list_entry(eb->objects.prev,
977 struct drm_i915_gem_object, 979 struct drm_i915_gem_object,
978 exec_list); 980 exec_list);
979 981
980 /* Move the objects en-masse into the GTT, evicting if necessary. */ 982 /* Move the objects en-masse into the GTT, evicting if necessary. */
981 ret = i915_gem_execbuffer_reserve(ring, file, &objects); 983 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
984 ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs);
982 if (ret) 985 if (ret)
983 goto err; 986 goto err;
984 987
985 /* The objects are in their final locations, apply the relocations. */ 988 /* The objects are in their final locations, apply the relocations. */
986 ret = i915_gem_execbuffer_relocate(dev, eb, &objects); 989 if (need_relocs)
990 ret = i915_gem_execbuffer_relocate(dev, eb);
987 if (ret) { 991 if (ret) {
988 if (ret == -EFAULT) { 992 if (ret == -EFAULT) {
989 ret = i915_gem_execbuffer_relocate_slow(dev, file, ring, 993 ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
990 &objects, eb, 994 eb, exec);
991 exec,
992 args->buffer_count);
993 BUG_ON(!mutex_is_locked(&dev->struct_mutex)); 995 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
994 } 996 }
995 if (ret) 997 if (ret)
@@ -1011,7 +1013,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1011 if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping) 1013 if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
1012 i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level); 1014 i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
1013 1015
1014 ret = i915_gem_execbuffer_move_to_gpu(ring, &objects); 1016 ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
1015 if (ret) 1017 if (ret)
1016 goto err; 1018 goto err;
1017 1019
@@ -1065,20 +1067,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1065 1067
1066 trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); 1068 trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
1067 1069
1068 i915_gem_execbuffer_move_to_active(&objects, ring); 1070 i915_gem_execbuffer_move_to_active(&eb->objects, ring);
1069 i915_gem_execbuffer_retire_commands(dev, file, ring); 1071 i915_gem_execbuffer_retire_commands(dev, file, ring);
1070 1072
1071err: 1073err:
1072 eb_destroy(eb); 1074 eb_destroy(eb);
1073 while (!list_empty(&objects)) {
1074 struct drm_i915_gem_object *obj;
1075
1076 obj = list_first_entry(&objects,
1077 struct drm_i915_gem_object,
1078 exec_list);
1079 list_del_init(&obj->exec_list);
1080 drm_gem_object_unreference(&obj->base);
1081 }
1082 1075
1083 mutex_unlock(&dev->struct_mutex); 1076 mutex_unlock(&dev->struct_mutex);
1084 1077
@@ -1187,7 +1180,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
1187 } 1180 }
1188 1181
1189 exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count, 1182 exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
1190 GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); 1183 GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
1191 if (exec2_list == NULL) 1184 if (exec2_list == NULL)
1192 exec2_list = drm_malloc_ab(sizeof(*exec2_list), 1185 exec2_list = drm_malloc_ab(sizeof(*exec2_list),
1193 args->buffer_count); 1186 args->buffer_count);
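The switch to GFP_TEMPORARY pairs with the existing two-step allocation: try a contiguous kmalloc() quietly (no warnings, no retries), then fall back to drm_malloc_ab(), which checks for multiplication overflow and can use vmalloc for large buffer counts. The pattern, sketched with the same helpers (alloc_exec_list() is an illustrative wrapper; the caller is assumed to have bounded count against overflow first):

    #include <linux/slab.h>
    #include <drm/drm_mem_util.h>

    static void *alloc_exec_list(size_t size, size_t count)
    {
            /* Fast path: contiguous allocation, failing quietly. */
            void *p = kmalloc(size * count,
                              GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
            if (p == NULL)
                    p = drm_malloc_ab(size, count);
            return p;       /* release with drm_free_large() */
    }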
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2c150dee78a7..926a1e2dd234 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -44,9 +44,9 @@ typedef uint32_t gtt_pte_t;
44#define GEN6_PTE_CACHE_LLC_MLC (3 << 1) 44#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
45#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) 45#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
46 46
47static inline gtt_pte_t pte_encode(struct drm_device *dev, 47static inline gtt_pte_t gen6_pte_encode(struct drm_device *dev,
48 dma_addr_t addr, 48 dma_addr_t addr,
49 enum i915_cache_level level) 49 enum i915_cache_level level)
50{ 50{
51 gtt_pte_t pte = GEN6_PTE_VALID; 51 gtt_pte_t pte = GEN6_PTE_VALID;
52 pte |= GEN6_PTE_ADDR_ENCODE(addr); 52 pte |= GEN6_PTE_ADDR_ENCODE(addr);
@@ -77,7 +77,7 @@ static inline gtt_pte_t pte_encode(struct drm_device *dev,
77} 77}
78 78
79/* PPGTT support for Sandybridge/Gen6 and later */ 79
80static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, 80static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
81 unsigned first_entry, 81 unsigned first_entry,
82 unsigned num_entries) 82 unsigned num_entries)
83{ 83{
@@ -87,8 +87,9 @@ static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
87 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; 87 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
88 unsigned last_pte, i; 88 unsigned last_pte, i;
89 89
90 scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr, 90 scratch_pte = gen6_pte_encode(ppgtt->dev,
91 I915_CACHE_LLC); 91 ppgtt->scratch_page_dma_addr,
92 I915_CACHE_LLC);
92 93
93 while (num_entries) { 94 while (num_entries) {
94 last_pte = first_pte + num_entries; 95 last_pte = first_pte + num_entries;
@@ -108,10 +109,72 @@ static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
108 } 109 }
109} 110}
110 111
111int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) 112static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
113 struct sg_table *pages,
114 unsigned first_entry,
115 enum i915_cache_level cache_level)
112{ 116{
117 gtt_pte_t *pt_vaddr;
118 unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
119 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
120 unsigned i, j, m, segment_len;
121 dma_addr_t page_addr;
122 struct scatterlist *sg;
123
124 /* init sg walking */
125 sg = pages->sgl;
126 i = 0;
127 segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
128 m = 0;
129
130 while (i < pages->nents) {
131 pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
132
133 for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
134 page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
135 pt_vaddr[j] = gen6_pte_encode(ppgtt->dev, page_addr,
136 cache_level);
137
138 /* grab the next page */
139 if (++m == segment_len) {
140 if (++i == pages->nents)
141 break;
142
143 sg = sg_next(sg);
144 segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
145 m = 0;
146 }
147 }
148
149 kunmap_atomic(pt_vaddr);
150
151 first_pte = 0;
152 act_pd++;
153 }
154}
155
156static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
157{
158 int i;
159
160 if (ppgtt->pt_dma_addr) {
161 for (i = 0; i < ppgtt->num_pd_entries; i++)
162 pci_unmap_page(ppgtt->dev->pdev,
163 ppgtt->pt_dma_addr[i],
164 4096, PCI_DMA_BIDIRECTIONAL);
165 }
166
167 kfree(ppgtt->pt_dma_addr);
168 for (i = 0; i < ppgtt->num_pd_entries; i++)
169 __free_page(ppgtt->pt_pages[i]);
170 kfree(ppgtt->pt_pages);
171 kfree(ppgtt);
172}
173
174static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
175{
176 struct drm_device *dev = ppgtt->dev;
113 struct drm_i915_private *dev_priv = dev->dev_private; 177 struct drm_i915_private *dev_priv = dev->dev_private;
114 struct i915_hw_ppgtt *ppgtt;
115 unsigned first_pd_entry_in_global_pt; 178 unsigned first_pd_entry_in_global_pt;
116 int i; 179 int i;
117 int ret = -ENOMEM; 180 int ret = -ENOMEM;
@@ -119,18 +182,17 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
119 /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024 182 /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
120 * entries. For aliasing ppgtt support we just steal them at the end for 183 * entries. For aliasing ppgtt support we just steal them at the end for
121 * now. */ 184 * now. */
122 first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES; 185 first_pd_entry_in_global_pt =
123 186 gtt_total_entries(dev_priv->gtt) - I915_PPGTT_PD_ENTRIES;
124 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
125 if (!ppgtt)
126 return ret;
127 187
128 ppgtt->dev = dev;
129 ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES; 188 ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
189 ppgtt->clear_range = gen6_ppgtt_clear_range;
190 ppgtt->insert_entries = gen6_ppgtt_insert_entries;
191 ppgtt->cleanup = gen6_ppgtt_cleanup;
130 ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries, 192 ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
131 GFP_KERNEL); 193 GFP_KERNEL);
132 if (!ppgtt->pt_pages) 194 if (!ppgtt->pt_pages)
133 goto err_ppgtt; 195 return -ENOMEM;
134 196
135 for (i = 0; i < ppgtt->num_pd_entries; i++) { 197 for (i = 0; i < ppgtt->num_pd_entries; i++) {
136 ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL); 198 ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
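gen6_ppgtt_init() above converts the PPGTT into a small object with method pointers (clear_range, insert_entries, cleanup), so generic callers stop hard-coding the Gen6 implementation. A stripped-down sketch of the dispatch shape (insert_entries omitted, stub bodies for brevity):

    struct hw_ppgtt {
            void (*clear_range)(struct hw_ppgtt *p,
                                unsigned int first, unsigned int num);
            void (*cleanup)(struct hw_ppgtt *p);
    };

    /* Illustrative per-generation implementations. */
    static void gen6_clear_range(struct hw_ppgtt *p,
                                 unsigned int first, unsigned int num) { }
    static void gen6_cleanup(struct hw_ppgtt *p) { }

    static void gen6_init(struct hw_ppgtt *p)
    {
            p->clear_range = gen6_clear_range;
            p->cleanup = gen6_cleanup;
    }

    /* Generic code dispatches without knowing the generation: */
    static void ppgtt_unbind(struct hw_ppgtt *p,
                             unsigned int first, unsigned int num)
    {
            p->clear_range(p, first, num);
    }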
@@ -138,39 +200,32 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
138 goto err_pt_alloc; 200 goto err_pt_alloc;
139 } 201 }
140 202
141 if (dev_priv->mm.gtt->needs_dmar) { 203 ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) *ppgtt->num_pd_entries,
142 ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) 204 GFP_KERNEL);
143 *ppgtt->num_pd_entries, 205 if (!ppgtt->pt_dma_addr)
144 GFP_KERNEL); 206 goto err_pt_alloc;
145 if (!ppgtt->pt_dma_addr)
146 goto err_pt_alloc;
147 207
148 for (i = 0; i < ppgtt->num_pd_entries; i++) { 208 for (i = 0; i < ppgtt->num_pd_entries; i++) {
149 dma_addr_t pt_addr; 209 dma_addr_t pt_addr;
150 210
151 pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 211 pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
152 0, 4096, 212 PCI_DMA_BIDIRECTIONAL);
153 PCI_DMA_BIDIRECTIONAL);
154 213
155 if (pci_dma_mapping_error(dev->pdev, 214 if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
156 pt_addr)) { 215 ret = -EIO;
157 ret = -EIO; 216 goto err_pd_pin;
158 goto err_pd_pin;
159 217
160 }
161 ppgtt->pt_dma_addr[i] = pt_addr;
162 } 218 }
219 ppgtt->pt_dma_addr[i] = pt_addr;
163 } 220 }
164 221
165 ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma; 222 ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;
166 223
167 i915_ppgtt_clear_range(ppgtt, 0, 224 ppgtt->clear_range(ppgtt, 0,
168 ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES); 225 ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
169 226
170 ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t); 227 ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);
171 228
172 dev_priv->mm.aliasing_ppgtt = ppgtt;
173
174 return 0; 229 return 0;
175 230
176err_pd_pin: 231err_pd_pin:
@@ -186,94 +241,57 @@ err_pt_alloc:
186 __free_page(ppgtt->pt_pages[i]); 241 __free_page(ppgtt->pt_pages[i]);
187 } 242 }
188 kfree(ppgtt->pt_pages); 243 kfree(ppgtt->pt_pages);
189err_ppgtt:
190 kfree(ppgtt);
191 244
192 return ret; 245 return ret;
193} 246}
194 247
195void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev) 248static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
196{ 249{
197 struct drm_i915_private *dev_priv = dev->dev_private; 250 struct drm_i915_private *dev_priv = dev->dev_private;
198 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 251 struct i915_hw_ppgtt *ppgtt;
199 int i; 252 int ret;
200 253
254 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
201 if (!ppgtt) 255 if (!ppgtt)
202 return; 256 return -ENOMEM;
203 257
204 if (ppgtt->pt_dma_addr) { 258 ppgtt->dev = dev;
205 for (i = 0; i < ppgtt->num_pd_entries; i++)
206 pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
207 4096, PCI_DMA_BIDIRECTIONAL);
208 }
209 259
210 kfree(ppgtt->pt_dma_addr); 260 ret = gen6_ppgtt_init(ppgtt);
211 for (i = 0; i < ppgtt->num_pd_entries; i++) 261 if (ret)
212 __free_page(ppgtt->pt_pages[i]); 262 kfree(ppgtt);
213 kfree(ppgtt->pt_pages); 263 else
214 kfree(ppgtt); 264 dev_priv->mm.aliasing_ppgtt = ppgtt;
265
266 return ret;
215} 267}
216 268
217static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt, 269void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
218 const struct sg_table *pages,
219 unsigned first_entry,
220 enum i915_cache_level cache_level)
221{ 270{
222 gtt_pte_t *pt_vaddr; 271 struct drm_i915_private *dev_priv = dev->dev_private;
223 unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; 272 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
224 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
225 unsigned i, j, m, segment_len;
226 dma_addr_t page_addr;
227 struct scatterlist *sg;
228
229 /* init sg walking */
230 sg = pages->sgl;
231 i = 0;
232 segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
233 m = 0;
234
235 while (i < pages->nents) {
236 pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);
237
238 for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
239 page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
240 pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr,
241 cache_level);
242
243 /* grab the next page */
244 if (++m == segment_len) {
245 if (++i == pages->nents)
246 break;
247
248 sg = sg_next(sg);
249 segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
250 m = 0;
251 }
252 }
253 273
254 kunmap_atomic(pt_vaddr); 274 if (!ppgtt)
275 return;
255 276
256 first_pte = 0; 277 ppgtt->cleanup(ppgtt);
257 act_pd++;
258 }
259} 278}
260 279
261void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, 280void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
262 struct drm_i915_gem_object *obj, 281 struct drm_i915_gem_object *obj,
263 enum i915_cache_level cache_level) 282 enum i915_cache_level cache_level)
264{ 283{
265 i915_ppgtt_insert_sg_entries(ppgtt, 284 ppgtt->insert_entries(ppgtt, obj->pages,
266 obj->pages, 285 obj->gtt_space->start >> PAGE_SHIFT,
267 obj->gtt_space->start >> PAGE_SHIFT, 286 cache_level);
268 cache_level);
269} 287}
270 288
271void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, 289void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
272 struct drm_i915_gem_object *obj) 290 struct drm_i915_gem_object *obj)
273{ 291{
274 i915_ppgtt_clear_range(ppgtt, 292 ppgtt->clear_range(ppgtt,
275 obj->gtt_space->start >> PAGE_SHIFT, 293 obj->gtt_space->start >> PAGE_SHIFT,
276 obj->base.size >> PAGE_SHIFT); 294 obj->base.size >> PAGE_SHIFT);
277} 295}
278 296
279void i915_gem_init_ppgtt(struct drm_device *dev) 297void i915_gem_init_ppgtt(struct drm_device *dev)
@@ -282,7 +300,7 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
282 uint32_t pd_offset; 300 uint32_t pd_offset;
283 struct intel_ring_buffer *ring; 301 struct intel_ring_buffer *ring;
284 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 302 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
285 uint32_t __iomem *pd_addr; 303 gtt_pte_t __iomem *pd_addr;
286 uint32_t pd_entry; 304 uint32_t pd_entry;
287 int i; 305 int i;
288 306
@@ -290,15 +308,11 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
290 return; 308 return;
291 309
292 310
293 pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t); 311 pd_addr = (gtt_pte_t __iomem*)dev_priv->gtt.gsm + ppgtt->pd_offset/sizeof(gtt_pte_t);
294 for (i = 0; i < ppgtt->num_pd_entries; i++) { 312 for (i = 0; i < ppgtt->num_pd_entries; i++) {
295 dma_addr_t pt_addr; 313 dma_addr_t pt_addr;
296 314
297 if (dev_priv->mm.gtt->needs_dmar) 315 pt_addr = ppgtt->pt_dma_addr[i];
298 pt_addr = ppgtt->pt_dma_addr[i];
299 else
300 pt_addr = page_to_phys(ppgtt->pt_pages[i]);
301
302 pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr); 316 pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
303 pd_entry |= GEN6_PDE_VALID; 317 pd_entry |= GEN6_PDE_VALID;
304 318
@@ -338,11 +352,27 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
338 } 352 }
339} 353}
340 354
355extern int intel_iommu_gfx_mapped;
356/* Certain Gen5 chipsets require require idling the GPU before
357 * unmapping anything from the GTT when VT-d is enabled.
358 */
359static inline bool needs_idle_maps(struct drm_device *dev)
360{
361#ifdef CONFIG_INTEL_IOMMU
362 /* Query intel_iommu to see if we need the workaround. Presumably that
363 * was loaded first.
364 */
365 if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
366 return true;
367#endif
368 return false;
369}
370
341static bool do_idling(struct drm_i915_private *dev_priv) 371static bool do_idling(struct drm_i915_private *dev_priv)
342{ 372{
343 bool ret = dev_priv->mm.interruptible; 373 bool ret = dev_priv->mm.interruptible;
344 374
345 if (unlikely(dev_priv->mm.gtt->do_idle_maps)) { 375 if (unlikely(dev_priv->gtt.do_idle_maps)) {
346 dev_priv->mm.interruptible = false; 376 dev_priv->mm.interruptible = false;
347 if (i915_gpu_idle(dev_priv->dev)) { 377 if (i915_gpu_idle(dev_priv->dev)) {
348 DRM_ERROR("Couldn't idle GPU\n"); 378 DRM_ERROR("Couldn't idle GPU\n");
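needs_idle_maps() gates the Gen5/VT-d erratum at probe time; do_idling()/undo_idling() then bracket GTT unmaps, saving and restoring dev_priv->mm.interruptible around a forced GPU idle. Usage follows a save/restore shape, roughly (unmap_object_from_gtt() is a hypothetical stand-in):

    static void safe_gtt_unmap(struct drm_i915_private *dev_priv,
                               struct drm_i915_gem_object *obj)
    {
            /* Idles the GPU first only when the quirk applies. */
            bool interruptible = do_idling(dev_priv);

            unmap_object_from_gtt(obj);

            undo_idling(dev_priv, interruptible);
    }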
@@ -356,45 +386,18 @@ static bool do_idling(struct drm_i915_private *dev_priv)
356 386
357static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible) 387static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
358{ 388{
359 if (unlikely(dev_priv->mm.gtt->do_idle_maps)) 389 if (unlikely(dev_priv->gtt.do_idle_maps))
360 dev_priv->mm.interruptible = interruptible; 390 dev_priv->mm.interruptible = interruptible;
361} 391}
362 392
363
364static void i915_ggtt_clear_range(struct drm_device *dev,
365 unsigned first_entry,
366 unsigned num_entries)
367{
368 struct drm_i915_private *dev_priv = dev->dev_private;
369 gtt_pte_t scratch_pte;
370 gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
371 const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
372 int i;
373
374 if (INTEL_INFO(dev)->gen < 6) {
375 intel_gtt_clear_range(first_entry, num_entries);
376 return;
377 }
378
379 if (WARN(num_entries > max_entries,
380 "First entry = %d; Num entries = %d (max=%d)\n",
381 first_entry, num_entries, max_entries))
382 num_entries = max_entries;
383
384 scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
385 for (i = 0; i < num_entries; i++)
386 iowrite32(scratch_pte, &gtt_base[i]);
387 readl(gtt_base);
388}
389
390void i915_gem_restore_gtt_mappings(struct drm_device *dev) 393void i915_gem_restore_gtt_mappings(struct drm_device *dev)
391{ 394{
392 struct drm_i915_private *dev_priv = dev->dev_private; 395 struct drm_i915_private *dev_priv = dev->dev_private;
393 struct drm_i915_gem_object *obj; 396 struct drm_i915_gem_object *obj;
394 397
395 /* First fill our portion of the GTT with scratch pages */ 398 /* First fill our portion of the GTT with scratch pages */
396 i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE, 399 dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
397 (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); 400 dev_priv->gtt.total / PAGE_SIZE);
398 401
399 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { 402 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
400 i915_gem_clflush_object(obj); 403 i915_gem_clflush_object(obj);
@@ -423,16 +426,15 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
423 * within the global GTT as well as accessible by the GPU through the GMADR 426 * within the global GTT as well as accessible by the GPU through the GMADR
424 * mapped BAR (dev_priv->mm.gtt->gtt). 427 * mapped BAR (dev_priv->mm.gtt->gtt).
425 */ 428 */
426static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj, 429static void gen6_ggtt_insert_entries(struct drm_device *dev,
427 enum i915_cache_level level) 430 struct sg_table *st,
431 unsigned int first_entry,
432 enum i915_cache_level level)
428{ 433{
429 struct drm_device *dev = obj->base.dev;
430 struct drm_i915_private *dev_priv = dev->dev_private; 434 struct drm_i915_private *dev_priv = dev->dev_private;
431 struct sg_table *st = obj->pages;
432 struct scatterlist *sg = st->sgl; 435 struct scatterlist *sg = st->sgl;
433 const int first_entry = obj->gtt_space->start >> PAGE_SHIFT; 436 gtt_pte_t __iomem *gtt_entries =
434 const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry; 437 (gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
435 gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
436 int unused, i = 0; 438 int unused, i = 0;
437 unsigned int len, m = 0; 439 unsigned int len, m = 0;
438 dma_addr_t addr; 440 dma_addr_t addr;
@@ -441,14 +443,12 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
441 len = sg_dma_len(sg) >> PAGE_SHIFT; 443 len = sg_dma_len(sg) >> PAGE_SHIFT;
442 for (m = 0; m < len; m++) { 444 for (m = 0; m < len; m++) {
443 addr = sg_dma_address(sg) + (m << PAGE_SHIFT); 445 addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
444 iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]); 446 iowrite32(gen6_pte_encode(dev, addr, level),
447 &gtt_entries[i]);
445 i++; 448 i++;
446 } 449 }
447 } 450 }
448 451
449 BUG_ON(i > max_entries);
450 BUG_ON(i != obj->base.size / PAGE_SIZE);
451
452 /* XXX: This serves as a posting read to make sure that the PTE has 452 /* XXX: This serves as a posting read to make sure that the PTE has
453 * actually been updated. There is some concern that even though 453 * actually been updated. There is some concern that even though
454 * registers and PTEs are within the same BAR that they are potentially 454 * registers and PTEs are within the same BAR that they are potentially
@@ -456,7 +456,8 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
456 * hardware should work, we must keep this posting read for paranoia. 456 * hardware should work, we must keep this posting read for paranoia.
457 */ 457 */
458 if (i != 0) 458 if (i != 0)
459 WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level)); 459 WARN_ON(readl(&gtt_entries[i-1])
460 != gen6_pte_encode(dev, addr, level));
460 461
461 /* This next bit makes the above posting read even more important. We 462 /* This next bit makes the above posting read even more important. We
462 * want to flush the TLBs only after we're certain all the PTE updates 463 * want to flush the TLBs only after we're certain all the PTE updates
@@ -466,28 +467,70 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
466 POSTING_READ(GFX_FLSH_CNTL_GEN6); 467 POSTING_READ(GFX_FLSH_CNTL_GEN6);
467} 468}
468 469
470static void gen6_ggtt_clear_range(struct drm_device *dev,
471 unsigned int first_entry,
472 unsigned int num_entries)
473{
474 struct drm_i915_private *dev_priv = dev->dev_private;
475 gtt_pte_t scratch_pte;
476 gtt_pte_t __iomem *gtt_base = (gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
477 const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
478 int i;
479
480 if (WARN(num_entries > max_entries,
481 "First entry = %d; Num entries = %d (max=%d)\n",
482 first_entry, num_entries, max_entries))
483 num_entries = max_entries;
484
485 scratch_pte = gen6_pte_encode(dev, dev_priv->gtt.scratch_page_dma,
486 I915_CACHE_LLC);
487 for (i = 0; i < num_entries; i++)
488 iowrite32(scratch_pte, &gtt_base[i]);
489 readl(gtt_base);
490}
491
492
493static void i915_ggtt_insert_entries(struct drm_device *dev,
494 struct sg_table *st,
495 unsigned int pg_start,
496 enum i915_cache_level cache_level)
497{
498 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
499 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
500
501 intel_gtt_insert_sg_entries(st, pg_start, flags);
502
503}
504
505static void i915_ggtt_clear_range(struct drm_device *dev,
506 unsigned int first_entry,
507 unsigned int num_entries)
508{
509 intel_gtt_clear_range(first_entry, num_entries);
510}
511
512
469void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, 513void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
470 enum i915_cache_level cache_level) 514 enum i915_cache_level cache_level)
471{ 515{
472 struct drm_device *dev = obj->base.dev; 516 struct drm_device *dev = obj->base.dev;
473 if (INTEL_INFO(dev)->gen < 6) { 517 struct drm_i915_private *dev_priv = dev->dev_private;
474 unsigned int flags = (cache_level == I915_CACHE_NONE) ? 518
475 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; 519 dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
476 intel_gtt_insert_sg_entries(obj->pages, 520 obj->gtt_space->start >> PAGE_SHIFT,
477 obj->gtt_space->start >> PAGE_SHIFT, 521 cache_level);
478 flags);
479 } else {
480 gen6_ggtt_bind_object(obj, cache_level);
481 }
482 522
483 obj->has_global_gtt_mapping = 1; 523 obj->has_global_gtt_mapping = 1;
484} 524}
485 525
486void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) 526void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
487{ 527{
488 i915_ggtt_clear_range(obj->base.dev, 528 struct drm_device *dev = obj->base.dev;
489 obj->gtt_space->start >> PAGE_SHIFT, 529 struct drm_i915_private *dev_priv = dev->dev_private;
490 obj->base.size >> PAGE_SHIFT); 530
531 dev_priv->gtt.gtt_clear_range(obj->base.dev,
532 obj->gtt_space->start >> PAGE_SHIFT,
533 obj->base.size >> PAGE_SHIFT);
491 534
492 obj->has_global_gtt_mapping = 0; 535 obj->has_global_gtt_mapping = 0;
493} 536}
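gen6_ggtt_clear_range() points every PTE in the range at the scratch page, then reads one entry back: on the write-combined GSM mapping the readl() acts as a posting read, flushing buffered PTE writes before the caller proceeds. The core pattern, reduced to essentials:

    #include <linux/io.h>

    static void clear_ptes(u32 __iomem *gtt_base, u32 scratch_pte, int n)
    {
            int i;

            /* Writes through a WC mapping may sit in buffers... */
            for (i = 0; i < n; i++)
                    iowrite32(scratch_pte, &gtt_base[i]);

            /* ...until a read from the same BAR posts them. */
            readl(gtt_base);
    }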
@@ -524,27 +567,101 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
524 *end -= 4096; 567 *end -= 4096;
525 } 568 }
526} 569}
527 570void i915_gem_setup_global_gtt(struct drm_device *dev,
528void i915_gem_init_global_gtt(struct drm_device *dev, 571 unsigned long start,
529 unsigned long start, 572 unsigned long mappable_end,
530 unsigned long mappable_end, 573 unsigned long end)
531 unsigned long end)
532{ 574{
575 /* Let GEM Manage all of the aperture.
576 *
577 * However, leave one page at the end still bound to the scratch page.
578 * There are a number of places where the hardware apparently prefetches
579 * past the end of the object, and we've seen multiple hangs with the
580 * GPU head pointer stuck in a batchbuffer bound at the last page of the
581 * aperture. One page should be enough to keep any prefetching inside
582 * of the aperture.
583 */
533 drm_i915_private_t *dev_priv = dev->dev_private; 584 drm_i915_private_t *dev_priv = dev->dev_private;
585 struct drm_mm_node *entry;
586 struct drm_i915_gem_object *obj;
587 unsigned long hole_start, hole_end;
534 588
535 /* Substract the guard page ... */ 589 BUG_ON(mappable_end > end);
590
591 /* Subtract the guard page ... */
536 drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE); 592 drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
537 if (!HAS_LLC(dev)) 593 if (!HAS_LLC(dev))
538 dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust; 594 dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
539 595
540 dev_priv->mm.gtt_start = start; 596 /* Mark any preallocated objects as occupied */
541 dev_priv->mm.gtt_mappable_end = mappable_end; 597 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
542 dev_priv->mm.gtt_end = end; 598 DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
543 dev_priv->mm.gtt_total = end - start; 599 obj->gtt_offset, obj->base.size);
544 dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start; 600
601 BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
602 obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
603 obj->gtt_offset,
604 obj->base.size,
605 false);
606 obj->has_global_gtt_mapping = 1;
607 }
608
609 dev_priv->gtt.start = start;
610 dev_priv->gtt.total = end - start;
611
612 /* Clear any non-preallocated blocks */
613 drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
614 hole_start, hole_end) {
615 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
616 hole_start, hole_end);
617 dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE,
618 (hole_end-hole_start) / PAGE_SIZE);
619 }
545 620
546 /* ... but ensure that we clear the entire range. */ 621 /* And finally clear the reserved guard page */
547 i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE); 622 dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
623}
624
625static bool
626intel_enable_ppgtt(struct drm_device *dev)
627{
628 if (i915_enable_ppgtt >= 0)
629 return i915_enable_ppgtt;
630
631#ifdef CONFIG_INTEL_IOMMU
632 /* Disable ppgtt on SNB if VT-d is on. */
633 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
634 return false;
635#endif
636
637 return true;
638}
639
640void i915_gem_init_global_gtt(struct drm_device *dev)
641{
642 struct drm_i915_private *dev_priv = dev->dev_private;
643 unsigned long gtt_size, mappable_size;
644
645 gtt_size = dev_priv->gtt.total;
646 mappable_size = dev_priv->gtt.mappable_end;
647
648 if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
649 int ret;
650 /* PPGTT pdes are stolen from global gtt ptes, so shrink the
651 * aperture accordingly when using aliasing ppgtt. */
652 gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
653
654 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
655
656 ret = i915_gem_init_aliasing_ppgtt(dev);
657 if (!ret)
658 return;
659
660 DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
661 drm_mm_takedown(&dev_priv->mm.gtt_space);
662 gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
663 }
664 i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
548} 665}
549 666
550static int setup_scratch_page(struct drm_device *dev) 667static int setup_scratch_page(struct drm_device *dev)
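intel_enable_ppgtt() is the usual tri-state module parameter: -1 lets the driver decide (here, disable on SNB when VT-d is active), while 0/1 force the answer. The idiom in isolation (parameter name illustrative):

    #include <linux/module.h>

    /* -1 = auto, 0 = force off, 1 = force on. */
    static int enable_feature = -1;
    module_param(enable_feature, int, 0400);

    static bool feature_enabled(bool platform_default)
    {
            if (enable_feature >= 0)        /* explicit user override wins */
                    return enable_feature;
            return platform_default;        /* otherwise the driver decides */
    }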
@@ -567,8 +684,8 @@ static int setup_scratch_page(struct drm_device *dev)
567#else 684#else
568 dma_addr = page_to_phys(page); 685 dma_addr = page_to_phys(page);
569#endif 686#endif
570 dev_priv->mm.gtt->scratch_page = page; 687 dev_priv->gtt.scratch_page = page;
571 dev_priv->mm.gtt->scratch_page_dma = dma_addr; 688 dev_priv->gtt.scratch_page_dma = dma_addr;
572 689
573 return 0; 690 return 0;
574} 691}
@@ -576,11 +693,11 @@ static int setup_scratch_page(struct drm_device *dev)
576static void teardown_scratch_page(struct drm_device *dev) 693static void teardown_scratch_page(struct drm_device *dev)
577{ 694{
578 struct drm_i915_private *dev_priv = dev->dev_private; 695 struct drm_i915_private *dev_priv = dev->dev_private;
579 set_pages_wb(dev_priv->mm.gtt->scratch_page, 1); 696 set_pages_wb(dev_priv->gtt.scratch_page, 1);
580 pci_unmap_page(dev->pdev, dev_priv->mm.gtt->scratch_page_dma, 697 pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma,
581 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 698 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
582 put_page(dev_priv->mm.gtt->scratch_page); 699 put_page(dev_priv->gtt.scratch_page);
583 __free_page(dev_priv->mm.gtt->scratch_page); 700 __free_page(dev_priv->gtt.scratch_page);
584} 701}
585 702
586static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) 703static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -590,14 +707,14 @@ static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
590 return snb_gmch_ctl << 20; 707 return snb_gmch_ctl << 20;
591} 708}
592 709
593static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl) 710static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
594{ 711{
595 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT; 712 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
596 snb_gmch_ctl &= SNB_GMCH_GMS_MASK; 713 snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
597 return snb_gmch_ctl << 25; /* 32 MB units */ 714 return snb_gmch_ctl << 25; /* 32 MB units */
598} 715}
599 716
600static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl) 717static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl)
601{ 718{
602 static const int stolen_decoder[] = { 719 static const int stolen_decoder[] = {
603 0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352}; 720 0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
@@ -606,103 +723,127 @@ static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
606 return stolen_decoder[snb_gmch_ctl] << 20; 723 return stolen_decoder[snb_gmch_ctl] << 20;
607} 724}
608 725
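The two stolen-size helpers decode the same GMCH control field differently per generation: Gen6 stores a count of 32MB units directly, while Gen7 treats the field as an index into a table of sizes in MB (the table values come from the hunk above). A hedged sketch of both decodes; the shift/mask constants here are illustrative, not datasheet values:

    #include <linux/types.h>

    #define GMS_SHIFT       3       /* illustrative field position */
    #define GMS_MASK        0x1f    /* illustrative field width */

    static size_t gen6_stolen(u16 gmch_ctl)
    {
            /* The field counts 32MB units, hence << 25 for bytes. */
            return (size_t)((gmch_ctl >> GMS_SHIFT) & GMS_MASK) << 25;
    }

    static size_t gen7_stolen(u16 gmch_ctl)
    {
            static const int mb[] = {
                    0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352 };
            unsigned int idx = (gmch_ctl >> GMS_SHIFT) & GMS_MASK;

            if (idx >= sizeof(mb) / sizeof(mb[0]))
                    return 0;
            return (size_t)mb[idx] << 20;   /* MB to bytes */
    }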
609int i915_gem_gtt_init(struct drm_device *dev) 726static int gen6_gmch_probe(struct drm_device *dev,
727 size_t *gtt_total,
728 size_t *stolen,
729 phys_addr_t *mappable_base,
730 unsigned long *mappable_end)
610{ 731{
611 struct drm_i915_private *dev_priv = dev->dev_private; 732 struct drm_i915_private *dev_priv = dev->dev_private;
612 phys_addr_t gtt_bus_addr; 733 phys_addr_t gtt_bus_addr;
734 unsigned int gtt_size;
613 u16 snb_gmch_ctl; 735 u16 snb_gmch_ctl;
614 int ret; 736 int ret;
615 737
616 /* On modern platforms we need not worry ourself with the legacy 738 *mappable_base = pci_resource_start(dev->pdev, 2);
617 * hostbridge query stuff. Skip it entirely 739 *mappable_end = pci_resource_len(dev->pdev, 2);
618 */
619 if (INTEL_INFO(dev)->gen < 6) {
620 ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
621 if (!ret) {
622 DRM_ERROR("failed to set up gmch\n");
623 return -EIO;
624 }
625 740
626 dev_priv->mm.gtt = intel_gtt_get(); 741 /* 64/512MB is the current min/max we actually know of, but this is just
627 if (!dev_priv->mm.gtt) { 742 * a coarse sanity check.
628 DRM_ERROR("Failed to initialize GTT\n"); 743 */
629 intel_gmch_remove(); 744 if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
630 return -ENODEV; 745 DRM_ERROR("Unknown GMADR size (%lx)\n",
631 } 746 dev_priv->gtt.mappable_end);
632 return 0; 747 return -ENXIO;
633 } 748 }
634 749
635 dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
636 if (!dev_priv->mm.gtt)
637 return -ENOMEM;
638
639 if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40))) 750 if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
640 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40)); 751 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
752 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
753 gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
641 754
642#ifdef CONFIG_INTEL_IOMMU 755 if (IS_GEN7(dev))
643 dev_priv->mm.gtt->needs_dmar = 1; 756 *stolen = gen7_get_stolen_size(snb_gmch_ctl);
644#endif 757 else
758 *stolen = gen6_get_stolen_size(snb_gmch_ctl);
759
760 *gtt_total = (gtt_size / sizeof(gtt_pte_t)) << PAGE_SHIFT;
645 761
646 /* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */ 762 /* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
647 gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20); 763 gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
648 dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2); 764 dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
649 765 if (!dev_priv->gtt.gsm) {
650 /* i9xx_setup */ 766 DRM_ERROR("Failed to map the gtt page table\n");
651 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); 767 return -ENOMEM;
652 dev_priv->mm.gtt->gtt_total_entries =
653 gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
654 if (INTEL_INFO(dev)->gen < 7)
655 dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
656 else
657 dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl);
658
659 dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT;
660 /* 64/512MB is the current min/max we actually know of, but this is just a
661 * coarse sanity check.
662 */
663 if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 ||
664 dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) {
665 DRM_ERROR("Unknown GMADR entries (%d)\n",
666 dev_priv->mm.gtt->gtt_mappable_entries);
667 ret = -ENXIO;
668 goto err_out;
669 } 768 }
670 769
671 ret = setup_scratch_page(dev); 770 ret = setup_scratch_page(dev);
672 if (ret) { 771 if (ret)
673 DRM_ERROR("Scratch setup failed\n"); 772 DRM_ERROR("Scratch setup failed\n");
674 goto err_out;
675 }
676 773
677 dev_priv->mm.gtt->gtt = ioremap_wc(gtt_bus_addr, 774 dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
678 dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t)); 775 dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;
679 if (!dev_priv->mm.gtt->gtt) { 776
680 DRM_ERROR("Failed to map the gtt page table\n"); 777 return ret;
681 teardown_scratch_page(dev); 778}
682 ret = -ENOMEM; 779
683 goto err_out; 780static void gen6_gmch_remove(struct drm_device *dev)
781{
782 struct drm_i915_private *dev_priv = dev->dev_private;
783 iounmap(dev_priv->gtt.gsm);
784 teardown_scratch_page(dev_priv->dev);
785}
786
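The *gtt_total arithmetic in gen6_gmch_probe above is worth spelling out: gtt_size is the size of the PTE table in bytes, and each PTE maps one page, so the usable GTT address space is (table bytes / PTE size) pages. A self-contained sketch, assuming the 4-byte PTEs and 4KiB pages these parts use:

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12                   /* 4KiB pages */
    typedef uint32_t gtt_pte_t;             /* gen6 global GTT PTEs are 32-bit */

    static uint64_t gtt_total_bytes(uint64_t gtt_table_bytes)
    {
            return (gtt_table_bytes / sizeof(gtt_pte_t)) << PAGE_SHIFT;
    }

    int main(void)
    {
            /* A 2MiB PTE table maps (2MiB / 4) * 4KiB = 2GiB of GTT space. */
            assert(gtt_total_bytes(2ull << 20) == 2ull << 30);
            return 0;
    }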
787static int i915_gmch_probe(struct drm_device *dev,
788 size_t *gtt_total,
789 size_t *stolen,
790 phys_addr_t *mappable_base,
791 unsigned long *mappable_end)
792{
793 struct drm_i915_private *dev_priv = dev->dev_private;
794 int ret;
795
796 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
797 if (!ret) {
798 DRM_ERROR("failed to set up gmch\n");
799 return -EIO;
684 } 800 }
685 801
686 /* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */ 802 intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
687 DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8); 803
688 DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8); 804 dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
689 DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20); 805 dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
806 dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;
690 807
691 return 0; 808 return 0;
809}
692 810
693err_out: 811static void i915_gmch_remove(struct drm_device *dev)
694 kfree(dev_priv->mm.gtt); 812{
695 if (INTEL_INFO(dev)->gen < 6) 813 intel_gmch_remove();
696 intel_gmch_remove();
697 return ret;
698} 814}
699 815
700void i915_gem_gtt_fini(struct drm_device *dev) 816int i915_gem_gtt_init(struct drm_device *dev)
701{ 817{
702 struct drm_i915_private *dev_priv = dev->dev_private; 818 struct drm_i915_private *dev_priv = dev->dev_private;
703 iounmap(dev_priv->mm.gtt->gtt); 819 struct i915_gtt *gtt = &dev_priv->gtt;
704 teardown_scratch_page(dev); 820 unsigned long gtt_size;
705 if (INTEL_INFO(dev)->gen < 6) 821 int ret;
706 intel_gmch_remove(); 822
707 kfree(dev_priv->mm.gtt); 823 if (INTEL_INFO(dev)->gen <= 5) {
824 dev_priv->gtt.gtt_probe = i915_gmch_probe;
825 dev_priv->gtt.gtt_remove = i915_gmch_remove;
826 } else {
827 dev_priv->gtt.gtt_probe = gen6_gmch_probe;
828 dev_priv->gtt.gtt_remove = gen6_gmch_remove;
829 }
830
831 ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total,
832 &dev_priv->gtt.stolen_size,
833 &gtt->mappable_base,
834 &gtt->mappable_end);
835 if (ret)
836 return ret;
837
838 gtt_size = (dev_priv->gtt.total >> PAGE_SHIFT) * sizeof(gtt_pte_t);
839
840 /* GMADR is the PCI mmio aperture into the global GTT. */
841 DRM_INFO("Memory usable by graphics device = %zdM\n",
842 dev_priv->gtt.total >> 20);
843 DRM_DEBUG_DRIVER("GMADR size = %ldM\n",
844 dev_priv->gtt.mappable_end >> 20);
845 DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
846 dev_priv->gtt.stolen_size >> 20);
847
848 return 0;
708} 849}
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 8e91083b126f..69d97cbac13c 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -42,85 +42,73 @@
42 * for is a boon. 42 * for is a boon.
43 */ 43 */
44 44
45#define PTE_ADDRESS_MASK 0xfffff000 45static unsigned long i915_stolen_to_physical(struct drm_device *dev)
46#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
47#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
48#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
49#define PTE_MAPPING_TYPE_CACHED (3 << 1)
50#define PTE_MAPPING_TYPE_MASK (3 << 1)
51#define PTE_VALID (1 << 0)
52
53/**
54 * i915_stolen_to_phys - take an offset into stolen memory and turn it into
55 * a physical one
56 * @dev: drm device
57 * @offset: address to translate
58 *
59 * Some chip functions require allocations from stolen space and need the
60 * physical address of the memory in question.
61 */
62static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
63{ 46{
64 struct drm_i915_private *dev_priv = dev->dev_private; 47 struct drm_i915_private *dev_priv = dev->dev_private;
65 struct pci_dev *pdev = dev_priv->bridge_dev; 48 struct pci_dev *pdev = dev_priv->bridge_dev;
66 u32 base; 49 u32 base;
67 50
68#if 0
69 /* On the machines I have tested the Graphics Base of Stolen Memory 51 /* On the machines I have tested the Graphics Base of Stolen Memory
70 * is unreliable, so compute the base by subtracting the stolen memory 52 * is unreliable, so on those compute the base by subtracting the
71 * from the Top of Low Usable DRAM which is where the BIOS places 53 * stolen memory from the Top of Low Usable DRAM which is where the
72 * the graphics stolen memory. 54 * BIOS places the graphics stolen memory.
55 *
56 * On gen2, the layout is slightly different with the Graphics Segment
57 * immediately following Top of Memory (or Top of Usable DRAM). Note
58 * it appears that TOUD is only reported by 865g, so we just use the
59 * top of memory as determined by the e820 probe.
60 *
61 * XXX gen2 requires an unavailable symbol and 945gm fails with
62 * its value of TOLUD.
73 */ 63 */
74 if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) { 64 base = 0;
75 /* top 32bits are reserved = 0 */ 65 if (INTEL_INFO(dev)->gen >= 6) {
66 /* Read Base Data of Stolen Memory Register (BDSM) directly.
 67 * Note that there is also a MCHBAR mirror at 0x1080c0 or
68 * we could use device 2:0x5c instead.
69 */
70 pci_read_config_dword(pdev, 0xB0, &base);
71 base &= ~4095; /* lower bits used for locking register */
72 } else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
73 /* Read Graphics Base of Stolen Memory directly */
76 pci_read_config_dword(pdev, 0xA4, &base); 74 pci_read_config_dword(pdev, 0xA4, &base);
77 } else { 75#if 0
78 /* XXX presume 8xx is the same as i915 */ 76 } else if (IS_GEN3(dev)) {
79 pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
80 }
81#else
82 if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
83 u16 val;
84 pci_read_config_word(pdev, 0xb0, &val);
85 base = val >> 4 << 20;
86 } else {
87 u8 val; 77 u8 val;
78 /* Stolen is immediately below Top of Low Usable DRAM */
88 pci_read_config_byte(pdev, 0x9c, &val); 79 pci_read_config_byte(pdev, 0x9c, &val);
89 base = val >> 3 << 27; 80 base = val >> 3 << 27;
90 } 81 base -= dev_priv->mm.gtt->stolen_size;
91 base -= dev_priv->mm.gtt->stolen_size; 82 } else {
83 /* Stolen is immediately above Top of Memory */
84 base = max_low_pfn_mapped << PAGE_SHIFT;
92#endif 85#endif
86 }
93 87
94 return base + offset; 88 return base;
95} 89}
96 90
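The "base &= ~4095" in the gen6+ branch above exists because the low bits of BDSM are control bits (the comment calls out a lock bit), not address bits. A toy illustration of the masking, using a made-up register value:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* Hypothetical BDSM readback: base 0x7f800000, lock bit set. */
            uint32_t bdsm = 0x7f800001;
            uint32_t base = bdsm & ~4095u;  /* drop the 12 control/lock bits */

            assert(base == 0x7f800000);
            return 0;
    }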
97static void i915_warn_stolen(struct drm_device *dev) 91static int i915_setup_compression(struct drm_device *dev, int size)
98{
99 DRM_INFO("not enough stolen space for compressed buffer, disabling\n");
100 DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
101}
102
103static void i915_setup_compression(struct drm_device *dev, int size)
104{ 92{
105 struct drm_i915_private *dev_priv = dev->dev_private; 93 struct drm_i915_private *dev_priv = dev->dev_private;
106 struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb); 94 struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
107 unsigned long cfb_base;
108 unsigned long ll_base = 0;
109
110 /* Just in case the BIOS is doing something questionable. */
111 intel_disable_fbc(dev);
112 95
113 compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0); 96 /* Try to over-allocate to reduce reallocations and fragmentation */
97 compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
98 size <<= 1, 4096, 0);
99 if (!compressed_fb)
100 compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
101 size >>= 1, 4096, 0);
114 if (compressed_fb) 102 if (compressed_fb)
115 compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); 103 compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
116 if (!compressed_fb) 104 if (!compressed_fb)
117 goto err; 105 goto err;
118 106
119 cfb_base = i915_stolen_to_phys(dev, compressed_fb->start); 107 if (HAS_PCH_SPLIT(dev))
120 if (!cfb_base) 108 I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
121 goto err_fb; 109 else if (IS_GM45(dev)) {
122 110 I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
123 if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) { 111 } else {
124 compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen, 112 compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
125 4096, 4096, 0); 113 4096, 4096, 0);
126 if (compressed_llb) 114 if (compressed_llb)
@@ -129,73 +117,206 @@ static void i915_setup_compression(struct drm_device *dev, int size)
129 if (!compressed_llb) 117 if (!compressed_llb)
130 goto err_fb; 118 goto err_fb;
131 119
132 ll_base = i915_stolen_to_phys(dev, compressed_llb->start); 120 dev_priv->compressed_llb = compressed_llb;
133 if (!ll_base) 121
134 goto err_llb; 122 I915_WRITE(FBC_CFB_BASE,
123 dev_priv->mm.stolen_base + compressed_fb->start);
124 I915_WRITE(FBC_LL_BASE,
125 dev_priv->mm.stolen_base + compressed_llb->start);
135 } 126 }
136 127
128 dev_priv->compressed_fb = compressed_fb;
137 dev_priv->cfb_size = size; 129 dev_priv->cfb_size = size;
138 130
139 dev_priv->compressed_fb = compressed_fb; 131 DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
140 if (HAS_PCH_SPLIT(dev)) 132 size);
141 I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
142 else if (IS_GM45(dev)) {
143 I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
144 } else {
145 I915_WRITE(FBC_CFB_BASE, cfb_base);
146 I915_WRITE(FBC_LL_BASE, ll_base);
147 dev_priv->compressed_llb = compressed_llb;
148 }
149 133
150 DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", 134 return 0;
151 cfb_base, ll_base, size >> 20);
152 return;
153 135
154err_llb:
155 drm_mm_put_block(compressed_llb);
156err_fb: 136err_fb:
157 drm_mm_put_block(compressed_fb); 137 drm_mm_put_block(compressed_fb);
158err: 138err:
159 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; 139 return -ENOSPC;
160 i915_warn_stolen(dev); 140}
141
142int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
143{
144 struct drm_i915_private *dev_priv = dev->dev_private;
145
146 if (dev_priv->mm.stolen_base == 0)
147 return -ENODEV;
148
149 if (size < dev_priv->cfb_size)
150 return 0;
151
152 /* Release any current block */
153 i915_gem_stolen_cleanup_compression(dev);
154
155 return i915_setup_compression(dev, size);
161} 156}
162 157
163static void i915_cleanup_compression(struct drm_device *dev) 158void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
164{ 159{
165 struct drm_i915_private *dev_priv = dev->dev_private; 160 struct drm_i915_private *dev_priv = dev->dev_private;
166 161
167 drm_mm_put_block(dev_priv->compressed_fb); 162 if (dev_priv->cfb_size == 0)
163 return;
164
165 if (dev_priv->compressed_fb)
166 drm_mm_put_block(dev_priv->compressed_fb);
167
168 if (dev_priv->compressed_llb) 168 if (dev_priv->compressed_llb)
169 drm_mm_put_block(dev_priv->compressed_llb); 169 drm_mm_put_block(dev_priv->compressed_llb);
170
171 dev_priv->cfb_size = 0;
170} 172}
171 173
172void i915_gem_cleanup_stolen(struct drm_device *dev) 174void i915_gem_cleanup_stolen(struct drm_device *dev)
173{ 175{
174 if (I915_HAS_FBC(dev) && i915_powersave) 176 struct drm_i915_private *dev_priv = dev->dev_private;
175 i915_cleanup_compression(dev); 177
178 i915_gem_stolen_cleanup_compression(dev);
179 drm_mm_takedown(&dev_priv->mm.stolen);
176} 180}
177 181
178int i915_gem_init_stolen(struct drm_device *dev) 182int i915_gem_init_stolen(struct drm_device *dev)
179{ 183{
180 struct drm_i915_private *dev_priv = dev->dev_private; 184 struct drm_i915_private *dev_priv = dev->dev_private;
181 unsigned long prealloc_size = dev_priv->mm.gtt->stolen_size; 185
186 dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
187 if (dev_priv->mm.stolen_base == 0)
188 return 0;
189
190 DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
191 dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
182 192
183 /* Basic memrange allocator for stolen space */ 193 /* Basic memrange allocator for stolen space */
184 drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size); 194 drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size);
195
196 return 0;
197}
185 198
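The stolen allocator set up here is a plain drm_mm range manager; the pattern used throughout this file is search-free-then-get-block with 4KiB alignment. A sketch of that life cycle, condensed from the calls visible above, with stolen_size and size standing in for the values used there (error handling trimmed):

    /* Condensed from the hunks above: life cycle of the stolen range
     * manager.  4096-byte alignment matches what this file uses for
     * both FBC and stolen objects. */
    struct drm_mm mm;
    struct drm_mm_node *node;

    drm_mm_init(&mm, 0, stolen_size);               /* i915_gem_init_stolen() */

    node = drm_mm_search_free(&mm, size, 4096, 0);  /* find a suitable hole */
    if (node)
            node = drm_mm_get_block(node, size, 4096);  /* claim it */
    /* node->start is then an offset into stolen memory */

    drm_mm_put_block(node);                         /* release on object free */
    drm_mm_takedown(&mm);                           /* i915_gem_cleanup_stolen() */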
186 /* Try to set up FBC with a reasonable compressed buffer size */ 199static struct sg_table *
187 if (I915_HAS_FBC(dev) && i915_powersave) { 200i915_pages_create_for_stolen(struct drm_device *dev,
188 int cfb_size; 201 u32 offset, u32 size)
202{
203 struct drm_i915_private *dev_priv = dev->dev_private;
204 struct sg_table *st;
205 struct scatterlist *sg;
206
207 DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
208 BUG_ON(offset > dev_priv->gtt.stolen_size - size);
189 209
190 /* Leave 1M for line length buffer & misc. */ 210 /* We hide that we have no struct page backing our stolen object
211 * by wrapping the contiguous physical allocation with a fake
212 * dma mapping in a single scatterlist.
213 */
214
215 st = kmalloc(sizeof(*st), GFP_KERNEL);
216 if (st == NULL)
217 return NULL;
191 218
192 /* Try to get a 32M buffer... */ 219 if (sg_alloc_table(st, 1, GFP_KERNEL)) {
193 if (prealloc_size > (36*1024*1024)) 220 kfree(st);
194 cfb_size = 32*1024*1024; 221 return NULL;
195 else /* fall back to 7/8 of the stolen space */
196 cfb_size = prealloc_size * 7 / 8;
197 i915_setup_compression(dev, cfb_size);
198 } 222 }
199 223
200 return 0; 224 sg = st->sgl;
225 sg->offset = offset;
226 sg->length = size;
227
228 sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
229 sg_dma_len(sg) = size;
230
231 return st;
232}
233
234static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
235{
236 BUG();
237 return -EINVAL;
238}
239
240static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
241{
242 /* Should only be called during free */
243 sg_free_table(obj->pages);
244 kfree(obj->pages);
245}
246
247static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
248 .get_pages = i915_gem_object_get_pages_stolen,
249 .put_pages = i915_gem_object_put_pages_stolen,
250};
251
252static struct drm_i915_gem_object *
253_i915_gem_object_create_stolen(struct drm_device *dev,
254 struct drm_mm_node *stolen)
255{
256 struct drm_i915_gem_object *obj;
257
258 obj = i915_gem_object_alloc(dev);
259 if (obj == NULL)
260 return NULL;
261
262 if (drm_gem_private_object_init(dev, &obj->base, stolen->size))
263 goto cleanup;
264
265 i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
266
267 obj->pages = i915_pages_create_for_stolen(dev,
268 stolen->start, stolen->size);
269 if (obj->pages == NULL)
270 goto cleanup;
271
272 obj->has_dma_mapping = true;
273 obj->pages_pin_count = 1;
274 obj->stolen = stolen;
275
276 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
277 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
278 obj->cache_level = I915_CACHE_NONE;
279
280 return obj;
281
282cleanup:
283 i915_gem_object_free(obj);
284 return NULL;
285}
286
287struct drm_i915_gem_object *
288i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
289{
290 struct drm_i915_private *dev_priv = dev->dev_private;
291 struct drm_i915_gem_object *obj;
292 struct drm_mm_node *stolen;
293
294 if (dev_priv->mm.stolen_base == 0)
295 return NULL;
296
297 DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
298 if (size == 0)
299 return NULL;
300
301 stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
302 if (stolen)
303 stolen = drm_mm_get_block(stolen, size, 4096);
304 if (stolen == NULL)
305 return NULL;
306
307 obj = _i915_gem_object_create_stolen(dev, stolen);
308 if (obj)
309 return obj;
310
311 drm_mm_put_block(stolen);
312 return NULL;
313}
314
315void
316i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
317{
318 if (obj->stolen) {
319 drm_mm_put_block(obj->stolen);
320 obj->stolen = NULL;
321 }
201} 322}
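The core trick in i915_pages_create_for_stolen is a one-entry sg_table whose dma address is simply stolen_base + offset, so code consuming obj->pages never notices there are no struct pages behind it. Stripped to its essentials (wrap_stolen_range is a hypothetical name; the body mirrors the code above):

    /* Hypothetical helper; wraps a contiguous stolen range in a fake
     * single-entry scatterlist dma mapping, exactly as above. */
    static struct sg_table *wrap_stolen_range(dma_addr_t stolen_base,
                                              u32 offset, u32 size)
    {
            struct sg_table *st = kmalloc(sizeof(*st), GFP_KERNEL);

            if (st == NULL || sg_alloc_table(st, 1, GFP_KERNEL)) {
                    kfree(st);      /* kfree(NULL) is a no-op */
                    return NULL;
            }

            st->sgl->offset = offset;
            st->sgl->length = size;
            sg_dma_address(st->sgl) = stolen_base + offset; /* fake dma mapping */
            sg_dma_len(st->sgl) = size;
            return st;
    }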
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index cedbfd7b3dfa..abcba2f5a788 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -272,18 +272,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
272 return false; 272 return false;
273 } 273 }
274 274
275 /* 275 size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
276 * Previous chips need to be aligned to the size of the smallest
277 * fence register that can contain the object.
278 */
279 if (INTEL_INFO(obj->base.dev)->gen == 3)
280 size = 1024*1024;
281 else
282 size = 512*1024;
283
284 while (size < obj->base.size)
285 size <<= 1;
286
287 if (obj->gtt_space->size != size) 276 if (obj->gtt_space->size != size)
288 return false; 277 return false;
289 278
@@ -368,15 +357,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
368 357
369 obj->map_and_fenceable = 358 obj->map_and_fenceable =
370 obj->gtt_space == NULL || 359 obj->gtt_space == NULL ||
371 (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end && 360 (obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end &&
372 i915_gem_object_fence_ok(obj, args->tiling_mode)); 361 i915_gem_object_fence_ok(obj, args->tiling_mode));
373 362
374 /* Rebind if we need a change of alignment */ 363 /* Rebind if we need a change of alignment */
375 if (!obj->map_and_fenceable) { 364 if (!obj->map_and_fenceable) {
376 u32 unfenced_alignment = 365 u32 unfenced_alignment =
377 i915_gem_get_unfenced_gtt_alignment(dev, 366 i915_gem_get_gtt_alignment(dev, obj->base.size,
378 obj->base.size, 367 args->tiling_mode,
379 args->tiling_mode); 368 false);
380 if (obj->gtt_offset & (unfenced_alignment - 1)) 369 if (obj->gtt_offset & (unfenced_alignment - 1))
381 ret = i915_gem_object_unbind(obj); 370 ret = i915_gem_object_unbind(obj);
382 } 371 }
@@ -396,6 +385,18 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
396 /* we have to maintain this existing ABI... */ 385 /* we have to maintain this existing ABI... */
397 args->stride = obj->stride; 386 args->stride = obj->stride;
398 args->tiling_mode = obj->tiling_mode; 387 args->tiling_mode = obj->tiling_mode;
388
389 /* Try to preallocate memory required to save swizzling on put-pages */
390 if (i915_gem_object_needs_bit17_swizzle(obj)) {
391 if (obj->bit_17 == NULL) {
392 obj->bit_17 = kmalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) *
393 sizeof(long), GFP_KERNEL);
394 }
395 } else {
396 kfree(obj->bit_17);
397 obj->bit_17 = NULL;
398 }
399
399 drm_gem_object_unreference(&obj->base); 400 drm_gem_object_unreference(&obj->base);
400 mutex_unlock(&dev->struct_mutex); 401 mutex_unlock(&dev->struct_mutex);
401 402
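The open-coded block removed above computed the fenceable region size: a gen-dependent minimum (1MiB on gen3, 512KiB otherwise on these pre-965 paths) rounded up to the next power of two that covers the object; i915_gem_get_gtt_size centralizes that computation (and also accounts for tiling mode). The removed logic as a stand-alone function, for reference:

    #include <assert.h>
    #include <stdint.h>

    /* The logic removed above: gen-dependent minimum fence size,
     * rounded up to the next power of two covering the object. */
    static uint32_t fence_region_size(uint32_t obj_size, int gen)
    {
            uint32_t size = (gen == 3) ? 1024 * 1024 : 512 * 1024;

            while (size < obj_size)
                    size <<= 1;
            return size;
    }

    int main(void)
    {
            assert(fence_region_size(600 * 1024, 3) == 1024 * 1024); /* min wins */
            assert(fence_region_size(3 << 20, 3) == 4 << 20);        /* round up */
            return 0;
    }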
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index fe843389c7b4..2cd97d1cc920 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -287,6 +287,10 @@ static void i915_hotplug_work_func(struct work_struct *work)
287 struct drm_mode_config *mode_config = &dev->mode_config; 287 struct drm_mode_config *mode_config = &dev->mode_config;
288 struct intel_encoder *encoder; 288 struct intel_encoder *encoder;
289 289
290 /* HPD irq before everything is fully set up. */
291 if (!dev_priv->enable_hotplug_processing)
292 return;
293
290 mutex_lock(&mode_config->mutex); 294 mutex_lock(&mode_config->mutex);
291 DRM_DEBUG_KMS("running encoder hotplug functions\n"); 295 DRM_DEBUG_KMS("running encoder hotplug functions\n");
292 296
@@ -300,9 +304,6 @@ static void i915_hotplug_work_func(struct work_struct *work)
300 drm_helper_hpd_irq_event(dev); 304 drm_helper_hpd_irq_event(dev);
301} 305}
302 306
303/* defined intel_pm.c */
304extern spinlock_t mchdev_lock;
305
306static void ironlake_handle_rps_change(struct drm_device *dev) 307static void ironlake_handle_rps_change(struct drm_device *dev)
307{ 308{
308 drm_i915_private_t *dev_priv = dev->dev_private; 309 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -355,8 +356,8 @@ static void notify_ring(struct drm_device *dev,
355 356
356 wake_up_all(&ring->irq_queue); 357 wake_up_all(&ring->irq_queue);
357 if (i915_enable_hangcheck) { 358 if (i915_enable_hangcheck) {
358 dev_priv->hangcheck_count = 0; 359 dev_priv->gpu_error.hangcheck_count = 0;
359 mod_timer(&dev_priv->hangcheck_timer, 360 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
360 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 361 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
361 } 362 }
362} 363}
@@ -524,6 +525,20 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
524 queue_work(dev_priv->wq, &dev_priv->rps.work); 525 queue_work(dev_priv->wq, &dev_priv->rps.work);
525} 526}
526 527
528static void gmbus_irq_handler(struct drm_device *dev)
529{
530 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
531
532 wake_up_all(&dev_priv->gmbus_wait_queue);
533}
534
535static void dp_aux_irq_handler(struct drm_device *dev)
536{
537 struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
538
539 wake_up_all(&dev_priv->gmbus_wait_queue);
540}
541
527static irqreturn_t valleyview_irq_handler(int irq, void *arg) 542static irqreturn_t valleyview_irq_handler(int irq, void *arg)
528{ 543{
529 struct drm_device *dev = (struct drm_device *) arg; 544 struct drm_device *dev = (struct drm_device *) arg;
@@ -533,7 +548,6 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
533 unsigned long irqflags; 548 unsigned long irqflags;
534 int pipe; 549 int pipe;
535 u32 pipe_stats[I915_MAX_PIPES]; 550 u32 pipe_stats[I915_MAX_PIPES];
536 bool blc_event;
537 551
538 atomic_inc(&dev_priv->irq_received); 552 atomic_inc(&dev_priv->irq_received);
539 553
@@ -590,8 +604,8 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
590 I915_READ(PORT_HOTPLUG_STAT); 604 I915_READ(PORT_HOTPLUG_STAT);
591 } 605 }
592 606
593 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 607 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
594 blc_event = true; 608 gmbus_irq_handler(dev);
595 609
596 if (pm_iir & GEN6_PM_DEFERRED_EVENTS) 610 if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
597 gen6_queue_rps_work(dev_priv, pm_iir); 611 gen6_queue_rps_work(dev_priv, pm_iir);
@@ -618,8 +632,11 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
618 (pch_iir & SDE_AUDIO_POWER_MASK) >> 632 (pch_iir & SDE_AUDIO_POWER_MASK) >>
619 SDE_AUDIO_POWER_SHIFT); 633 SDE_AUDIO_POWER_SHIFT);
620 634
635 if (pch_iir & SDE_AUX_MASK)
636 dp_aux_irq_handler(dev);
637
621 if (pch_iir & SDE_GMBUS) 638 if (pch_iir & SDE_GMBUS)
622 DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); 639 gmbus_irq_handler(dev);
623 640
624 if (pch_iir & SDE_AUDIO_HDCP_MASK) 641 if (pch_iir & SDE_AUDIO_HDCP_MASK)
625 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 642 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
@@ -662,10 +679,10 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
662 SDE_AUDIO_POWER_SHIFT_CPT); 679 SDE_AUDIO_POWER_SHIFT_CPT);
663 680
664 if (pch_iir & SDE_AUX_MASK_CPT) 681 if (pch_iir & SDE_AUX_MASK_CPT)
665 DRM_DEBUG_DRIVER("AUX channel interrupt\n"); 682 dp_aux_irq_handler(dev);
666 683
667 if (pch_iir & SDE_GMBUS_CPT) 684 if (pch_iir & SDE_GMBUS_CPT)
668 DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); 685 gmbus_irq_handler(dev);
669 686
670 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 687 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
671 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 688 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
@@ -703,6 +720,9 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
703 720
704 de_iir = I915_READ(DEIIR); 721 de_iir = I915_READ(DEIIR);
705 if (de_iir) { 722 if (de_iir) {
723 if (de_iir & DE_AUX_CHANNEL_A_IVB)
724 dp_aux_irq_handler(dev);
725
706 if (de_iir & DE_GSE_IVB) 726 if (de_iir & DE_GSE_IVB)
707 intel_opregion_gse_intr(dev); 727 intel_opregion_gse_intr(dev);
708 728
@@ -758,7 +778,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
758 struct drm_device *dev = (struct drm_device *) arg; 778 struct drm_device *dev = (struct drm_device *) arg;
759 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 779 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
760 int ret = IRQ_NONE; 780 int ret = IRQ_NONE;
761 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; 781 u32 de_iir, gt_iir, de_ier, pm_iir;
762 782
763 atomic_inc(&dev_priv->irq_received); 783 atomic_inc(&dev_priv->irq_received);
764 784
@@ -769,11 +789,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
769 789
770 de_iir = I915_READ(DEIIR); 790 de_iir = I915_READ(DEIIR);
771 gt_iir = I915_READ(GTIIR); 791 gt_iir = I915_READ(GTIIR);
772 pch_iir = I915_READ(SDEIIR);
773 pm_iir = I915_READ(GEN6_PMIIR); 792 pm_iir = I915_READ(GEN6_PMIIR);
774 793
775 if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && 794 if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
776 (!IS_GEN6(dev) || pm_iir == 0))
777 goto done; 795 goto done;
778 796
779 ret = IRQ_HANDLED; 797 ret = IRQ_HANDLED;
@@ -783,6 +801,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
783 else 801 else
784 snb_gt_irq_handler(dev, dev_priv, gt_iir); 802 snb_gt_irq_handler(dev, dev_priv, gt_iir);
785 803
804 if (de_iir & DE_AUX_CHANNEL_A)
805 dp_aux_irq_handler(dev);
806
786 if (de_iir & DE_GSE) 807 if (de_iir & DE_GSE)
787 intel_opregion_gse_intr(dev); 808 intel_opregion_gse_intr(dev);
788 809
@@ -804,10 +825,15 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
804 825
805 /* check event from PCH */ 826 /* check event from PCH */
806 if (de_iir & DE_PCH_EVENT) { 827 if (de_iir & DE_PCH_EVENT) {
828 u32 pch_iir = I915_READ(SDEIIR);
829
807 if (HAS_PCH_CPT(dev)) 830 if (HAS_PCH_CPT(dev))
808 cpt_irq_handler(dev, pch_iir); 831 cpt_irq_handler(dev, pch_iir);
809 else 832 else
810 ibx_irq_handler(dev, pch_iir); 833 ibx_irq_handler(dev, pch_iir);
834
 835 /* should clear PCH hotplug event before clearing CPU irq */
836 I915_WRITE(SDEIIR, pch_iir);
811 } 837 }
812 838
813 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 839 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
@@ -816,8 +842,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
816 if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) 842 if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
817 gen6_queue_rps_work(dev_priv, pm_iir); 843 gen6_queue_rps_work(dev_priv, pm_iir);
818 844
819 /* should clear PCH hotplug event before clear CPU irq */
820 I915_WRITE(SDEIIR, pch_iir);
821 I915_WRITE(GTIIR, gt_iir); 845 I915_WRITE(GTIIR, gt_iir);
822 I915_WRITE(DEIIR, de_iir); 846 I915_WRITE(DEIIR, de_iir);
823 I915_WRITE(GEN6_PMIIR, pm_iir); 847 I915_WRITE(GEN6_PMIIR, pm_iir);
@@ -838,23 +862,60 @@ done:
838 */ 862 */
839static void i915_error_work_func(struct work_struct *work) 863static void i915_error_work_func(struct work_struct *work)
840{ 864{
841 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 865 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
842 error_work); 866 work);
867 drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
868 gpu_error);
843 struct drm_device *dev = dev_priv->dev; 869 struct drm_device *dev = dev_priv->dev;
870 struct intel_ring_buffer *ring;
844 char *error_event[] = { "ERROR=1", NULL }; 871 char *error_event[] = { "ERROR=1", NULL };
845 char *reset_event[] = { "RESET=1", NULL }; 872 char *reset_event[] = { "RESET=1", NULL };
846 char *reset_done_event[] = { "ERROR=0", NULL }; 873 char *reset_done_event[] = { "ERROR=0", NULL };
874 int i, ret;
847 875
848 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); 876 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
849 877
850 if (atomic_read(&dev_priv->mm.wedged)) { 878 /*
879 * Note that there's only one work item which does gpu resets, so we
880 * need not worry about concurrent gpu resets potentially incrementing
881 * error->reset_counter twice. We only need to take care of another
882 * racing irq/hangcheck declaring the gpu dead for a second time. A
883 * quick check for that is good enough: schedule_work ensures the
884 * correct ordering between hang detection and this work item, and since
885 * the reset in-progress bit is only ever set by code outside of this
886 * work we don't need to worry about any other races.
887 */
888 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
851 DRM_DEBUG_DRIVER("resetting chip\n"); 889 DRM_DEBUG_DRIVER("resetting chip\n");
852 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); 890 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
853 if (!i915_reset(dev)) { 891 reset_event);
854 atomic_set(&dev_priv->mm.wedged, 0); 892
855 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); 893 ret = i915_reset(dev);
894
895 if (ret == 0) {
896 /*
897 * After all the gem state is reset, increment the reset
898 * counter and wake up everyone waiting for the reset to
899 * complete.
900 *
901 * Since unlock operations are a one-sided barrier only,
902 * we need to insert a barrier here to order any seqno
903 * updates before
904 * the counter increment.
905 */
906 smp_mb__before_atomic_inc();
907 atomic_inc(&dev_priv->gpu_error.reset_counter);
908
909 kobject_uevent_env(&dev->primary->kdev.kobj,
910 KOBJ_CHANGE, reset_done_event);
911 } else {
912 atomic_set(&error->reset_counter, I915_WEDGED);
856 } 913 }
857 complete_all(&dev_priv->error_completion); 914
915 for_each_ring(ring, dev_priv, i)
916 wake_up_all(&ring->irq_queue);
917
918 wake_up_all(&dev_priv->gpu_error.reset_queue);
858 } 919 }
859} 920}
860 921
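The comment above leans on the reset_counter encoding without restating it. One plausible reading, consistent with the atomic_set_mask/atomic_inc/atomic_set calls in this hunk: the low bit marks a reset in progress, the completing increment takes the counter back to an even value, and all-ones means terminally wedged. A sketch of the two predicates under that assumed encoding (the real I915_RESET_IN_PROGRESS_FLAG and I915_WEDGED definitions live in i915_drv.h):

    /* Assumed encoding, for illustration only. */
    #define I915_RESET_IN_PROGRESS_FLAG     1
    #define I915_WEDGED                     0xffffffff

    static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
    {
            return atomic_read(&error->reset_counter) & I915_RESET_IN_PROGRESS_FLAG;
    }

    static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
    {
            return atomic_read(&error->reset_counter) == I915_WEDGED;
    }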
@@ -915,7 +976,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
915 goto unwind; 976 goto unwind;
916 977
917 local_irq_save(flags); 978 local_irq_save(flags);
918 if (reloc_offset < dev_priv->mm.gtt_mappable_end && 979 if (reloc_offset < dev_priv->gtt.mappable_end &&
919 src->has_global_gtt_mapping) { 980 src->has_global_gtt_mapping) {
920 void __iomem *s; 981 void __iomem *s;
921 982
@@ -924,10 +985,18 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
924 * captures what the GPU read. 985 * captures what the GPU read.
925 */ 986 */
926 987
927 s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, 988 s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
928 reloc_offset); 989 reloc_offset);
929 memcpy_fromio(d, s, PAGE_SIZE); 990 memcpy_fromio(d, s, PAGE_SIZE);
930 io_mapping_unmap_atomic(s); 991 io_mapping_unmap_atomic(s);
992 } else if (src->stolen) {
993 unsigned long offset;
994
995 offset = dev_priv->mm.stolen_base;
996 offset += src->stolen->start;
997 offset += i << PAGE_SHIFT;
998
999 memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
931 } else { 1000 } else {
932 struct page *page; 1001 struct page *page;
933 void *s; 1002 void *s;
@@ -1074,6 +1143,8 @@ static void i915_gem_record_fences(struct drm_device *dev,
1074 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); 1143 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
1075 break; 1144 break;
1076 1145
1146 default:
1147 BUG();
1077 } 1148 }
1078} 1149}
1079 1150
@@ -1222,9 +1293,9 @@ static void i915_capture_error_state(struct drm_device *dev)
1222 unsigned long flags; 1293 unsigned long flags;
1223 int i, pipe; 1294 int i, pipe;
1224 1295
1225 spin_lock_irqsave(&dev_priv->error_lock, flags); 1296 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1226 error = dev_priv->first_error; 1297 error = dev_priv->gpu_error.first_error;
1227 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 1298 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1228 if (error) 1299 if (error)
1229 return; 1300 return;
1230 1301
@@ -1235,7 +1306,8 @@ static void i915_capture_error_state(struct drm_device *dev)
1235 return; 1306 return;
1236 } 1307 }
1237 1308
1238 DRM_INFO("capturing error event; look for more information in 1309 DRM_INFO("capturing error event; look for more information in "
1310 "/sys/kernel/debug/dri/%d/i915_error_state\n",
1239 dev->primary->index); 1311 dev->primary->index);
1240 1312
1241 kref_init(&error->ref); 1313 kref_init(&error->ref);
@@ -1318,12 +1390,12 @@ static void i915_capture_error_state(struct drm_device *dev)
1318 error->overlay = intel_overlay_capture_error_state(dev); 1390 error->overlay = intel_overlay_capture_error_state(dev);
1319 error->display = intel_display_capture_error_state(dev); 1391 error->display = intel_display_capture_error_state(dev);
1320 1392
1321 spin_lock_irqsave(&dev_priv->error_lock, flags); 1393 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1322 if (dev_priv->first_error == NULL) { 1394 if (dev_priv->gpu_error.first_error == NULL) {
1323 dev_priv->first_error = error; 1395 dev_priv->gpu_error.first_error = error;
1324 error = NULL; 1396 error = NULL;
1325 } 1397 }
1326 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 1398 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1327 1399
1328 if (error) 1400 if (error)
1329 i915_error_state_free(&error->ref); 1401 i915_error_state_free(&error->ref);
@@ -1335,10 +1407,10 @@ void i915_destroy_error_state(struct drm_device *dev)
1335 struct drm_i915_error_state *error; 1407 struct drm_i915_error_state *error;
1336 unsigned long flags; 1408 unsigned long flags;
1337 1409
1338 spin_lock_irqsave(&dev_priv->error_lock, flags); 1410 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1339 error = dev_priv->first_error; 1411 error = dev_priv->gpu_error.first_error;
1340 dev_priv->first_error = NULL; 1412 dev_priv->gpu_error.first_error = NULL;
1341 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 1413 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1342 1414
1343 if (error) 1415 if (error)
1344 kref_put(&error->ref, i915_error_state_free); 1416 kref_put(&error->ref, i915_error_state_free);
@@ -1459,17 +1531,18 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
1459 i915_report_and_clear_eir(dev); 1531 i915_report_and_clear_eir(dev);
1460 1532
1461 if (wedged) { 1533 if (wedged) {
1462 INIT_COMPLETION(dev_priv->error_completion); 1534 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
1463 atomic_set(&dev_priv->mm.wedged, 1); 1535 &dev_priv->gpu_error.reset_counter);
1464 1536
1465 /* 1537 /*
1466 * Wakeup waiting processes so they don't hang 1538 * Wakeup waiting processes so that the reset work item
1539 * doesn't deadlock trying to grab various locks.
1467 */ 1540 */
1468 for_each_ring(ring, dev_priv, i) 1541 for_each_ring(ring, dev_priv, i)
1469 wake_up_all(&ring->irq_queue); 1542 wake_up_all(&ring->irq_queue);
1470 } 1543 }
1471 1544
1472 queue_work(dev_priv->wq, &dev_priv->error_work); 1545 queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
1473} 1546}
1474 1547
1475static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) 1548static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
@@ -1700,7 +1773,7 @@ static bool i915_hangcheck_hung(struct drm_device *dev)
1700{ 1773{
1701 drm_i915_private_t *dev_priv = dev->dev_private; 1774 drm_i915_private_t *dev_priv = dev->dev_private;
1702 1775
1703 if (dev_priv->hangcheck_count++ > 1) { 1776 if (dev_priv->gpu_error.hangcheck_count++ > 1) {
1704 bool hung = true; 1777 bool hung = true;
1705 1778
1706 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); 1779 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
@@ -1759,25 +1832,29 @@ void i915_hangcheck_elapsed(unsigned long data)
1759 goto repeat; 1832 goto repeat;
1760 } 1833 }
1761 1834
1762 dev_priv->hangcheck_count = 0; 1835 dev_priv->gpu_error.hangcheck_count = 0;
1763 return; 1836 return;
1764 } 1837 }
1765 1838
1766 i915_get_extra_instdone(dev, instdone); 1839 i915_get_extra_instdone(dev, instdone);
1767 if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 && 1840 if (memcmp(dev_priv->gpu_error.last_acthd, acthd,
1768 memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) { 1841 sizeof(acthd)) == 0 &&
1842 memcmp(dev_priv->gpu_error.prev_instdone, instdone,
1843 sizeof(instdone)) == 0) {
1769 if (i915_hangcheck_hung(dev)) 1844 if (i915_hangcheck_hung(dev))
1770 return; 1845 return;
1771 } else { 1846 } else {
1772 dev_priv->hangcheck_count = 0; 1847 dev_priv->gpu_error.hangcheck_count = 0;
1773 1848
1774 memcpy(dev_priv->last_acthd, acthd, sizeof(acthd)); 1849 memcpy(dev_priv->gpu_error.last_acthd, acthd,
1775 memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone)); 1850 sizeof(acthd));
1851 memcpy(dev_priv->gpu_error.prev_instdone, instdone,
1852 sizeof(instdone));
1776 } 1853 }
1777 1854
1778repeat: 1855repeat:
1779 /* Reset timer in case chip hangs without another request being added */ 1856 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
1780 mod_timer(&dev_priv->hangcheck_timer, 1857 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
1781 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 1858 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
1782} 1859}
1783 1860
@@ -1847,7 +1924,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
1847 * This register is the same on all known PCH chips. 1924 * This register is the same on all known PCH chips.
1848 */ 1925 */
1849 1926
1850static void ironlake_enable_pch_hotplug(struct drm_device *dev) 1927static void ibx_enable_hotplug(struct drm_device *dev)
1851{ 1928{
1852 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1929 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1853 u32 hotplug; 1930 u32 hotplug;
@@ -1860,14 +1937,36 @@ static void ironlake_enable_pch_hotplug(struct drm_device *dev)
1860 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 1937 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
1861} 1938}
1862 1939
1940static void ibx_irq_postinstall(struct drm_device *dev)
1941{
1942 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1943 u32 mask;
1944
1945 if (HAS_PCH_IBX(dev))
1946 mask = SDE_HOTPLUG_MASK |
1947 SDE_GMBUS |
1948 SDE_AUX_MASK;
1949 else
1950 mask = SDE_HOTPLUG_MASK_CPT |
1951 SDE_GMBUS_CPT |
1952 SDE_AUX_MASK_CPT;
1953
1954 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1955 I915_WRITE(SDEIMR, ~mask);
1956 I915_WRITE(SDEIER, mask);
1957 POSTING_READ(SDEIER);
1958
1959 ibx_enable_hotplug(dev);
1960}
1961
1863static int ironlake_irq_postinstall(struct drm_device *dev) 1962static int ironlake_irq_postinstall(struct drm_device *dev)
1864{ 1963{
1865 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1964 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1866 /* enable kind of interrupts always enabled */ 1965 /* enable kind of interrupts always enabled */
1867 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 1966 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1868 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; 1967 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
1968 DE_AUX_CHANNEL_A;
1869 u32 render_irqs; 1969 u32 render_irqs;
1870 u32 hotplug_mask;
1871 1970
1872 dev_priv->irq_mask = ~display_mask; 1971 dev_priv->irq_mask = ~display_mask;
1873 1972
@@ -1895,27 +1994,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1895 I915_WRITE(GTIER, render_irqs); 1994 I915_WRITE(GTIER, render_irqs);
1896 POSTING_READ(GTIER); 1995 POSTING_READ(GTIER);
1897 1996
1898 if (HAS_PCH_CPT(dev)) { 1997 ibx_irq_postinstall(dev);
1899 hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1900 SDE_PORTB_HOTPLUG_CPT |
1901 SDE_PORTC_HOTPLUG_CPT |
1902 SDE_PORTD_HOTPLUG_CPT);
1903 } else {
1904 hotplug_mask = (SDE_CRT_HOTPLUG |
1905 SDE_PORTB_HOTPLUG |
1906 SDE_PORTC_HOTPLUG |
1907 SDE_PORTD_HOTPLUG |
1908 SDE_AUX_MASK);
1909 }
1910
1911 dev_priv->pch_irq_mask = ~hotplug_mask;
1912
1913 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1914 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1915 I915_WRITE(SDEIER, hotplug_mask);
1916 POSTING_READ(SDEIER);
1917
1918 ironlake_enable_pch_hotplug(dev);
1919 1998
1920 if (IS_IRONLAKE_M(dev)) { 1999 if (IS_IRONLAKE_M(dev)) {
1921 /* Clear & enable PCU event interrupts */ 2000 /* Clear & enable PCU event interrupts */
@@ -1935,9 +2014,9 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
1935 DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | 2014 DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
1936 DE_PLANEC_FLIP_DONE_IVB | 2015 DE_PLANEC_FLIP_DONE_IVB |
1937 DE_PLANEB_FLIP_DONE_IVB | 2016 DE_PLANEB_FLIP_DONE_IVB |
1938 DE_PLANEA_FLIP_DONE_IVB; 2017 DE_PLANEA_FLIP_DONE_IVB |
2018 DE_AUX_CHANNEL_A_IVB;
1939 u32 render_irqs; 2019 u32 render_irqs;
1940 u32 hotplug_mask;
1941 2020
1942 dev_priv->irq_mask = ~display_mask; 2021 dev_priv->irq_mask = ~display_mask;
1943 2022
@@ -1961,18 +2040,7 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
1961 I915_WRITE(GTIER, render_irqs); 2040 I915_WRITE(GTIER, render_irqs);
1962 POSTING_READ(GTIER); 2041 POSTING_READ(GTIER);
1963 2042
1964 hotplug_mask = (SDE_CRT_HOTPLUG_CPT | 2043 ibx_irq_postinstall(dev);
1965 SDE_PORTB_HOTPLUG_CPT |
1966 SDE_PORTC_HOTPLUG_CPT |
1967 SDE_PORTD_HOTPLUG_CPT);
1968 dev_priv->pch_irq_mask = ~hotplug_mask;
1969
1970 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1971 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1972 I915_WRITE(SDEIER, hotplug_mask);
1973 POSTING_READ(SDEIER);
1974
1975 ironlake_enable_pch_hotplug(dev);
1976 2044
1977 return 0; 2045 return 0;
1978} 2046}
@@ -1981,7 +2049,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
1981{ 2049{
1982 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2050 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1983 u32 enable_mask; 2051 u32 enable_mask;
1984 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1985 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; 2052 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
1986 u32 render_irqs; 2053 u32 render_irqs;
1987 u16 msid; 2054 u16 msid;
@@ -2010,6 +2077,9 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
2010 msid |= (1<<14); 2077 msid |= (1<<14);
2011 pci_write_config_word(dev_priv->dev->pdev, 0x98, msid); 2078 pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
2012 2079
2080 I915_WRITE(PORT_HOTPLUG_EN, 0);
2081 POSTING_READ(PORT_HOTPLUG_EN);
2082
2013 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 2083 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2014 I915_WRITE(VLV_IER, enable_mask); 2084 I915_WRITE(VLV_IER, enable_mask);
2015 I915_WRITE(VLV_IIR, 0xffffffff); 2085 I915_WRITE(VLV_IIR, 0xffffffff);
@@ -2018,6 +2088,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
2018 POSTING_READ(VLV_IER); 2088 POSTING_READ(VLV_IER);
2019 2089
2020 i915_enable_pipestat(dev_priv, 0, pipestat_enable); 2090 i915_enable_pipestat(dev_priv, 0, pipestat_enable);
2091 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2021 i915_enable_pipestat(dev_priv, 1, pipestat_enable); 2092 i915_enable_pipestat(dev_priv, 1, pipestat_enable);
2022 2093
2023 I915_WRITE(VLV_IIR, 0xffffffff); 2094 I915_WRITE(VLV_IIR, 0xffffffff);
@@ -2038,13 +2109,22 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
2038#endif 2109#endif
2039 2110
2040 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 2111 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2112
2113 return 0;
2114}
2115
2116static void valleyview_hpd_irq_setup(struct drm_device *dev)
2117{
2118 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2119 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2120
2041 /* Note HDMI and DP share bits */ 2121 /* Note HDMI and DP share bits */
2042 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) 2122 if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
2043 hotplug_en |= HDMIB_HOTPLUG_INT_EN; 2123 hotplug_en |= PORTB_HOTPLUG_INT_EN;
2044 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) 2124 if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
2045 hotplug_en |= HDMIC_HOTPLUG_INT_EN; 2125 hotplug_en |= PORTC_HOTPLUG_INT_EN;
2046 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) 2126 if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
2047 hotplug_en |= HDMID_HOTPLUG_INT_EN; 2127 hotplug_en |= PORTD_HOTPLUG_INT_EN;
2048 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) 2128 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
2049 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 2129 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2050 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) 2130 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
@@ -2055,8 +2135,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
2055 } 2135 }
2056 2136
2057 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2137 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2058
2059 return 0;
2060} 2138}
2061 2139
2062static void valleyview_irq_uninstall(struct drm_device *dev) 2140static void valleyview_irq_uninstall(struct drm_device *dev)
@@ -2286,6 +2364,9 @@ static int i915_irq_postinstall(struct drm_device *dev)
2286 I915_USER_INTERRUPT; 2364 I915_USER_INTERRUPT;
2287 2365
2288 if (I915_HAS_HOTPLUG(dev)) { 2366 if (I915_HAS_HOTPLUG(dev)) {
2367 I915_WRITE(PORT_HOTPLUG_EN, 0);
2368 POSTING_READ(PORT_HOTPLUG_EN);
2369
2289 /* Enable in IER... */ 2370 /* Enable in IER... */
2290 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 2371 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2291 /* and unmask in IMR */ 2372 /* and unmask in IMR */
@@ -2296,15 +2377,25 @@ static int i915_irq_postinstall(struct drm_device *dev)
2296 I915_WRITE(IER, enable_mask); 2377 I915_WRITE(IER, enable_mask);
2297 POSTING_READ(IER); 2378 POSTING_READ(IER);
2298 2379
2380 intel_opregion_enable_asle(dev);
2381
2382 return 0;
2383}
2384
2385static void i915_hpd_irq_setup(struct drm_device *dev)
2386{
2387 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2388 u32 hotplug_en;
2389
2299 if (I915_HAS_HOTPLUG(dev)) { 2390 if (I915_HAS_HOTPLUG(dev)) {
2300 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 2391 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2301 2392
2302 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) 2393 if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
2303 hotplug_en |= HDMIB_HOTPLUG_INT_EN; 2394 hotplug_en |= PORTB_HOTPLUG_INT_EN;
2304 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) 2395 if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
2305 hotplug_en |= HDMIC_HOTPLUG_INT_EN; 2396 hotplug_en |= PORTC_HOTPLUG_INT_EN;
2306 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) 2397 if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
2307 hotplug_en |= HDMID_HOTPLUG_INT_EN; 2398 hotplug_en |= PORTD_HOTPLUG_INT_EN;
2308 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) 2399 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
2309 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 2400 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2310 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) 2401 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
@@ -2318,10 +2409,6 @@ static int i915_irq_postinstall(struct drm_device *dev)
2318 2409
2319 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2410 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2320 } 2411 }
2321
2322 intel_opregion_enable_asle(dev);
2323
2324 return 0;
2325} 2412}
2326 2413
2327static irqreturn_t i915_irq_handler(int irq, void *arg) 2414static irqreturn_t i915_irq_handler(int irq, void *arg)
@@ -2481,7 +2568,6 @@ static void i965_irq_preinstall(struct drm_device * dev)
2481static int i965_irq_postinstall(struct drm_device *dev) 2568static int i965_irq_postinstall(struct drm_device *dev)
2482{ 2569{
2483 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2570 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2484 u32 hotplug_en;
2485 u32 enable_mask; 2571 u32 enable_mask;
2486 u32 error_mask; 2572 u32 error_mask;
2487 2573
@@ -2502,6 +2588,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
2502 2588
2503 dev_priv->pipestat[0] = 0; 2589 dev_priv->pipestat[0] = 0;
2504 dev_priv->pipestat[1] = 0; 2590 dev_priv->pipestat[1] = 0;
2591 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
2505 2592
2506 /* 2593 /*
2507 * Enable some error detection, note the instruction error mask 2594 * Enable some error detection, note the instruction error mask
@@ -2522,14 +2609,27 @@ static int i965_irq_postinstall(struct drm_device *dev)
2522 I915_WRITE(IER, enable_mask); 2609 I915_WRITE(IER, enable_mask);
2523 POSTING_READ(IER); 2610 POSTING_READ(IER);
2524 2611
2612 I915_WRITE(PORT_HOTPLUG_EN, 0);
2613 POSTING_READ(PORT_HOTPLUG_EN);
2614
2615 intel_opregion_enable_asle(dev);
2616
2617 return 0;
2618}
2619
2620static void i965_hpd_irq_setup(struct drm_device *dev)
2621{
2622 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2623 u32 hotplug_en;
2624
2525 /* Note HDMI and DP share hotplug bits */ 2625 /* Note HDMI and DP share hotplug bits */
2526 hotplug_en = 0; 2626 hotplug_en = 0;
2527 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) 2627 if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
2528 hotplug_en |= HDMIB_HOTPLUG_INT_EN; 2628 hotplug_en |= PORTB_HOTPLUG_INT_EN;
2529 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) 2629 if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
2530 hotplug_en |= HDMIC_HOTPLUG_INT_EN; 2630 hotplug_en |= PORTC_HOTPLUG_INT_EN;
2531 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) 2631 if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
2532 hotplug_en |= HDMID_HOTPLUG_INT_EN; 2632 hotplug_en |= PORTD_HOTPLUG_INT_EN;
2533 if (IS_G4X(dev)) { 2633 if (IS_G4X(dev)) {
2534 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X) 2634 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
2535 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 2635 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
@@ -2556,10 +2656,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
2556 /* Ignore TV since it's buggy */ 2656 /* Ignore TV since it's buggy */
2557 2657
2558 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2658 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2559
2560 intel_opregion_enable_asle(dev);
2561
2562 return 0;
2563} 2659}
2564 2660
2565static irqreturn_t i965_irq_handler(int irq, void *arg) 2661static irqreturn_t i965_irq_handler(int irq, void *arg)
@@ -2655,6 +2751,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
2655 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 2751 if (blc_event || (iir & I915_ASLE_INTERRUPT))
2656 intel_opregion_asle_intr(dev); 2752 intel_opregion_asle_intr(dev);
2657 2753
2754 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2755 gmbus_irq_handler(dev);
2756
2658 /* With MSI, interrupts are only generated when iir 2757 /* With MSI, interrupts are only generated when iir
2659 * transitions from zero to nonzero. If another bit got 2758 * transitions from zero to nonzero. If another bit got
2660 * set while we were handling the existing iir bits, then 2759 * set while we were handling the existing iir bits, then
@@ -2706,10 +2805,16 @@ void intel_irq_init(struct drm_device *dev)
2706 struct drm_i915_private *dev_priv = dev->dev_private; 2805 struct drm_i915_private *dev_priv = dev->dev_private;
2707 2806
2708 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 2807 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
2709 INIT_WORK(&dev_priv->error_work, i915_error_work_func); 2808 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
2710 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 2809 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
2711 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 2810 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
2712 2811
2812 setup_timer(&dev_priv->gpu_error.hangcheck_timer,
2813 i915_hangcheck_elapsed,
2814 (unsigned long) dev);
2815
2816 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
2817
2713 dev->driver->get_vblank_counter = i915_get_vblank_counter; 2818 dev->driver->get_vblank_counter = i915_get_vblank_counter;
2714 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 2819 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
2715 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 2820 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
@@ -2730,7 +2835,8 @@ void intel_irq_init(struct drm_device *dev)
2730 dev->driver->irq_uninstall = valleyview_irq_uninstall; 2835 dev->driver->irq_uninstall = valleyview_irq_uninstall;
2731 dev->driver->enable_vblank = valleyview_enable_vblank; 2836 dev->driver->enable_vblank = valleyview_enable_vblank;
2732 dev->driver->disable_vblank = valleyview_disable_vblank; 2837 dev->driver->disable_vblank = valleyview_disable_vblank;
2733 } else if (IS_IVYBRIDGE(dev)) { 2838 dev_priv->display.hpd_irq_setup = valleyview_hpd_irq_setup;
2839 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
2734 /* Share pre & uninstall handlers with ILK/SNB */ 2840 /* Share pre & uninstall handlers with ILK/SNB */
2735 dev->driver->irq_handler = ivybridge_irq_handler; 2841 dev->driver->irq_handler = ivybridge_irq_handler;
2736 dev->driver->irq_preinstall = ironlake_irq_preinstall; 2842 dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -2738,14 +2844,6 @@ void intel_irq_init(struct drm_device *dev)
2738 dev->driver->irq_uninstall = ironlake_irq_uninstall; 2844 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2739 dev->driver->enable_vblank = ivybridge_enable_vblank; 2845 dev->driver->enable_vblank = ivybridge_enable_vblank;
2740 dev->driver->disable_vblank = ivybridge_disable_vblank; 2846 dev->driver->disable_vblank = ivybridge_disable_vblank;
2741 } else if (IS_HASWELL(dev)) {
2742 /* Share interrupts handling with IVB */
2743 dev->driver->irq_handler = ivybridge_irq_handler;
2744 dev->driver->irq_preinstall = ironlake_irq_preinstall;
2745 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
2746 dev->driver->irq_uninstall = ironlake_irq_uninstall;
2747 dev->driver->enable_vblank = ivybridge_enable_vblank;
2748 dev->driver->disable_vblank = ivybridge_disable_vblank;
2749 } else if (HAS_PCH_SPLIT(dev)) { 2847 } else if (HAS_PCH_SPLIT(dev)) {
2750 dev->driver->irq_handler = ironlake_irq_handler; 2848 dev->driver->irq_handler = ironlake_irq_handler;
2751 dev->driver->irq_preinstall = ironlake_irq_preinstall; 2849 dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -2764,13 +2862,23 @@ void intel_irq_init(struct drm_device *dev)
 			dev->driver->irq_postinstall = i915_irq_postinstall;
 			dev->driver->irq_uninstall = i915_irq_uninstall;
 			dev->driver->irq_handler = i915_irq_handler;
+			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
 		} else {
 			dev->driver->irq_preinstall = i965_irq_preinstall;
 			dev->driver->irq_postinstall = i965_irq_postinstall;
 			dev->driver->irq_uninstall = i965_irq_uninstall;
 			dev->driver->irq_handler = i965_irq_handler;
+			dev_priv->display.hpd_irq_setup = i965_hpd_irq_setup;
 		}
 		dev->driver->enable_vblank = i915_enable_vblank;
 		dev->driver->disable_vblank = i915_disable_vblank;
 	}
 }
+
+void intel_hpd_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->display.hpd_irq_setup)
+		dev_priv->display.hpd_irq_setup(dev);
+}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 59afb7eb6db6..527b664d3434 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -141,8 +141,15 @@
 #define VGA_MSR_MEM_EN (1<<1)
 #define VGA_MSR_CGA_MODE (1<<0)
 
-#define VGA_SR_INDEX 0x3c4
-#define VGA_SR_DATA 0x3c5
+/*
+ * SR01 is the only VGA register touched on non-UMS setups.
+ * VLV doesn't do UMS, so the sequencer index/data registers
+ * are the only VGA registers which need to include
+ * display_mmio_offset.
+ */
+#define VGA_SR_INDEX (dev_priv->info->display_mmio_offset + 0x3c4)
+#define SR01	1
+#define VGA_SR_DATA (dev_priv->info->display_mmio_offset + 0x3c5)
 
 #define VGA_AR_INDEX 0x3c0
 #define   VGA_AR_VID_EN (1<<5)
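
A quick illustration of what the offset-relative form above buys (illustrative only: the 0x180000 base for VLV is inferred from the literal substitutions later in this patch, e.g. 0x182060 becoming VLV_DISPLAY_BASE + 0x2060, and is not stated in this hunk):

	/* VGA_SR_INDEX == dev_priv->info->display_mmio_offset + 0x3c4	*/
	/*   non-VLV: 0x000000 + 0x3c4 == 0x0003c4			*/
	/*   VLV:     0x180000 + 0x3c4 == 0x1803c4			*/

The same macro therefore reaches the relocated VLV display block without per-platform #ifdefs.
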
@@ -301,6 +308,7 @@
 #define DISPLAY_PLANE_A		(0<<20)
 #define DISPLAY_PLANE_B		(1<<20)
 #define GFX_OP_PIPE_CONTROL(len)	((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
+#define   PIPE_CONTROL_GLOBAL_GTT_IVB	(1<<24) /* gen7+ */
 #define   PIPE_CONTROL_CS_STALL		(1<<20)
 #define   PIPE_CONTROL_TLB_INVALIDATE	(1<<18)
 #define   PIPE_CONTROL_QW_WRITE		(1<<14)
@@ -335,17 +343,19 @@
  * 0x801c/3c: core clock bits
  * 0x8048/68: low pass filter coefficients
  * 0x8100: fast clock controls
+ *
+ * DPIO is VLV only.
  */
-#define DPIO_PKT			0x2100
+#define DPIO_PKT			(VLV_DISPLAY_BASE + 0x2100)
 #define  DPIO_RID			(0<<24)
 #define  DPIO_OP_WRITE			(1<<16)
 #define  DPIO_OP_READ			(0<<16)
 #define  DPIO_PORTID			(0x12<<8)
 #define  DPIO_BYTE			(0xf<<4)
 #define  DPIO_BUSY			(1<<0) /* status only */
-#define DPIO_DATA			0x2104
-#define DPIO_REG			0x2108
-#define DPIO_CTL			0x2110
+#define DPIO_DATA			(VLV_DISPLAY_BASE + 0x2104)
+#define DPIO_REG			(VLV_DISPLAY_BASE + 0x2108)
+#define DPIO_CTL			(VLV_DISPLAY_BASE + 0x2110)
 #define  DPIO_MODSEL1			(1<<3) /* if ref clk b == 27 */
 #define  DPIO_MODSEL0			(1<<2) /* if ref clk a == 27 */
 #define  DPIO_SFR_BYPASS		(1<<1)
@@ -556,13 +566,13 @@
 #define IIR		0x020a4
 #define IMR		0x020a8
 #define ISR		0x020ac
-#define VLV_GUNIT_CLOCK_GATE	0x182060
+#define VLV_GUNIT_CLOCK_GATE	(VLV_DISPLAY_BASE + 0x2060)
 #define   GCFG_DIS		(1<<8)
-#define VLV_IIR_RW	0x182084
-#define VLV_IER		0x1820a0
-#define VLV_IIR		0x1820a4
-#define VLV_IMR		0x1820a8
-#define VLV_ISR		0x1820ac
+#define VLV_IIR_RW	(VLV_DISPLAY_BASE + 0x2084)
+#define VLV_IER		(VLV_DISPLAY_BASE + 0x20a0)
+#define VLV_IIR		(VLV_DISPLAY_BASE + 0x20a4)
+#define VLV_IMR		(VLV_DISPLAY_BASE + 0x20a8)
+#define VLV_ISR		(VLV_DISPLAY_BASE + 0x20ac)
 #define I915_PIPE_CONTROL_NOTIFY_INTERRUPT		(1<<18)
 #define I915_DISPLAY_PORT_INTERRUPT			(1<<17)
 #define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT	(1<<15)
@@ -735,6 +745,7 @@
 #define   GEN7_FF_TS_SCHED_HS0		(0x3<<16)
 #define   GEN7_FF_TS_SCHED_LOAD_BALANCE	(0x1<<16)
 #define   GEN7_FF_TS_SCHED_HW		(0x0<<16) /* Default */
+#define   GEN7_FF_VS_REF_CNT_FFME	(1 << 15)
 #define   GEN7_FF_VS_SCHED_HS1		(0x5<<12)
 #define   GEN7_FF_VS_SCHED_HS0		(0x3<<12)
 #define   GEN7_FF_VS_SCHED_LOAD_BALANCE	(0x1<<12) /* Default */
@@ -921,8 +932,8 @@
 #define   VGA1_PD_P1_DIV_2	(1 << 13)
 #define   VGA1_PD_P1_SHIFT	8
 #define   VGA1_PD_P1_MASK	(0x1f << 8)
-#define _DPLL_A	0x06014
-#define _DPLL_B	0x06018
+#define _DPLL_A	(dev_priv->info->display_mmio_offset + 0x6014)
+#define _DPLL_B	(dev_priv->info->display_mmio_offset + 0x6018)
 #define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
 #define   DPLL_VCO_ENABLE	(1 << 31)
 #define   DPLL_DVO_HIGH_SPEED	(1 << 30)
@@ -943,23 +954,6 @@
 #define   DPLL_LOCK_VLV			(1<<15)
 #define   DPLL_INTEGRATED_CLOCK_VLV	(1<<13)
 
-#define SRX_INDEX		0x3c4
-#define SRX_DATA		0x3c5
-#define SR01			1
-#define   SR01_SCREEN_OFF	(1<<5)
-
-#define PPCR			0x61204
-#define   PPCR_ON		(1<<0)
-
-#define DVOB			0x61140
-#define   DVOB_ON		(1<<31)
-#define DVOC			0x61160
-#define   DVOC_ON		(1<<31)
-#define LVDS			0x61180
-#define   LVDS_ON		(1<<31)
-
-/* Scratch pad debug 0 reg:
- */
 #define DPLL_FPA01_P1_POST_DIV_MASK_I830	0x001f0000
 /*
  * The i830 generation, in LVDS mode, defines P1 as the bit number set within
@@ -998,7 +992,7 @@
 #define   SDVO_MULTIPLIER_MASK		0x000000ff
 #define   SDVO_MULTIPLIER_SHIFT_HIRES	4
 #define   SDVO_MULTIPLIER_SHIFT_VGA	0
-#define _DPLL_A_MD 0x0601c /* 965+ only */
+#define _DPLL_A_MD (dev_priv->info->display_mmio_offset + 0x601c) /* 965+ only */
 /*
  * UDI pixel divider, controlling how many pixels are stuffed into a packet.
  *
@@ -1035,7 +1029,7 @@
  */
 #define   DPLL_MD_VGA_UDI_MULTIPLIER_MASK	0x0000003f
 #define   DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT	0
-#define _DPLL_B_MD 0x06020 /* 965+ only */
+#define _DPLL_B_MD (dev_priv->info->display_mmio_offset + 0x6020) /* 965+ only */
 #define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
 
 #define _FPA0	0x06040
@@ -1178,15 +1172,15 @@
 #define RAMCLK_GATE_D	0x6210		/* CRL only */
 #define DEUC		0x6214		/* CRL only */
 
-#define FW_BLC_SELF_VLV		0x6500
+#define FW_BLC_SELF_VLV		(VLV_DISPLAY_BASE + 0x6500)
 #define  FW_CSPWRDWNEN		(1<<15)
 
 /*
  * Palette regs
  */
 
-#define _PALETTE_A	0x0a000
-#define _PALETTE_B	0x0a800
+#define _PALETTE_A	(dev_priv->info->display_mmio_offset + 0xa000)
+#define _PALETTE_B	(dev_priv->info->display_mmio_offset + 0xa800)
 #define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B)
 
 /* MCH MMIO space */
@@ -1242,6 +1236,10 @@
 #define   MAD_DIMM_A_SIZE_SHIFT	0
 #define   MAD_DIMM_A_SIZE_MASK	(0xff << MAD_DIMM_A_SIZE_SHIFT)
 
+/** snb MCH registers for priority tuning */
+#define MCH_SSKPD		(MCHBAR_MIRROR_BASE_SNB + 0x5d10)
+#define   MCH_SSKPD_WM0_MASK	0x3f
+#define   MCH_SSKPD_WM0_VAL	0xc
 
 /* Clocking configuration register */
 #define CLKCFG		0x10c00
@@ -1551,26 +1549,26 @@
  */
 
 /* Pipe A timing regs */
-#define _HTOTAL_A	0x60000
-#define _HBLANK_A	0x60004
-#define _HSYNC_A	0x60008
-#define _VTOTAL_A	0x6000c
-#define _VBLANK_A	0x60010
-#define _VSYNC_A	0x60014
-#define _PIPEASRC	0x6001c
-#define _BCLRPAT_A	0x60020
-#define _VSYNCSHIFT_A	0x60028
+#define _HTOTAL_A	(dev_priv->info->display_mmio_offset + 0x60000)
+#define _HBLANK_A	(dev_priv->info->display_mmio_offset + 0x60004)
+#define _HSYNC_A	(dev_priv->info->display_mmio_offset + 0x60008)
+#define _VTOTAL_A	(dev_priv->info->display_mmio_offset + 0x6000c)
+#define _VBLANK_A	(dev_priv->info->display_mmio_offset + 0x60010)
+#define _VSYNC_A	(dev_priv->info->display_mmio_offset + 0x60014)
+#define _PIPEASRC	(dev_priv->info->display_mmio_offset + 0x6001c)
+#define _BCLRPAT_A	(dev_priv->info->display_mmio_offset + 0x60020)
+#define _VSYNCSHIFT_A	(dev_priv->info->display_mmio_offset + 0x60028)
 
 /* Pipe B timing regs */
-#define _HTOTAL_B	0x61000
-#define _HBLANK_B	0x61004
-#define _HSYNC_B	0x61008
-#define _VTOTAL_B	0x6100c
-#define _VBLANK_B	0x61010
-#define _VSYNC_B	0x61014
-#define _PIPEBSRC	0x6101c
-#define _BCLRPAT_B	0x61020
-#define _VSYNCSHIFT_B	0x61028
+#define _HTOTAL_B	(dev_priv->info->display_mmio_offset + 0x61000)
+#define _HBLANK_B	(dev_priv->info->display_mmio_offset + 0x61004)
+#define _HSYNC_B	(dev_priv->info->display_mmio_offset + 0x61008)
+#define _VTOTAL_B	(dev_priv->info->display_mmio_offset + 0x6100c)
+#define _VBLANK_B	(dev_priv->info->display_mmio_offset + 0x61010)
+#define _VSYNC_B	(dev_priv->info->display_mmio_offset + 0x61014)
+#define _PIPEBSRC	(dev_priv->info->display_mmio_offset + 0x6101c)
+#define _BCLRPAT_B	(dev_priv->info->display_mmio_offset + 0x61020)
+#define _VSYNCSHIFT_B	(dev_priv->info->display_mmio_offset + 0x61028)
 
 
 #define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
@@ -1631,13 +1629,10 @@
 
 
 /* Hotplug control (945+ only) */
-#define PORT_HOTPLUG_EN		0x61110
-#define   HDMIB_HOTPLUG_INT_EN		(1 << 29)
-#define   DPB_HOTPLUG_INT_EN		(1 << 29)
-#define   HDMIC_HOTPLUG_INT_EN		(1 << 28)
-#define   DPC_HOTPLUG_INT_EN		(1 << 28)
-#define   HDMID_HOTPLUG_INT_EN		(1 << 27)
-#define   DPD_HOTPLUG_INT_EN		(1 << 27)
+#define PORT_HOTPLUG_EN		(dev_priv->info->display_mmio_offset + 0x61110)
+#define   PORTB_HOTPLUG_INT_EN		(1 << 29)
+#define   PORTC_HOTPLUG_INT_EN		(1 << 28)
+#define   PORTD_HOTPLUG_INT_EN		(1 << 27)
 #define   SDVOB_HOTPLUG_INT_EN		(1 << 26)
 #define   SDVOC_HOTPLUG_INT_EN		(1 << 25)
 #define   TV_HOTPLUG_INT_EN		(1 << 18)
@@ -1658,21 +1653,14 @@
 #define CRT_HOTPLUG_DETECT_VOLTAGE_325MV	(0 << 2)
 #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV	(1 << 2)
 
-#define PORT_HOTPLUG_STAT	0x61114
+#define PORT_HOTPLUG_STAT	(dev_priv->info->display_mmio_offset + 0x61114)
 /* HDMI/DP bits are gen4+ */
-#define   DPB_HOTPLUG_LIVE_STATUS	(1 << 29)
-#define   DPC_HOTPLUG_LIVE_STATUS	(1 << 28)
-#define   DPD_HOTPLUG_LIVE_STATUS	(1 << 27)
-#define   DPD_HOTPLUG_INT_STATUS	(3 << 21)
-#define   DPC_HOTPLUG_INT_STATUS	(3 << 19)
-#define   DPB_HOTPLUG_INT_STATUS	(3 << 17)
-/* HDMI bits are shared with the DP bits */
-#define   HDMIB_HOTPLUG_LIVE_STATUS	(1 << 29)
-#define   HDMIC_HOTPLUG_LIVE_STATUS	(1 << 28)
-#define   HDMID_HOTPLUG_LIVE_STATUS	(1 << 27)
-#define   HDMID_HOTPLUG_INT_STATUS	(3 << 21)
-#define   HDMIC_HOTPLUG_INT_STATUS	(3 << 19)
-#define   HDMIB_HOTPLUG_INT_STATUS	(3 << 17)
+#define   PORTB_HOTPLUG_LIVE_STATUS	(1 << 29)
+#define   PORTC_HOTPLUG_LIVE_STATUS	(1 << 28)
+#define   PORTD_HOTPLUG_LIVE_STATUS	(1 << 27)
+#define   PORTD_HOTPLUG_INT_STATUS	(3 << 21)
+#define   PORTC_HOTPLUG_INT_STATUS	(3 << 19)
+#define   PORTB_HOTPLUG_INT_STATUS	(3 << 17)
 /* CRT/TV common between gen3+ */
 #define   CRT_HOTPLUG_INT_STATUS	(1 << 11)
 #define   TV_HOTPLUG_INT_STATUS		(1 << 10)
@@ -1877,7 +1865,7 @@
 #define PP_DIVISOR	0x61210
 
 /* Panel fitting */
-#define PFIT_CONTROL	0x61230
+#define PFIT_CONTROL	(dev_priv->info->display_mmio_offset + 0x61230)
 #define   PFIT_ENABLE		(1 << 31)
 #define   PFIT_PIPE_MASK	(3 << 29)
 #define   PFIT_PIPE_SHIFT	29
@@ -1895,9 +1883,7 @@
 #define   PFIT_SCALING_PROGRAMMED	(1 << 26)
 #define   PFIT_SCALING_PILLAR		(2 << 26)
 #define   PFIT_SCALING_LETTER		(3 << 26)
-#define PFIT_PGM_RATIOS	0x61234
-#define   PFIT_VERT_SCALE_MASK		0xfff00000
-#define   PFIT_HORIZ_SCALE_MASK		0x0000fff0
+#define PFIT_PGM_RATIOS	(dev_priv->info->display_mmio_offset + 0x61234)
 /* Pre-965 */
 #define   PFIT_VERT_SCALE_SHIFT		20
 #define   PFIT_VERT_SCALE_MASK		0xfff00000
@@ -1909,7 +1895,7 @@
 #define   PFIT_HORIZ_SCALE_SHIFT_965	0
 #define   PFIT_HORIZ_SCALE_MASK_965	0x00001fff
 
-#define PFIT_AUTO_RATIOS 0x61238
+#define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238)
 
 /* Backlight control */
 #define BLC_PWM_CTL2	0x61250 /* 965+ only */
@@ -2639,10 +2625,10 @@
 /* Display & cursor control */
 
 /* Pipe A */
-#define _PIPEADSL	0x70000
+#define _PIPEADSL	(dev_priv->info->display_mmio_offset + 0x70000)
 #define   DSL_LINEMASK_GEN2	0x00000fff
 #define   DSL_LINEMASK_GEN3	0x00001fff
-#define _PIPEACONF	0x70008
+#define _PIPEACONF	(dev_priv->info->display_mmio_offset + 0x70008)
 #define   PIPECONF_ENABLE	(1<<31)
 #define   PIPECONF_DISABLE	0
 #define   PIPECONF_DOUBLE_WIDE	(1<<30)
@@ -2671,18 +2657,19 @@
 #define   PIPECONF_INTERLACED_DBL_ILK		(4 << 21) /* ilk/snb only */
 #define   PIPECONF_PFIT_PF_INTERLACED_DBL_ILK	(5 << 21) /* ilk/snb only */
 #define   PIPECONF_CXSR_DOWNCLOCK	(1<<16)
-#define   PIPECONF_BPP_MASK	(0x000000e0)
-#define   PIPECONF_BPP_8	(0<<5)
-#define   PIPECONF_BPP_10	(1<<5)
-#define   PIPECONF_BPP_6	(2<<5)
-#define   PIPECONF_BPP_12	(3<<5)
+#define   PIPECONF_COLOR_RANGE_SELECT	(1 << 13)
+#define   PIPECONF_BPC_MASK	(0x7 << 5)
+#define   PIPECONF_8BPC		(0<<5)
+#define   PIPECONF_10BPC	(1<<5)
+#define   PIPECONF_6BPC		(2<<5)
+#define   PIPECONF_12BPC	(3<<5)
 #define   PIPECONF_DITHER_EN		(1<<4)
 #define   PIPECONF_DITHER_TYPE_MASK	(0x0000000c)
 #define   PIPECONF_DITHER_TYPE_SP	(0<<2)
 #define   PIPECONF_DITHER_TYPE_ST1	(1<<2)
 #define   PIPECONF_DITHER_TYPE_ST2	(2<<2)
 #define   PIPECONF_DITHER_TYPE_TEMP	(3<<2)
-#define _PIPEASTAT	0x70024
+#define _PIPEASTAT	(dev_priv->info->display_mmio_offset + 0x70024)
 #define   PIPE_FIFO_UNDERRUN_STATUS	(1UL<<31)
 #define   SPRITE1_FLIPDONE_INT_EN_VLV	(1UL<<30)
 #define   PIPE_CRC_ERROR_ENABLE		(1UL<<29)
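
The BPP_* names above become an explicit 3-bit BPC field (bits 7:5). A hedged sketch of the usual read-modify-write against the new mask (illustrative only; read_reg/write_reg are stand-ins for the driver's MMIO accessors, not functions added by this patch):

	u32 val = read_reg(_PIPEACONF);

	val &= ~PIPECONF_BPC_MASK;	/* clear bits 7:5 */
	val |= PIPECONF_6BPC;		/* e.g. request 6 bpc */
	val |= PIPECONF_DITHER_EN;	/* usually paired with dithering */
	write_reg(_PIPEACONF, val);
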
@@ -2693,7 +2680,7 @@
 #define   PIPE_VSYNC_INTERRUPT_ENABLE		(1UL<<25)
 #define   PIPE_DISPLAY_LINE_COMPARE_ENABLE	(1UL<<24)
 #define   PIPE_DPST_EVENT_ENABLE		(1UL<<23)
-#define   SPRITE0_FLIP_DONE_INT_EN_VLV		(1UL<<26)
+#define   SPRITE0_FLIP_DONE_INT_EN_VLV		(1UL<<22)
 #define   PIPE_LEGACY_BLC_EVENT_ENABLE		(1UL<<22)
 #define   PIPE_ODD_FIELD_INTERRUPT_ENABLE	(1UL<<21)
 #define   PIPE_EVEN_FIELD_INTERRUPT_ENABLE	(1UL<<20)
@@ -2703,7 +2690,7 @@
 #define   PIPEA_HBLANK_INT_EN_VLV		(1UL<<16)
 #define   PIPE_OVERLAY_UPDATED_ENABLE		(1UL<<16)
 #define   SPRITE1_FLIPDONE_INT_STATUS_VLV	(1UL<<15)
-#define   SPRITE0_FLIPDONE_INT_STATUS_VLV	(1UL<<15)
+#define   SPRITE0_FLIPDONE_INT_STATUS_VLV	(1UL<<14)
 #define   PIPE_CRC_ERROR_INTERRUPT_STATUS	(1UL<<13)
 #define   PIPE_CRC_DONE_INTERRUPT_STATUS	(1UL<<12)
 #define   PIPE_GMBUS_INTERRUPT_STATUS		(1UL<<11)
@@ -2719,11 +2706,6 @@
 #define   PIPE_START_VBLANK_INTERRUPT_STATUS	(1UL<<2) /* 965 or later */
 #define   PIPE_VBLANK_INTERRUPT_STATUS		(1UL<<1)
 #define   PIPE_OVERLAY_UPDATED_STATUS		(1UL<<0)
-#define PIPE_BPC_MASK				(7 << 5) /* Ironlake */
-#define PIPE_8BPC				(0 << 5)
-#define PIPE_10BPC				(1 << 5)
-#define PIPE_6BPC				(2 << 5)
-#define PIPE_12BPC				(3 << 5)
 
 #define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
 #define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF)
@@ -2732,7 +2714,7 @@
 #define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
 #define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
 
-#define VLV_DPFLIPSTAT			0x70028
+#define VLV_DPFLIPSTAT			(VLV_DISPLAY_BASE + 0x70028)
 #define   PIPEB_LINE_COMPARE_INT_EN	(1<<29)
 #define   PIPEB_HLINE_INT_EN		(1<<28)
 #define   PIPEB_VBLANK_INT_EN		(1<<27)
@@ -2746,7 +2728,7 @@
 #define   SPRITEA_FLIPDONE_INT_EN	(1<<17)
 #define   PLANEA_FLIPDONE_INT_EN	(1<<16)
 
-#define DPINVGTT			0x7002c /* VLV only */
+#define DPINVGTT			(VLV_DISPLAY_BASE + 0x7002c) /* VLV only */
 #define   CURSORB_INVALID_GTT_INT_EN	(1<<23)
 #define   CURSORA_INVALID_GTT_INT_EN	(1<<22)
 #define   SPRITED_INVALID_GTT_INT_EN	(1<<21)
@@ -2774,7 +2756,7 @@
 #define   DSPARB_BEND_SHIFT	9 /* on 855 */
 #define   DSPARB_AEND_SHIFT	0
 
-#define DSPFW1			0x70034
+#define DSPFW1			(dev_priv->info->display_mmio_offset + 0x70034)
 #define   DSPFW_SR_SHIFT	23
 #define   DSPFW_SR_MASK		(0x1ff<<23)
 #define   DSPFW_CURSORB_SHIFT	16
@@ -2782,11 +2764,11 @@
 #define   DSPFW_PLANEB_SHIFT	8
 #define   DSPFW_PLANEB_MASK	(0x7f<<8)
 #define   DSPFW_PLANEA_MASK	(0x7f)
-#define DSPFW2			0x70038
+#define DSPFW2			(dev_priv->info->display_mmio_offset + 0x70038)
 #define   DSPFW_CURSORA_MASK	0x00003f00
 #define   DSPFW_CURSORA_SHIFT	8
 #define   DSPFW_PLANEC_MASK	(0x7f)
-#define DSPFW3			0x7003c
+#define DSPFW3			(dev_priv->info->display_mmio_offset + 0x7003c)
 #define   DSPFW_HPLL_SR_EN	(1<<31)
 #define   DSPFW_CURSOR_SR_SHIFT	24
 #define   PINEVIEW_SELF_REFRESH_EN	(1<<30)
@@ -2798,13 +2780,13 @@
 /* drain latency register values*/
 #define DRAIN_LATENCY_PRECISION_32	32
 #define DRAIN_LATENCY_PRECISION_16	16
-#define VLV_DDL1			0x70050
+#define VLV_DDL1			(VLV_DISPLAY_BASE + 0x70050)
 #define DDL_CURSORA_PRECISION_32	(1<<31)
 #define DDL_CURSORA_PRECISION_16	(0<<31)
 #define DDL_CURSORA_SHIFT		24
 #define DDL_PLANEA_PRECISION_32		(1<<7)
 #define DDL_PLANEA_PRECISION_16		(0<<7)
-#define VLV_DDL2			0x70054
+#define VLV_DDL2			(VLV_DISPLAY_BASE + 0x70054)
 #define DDL_CURSORB_PRECISION_32	(1<<31)
 #define DDL_CURSORB_PRECISION_16	(0<<31)
 #define DDL_CURSORB_SHIFT		24
@@ -2948,10 +2930,10 @@
  * } while (high1 != high2);
  * frame = (high1 << 8) | low1;
  */
-#define _PIPEAFRAMEHIGH		0x70040
+#define _PIPEAFRAMEHIGH		(dev_priv->info->display_mmio_offset + 0x70040)
 #define   PIPE_FRAME_HIGH_MASK	0x0000ffff
 #define   PIPE_FRAME_HIGH_SHIFT	0
-#define _PIPEAFRAMEPIXEL	0x70044
+#define _PIPEAFRAMEPIXEL	(dev_priv->info->display_mmio_offset + 0x70044)
 #define   PIPE_FRAME_LOW_MASK	0xff000000
 #define   PIPE_FRAME_LOW_SHIFT	24
 #define   PIPE_PIXEL_MASK	0x00ffffff
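
The comment fragment ending above describes the split frame counter: 16 high bits in one register, the low 8 bits sharing a register with the pixel count. A sketch of the full double-read loop it prescribes, using only the masks and shifts defined here (read_reg is a stand-in accessor; the body mirrors the comment's pseudocode):

	static u32 read_frame_counter(void)
	{
		u32 high1, high2, low;

		/* re-read until the high half is stable across the low read */
		do {
			high1 = (read_reg(_PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK)
					>> PIPE_FRAME_HIGH_SHIFT;
			low = (read_reg(_PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK)
					>> PIPE_FRAME_LOW_SHIFT;
			high2 = (read_reg(_PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK)
					>> PIPE_FRAME_HIGH_SHIFT;
		} while (high1 != high2);

		return (high1 << 8) | low;
	}
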
@@ -2962,11 +2944,12 @@
 #define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
 
 /* Cursor A & B regs */
-#define _CURACNTR	0x70080
+#define _CURACNTR	(dev_priv->info->display_mmio_offset + 0x70080)
 /* Old style CUR*CNTR flags (desktop 8xx) */
 #define   CURSOR_ENABLE		0x80000000
 #define   CURSOR_GAMMA_ENABLE	0x40000000
 #define   CURSOR_STRIDE_MASK	0x30000000
+#define   CURSOR_PIPE_CSC_ENABLE (1<<24)
 #define   CURSOR_FORMAT_SHIFT	24
 #define   CURSOR_FORMAT_MASK	(0x07 << CURSOR_FORMAT_SHIFT)
 #define   CURSOR_FORMAT_2C	(0x00 << CURSOR_FORMAT_SHIFT)
@@ -2983,16 +2966,16 @@
 #define   MCURSOR_PIPE_A	0x00
 #define   MCURSOR_PIPE_B	(1 << 28)
 #define   MCURSOR_GAMMA_ENABLE	(1 << 26)
-#define _CURABASE	0x70084
-#define _CURAPOS	0x70088
+#define _CURABASE	(dev_priv->info->display_mmio_offset + 0x70084)
+#define _CURAPOS	(dev_priv->info->display_mmio_offset + 0x70088)
 #define   CURSOR_POS_MASK	0x007FF
 #define   CURSOR_POS_SIGN	0x8000
 #define   CURSOR_X_SHIFT	0
 #define   CURSOR_Y_SHIFT	16
 #define CURSIZE		0x700a0
-#define _CURBCNTR	0x700c0
-#define _CURBBASE	0x700c4
-#define _CURBPOS	0x700c8
+#define _CURBCNTR	(dev_priv->info->display_mmio_offset + 0x700c0)
+#define _CURBBASE	(dev_priv->info->display_mmio_offset + 0x700c4)
+#define _CURBPOS	(dev_priv->info->display_mmio_offset + 0x700c8)
 
 #define _CURBCNTR_IVB	0x71080
 #define _CURBBASE_IVB	0x71084
@@ -3007,7 +2990,7 @@
 #define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB)
 
 /* Display A control */
-#define _DSPACNTR	0x70180
+#define _DSPACNTR	(dev_priv->info->display_mmio_offset + 0x70180)
 #define   DISPLAY_PLANE_ENABLE		(1<<31)
 #define   DISPLAY_PLANE_DISABLE		0
 #define   DISPPLANE_GAMMA_ENABLE	(1<<30)
@@ -3028,6 +3011,7 @@
 #define   DISPPLANE_RGBA888		(0xf<<26)
 #define   DISPPLANE_STEREO_ENABLE	(1<<25)
 #define   DISPPLANE_STEREO_DISABLE	0
+#define   DISPPLANE_PIPE_CSC_ENABLE	(1<<24)
 #define   DISPPLANE_SEL_PIPE_SHIFT	24
 #define   DISPPLANE_SEL_PIPE_MASK	(3<<DISPPLANE_SEL_PIPE_SHIFT)
 #define   DISPPLANE_SEL_PIPE_A		0
@@ -3040,14 +3024,14 @@
 #define   DISPPLANE_STEREO_POLARITY_SECOND	(1<<18)
 #define   DISPPLANE_TRICKLE_FEED_DISABLE	(1<<14) /* Ironlake */
 #define   DISPPLANE_TILED			(1<<10)
-#define _DSPAADDR	0x70184
-#define _DSPASTRIDE	0x70188
-#define _DSPAPOS	0x7018C /* reserved */
-#define _DSPASIZE	0x70190
-#define _DSPASURF	0x7019C /* 965+ only */
-#define _DSPATILEOFF	0x701A4 /* 965+ only */
-#define _DSPAOFFSET	0x701A4 /* HSW */
-#define _DSPASURFLIVE	0x701AC
+#define _DSPAADDR	(dev_priv->info->display_mmio_offset + 0x70184)
+#define _DSPASTRIDE	(dev_priv->info->display_mmio_offset + 0x70188)
+#define _DSPAPOS	(dev_priv->info->display_mmio_offset + 0x7018C) /* reserved */
+#define _DSPASIZE	(dev_priv->info->display_mmio_offset + 0x70190)
+#define _DSPASURF	(dev_priv->info->display_mmio_offset + 0x7019C) /* 965+ only */
+#define _DSPATILEOFF	(dev_priv->info->display_mmio_offset + 0x701A4) /* 965+ only */
+#define _DSPAOFFSET	(dev_priv->info->display_mmio_offset + 0x701A4) /* HSW */
+#define _DSPASURFLIVE	(dev_priv->info->display_mmio_offset + 0x701AC)
 
 #define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
 #define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
@@ -3068,44 +3052,44 @@
 	(I915_WRITE((reg), (gfx_addr) | I915_LO_DISPBASE(I915_READ(reg))))
 
 /* VBIOS flags */
-#define SWF00		0x71410
-#define SWF01		0x71414
-#define SWF02		0x71418
-#define SWF03		0x7141c
-#define SWF04		0x71420
-#define SWF05		0x71424
-#define SWF06		0x71428
-#define SWF10		0x70410
-#define SWF11		0x70414
-#define SWF14		0x71420
-#define SWF30		0x72414
-#define SWF31		0x72418
-#define SWF32		0x7241c
+#define SWF00		(dev_priv->info->display_mmio_offset + 0x71410)
+#define SWF01		(dev_priv->info->display_mmio_offset + 0x71414)
+#define SWF02		(dev_priv->info->display_mmio_offset + 0x71418)
+#define SWF03		(dev_priv->info->display_mmio_offset + 0x7141c)
+#define SWF04		(dev_priv->info->display_mmio_offset + 0x71420)
+#define SWF05		(dev_priv->info->display_mmio_offset + 0x71424)
+#define SWF06		(dev_priv->info->display_mmio_offset + 0x71428)
+#define SWF10		(dev_priv->info->display_mmio_offset + 0x70410)
+#define SWF11		(dev_priv->info->display_mmio_offset + 0x70414)
+#define SWF14		(dev_priv->info->display_mmio_offset + 0x71420)
+#define SWF30		(dev_priv->info->display_mmio_offset + 0x72414)
+#define SWF31		(dev_priv->info->display_mmio_offset + 0x72418)
+#define SWF32		(dev_priv->info->display_mmio_offset + 0x7241c)
 
 /* Pipe B */
-#define _PIPEBDSL		0x71000
-#define _PIPEBCONF		0x71008
-#define _PIPEBSTAT		0x71024
-#define _PIPEBFRAMEHIGH		0x71040
-#define _PIPEBFRAMEPIXEL	0x71044
+#define _PIPEBDSL		(dev_priv->info->display_mmio_offset + 0x71000)
+#define _PIPEBCONF		(dev_priv->info->display_mmio_offset + 0x71008)
+#define _PIPEBSTAT		(dev_priv->info->display_mmio_offset + 0x71024)
+#define _PIPEBFRAMEHIGH		(dev_priv->info->display_mmio_offset + 0x71040)
+#define _PIPEBFRAMEPIXEL	(dev_priv->info->display_mmio_offset + 0x71044)
 #define _PIPEB_FRMCOUNT_GM45	0x71040
 #define _PIPEB_FLIPCOUNT_GM45	0x71044
 
 
 /* Display B control */
-#define _DSPBCNTR	0x71180
+#define _DSPBCNTR	(dev_priv->info->display_mmio_offset + 0x71180)
 #define   DISPPLANE_ALPHA_TRANS_ENABLE		(1<<15)
 #define   DISPPLANE_ALPHA_TRANS_DISABLE		0
 #define   DISPPLANE_SPRITE_ABOVE_DISPLAY	0
 #define   DISPPLANE_SPRITE_ABOVE_OVERLAY	(1)
-#define _DSPBADDR	0x71184
-#define _DSPBSTRIDE	0x71188
-#define _DSPBPOS	0x7118C
-#define _DSPBSIZE	0x71190
-#define _DSPBSURF	0x7119C
-#define _DSPBTILEOFF	0x711A4
-#define _DSPBOFFSET	0x711A4
-#define _DSPBSURFLIVE	0x711AC
+#define _DSPBADDR	(dev_priv->info->display_mmio_offset + 0x71184)
+#define _DSPBSTRIDE	(dev_priv->info->display_mmio_offset + 0x71188)
+#define _DSPBPOS	(dev_priv->info->display_mmio_offset + 0x7118C)
+#define _DSPBSIZE	(dev_priv->info->display_mmio_offset + 0x71190)
+#define _DSPBSURF	(dev_priv->info->display_mmio_offset + 0x7119C)
+#define _DSPBTILEOFF	(dev_priv->info->display_mmio_offset + 0x711A4)
+#define _DSPBOFFSET	(dev_priv->info->display_mmio_offset + 0x711A4)
+#define _DSPBSURFLIVE	(dev_priv->info->display_mmio_offset + 0x711AC)
 
 /* Sprite A control */
 #define _DVSACNTR	0x72180
@@ -3116,6 +3100,7 @@
 #define   DVS_FORMAT_RGBX101010	(1<<25)
 #define   DVS_FORMAT_RGBX888	(2<<25)
 #define   DVS_FORMAT_RGBX161616	(3<<25)
+#define   DVS_PIPE_CSC_ENABLE	(1<<24)
 #define   DVS_SOURCE_KEY	(1<<22)
 #define   DVS_RGB_ORDER_XBGR	(1<<20)
 #define   DVS_YUV_BYTE_ORDER_MASK (3<<16)
@@ -3183,7 +3168,7 @@
 #define   SPRITE_FORMAT_RGBX161616	(3<<25)
 #define   SPRITE_FORMAT_YUV444		(4<<25)
 #define   SPRITE_FORMAT_XR_BGR101010	(5<<25) /* Extended range */
-#define   SPRITE_CSC_ENABLE		(1<<24)
+#define   SPRITE_PIPE_CSC_ENABLE	(1<<24)
 #define   SPRITE_SOURCE_KEY		(1<<22)
 #define   SPRITE_RGB_ORDER_RGBX		(1<<20) /* only for 888 and 161616 */
 #define   SPRITE_YUV_TO_RGB_CSC_DISABLE	(1<<19)
@@ -3254,6 +3239,8 @@
 # define VGA_2X_MODE		(1 << 30)
 # define VGA_PIPE_B_SELECT	(1 << 29)
 
+#define VLV_VGACNTRL		(VLV_DISPLAY_BASE + 0x71400)
+
 /* Ironlake */
 
 #define CPU_VGACNTRL	0x41000
@@ -3294,41 +3281,41 @@
 #define  FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK	0xff
 
 
-#define _PIPEA_DATA_M1	0x60030
+#define _PIPEA_DATA_M1	(dev_priv->info->display_mmio_offset + 0x60030)
 #define  TU_SIZE(x)		(((x)-1) << 25) /* default size 64 */
 #define  TU_SIZE_MASK		0x7e000000
 #define  PIPE_DATA_M1_OFFSET	0
-#define _PIPEA_DATA_N1	0x60034
+#define _PIPEA_DATA_N1	(dev_priv->info->display_mmio_offset + 0x60034)
 #define  PIPE_DATA_N1_OFFSET	0
 
-#define _PIPEA_DATA_M2	0x60038
+#define _PIPEA_DATA_M2	(dev_priv->info->display_mmio_offset + 0x60038)
 #define  PIPE_DATA_M2_OFFSET	0
-#define _PIPEA_DATA_N2	0x6003c
+#define _PIPEA_DATA_N2	(dev_priv->info->display_mmio_offset + 0x6003c)
 #define  PIPE_DATA_N2_OFFSET	0
 
-#define _PIPEA_LINK_M1	0x60040
+#define _PIPEA_LINK_M1	(dev_priv->info->display_mmio_offset + 0x60040)
 #define  PIPE_LINK_M1_OFFSET	0
-#define _PIPEA_LINK_N1	0x60044
+#define _PIPEA_LINK_N1	(dev_priv->info->display_mmio_offset + 0x60044)
 #define  PIPE_LINK_N1_OFFSET	0
 
-#define _PIPEA_LINK_M2	0x60048
+#define _PIPEA_LINK_M2	(dev_priv->info->display_mmio_offset + 0x60048)
 #define  PIPE_LINK_M2_OFFSET	0
-#define _PIPEA_LINK_N2	0x6004c
+#define _PIPEA_LINK_N2	(dev_priv->info->display_mmio_offset + 0x6004c)
 #define  PIPE_LINK_N2_OFFSET	0
 
 /* PIPEB timing regs are same start from 0x61000 */
 
-#define _PIPEB_DATA_M1	0x61030
-#define _PIPEB_DATA_N1	0x61034
+#define _PIPEB_DATA_M1	(dev_priv->info->display_mmio_offset + 0x61030)
+#define _PIPEB_DATA_N1	(dev_priv->info->display_mmio_offset + 0x61034)
 
-#define _PIPEB_DATA_M2	0x61038
-#define _PIPEB_DATA_N2	0x6103c
+#define _PIPEB_DATA_M2	(dev_priv->info->display_mmio_offset + 0x61038)
+#define _PIPEB_DATA_N2	(dev_priv->info->display_mmio_offset + 0x6103c)
 
-#define _PIPEB_LINK_M1	0x61040
-#define _PIPEB_LINK_N1	0x61044
+#define _PIPEB_LINK_M1	(dev_priv->info->display_mmio_offset + 0x61040)
+#define _PIPEB_LINK_N1	(dev_priv->info->display_mmio_offset + 0x61044)
 
-#define _PIPEB_LINK_M2	0x61048
-#define _PIPEB_LINK_N2	0x6104c
+#define _PIPEB_LINK_M2	(dev_priv->info->display_mmio_offset + 0x61048)
+#define _PIPEB_LINK_N2	(dev_priv->info->display_mmio_offset + 0x6104c)
 
 #define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
 #define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
@@ -3581,27 +3568,30 @@
 #define PORTD_PULSE_DURATION_6ms	(2 << 18)
 #define PORTD_PULSE_DURATION_100ms	(3 << 18)
 #define PORTD_PULSE_DURATION_MASK	(3 << 18)
-#define PORTD_HOTPLUG_NO_DETECT		(0)
-#define PORTD_HOTPLUG_SHORT_DETECT	(1 << 16)
-#define PORTD_HOTPLUG_LONG_DETECT	(1 << 17)
+#define PORTD_HOTPLUG_STATUS_MASK	(0x3 << 16)
+#define PORTD_HOTPLUG_NO_DETECT		(0 << 16)
+#define PORTD_HOTPLUG_SHORT_DETECT	(1 << 16)
+#define PORTD_HOTPLUG_LONG_DETECT	(2 << 16)
 #define PORTC_HOTPLUG_ENABLE		(1 << 12)
 #define PORTC_PULSE_DURATION_2ms	(0)
 #define PORTC_PULSE_DURATION_4_5ms	(1 << 10)
 #define PORTC_PULSE_DURATION_6ms	(2 << 10)
 #define PORTC_PULSE_DURATION_100ms	(3 << 10)
 #define PORTC_PULSE_DURATION_MASK	(3 << 10)
-#define PORTC_HOTPLUG_NO_DETECT		(0)
-#define PORTC_HOTPLUG_SHORT_DETECT	(1 << 8)
-#define PORTC_HOTPLUG_LONG_DETECT	(1 << 9)
+#define PORTC_HOTPLUG_STATUS_MASK	(0x3 << 8)
+#define PORTC_HOTPLUG_NO_DETECT		(0 << 8)
+#define PORTC_HOTPLUG_SHORT_DETECT	(1 << 8)
+#define PORTC_HOTPLUG_LONG_DETECT	(2 << 8)
 #define PORTB_HOTPLUG_ENABLE		(1 << 4)
 #define PORTB_PULSE_DURATION_2ms	(0)
 #define PORTB_PULSE_DURATION_4_5ms	(1 << 2)
 #define PORTB_PULSE_DURATION_6ms	(2 << 2)
 #define PORTB_PULSE_DURATION_100ms	(3 << 2)
 #define PORTB_PULSE_DURATION_MASK	(3 << 2)
-#define PORTB_HOTPLUG_NO_DETECT		(0)
-#define PORTB_HOTPLUG_SHORT_DETECT	(1 << 0)
-#define PORTB_HOTPLUG_LONG_DETECT	(1 << 1)
+#define PORTB_HOTPLUG_STATUS_MASK	(0x3 << 0)
+#define PORTB_HOTPLUG_NO_DETECT		(0 << 0)
+#define PORTB_HOTPLUG_SHORT_DETECT	(1 << 0)
+#define PORTB_HOTPLUG_LONG_DETECT	(2 << 0)
 
 #define PCH_GPIOA	0xc5010
 #define PCH_GPIOB	0xc5014
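
The detect bits above turn into proper two-bit status fields (a mask plus 0/1/2 encodings for none/short/long). A hedged sketch of decoding one port (illustrative only; mapping long pulses to a re-probe and short pulses to sink servicing is the usual DP convention, not something this hunk spells out):

	static void example_decode_portd(u32 stat)
	{
		switch (stat & PORTD_HOTPLUG_STATUS_MASK) {
		case PORTD_HOTPLUG_LONG_DETECT:		/* 2 << 16 */
			/* connect/disconnect: re-probe the port */
			break;
		case PORTD_HOTPLUG_SHORT_DETECT:	/* 1 << 16 */
			/* sink interrupt: service without a full probe */
			break;
		default:				/* PORTD_HOTPLUG_NO_DETECT */
			break;
		}
	}
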
@@ -3722,13 +3712,13 @@
 #define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
 #define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
 
-#define VLV_VIDEO_DIP_CTL_A		0x60200
-#define VLV_VIDEO_DIP_DATA_A		0x60208
-#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A	0x60210
+#define VLV_VIDEO_DIP_CTL_A		(VLV_DISPLAY_BASE + 0x60200)
+#define VLV_VIDEO_DIP_DATA_A		(VLV_DISPLAY_BASE + 0x60208)
+#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A	(VLV_DISPLAY_BASE + 0x60210)
 
-#define VLV_VIDEO_DIP_CTL_B		0x61170
-#define VLV_VIDEO_DIP_DATA_B		0x61174
-#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B	0x61178
+#define VLV_VIDEO_DIP_CTL_B		(VLV_DISPLAY_BASE + 0x61170)
+#define VLV_VIDEO_DIP_DATA_B		(VLV_DISPLAY_BASE + 0x61174)
+#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B	(VLV_DISPLAY_BASE + 0x61178)
 
 #define VLV_TVIDEO_DIP_CTL(pipe) \
 	_PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
@@ -3820,8 +3810,6 @@
 #define  TRANS_FSYNC_DELAY_HB2	(1<<27)
 #define  TRANS_FSYNC_DELAY_HB3	(2<<27)
 #define  TRANS_FSYNC_DELAY_HB4	(3<<27)
-#define  TRANS_DP_AUDIO_ONLY	(1<<26)
-#define  TRANS_DP_VIDEO_AUDIO	(0<<26)
 #define  TRANS_INTERLACE_MASK	(7<<21)
 #define  TRANS_PROGRESSIVE	(0<<21)
 #define  TRANS_INTERLACED	(3<<21)
@@ -3927,7 +3915,7 @@
 #define  FDI_10BPC			(1<<16)
 #define  FDI_6BPC			(2<<16)
 #define  FDI_12BPC			(3<<16)
-#define  FDI_LINK_REVERSE_OVERWRITE	(1<<15)
+#define  FDI_RX_LINK_REVERSAL_OVERRIDE	(1<<15)
 #define  FDI_DMI_LINK_REVERSE_MASK	(1<<14)
 #define  FDI_RX_PLL_ENABLE		(1<<13)
 #define  FDI_FS_ERR_CORRECT_ENABLE	(1<<11)
@@ -4020,17 +4008,17 @@
 #define LVDS_DETECTED	(1 << 1)
 
 /* vlv has 2 sets of panel control regs. */
-#define PIPEA_PP_STATUS		0x61200
-#define PIPEA_PP_CONTROL	0x61204
-#define PIPEA_PP_ON_DELAYS	0x61208
-#define PIPEA_PP_OFF_DELAYS	0x6120c
-#define PIPEA_PP_DIVISOR	0x61210
+#define PIPEA_PP_STATUS		(VLV_DISPLAY_BASE + 0x61200)
+#define PIPEA_PP_CONTROL	(VLV_DISPLAY_BASE + 0x61204)
+#define PIPEA_PP_ON_DELAYS	(VLV_DISPLAY_BASE + 0x61208)
+#define PIPEA_PP_OFF_DELAYS	(VLV_DISPLAY_BASE + 0x6120c)
+#define PIPEA_PP_DIVISOR	(VLV_DISPLAY_BASE + 0x61210)
 
-#define PIPEB_PP_STATUS		0x61300
-#define PIPEB_PP_CONTROL	0x61304
-#define PIPEB_PP_ON_DELAYS	0x61308
-#define PIPEB_PP_OFF_DELAYS	0x6130c
-#define PIPEB_PP_DIVISOR	0x61310
+#define PIPEB_PP_STATUS		(VLV_DISPLAY_BASE + 0x61300)
+#define PIPEB_PP_CONTROL	(VLV_DISPLAY_BASE + 0x61304)
+#define PIPEB_PP_ON_DELAYS	(VLV_DISPLAY_BASE + 0x61308)
+#define PIPEB_PP_OFF_DELAYS	(VLV_DISPLAY_BASE + 0x6130c)
+#define PIPEB_PP_DIVISOR	(VLV_DISPLAY_BASE + 0x61310)
 
 #define PCH_PP_STATUS	0xc7200
 #define PCH_PP_CONTROL	0xc7204
@@ -4211,7 +4199,9 @@
 #define GEN6_RP_INTERRUPT_LIMITS	0xA014
 #define GEN6_RPSTAT1			0xA01C
 #define   GEN6_CAGF_SHIFT		8
+#define   HSW_CAGF_SHIFT		7
 #define   GEN6_CAGF_MASK		(0x7f << GEN6_CAGF_SHIFT)
+#define   HSW_CAGF_MASK			(0x7f << HSW_CAGF_SHIFT)
 #define GEN6_RP_CONTROL			0xA024
 #define   GEN6_RP_MEDIA_TURBO		(1<<11)
 #define   GEN6_RP_MEDIA_MODE_MASK	(3<<9)
@@ -4280,8 +4270,8 @@
 #define   GEN6_PCODE_READ_MIN_FREQ_TABLE	0x9
 #define   GEN6_PCODE_WRITE_RC6VIDS		0x4
 #define   GEN6_PCODE_READ_RC6VIDS		0x5
-#define     GEN6_ENCODE_RC6_VID(mv)		(((mv) / 5) - 245) < 0 ?: 0
-#define     GEN6_DECODE_RC6_VID(vids)		(((vids) * 5) > 0 ? ((vids) * 5) + 245 : 0)
+#define     GEN6_ENCODE_RC6_VID(mv)		(((mv) - 245) / 5)
+#define     GEN6_DECODE_RC6_VID(vids)		(((vids) * 5) + 245)
 #define GEN6_PCODE_DATA				0x138128
 #define   GEN6_PCODE_FREQ_IA_RATIO_SHIFT	8
 
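
The old encode macro divided before subtracting and trailed off into a dangling comparison, so it was not even a usable expression; the corrected pair are exact inverses. A quick worked check:

	/* GEN6_ENCODE_RC6_VID(450) == (450 - 245) / 5 == 41	*/
	/* GEN6_DECODE_RC6_VID(41)  == (41 * 5) + 245  == 450	*/
	/* the old encode computed (450 / 5) - 245 == -155	*/
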
@@ -4322,7 +4312,7 @@
 #define GEN7_ROW_CHICKEN2_GT2		0xf4f4
 #define   DOP_CLOCK_GATING_DISABLE	(1<<0)
 
-#define G4X_AUD_VID_DID		0x62020
+#define G4X_AUD_VID_DID		(dev_priv->info->display_mmio_offset + 0x62020)
 #define INTEL_AUDIO_DEVCL	0x808629FB
 #define INTEL_AUDIO_DEVBLC	0x80862801
 #define INTEL_AUDIO_DEVCTG	0x80862802
@@ -4438,10 +4428,10 @@
 #define   AUDIO_CP_READY_C	(1<<9)
 
 /* HSW Power Wells */
-#define HSW_PWR_WELL_CTL1	0x45400 /* BIOS */
-#define HSW_PWR_WELL_CTL2	0x45404 /* Driver */
-#define HSW_PWR_WELL_CTL3	0x45408 /* KVMR */
-#define HSW_PWR_WELL_CTL4	0x4540C /* Debug */
+#define HSW_PWR_WELL_BIOS	0x45400 /* CTL1 */
+#define HSW_PWR_WELL_DRIVER	0x45404 /* CTL2 */
+#define HSW_PWR_WELL_KVMR	0x45408 /* CTL3 */
+#define HSW_PWR_WELL_DEBUG	0x4540C /* CTL4 */
 #define   HSW_PWR_WELL_ENABLE	(1<<31)
 #define   HSW_PWR_WELL_STATE	(1<<30)
 #define HSW_PWR_WELL_CTL5	0x45410
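
The rename makes each request register's owner explicit (BIOS, driver, KVMr, debug) while the enable/state bits stay shared. A hedged sketch of the request/acknowledge handshake those two bits imply (illustrative only; the polling and timeout policy is an assumption, not part of this patch; write_reg/read_reg are stand-in accessors):

	static void example_enable_driver_power_well(void)
	{
		write_reg(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE);

		/* wait for the hardware to acknowledge the request */
		while (!(read_reg(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE))
			; /* a real driver would bound this wait */
	}
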
@@ -4524,6 +4514,7 @@
 #define   DDI_BUF_EMP_800MV_0DB_HSW	(7<<24)   /* Sel7 */
 #define   DDI_BUF_EMP_800MV_3_5DB_HSW	(8<<24)   /* Sel8 */
 #define   DDI_BUF_EMP_MASK		(0xf<<24)
+#define   DDI_BUF_PORT_REVERSAL		(1<<16)
 #define   DDI_BUF_IS_IDLE		(1<<7)
 #define   DDI_A_4_LANES			(1<<4)
 #define   DDI_PORT_WIDTH_X1		(0<<1)
@@ -4657,4 +4648,51 @@
 #define   WM_DBG_DISALLOW_MAXFIFO	(1<<1)
 #define   WM_DBG_DISALLOW_SPRITE	(1<<2)
 
+/* pipe CSC */
+#define _PIPE_A_CSC_COEFF_RY_GY	0x49010
+#define _PIPE_A_CSC_COEFF_BY	0x49014
+#define _PIPE_A_CSC_COEFF_RU_GU	0x49018
+#define _PIPE_A_CSC_COEFF_BU	0x4901c
+#define _PIPE_A_CSC_COEFF_RV_GV	0x49020
+#define _PIPE_A_CSC_COEFF_BV	0x49024
+#define _PIPE_A_CSC_MODE	0x49028
+#define _PIPE_A_CSC_PREOFF_HI	0x49030
+#define _PIPE_A_CSC_PREOFF_ME	0x49034
+#define _PIPE_A_CSC_PREOFF_LO	0x49038
+#define _PIPE_A_CSC_POSTOFF_HI	0x49040
+#define _PIPE_A_CSC_POSTOFF_ME	0x49044
+#define _PIPE_A_CSC_POSTOFF_LO	0x49048
+
+#define _PIPE_B_CSC_COEFF_RY_GY	0x49110
+#define _PIPE_B_CSC_COEFF_BY	0x49114
+#define _PIPE_B_CSC_COEFF_RU_GU	0x49118
+#define _PIPE_B_CSC_COEFF_BU	0x4911c
+#define _PIPE_B_CSC_COEFF_RV_GV	0x49120
+#define _PIPE_B_CSC_COEFF_BV	0x49124
+#define _PIPE_B_CSC_MODE	0x49128
+#define _PIPE_B_CSC_PREOFF_HI	0x49130
+#define _PIPE_B_CSC_PREOFF_ME	0x49134
+#define _PIPE_B_CSC_PREOFF_LO	0x49138
+#define _PIPE_B_CSC_POSTOFF_HI	0x49140
+#define _PIPE_B_CSC_POSTOFF_ME	0x49144
+#define _PIPE_B_CSC_POSTOFF_LO	0x49148
+
+#define CSC_BLACK_SCREEN_OFFSET	(1 << 2)
+#define CSC_POSITION_BEFORE_GAMMA	(1 << 1)
+#define CSC_MODE_YUV_TO_RGB	(1 << 0)
+
+#define PIPE_CSC_COEFF_RY_GY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RY_GY, _PIPE_B_CSC_COEFF_RY_GY)
+#define PIPE_CSC_COEFF_BY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BY, _PIPE_B_CSC_COEFF_BY)
+#define PIPE_CSC_COEFF_RU_GU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RU_GU, _PIPE_B_CSC_COEFF_RU_GU)
+#define PIPE_CSC_COEFF_BU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BU, _PIPE_B_CSC_COEFF_BU)
+#define PIPE_CSC_COEFF_RV_GV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RV_GV, _PIPE_B_CSC_COEFF_RV_GV)
+#define PIPE_CSC_COEFF_BV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BV, _PIPE_B_CSC_COEFF_BV)
+#define PIPE_CSC_MODE(pipe) _PIPE(pipe, _PIPE_A_CSC_MODE, _PIPE_B_CSC_MODE)
+#define PIPE_CSC_PREOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_HI, _PIPE_B_CSC_PREOFF_HI)
+#define PIPE_CSC_PREOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_ME, _PIPE_B_CSC_PREOFF_ME)
+#define PIPE_CSC_PREOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_LO, _PIPE_B_CSC_PREOFF_LO)
+#define PIPE_CSC_POSTOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_HI, _PIPE_B_CSC_POSTOFF_HI)
+#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
+#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
+
 #endif /* _I915_REG_H_ */
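
All the PIPE_CSC_*(pipe) lookups above rely on the pipe A and pipe B register banks sitting exactly 0x100 apart, which is what lets the header's _PIPE(pipe, a, b) helper index them. A minimal sketch of that trick (illustrative; the real _PIPE may differ in detail):

	#define EXAMPLE_PIPE(pipe, a, b)	((a) + (pipe) * ((b) - (a)))

	/* EXAMPLE_PIPE(0, _PIPE_A_CSC_MODE, _PIPE_B_CSC_MODE) == 0x49028 */
	/* EXAMPLE_PIPE(1, _PIPE_A_CSC_MODE, _PIPE_B_CSC_MODE) == 0x49128 */
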
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 63d4d30c39de..2135f21ea458 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -29,67 +29,6 @@
 #include "intel_drv.h"
 #include "i915_reg.h"
 
-static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 dpll_reg;
-
-	/* On IVB, 3rd pipe shares PLL with another one */
-	if (pipe > 1)
-		return false;
-
-	if (HAS_PCH_SPLIT(dev))
-		dpll_reg = _PCH_DPLL(pipe);
-	else
-		dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
-
-	return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
-}
-
-static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
-	u32 *array;
-	int i;
-
-	if (!i915_pipe_enabled(dev, pipe))
-		return;
-
-	if (HAS_PCH_SPLIT(dev))
-		reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
-
-	if (pipe == PIPE_A)
-		array = dev_priv->regfile.save_palette_a;
-	else
-		array = dev_priv->regfile.save_palette_b;
-
-	for (i = 0; i < 256; i++)
-		array[i] = I915_READ(reg + (i << 2));
-}
-
-static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
-	u32 *array;
-	int i;
-
-	if (!i915_pipe_enabled(dev, pipe))
-		return;
-
-	if (HAS_PCH_SPLIT(dev))
-		reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
-
-	if (pipe == PIPE_A)
-		array = dev_priv->regfile.save_palette_a;
-	else
-		array = dev_priv->regfile.save_palette_b;
-
-	for (i = 0; i < 256; i++)
-		I915_WRITE(reg + (i << 2), array[i]);
-}
-
 static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -130,6 +69,12 @@ static void i915_save_vga(struct drm_device *dev)
 	int i;
 	u16 cr_index, cr_data, st01;
 
+	/* VGA state */
+	dev_priv->regfile.saveVGA0 = I915_READ(VGA0);
+	dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
+	dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
+	dev_priv->regfile.saveVGACNTRL = I915_READ(i915_vgacntrl_reg(dev));
+
 	/* VGA color palette registers */
 	dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK);
 
@@ -188,6 +133,15 @@ static void i915_restore_vga(struct drm_device *dev)
 	int i;
 	u16 cr_index, cr_data, st01;
 
+	/* VGA state */
+	I915_WRITE(i915_vgacntrl_reg(dev), dev_priv->regfile.saveVGACNTRL);
+
+	I915_WRITE(VGA0, dev_priv->regfile.saveVGA0);
+	I915_WRITE(VGA1, dev_priv->regfile.saveVGA1);
+	I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
+	POSTING_READ(VGA_PD);
+	udelay(150);
+
 	/* MSR bits */
 	I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR);
 	if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
@@ -235,396 +189,18 @@ static void i915_restore_vga(struct drm_device *dev)
 	I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK);
 }
 
-static void i915_save_modeset_reg(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return;
-
-	/* Cursor state */
-	dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
-	dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
-	dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
-	dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
-	dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
-	dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
-	if (IS_GEN2(dev))
-		dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
-
-	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
-		dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
-	}
-
-	/* Pipe & plane A info */
-	dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
-	dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
-	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
-		dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
-		dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
-	} else {
-		dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
-		dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
-		dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
-	}
-	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
-		dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
-	dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
-	dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
-	dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
-	dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
-	dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
-	dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
-	if (!HAS_PCH_SPLIT(dev))
-		dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
-
-	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
-		dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
-		dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
-		dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
-
-		dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
-		dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
-
-		dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
-		dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
-		dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
-
-		dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF);
-		dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
-		dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
-		dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
-		dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
-		dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
-		dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
-	}
-
-	dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
-	dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
-	dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
-	dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
-	dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
-	if (INTEL_INFO(dev)->gen >= 4) {
-		dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
-		dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
-	}
-	i915_save_palette(dev, PIPE_A);
-	dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
-
-	/* Pipe & plane B info */
-	dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
-	dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
-	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
-		dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
-		dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
-	} else {
-		dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
-		dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
-		dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
-	}
-	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
-		dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
-	dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
-	dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
-	dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
-	dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
-	dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
-	dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
-	if (!HAS_PCH_SPLIT(dev))
-		dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
-
-	if (HAS_PCH_SPLIT(dev)) {
-		dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
-		dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
-		dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
-		dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
-
-		dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
-		dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
-
-		dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
-		dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
-		dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
-
-		dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF);
-		dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
-		dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
-		dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
-		dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
-		dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
-		dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
-	}
-
-	dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
-	dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
-	dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
-	dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
-	dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
-	if (INTEL_INFO(dev)->gen >= 4) {
-		dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
-		dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
-	}
-	i915_save_palette(dev, PIPE_B);
-	dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
-
-	/* Fences */
-	switch (INTEL_INFO(dev)->gen) {
-	case 7:
378 case 6:
379 for (i = 0; i < 16; i++)
380 dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
381 break;
382 case 5:
383 case 4:
384 for (i = 0; i < 16; i++)
385 dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
386 break;
387 case 3:
388 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
389 for (i = 0; i < 8; i++)
390 dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
391 case 2:
392 for (i = 0; i < 8; i++)
393 dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
394 break;
395 }
396
397 /* CRT state */
398 if (HAS_PCH_SPLIT(dev))
399 dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
400 else
401 dev_priv->regfile.saveADPA = I915_READ(ADPA);
402
403 return;
404}
405
406static void i915_restore_modeset_reg(struct drm_device *dev)
407{
408 struct drm_i915_private *dev_priv = dev->dev_private;
409 int dpll_a_reg, fpa0_reg, fpa1_reg;
410 int dpll_b_reg, fpb0_reg, fpb1_reg;
411 int i;
412
413 if (drm_core_check_feature(dev, DRIVER_MODESET))
414 return;
415
416 /* Fences */
417 switch (INTEL_INFO(dev)->gen) {
418 case 7:
419 case 6:
420 for (i = 0; i < 16; i++)
421 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
422 break;
423 case 5:
424 case 4:
425 for (i = 0; i < 16; i++)
426 I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
427 break;
428 case 3:
429 case 2:
430 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
431 for (i = 0; i < 8; i++)
432 I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
433 for (i = 0; i < 8; i++)
434 I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
435 break;
436 }
437
438
439 if (HAS_PCH_SPLIT(dev)) {
440 dpll_a_reg = _PCH_DPLL_A;
441 dpll_b_reg = _PCH_DPLL_B;
442 fpa0_reg = _PCH_FPA0;
443 fpb0_reg = _PCH_FPB0;
444 fpa1_reg = _PCH_FPA1;
445 fpb1_reg = _PCH_FPB1;
446 } else {
447 dpll_a_reg = _DPLL_A;
448 dpll_b_reg = _DPLL_B;
449 fpa0_reg = _FPA0;
450 fpb0_reg = _FPB0;
451 fpa1_reg = _FPA1;
452 fpb1_reg = _FPB1;
453 }
454
455 if (HAS_PCH_SPLIT(dev)) {
456 I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
457 I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
458 }
459
460 /* Pipe & plane A info */
461 /* Prime the clock */
462 if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
463 I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
464 ~DPLL_VCO_ENABLE);
465 POSTING_READ(dpll_a_reg);
466 udelay(150);
467 }
468 I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
469 I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
470 /* Actually enable it */
471 I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
472 POSTING_READ(dpll_a_reg);
473 udelay(150);
474 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
475 I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
476 POSTING_READ(_DPLL_A_MD);
477 }
478 udelay(150);
479
480 /* Restore mode */
481 I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
482 I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
483 I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
484 I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
485 I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
486 I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
487 if (!HAS_PCH_SPLIT(dev))
488 I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
489
490 if (HAS_PCH_SPLIT(dev)) {
491 I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
492 I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
493 I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
494 I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
495
496 I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
497 I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
498
499 I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
500 I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
501 I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
502
503 I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
504 I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
505 I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
506 I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
507 I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
508 I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
509 I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
510 }
511
512 /* Restore plane info */
513 I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
514 I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
515 I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
516 I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
517 I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
518 if (INTEL_INFO(dev)->gen >= 4) {
519 I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
520 I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
521 }
522
523 I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
524
525 i915_restore_palette(dev, PIPE_A);
526 /* Enable the plane */
527 I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
528 I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
529
530 /* Pipe & plane B info */
531 if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
532 I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
533 ~DPLL_VCO_ENABLE);
534 POSTING_READ(dpll_b_reg);
535 udelay(150);
536 }
537 I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
538 I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
539 /* Actually enable it */
540 I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
541 POSTING_READ(dpll_b_reg);
542 udelay(150);
543 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
544 I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
545 POSTING_READ(_DPLL_B_MD);
546 }
547 udelay(150);
548
549 /* Restore mode */
550 I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
551 I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
552 I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
553 I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
554 I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
555 I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
556 if (!HAS_PCH_SPLIT(dev))
557 I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
558
559 if (HAS_PCH_SPLIT(dev)) {
560 I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
561 I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
562 I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
563 I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
564
565 I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
566 I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
567
568 I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
569 I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
570 I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
571
572 I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
573 I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
574 I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
575 I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
576 I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
577 I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
578 I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
579 }
580
581 /* Restore plane info */
582 I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
583 I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
584 I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
585 I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
586 I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
587 if (INTEL_INFO(dev)->gen >= 4) {
588 I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
589 I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
590 }
591
592 I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
593
594 i915_restore_palette(dev, PIPE_B);
595 /* Enable the plane */
596 I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
597 I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
598
599 /* Cursor state */
600 I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
601 I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
602 I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
603 I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
604 I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
605 I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
606 if (IS_GEN2(dev))
607 I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
608
609 /* CRT state */
610 if (HAS_PCH_SPLIT(dev))
611 I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
612 else
613 I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
614
615 return;
616}
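
The restore sequence above is order-sensitive: the DPLL is first rewritten with its VCO bit masked off, the FP dividers are programmed, and only then is the saved DPLL value written back, with a posting read and ~150us settle after each enable. A minimal standalone sketch of that ordering, with register I/O stubbed by an array; the DPLL_VCO_ENABLE value and register names are illustrative, not the hardware's:

#include <stdio.h>
#include <stdint.h>

#define DPLL_VCO_ENABLE (1u << 31)   /* illustrative bit position */

static uint32_t regs[3];             /* fake MMIO: DPLL, FP0, FP1 */
enum { REG_DPLL, REG_FP0, REG_FP1 };

static void wr(int r, uint32_t v) { regs[r] = v; printf("write reg%d = 0x%08x\n", r, v); }
static void settle(void) { /* stands in for POSTING_READ() + udelay(150) */ }

static void restore_pll(uint32_t saved_dpll, uint32_t fp0, uint32_t fp1)
{
    /* prime the clock: rewrite the PLL with the VCO forced off */
    if (saved_dpll & DPLL_VCO_ENABLE) {
        wr(REG_DPLL, saved_dpll & ~DPLL_VCO_ENABLE);
        settle();
    }
    /* dividers must be stable before the VCO comes back up */
    wr(REG_FP0, fp0);
    wr(REG_FP1, fp1);
    /* actually enable it, then wait for lock */
    wr(REG_DPLL, saved_dpll);
    settle();
}

int main(void)
{
    restore_pll(DPLL_VCO_ENABLE | 0x1234, 0x00020d03, 0x00020d03);
    return 0;
}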
617
618static void i915_save_display(struct drm_device *dev) 192static void i915_save_display(struct drm_device *dev)
619{ 193{
620 struct drm_i915_private *dev_priv = dev->dev_private; 194 struct drm_i915_private *dev_priv = dev->dev_private;
621 195
622 /* Display arbitration control */ 196 /* Display arbitration control */
623 dev_priv->regfile.saveDSPARB = I915_READ(DSPARB); 197 if (INTEL_INFO(dev)->gen <= 4)
198 dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
624 199
625 /* This is only meaningful in non-KMS mode */ 200 /* This is only meaningful in non-KMS mode */
626 /* Don't save them in KMS mode */ 201 /* Don't save them in KMS mode */
627 i915_save_modeset_reg(dev); 202 if (!drm_core_check_feature(dev, DRIVER_MODESET))
203 i915_save_display_reg(dev);
628 204
629 /* LVDS state */ 205 /* LVDS state */
630 if (HAS_PCH_SPLIT(dev)) { 206 if (HAS_PCH_SPLIT(dev)) {
@@ -658,24 +234,6 @@ static void i915_save_display(struct drm_device *dev)
658 dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR); 234 dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
659 } 235 }
660 236
661 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
662 /* Display Port state */
663 if (SUPPORTS_INTEGRATED_DP(dev)) {
664 dev_priv->regfile.saveDP_B = I915_READ(DP_B);
665 dev_priv->regfile.saveDP_C = I915_READ(DP_C);
666 dev_priv->regfile.saveDP_D = I915_READ(DP_D);
667 dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
668 dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
669 dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
670 dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
671 dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
672 dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
673 dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
674 dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
675 }
676 /* FIXME: save TV & SDVO state */
677 }
678
679 /* Only save FBC state on the platform that supports FBC */ 237
680 if (I915_HAS_FBC(dev)) { 238 if (I915_HAS_FBC(dev)) {
681 if (HAS_PCH_SPLIT(dev)) { 239 if (HAS_PCH_SPLIT(dev)) {
@@ -690,16 +248,8 @@ static void i915_save_display(struct drm_device *dev)
690 } 248 }
691 } 249 }
692 250
693 /* VGA state */ 251 if (!drm_core_check_feature(dev, DRIVER_MODESET))
694 dev_priv->regfile.saveVGA0 = I915_READ(VGA0); 252 i915_save_vga(dev);
695 dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
696 dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
697 if (HAS_PCH_SPLIT(dev))
698 dev_priv->regfile.saveVGACNTRL = I915_READ(CPU_VGACNTRL);
699 else
700 dev_priv->regfile.saveVGACNTRL = I915_READ(VGACNTRL);
701
702 i915_save_vga(dev);
703} 253}
704 254
705static void i915_restore_display(struct drm_device *dev) 255static void i915_restore_display(struct drm_device *dev)
@@ -707,25 +257,11 @@ static void i915_restore_display(struct drm_device *dev)
707 struct drm_i915_private *dev_priv = dev->dev_private; 257 struct drm_i915_private *dev_priv = dev->dev_private;
708 258
709 /* Display arbitration */ 259 /* Display arbitration */
710 I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB); 260 if (INTEL_INFO(dev)->gen <= 4)
261 I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
711 262
712 if (!drm_core_check_feature(dev, DRIVER_MODESET)) { 263 if (!drm_core_check_feature(dev, DRIVER_MODESET))
713 /* Display port ratios (must be done before clock is set) */ 264 i915_restore_display_reg(dev);
714 if (SUPPORTS_INTEGRATED_DP(dev)) {
715 I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
716 I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
717 I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
718 I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
719 I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M);
720 I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M);
721 I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N);
722 I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N);
723 }
724 }
725
726 /* This is only meaningful in non-KMS mode */
727 /* Don't restore them in KMS mode */
728 i915_restore_modeset_reg(dev);
729 265
730 /* LVDS state */ 266 /* LVDS state */
731 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) 267 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
@@ -763,16 +299,6 @@ static void i915_restore_display(struct drm_device *dev)
763 I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL); 299 I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
764 } 300 }
765 301
766 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
767 /* Display Port state */
768 if (SUPPORTS_INTEGRATED_DP(dev)) {
769 I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
770 I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
771 I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
772 }
773 /* FIXME: restore TV & SDVO state */
774 }
775
776 /* only restore FBC info on the platform that supports FBC */ 302 /* only restore FBC info on the platform that supports FBC */
777 intel_disable_fbc(dev); 303 intel_disable_fbc(dev);
778 if (I915_HAS_FBC(dev)) { 304 if (I915_HAS_FBC(dev)) {
@@ -787,19 +313,11 @@ static void i915_restore_display(struct drm_device *dev)
787 I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL); 313 I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
788 } 314 }
789 } 315 }
790 /* VGA state */
791 if (HAS_PCH_SPLIT(dev))
792 I915_WRITE(CPU_VGACNTRL, dev_priv->regfile.saveVGACNTRL);
793 else
794 I915_WRITE(VGACNTRL, dev_priv->regfile.saveVGACNTRL);
795 316
796 I915_WRITE(VGA0, dev_priv->regfile.saveVGA0); 317 if (!drm_core_check_feature(dev, DRIVER_MODESET))
797 I915_WRITE(VGA1, dev_priv->regfile.saveVGA1); 318 i915_restore_vga(dev);
798 I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD); 319 else
799 POSTING_READ(VGA_PD); 320 i915_redisable_vga(dev);
800 udelay(150);
801
802 i915_restore_vga(dev);
803} 321}
804 322
805int i915_save_state(struct drm_device *dev) 323int i915_save_state(struct drm_device *dev)
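
The net effect of this hunk is a feature gate: the KMS path keeps only the DSPARB handling, while everything UMS-specific moves behind a !DRIVER_MODESET check into the new i915_ums.c below. A tiny sketch of that gating shape, with stand-in names (kms_enabled stands in for drm_core_check_feature()):

#include <stdbool.h>
#include <stdio.h>

static bool kms_enabled = true;      /* stands in for drm_core_check_feature() */

static void save_display_reg(void) { puts("saving UMS-only display registers"); }
static void save_vga(void)         { puts("saving VGA state"); }

static void save_display(void)
{
    /* common, KMS-safe state (e.g. DSPARB) would be saved here */
    if (!kms_enabled) {
        save_display_reg();          /* the bulk that moved to i915_ums.c */
        save_vga();
    }
}

int main(void)
{
    save_display();                  /* KMS: skips the legacy registers */
    kms_enabled = false;
    save_display();                  /* UMS: walks the legacy path */
    return 0;
}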
diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c
new file mode 100644
index 000000000000..985a09716237
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_ums.c
@@ -0,0 +1,503 @@
1/*
2 *
3 * Copyright 2008 (c) Intel Corporation
4 * Jesse Barnes <jbarnes@virtuousgeek.org>
5 * Copyright 2013 (c) Intel Corporation
6 * Daniel Vetter <daniel.vetter@ffwll.ch>
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions
18 * of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 */
28
29#include <drm/drmP.h>
30#include <drm/i915_drm.h>
31#include "intel_drv.h"
32#include "i915_reg.h"
33
34static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
35{
36 struct drm_i915_private *dev_priv = dev->dev_private;
37 u32 dpll_reg;
38
39 /* On IVB, 3rd pipe shares PLL with another one */
40 if (pipe > 1)
41 return false;
42
43 if (HAS_PCH_SPLIT(dev))
44 dpll_reg = _PCH_DPLL(pipe);
45 else
46 dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
47
48 return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
49}
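
i915_pipe_enabled() above treats a pipe as live when its DPLL reports VCO enable, and unconditionally reports the PLL-sharing third pipe as disabled since this UMS path never drives it. A small standalone sketch of that test; the bit position and array layout are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DPLL_VCO_ENABLE (1u << 31)                 /* illustrative */

static uint32_t dpll[2] = { DPLL_VCO_ENABLE, 0 };  /* pipes A and B */

static bool pipe_enabled(int pipe)
{
    if (pipe > 1)       /* third pipe shares a PLL; never driven here */
        return false;
    return dpll[pipe] & DPLL_VCO_ENABLE;
}

int main(void)
{
    for (int p = 0; p < 3; p++)
        printf("pipe %d enabled: %d\n", p, pipe_enabled(p));
    return 0;
}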
50
51static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
52{
53 struct drm_i915_private *dev_priv = dev->dev_private;
54 unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
55 u32 *array;
56 int i;
57
58 if (!i915_pipe_enabled(dev, pipe))
59 return;
60
61 if (HAS_PCH_SPLIT(dev))
62 reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
63
64 if (pipe == PIPE_A)
65 array = dev_priv->regfile.save_palette_a;
66 else
67 array = dev_priv->regfile.save_palette_b;
68
69 for (i = 0; i < 256; i++)
70 array[i] = I915_READ(reg + (i << 2));
71}
72
73static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
74{
75 struct drm_i915_private *dev_priv = dev->dev_private;
76 unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
77 u32 *array;
78 int i;
79
80 if (!i915_pipe_enabled(dev, pipe))
81 return;
82
83 if (HAS_PCH_SPLIT(dev))
84 reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
85
86 if (pipe == PIPE_A)
87 array = dev_priv->regfile.save_palette_a;
88 else
89 array = dev_priv->regfile.save_palette_b;
90
91 for (i = 0; i < 256; i++)
92 I915_WRITE(reg + (i << 2), array[i]);
93}
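
The palette loops above walk a 256-entry gamma LUT laid out as consecutive 32-bit registers, so entry i sits at base + (i << 2). A standalone sketch of the same indexing against a fake register block; the offsets and sizes are the only details carried over:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LUT_ENTRIES 256

static uint8_t mmio[LUT_ENTRIES * 4];          /* fake register block */

static uint32_t read32(unsigned int off)
{
    uint32_t v;
    memcpy(&v, mmio + off, sizeof(v));
    return v;
}

static void save_lut(unsigned int base, uint32_t *out)
{
    /* entry i is one 32-bit register at base + (i << 2): 4-byte stride */
    for (int i = 0; i < LUT_ENTRIES; i++)
        out[i] = read32(base + (i << 2));
}

int main(void)
{
    uint32_t saved[LUT_ENTRIES];

    mmio[255 * 4] = 0xff;                      /* low byte of the last entry */
    save_lut(0, saved);
    printf("entry 255 = 0x%08x\n", saved[255]);
    return 0;
}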
94
95void i915_save_display_reg(struct drm_device *dev)
96{
97 struct drm_i915_private *dev_priv = dev->dev_private;
98 int i;
99
100 /* Cursor state */
101 dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
102 dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
103 dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
104 dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
105 dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
106 dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
107 if (IS_GEN2(dev))
108 dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
109
110 if (HAS_PCH_SPLIT(dev)) {
111 dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
112 dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
113 }
114
115 /* Pipe & plane A info */
116 dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
117 dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
118 if (HAS_PCH_SPLIT(dev)) {
119 dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
120 dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
121 dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
122 } else {
123 dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
124 dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
125 dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
126 }
127 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
128 dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
129 dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
130 dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
131 dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
132 dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
133 dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
134 dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
135 if (!HAS_PCH_SPLIT(dev))
136 dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
137
138 if (HAS_PCH_SPLIT(dev)) {
139 dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
140 dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
141 dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
142 dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
143
144 dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
145 dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
146
147 dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
148 dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
149 dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
150
151 dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF);
152 dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
153 dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
154 dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
155 dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
156 dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
157 dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
158 }
159
160 dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
161 dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
162 dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
163 dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
164 dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
165 if (INTEL_INFO(dev)->gen >= 4) {
166 dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
167 dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
168 }
169 i915_save_palette(dev, PIPE_A);
170 dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
171
172 /* Pipe & plane B info */
173 dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
174 dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
175 if (HAS_PCH_SPLIT(dev)) {
176 dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
177 dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
178 dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
179 } else {
180 dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
181 dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
182 dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
183 }
184 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
185 dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
186 dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
187 dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
188 dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
189 dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
190 dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
191 dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
192 if (!HAS_PCH_SPLIT(dev))
193 dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
194
195 if (HAS_PCH_SPLIT(dev)) {
196 dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
197 dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
198 dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
199 dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
200
201 dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
202 dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
203
204 dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
205 dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
206 dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
207
208 dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF);
209 dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
210 dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
211 dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
212 dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
213 dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
214 dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
215 }
216
217 dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
218 dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
219 dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
220 dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
221 dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
222 if (INTEL_INFO(dev)->gen >= 4) {
223 dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
224 dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
225 }
226 i915_save_palette(dev, PIPE_B);
227 dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
228
229 /* Fences */
230 switch (INTEL_INFO(dev)->gen) {
231 case 7:
232 case 6:
233 for (i = 0; i < 16; i++)
234 dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
235 break;
236 case 5:
237 case 4:
238 for (i = 0; i < 16; i++)
239 dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
240 break;
241 case 3:
242 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
243 for (i = 0; i < 8; i++)
244 dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
245 case 2:
246 for (i = 0; i < 8; i++)
247 dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
248 break;
249 }
250
251 /* CRT state */
252 if (HAS_PCH_SPLIT(dev))
253 dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
254 else
255 dev_priv->regfile.saveADPA = I915_READ(ADPA);
256
257 /* Display Port state */
258 if (SUPPORTS_INTEGRATED_DP(dev)) {
259 dev_priv->regfile.saveDP_B = I915_READ(DP_B);
260 dev_priv->regfile.saveDP_C = I915_READ(DP_C);
261 dev_priv->regfile.saveDP_D = I915_READ(DP_D);
262 dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
263 dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
264 dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
265 dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
266 dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
267 dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
268 dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
269 dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
270 }
271 /* FIXME: save TV & SDVO state */
272
273 return;
274}
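
The fence switch above is gen-keyed: newer parts save 16 64-bit fence registers at an 8-byte stride, older parts 8 (or 16 on i945-class hardware) 32-bit fences at a 4-byte stride, and case 3 deliberately falls through into case 2 because the first bank is shared. A sketch of that control flow, collapsing the two 16-fence register bases into one array for brevity:

#include <stdint.h>
#include <stdio.h>

static uint64_t fence_hw[16];        /* stand-in for the MMIO fence bank */
static uint64_t saved[16];

static void save_fences(int gen, int has_second_bank)
{
    int i;

    switch (gen) {
    case 7:
    case 6:
    case 5:
    case 4:
        for (i = 0; i < 16; i++)     /* 64-bit fences, 8-byte stride */
            saved[i] = fence_hw[i];
        break;
    case 3:
        if (has_second_bank)         /* i945G/GM, G33: extra bank of 8 */
            for (i = 0; i < 8; i++)
                saved[i + 8] = fence_hw[i + 8];
        /* deliberate fall-through: first bank is shared with gen2 */
    case 2:
        for (i = 0; i < 8; i++)      /* 32-bit fences, 4-byte stride */
            saved[i] = fence_hw[i];
        break;
    }
}

int main(void)
{
    fence_hw[0] = 0xdeadbeef;
    save_fences(3, 1);
    printf("fence 0 = 0x%llx\n", (unsigned long long)saved[0]);
    return 0;
}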
275
276void i915_restore_display_reg(struct drm_device *dev)
277{
278 struct drm_i915_private *dev_priv = dev->dev_private;
279 int dpll_a_reg, fpa0_reg, fpa1_reg;
280 int dpll_b_reg, fpb0_reg, fpb1_reg;
281 int i;
282
283 /* Display port ratios (must be done before clock is set) */
284 if (SUPPORTS_INTEGRATED_DP(dev)) {
285 I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
286 I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
287 I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
288 I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
289 I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M);
290 I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M);
291 I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N);
292 I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N);
293 }
294
295 /* Fences */
296 switch (INTEL_INFO(dev)->gen) {
297 case 7:
298 case 6:
299 for (i = 0; i < 16; i++)
300 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
301 break;
302 case 5:
303 case 4:
304 for (i = 0; i < 16; i++)
305 I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
306 break;
307 case 3:
308 case 2:
309 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
310 for (i = 0; i < 8; i++)
311 I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
312 for (i = 0; i < 8; i++)
313 I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
314 break;
315 }
316
317
318 if (HAS_PCH_SPLIT(dev)) {
319 dpll_a_reg = _PCH_DPLL_A;
320 dpll_b_reg = _PCH_DPLL_B;
321 fpa0_reg = _PCH_FPA0;
322 fpb0_reg = _PCH_FPB0;
323 fpa1_reg = _PCH_FPA1;
324 fpb1_reg = _PCH_FPB1;
325 } else {
326 dpll_a_reg = _DPLL_A;
327 dpll_b_reg = _DPLL_B;
328 fpa0_reg = _FPA0;
329 fpb0_reg = _FPB0;
330 fpa1_reg = _FPA1;
331 fpb1_reg = _FPB1;
332 }
333
334 if (HAS_PCH_SPLIT(dev)) {
335 I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
336 I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
337 }
338
339 /* Pipe & plane A info */
340 /* Prime the clock */
341 if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
342 I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
343 ~DPLL_VCO_ENABLE);
344 POSTING_READ(dpll_a_reg);
345 udelay(150);
346 }
347 I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
348 I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
349 /* Actually enable it */
350 I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
351 POSTING_READ(dpll_a_reg);
352 udelay(150);
353 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
354 I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
355 POSTING_READ(_DPLL_A_MD);
356 }
357 udelay(150);
358
359 /* Restore mode */
360 I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
361 I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
362 I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
363 I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
364 I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
365 I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
366 if (!HAS_PCH_SPLIT(dev))
367 I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
368
369 if (HAS_PCH_SPLIT(dev)) {
370 I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
371 I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
372 I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
373 I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
374
375 I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
376 I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
377
378 I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
379 I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
380 I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
381
382 I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
383 I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
384 I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
385 I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
386 I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
387 I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
388 I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
389 }
390
391 /* Restore plane info */
392 I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
393 I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
394 I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
395 I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
396 I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
397 if (INTEL_INFO(dev)->gen >= 4) {
398 I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
399 I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
400 }
401
402 I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
403
404 i915_restore_palette(dev, PIPE_A);
405 /* Enable the plane */
406 I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
407 I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
408
409 /* Pipe & plane B info */
410 if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
411 I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
412 ~DPLL_VCO_ENABLE);
413 POSTING_READ(dpll_b_reg);
414 udelay(150);
415 }
416 I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
417 I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
418 /* Actually enable it */
419 I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
420 POSTING_READ(dpll_b_reg);
421 udelay(150);
422 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
423 I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
424 POSTING_READ(_DPLL_B_MD);
425 }
426 udelay(150);
427
428 /* Restore mode */
429 I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
430 I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
431 I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
432 I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
433 I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
434 I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
435 if (!HAS_PCH_SPLIT(dev))
436 I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
437
438 if (HAS_PCH_SPLIT(dev)) {
439 I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
440 I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
441 I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
442 I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
443
444 I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
445 I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
446
447 I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
448 I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
449 I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
450
451 I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
452 I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
453 I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
454 I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
455 I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
456 I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
457 I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
458 }
459
460 /* Restore plane info */
461 I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
462 I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
463 I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
464 I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
465 I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
466 if (INTEL_INFO(dev)->gen >= 4) {
467 I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
468 I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
469 }
470
471 I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
472
473 i915_restore_palette(dev, PIPE_B);
474 /* Enable the plane */
475 I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
476 I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
477
478 /* Cursor state */
479 I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
480 I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
481 I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
482 I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
483 I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
484 I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
485 if (IS_GEN2(dev))
486 I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
487
488 /* CRT state */
489 if (HAS_PCH_SPLIT(dev))
490 I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
491 else
492 I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
493
494 /* Display Port state */
495 if (SUPPORTS_INTEGRATED_DP(dev)) {
496 I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
497 I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
498 I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
499 }
500 /* FIXME: restore TV & SDVO state */
501
502 return;
503}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 9293878ec7eb..969d08c72d10 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -267,27 +267,27 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
267 267
268 crt->force_hotplug_required = 0; 268 crt->force_hotplug_required = 0;
269 269
270 save_adpa = adpa = I915_READ(PCH_ADPA); 270 save_adpa = adpa = I915_READ(crt->adpa_reg);
271 DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); 271 DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
272 272
273 adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER; 273 adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
274 if (turn_off_dac) 274 if (turn_off_dac)
275 adpa &= ~ADPA_DAC_ENABLE; 275 adpa &= ~ADPA_DAC_ENABLE;
276 276
277 I915_WRITE(PCH_ADPA, adpa); 277 I915_WRITE(crt->adpa_reg, adpa);
278 278
279 if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, 279 if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
280 1000)) 280 1000))
281 DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); 281 DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
282 282
283 if (turn_off_dac) { 283 if (turn_off_dac) {
284 I915_WRITE(PCH_ADPA, save_adpa); 284 I915_WRITE(crt->adpa_reg, save_adpa);
285 POSTING_READ(PCH_ADPA); 285 POSTING_READ(crt->adpa_reg);
286 } 286 }
287 } 287 }
288 288
289 /* Check the status to see if both blue and green are on now */ 289 /* Check the status to see if both blue and green are on now */
290 adpa = I915_READ(PCH_ADPA); 290 adpa = I915_READ(crt->adpa_reg);
291 if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0) 291 if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
292 ret = true; 292 ret = true;
293 else 293 else
@@ -300,26 +300,27 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
300static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) 300static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
301{ 301{
302 struct drm_device *dev = connector->dev; 302 struct drm_device *dev = connector->dev;
303 struct intel_crt *crt = intel_attached_crt(connector);
303 struct drm_i915_private *dev_priv = dev->dev_private; 304 struct drm_i915_private *dev_priv = dev->dev_private;
304 u32 adpa; 305 u32 adpa;
305 bool ret; 306 bool ret;
306 u32 save_adpa; 307 u32 save_adpa;
307 308
308 save_adpa = adpa = I915_READ(ADPA); 309 save_adpa = adpa = I915_READ(crt->adpa_reg);
309 DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); 310 DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
310 311
311 adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER; 312 adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
312 313
313 I915_WRITE(ADPA, adpa); 314 I915_WRITE(crt->adpa_reg, adpa);
314 315
315 if (wait_for((I915_READ(ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, 316 if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
316 1000)) { 317 1000)) {
317 DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); 318 DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
318 I915_WRITE(ADPA, save_adpa); 319 I915_WRITE(crt->adpa_reg, save_adpa);
319 } 320 }
320 321
321 /* Check the status to see if both blue and green are on now */ 322 /* Check the status to see if both blue and green are on now */
322 adpa = I915_READ(ADPA); 323 adpa = I915_READ(crt->adpa_reg);
323 if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0) 324 if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
324 ret = true; 325 ret = true;
325 else 326 else
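
Both hotplug paths above use the same handshake: set the self-clearing ADPA_CRT_HOTPLUG_FORCE_TRIGGER bit, poll (bounded by a timeout) until the hardware clears it, then sample the monitor-sense bits. A standalone sketch with simulated hardware; the bit positions are illustrative, not the real ADPA layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FORCE_TRIGGER (1u << 16)     /* assumed bit positions */
#define MONITOR_MASK  (3u << 24)

static uint32_t adpa;
static int cycles_left = 3;          /* fake hardware finishes in 3 reads */

static uint32_t read_adpa(void)
{
    if ((adpa & FORCE_TRIGGER) && --cycles_left == 0) {
        adpa &= ~FORCE_TRIGGER;      /* hardware clears the trigger... */
        adpa |= 1u << 24;            /* ...and latches a sensed monitor */
    }
    return adpa;
}

static bool crt_detect(void)
{
    int tries;

    adpa |= FORCE_TRIGGER;           /* kick off a detect cycle */
    for (tries = 0; tries < 1000; tries++)
        if (!(read_adpa() & FORCE_TRIGGER))
            break;
    if (tries == 1000)
        puts("timed out waiting for FORCE_TRIGGER");

    return (read_adpa() & MONITOR_MASK) != 0;
}

int main(void)
{
    printf("connected: %d\n", crt_detect());
    return 0;
}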
@@ -665,11 +666,11 @@ static void intel_crt_reset(struct drm_connector *connector)
665 if (HAS_PCH_SPLIT(dev)) { 666 if (HAS_PCH_SPLIT(dev)) {
666 u32 adpa; 667 u32 adpa;
667 668
668 adpa = I915_READ(PCH_ADPA); 669 adpa = I915_READ(crt->adpa_reg);
669 adpa &= ~ADPA_CRT_HOTPLUG_MASK; 670 adpa &= ~ADPA_CRT_HOTPLUG_MASK;
670 adpa |= ADPA_HOTPLUG_BITS; 671 adpa |= ADPA_HOTPLUG_BITS;
671 I915_WRITE(PCH_ADPA, adpa); 672 I915_WRITE(crt->adpa_reg, adpa);
672 POSTING_READ(PCH_ADPA); 673 POSTING_READ(crt->adpa_reg);
673 674
674 DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa); 675 DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
675 crt->force_hotplug_required = 1; 676 crt->force_hotplug_required = 1;
@@ -684,7 +685,6 @@ static void intel_crt_reset(struct drm_connector *connector)
684static const struct drm_encoder_helper_funcs crt_encoder_funcs = { 685static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
685 .mode_fixup = intel_crt_mode_fixup, 686 .mode_fixup = intel_crt_mode_fixup,
686 .mode_set = intel_crt_mode_set, 687 .mode_set = intel_crt_mode_set,
687 .disable = intel_encoder_noop,
688}; 688};
689 689
690static const struct drm_connector_funcs intel_crt_connector_funcs = { 690static const struct drm_connector_funcs intel_crt_connector_funcs = {
@@ -776,7 +776,7 @@ void intel_crt_init(struct drm_device *dev)
776 776
777 crt->base.disable = intel_disable_crt; 777 crt->base.disable = intel_disable_crt;
778 crt->base.enable = intel_enable_crt; 778 crt->base.enable = intel_enable_crt;
779 if (IS_HASWELL(dev)) 779 if (HAS_DDI(dev))
780 crt->base.get_hw_state = intel_ddi_get_hw_state; 780 crt->base.get_hw_state = intel_ddi_get_hw_state;
781 else 781 else
782 crt->base.get_hw_state = intel_crt_get_hw_state; 782 crt->base.get_hw_state = intel_crt_get_hw_state;
@@ -800,10 +800,14 @@ void intel_crt_init(struct drm_device *dev)
800 dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; 800 dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
801 801
802 /* 802 /*
803 * TODO: find a proper way to discover whether we need to set the 803 * TODO: find a proper way to discover whether we need to set the
804 * polarity reversal bit or not, instead of relying on the BIOS. 804 * polarity and link reversal bits or not, instead of relying on the
805 * BIOS.
805 */ 806 */
806 if (HAS_PCH_LPT(dev)) 807 if (HAS_PCH_LPT(dev)) {
807 dev_priv->fdi_rx_polarity_reversed = 808 u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
808 !!(I915_READ(_FDI_RXA_CTL) & FDI_RX_POLARITY_REVERSED_LPT); 809 FDI_RX_LINK_REVERSAL_OVERRIDE;
810
811 dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) & fdi_config;
812 }
809} 813}
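
The LPT change above replaces a single cached polarity flag with a snapshot of both BIOS-programmed reversal bits, which later FDI writes OR back in rather than guessing the board wiring. A sketch of that preserve-then-reapply pattern; the bit values are illustrative stand-ins, and the (lanes - 1) << 19 field mirrors the lane count encoding in the hunk below:

#include <stdint.h>
#include <stdio.h>

#define POLARITY_REVERSED (1u << 4)  /* assumed bit positions */
#define LINK_REVERSAL     (1u << 15)
#define RX_PLL_ENABLE     (1u << 13)

static uint32_t fdi_rx_ctl = POLARITY_REVERSED;   /* as "BIOS" left it */
static uint32_t fdi_rx_config;                    /* driver's snapshot */

static void snapshot_bios_config(void)
{
    fdi_rx_config = fdi_rx_ctl & (POLARITY_REVERSED | LINK_REVERSAL);
}

static void enable_rx_pll(int lanes)
{
    /* carry the snapshot forward instead of one hard-coded flag */
    fdi_rx_ctl = fdi_rx_config | RX_PLL_ENABLE | ((lanes - 1) << 19);
}

int main(void)
{
    snapshot_bios_config();
    enable_rx_pll(2);
    printf("FDI_RX_CTL = 0x%08x\n", fdi_rx_ctl);
    return 0;
}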
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 4bad0f724019..d64af5aa4a1c 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -84,7 +84,8 @@ static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
84 * in either FDI or DP modes only, as HDMI connections will work with both 84 * in either FDI or DP modes only, as HDMI connections will work with both
85 * of those 85 * of those
86 */ 86 */
87void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, bool use_fdi_mode) 87static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
88 bool use_fdi_mode)
88{ 89{
89 struct drm_i915_private *dev_priv = dev->dev_private; 90 struct drm_i915_private *dev_priv = dev->dev_private;
90 u32 reg; 91 u32 reg;
@@ -114,16 +115,17 @@ void intel_prepare_ddi(struct drm_device *dev)
114{ 115{
115 int port; 116 int port;
116 117
117 if (IS_HASWELL(dev)) { 118 if (!HAS_DDI(dev))
118 for (port = PORT_A; port < PORT_E; port++) 119 return;
119 intel_prepare_ddi_buffers(dev, port, false);
120 120
121 /* DDI E is the suggested one to work in FDI mode, so program it as such by 121 for (port = PORT_A; port < PORT_E; port++)
122 * default. It will have to be re-programmed in case a digital DP output 122 intel_prepare_ddi_buffers(dev, port, false);
123 * will be detected on it 123
124 */ 124 /* DDI E is the suggested one to work in FDI mode, so program it as such
125 intel_prepare_ddi_buffers(dev, PORT_E, true); 125 * by default. It will have to be re-programmed in case a digital DP
126 } 126 * output will be detected on it
127 */
128 intel_prepare_ddi_buffers(dev, PORT_E, true);
127} 129}
128 130
129static const long hsw_ddi_buf_ctl_values[] = { 131static const long hsw_ddi_buf_ctl_values[] = {
@@ -178,10 +180,8 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
178 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); 180 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
179 181
180 /* Enable the PCH Receiver FDI PLL */ 182 /* Enable the PCH Receiver FDI PLL */
181 rx_ctl_val = FDI_RX_PLL_ENABLE | FDI_RX_ENHANCE_FRAME_ENABLE | 183 rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
182 ((intel_crtc->fdi_lanes - 1) << 19); 184 FDI_RX_PLL_ENABLE | ((intel_crtc->fdi_lanes - 1) << 19);
183 if (dev_priv->fdi_rx_polarity_reversed)
184 rx_ctl_val |= FDI_RX_POLARITY_REVERSED_LPT;
185 I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); 185 I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
186 POSTING_READ(_FDI_RXA_CTL); 186 POSTING_READ(_FDI_RXA_CTL);
187 udelay(220); 187 udelay(220);
@@ -203,7 +203,10 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
203 DP_TP_CTL_LINK_TRAIN_PAT1 | 203 DP_TP_CTL_LINK_TRAIN_PAT1 |
204 DP_TP_CTL_ENABLE); 204 DP_TP_CTL_ENABLE);
205 205
206 /* Configure and enable DDI_BUF_CTL for DDI E with next voltage */ 206 /* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
207 * DDI E does not support port reversal; the functionality is
208 * achieved on the PCH side in FDI_RX_CTL, so no need to set the
209 * port reversal bit */
207 I915_WRITE(DDI_BUF_CTL(PORT_E), 210 I915_WRITE(DDI_BUF_CTL(PORT_E),
208 DDI_BUF_CTL_ENABLE | 211 DDI_BUF_CTL_ENABLE |
209 ((intel_crtc->fdi_lanes - 1) << 1) | 212 ((intel_crtc->fdi_lanes - 1) << 1) |
@@ -675,10 +678,14 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
675 DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n", 678 DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n",
676 port_name(port), pipe_name(pipe)); 679 port_name(port), pipe_name(pipe));
677 680
681 intel_crtc->eld_vld = false;
678 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { 682 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
679 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 683 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
684 struct intel_digital_port *intel_dig_port =
685 enc_to_dig_port(encoder);
680 686
681 intel_dp->DP = DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; 687 intel_dp->DP = intel_dig_port->port_reversal |
688 DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
682 switch (intel_dp->lane_count) { 689 switch (intel_dp->lane_count) {
683 case 1: 690 case 1:
684 intel_dp->DP |= DDI_PORT_WIDTH_X1; 691 intel_dp->DP |= DDI_PORT_WIDTH_X1;
@@ -985,7 +992,13 @@ void intel_ddi_enable_pipe_func(struct drm_crtc *crtc)
985 if (cpu_transcoder == TRANSCODER_EDP) { 992 if (cpu_transcoder == TRANSCODER_EDP) {
986 switch (pipe) { 993 switch (pipe) {
987 case PIPE_A: 994 case PIPE_A:
988 temp |= TRANS_DDI_EDP_INPUT_A_ONOFF; 995 /* Can only use the always-on power well for eDP when
996 * not using the panel fitter, and when not using motion
997 * blur mitigation (which we don't support). */
998 if (dev_priv->pch_pf_size)
999 temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
1000 else
1001 temp |= TRANS_DDI_EDP_INPUT_A_ON;
989 break; 1002 break;
990 case PIPE_B: 1003 case PIPE_B:
991 temp |= TRANS_DDI_EDP_INPUT_B_ONOFF; 1004 temp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
@@ -1069,7 +1082,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
1069 if (port == PORT_A) 1082 if (port == PORT_A)
1070 cpu_transcoder = TRANSCODER_EDP; 1083 cpu_transcoder = TRANSCODER_EDP;
1071 else 1084 else
1072 cpu_transcoder = pipe; 1085 cpu_transcoder = (enum transcoder) pipe;
1073 1086
1074 tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); 1087 tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
1075 1088
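
The cast added above reflects the Haswell transcoder mapping: DDI port A always drives the dedicated eDP transcoder, while every other port uses the transcoder matching its pipe one-to-one. A small sketch of that mapping with locally defined enums:

#include <stdio.h>

enum transcoder { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C, TRANSCODER_EDP };
enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E };

static enum transcoder transcoder_for(enum port port, int pipe)
{
    if (port == PORT_A)
        return TRANSCODER_EDP;      /* port A rides the eDP transcoder */
    return (enum transcoder)pipe;   /* otherwise transcoder N == pipe N */
}

int main(void)
{
    printf("port A, pipe 0 -> transcoder %d\n", transcoder_for(PORT_A, 0));
    printf("port B, pipe 1 -> transcoder %d\n", transcoder_for(PORT_B, 1));
    return 0;
}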
@@ -1285,34 +1298,58 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
1285static void intel_enable_ddi(struct intel_encoder *intel_encoder) 1298static void intel_enable_ddi(struct intel_encoder *intel_encoder)
1286{ 1299{
1287 struct drm_encoder *encoder = &intel_encoder->base; 1300 struct drm_encoder *encoder = &intel_encoder->base;
1301 struct drm_crtc *crtc = encoder->crtc;
1302 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1303 int pipe = intel_crtc->pipe;
1288 struct drm_device *dev = encoder->dev; 1304 struct drm_device *dev = encoder->dev;
1289 struct drm_i915_private *dev_priv = dev->dev_private; 1305 struct drm_i915_private *dev_priv = dev->dev_private;
1290 enum port port = intel_ddi_get_encoder_port(intel_encoder); 1306 enum port port = intel_ddi_get_encoder_port(intel_encoder);
1291 int type = intel_encoder->type; 1307 int type = intel_encoder->type;
1308 uint32_t tmp;
1292 1309
1293 if (type == INTEL_OUTPUT_HDMI) { 1310 if (type == INTEL_OUTPUT_HDMI) {
1311 struct intel_digital_port *intel_dig_port =
1312 enc_to_dig_port(encoder);
1313
1294 /* In HDMI/DVI mode, the port width, and swing/emphasis values 1314 /* In HDMI/DVI mode, the port width, and swing/emphasis values
1295 * are ignored so nothing special needs to be done besides 1315 * are ignored so nothing special needs to be done besides
1296 * enabling the port. 1316 * enabling the port.
1297 */ 1317 */
1298 I915_WRITE(DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE); 1318 I915_WRITE(DDI_BUF_CTL(port),
1319 intel_dig_port->port_reversal | DDI_BUF_CTL_ENABLE);
1299 } else if (type == INTEL_OUTPUT_EDP) { 1320 } else if (type == INTEL_OUTPUT_EDP) {
1300 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1321 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1301 1322
1302 ironlake_edp_backlight_on(intel_dp); 1323 ironlake_edp_backlight_on(intel_dp);
1303 } 1324 }
1325
1326 if (intel_crtc->eld_vld) {
1327 tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
1328 tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
1329 I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
1330 }
1304} 1331}
1305 1332
1306static void intel_disable_ddi(struct intel_encoder *intel_encoder) 1333static void intel_disable_ddi(struct intel_encoder *intel_encoder)
1307{ 1334{
1308 struct drm_encoder *encoder = &intel_encoder->base; 1335 struct drm_encoder *encoder = &intel_encoder->base;
1336 struct drm_crtc *crtc = encoder->crtc;
1337 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1338 int pipe = intel_crtc->pipe;
1309 int type = intel_encoder->type; 1339 int type = intel_encoder->type;
1340 struct drm_device *dev = encoder->dev;
1341 struct drm_i915_private *dev_priv = dev->dev_private;
1342 uint32_t tmp;
1310 1343
1311 if (type == INTEL_OUTPUT_EDP) { 1344 if (type == INTEL_OUTPUT_EDP) {
1312 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1345 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1313 1346
1314 ironlake_edp_backlight_off(intel_dp); 1347 ironlake_edp_backlight_off(intel_dp);
1315 } 1348 }
1349
1350 tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
1351 tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
1352 I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
1316} 1353}
1317 1354
1318int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv) 1355int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
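
The audio enable/disable added above packs each pipe's output-enable and ELD-valid bits into a 4-bit-wide per-pipe slot of HSW_AUD_PIN_ELD_CP_VLD, hence the << (pipe * 4). A sketch of that packing; the two bit positions below are assumed values, not the documented register layout:

#include <stdint.h>
#include <stdio.h>

#define AUDIO_OUTPUT_ENABLE_A (1u << 2)   /* assumed values */
#define AUDIO_ELD_VALID_A     (1u << 0)

static uint32_t eld_cp_vld;               /* fake HSW_AUD_PIN_ELD_CP_VLD */

static void pipe_audio(int pipe, int on)
{
    uint32_t bits = (AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4);

    if (on)
        eld_cp_vld |= bits;
    else
        eld_cp_vld &= ~bits;
}

int main(void)
{
    pipe_audio(1, 1);                     /* enable on pipe B */
    printf("reg = 0x%08x\n", eld_cp_vld); /* 0x00000050 */
    pipe_audio(1, 0);
    printf("reg = 0x%08x\n", eld_cp_vld); /* back to 0 */
    return 0;
}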
@@ -1452,11 +1489,11 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
1452static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = { 1489static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
1453 .mode_fixup = intel_ddi_mode_fixup, 1490 .mode_fixup = intel_ddi_mode_fixup,
1454 .mode_set = intel_ddi_mode_set, 1491 .mode_set = intel_ddi_mode_set,
1455 .disable = intel_encoder_noop,
1456}; 1492};
1457 1493
1458void intel_ddi_init(struct drm_device *dev, enum port port) 1494void intel_ddi_init(struct drm_device *dev, enum port port)
1459{ 1495{
1496 struct drm_i915_private *dev_priv = dev->dev_private;
1460 struct intel_digital_port *intel_dig_port; 1497 struct intel_digital_port *intel_dig_port;
1461 struct intel_encoder *intel_encoder; 1498 struct intel_encoder *intel_encoder;
1462 struct drm_encoder *encoder; 1499 struct drm_encoder *encoder;
@@ -1497,6 +1534,8 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
1497 intel_encoder->get_hw_state = intel_ddi_get_hw_state; 1534 intel_encoder->get_hw_state = intel_ddi_get_hw_state;
1498 1535
1499 intel_dig_port->port = port; 1536 intel_dig_port->port = port;
1537 intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
1538 DDI_BUF_PORT_REVERSAL;
1500 if (hdmi_connector) 1539 if (hdmi_connector)
1501 intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port); 1540 intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port);
1502 else 1541 else
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index da1ad9c80bb5..a05ac2c91ba2 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -154,8 +154,8 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
154 .vco = { .min = 1400000, .max = 2800000 }, 154 .vco = { .min = 1400000, .max = 2800000 },
155 .n = { .min = 1, .max = 6 }, 155 .n = { .min = 1, .max = 6 },
156 .m = { .min = 70, .max = 120 }, 156 .m = { .min = 70, .max = 120 },
157 .m1 = { .min = 10, .max = 22 }, 157 .m1 = { .min = 8, .max = 18 },
158 .m2 = { .min = 5, .max = 9 }, 158 .m2 = { .min = 3, .max = 7 },
159 .p = { .min = 5, .max = 80 }, 159 .p = { .min = 5, .max = 80 },
160 .p1 = { .min = 1, .max = 8 }, 160 .p1 = { .min = 1, .max = 8 },
161 .p2 = { .dot_limit = 200000, 161 .p2 = { .dot_limit = 200000,
@@ -168,8 +168,8 @@ static const intel_limit_t intel_limits_i9xx_lvds = {
168 .vco = { .min = 1400000, .max = 2800000 }, 168 .vco = { .min = 1400000, .max = 2800000 },
169 .n = { .min = 1, .max = 6 }, 169 .n = { .min = 1, .max = 6 },
170 .m = { .min = 70, .max = 120 }, 170 .m = { .min = 70, .max = 120 },
171 .m1 = { .min = 10, .max = 22 }, 171 .m1 = { .min = 8, .max = 18 },
172 .m2 = { .min = 5, .max = 9 }, 172 .m2 = { .min = 3, .max = 7 },
173 .p = { .min = 7, .max = 98 }, 173 .p = { .min = 7, .max = 98 },
174 .p1 = { .min = 1, .max = 8 }, 174 .p1 = { .min = 1, .max = 8 },
175 .p2 = { .dot_limit = 112000, 175 .p2 = { .dot_limit = 112000,
@@ -416,13 +416,11 @@ static const intel_limit_t intel_limits_vlv_dp = {
416 416
417u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg) 417u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
418{ 418{
419 unsigned long flags; 419 WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
420 u32 val = 0;
421 420
422 spin_lock_irqsave(&dev_priv->dpio_lock, flags);
423 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { 421 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
424 DRM_ERROR("DPIO idle wait timed out\n"); 422 DRM_ERROR("DPIO idle wait timed out\n");
425 goto out_unlock; 423 return 0;
426 } 424 }
427 425
428 I915_WRITE(DPIO_REG, reg); 426 I915_WRITE(DPIO_REG, reg);
@@ -430,24 +428,20 @@ u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
430 DPIO_BYTE); 428 DPIO_BYTE);
431 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { 429 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
432 DRM_ERROR("DPIO read wait timed out\n"); 430 DRM_ERROR("DPIO read wait timed out\n");
433 goto out_unlock; 431 return 0;
434 } 432 }
435 val = I915_READ(DPIO_DATA);
436 433
437out_unlock: 434 return I915_READ(DPIO_DATA);
438 spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
439 return val;
440} 435}
441 436
442static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg, 437static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
443 u32 val) 438 u32 val)
444{ 439{
445 unsigned long flags; 440 WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
446 441
447 spin_lock_irqsave(&dev_priv->dpio_lock, flags);
448 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { 442 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
449 DRM_ERROR("DPIO idle wait timed out\n"); 443 DRM_ERROR("DPIO idle wait timed out\n");
450 goto out_unlock; 444 return;
451 } 445 }
452 446
453 I915_WRITE(DPIO_DATA, val); 447 I915_WRITE(DPIO_DATA, val);
@@ -456,9 +450,6 @@ static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
456 DPIO_BYTE); 450 DPIO_BYTE);
457 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) 451 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
458 DRM_ERROR("DPIO write wait timed out\n"); 452 DRM_ERROR("DPIO write wait timed out\n");
459
460out_unlock:
461 spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
462} 453}
463 454
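
The conversion above is the heart of this hunk: the DPIO accessors no longer take a spinlock internally, they assert that the caller already holds dpio_lock (now a mutex), and the SBI accessors further down receive the same treatment. A kernel-style sketch of the pattern, with the surrounding structure and helper names invented for illustration:

    #include <linux/bug.h>
    #include <linux/mutex.h>
    #include <linux/types.h>

    struct example_priv {
            struct mutex dpio_lock;   /* was a spinlock; now a mutex */
    };

    /* Accessor: only asserts the lock, never takes it. */
    static u32 example_locked_read(struct example_priv *priv)
    {
            WARN_ON(!mutex_is_locked(&priv->dpio_lock));
            /* ... multi-step hardware access ... */
            return 0;
    }

    /* Call site: one critical section around the whole transaction. */
    static void example_transaction(struct example_priv *priv)
    {
            mutex_lock(&priv->dpio_lock);
            example_locked_read(priv);
            /* ... more accesses that must not interleave ... */
            mutex_unlock(&priv->dpio_lock);
    }

Pushing lock ownership out to the caller is what lets multi-register sequences such as vlv_update_pll and lpt_program_iclkip below wrap a whole transaction in a single critical section, which the old per-access spinlock could not guarantee.
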
464static void vlv_init_dpio(struct drm_device *dev) 455static void vlv_init_dpio(struct drm_device *dev)
@@ -472,61 +463,14 @@ static void vlv_init_dpio(struct drm_device *dev)
472 POSTING_READ(DPIO_CTL); 463 POSTING_READ(DPIO_CTL);
473} 464}
474 465
475static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
476{
477 DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
478 return 1;
479}
480
481static const struct dmi_system_id intel_dual_link_lvds[] = {
482 {
483 .callback = intel_dual_link_lvds_callback,
484 .ident = "Apple MacBook Pro (Core i5/i7 Series)",
485 .matches = {
486 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
487 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
488 },
489 },
490 { } /* terminating entry */
491};
492
493static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
494 unsigned int reg)
495{
496 unsigned int val;
497
498 /* use the module option value if specified */
499 if (i915_lvds_channel_mode > 0)
500 return i915_lvds_channel_mode == 2;
501
502 if (dmi_check_system(intel_dual_link_lvds))
503 return true;
504
505 if (dev_priv->lvds_val)
506 val = dev_priv->lvds_val;
507 else {
508 /* BIOS should set the proper LVDS register value at boot, but
509 * in reality, it doesn't set the value when the lid is closed;
510 * we need to check "the value to be set" in VBT when LVDS
511 * register is uninitialized.
512 */
513 val = I915_READ(reg);
514 if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
515 val = dev_priv->bios_lvds_val;
516 dev_priv->lvds_val = val;
517 }
518 return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
519}
520
521static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, 466static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
522 int refclk) 467 int refclk)
523{ 468{
524 struct drm_device *dev = crtc->dev; 469 struct drm_device *dev = crtc->dev;
525 struct drm_i915_private *dev_priv = dev->dev_private;
526 const intel_limit_t *limit; 470 const intel_limit_t *limit;
527 471
528 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 472 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
529 if (is_dual_link_lvds(dev_priv, PCH_LVDS)) { 473 if (intel_is_dual_link_lvds(dev)) {
530 /* LVDS dual channel */ 474 /* LVDS dual channel */
531 if (refclk == 100000) 475 if (refclk == 100000)
532 limit = &intel_limits_ironlake_dual_lvds_100m; 476 limit = &intel_limits_ironlake_dual_lvds_100m;
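
The hunk above removes is_dual_link_lvds and its DMI quirk table from intel_display.c; callers switch to intel_is_dual_link_lvds, which presumably moves in with the rest of the LVDS code in this series. The removed helper encodes a clear precedence: explicit module option first, then platform quirk, then the probed register with a firmware (VBT) fallback. A runnable sketch of that cascade; all names and the channel bit are illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    /* Decision cascade modeled on the removed helper: an explicit
     * option wins, then a known platform quirk, then the probed
     * register, falling back to the firmware-intended value when
     * the register was never programmed. */
    static bool is_dual_link(int option,      /* 0 auto, 1 single, 2 dual */
                             bool quirk,      /* platform needs dual link */
                             unsigned reg,    /* probed value, 0 if unset */
                             unsigned fw_val) /* firmware-intended value */
    {
            if (option > 0)
                    return option == 2;
            if (quirk)
                    return true;
            if (reg == 0)
                    reg = fw_val;
            return (reg & 0x40) != 0; /* illustrative dual-channel bit */
    }

    int main(void)
    {
            /* no option, no quirk, blank register: firmware decides */
            printf("%d\n", is_dual_link(0, false, 0, 0x40)); /* 1 */
            return 0;
    }
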
@@ -550,11 +494,10 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
550static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) 494static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
551{ 495{
552 struct drm_device *dev = crtc->dev; 496 struct drm_device *dev = crtc->dev;
553 struct drm_i915_private *dev_priv = dev->dev_private;
554 const intel_limit_t *limit; 497 const intel_limit_t *limit;
555 498
556 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 499 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
557 if (is_dual_link_lvds(dev_priv, LVDS)) 500 if (intel_is_dual_link_lvds(dev))
558 /* LVDS with dual channel */ 501 /* LVDS with dual channel */
559 limit = &intel_limits_g4x_dual_channel_lvds; 502 limit = &intel_limits_g4x_dual_channel_lvds;
560 else 503 else
@@ -686,19 +629,16 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
686 629
687{ 630{
688 struct drm_device *dev = crtc->dev; 631 struct drm_device *dev = crtc->dev;
689 struct drm_i915_private *dev_priv = dev->dev_private;
690 intel_clock_t clock; 632 intel_clock_t clock;
691 int err = target; 633 int err = target;
692 634
693 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 635 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
694 (I915_READ(LVDS)) != 0) {
695 /* 636 /*
696 * For LVDS, if the panel is on, just rely on its current 637 * For LVDS just rely on its current settings for dual-channel.
697 * settings for dual-channel. We haven't figured out how to 638 * We haven't figured out how to reliably set up different
698 * reliably set up different single/dual channel state, if we 639 * single/dual channel state, if we even can.
699 * even can.
700 */ 640 */
701 if (is_dual_link_lvds(dev_priv, LVDS)) 641 if (intel_is_dual_link_lvds(dev))
702 clock.p2 = limit->p2.p2_fast; 642 clock.p2 = limit->p2.p2_fast;
703 else 643 else
704 clock.p2 = limit->p2.p2_slow; 644 clock.p2 = limit->p2.p2_slow;
@@ -751,7 +691,6 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
751 intel_clock_t *best_clock) 691 intel_clock_t *best_clock)
752{ 692{
753 struct drm_device *dev = crtc->dev; 693 struct drm_device *dev = crtc->dev;
754 struct drm_i915_private *dev_priv = dev->dev_private;
755 intel_clock_t clock; 694 intel_clock_t clock;
756 int max_n; 695 int max_n;
757 bool found; 696 bool found;
@@ -766,8 +705,7 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
766 lvds_reg = PCH_LVDS; 705 lvds_reg = PCH_LVDS;
767 else 706 else
768 lvds_reg = LVDS; 707 lvds_reg = LVDS;
769 if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) == 708 if (intel_is_dual_link_lvds(dev))
770 LVDS_CLKB_POWER_UP)
771 clock.p2 = limit->p2.p2_fast; 709 clock.p2 = limit->p2.p2_fast;
772 else 710 else
773 clock.p2 = limit->p2.p2_slow; 711 clock.p2 = limit->p2.p2_slow;
@@ -1047,6 +985,51 @@ void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
1047 } 985 }
1048} 986}
1049 987
 988/**
989 * ibx_digital_port_connected - is the specified port connected?
990 * @dev_priv: i915 private structure
991 * @port: the port to test
992 *
993 * Returns true if @port is connected, false otherwise.
994 */
995bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
996 struct intel_digital_port *port)
997{
998 u32 bit;
999
1000 if (HAS_PCH_IBX(dev_priv->dev)) {
1001 switch(port->port) {
1002 case PORT_B:
1003 bit = SDE_PORTB_HOTPLUG;
1004 break;
1005 case PORT_C:
1006 bit = SDE_PORTC_HOTPLUG;
1007 break;
1008 case PORT_D:
1009 bit = SDE_PORTD_HOTPLUG;
1010 break;
1011 default:
1012 return true;
1013 }
1014 } else {
1015 switch(port->port) {
1016 case PORT_B:
1017 bit = SDE_PORTB_HOTPLUG_CPT;
1018 break;
1019 case PORT_C:
1020 bit = SDE_PORTC_HOTPLUG_CPT;
1021 break;
1022 case PORT_D:
1023 bit = SDE_PORTD_HOTPLUG_CPT;
1024 break;
1025 default:
1026 return true;
1027 }
1028 }
1029
1030 return I915_READ(SDEISR) & bit;
1031}
1032
1050static const char *state_string(bool enabled) 1033static const char *state_string(bool enabled)
1051{ 1034{
1052 return enabled ? "on" : "off"; 1035 return enabled ? "on" : "off";
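
The new ibx_digital_port_connected maps a digital port to its south-display hot-plug bit, using different layouts for IBX and CPT-style PCHs, and answers with a single SDEISR read; unknown ports are conservatively reported as connected. A runnable model of the same mapping, with the bit positions invented for illustration:

    #include <stdbool.h>
    #include <stdint.h>

    enum port { PORT_A, PORT_B, PORT_C, PORT_D };

    /* Map a port to its hotplug status bit; ports without one are
     * reported connected, matching the helper's default case. */
    static bool port_connected(uint32_t isr, enum port p, bool is_ibx)
    {
            uint32_t bit;

            switch (p) {
            case PORT_B: bit = is_ibx ? (1u << 8)  : (1u << 21); break;
            case PORT_C: bit = is_ibx ? (1u << 9)  : (1u << 22); break;
            case PORT_D: bit = is_ibx ? (1u << 10) : (1u << 23); break;
            default:     return true;
            }
            return (isr & bit) != 0;
    }

    int main(void)
    {
            uint32_t isr = 1u << 9; /* pretend port C (IBX layout) is live */
            return port_connected(isr, PORT_C, true) ? 0 : 1;
    }
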
@@ -1125,8 +1108,8 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1125 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1108 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1126 pipe); 1109 pipe);
1127 1110
1128 if (IS_HASWELL(dev_priv->dev)) { 1111 if (HAS_DDI(dev_priv->dev)) {
1129 /* On Haswell, DDI is used instead of FDI_TX_CTL */ 1112 /* DDI does not have a specific FDI_TX register */
1130 reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); 1113 reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
1131 val = I915_READ(reg); 1114 val = I915_READ(reg);
1132 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); 1115 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
@@ -1170,7 +1153,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1170 return; 1153 return;
1171 1154
1172 /* On Haswell, DDI ports are responsible for the FDI PLL setup */ 1155 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1173 if (IS_HASWELL(dev_priv->dev)) 1156 if (HAS_DDI(dev_priv->dev))
1174 return; 1157 return;
1175 1158
1176 reg = FDI_TX_CTL(pipe); 1159 reg = FDI_TX_CTL(pipe);
@@ -1231,9 +1214,15 @@ void assert_pipe(struct drm_i915_private *dev_priv,
1231 if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) 1214 if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
1232 state = true; 1215 state = true;
1233 1216
1234 reg = PIPECONF(cpu_transcoder); 1217 if (IS_HASWELL(dev_priv->dev) && cpu_transcoder != TRANSCODER_EDP &&
1235 val = I915_READ(reg); 1218 !(I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_ENABLE)) {
1236 cur_state = !!(val & PIPECONF_ENABLE); 1219 cur_state = false;
1220 } else {
1221 reg = PIPECONF(cpu_transcoder);
1222 val = I915_READ(reg);
1223 cur_state = !!(val & PIPECONF_ENABLE);
1224 }
1225
1237 WARN(cur_state != state, 1226 WARN(cur_state != state,
1238 "pipe %c assertion failure (expected %s, current %s)\n", 1227 "pipe %c assertion failure (expected %s, current %s)\n",
1239 pipe_name(pipe), state_string(state), state_string(cur_state)); 1228 pipe_name(pipe), state_string(state), state_string(cur_state));
@@ -1509,13 +1498,14 @@ static void
1509intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, 1498intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
1510 enum intel_sbi_destination destination) 1499 enum intel_sbi_destination destination)
1511{ 1500{
1512 unsigned long flags;
1513 u32 tmp; 1501 u32 tmp;
1514 1502
1515 spin_lock_irqsave(&dev_priv->dpio_lock, flags); 1503 WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
1516 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) { 1504
1505 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
1506 100)) {
1517 DRM_ERROR("timeout waiting for SBI to become ready\n"); 1507 DRM_ERROR("timeout waiting for SBI to become ready\n");
1518 goto out_unlock; 1508 return;
1519 } 1509 }
1520 1510
1521 I915_WRITE(SBI_ADDR, (reg << 16)); 1511 I915_WRITE(SBI_ADDR, (reg << 16));
@@ -1530,24 +1520,21 @@ intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
1530 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, 1520 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
1531 100)) { 1521 100)) {
1532 DRM_ERROR("timeout waiting for SBI to complete write transaction\n"); 1522 DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
1533 goto out_unlock; 1523 return;
1534 } 1524 }
1535
1536out_unlock:
1537 spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
1538} 1525}
1539 1526
1540static u32 1527static u32
1541intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, 1528intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
1542 enum intel_sbi_destination destination) 1529 enum intel_sbi_destination destination)
1543{ 1530{
1544 unsigned long flags;
1545 u32 value = 0; 1531 u32 value = 0;
1532 WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
1546 1533
1547 spin_lock_irqsave(&dev_priv->dpio_lock, flags); 1534 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
1548 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) { 1535 100)) {
1549 DRM_ERROR("timeout waiting for SBI to become ready\n"); 1536 DRM_ERROR("timeout waiting for SBI to become ready\n");
1550 goto out_unlock; 1537 return 0;
1551 } 1538 }
1552 1539
1553 I915_WRITE(SBI_ADDR, (reg << 16)); 1540 I915_WRITE(SBI_ADDR, (reg << 16));
@@ -1561,14 +1548,10 @@ intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
1561 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, 1548 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
1562 100)) { 1549 100)) {
1563 DRM_ERROR("timeout waiting for SBI to complete read transaction\n"); 1550 DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
1564 goto out_unlock; 1551 return 0;
1565 } 1552 }
1566 1553
1567 value = I915_READ(SBI_DATA); 1554 return I915_READ(SBI_DATA);
1568
1569out_unlock:
1570 spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
1571 return value;
1572} 1555}
1573 1556
1574/** 1557/**
@@ -1700,8 +1683,8 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1700 * make the BPC in transcoder be consistent with 1683 * make the BPC in transcoder be consistent with
1701 * that in pipeconf reg. 1684 * that in pipeconf reg.
1702 */ 1685 */
1703 val &= ~PIPE_BPC_MASK; 1686 val &= ~PIPECONF_BPC_MASK;
1704 val |= pipeconf_val & PIPE_BPC_MASK; 1687 val |= pipeconf_val & PIPECONF_BPC_MASK;
1705 } 1688 }
1706 1689
1707 val &= ~TRANS_INTERLACE_MASK; 1690 val &= ~TRANS_INTERLACE_MASK;
@@ -1728,7 +1711,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1728 BUG_ON(dev_priv->info->gen < 5); 1711 BUG_ON(dev_priv->info->gen < 5);
1729 1712
1730 /* FDI must be feeding us bits for PCH ports */ 1713 /* FDI must be feeding us bits for PCH ports */
1731 assert_fdi_tx_enabled(dev_priv, cpu_transcoder); 1714 assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
1732 assert_fdi_rx_enabled(dev_priv, TRANSCODER_A); 1715 assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
1733 1716
1734 /* Workaround: set timing override bit. */ 1717 /* Workaround: set timing override bit. */
@@ -1816,11 +1799,11 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1816{ 1799{
1817 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1800 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1818 pipe); 1801 pipe);
1819 enum transcoder pch_transcoder; 1802 enum pipe pch_transcoder;
1820 int reg; 1803 int reg;
1821 u32 val; 1804 u32 val;
1822 1805
1823 if (IS_HASWELL(dev_priv->dev)) 1806 if (HAS_PCH_LPT(dev_priv->dev))
1824 pch_transcoder = TRANSCODER_A; 1807 pch_transcoder = TRANSCODER_A;
1825 else 1808 else
1826 pch_transcoder = pipe; 1809 pch_transcoder = pipe;
@@ -1836,7 +1819,8 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1836 if (pch_port) { 1819 if (pch_port) {
1837 /* if driving the PCH, we need FDI enabled */ 1820 /* if driving the PCH, we need FDI enabled */
1838 assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder); 1821 assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
1839 assert_fdi_tx_pll_enabled(dev_priv, cpu_transcoder); 1822 assert_fdi_tx_pll_enabled(dev_priv,
1823 (enum pipe) cpu_transcoder);
1840 } 1824 }
1841 /* FIXME: assert CPU port conditions for SNB+ */ 1825 /* FIXME: assert CPU port conditions for SNB+ */
1842 } 1826 }
@@ -2017,18 +2001,29 @@ void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
2017 2001
2018/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel 2002/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
2019 * is assumed to be a power-of-two. */ 2003 * is assumed to be a power-of-two. */
2020unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y, 2004unsigned long intel_gen4_compute_page_offset(int *x, int *y,
2021 unsigned int bpp, 2005 unsigned int tiling_mode,
2022 unsigned int pitch) 2006 unsigned int cpp,
2007 unsigned int pitch)
2023{ 2008{
2024 int tile_rows, tiles; 2009 if (tiling_mode != I915_TILING_NONE) {
2010 unsigned int tile_rows, tiles;
2011
2012 tile_rows = *y / 8;
2013 *y %= 8;
2014
2015 tiles = *x / (512/cpp);
2016 *x %= 512/cpp;
2025 2017
2026 tile_rows = *y / 8; 2018 return tile_rows * pitch * 8 + tiles * 4096;
2027 *y %= 8; 2019 } else {
2028 tiles = *x / (512/bpp); 2020 unsigned int offset;
2029 *x %= 512/bpp;
2030 2021
2031 return tile_rows * pitch * 8 + tiles * 4096; 2022 offset = *y * pitch + *x * cpp;
2023 *y = 0;
2024 *x = (offset & 4095) / cpp;
2025 return offset & -4096;
2026 }
2032} 2027}
2033 2028
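
intel_gen4_compute_offset_xtiled grows into intel_gen4_compute_page_offset: the X-tiled branch still counts whole 4096-byte tiles (512 bytes wide, 8 rows high), while the new linear branch rounds the byte offset down to a 4 KiB boundary and folds the remainder back into x. A runnable restatement of both branches with a worked case:

    #include <stdio.h>

    /* X-tiled: 512-byte-wide, 8-row tiles, 4096 bytes each. */
    static unsigned long tiled_offset(int *x, int *y,
                                      unsigned cpp, unsigned pitch)
    {
            unsigned tile_rows = *y / 8, tiles = *x / (512 / cpp);
            *y %= 8;
            *x %= 512 / cpp;
            return tile_rows * pitch * 8 + tiles * 4096;
    }

    /* Linear: round the byte offset down to 4 KiB and push the
     * remainder back into the x coordinate. */
    static unsigned long linear_offset(int *x, int *y,
                                       unsigned cpp, unsigned pitch)
    {
            unsigned offset = *y * pitch + *x * cpp;
            *y = 0;
            *x = (offset & 4095) / cpp;
            return offset & ~4095u;
    }

    int main(void)
    {
            int tx = 100, ty = 50, lx = 100, ly = 50;
            printf("tiled  %lu x=%d y=%d\n",
                   tiled_offset(&tx, &ty, 4, 8192), tx, ty);
            printf("linear %lu x=%d y=%d\n",
                   linear_offset(&lx, &ly, 4, 8192), lx, ly);
            return 0;
    }

With 4 bytes per pixel and an 8192-byte stride, (x=100, y=50) gives 393216 tiled with y folded to 2 (six full tile rows), and 409600 linear with the 400-byte remainder folded back into x.
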
2034static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, 2029static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
@@ -2105,9 +2100,9 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2105 2100
2106 if (INTEL_INFO(dev)->gen >= 4) { 2101 if (INTEL_INFO(dev)->gen >= 4) {
2107 intel_crtc->dspaddr_offset = 2102 intel_crtc->dspaddr_offset =
2108 intel_gen4_compute_offset_xtiled(&x, &y, 2103 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2109 fb->bits_per_pixel / 8, 2104 fb->bits_per_pixel / 8,
2110 fb->pitches[0]); 2105 fb->pitches[0]);
2111 linear_offset -= intel_crtc->dspaddr_offset; 2106 linear_offset -= intel_crtc->dspaddr_offset;
2112 } else { 2107 } else {
2113 intel_crtc->dspaddr_offset = linear_offset; 2108 intel_crtc->dspaddr_offset = linear_offset;
@@ -2198,9 +2193,9 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
2198 2193
2199 linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); 2194 linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2200 intel_crtc->dspaddr_offset = 2195 intel_crtc->dspaddr_offset =
2201 intel_gen4_compute_offset_xtiled(&x, &y, 2196 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2202 fb->bits_per_pixel / 8, 2197 fb->bits_per_pixel / 8,
2203 fb->pitches[0]); 2198 fb->pitches[0]);
2204 linear_offset -= intel_crtc->dspaddr_offset; 2199 linear_offset -= intel_crtc->dspaddr_offset;
2205 2200
2206 DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", 2201 DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
@@ -2242,10 +2237,6 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
2242 bool was_interruptible = dev_priv->mm.interruptible; 2237 bool was_interruptible = dev_priv->mm.interruptible;
2243 int ret; 2238 int ret;
2244 2239
2245 wait_event(dev_priv->pending_flip_queue,
2246 atomic_read(&dev_priv->mm.wedged) ||
2247 atomic_read(&obj->pending_flip) == 0);
2248
2249 /* Big Hammer, we also need to ensure that any pending 2240 /* Big Hammer, we also need to ensure that any pending
2250 * MI_WAIT_FOR_EVENT inside a user batch buffer on the 2241 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2251 * current scanout is retired before unpinning the old 2242 * current scanout is retired before unpinning the old
@@ -2350,43 +2341,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2350 return 0; 2341 return 0;
2351} 2342}
2352 2343
2353static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
2354{
2355 struct drm_device *dev = crtc->dev;
2356 struct drm_i915_private *dev_priv = dev->dev_private;
2357 u32 dpa_ctl;
2358
2359 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
2360 dpa_ctl = I915_READ(DP_A);
2361 dpa_ctl &= ~DP_PLL_FREQ_MASK;
2362
2363 if (clock < 200000) {
2364 u32 temp;
2365 dpa_ctl |= DP_PLL_FREQ_160MHZ;
2366		/* workaround for 160MHz:
2367 1) program 0x4600c bits 15:0 = 0x8124
2368 2) program 0x46010 bit 0 = 1
2369 3) program 0x46034 bit 24 = 1
2370 4) program 0x64000 bit 14 = 1
2371 */
2372 temp = I915_READ(0x4600c);
2373 temp &= 0xffff0000;
2374 I915_WRITE(0x4600c, temp | 0x8124);
2375
2376 temp = I915_READ(0x46010);
2377 I915_WRITE(0x46010, temp | 1);
2378
2379 temp = I915_READ(0x46034);
2380 I915_WRITE(0x46034, temp | (1 << 24));
2381 } else {
2382 dpa_ctl |= DP_PLL_FREQ_270MHZ;
2383 }
2384 I915_WRITE(DP_A, dpa_ctl);
2385
2386 POSTING_READ(DP_A);
2387 udelay(500);
2388}
2389
2390static void intel_fdi_normal_train(struct drm_crtc *crtc) 2344static void intel_fdi_normal_train(struct drm_crtc *crtc)
2391{ 2345{
2392 struct drm_device *dev = crtc->dev; 2346 struct drm_device *dev = crtc->dev;
@@ -2815,7 +2769,7 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
2815 temp = I915_READ(reg); 2769 temp = I915_READ(reg);
2816 temp &= ~((0x7 << 19) | (0x7 << 16)); 2770 temp &= ~((0x7 << 19) | (0x7 << 16));
2817 temp |= (intel_crtc->fdi_lanes - 1) << 19; 2771 temp |= (intel_crtc->fdi_lanes - 1) << 19;
2818 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; 2772 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2819 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); 2773 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
2820 2774
2821 POSTING_READ(reg); 2775 POSTING_READ(reg);
@@ -2828,18 +2782,14 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
2828 POSTING_READ(reg); 2782 POSTING_READ(reg);
2829 udelay(200); 2783 udelay(200);
2830 2784
2831 /* On Haswell, the PLL configuration for ports and pipes is handled 2785 /* Enable CPU FDI TX PLL, always on for Ironlake */
2832 * separately, as part of DDI setup */ 2786 reg = FDI_TX_CTL(pipe);
2833 if (!IS_HASWELL(dev)) { 2787 temp = I915_READ(reg);
2834 /* Enable CPU FDI TX PLL, always on for Ironlake */ 2788 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2835 reg = FDI_TX_CTL(pipe); 2789 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
2836 temp = I915_READ(reg);
2837 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2838 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
2839 2790
2840 POSTING_READ(reg); 2791 POSTING_READ(reg);
2841 udelay(100); 2792 udelay(100);
2842 }
2843 } 2793 }
2844} 2794}
2845 2795
@@ -2889,7 +2839,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
2889 reg = FDI_RX_CTL(pipe); 2839 reg = FDI_RX_CTL(pipe);
2890 temp = I915_READ(reg); 2840 temp = I915_READ(reg);
2891 temp &= ~(0x7 << 16); 2841 temp &= ~(0x7 << 16);
2892 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; 2842 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2893 I915_WRITE(reg, temp & ~FDI_RX_ENABLE); 2843 I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2894 2844
2895 POSTING_READ(reg); 2845 POSTING_READ(reg);
@@ -2918,7 +2868,7 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
2918 } 2868 }
2919 /* BPC in FDI rx is consistent with that in PIPECONF */ 2869 /* BPC in FDI rx is consistent with that in PIPECONF */
2920 temp &= ~(0x07 << 16); 2870 temp &= ~(0x07 << 16);
2921 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; 2871 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
2922 I915_WRITE(reg, temp); 2872 I915_WRITE(reg, temp);
2923 2873
2924 POSTING_READ(reg); 2874 POSTING_READ(reg);
@@ -2929,10 +2879,12 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2929{ 2879{
2930 struct drm_device *dev = crtc->dev; 2880 struct drm_device *dev = crtc->dev;
2931 struct drm_i915_private *dev_priv = dev->dev_private; 2881 struct drm_i915_private *dev_priv = dev->dev_private;
2882 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2932 unsigned long flags; 2883 unsigned long flags;
2933 bool pending; 2884 bool pending;
2934 2885
2935 if (atomic_read(&dev_priv->mm.wedged)) 2886 if (i915_reset_in_progress(&dev_priv->gpu_error) ||
2887 intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
2936 return false; 2888 return false;
2937 2889
2938 spin_lock_irqsave(&dev->event_lock, flags); 2890 spin_lock_irqsave(&dev->event_lock, flags);
@@ -2950,6 +2902,8 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2950 if (crtc->fb == NULL) 2902 if (crtc->fb == NULL)
2951 return; 2903 return;
2952 2904
2905 WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
2906
2953 wait_event(dev_priv->pending_flip_queue, 2907 wait_event(dev_priv->pending_flip_queue,
2954 !intel_crtc_has_pending_flip(crtc)); 2908 !intel_crtc_has_pending_flip(crtc));
2955 2909
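
intel_crtc_has_pending_flip now also returns false when a reset is in progress or when the CRTC's snapshot of the reset counter is stale, so nobody waits on a flip the reset path already completed or cancelled. The generation-counter idiom in user-space form, with the structure names invented:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Snapshot the global reset counter when a flip is queued; if it
     * has changed by the time someone waits, a reset intervened and
     * there is nothing left to wait for. */
    struct flip {
            unsigned reset_counter;
            bool armed;
    };

    static atomic_uint reset_counter;

    static void queue_flip(struct flip *f)
    {
            f->reset_counter = atomic_load(&reset_counter);
            f->armed = true;
    }

    static bool flip_pending(const struct flip *f)
    {
            if (f->reset_counter != atomic_load(&reset_counter))
                    return false;
            return f->armed;
    }

    int main(void)
    {
            struct flip f;
            queue_flip(&f);
            atomic_fetch_add(&reset_counter, 1); /* simulate a GPU reset */
            return flip_pending(&f) ? 1 : 0;     /* 0: the wait is skipped */
    }
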
@@ -2992,6 +2946,8 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
2992 u32 divsel, phaseinc, auxdiv, phasedir = 0; 2946 u32 divsel, phaseinc, auxdiv, phasedir = 0;
2993 u32 temp; 2947 u32 temp;
2994 2948
2949 mutex_lock(&dev_priv->dpio_lock);
2950
2995 /* It is necessary to ungate the pixclk gate prior to programming 2951 /* It is necessary to ungate the pixclk gate prior to programming
2996 * the divisors, and gate it back when it is done. 2952 * the divisors, and gate it back when it is done.
2997 */ 2953 */
@@ -3066,6 +3022,8 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
3066 udelay(24); 3022 udelay(24);
3067 3023
3068 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE); 3024 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
3025
3026 mutex_unlock(&dev_priv->dpio_lock);
3069} 3027}
3070 3028
3071/* 3029/*
@@ -3146,7 +3104,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
3146 if (HAS_PCH_CPT(dev) && 3104 if (HAS_PCH_CPT(dev) &&
3147 (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || 3105 (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
3148 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { 3106 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
3149 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5; 3107 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
3150 reg = TRANS_DP_CTL(pipe); 3108 reg = TRANS_DP_CTL(pipe);
3151 temp = I915_READ(reg); 3109 temp = I915_READ(reg);
3152 temp &= ~(TRANS_DP_PORT_SEL_MASK | 3110 temp &= ~(TRANS_DP_PORT_SEL_MASK |
@@ -3623,7 +3581,7 @@ static void haswell_crtc_off(struct drm_crtc *crtc)
3623 3581
3624 /* Stop saying we're using TRANSCODER_EDP because some other CRTC might 3582 /* Stop saying we're using TRANSCODER_EDP because some other CRTC might
3625 * start using it. */ 3583 * start using it. */
3626 intel_crtc->cpu_transcoder = intel_crtc->pipe; 3584 intel_crtc->cpu_transcoder = (enum transcoder) intel_crtc->pipe;
3627 3585
3628 intel_ddi_put_crtc_pll(crtc); 3586 intel_ddi_put_crtc_pll(crtc);
3629} 3587}
@@ -3664,6 +3622,11 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
3664 intel_update_watermarks(dev); 3622 intel_update_watermarks(dev);
3665 3623
3666 intel_enable_pll(dev_priv, pipe); 3624 intel_enable_pll(dev_priv, pipe);
3625
3626 for_each_encoder_on_crtc(dev, crtc, encoder)
3627 if (encoder->pre_enable)
3628 encoder->pre_enable(encoder);
3629
3667 intel_enable_pipe(dev_priv, pipe, false); 3630 intel_enable_pipe(dev_priv, pipe, false);
3668 intel_enable_plane(dev_priv, plane, pipe); 3631 intel_enable_plane(dev_priv, plane, pipe);
3669 3632
@@ -3686,6 +3649,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
3686 struct intel_encoder *encoder; 3649 struct intel_encoder *encoder;
3687 int pipe = intel_crtc->pipe; 3650 int pipe = intel_crtc->pipe;
3688 int plane = intel_crtc->plane; 3651 int plane = intel_crtc->plane;
3652 u32 pctl;
3689 3653
3690 3654
3691 if (!intel_crtc->active) 3655 if (!intel_crtc->active)
@@ -3705,6 +3669,13 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
3705 3669
3706 intel_disable_plane(dev_priv, plane, pipe); 3670 intel_disable_plane(dev_priv, plane, pipe);
3707 intel_disable_pipe(dev_priv, pipe); 3671 intel_disable_pipe(dev_priv, pipe);
3672
3673 /* Disable pannel fitter if it is on this pipe. */
3674 pctl = I915_READ(PFIT_CONTROL);
3675 if ((pctl & PFIT_ENABLE) &&
3676 ((pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT) == pipe)
3677 I915_WRITE(PFIT_CONTROL, 0);
3678
3708 intel_disable_pll(dev_priv, pipe); 3679 intel_disable_pll(dev_priv, pipe);
3709 3680
3710 intel_crtc->active = false; 3681 intel_crtc->active = false;
@@ -3767,19 +3738,17 @@ void intel_crtc_update_dpms(struct drm_crtc *crtc)
3767 intel_crtc_update_sarea(crtc, enable); 3738 intel_crtc_update_sarea(crtc, enable);
3768} 3739}
3769 3740
3770static void intel_crtc_noop(struct drm_crtc *crtc)
3771{
3772}
3773
3774static void intel_crtc_disable(struct drm_crtc *crtc) 3741static void intel_crtc_disable(struct drm_crtc *crtc)
3775{ 3742{
3776 struct drm_device *dev = crtc->dev; 3743 struct drm_device *dev = crtc->dev;
3777 struct drm_connector *connector; 3744 struct drm_connector *connector;
3778 struct drm_i915_private *dev_priv = dev->dev_private; 3745 struct drm_i915_private *dev_priv = dev->dev_private;
3746 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3779 3747
3780 /* crtc should still be enabled when we disable it. */ 3748 /* crtc should still be enabled when we disable it. */
3781 WARN_ON(!crtc->enabled); 3749 WARN_ON(!crtc->enabled);
3782 3750
3751 intel_crtc->eld_vld = false;
3783 dev_priv->display.crtc_disable(crtc); 3752 dev_priv->display.crtc_disable(crtc);
3784 intel_crtc_update_sarea(crtc, false); 3753 intel_crtc_update_sarea(crtc, false);
3785 dev_priv->display.off(crtc); 3754 dev_priv->display.off(crtc);
@@ -3817,10 +3786,6 @@ void intel_modeset_disable(struct drm_device *dev)
3817 } 3786 }
3818} 3787}
3819 3788
3820void intel_encoder_noop(struct drm_encoder *encoder)
3821{
3822}
3823
3824void intel_encoder_destroy(struct drm_encoder *encoder) 3789void intel_encoder_destroy(struct drm_encoder *encoder)
3825{ 3790{
3826 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 3791 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
@@ -4012,16 +3977,8 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
4012 return 133000; 3977 return 133000;
4013} 3978}
4014 3979
4015struct fdi_m_n {
4016 u32 tu;
4017 u32 gmch_m;
4018 u32 gmch_n;
4019 u32 link_m;
4020 u32 link_n;
4021};
4022
4023static void 3980static void
4024fdi_reduce_ratio(u32 *num, u32 *den) 3981intel_reduce_ratio(uint32_t *num, uint32_t *den)
4025{ 3982{
4026 while (*num > 0xffffff || *den > 0xffffff) { 3983 while (*num > 0xffffff || *den > 0xffffff) {
4027 *num >>= 1; 3984 *num >>= 1;
@@ -4029,20 +3986,18 @@ fdi_reduce_ratio(u32 *num, u32 *den)
4029 } 3986 }
4030} 3987}
4031 3988
4032static void 3989void
4033ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock, 3990intel_link_compute_m_n(int bits_per_pixel, int nlanes,
4034 int link_clock, struct fdi_m_n *m_n) 3991 int pixel_clock, int link_clock,
3992 struct intel_link_m_n *m_n)
4035{ 3993{
4036 m_n->tu = 64; /* default size */ 3994 m_n->tu = 64;
4037
4038 /* BUG_ON(pixel_clock > INT_MAX / 36); */
4039 m_n->gmch_m = bits_per_pixel * pixel_clock; 3995 m_n->gmch_m = bits_per_pixel * pixel_clock;
4040 m_n->gmch_n = link_clock * nlanes * 8; 3996 m_n->gmch_n = link_clock * nlanes * 8;
4041 fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); 3997 intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
4042
4043 m_n->link_m = pixel_clock; 3998 m_n->link_m = pixel_clock;
4044 m_n->link_n = link_clock; 3999 m_n->link_n = link_clock;
4045 fdi_reduce_ratio(&m_n->link_m, &m_n->link_n); 4000 intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
4046} 4001}
4047 4002
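
fdi_reduce_ratio and ironlake_compute_m_n become the generic intel_reduce_ratio and intel_link_compute_m_n: the data M/N is bits-per-pixel times pixel clock over link clock times lanes times 8, the link M/N is pixel clock over link clock, and both pairs are shifted right together until they fit in 24 bits. A runnable worked example using typical 1080p DP numbers (clocks in kHz, as the driver uses):

    #include <stdint.h>
    #include <stdio.h>

    static void reduce_ratio(uint32_t *num, uint32_t *den)
    {
            while (*num > 0xffffff || *den > 0xffffff) {
                    *num >>= 1;
                    *den >>= 1;
            }
    }

    int main(void)
    {
            /* 24 bpp, 4 lanes, 148500 kHz pixel clock, 270000 kHz link */
            uint32_t gmch_m = 24 * 148500, gmch_n = 270000 * 4 * 8;
            uint32_t link_m = 148500, link_n = 270000;

            reduce_ratio(&gmch_m, &gmch_n);
            reduce_ratio(&link_m, &link_n);
            printf("data %u/%u link %u/%u\n",
                   (unsigned)gmch_m, (unsigned)gmch_n,
                   (unsigned)link_m, (unsigned)link_n);
            return 0;
    }

Both numerators here already fit in 24 bits, so no shifting occurs and the output is data 3564000/8640000, link 148500/270000.
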
4048static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 4003static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
@@ -4289,51 +4244,6 @@ static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
4289 } 4244 }
4290} 4245}
4291 4246
4292static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
4293 struct drm_display_mode *adjusted_mode)
4294{
4295 struct drm_device *dev = crtc->dev;
4296 struct drm_i915_private *dev_priv = dev->dev_private;
4297 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4298 int pipe = intel_crtc->pipe;
4299 u32 temp;
4300
4301 temp = I915_READ(LVDS);
4302 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
4303 if (pipe == 1) {
4304 temp |= LVDS_PIPEB_SELECT;
4305 } else {
4306 temp &= ~LVDS_PIPEB_SELECT;
4307 }
4308	/* set the corresponding LVDS_BORDER bit */
4309 temp |= dev_priv->lvds_border_bits;
4310 /* Set the B0-B3 data pairs corresponding to whether we're going to
4311 * set the DPLLs for dual-channel mode or not.
4312 */
4313 if (clock->p2 == 7)
4314 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
4315 else
4316 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
4317
4318 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
4319 * appropriately here, but we need to look more thoroughly into how
4320 * panels behave in the two modes.
4321 */
4322 /* set the dithering flag on LVDS as needed */
4323 if (INTEL_INFO(dev)->gen >= 4) {
4324 if (dev_priv->lvds_dither)
4325 temp |= LVDS_ENABLE_DITHER;
4326 else
4327 temp &= ~LVDS_ENABLE_DITHER;
4328 }
4329 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
4330 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
4331 temp |= LVDS_HSYNC_POLARITY;
4332 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
4333 temp |= LVDS_VSYNC_POLARITY;
4334 I915_WRITE(LVDS, temp);
4335}
4336
4337static void vlv_update_pll(struct drm_crtc *crtc, 4247static void vlv_update_pll(struct drm_crtc *crtc,
4338 struct drm_display_mode *mode, 4248 struct drm_display_mode *mode,
4339 struct drm_display_mode *adjusted_mode, 4249 struct drm_display_mode *adjusted_mode,
@@ -4349,6 +4259,8 @@ static void vlv_update_pll(struct drm_crtc *crtc,
4349 bool is_sdvo; 4259 bool is_sdvo;
4350 u32 temp; 4260 u32 temp;
4351 4261
4262 mutex_lock(&dev_priv->dpio_lock);
4263
4352 is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) || 4264 is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
4353 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); 4265 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
4354 4266
@@ -4432,6 +4344,8 @@ static void vlv_update_pll(struct drm_crtc *crtc,
4432 temp |= (1 << 21); 4344 temp |= (1 << 21);
4433 intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp); 4345 intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
4434 } 4346 }
4347
4348 mutex_unlock(&dev_priv->dpio_lock);
4435} 4349}
4436 4350
4437static void i9xx_update_pll(struct drm_crtc *crtc, 4351static void i9xx_update_pll(struct drm_crtc *crtc,
@@ -4443,6 +4357,7 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
4443 struct drm_device *dev = crtc->dev; 4357 struct drm_device *dev = crtc->dev;
4444 struct drm_i915_private *dev_priv = dev->dev_private; 4358 struct drm_i915_private *dev_priv = dev->dev_private;
4445 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4359 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4360 struct intel_encoder *encoder;
4446 int pipe = intel_crtc->pipe; 4361 int pipe = intel_crtc->pipe;
4447 u32 dpll; 4362 u32 dpll;
4448 bool is_sdvo; 4363 bool is_sdvo;
@@ -4511,12 +4426,9 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
4511 POSTING_READ(DPLL(pipe)); 4426 POSTING_READ(DPLL(pipe));
4512 udelay(150); 4427 udelay(150);
4513 4428
4514 /* The LVDS pin pair needs to be on before the DPLLs are enabled. 4429 for_each_encoder_on_crtc(dev, crtc, encoder)
4515 * This is an exception to the general rule that mode_set doesn't turn 4430 if (encoder->pre_pll_enable)
4516 * things on. 4431 encoder->pre_pll_enable(encoder);
4517 */
4518 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
4519 intel_update_lvds(crtc, clock, adjusted_mode);
4520 4432
4521 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) 4433 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
4522 intel_dp_set_m_n(crtc, mode, adjusted_mode); 4434 intel_dp_set_m_n(crtc, mode, adjusted_mode);
@@ -4555,6 +4467,7 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
4555 struct drm_device *dev = crtc->dev; 4467 struct drm_device *dev = crtc->dev;
4556 struct drm_i915_private *dev_priv = dev->dev_private; 4468 struct drm_i915_private *dev_priv = dev->dev_private;
4557 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4469 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4470 struct intel_encoder *encoder;
4558 int pipe = intel_crtc->pipe; 4471 int pipe = intel_crtc->pipe;
4559 u32 dpll; 4472 u32 dpll;
4560 4473
@@ -4588,12 +4501,9 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
4588 POSTING_READ(DPLL(pipe)); 4501 POSTING_READ(DPLL(pipe));
4589 udelay(150); 4502 udelay(150);
4590 4503
4591 /* The LVDS pin pair needs to be on before the DPLLs are enabled. 4504 for_each_encoder_on_crtc(dev, crtc, encoder)
4592 * This is an exception to the general rule that mode_set doesn't turn 4505 if (encoder->pre_pll_enable)
4593 * things on. 4506 encoder->pre_pll_enable(encoder);
4594 */
4595 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
4596 intel_update_lvds(crtc, clock, adjusted_mode);
4597 4507
4598 I915_WRITE(DPLL(pipe), dpll); 4508 I915_WRITE(DPLL(pipe), dpll);
4599 4509
@@ -4783,10 +4693,10 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4783 } 4693 }
4784 4694
4785 /* default to 8bpc */ 4695 /* default to 8bpc */
4786 pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN); 4696 pipeconf &= ~(PIPECONF_BPC_MASK | PIPECONF_DITHER_EN);
4787 if (is_dp) { 4697 if (is_dp) {
4788 if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { 4698 if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
4789 pipeconf |= PIPECONF_BPP_6 | 4699 pipeconf |= PIPECONF_6BPC |
4790 PIPECONF_DITHER_EN | 4700 PIPECONF_DITHER_EN |
4791 PIPECONF_DITHER_TYPE_SP; 4701 PIPECONF_DITHER_TYPE_SP;
4792 } 4702 }
@@ -4794,7 +4704,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4794 4704
4795 if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { 4705 if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
4796 if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { 4706 if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
4797 pipeconf |= PIPECONF_BPP_6 | 4707 pipeconf |= PIPECONF_6BPC |
4798 PIPECONF_ENABLE | 4708 PIPECONF_ENABLE |
4799 I965_PIPECONF_ACTIVE; 4709 I965_PIPECONF_ACTIVE;
4800 } 4710 }
@@ -4981,6 +4891,8 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
4981 if (!has_vga) 4891 if (!has_vga)
4982 return; 4892 return;
4983 4893
4894 mutex_lock(&dev_priv->dpio_lock);
4895
4984 /* XXX: Rip out SDV support once Haswell ships for real. */ 4896 /* XXX: Rip out SDV support once Haswell ships for real. */
4985 if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00) 4897 if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
4986 is_sdv = true; 4898 is_sdv = true;
@@ -5123,6 +5035,8 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
5123 tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK); 5035 tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
5124 tmp |= SBI_DBUFF0_ENABLE; 5036 tmp |= SBI_DBUFF0_ENABLE;
5125 intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK); 5037 intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
5038
5039 mutex_unlock(&dev_priv->dpio_lock);
5126} 5040}
5127 5041
5128/* 5042/*
@@ -5177,19 +5091,19 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
5177 5091
5178 val = I915_READ(PIPECONF(pipe)); 5092 val = I915_READ(PIPECONF(pipe));
5179 5093
5180 val &= ~PIPE_BPC_MASK; 5094 val &= ~PIPECONF_BPC_MASK;
5181 switch (intel_crtc->bpp) { 5095 switch (intel_crtc->bpp) {
5182 case 18: 5096 case 18:
5183 val |= PIPE_6BPC; 5097 val |= PIPECONF_6BPC;
5184 break; 5098 break;
5185 case 24: 5099 case 24:
5186 val |= PIPE_8BPC; 5100 val |= PIPECONF_8BPC;
5187 break; 5101 break;
5188 case 30: 5102 case 30:
5189 val |= PIPE_10BPC; 5103 val |= PIPECONF_10BPC;
5190 break; 5104 break;
5191 case 36: 5105 case 36:
5192 val |= PIPE_12BPC; 5106 val |= PIPECONF_12BPC;
5193 break; 5107 break;
5194 default: 5108 default:
5195 /* Case prevented by intel_choose_pipe_bpp_dither. */ 5109 /* Case prevented by intel_choose_pipe_bpp_dither. */
@@ -5206,10 +5120,80 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
5206 else 5120 else
5207 val |= PIPECONF_PROGRESSIVE; 5121 val |= PIPECONF_PROGRESSIVE;
5208 5122
5123 if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
5124 val |= PIPECONF_COLOR_RANGE_SELECT;
5125 else
5126 val &= ~PIPECONF_COLOR_RANGE_SELECT;
5127
5209 I915_WRITE(PIPECONF(pipe), val); 5128 I915_WRITE(PIPECONF(pipe), val);
5210 POSTING_READ(PIPECONF(pipe)); 5129 POSTING_READ(PIPECONF(pipe));
5211} 5130}
5212 5131
5132/*
5133 * Set up the pipe CSC unit.
5134 *
5135 * Currently only full range RGB to limited range RGB conversion
5136 * is supported, but eventually this should handle various
5137 * RGB<->YCbCr scenarios as well.
5138 */
5139static void intel_set_pipe_csc(struct drm_crtc *crtc,
5140 const struct drm_display_mode *adjusted_mode)
5141{
5142 struct drm_device *dev = crtc->dev;
5143 struct drm_i915_private *dev_priv = dev->dev_private;
5144 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5145 int pipe = intel_crtc->pipe;
5146 uint16_t coeff = 0x7800; /* 1.0 */
5147
5148 /*
5149 * TODO: Check what kind of values actually come out of the pipe
5150 * with these coeff/postoff values and adjust to get the best
5151 * accuracy. Perhaps we even need to take the bpc value into
5152 * consideration.
5153 */
5154
5155 if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
5156 coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
5157
5158 /*
5159 * GY/GU and RY/RU should be the other way around according
5160 * to BSpec, but reality doesn't agree. Just set them up in
5161 * a way that results in the correct picture.
5162 */
5163 I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
5164 I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
5165
5166 I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
5167 I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
5168
5169 I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
5170 I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
5171
5172 I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
5173 I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
5174 I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
5175
5176 if (INTEL_INFO(dev)->gen > 6) {
5177 uint16_t postoff = 0;
5178
5179 if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
5180 postoff = (16 * (1 << 13) / 255) & 0x1fff;
5181
5182 I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
5183 I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
5184 I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
5185
5186 I915_WRITE(PIPE_CSC_MODE(pipe), 0);
5187 } else {
5188 uint32_t mode = CSC_MODE_YUV_TO_RGB;
5189
5190 if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
5191 mode |= CSC_BLACK_SCREEN_OFFSET;
5192
5193 I915_WRITE(PIPE_CSC_MODE(pipe), mode);
5194 }
5195}
5196
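
The new intel_set_pipe_csc writes a scaled identity matrix: the gain is 0x7800 (1.0, per the code's own comment) for full range, or ((235 - 16) * 4096 / 255) & 0xff8 for limited range, plus a +16 black-level post offset on gen 7 and later. The two limited-range constants, checkable in plain C:

    #include <stdio.h>

    int main(void)
    {
            /* Limited-range scale: map full-range [0,255] onto [16,235];
             * the low 3 coefficient bits are unused, hence the mask. */
            unsigned coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8;
            /* Post offset adds the +16 black level, 13-bit field. */
            unsigned postoff = (16 * (1 << 13) / 255) & 0x1fff;

            printf("coeff 0x%x postoff 0x%x\n", coeff, postoff);
            /* prints: coeff 0xdb8 postoff 0x202 */
            return 0;
    }
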
5213static void haswell_set_pipeconf(struct drm_crtc *crtc, 5197static void haswell_set_pipeconf(struct drm_crtc *crtc,
5214 struct drm_display_mode *adjusted_mode, 5198 struct drm_display_mode *adjusted_mode,
5215 bool dither) 5199 bool dither)
@@ -5400,7 +5384,7 @@ static void ironlake_set_m_n(struct drm_crtc *crtc,
5400 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5384 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5401 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; 5385 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
5402 struct intel_encoder *intel_encoder, *edp_encoder = NULL; 5386 struct intel_encoder *intel_encoder, *edp_encoder = NULL;
5403 struct fdi_m_n m_n = {0}; 5387 struct intel_link_m_n m_n = {0};
5404 int target_clock, pixel_multiplier, lane, link_bw; 5388 int target_clock, pixel_multiplier, lane, link_bw;
5405 bool is_dp = false, is_cpu_edp = false; 5389 bool is_dp = false, is_cpu_edp = false;
5406 5390
@@ -5452,8 +5436,7 @@ static void ironlake_set_m_n(struct drm_crtc *crtc,
5452 5436
5453 if (pixel_multiplier > 1) 5437 if (pixel_multiplier > 1)
5454 link_bw *= pixel_multiplier; 5438 link_bw *= pixel_multiplier;
5455 ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, 5439 intel_link_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, &m_n);
5456 &m_n);
5457 5440
5458 I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m); 5441 I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m);
5459 I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n); 5442 I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
@@ -5506,7 +5489,7 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
5506 if (is_lvds) { 5489 if (is_lvds) {
5507 if ((intel_panel_use_ssc(dev_priv) && 5490 if ((intel_panel_use_ssc(dev_priv) &&
5508 dev_priv->lvds_ssc_freq == 100) || 5491 dev_priv->lvds_ssc_freq == 100) ||
5509 (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) 5492 intel_is_dual_link_lvds(dev))
5510 factor = 25; 5493 factor = 25;
5511 } else if (is_sdvo && is_tv) 5494 } else if (is_sdvo && is_tv)
5512 factor = 20; 5495 factor = 20;
@@ -5581,7 +5564,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5581 bool ok, has_reduced_clock = false; 5564 bool ok, has_reduced_clock = false;
5582 bool is_lvds = false, is_dp = false, is_cpu_edp = false; 5565 bool is_lvds = false, is_dp = false, is_cpu_edp = false;
5583 struct intel_encoder *encoder; 5566 struct intel_encoder *encoder;
5584 u32 temp;
5585 int ret; 5567 int ret;
5586 bool dither, fdi_config_ok; 5568 bool dither, fdi_config_ok;
5587 5569
@@ -5645,54 +5627,12 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5645 } else 5627 } else
5646 intel_put_pch_pll(intel_crtc); 5628 intel_put_pch_pll(intel_crtc);
5647 5629
5648 /* The LVDS pin pair needs to be on before the DPLLs are enabled. 5630 if (is_dp && !is_cpu_edp)
5649 * This is an exception to the general rule that mode_set doesn't turn
5650 * things on.
5651 */
5652 if (is_lvds) {
5653 temp = I915_READ(PCH_LVDS);
5654 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
5655 if (HAS_PCH_CPT(dev)) {
5656 temp &= ~PORT_TRANS_SEL_MASK;
5657 temp |= PORT_TRANS_SEL_CPT(pipe);
5658 } else {
5659 if (pipe == 1)
5660 temp |= LVDS_PIPEB_SELECT;
5661 else
5662 temp &= ~LVDS_PIPEB_SELECT;
5663 }
5664
5665		/* set the corresponding LVDS_BORDER bit */
5666 temp |= dev_priv->lvds_border_bits;
5667 /* Set the B0-B3 data pairs corresponding to whether we're going to
5668 * set the DPLLs for dual-channel mode or not.
5669 */
5670 if (clock.p2 == 7)
5671 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
5672 else
5673 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
5674
5675 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
5676 * appropriately here, but we need to look more thoroughly into how
5677 * panels behave in the two modes.
5678 */
5679 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5680 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5681 temp |= LVDS_HSYNC_POLARITY;
5682 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5683 temp |= LVDS_VSYNC_POLARITY;
5684 I915_WRITE(PCH_LVDS, temp);
5685 }
5686
5687 if (is_dp && !is_cpu_edp) {
5688 intel_dp_set_m_n(crtc, mode, adjusted_mode); 5631 intel_dp_set_m_n(crtc, mode, adjusted_mode);
5689 } else { 5632
5690 /* For non-DP output, clear any trans DP clock recovery setting.*/ 5633 for_each_encoder_on_crtc(dev, crtc, encoder)
5691 I915_WRITE(TRANSDATA_M1(pipe), 0); 5634 if (encoder->pre_pll_enable)
5692 I915_WRITE(TRANSDATA_N1(pipe), 0); 5635 encoder->pre_pll_enable(encoder);
5693 I915_WRITE(TRANSDPLINK_M1(pipe), 0);
5694 I915_WRITE(TRANSDPLINK_N1(pipe), 0);
5695 }
5696 5636
5697 if (intel_crtc->pch_pll) { 5637 if (intel_crtc->pch_pll) {
5698 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); 5638 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
@@ -5727,9 +5667,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5727 5667
5728 fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc); 5668 fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
5729 5669
5730 if (is_cpu_edp)
5731 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
5732
5733 ironlake_set_pipeconf(crtc, adjusted_mode, dither); 5670 ironlake_set_pipeconf(crtc, adjusted_mode, dither);
5734 5671
5735 intel_wait_for_vblank(dev, pipe); 5672 intel_wait_for_vblank(dev, pipe);
@@ -5747,6 +5684,35 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5747 return fdi_config_ok ? ret : -EINVAL; 5684 return fdi_config_ok ? ret : -EINVAL;
5748} 5685}
5749 5686
5687static void haswell_modeset_global_resources(struct drm_device *dev)
5688{
5689 struct drm_i915_private *dev_priv = dev->dev_private;
5690 bool enable = false;
5691 struct intel_crtc *crtc;
5692 struct intel_encoder *encoder;
5693
5694 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
5695 if (crtc->pipe != PIPE_A && crtc->base.enabled)
5696 enable = true;
5697 /* XXX: Should check for edp transcoder here, but thanks to init
5698 * sequence that's not yet available. Just in case desktop eDP
5699 * on PORT D is possible on haswell, too. */
5700 }
5701
5702 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
5703 base.head) {
5704 if (encoder->type != INTEL_OUTPUT_EDP &&
5705 encoder->connectors_active)
5706 enable = true;
5707 }
5708
5709 /* Even the eDP panel fitter is outside the always-on well. */
5710 if (dev_priv->pch_pf_size)
5711 enable = true;
5712
5713 intel_set_power_well(dev, enable);
5714}
5715
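
haswell_modeset_global_resources decides whether the driver-controlled power well may be switched off: any enabled CRTC other than pipe A, any active non-eDP encoder, or an in-use panel fitter keeps it on (the XXX comment concedes the encoder check is a stand-in for a proper eDP-transcoder check). The scan reduced to its logic, in a user-space model:

    #include <stdbool.h>
    #include <stddef.h>

    struct crtc    { int pipe; bool enabled; };
    struct encoder { bool is_edp; bool active; };

    /* The power well stays on if anything outside the always-on
     * domain (pipe A plus the eDP transcoder) is in use. */
    static bool power_well_needed(const struct crtc *crtcs, int nc,
                                  const struct encoder *encs, int ne,
                                  bool panel_fitter_in_use)
    {
            for (int i = 0; i < nc; i++)
                    if (crtcs[i].pipe != 0 && crtcs[i].enabled)
                            return true;
            for (int i = 0; i < ne; i++)
                    if (!encs[i].is_edp && encs[i].active)
                            return true;
            return panel_fitter_in_use;
    }

    int main(void)
    {
            struct crtc c[] = { { .pipe = 1, .enabled = true } };
            return power_well_needed(c, 1, NULL, 0, false) ? 0 : 1;
    }
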
5750static int haswell_crtc_mode_set(struct drm_crtc *crtc, 5716static int haswell_crtc_mode_set(struct drm_crtc *crtc,
5751 struct drm_display_mode *mode, 5717 struct drm_display_mode *mode,
5752 struct drm_display_mode *adjusted_mode, 5718 struct drm_display_mode *adjusted_mode,
@@ -5759,20 +5725,13 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
5759 int pipe = intel_crtc->pipe; 5725 int pipe = intel_crtc->pipe;
5760 int plane = intel_crtc->plane; 5726 int plane = intel_crtc->plane;
5761 int num_connectors = 0; 5727 int num_connectors = 0;
5762 intel_clock_t clock, reduced_clock; 5728 bool is_dp = false, is_cpu_edp = false;
5763 u32 dpll = 0, fp = 0, fp2 = 0;
5764 bool ok, has_reduced_clock = false;
5765 bool is_lvds = false, is_dp = false, is_cpu_edp = false;
5766 struct intel_encoder *encoder; 5729 struct intel_encoder *encoder;
5767 u32 temp;
5768 int ret; 5730 int ret;
5769 bool dither; 5731 bool dither;
5770 5732
5771 for_each_encoder_on_crtc(dev, crtc, encoder) { 5733 for_each_encoder_on_crtc(dev, crtc, encoder) {
5772 switch (encoder->type) { 5734 switch (encoder->type) {
5773 case INTEL_OUTPUT_LVDS:
5774 is_lvds = true;
5775 break;
5776 case INTEL_OUTPUT_DISPLAYPORT: 5735 case INTEL_OUTPUT_DISPLAYPORT:
5777 is_dp = true; 5736 is_dp = true;
5778 break; 5737 break;
@@ -5786,11 +5745,6 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
5786 num_connectors++; 5745 num_connectors++;
5787 } 5746 }
5788 5747
5789 if (is_cpu_edp)
5790 intel_crtc->cpu_transcoder = TRANSCODER_EDP;
5791 else
5792 intel_crtc->cpu_transcoder = pipe;
5793
5794 /* We are not sure yet this won't happen. */ 5748 /* We are not sure yet this won't happen. */
5795 WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n", 5749 WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
5796 INTEL_PCH_TYPE(dev)); 5750 INTEL_PCH_TYPE(dev));
@@ -5806,147 +5760,32 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
5806 if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock)) 5760 if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
5807 return -EINVAL; 5761 return -EINVAL;
5808 5762
5809 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5810 ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
5811 &has_reduced_clock,
5812 &reduced_clock);
5813 if (!ok) {
5814 DRM_ERROR("Couldn't find PLL settings for mode!\n");
5815 return -EINVAL;
5816 }
5817 }
5818
5819 /* Ensure that the cursor is valid for the new mode before changing... */ 5763 /* Ensure that the cursor is valid for the new mode before changing... */
5820 intel_crtc_update_cursor(crtc, true); 5764 intel_crtc_update_cursor(crtc, true);
5821 5765
5822 /* determine panel color depth */ 5766 /* determine panel color depth */
5823 dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp, 5767 dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
5824 adjusted_mode); 5768 adjusted_mode);
5825 if (is_lvds && dev_priv->lvds_dither)
5826 dither = true;
5827 5769
5828 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); 5770 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
5829 drm_mode_debug_printmodeline(mode); 5771 drm_mode_debug_printmodeline(mode);
5830 5772
5831 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { 5773 if (is_dp && !is_cpu_edp)
5832 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
5833 if (has_reduced_clock)
5834 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
5835 reduced_clock.m2;
5836
5837 dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock,
5838 fp);
5839
5840 /* CPU eDP is the only output that doesn't need a PCH PLL of its
5841 * own on pre-Haswell/LPT generation */
5842 if (!is_cpu_edp) {
5843 struct intel_pch_pll *pll;
5844
5845 pll = intel_get_pch_pll(intel_crtc, dpll, fp);
5846 if (pll == NULL) {
5847 DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
5848 pipe);
5849 return -EINVAL;
5850 }
5851 } else
5852 intel_put_pch_pll(intel_crtc);
5853
5854 /* The LVDS pin pair needs to be on before the DPLLs are
5855 * enabled. This is an exception to the general rule that
5856 * mode_set doesn't turn things on.
5857 */
5858 if (is_lvds) {
5859 temp = I915_READ(PCH_LVDS);
5860 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
5861 if (HAS_PCH_CPT(dev)) {
5862 temp &= ~PORT_TRANS_SEL_MASK;
5863 temp |= PORT_TRANS_SEL_CPT(pipe);
5864 } else {
5865 if (pipe == 1)
5866 temp |= LVDS_PIPEB_SELECT;
5867 else
5868 temp &= ~LVDS_PIPEB_SELECT;
5869 }
5870
5871			/* set the corresponding LVDS_BORDER bit */
5872 temp |= dev_priv->lvds_border_bits;
5873 /* Set the B0-B3 data pairs corresponding to whether
5874 * we're going to set the DPLLs for dual-channel mode or
5875 * not.
5876 */
5877 if (clock.p2 == 7)
5878 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
5879 else
5880 temp &= ~(LVDS_B0B3_POWER_UP |
5881 LVDS_CLKB_POWER_UP);
5882
5883 /* It would be nice to set 24 vs 18-bit mode
5884 * (LVDS_A3_POWER_UP) appropriately here, but we need to
5885 * look more thoroughly into how panels behave in the
5886 * two modes.
5887 */
5888 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5889 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5890 temp |= LVDS_HSYNC_POLARITY;
5891 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5892 temp |= LVDS_VSYNC_POLARITY;
5893 I915_WRITE(PCH_LVDS, temp);
5894 }
5895 }
5896
5897 if (is_dp && !is_cpu_edp) {
5898 intel_dp_set_m_n(crtc, mode, adjusted_mode); 5774 intel_dp_set_m_n(crtc, mode, adjusted_mode);
5899 } else {
5900 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5901 /* For non-DP output, clear any trans DP clock recovery
5902 * setting.*/
5903 I915_WRITE(TRANSDATA_M1(pipe), 0);
5904 I915_WRITE(TRANSDATA_N1(pipe), 0);
5905 I915_WRITE(TRANSDPLINK_M1(pipe), 0);
5906 I915_WRITE(TRANSDPLINK_N1(pipe), 0);
5907 }
5908 }
5909 5775
5910 intel_crtc->lowfreq_avail = false; 5776 intel_crtc->lowfreq_avail = false;
5911 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5912 if (intel_crtc->pch_pll) {
5913 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
5914
5915 /* Wait for the clocks to stabilize. */
5916 POSTING_READ(intel_crtc->pch_pll->pll_reg);
5917 udelay(150);
5918
5919 /* The pixel multiplier can only be updated once the
5920 * DPLL is enabled and the clocks are stable.
5921 *
5922 * So write it again.
5923 */
5924 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
5925 }
5926
5927 if (intel_crtc->pch_pll) {
5928 if (is_lvds && has_reduced_clock && i915_powersave) {
5929 I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
5930 intel_crtc->lowfreq_avail = true;
5931 } else {
5932 I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
5933 }
5934 }
5935 }
5936 5777
5937 intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); 5778 intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
5938 5779
5939 if (!is_dp || is_cpu_edp) 5780 if (!is_dp || is_cpu_edp)
5940 ironlake_set_m_n(crtc, mode, adjusted_mode); 5781 ironlake_set_m_n(crtc, mode, adjusted_mode);
5941 5782
5942 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
5943 if (is_cpu_edp)
5944 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
5945
5946 haswell_set_pipeconf(crtc, adjusted_mode, dither); 5783 haswell_set_pipeconf(crtc, adjusted_mode, dither);
5947 5784
5785 intel_set_pipe_csc(crtc, adjusted_mode);
5786
5948 /* Set up the display plane register */ 5787 /* Set up the display plane register */
5949 I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE); 5788 I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
5950 POSTING_READ(DSPCNTR(plane)); 5789 POSTING_READ(DSPCNTR(plane));
5951 5790
5952 ret = intel_pipe_set_base(crtc, x, y, fb); 5791 ret = intel_pipe_set_base(crtc, x, y, fb);
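Haswell's mode-set tail gains the per-pipe color-space converter here: intel_set_pipe_csc() programs the CSC from the adjusted mode, and the primary plane is flagged to route its output through it (the cursor gets the matching CURSOR_PIPE_CSC_ENABLE bit in a later hunk). A minimal sketch of the resulting ordering, using only the names visible in the hunk:

    /* Sketch: program the CSC first, then enable the plane path through
     * it together with the legacy gamma unit, and flush the write. */
    intel_set_pipe_csc(crtc, adjusted_mode);
    I915_WRITE(DSPCNTR(plane),
               DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
    POSTING_READ(DSPCNTR(plane));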
@@ -5972,6 +5811,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
5972 int pipe = intel_crtc->pipe; 5811 int pipe = intel_crtc->pipe;
5973 int ret; 5812 int ret;
5974 5813
5814 if (IS_HASWELL(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
5815 intel_crtc->cpu_transcoder = TRANSCODER_EDP;
5816 else
5817 intel_crtc->cpu_transcoder = pipe;
5818
5975 drm_vblank_pre_modeset(dev, pipe); 5819 drm_vblank_pre_modeset(dev, pipe);
5976 5820
5977 ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode, 5821 ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
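intel_crtc_mode_set() now decides up front which CPU transcoder drives the CRTC: on Haswell, eDP runs on a dedicated EDP transcoder rather than the transcoder matching the pipe, so the pipe number can no longer double as a transcoder index. A sketch of the assumed enum layout (the 0xF value for the out-of-line eDP transcoder is an assumption, not confirmed by this hunk):

    /* Sketch: pipes A-C map 1:1 to transcoders A-C; eDP is out of line. */
    enum transcoder {
            TRANSCODER_A = 0,
            TRANSCODER_B,
            TRANSCODER_C,
            TRANSCODER_EDP = 0xF,
    };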
@@ -6068,6 +5912,7 @@ static void haswell_write_eld(struct drm_connector *connector,
6068 struct drm_i915_private *dev_priv = connector->dev->dev_private; 5912 struct drm_i915_private *dev_priv = connector->dev->dev_private;
6069 uint8_t *eld = connector->eld; 5913 uint8_t *eld = connector->eld;
6070 struct drm_device *dev = crtc->dev; 5914 struct drm_device *dev = crtc->dev;
5915 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6071 uint32_t eldv; 5916 uint32_t eldv;
6072 uint32_t i; 5917 uint32_t i;
6073 int len; 5918 int len;
@@ -6109,6 +5954,7 @@ static void haswell_write_eld(struct drm_connector *connector,
6109 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe)); 5954 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
6110 5955
6111 eldv = AUDIO_ELD_VALID_A << (pipe * 4); 5956 eldv = AUDIO_ELD_VALID_A << (pipe * 4);
5957 intel_crtc->eld_vld = true;
6112 5958
6113 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { 5959 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
6114 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); 5960 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
@@ -6344,6 +6190,8 @@ static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
6344 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); 6190 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
6345 cntl |= CURSOR_MODE_DISABLE; 6191 cntl |= CURSOR_MODE_DISABLE;
6346 } 6192 }
6193 if (IS_HASWELL(dev))
6194 cntl |= CURSOR_PIPE_CSC_ENABLE;
6347 I915_WRITE(CURCNTR_IVB(pipe), cntl); 6195 I915_WRITE(CURCNTR_IVB(pipe), cntl);
6348 6196
6349 intel_crtc->cursor_visible = visible; 6197 intel_crtc->cursor_visible = visible;
@@ -6700,6 +6548,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
6700 if (encoder->crtc) { 6548 if (encoder->crtc) {
6701 crtc = encoder->crtc; 6549 crtc = encoder->crtc;
6702 6550
6551 mutex_lock(&crtc->mutex);
6552
6703 old->dpms_mode = connector->dpms; 6553 old->dpms_mode = connector->dpms;
6704 old->load_detect_temp = false; 6554 old->load_detect_temp = false;
6705 6555
@@ -6729,6 +6579,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
6729 return false; 6579 return false;
6730 } 6580 }
6731 6581
6582 mutex_lock(&crtc->mutex);
6732 intel_encoder->new_crtc = to_intel_crtc(crtc); 6583 intel_encoder->new_crtc = to_intel_crtc(crtc);
6733 to_intel_connector(connector)->new_encoder = intel_encoder; 6584 to_intel_connector(connector)->new_encoder = intel_encoder;
6734 6585
@@ -6756,13 +6607,15 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
6756 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); 6607 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
6757 if (IS_ERR(fb)) { 6608 if (IS_ERR(fb)) {
6758 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); 6609 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
6610 mutex_unlock(&crtc->mutex);
6759 return false; 6611 return false;
6760 } 6612 }
6761 6613
6762 if (!intel_set_mode(crtc, mode, 0, 0, fb)) { 6614 if (intel_set_mode(crtc, mode, 0, 0, fb)) {
6763 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); 6615 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
6764 if (old->release_fb) 6616 if (old->release_fb)
6765 old->release_fb->funcs->destroy(old->release_fb); 6617 old->release_fb->funcs->destroy(old->release_fb);
6618 mutex_unlock(&crtc->mutex);
6766 return false; 6619 return false;
6767 } 6620 }
6768 6621
@@ -6777,27 +6630,31 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
6777 struct intel_encoder *intel_encoder = 6630 struct intel_encoder *intel_encoder =
6778 intel_attached_encoder(connector); 6631 intel_attached_encoder(connector);
6779 struct drm_encoder *encoder = &intel_encoder->base; 6632 struct drm_encoder *encoder = &intel_encoder->base;
6633 struct drm_crtc *crtc = encoder->crtc;
6780 6634
6781 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 6635 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6782 connector->base.id, drm_get_connector_name(connector), 6636 connector->base.id, drm_get_connector_name(connector),
6783 encoder->base.id, drm_get_encoder_name(encoder)); 6637 encoder->base.id, drm_get_encoder_name(encoder));
6784 6638
6785 if (old->load_detect_temp) { 6639 if (old->load_detect_temp) {
6786 struct drm_crtc *crtc = encoder->crtc;
6787
6788 to_intel_connector(connector)->new_encoder = NULL; 6640 to_intel_connector(connector)->new_encoder = NULL;
6789 intel_encoder->new_crtc = NULL; 6641 intel_encoder->new_crtc = NULL;
6790 intel_set_mode(crtc, NULL, 0, 0, NULL); 6642 intel_set_mode(crtc, NULL, 0, 0, NULL);
6791 6643
6792 if (old->release_fb) 6644 if (old->release_fb) {
6793 old->release_fb->funcs->destroy(old->release_fb); 6645 drm_framebuffer_unregister_private(old->release_fb);
6646 drm_framebuffer_unreference(old->release_fb);
6647 }
6794 6648
6649 mutex_unlock(&crtc->mutex);
6795 return; 6650 return;
6796 } 6651 }
6797 6652
6798 /* Switch crtc and encoder back off if necessary */ 6653 /* Switch crtc and encoder back off if necessary */
6799 if (old->dpms_mode != DRM_MODE_DPMS_ON) 6654 if (old->dpms_mode != DRM_MODE_DPMS_ON)
6800 connector->funcs->dpms(connector, old->dpms_mode); 6655 connector->funcs->dpms(connector, old->dpms_mode);
6656
6657 mutex_unlock(&crtc->mutex);
6801} 6658}
6802 6659
6803/* Returns the clock of the currently programmed mode of the given pipe. */ 6660/* Returns the clock of the currently programmed mode of the given pipe. */
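Load detection now runs under the per-CRTC lock from the KMS locking rework: intel_get_load_detect_pipe() takes crtc->mutex (dropping it again on every failure path), and the matching unlock lives in intel_release_load_detect_pipe(). The lock is therefore held across the caller's whole probe. A sketch of the expected pairing; probe_connector_status() is a hypothetical stand-in for the caller's detection logic:

    /* Sketch: the crtc->mutex taken by the get side stays held until the
     * release side runs, covering the forced mode set in between. */
    if (intel_get_load_detect_pipe(connector, &load_detect_mode, &tmp)) {
            status = probe_connector_status(connector); /* hypothetical */
            intel_release_load_detect_pipe(connector, &tmp); /* unlocks */
    }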
@@ -6993,11 +6850,6 @@ void intel_mark_busy(struct drm_device *dev)
6993 6850
6994void intel_mark_idle(struct drm_device *dev) 6851void intel_mark_idle(struct drm_device *dev)
6995{ 6852{
6996}
6997
6998void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
6999{
7000 struct drm_device *dev = obj->base.dev;
7001 struct drm_crtc *crtc; 6853 struct drm_crtc *crtc;
7002 6854
7003 if (!i915_powersave) 6855 if (!i915_powersave)
@@ -7007,12 +6859,11 @@ void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
7007 if (!crtc->fb) 6859 if (!crtc->fb)
7008 continue; 6860 continue;
7009 6861
7010 if (to_intel_framebuffer(crtc->fb)->obj == obj) 6862 intel_decrease_pllclock(crtc);
7011 intel_increase_pllclock(crtc);
7012 } 6863 }
7013} 6864}
7014 6865
7015void intel_mark_fb_idle(struct drm_i915_gem_object *obj) 6866void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
7016{ 6867{
7017 struct drm_device *dev = obj->base.dev; 6868 struct drm_device *dev = obj->base.dev;
7018 struct drm_crtc *crtc; 6869 struct drm_crtc *crtc;
@@ -7025,7 +6876,7 @@ void intel_mark_fb_idle(struct drm_i915_gem_object *obj)
7025 continue; 6876 continue;
7026 6877
7027 if (to_intel_framebuffer(crtc->fb)->obj == obj) 6878 if (to_intel_framebuffer(crtc->fb)->obj == obj)
7028 intel_decrease_pllclock(crtc); 6879 intel_increase_pllclock(crtc);
7029 } 6880 }
7030} 6881}
7031 6882
@@ -7109,9 +6960,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
7109 6960
7110 obj = work->old_fb_obj; 6961 obj = work->old_fb_obj;
7111 6962
7112 atomic_clear_mask(1 << intel_crtc->plane, 6963 wake_up_all(&dev_priv->pending_flip_queue);
7113 &obj->pending_flip.counter);
7114 wake_up(&dev_priv->pending_flip_queue);
7115 6964
7116 queue_work(dev_priv->wq, &work->work); 6965 queue_work(dev_priv->wq, &work->work);
7117 6966
@@ -7474,11 +7323,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
7474 7323
7475 work->enable_stall_check = true; 7324 work->enable_stall_check = true;
7476 7325
7477 /* Block clients from rendering to the new back buffer until
7478 * the flip occurs and the object is no longer visible.
7479 */
7480 atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
7481 atomic_inc(&intel_crtc->unpin_work_count); 7326 atomic_inc(&intel_crtc->unpin_work_count);
7327 intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
7482 7328
7483 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj); 7329 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
7484 if (ret) 7330 if (ret)
@@ -7494,7 +7340,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
7494 7340
7495cleanup_pending: 7341cleanup_pending:
7496 atomic_dec(&intel_crtc->unpin_work_count); 7342 atomic_dec(&intel_crtc->unpin_work_count);
7497 atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
7498 drm_gem_object_unreference(&work->old_fb_obj->base); 7343 drm_gem_object_unreference(&work->old_fb_obj->base);
7499 drm_gem_object_unreference(&obj->base); 7344 drm_gem_object_unreference(&obj->base);
7500 mutex_unlock(&dev->struct_mutex); 7345 mutex_unlock(&dev->struct_mutex);
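The flip path stops throttling clients: the per-plane pending_flip bits on the old framebuffer's GEM object are gone, so rendering to the new back buffer is no longer blocked until the flip completes, and the finish path now wakes everyone on dev_priv->pending_flip_queue instead. Anything that still has to wait for an outstanding flip sleeps on that queue; a sketch, with the wait condition as an assumption:

    /* Sketch: wait for a CRTC's outstanding flip by sleeping on the flip
     * queue until its unpin work has been consumed (assumed condition;
     * the real helper may check more state than this). */
    wait_event(dev_priv->pending_flip_queue,
               intel_crtc->unpin_work == NULL);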
@@ -7514,7 +7359,6 @@ free_work:
7514static struct drm_crtc_helper_funcs intel_helper_funcs = { 7359static struct drm_crtc_helper_funcs intel_helper_funcs = {
7515 .mode_set_base_atomic = intel_pipe_set_base_atomic, 7360 .mode_set_base_atomic = intel_pipe_set_base_atomic,
7516 .load_lut = intel_crtc_load_lut, 7361 .load_lut = intel_crtc_load_lut,
7517 .disable = intel_crtc_noop,
7518}; 7362};
7519 7363
7520bool intel_encoder_check_is_cloned(struct intel_encoder *encoder) 7364bool intel_encoder_check_is_cloned(struct intel_encoder *encoder)
@@ -7904,16 +7748,21 @@ intel_modeset_check_state(struct drm_device *dev)
7904 } 7748 }
7905} 7749}
7906 7750
7907bool intel_set_mode(struct drm_crtc *crtc, 7751int intel_set_mode(struct drm_crtc *crtc,
7908 struct drm_display_mode *mode, 7752 struct drm_display_mode *mode,
7909 int x, int y, struct drm_framebuffer *fb) 7753 int x, int y, struct drm_framebuffer *fb)
7910{ 7754{
7911 struct drm_device *dev = crtc->dev; 7755 struct drm_device *dev = crtc->dev;
7912 drm_i915_private_t *dev_priv = dev->dev_private; 7756 drm_i915_private_t *dev_priv = dev->dev_private;
7913 struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode; 7757 struct drm_display_mode *adjusted_mode, *saved_mode, *saved_hwmode;
7914 struct intel_crtc *intel_crtc; 7758 struct intel_crtc *intel_crtc;
7915 unsigned disable_pipes, prepare_pipes, modeset_pipes; 7759 unsigned disable_pipes, prepare_pipes, modeset_pipes;
7916 bool ret = true; 7760 int ret = 0;
7761
7762 saved_mode = kmalloc(2 * sizeof(*saved_mode), GFP_KERNEL);
7763 if (!saved_mode)
7764 return -ENOMEM;
7765 saved_hwmode = saved_mode + 1;
7917 7766
7918 intel_modeset_affected_pipes(crtc, &modeset_pipes, 7767 intel_modeset_affected_pipes(crtc, &modeset_pipes,
7919 &prepare_pipes, &disable_pipes); 7768 &prepare_pipes, &disable_pipes);
@@ -7924,8 +7773,8 @@ bool intel_set_mode(struct drm_crtc *crtc,
7924 for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc) 7773 for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
7925 intel_crtc_disable(&intel_crtc->base); 7774 intel_crtc_disable(&intel_crtc->base);
7926 7775
7927 saved_hwmode = crtc->hwmode; 7776 *saved_hwmode = crtc->hwmode;
7928 saved_mode = crtc->mode; 7777 *saved_mode = crtc->mode;
7929 7778
7930 /* Hack: Because we don't (yet) support global modeset on multiple 7779 /* Hack: Because we don't (yet) support global modeset on multiple
7931 * crtcs, we don't keep track of the new mode for more than one crtc. 7780 * crtcs, we don't keep track of the new mode for more than one crtc.
@@ -7936,7 +7785,8 @@ bool intel_set_mode(struct drm_crtc *crtc,
7936 if (modeset_pipes) { 7785 if (modeset_pipes) {
7937 adjusted_mode = intel_modeset_adjusted_mode(crtc, mode); 7786 adjusted_mode = intel_modeset_adjusted_mode(crtc, mode);
7938 if (IS_ERR(adjusted_mode)) { 7787 if (IS_ERR(adjusted_mode)) {
7939 return false; 7788 ret = PTR_ERR(adjusted_mode);
7789 goto out;
7940 } 7790 }
7941 } 7791 }
7942 7792
@@ -7962,11 +7812,11 @@ bool intel_set_mode(struct drm_crtc *crtc,
7962 * on the DPLL. 7812 * on the DPLL.
7963 */ 7813 */
7964 for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) { 7814 for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
7965 ret = !intel_crtc_mode_set(&intel_crtc->base, 7815 ret = intel_crtc_mode_set(&intel_crtc->base,
7966 mode, adjusted_mode, 7816 mode, adjusted_mode,
7967 x, y, fb); 7817 x, y, fb);
7968 if (!ret) 7818 if (ret)
7969 goto done; 7819 goto done;
7970 } 7820 }
7971 7821
7972 /* Now enable the clocks, plane, pipe, and connectors that we set up. */ 7822 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -7987,16 +7837,23 @@ bool intel_set_mode(struct drm_crtc *crtc,
7987 /* FIXME: add subpixel order */ 7837 /* FIXME: add subpixel order */
7988done: 7838done:
7989 drm_mode_destroy(dev, adjusted_mode); 7839 drm_mode_destroy(dev, adjusted_mode);
7990 if (!ret && crtc->enabled) { 7840 if (ret && crtc->enabled) {
7991 crtc->hwmode = saved_hwmode; 7841 crtc->hwmode = *saved_hwmode;
7992 crtc->mode = saved_mode; 7842 crtc->mode = *saved_mode;
7993 } else { 7843 } else {
7994 intel_modeset_check_state(dev); 7844 intel_modeset_check_state(dev);
7995 } 7845 }
7996 7846
7847out:
7848 kfree(saved_mode);
7997 return ret; 7849 return ret;
7998} 7850}
7999 7851
7852void intel_crtc_restore_mode(struct drm_crtc *crtc)
7853{
7854 intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb);
7855}
7856
8000#undef for_each_intel_crtc_masked 7857#undef for_each_intel_crtc_masked
8001 7858
8002static void intel_set_config_free(struct intel_set_config *config) 7859static void intel_set_config_free(struct intel_set_config *config)
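intel_set_mode() is converted from returning bool success to a kernel-style 0/-errno, and the saved display modes move to a single heap allocation, presumably to keep the stack frame small (two struct drm_display_mode copies are sizable). Every caller flips its test accordingly; the pattern, as used by the set_config hunk further down:

    /* Converted caller pattern: 0 on success, negative errno on failure. */
    ret = intel_set_mode(set->crtc, set->mode, set->x, set->y, set->fb);
    if (ret) {
            DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n",
                      set->crtc->base.id, ret);
            goto fail;
    }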
@@ -8109,7 +7966,7 @@ intel_modeset_stage_output_state(struct drm_device *dev,
8109 struct intel_encoder *encoder; 7966 struct intel_encoder *encoder;
8110 int count, ro; 7967 int count, ro;
8111 7968
8112 /* The upper layers ensure that we either disabl a crtc or have a list 7969 /* The upper layers ensure that we either disable a crtc or have a list
8113 * of connectors. For paranoia, double-check this. */ 7970 * of connectors. For paranoia, double-check this. */
8114 WARN_ON(!set->fb && (set->num_connectors != 0)); 7971 WARN_ON(!set->fb && (set->num_connectors != 0));
8115 WARN_ON(set->fb && (set->num_connectors == 0)); 7972 WARN_ON(set->fb && (set->num_connectors == 0));
@@ -8211,14 +8068,9 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
8211 BUG_ON(!set->crtc); 8068 BUG_ON(!set->crtc);
8212 BUG_ON(!set->crtc->helper_private); 8069 BUG_ON(!set->crtc->helper_private);
8213 8070
8214 if (!set->mode) 8071 /* Enforce sane interface api - has been abused by the fb helper. */
8215 set->fb = NULL; 8072 BUG_ON(!set->mode && set->fb);
8216 8073 BUG_ON(set->fb && set->num_connectors == 0);
8217 /* The fb helper likes to play gross jokes with ->mode_set_config.
8218 * Unfortunately the crtc helper doesn't do much at all for this case,
8219 * so we have to cope with this madness until the fb helper is fixed up. */
8220 if (set->fb && set->num_connectors == 0)
8221 return 0;
8222 8074
8223 if (set->fb) { 8075 if (set->fb) {
8224 DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n", 8076 DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
@@ -8262,11 +8114,11 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
8262 drm_mode_debug_printmodeline(set->mode); 8114 drm_mode_debug_printmodeline(set->mode);
8263 } 8115 }
8264 8116
8265 if (!intel_set_mode(set->crtc, set->mode, 8117 ret = intel_set_mode(set->crtc, set->mode,
8266 set->x, set->y, set->fb)) { 8118 set->x, set->y, set->fb);
8267 DRM_ERROR("failed to set mode on [CRTC:%d]\n", 8119 if (ret) {
8268 set->crtc->base.id); 8120 DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n",
8269 ret = -EINVAL; 8121 set->crtc->base.id, ret);
8270 goto fail; 8122 goto fail;
8271 } 8123 }
8272 } else if (config->fb_changed) { 8124 } else if (config->fb_changed) {
@@ -8283,8 +8135,8 @@ fail:
8283 8135
8284 /* Try to restore the config */ 8136 /* Try to restore the config */
8285 if (config->mode_changed && 8137 if (config->mode_changed &&
8286 !intel_set_mode(save_set.crtc, save_set.mode, 8138 intel_set_mode(save_set.crtc, save_set.mode,
8287 save_set.x, save_set.y, save_set.fb)) 8139 save_set.x, save_set.y, save_set.fb))
8288 DRM_ERROR("failed to restore config after modeset failure\n"); 8140 DRM_ERROR("failed to restore config after modeset failure\n");
8289 8141
8290out_config: 8142out_config:
@@ -8303,7 +8155,7 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
8303 8155
8304static void intel_cpu_pll_init(struct drm_device *dev) 8156static void intel_cpu_pll_init(struct drm_device *dev)
8305{ 8157{
8306 if (IS_HASWELL(dev)) 8158 if (HAS_DDI(dev))
8307 intel_ddi_pll_init(dev); 8159 intel_ddi_pll_init(dev);
8308} 8160}
8309 8161
@@ -8439,11 +8291,10 @@ static void intel_setup_outputs(struct drm_device *dev)
8439 I915_WRITE(PFIT_CONTROL, 0); 8291 I915_WRITE(PFIT_CONTROL, 0);
8440 } 8292 }
8441 8293
8442 if (!(IS_HASWELL(dev) && 8294 if (!(HAS_DDI(dev) && (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
8443 (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
8444 intel_crt_init(dev); 8295 intel_crt_init(dev);
8445 8296
8446 if (IS_HASWELL(dev)) { 8297 if (HAS_DDI(dev)) {
8447 int found; 8298 int found;
8448 8299
8449 /* Haswell uses DDI functions to detect digital outputs */ 8300 /* Haswell uses DDI functions to detect digital outputs */
@@ -8490,23 +8341,18 @@ static void intel_setup_outputs(struct drm_device *dev)
8490 if (I915_READ(PCH_DP_D) & DP_DETECTED) 8341 if (I915_READ(PCH_DP_D) & DP_DETECTED)
8491 intel_dp_init(dev, PCH_DP_D, PORT_D); 8342 intel_dp_init(dev, PCH_DP_D, PORT_D);
8492 } else if (IS_VALLEYVIEW(dev)) { 8343 } else if (IS_VALLEYVIEW(dev)) {
8493 int found;
8494
8495 /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */ 8344 /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
8496 if (I915_READ(DP_C) & DP_DETECTED) 8345 if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
8497 intel_dp_init(dev, DP_C, PORT_C); 8346 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
8498 8347
8499 if (I915_READ(SDVOB) & PORT_DETECTED) { 8348 if (I915_READ(VLV_DISPLAY_BASE + SDVOB) & PORT_DETECTED) {
8500 /* SDVOB multiplex with HDMIB */ 8349 intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOB, PORT_B);
8501 found = intel_sdvo_init(dev, SDVOB, true); 8350 if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
8502 if (!found) 8351 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
8503 intel_hdmi_init(dev, SDVOB, PORT_B);
8504 if (!found && (I915_READ(DP_B) & DP_DETECTED))
8505 intel_dp_init(dev, DP_B, PORT_B);
8506 } 8352 }
8507 8353
8508 if (I915_READ(SDVOC) & PORT_DETECTED) 8354 if (I915_READ(VLV_DISPLAY_BASE + SDVOC) & PORT_DETECTED)
8509 intel_hdmi_init(dev, SDVOC, PORT_C); 8355 intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOC, PORT_C);
8510 8356
8511 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { 8357 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
8512 bool found = false; 8358 bool found = false;
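Valleyview output probing switches to display-offset addressing: the DP, HDMI and SDVO registers are accessed at VLV_DISPLAY_BASE plus the legacy offset, and the port B SDVO probe is dropped in favor of straight HDMI-then-DP detection. A sketch of the addressing assumption (0x180000 is believed to be the driver's value for VLV_DISPLAY_BASE, but is not confirmed by this hunk):

    /* Sketch: VLV display registers live behind a fixed MMIO offset. */
    #define VLV_DISPLAY_BASE 0x180000
    if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
            intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);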
@@ -8666,14 +8512,15 @@ int intel_framebuffer_init(struct drm_device *dev,
8666 if (mode_cmd->offsets[0] != 0) 8512 if (mode_cmd->offsets[0] != 0)
8667 return -EINVAL; 8513 return -EINVAL;
8668 8514
8515 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
8516 intel_fb->obj = obj;
8517
8669 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); 8518 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
8670 if (ret) { 8519 if (ret) {
8671 DRM_ERROR("framebuffer init failed %d\n", ret); 8520 DRM_ERROR("framebuffer init failed %d\n", ret);
8672 return ret; 8521 return ret;
8673 } 8522 }
8674 8523
8675 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
8676 intel_fb->obj = obj;
8677 return 0; 8524 return 0;
8678} 8525}
8679 8526
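intel_framebuffer_init() now fills in the framebuffer struct and attaches the GEM object before calling drm_framebuffer_init(): under the reworked framebuffer lifetime rules, registration can expose the fb to lookups immediately, so it has to be fully constructed first. The ordering constraint in short:

    /* Publish only fully-constructed framebuffers. */
    drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
    intel_fb->obj = obj;                /* fb complete before init */
    ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
    if (ret)
            return ret;                 /* nothing was published */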
@@ -8703,7 +8550,7 @@ static void intel_init_display(struct drm_device *dev)
8703 struct drm_i915_private *dev_priv = dev->dev_private; 8550 struct drm_i915_private *dev_priv = dev->dev_private;
8704 8551
8705 /* We always want a DPMS function */ 8552 /* We always want a DPMS function */
8706 if (IS_HASWELL(dev)) { 8553 if (HAS_DDI(dev)) {
8707 dev_priv->display.crtc_mode_set = haswell_crtc_mode_set; 8554 dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
8708 dev_priv->display.crtc_enable = haswell_crtc_enable; 8555 dev_priv->display.crtc_enable = haswell_crtc_enable;
8709 dev_priv->display.crtc_disable = haswell_crtc_disable; 8556 dev_priv->display.crtc_disable = haswell_crtc_disable;
@@ -8765,8 +8612,9 @@ static void intel_init_display(struct drm_device *dev)
8765 } else if (IS_HASWELL(dev)) { 8612 } else if (IS_HASWELL(dev)) {
8766 dev_priv->display.fdi_link_train = hsw_fdi_link_train; 8613 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
8767 dev_priv->display.write_eld = haswell_write_eld; 8614 dev_priv->display.write_eld = haswell_write_eld;
8768 } else 8615 dev_priv->display.modeset_global_resources =
8769 dev_priv->display.update_wm = NULL; 8616 haswell_modeset_global_resources;
8617 }
8770 } else if (IS_G4X(dev)) { 8618 } else if (IS_G4X(dev)) {
8771 dev_priv->display.write_eld = g4x_write_eld; 8619 dev_priv->display.write_eld = g4x_write_eld;
8772 } 8620 }
@@ -8888,6 +8736,18 @@ static struct intel_quirk intel_quirks[] = {
8888 8736
8889 /* Acer Aspire 5734Z must invert backlight brightness */ 8737 /* Acer Aspire 5734Z must invert backlight brightness */
8890 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness }, 8738 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
8739
8740 /* Acer/eMachines G725 */
8741 { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
8742
8743 /* Acer/eMachines e725 */
8744 { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
8745
8746 /* Acer/Packard Bell NCL20 */
8747 { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
8748
8749 /* Acer Aspire 4736Z */
8750 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
8891}; 8751};
8892 8752
8893static void intel_init_quirks(struct drm_device *dev) 8753static void intel_init_quirks(struct drm_device *dev)
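Four more GM45-era Acer machines (PCI device 0x2a42, subsystem vendor 0x1025) join the inverted-backlight quirk list. Each entry matches on device ID plus subsystem vendor/device and names a hook to run; the assumed entry layout, inferred from the initializers above:

    /* Assumed shape of the quirk table entries. */
    struct intel_quirk {
            int device;                /* PCI device ID, e.g. 0x2a42 */
            int subsystem_vendor;      /* e.g. 0x1025 (Acer) */
            int subsystem_device;
            void (*hook)(struct drm_device *dev);
    };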
@@ -8916,12 +8776,7 @@ static void i915_disable_vga(struct drm_device *dev)
8916{ 8776{
8917 struct drm_i915_private *dev_priv = dev->dev_private; 8777 struct drm_i915_private *dev_priv = dev->dev_private;
8918 u8 sr1; 8778 u8 sr1;
8919 u32 vga_reg; 8779 u32 vga_reg = i915_vgacntrl_reg(dev);
8920
8921 if (HAS_PCH_SPLIT(dev))
8922 vga_reg = CPU_VGACNTRL;
8923 else
8924 vga_reg = VGACNTRL;
8925 8780
8926 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); 8781 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
8927 outb(SR01, VGA_SR_INDEX); 8782 outb(SR01, VGA_SR_INDEX);
@@ -8936,10 +8791,7 @@ static void i915_disable_vga(struct drm_device *dev)
8936 8791
8937void intel_modeset_init_hw(struct drm_device *dev) 8792void intel_modeset_init_hw(struct drm_device *dev)
8938{ 8793{
8939 /* We attempt to init the necessary power wells early in the initialization 8794 intel_init_power_well(dev);
8940 * time, so the subsystems that expect power to be enabled can work.
8941 */
8942 intel_init_power_wells(dev);
8943 8795
8944 intel_prepare_ddi(dev); 8796 intel_prepare_ddi(dev);
8945 8797
@@ -8981,7 +8833,7 @@ void intel_modeset_init(struct drm_device *dev)
8981 dev->mode_config.max_width = 8192; 8833 dev->mode_config.max_width = 8192;
8982 dev->mode_config.max_height = 8192; 8834 dev->mode_config.max_height = 8192;
8983 } 8835 }
8984 dev->mode_config.fb_base = dev_priv->mm.gtt_base_addr; 8836 dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
8985 8837
8986 DRM_DEBUG_KMS("%d display pipe%s available.\n", 8838 DRM_DEBUG_KMS("%d display pipe%s available.\n",
8987 dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : ""); 8839 dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
@@ -8999,6 +8851,9 @@ void intel_modeset_init(struct drm_device *dev)
8999 /* Just disable it once at startup */ 8851 /* Just disable it once at startup */
9000 i915_disable_vga(dev); 8852 i915_disable_vga(dev);
9001 intel_setup_outputs(dev); 8853 intel_setup_outputs(dev);
8854
8855 /* Just in case the BIOS is doing something questionable. */
8856 intel_disable_fbc(dev);
9002} 8857}
9003 8858
9004static void 8859static void
@@ -9180,20 +9035,14 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
9180 * the crtc fixup. */ 9035 * the crtc fixup. */
9181} 9036}
9182 9037
9183static void i915_redisable_vga(struct drm_device *dev) 9038void i915_redisable_vga(struct drm_device *dev)
9184{ 9039{
9185 struct drm_i915_private *dev_priv = dev->dev_private; 9040 struct drm_i915_private *dev_priv = dev->dev_private;
9186 u32 vga_reg; 9041 u32 vga_reg = i915_vgacntrl_reg(dev);
9187
9188 if (HAS_PCH_SPLIT(dev))
9189 vga_reg = CPU_VGACNTRL;
9190 else
9191 vga_reg = VGACNTRL;
9192 9042
9193 if (I915_READ(vga_reg) != VGA_DISP_DISABLE) { 9043 if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
9194 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); 9044 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
9195 I915_WRITE(vga_reg, VGA_DISP_DISABLE); 9045 i915_disable_vga(dev);
9196 POSTING_READ(vga_reg);
9197 } 9046 }
9198} 9047}
9199 9048
@@ -9209,7 +9058,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
9209 struct intel_encoder *encoder; 9058 struct intel_encoder *encoder;
9210 struct intel_connector *connector; 9059 struct intel_connector *connector;
9211 9060
9212 if (IS_HASWELL(dev)) { 9061 if (HAS_DDI(dev)) {
9213 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); 9062 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
9214 9063
9215 if (tmp & TRANS_DDI_FUNC_ENABLE) { 9064 if (tmp & TRANS_DDI_FUNC_ENABLE) {
@@ -9250,7 +9099,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
9250 crtc->active ? "enabled" : "disabled"); 9099 crtc->active ? "enabled" : "disabled");
9251 } 9100 }
9252 9101
9253 if (IS_HASWELL(dev)) 9102 if (HAS_DDI(dev))
9254 intel_ddi_setup_hw_pll_state(dev); 9103 intel_ddi_setup_hw_pll_state(dev);
9255 9104
9256 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 9105 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
@@ -9301,9 +9150,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
9301 9150
9302 if (force_restore) { 9151 if (force_restore) {
9303 for_each_pipe(pipe) { 9152 for_each_pipe(pipe) {
9304 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 9153 intel_crtc_restore_mode(dev_priv->pipe_to_crtc_mapping[pipe]);
9305 intel_set_mode(&crtc->base, &crtc->base.mode,
9306 crtc->base.x, crtc->base.y, crtc->base.fb);
9307 } 9154 }
9308 9155
9309 i915_redisable_vga(dev); 9156 i915_redisable_vga(dev);
@@ -9367,6 +9214,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
9367 flush_scheduled_work(); 9214 flush_scheduled_work();
9368 9215
9369 drm_mode_config_cleanup(dev); 9216 drm_mode_config_cleanup(dev);
9217
9218 intel_cleanup_overlay(dev);
9370} 9219}
9371 9220
9372/* 9221/*
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index fb3715b4b09d..f61cb7998c72 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -148,15 +148,6 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp)
148 return max_link_bw; 148 return max_link_bw;
149} 149}
150 150
151static int
152intel_dp_link_clock(uint8_t link_bw)
153{
154 if (link_bw == DP_LINK_BW_2_7)
155 return 270000;
156 else
157 return 162000;
158}
159
160/* 151/*
161 * The units on the numbers in the next two are... bizarre. Examples will 152 * The units on the numbers in the next two are... bizarre. Examples will
162 * make it clearer; this one parallels an example in the eDP spec. 153 * make it clearer; this one parallels an example in the eDP spec.
@@ -191,7 +182,8 @@ intel_dp_adjust_dithering(struct intel_dp *intel_dp,
191 struct drm_display_mode *mode, 182 struct drm_display_mode *mode,
192 bool adjust_mode) 183 bool adjust_mode)
193{ 184{
194 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); 185 int max_link_clock =
186 drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
195 int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd); 187 int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
196 int max_rate, mode_rate; 188 int max_rate, mode_rate;
197 189
@@ -330,6 +322,48 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
330 } 322 }
331} 323}
332 324
325static uint32_t
326intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
327{
328 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
329 struct drm_device *dev = intel_dig_port->base.base.dev;
330 struct drm_i915_private *dev_priv = dev->dev_private;
331 uint32_t ch_ctl = intel_dp->output_reg + 0x10;
332 uint32_t status;
333 bool done;
334
335 if (IS_HASWELL(dev)) {
336 switch (intel_dig_port->port) {
337 case PORT_A:
338 ch_ctl = DPA_AUX_CH_CTL;
339 break;
340 case PORT_B:
341 ch_ctl = PCH_DPB_AUX_CH_CTL;
342 break;
343 case PORT_C:
344 ch_ctl = PCH_DPC_AUX_CH_CTL;
345 break;
346 case PORT_D:
347 ch_ctl = PCH_DPD_AUX_CH_CTL;
348 break;
349 default:
350 BUG();
351 }
352 }
353
354#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
355 if (has_aux_irq)
356 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10);
357 else
358 done = wait_for_atomic(C, 10) == 0;
359 if (!done)
360 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
361 has_aux_irq);
362#undef C
363
364 return status;
365}
366
333static int 367static int
334intel_dp_aux_ch(struct intel_dp *intel_dp, 368intel_dp_aux_ch(struct intel_dp *intel_dp,
335 uint8_t *send, int send_bytes, 369 uint8_t *send, int send_bytes,
@@ -341,11 +375,17 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
341 struct drm_i915_private *dev_priv = dev->dev_private; 375 struct drm_i915_private *dev_priv = dev->dev_private;
342 uint32_t ch_ctl = output_reg + 0x10; 376 uint32_t ch_ctl = output_reg + 0x10;
343 uint32_t ch_data = ch_ctl + 4; 377 uint32_t ch_data = ch_ctl + 4;
344 int i; 378 int i, ret, recv_bytes;
345 int recv_bytes;
346 uint32_t status; 379 uint32_t status;
347 uint32_t aux_clock_divider; 380 uint32_t aux_clock_divider;
348 int try, precharge; 381 int try, precharge;
382 bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
383
384 /* dp aux is extremely sensitive to irq latency, hence request the
385 * lowest possible wakeup latency to prevent the cpu from going into
386 * deep sleep states.
387 */
388 pm_qos_update_request(&dev_priv->pm_qos, 0);
349 389
350 if (IS_HASWELL(dev)) { 390 if (IS_HASWELL(dev)) {
351 switch (intel_dig_port->port) { 391 switch (intel_dig_port->port) {
@@ -379,7 +419,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
379 * clock divider. 419 * clock divider.
380 */ 420 */
381 if (is_cpu_edp(intel_dp)) { 421 if (is_cpu_edp(intel_dp)) {
382 if (IS_HASWELL(dev)) 422 if (HAS_DDI(dev))
383 aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1; 423 aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
384 else if (IS_VALLEYVIEW(dev)) 424 else if (IS_VALLEYVIEW(dev))
385 aux_clock_divider = 100; 425 aux_clock_divider = 100;
@@ -399,7 +439,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
399 439
400 /* Try to wait for any previous AUX channel activity */ 440 /* Try to wait for any previous AUX channel activity */
401 for (try = 0; try < 3; try++) { 441 for (try = 0; try < 3; try++) {
402 status = I915_READ(ch_ctl); 442 status = I915_READ_NOTRACE(ch_ctl);
403 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) 443 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
404 break; 444 break;
405 msleep(1); 445 msleep(1);
@@ -408,7 +448,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
408 if (try == 3) { 448 if (try == 3) {
409 WARN(1, "dp_aux_ch not started status 0x%08x\n", 449 WARN(1, "dp_aux_ch not started status 0x%08x\n",
410 I915_READ(ch_ctl)); 450 I915_READ(ch_ctl));
411 return -EBUSY; 451 ret = -EBUSY;
452 goto out;
412 } 453 }
413 454
414 /* Must try at least 3 times according to DP spec */ 455 /* Must try at least 3 times according to DP spec */
@@ -421,6 +462,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
421 /* Send the command and wait for it to complete */ 462 /* Send the command and wait for it to complete */
422 I915_WRITE(ch_ctl, 463 I915_WRITE(ch_ctl,
423 DP_AUX_CH_CTL_SEND_BUSY | 464 DP_AUX_CH_CTL_SEND_BUSY |
465 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
424 DP_AUX_CH_CTL_TIME_OUT_400us | 466 DP_AUX_CH_CTL_TIME_OUT_400us |
425 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 467 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
426 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | 468 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
@@ -428,12 +470,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
428 DP_AUX_CH_CTL_DONE | 470 DP_AUX_CH_CTL_DONE |
429 DP_AUX_CH_CTL_TIME_OUT_ERROR | 471 DP_AUX_CH_CTL_TIME_OUT_ERROR |
430 DP_AUX_CH_CTL_RECEIVE_ERROR); 472 DP_AUX_CH_CTL_RECEIVE_ERROR);
431 for (;;) { 473
432 status = I915_READ(ch_ctl); 474 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
433 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
434 break;
435 udelay(100);
436 }
437 475
438 /* Clear done status and any errors */ 476 /* Clear done status and any errors */
439 I915_WRITE(ch_ctl, 477 I915_WRITE(ch_ctl,
@@ -451,7 +489,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
451 489
452 if ((status & DP_AUX_CH_CTL_DONE) == 0) { 490 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
453 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status); 491 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
454 return -EBUSY; 492 ret = -EBUSY;
493 goto out;
455 } 494 }
456 495
457 /* Check for timeout or receive error. 496 /* Check for timeout or receive error.
@@ -459,14 +498,16 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
459 */ 498 */
460 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { 499 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
461 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status); 500 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
462 return -EIO; 501 ret = -EIO;
502 goto out;
463 } 503 }
464 504
465 /* Timeouts occur when the device isn't connected, so they're 505 /* Timeouts occur when the device isn't connected, so they're
466 * "normal" -- don't fill the kernel log with these */ 506 * "normal" -- don't fill the kernel log with these */
467 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { 507 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
468 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status); 508 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
469 return -ETIMEDOUT; 509 ret = -ETIMEDOUT;
510 goto out;
470 } 511 }
471 512
472 /* Unload any bytes sent back from the other side */ 513 /* Unload any bytes sent back from the other side */
@@ -479,7 +520,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
479 unpack_aux(I915_READ(ch_data + i), 520 unpack_aux(I915_READ(ch_data + i),
480 recv + i, recv_bytes - i); 521 recv + i, recv_bytes - i);
481 522
482 return recv_bytes; 523 ret = recv_bytes;
524out:
525 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
526
527 return ret;
483} 528}
484 529
485/* Write data to the aux channel in native mode */ 530/* Write data to the aux channel in native mode */
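AUX transfers stop busy-polling for completion: on gen5+ (except Valleyview) the DONE interrupt wakes a sleeper on gmbus_wait_queue behind a timeout guard, while older parts keep an atomic poll, both hidden in intel_dp_aux_wait_done(). Because AUX is extremely latency-sensitive, the whole transaction is bracketed by a PM QoS request that pins CPU wakeup latency to zero. The bracketing, condensed (do_aux_transfer() is a hypothetical stand-in for the body of intel_dp_aux_ch()):

    /* Forbid deep C-states for the duration of the transfer, and restore
     * the default on every exit path via the common 'out' label. */
    pm_qos_update_request(&dev_priv->pm_qos, 0);
    ret = do_aux_transfer(intel_dp, send, send_bytes, recv, recv_size);
    pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
    return ret;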
@@ -718,16 +763,35 @@ intel_dp_mode_fixup(struct drm_encoder *encoder,
718 return false; 763 return false;
719 764
720 bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24; 765 bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
766
767 if (intel_dp->color_range_auto) {
768 /*
769 * See:
770 * CEA-861-E - 5.1 Default Encoding Parameters
771 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
772 */
773 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
774 intel_dp->color_range = DP_COLOR_RANGE_16_235;
775 else
776 intel_dp->color_range = 0;
777 }
778
779 if (intel_dp->color_range)
780 adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;
781
721 mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp); 782 mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
722 783
723 for (clock = 0; clock <= max_clock; clock++) { 784 for (clock = 0; clock <= max_clock; clock++) {
724 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { 785 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
725 int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); 786 int link_bw_clock =
787 drm_dp_bw_code_to_link_rate(bws[clock]);
788 int link_avail = intel_dp_max_data_rate(link_bw_clock,
789 lane_count);
726 790
727 if (mode_rate <= link_avail) { 791 if (mode_rate <= link_avail) {
728 intel_dp->link_bw = bws[clock]; 792 intel_dp->link_bw = bws[clock];
729 intel_dp->lane_count = lane_count; 793 intel_dp->lane_count = lane_count;
730 adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); 794 adjusted_mode->clock = link_bw_clock;
731 DRM_DEBUG_KMS("DP link bw %02x lane " 795 DRM_DEBUG_KMS("DP link bw %02x lane "
732 "count %d clock %d bpp %d\n", 796 "count %d clock %d bpp %d\n",
733 intel_dp->link_bw, intel_dp->lane_count, 797 intel_dp->link_bw, intel_dp->lane_count,
@@ -742,39 +806,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder,
742 return false; 806 return false;
743} 807}
744 808
745struct intel_dp_m_n {
746 uint32_t tu;
747 uint32_t gmch_m;
748 uint32_t gmch_n;
749 uint32_t link_m;
750 uint32_t link_n;
751};
752
753static void
754intel_reduce_ratio(uint32_t *num, uint32_t *den)
755{
756 while (*num > 0xffffff || *den > 0xffffff) {
757 *num >>= 1;
758 *den >>= 1;
759 }
760}
761
762static void
763intel_dp_compute_m_n(int bpp,
764 int nlanes,
765 int pixel_clock,
766 int link_clock,
767 struct intel_dp_m_n *m_n)
768{
769 m_n->tu = 64;
770 m_n->gmch_m = (pixel_clock * bpp) >> 3;
771 m_n->gmch_n = link_clock * nlanes;
772 intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
773 m_n->link_m = pixel_clock;
774 m_n->link_n = link_clock;
775 intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
776}
777
778void 809void
779intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, 810intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
780 struct drm_display_mode *adjusted_mode) 811 struct drm_display_mode *adjusted_mode)
@@ -785,7 +816,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
785 struct drm_i915_private *dev_priv = dev->dev_private; 816 struct drm_i915_private *dev_priv = dev->dev_private;
786 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 817 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
787 int lane_count = 4; 818 int lane_count = 4;
788 struct intel_dp_m_n m_n; 819 struct intel_link_m_n m_n;
789 int pipe = intel_crtc->pipe; 820 int pipe = intel_crtc->pipe;
790 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; 821 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
791 822
@@ -808,8 +839,8 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
808 * the number of bytes_per_pixel post-LUT, which we always 839 * the number of bytes_per_pixel post-LUT, which we always
809 * set up for 8-bits of R/G/B, or 3 bytes total. 840 * set up for 8-bits of R/G/B, or 3 bytes total.
810 */ 841 */
811 intel_dp_compute_m_n(intel_crtc->bpp, lane_count, 842 intel_link_compute_m_n(intel_crtc->bpp, lane_count,
812 mode->clock, adjusted_mode->clock, &m_n); 843 mode->clock, adjusted_mode->clock, &m_n);
813 844
814 if (IS_HASWELL(dev)) { 845 if (IS_HASWELL(dev)) {
815 I915_WRITE(PIPE_DATA_M1(cpu_transcoder), 846 I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
@@ -851,6 +882,32 @@ void intel_dp_init_link_config(struct intel_dp *intel_dp)
851 } 882 }
852} 883}
853 884
885static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
886{
887 struct drm_device *dev = crtc->dev;
888 struct drm_i915_private *dev_priv = dev->dev_private;
889 u32 dpa_ctl;
890
891 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
892 dpa_ctl = I915_READ(DP_A);
893 dpa_ctl &= ~DP_PLL_FREQ_MASK;
894
895 if (clock < 200000) {
896 /* For a long time we've carried around an ILK-DevA w/a for the
897 * 160MHz clock. If we're really unlucky, it's still required.
898 */
899 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
900 dpa_ctl |= DP_PLL_FREQ_160MHZ;
901 } else {
902 dpa_ctl |= DP_PLL_FREQ_270MHZ;
903 }
904
905 I915_WRITE(DP_A, dpa_ctl);
906
907 POSTING_READ(DP_A);
908 udelay(500);
909}
910
854static void 911static void
855intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, 912intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
856 struct drm_display_mode *adjusted_mode) 913 struct drm_display_mode *adjusted_mode)
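ironlake_set_pll_edp() moves here from intel_display.c and is now invoked from intel_dp_mode_set() for CPU eDP, keeping the PLL frequency choice next to the rest of the link configuration. The choice is a plain threshold on the adjusted mode clock, which for the standard DP link rates works out to:

    /* Illustrative: standard link rates vs. the 200000 kHz cutoff. */
    ironlake_set_pll_edp(crtc, 162000); /* RBR -> DP_PLL_FREQ_160MHZ */
    ironlake_set_pll_edp(crtc, 270000); /* HBR -> DP_PLL_FREQ_270MHZ */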
@@ -926,7 +983,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
926 else 983 else
927 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 984 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
928 } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { 985 } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
929 intel_dp->DP |= intel_dp->color_range; 986 if (!HAS_PCH_SPLIT(dev))
987 intel_dp->DP |= intel_dp->color_range;
930 988
931 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 989 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
932 intel_dp->DP |= DP_SYNC_HS_HIGH; 990 intel_dp->DP |= DP_SYNC_HS_HIGH;
@@ -950,6 +1008,9 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
950 } else { 1008 } else {
951 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 1009 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
952 } 1010 }
1011
1012 if (is_cpu_edp(intel_dp))
1013 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
953} 1014}
954 1015
955#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 1016#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
@@ -1057,6 +1118,8 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
1057 struct drm_i915_private *dev_priv = dev->dev_private; 1118 struct drm_i915_private *dev_priv = dev->dev_private;
1058 u32 pp; 1119 u32 pp;
1059 1120
1121 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
1122
1060 if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { 1123 if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
1061 pp = ironlake_get_pp_control(dev_priv); 1124 pp = ironlake_get_pp_control(dev_priv);
1062 pp &= ~EDP_FORCE_VDD; 1125 pp &= ~EDP_FORCE_VDD;
@@ -1543,7 +1606,7 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
1543} 1606}
1544 1607
1545static uint32_t 1608static uint32_t
1546intel_dp_signal_levels(uint8_t train_set) 1609intel_gen4_signal_levels(uint8_t train_set)
1547{ 1610{
1548 uint32_t signal_levels = 0; 1611 uint32_t signal_levels = 0;
1549 1612
@@ -1641,7 +1704,7 @@ intel_gen7_edp_signal_levels(uint8_t train_set)
1641 1704
1642/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */ 1705/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
1643static uint32_t 1706static uint32_t
1644intel_dp_signal_levels_hsw(uint8_t train_set) 1707intel_hsw_signal_levels(uint8_t train_set)
1645{ 1708{
1646 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | 1709 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
1647 DP_TRAIN_PRE_EMPHASIS_MASK); 1710 DP_TRAIN_PRE_EMPHASIS_MASK);
@@ -1673,6 +1736,34 @@ intel_dp_signal_levels_hsw(uint8_t train_set)
1673 } 1736 }
1674} 1737}
1675 1738
1739/* Properly updates "DP" with the correct signal levels. */
1740static void
1741intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
1742{
1743 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1744 struct drm_device *dev = intel_dig_port->base.base.dev;
1745 uint32_t signal_levels, mask;
1746 uint8_t train_set = intel_dp->train_set[0];
1747
1748 if (IS_HASWELL(dev)) {
1749 signal_levels = intel_hsw_signal_levels(train_set);
1750 mask = DDI_BUF_EMP_MASK;
1751 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
1752 signal_levels = intel_gen7_edp_signal_levels(train_set);
1753 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
1754 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
1755 signal_levels = intel_gen6_edp_signal_levels(train_set);
1756 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
1757 } else {
1758 signal_levels = intel_gen4_signal_levels(train_set);
1759 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
1760 }
1761
1762 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
1763
1764 *DP = (*DP & ~mask) | signal_levels;
1765}
1766
1676static bool 1767static bool
1677intel_dp_set_link_train(struct intel_dp *intel_dp, 1768intel_dp_set_link_train(struct intel_dp *intel_dp,
1678 uint32_t dp_reg_value, 1769 uint32_t dp_reg_value,
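The four per-platform voltage-swing/pre-emphasis tables are folded behind intel_dp_set_signal_levels(), which selects both the level encoding (Haswell DDI buffer levels, gen6/gen7 CPU-eDP levels, or the classic gen4 bits) and the register mask, then rewrites only the masked bits of the caller's DP word. Both training loops below shrink to the same shape:

    /* Consolidated call site in the link-training loops; dp_train_pat
     * stands in for the unchanged training-pattern flags. */
    intel_dp_set_signal_levels(intel_dp, &DP);
    if (!intel_dp_set_link_train(intel_dp, DP, dp_train_pat))
            break;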
@@ -1696,14 +1787,18 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
1696 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; 1787 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
1697 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 1788 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
1698 case DP_TRAINING_PATTERN_DISABLE: 1789 case DP_TRAINING_PATTERN_DISABLE:
1699 temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
1700 I915_WRITE(DP_TP_CTL(port), temp);
1701 1790
1702 if (wait_for((I915_READ(DP_TP_STATUS(port)) & 1791 if (port != PORT_A) {
1703 DP_TP_STATUS_IDLE_DONE), 1)) 1792 temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
1704 DRM_ERROR("Timed out waiting for DP idle patterns\n"); 1793 I915_WRITE(DP_TP_CTL(port), temp);
1794
1795 if (wait_for((I915_READ(DP_TP_STATUS(port)) &
1796 DP_TP_STATUS_IDLE_DONE), 1))
1797 DRM_ERROR("Timed out waiting for DP idle patterns\n");
1798
1799 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
1800 }
1705 1801
1706 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
1707 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; 1802 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
1708 1803
1709 break; 1804 break;
@@ -1791,7 +1886,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1791 int voltage_tries, loop_tries; 1886 int voltage_tries, loop_tries;
1792 uint32_t DP = intel_dp->DP; 1887 uint32_t DP = intel_dp->DP;
1793 1888
1794 if (IS_HASWELL(dev)) 1889 if (HAS_DDI(dev))
1795 intel_ddi_prepare_link_retrain(encoder); 1890 intel_ddi_prepare_link_retrain(encoder);
1796 1891
1797 /* Write the link configuration data */ 1892 /* Write the link configuration data */
@@ -1809,24 +1904,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1809 for (;;) { 1904 for (;;) {
1810 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1905 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
1811 uint8_t link_status[DP_LINK_STATUS_SIZE]; 1906 uint8_t link_status[DP_LINK_STATUS_SIZE];
1812 uint32_t signal_levels; 1907
1813 1908 intel_dp_set_signal_levels(intel_dp, &DP);
1814 if (IS_HASWELL(dev)) {
1815 signal_levels = intel_dp_signal_levels_hsw(
1816 intel_dp->train_set[0]);
1817 DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
1818 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
1819 signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
1820 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
1821 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
1822 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
1823 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1824 } else {
1825 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
1826 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1827 }
1828 DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n",
1829 signal_levels);
1830 1909
1831 /* Set training pattern 1 */ 1910 /* Set training pattern 1 */
1832 if (!intel_dp_set_link_train(intel_dp, DP, 1911 if (!intel_dp_set_link_train(intel_dp, DP,
@@ -1882,7 +1961,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1882void 1961void
1883intel_dp_complete_link_train(struct intel_dp *intel_dp) 1962intel_dp_complete_link_train(struct intel_dp *intel_dp)
1884{ 1963{
1885 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1886 bool channel_eq = false; 1964 bool channel_eq = false;
1887 int tries, cr_tries; 1965 int tries, cr_tries;
1888 uint32_t DP = intel_dp->DP; 1966 uint32_t DP = intel_dp->DP;
@@ -1892,8 +1970,6 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1892 cr_tries = 0; 1970 cr_tries = 0;
1893 channel_eq = false; 1971 channel_eq = false;
1894 for (;;) { 1972 for (;;) {
1895 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
1896 uint32_t signal_levels;
1897 uint8_t link_status[DP_LINK_STATUS_SIZE]; 1973 uint8_t link_status[DP_LINK_STATUS_SIZE];
1898 1974
1899 if (cr_tries > 5) { 1975 if (cr_tries > 5) {
@@ -1902,19 +1978,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1902 break; 1978 break;
1903 } 1979 }
1904 1980
1905 if (IS_HASWELL(dev)) { 1981 intel_dp_set_signal_levels(intel_dp, &DP);
1906 signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]);
1907 DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
1908 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
1909 signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
1910 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
1911 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
1912 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
1913 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1914 } else {
1915 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
1916 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1917 }
1918 1982
1919 /* channel eq pattern */ 1983 /* channel eq pattern */
1920 if (!intel_dp_set_link_train(intel_dp, DP, 1984 if (!intel_dp_set_link_train(intel_dp, DP,
@@ -1964,6 +2028,8 @@ intel_dp_link_down(struct intel_dp *intel_dp)
1964 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2028 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1965 struct drm_device *dev = intel_dig_port->base.base.dev; 2029 struct drm_device *dev = intel_dig_port->base.base.dev;
1966 struct drm_i915_private *dev_priv = dev->dev_private; 2030 struct drm_i915_private *dev_priv = dev->dev_private;
2031 struct intel_crtc *intel_crtc =
2032 to_intel_crtc(intel_dig_port->base.base.crtc);
1967 uint32_t DP = intel_dp->DP; 2033 uint32_t DP = intel_dp->DP;
1968 2034
1969 /* 2035 /*
@@ -1981,7 +2047,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
1981 * intel_ddi_prepare_link_retrain will take care of redoing the link 2047 * intel_ddi_prepare_link_retrain will take care of redoing the link
1982 * train. 2048 * train.
1983 */ 2049 */
1984 if (IS_HASWELL(dev)) 2050 if (HAS_DDI(dev))
1985 return; 2051 return;
1986 2052
1987 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)) 2053 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
@@ -1998,7 +2064,8 @@ intel_dp_link_down(struct intel_dp *intel_dp)
1998 } 2064 }
1999 POSTING_READ(intel_dp->output_reg); 2065 POSTING_READ(intel_dp->output_reg);
2000 2066
2001 msleep(17); 2067 /* We don't really know why we're doing this */
2068 intel_wait_for_vblank(dev, intel_crtc->pipe);
2002 2069
2003 if (HAS_PCH_IBX(dev) && 2070 if (HAS_PCH_IBX(dev) &&
2004 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { 2071 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
@@ -2018,19 +2085,14 @@ intel_dp_link_down(struct intel_dp *intel_dp)
2018 /* Changes to enable or select take place the vblank 2085 /* Changes to enable or select take place the vblank
2019 * after being written. 2086 * after being written.
2020 */ 2087 */
2021 if (crtc == NULL) { 2088 if (WARN_ON(crtc == NULL)) {
2022 /* We can arrive here never having been attached 2089 /* We should never try to disable a port without a crtc
2023 * to a CRTC, for instance, due to inheriting 2090 * attached. For paranoia keep the code around for a
2024 * random state from the BIOS. 2091 * bit. */
2025 *
2026 * If the pipe is not running, play safe and
2027 * wait for the clocks to stabilise before
2028 * continuing.
2029 */
2030 POSTING_READ(intel_dp->output_reg); 2092 POSTING_READ(intel_dp->output_reg);
2031 msleep(50); 2093 msleep(50);
2032 } else 2094 } else
2033 intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); 2095 intel_wait_for_vblank(dev, intel_crtc->pipe);
2034 } 2096 }
2035 2097
2036 DP &= ~DP_AUDIO_OUTPUT_ENABLE; 2098 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
@@ -2042,10 +2104,16 @@ intel_dp_link_down(struct intel_dp *intel_dp)
2042static bool 2104static bool
2043intel_dp_get_dpcd(struct intel_dp *intel_dp) 2105intel_dp_get_dpcd(struct intel_dp *intel_dp)
2044{ 2106{
2107 char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
2108
2045 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, 2109 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
2046 sizeof(intel_dp->dpcd)) == 0) 2110 sizeof(intel_dp->dpcd)) == 0)
2047 return false; /* aux transfer failed */ 2111 return false; /* aux transfer failed */
2048 2112
2113 hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
2114 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
2115 DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
2116
2049 if (intel_dp->dpcd[DP_DPCD_REV] == 0) 2117 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
2050 return false; /* DPCD not present */ 2118 return false; /* DPCD not present */
2051 2119
@@ -2206,6 +2274,8 @@ static enum drm_connector_status
2206ironlake_dp_detect(struct intel_dp *intel_dp) 2274ironlake_dp_detect(struct intel_dp *intel_dp)
2207{ 2275{
2208 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2276 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2277 struct drm_i915_private *dev_priv = dev->dev_private;
2278 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2209 enum drm_connector_status status; 2279 enum drm_connector_status status;
2210 2280
2211 /* Can't disconnect eDP, but you can close the lid... */ 2281 /* Can't disconnect eDP, but you can close the lid... */
@@ -2216,6 +2286,9 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
2216 return status; 2286 return status;
2217 } 2287 }
2218 2288
2289 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
2290 return connector_status_disconnected;
2291
2219 return intel_dp_detect_dpcd(intel_dp); 2292 return intel_dp_detect_dpcd(intel_dp);
2220} 2293}
2221 2294
@@ -2224,17 +2297,18 @@ g4x_dp_detect(struct intel_dp *intel_dp)
2224{ 2297{
2225 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2298 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2226 struct drm_i915_private *dev_priv = dev->dev_private; 2299 struct drm_i915_private *dev_priv = dev->dev_private;
2300 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2227 uint32_t bit; 2301 uint32_t bit;
2228 2302
2229 switch (intel_dp->output_reg) { 2303 switch (intel_dig_port->port) {
2230 case DP_B: 2304 case PORT_B:
2231 bit = DPB_HOTPLUG_LIVE_STATUS; 2305 bit = PORTB_HOTPLUG_LIVE_STATUS;
2232 break; 2306 break;
2233 case DP_C: 2307 case PORT_C:
2234 bit = DPC_HOTPLUG_LIVE_STATUS; 2308 bit = PORTC_HOTPLUG_LIVE_STATUS;
2235 break; 2309 break;
2236 case DP_D: 2310 case PORT_D:
2237 bit = DPD_HOTPLUG_LIVE_STATUS; 2311 bit = PORTD_HOTPLUG_LIVE_STATUS;
2238 break; 2312 break;
2239 default: 2313 default:
2240 return connector_status_unknown; 2314 return connector_status_unknown;
@@ -2290,13 +2364,6 @@ intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *ada
2290 return intel_ddc_get_modes(connector, adapter); 2364 return intel_ddc_get_modes(connector, adapter);
2291} 2365}
2292 2366
2293
2294/**
2295 * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
2296 *
2297 * \return true if DP port is connected.
2298 * \return false if DP port is disconnected.
2299 */
2300static enum drm_connector_status 2367static enum drm_connector_status
2301intel_dp_detect(struct drm_connector *connector, bool force) 2368intel_dp_detect(struct drm_connector *connector, bool force)
2302{ 2369{
@@ -2306,7 +2373,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
2306 struct drm_device *dev = connector->dev; 2373 struct drm_device *dev = connector->dev;
2307 enum drm_connector_status status; 2374 enum drm_connector_status status;
2308 struct edid *edid = NULL; 2375 struct edid *edid = NULL;
2309 char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
2310 2376
2311 intel_dp->has_audio = false; 2377 intel_dp->has_audio = false;
2312 2378
@@ -2315,10 +2381,6 @@ intel_dp_detect(struct drm_connector *connector, bool force)
2315 else 2381 else
2316 status = g4x_dp_detect(intel_dp); 2382 status = g4x_dp_detect(intel_dp);
2317 2383
2318 hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
2319 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
2320 DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
2321
2322 if (status != connector_status_connected) 2384 if (status != connector_status_connected)
2323 return status; 2385 return status;
2324 2386
@@ -2419,10 +2481,21 @@ intel_dp_set_property(struct drm_connector *connector,
2419 } 2481 }
2420 2482
2421 if (property == dev_priv->broadcast_rgb_property) { 2483 if (property == dev_priv->broadcast_rgb_property) {
2422 if (val == !!intel_dp->color_range) 2484 switch (val) {
2423 return 0; 2485 case INTEL_BROADCAST_RGB_AUTO:
2424 2486 intel_dp->color_range_auto = true;
2425 intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0; 2487 break;
2488 case INTEL_BROADCAST_RGB_FULL:
2489 intel_dp->color_range_auto = false;
2490 intel_dp->color_range = 0;
2491 break;
2492 case INTEL_BROADCAST_RGB_LIMITED:
2493 intel_dp->color_range_auto = false;
2494 intel_dp->color_range = DP_COLOR_RANGE_16_235;
2495 break;
2496 default:
2497 return -EINVAL;
2498 }
2426 goto done; 2499 goto done;
2427 } 2500 }
2428 2501
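
For reference, the new three-way property is driven from userspace through the ordinary connector-property interface; nothing in this patch changes that path. A hedged libdrm sketch, not part of this patch (the property and enum names are taken from the broadcast_rgb_names table in intel_modes.c further down in this diff; error handling omitted):

#include <string.h>
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Set "Broadcast RGB" on a connector to "Limited 16:235". */
static void set_limited_range(int fd, uint32_t connector_id)
{
	drmModeObjectProperties *props =
		drmModeObjectGetProperties(fd, connector_id,
					   DRM_MODE_OBJECT_CONNECTOR);
	uint32_t i;
	int j;

	for (i = 0; i < props->count_props; i++) {
		drmModePropertyRes *prop =
			drmModeGetProperty(fd, props->props[i]);

		if (strcmp(prop->name, "Broadcast RGB") == 0)
			for (j = 0; j < prop->count_enums; j++)
				if (strcmp(prop->enums[j].name,
					   "Limited 16:235") == 0)
					drmModeObjectSetProperty(fd,
						connector_id,
						DRM_MODE_OBJECT_CONNECTOR,
						prop->prop_id,
						prop->enums[j].value);
		drmModeFreeProperty(prop);
	}
	drmModeFreeObjectProperties(props);
}
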
@@ -2445,11 +2518,8 @@ intel_dp_set_property(struct drm_connector *connector,
2445 return -EINVAL; 2518 return -EINVAL;
2446 2519
2447done: 2520done:
2448 if (intel_encoder->base.crtc) { 2521 if (intel_encoder->base.crtc)
2449 struct drm_crtc *crtc = intel_encoder->base.crtc; 2522 intel_crtc_restore_mode(intel_encoder->base.crtc);
2450 intel_set_mode(crtc, &crtc->mode,
2451 crtc->x, crtc->y, crtc->fb);
2452 }
2453 2523
2454 return 0; 2524 return 0;
2455} 2525}
@@ -2491,7 +2561,6 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
2491static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { 2561static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
2492 .mode_fixup = intel_dp_mode_fixup, 2562 .mode_fixup = intel_dp_mode_fixup,
2493 .mode_set = intel_dp_mode_set, 2563 .mode_set = intel_dp_mode_set,
2494 .disable = intel_encoder_noop,
2495}; 2564};
2496 2565
2497static const struct drm_connector_funcs intel_dp_connector_funcs = { 2566static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@ -2566,6 +2635,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
2566 2635
2567 intel_attach_force_audio_property(connector); 2636 intel_attach_force_audio_property(connector);
2568 intel_attach_broadcast_rgb_property(connector); 2637 intel_attach_broadcast_rgb_property(connector);
2638 intel_dp->color_range_auto = true;
2569 2639
2570 if (is_edp(intel_dp)) { 2640 if (is_edp(intel_dp)) {
2571 drm_mode_create_scaling_mode_property(connector->dev); 2641 drm_mode_create_scaling_mode_property(connector->dev);
@@ -2755,7 +2825,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
2755 intel_connector_attach_encoder(intel_connector, intel_encoder); 2825 intel_connector_attach_encoder(intel_connector, intel_encoder);
2756 drm_sysfs_connector_add(connector); 2826 drm_sysfs_connector_add(connector);
2757 2827
2758 if (IS_HASWELL(dev)) 2828 if (HAS_DDI(dev))
2759 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 2829 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
2760 else 2830 else
2761 intel_connector->get_hw_state = intel_connector_get_hw_state; 2831 intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -2767,15 +2837,15 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
2767 name = "DPDDC-A"; 2837 name = "DPDDC-A";
2768 break; 2838 break;
2769 case PORT_B: 2839 case PORT_B:
2770 dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS; 2840 dev_priv->hotplug_supported_mask |= PORTB_HOTPLUG_INT_STATUS;
2771 name = "DPDDC-B"; 2841 name = "DPDDC-B";
2772 break; 2842 break;
2773 case PORT_C: 2843 case PORT_C:
2774 dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS; 2844 dev_priv->hotplug_supported_mask |= PORTC_HOTPLUG_INT_STATUS;
2775 name = "DPDDC-C"; 2845 name = "DPDDC-C";
2776 break; 2846 break;
2777 case PORT_D: 2847 case PORT_D:
2778 dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS; 2848 dev_priv->hotplug_supported_mask |= PORTD_HOTPLUG_INT_STATUS;
2779 name = "DPDDC-D"; 2849 name = "DPDDC-D";
2780 break; 2850 break;
2781 default: 2851 default:
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 8a1bd4a3ad0d..07ebac6fe8ca 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -109,6 +109,11 @@
109 * timings in the mode to prevent the crtc fixup from overwriting them. 109 * timings in the mode to prevent the crtc fixup from overwriting them.
110 * Currently only lvds needs that. */ 110 * Currently only lvds needs that. */
111#define INTEL_MODE_CRTC_TIMINGS_SET (0x20) 111#define INTEL_MODE_CRTC_TIMINGS_SET (0x20)
112/*
113 * Set when limited 16-235 (as opposed to full 0-255) RGB color range is
114 * to be used.
115 */
116#define INTEL_MODE_LIMITED_COLOR_RANGE (0x40)
112 117
113static inline void 118static inline void
114intel_mode_set_pixel_multiplier(struct drm_display_mode *mode, 119intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
@@ -153,6 +158,7 @@ struct intel_encoder {
153 bool cloneable; 158 bool cloneable;
154 bool connectors_active; 159 bool connectors_active;
155 void (*hot_plug)(struct intel_encoder *); 160 void (*hot_plug)(struct intel_encoder *);
161 void (*pre_pll_enable)(struct intel_encoder *);
156 void (*pre_enable)(struct intel_encoder *); 162 void (*pre_enable)(struct intel_encoder *);
157 void (*enable)(struct intel_encoder *); 163 void (*enable)(struct intel_encoder *);
158 void (*disable)(struct intel_encoder *); 164 void (*disable)(struct intel_encoder *);
@@ -205,6 +211,7 @@ struct intel_crtc {
205 * some outputs connected to this crtc. 211 * some outputs connected to this crtc.
206 */ 212 */
207 bool active; 213 bool active;
214 bool eld_vld;
208 bool primary_disabled; /* is the crtc obscured by a plane? */ 215 bool primary_disabled; /* is the crtc obscured by a plane? */
209 bool lowfreq_avail; 216 bool lowfreq_avail;
210 struct intel_overlay *overlay; 217 struct intel_overlay *overlay;
@@ -228,6 +235,9 @@ struct intel_crtc {
228 /* We can share PLLs across outputs if the timings match */ 235 /* We can share PLLs across outputs if the timings match */
229 struct intel_pch_pll *pch_pll; 236 struct intel_pch_pll *pch_pll;
230 uint32_t ddi_pll_sel; 237 uint32_t ddi_pll_sel;
238
239 /* reset counter value when the last flip was submitted */
240 unsigned int reset_counter;
231}; 241};
232 242
233struct intel_plane { 243struct intel_plane {
@@ -283,6 +293,9 @@ struct cxsr_latency {
283#define DIP_LEN_AVI 13 293#define DIP_LEN_AVI 13
284#define DIP_AVI_PR_1 0 294#define DIP_AVI_PR_1 0
285#define DIP_AVI_PR_2 1 295#define DIP_AVI_PR_2 1
296#define DIP_AVI_RGB_QUANT_RANGE_DEFAULT (0 << 2)
297#define DIP_AVI_RGB_QUANT_RANGE_LIMITED (1 << 2)
298#define DIP_AVI_RGB_QUANT_RANGE_FULL (2 << 2)
286 299
287#define DIP_TYPE_SPD 0x83 300#define DIP_TYPE_SPD 0x83
288#define DIP_VERSION_SPD 0x1 301#define DIP_VERSION_SPD 0x1
@@ -337,9 +350,11 @@ struct intel_hdmi {
337 u32 sdvox_reg; 350 u32 sdvox_reg;
338 int ddc_bus; 351 int ddc_bus;
339 uint32_t color_range; 352 uint32_t color_range;
353 bool color_range_auto;
340 bool has_hdmi_sink; 354 bool has_hdmi_sink;
341 bool has_audio; 355 bool has_audio;
342 enum hdmi_force_audio force_audio; 356 enum hdmi_force_audio force_audio;
357 bool rgb_quant_range_selectable;
343 void (*write_infoframe)(struct drm_encoder *encoder, 358 void (*write_infoframe)(struct drm_encoder *encoder,
344 struct dip_infoframe *frame); 359 struct dip_infoframe *frame);
345 void (*set_infoframes)(struct drm_encoder *encoder, 360 void (*set_infoframes)(struct drm_encoder *encoder,
@@ -356,6 +371,7 @@ struct intel_dp {
356 bool has_audio; 371 bool has_audio;
357 enum hdmi_force_audio force_audio; 372 enum hdmi_force_audio force_audio;
358 uint32_t color_range; 373 uint32_t color_range;
374 bool color_range_auto;
359 uint8_t link_bw; 375 uint8_t link_bw;
360 uint8_t lane_count; 376 uint8_t lane_count;
361 uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; 377 uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
@@ -377,6 +393,7 @@ struct intel_dp {
377struct intel_digital_port { 393struct intel_digital_port {
378 struct intel_encoder base; 394 struct intel_encoder base;
379 enum port port; 395 enum port port;
396 u32 port_reversal;
380 struct intel_dp dp; 397 struct intel_dp dp;
381 struct intel_hdmi hdmi; 398 struct intel_hdmi hdmi;
382}; 399};
@@ -439,10 +456,10 @@ extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
439extern void intel_dvo_init(struct drm_device *dev); 456extern void intel_dvo_init(struct drm_device *dev);
440extern void intel_tv_init(struct drm_device *dev); 457extern void intel_tv_init(struct drm_device *dev);
441extern void intel_mark_busy(struct drm_device *dev); 458extern void intel_mark_busy(struct drm_device *dev);
442extern void intel_mark_idle(struct drm_device *dev);
443extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj); 459extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
444extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj); 460extern void intel_mark_idle(struct drm_device *dev);
445extern bool intel_lvds_init(struct drm_device *dev); 461extern bool intel_lvds_init(struct drm_device *dev);
462extern bool intel_is_dual_link_lvds(struct drm_device *dev);
446extern void intel_dp_init(struct drm_device *dev, int output_reg, 463extern void intel_dp_init(struct drm_device *dev, int output_reg,
447 enum port port); 464 enum port port);
448extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port, 465extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
@@ -502,12 +519,12 @@ struct intel_set_config {
502 bool mode_changed; 519 bool mode_changed;
503}; 520};
504 521
505extern bool intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, 522extern int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
506 int x, int y, struct drm_framebuffer *old_fb); 523 int x, int y, struct drm_framebuffer *old_fb);
507extern void intel_modeset_disable(struct drm_device *dev); 524extern void intel_modeset_disable(struct drm_device *dev);
525extern void intel_crtc_restore_mode(struct drm_crtc *crtc);
508extern void intel_crtc_load_lut(struct drm_crtc *crtc); 526extern void intel_crtc_load_lut(struct drm_crtc *crtc);
509extern void intel_crtc_update_dpms(struct drm_crtc *crtc); 527extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
510extern void intel_encoder_noop(struct drm_encoder *encoder);
511extern void intel_encoder_destroy(struct drm_encoder *encoder); 528extern void intel_encoder_destroy(struct drm_encoder *encoder);
512extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode); 529extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
513extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder); 530extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder);
@@ -546,6 +563,9 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
546 return container_of(intel_hdmi, struct intel_digital_port, hdmi); 563 return container_of(intel_hdmi, struct intel_digital_port, hdmi);
547} 564}
548 565
566bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
567 struct intel_digital_port *port);
568
549extern void intel_connector_attach_encoder(struct intel_connector *connector, 569extern void intel_connector_attach_encoder(struct intel_connector *connector,
550 struct intel_encoder *encoder); 570 struct intel_encoder *encoder);
551extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); 571extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
@@ -589,6 +609,7 @@ extern int intel_framebuffer_init(struct drm_device *dev,
589 struct drm_mode_fb_cmd2 *mode_cmd, 609 struct drm_mode_fb_cmd2 *mode_cmd,
590 struct drm_i915_gem_object *obj); 610 struct drm_i915_gem_object *obj);
591extern int intel_fbdev_init(struct drm_device *dev); 611extern int intel_fbdev_init(struct drm_device *dev);
612extern void intel_fbdev_initial_config(struct drm_device *dev);
592extern void intel_fbdev_fini(struct drm_device *dev); 613extern void intel_fbdev_fini(struct drm_device *dev);
593extern void intel_fbdev_set_suspend(struct drm_device *dev, int state); 614extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
594extern void intel_prepare_page_flip(struct drm_device *dev, int plane); 615extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
@@ -627,9 +648,10 @@ extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
627extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe, 648extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
628 struct drm_display_mode *mode); 649 struct drm_display_mode *mode);
629 650
630extern unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y, 651extern unsigned long intel_gen4_compute_page_offset(int *x, int *y,
631 unsigned int bpp, 652 unsigned int tiling_mode,
632 unsigned int pitch); 653 unsigned int bpp,
654 unsigned int pitch);
633 655
634extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data, 656extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
635 struct drm_file *file_priv); 657 struct drm_file *file_priv);
@@ -648,7 +670,8 @@ extern void intel_update_fbc(struct drm_device *dev);
648extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv); 670extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
649extern void intel_gpu_ips_teardown(void); 671extern void intel_gpu_ips_teardown(void);
650 672
651extern void intel_init_power_wells(struct drm_device *dev); 673extern void intel_init_power_well(struct drm_device *dev);
674extern void intel_set_power_well(struct drm_device *dev, bool enable);
652extern void intel_enable_gt_powersave(struct drm_device *dev); 675extern void intel_enable_gt_powersave(struct drm_device *dev);
653extern void intel_disable_gt_powersave(struct drm_device *dev); 676extern void intel_disable_gt_powersave(struct drm_device *dev);
654extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv); 677extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 15da99533e5b..00e70dbe82da 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -345,7 +345,6 @@ static void intel_dvo_destroy(struct drm_connector *connector)
345static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = { 345static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
346 .mode_fixup = intel_dvo_mode_fixup, 346 .mode_fixup = intel_dvo_mode_fixup,
347 .mode_set = intel_dvo_mode_set, 347 .mode_set = intel_dvo_mode_set,
348 .disable = intel_encoder_noop,
349}; 348};
350 349
351static const struct drm_connector_funcs intel_dvo_connector_funcs = { 350static const struct drm_connector_funcs intel_dvo_connector_funcs = {
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 7b30b5c2c4ee..981bdce3634e 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -57,9 +57,10 @@ static struct fb_ops intelfb_ops = {
57 .fb_debug_leave = drm_fb_helper_debug_leave, 57 .fb_debug_leave = drm_fb_helper_debug_leave,
58}; 58};
59 59
60static int intelfb_create(struct intel_fbdev *ifbdev, 60static int intelfb_create(struct drm_fb_helper *helper,
61 struct drm_fb_helper_surface_size *sizes) 61 struct drm_fb_helper_surface_size *sizes)
62{ 62{
63 struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
63 struct drm_device *dev = ifbdev->helper.dev; 64 struct drm_device *dev = ifbdev->helper.dev;
64 struct drm_i915_private *dev_priv = dev->dev_private; 65 struct drm_i915_private *dev_priv = dev->dev_private;
65 struct fb_info *info; 66 struct fb_info *info;
@@ -83,7 +84,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
83 84
84 size = mode_cmd.pitches[0] * mode_cmd.height; 85 size = mode_cmd.pitches[0] * mode_cmd.height;
85 size = ALIGN(size, PAGE_SIZE); 86 size = ALIGN(size, PAGE_SIZE);
86 obj = i915_gem_alloc_object(dev, size); 87 obj = i915_gem_object_create_stolen(dev, size);
88 if (obj == NULL)
89 obj = i915_gem_alloc_object(dev, size);
87 if (!obj) { 90 if (!obj) {
88 DRM_ERROR("failed to allocate framebuffer\n"); 91 DRM_ERROR("failed to allocate framebuffer\n");
89 ret = -ENOMEM; 92 ret = -ENOMEM;
@@ -133,14 +136,13 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
133 goto out_unpin; 136 goto out_unpin;
134 } 137 }
135 info->apertures->ranges[0].base = dev->mode_config.fb_base; 138 info->apertures->ranges[0].base = dev->mode_config.fb_base;
136 info->apertures->ranges[0].size = 139 info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
137 dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
138 140
139 info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset; 141 info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
140 info->fix.smem_len = size; 142 info->fix.smem_len = size;
141 143
142 info->screen_base = 144 info->screen_base =
143 ioremap_wc(dev_priv->mm.gtt_base_addr + obj->gtt_offset, 145 ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
144 size); 146 size);
145 if (!info->screen_base) { 147 if (!info->screen_base) {
146 ret = -ENOSPC; 148 ret = -ENOSPC;
@@ -153,6 +155,13 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
153 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); 155 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
154 drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height); 156 drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
155 157
158 /* If the object is shmemfs backed, it will have given us zeroed pages.
 159 * If the object is stolen, however, it will be full of whatever
160 * garbage was left in there.
161 */
162 if (ifbdev->ifb.obj->stolen)
163 memset_io(info->screen_base, 0, info->screen_size);
164
156 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ 165 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
157 166
158 DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", 167 DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
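
Why memset_io() rather than a plain memset(): info->screen_base comes from the ioremap_wc() call above, so it is an __iomem mapping that must be written through the io accessors. The clear is also only needed on the stolen-memory path introduced earlier in this function; shmemfs-backed objects already hand back zeroed pages, exactly as the new comment says.
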
@@ -173,26 +182,10 @@ out:
173 return ret; 182 return ret;
174} 183}
175 184
176static int intel_fb_find_or_create_single(struct drm_fb_helper *helper,
177 struct drm_fb_helper_surface_size *sizes)
178{
179 struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
180 int new_fb = 0;
181 int ret;
182
183 if (!helper->fb) {
184 ret = intelfb_create(ifbdev, sizes);
185 if (ret)
186 return ret;
187 new_fb = 1;
188 }
189 return new_fb;
190}
191
192static struct drm_fb_helper_funcs intel_fb_helper_funcs = { 185static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
193 .gamma_set = intel_crtc_fb_gamma_set, 186 .gamma_set = intel_crtc_fb_gamma_set,
194 .gamma_get = intel_crtc_fb_gamma_get, 187 .gamma_get = intel_crtc_fb_gamma_get,
195 .fb_probe = intel_fb_find_or_create_single, 188 .fb_probe = intelfb_create,
196}; 189};
197 190
198static void intel_fbdev_destroy(struct drm_device *dev, 191static void intel_fbdev_destroy(struct drm_device *dev,
@@ -212,6 +205,7 @@ static void intel_fbdev_destroy(struct drm_device *dev,
212 205
213 drm_fb_helper_fini(&ifbdev->helper); 206 drm_fb_helper_fini(&ifbdev->helper);
214 207
208 drm_framebuffer_unregister_private(&ifb->base);
215 drm_framebuffer_cleanup(&ifb->base); 209 drm_framebuffer_cleanup(&ifb->base);
216 if (ifb->obj) { 210 if (ifb->obj) {
217 drm_gem_object_unreference_unlocked(&ifb->obj->base); 211 drm_gem_object_unreference_unlocked(&ifb->obj->base);
@@ -241,10 +235,18 @@ int intel_fbdev_init(struct drm_device *dev)
241 } 235 }
242 236
243 drm_fb_helper_single_add_all_connectors(&ifbdev->helper); 237 drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
244 drm_fb_helper_initial_config(&ifbdev->helper, 32); 238
245 return 0; 239 return 0;
246} 240}
247 241
242void intel_fbdev_initial_config(struct drm_device *dev)
243{
244 drm_i915_private_t *dev_priv = dev->dev_private;
245
 246 /* Due to peculiar init order wrt hpd handling this is separate. */
247 drm_fb_helper_initial_config(&dev_priv->fbdev->helper, 32);
248}
249
248void intel_fbdev_fini(struct drm_device *dev) 250void intel_fbdev_fini(struct drm_device *dev)
249{ 251{
250 drm_i915_private_t *dev_priv = dev->dev_private; 252 drm_i915_private_t *dev_priv = dev->dev_private;
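
Splitting intel_fbdev_initial_config() out of intel_fbdev_init() lets the driver register the fb helper and its connectors early, then defer the output probe until hotplug handling is up. The intended load order, as a sketch (the real sequence lives in the i915 init code, which is not part of this hunk):

	intel_fbdev_init(dev);		/* allocate helper, add connectors */
	/* ... install interrupts, bring up hotplug handling ... */
	intel_fbdev_initial_config(dev);	/* now probe and pick a config */
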
@@ -280,7 +282,7 @@ void intel_fb_restore_mode(struct drm_device *dev)
280 struct drm_mode_config *config = &dev->mode_config; 282 struct drm_mode_config *config = &dev->mode_config;
281 struct drm_plane *plane; 283 struct drm_plane *plane;
282 284
283 mutex_lock(&dev->mode_config.mutex); 285 drm_modeset_lock_all(dev);
284 286
285 ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper); 287 ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
286 if (ret) 288 if (ret)
@@ -288,7 +290,8 @@ void intel_fb_restore_mode(struct drm_device *dev)
288 290
289 /* Be sure to shut off any planes that may be active */ 291 /* Be sure to shut off any planes that may be active */
290 list_for_each_entry(plane, &config->plane_list, head) 292 list_for_each_entry(plane, &config->plane_list, head)
291 plane->funcs->disable_plane(plane); 293 if (plane->enabled)
294 plane->funcs->disable_plane(plane);
292 295
293 mutex_unlock(&dev->mode_config.mutex); 296 drm_modeset_unlock_all(dev);
294} 297}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 2ee9821b9d93..fa8ec4a26041 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -48,7 +48,7 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
48 struct drm_i915_private *dev_priv = dev->dev_private; 48 struct drm_i915_private *dev_priv = dev->dev_private;
49 uint32_t enabled_bits; 49 uint32_t enabled_bits;
50 50
51 enabled_bits = IS_HASWELL(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE; 51 enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
52 52
53 WARN(I915_READ(intel_hdmi->sdvox_reg) & enabled_bits, 53 WARN(I915_READ(intel_hdmi->sdvox_reg) & enabled_bits,
54 "HDMI port enabled, expecting disabled\n"); 54 "HDMI port enabled, expecting disabled\n");
@@ -331,6 +331,7 @@ static void intel_set_infoframe(struct drm_encoder *encoder,
331static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, 331static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
332 struct drm_display_mode *adjusted_mode) 332 struct drm_display_mode *adjusted_mode)
333{ 333{
334 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
334 struct dip_infoframe avi_if = { 335 struct dip_infoframe avi_if = {
335 .type = DIP_TYPE_AVI, 336 .type = DIP_TYPE_AVI,
336 .ver = DIP_VERSION_AVI, 337 .ver = DIP_VERSION_AVI,
@@ -340,7 +341,14 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
340 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 341 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
341 avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2; 342 avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
342 343
343 avi_if.body.avi.VIC = drm_mode_cea_vic(adjusted_mode); 344 if (intel_hdmi->rgb_quant_range_selectable) {
345 if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
346 avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
347 else
348 avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
349 }
350
351 avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode);
344 352
345 intel_set_infoframe(encoder, &avi_if); 353 intel_set_infoframe(encoder, &avi_if);
346} 354}
@@ -364,7 +372,8 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
364 struct drm_display_mode *adjusted_mode) 372 struct drm_display_mode *adjusted_mode)
365{ 373{
366 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 374 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
367 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 375 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
376 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
368 u32 reg = VIDEO_DIP_CTL; 377 u32 reg = VIDEO_DIP_CTL;
369 u32 val = I915_READ(reg); 378 u32 val = I915_READ(reg);
370 u32 port; 379 u32 port;
@@ -391,11 +400,11 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
391 return; 400 return;
392 } 401 }
393 402
394 switch (intel_hdmi->sdvox_reg) { 403 switch (intel_dig_port->port) {
395 case SDVOB: 404 case PORT_B:
396 port = VIDEO_DIP_PORT_B; 405 port = VIDEO_DIP_PORT_B;
397 break; 406 break;
398 case SDVOC: 407 case PORT_C:
399 port = VIDEO_DIP_PORT_C; 408 port = VIDEO_DIP_PORT_C;
400 break; 409 break;
401 default: 410 default:
@@ -428,7 +437,8 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
428{ 437{
429 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 438 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
430 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 439 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
431 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 440 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
441 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
432 u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 442 u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
433 u32 val = I915_READ(reg); 443 u32 val = I915_READ(reg);
434 u32 port; 444 u32 port;
@@ -447,14 +457,14 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
447 return; 457 return;
448 } 458 }
449 459
450 switch (intel_hdmi->sdvox_reg) { 460 switch (intel_dig_port->port) {
451 case HDMIB: 461 case PORT_B:
452 port = VIDEO_DIP_PORT_B; 462 port = VIDEO_DIP_PORT_B;
453 break; 463 break;
454 case HDMIC: 464 case PORT_C:
455 port = VIDEO_DIP_PORT_C; 465 port = VIDEO_DIP_PORT_C;
456 break; 466 break;
457 case HDMID: 467 case PORT_D:
458 port = VIDEO_DIP_PORT_D; 468 port = VIDEO_DIP_PORT_D;
459 break; 469 break;
460 default: 470 default:
@@ -766,46 +776,38 @@ bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
766 const struct drm_display_mode *mode, 776 const struct drm_display_mode *mode,
767 struct drm_display_mode *adjusted_mode) 777 struct drm_display_mode *adjusted_mode)
768{ 778{
769 return true; 779 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
770}
771
772static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi)
773{
774 struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
775 struct drm_i915_private *dev_priv = dev->dev_private;
776 uint32_t bit;
777 780
778 switch (intel_hdmi->sdvox_reg) { 781 if (intel_hdmi->color_range_auto) {
779 case SDVOB: 782 /* See CEA-861-E - 5.1 Default Encoding Parameters */
780 bit = HDMIB_HOTPLUG_LIVE_STATUS; 783 if (intel_hdmi->has_hdmi_sink &&
781 break; 784 drm_match_cea_mode(adjusted_mode) > 1)
782 case SDVOC: 785 intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235;
783 bit = HDMIC_HOTPLUG_LIVE_STATUS; 786 else
784 break; 787 intel_hdmi->color_range = 0;
785 default:
786 bit = 0;
787 break;
788 } 788 }
789 789
790 return I915_READ(PORT_HOTPLUG_STAT) & bit; 790 if (intel_hdmi->color_range)
791 adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;
792
793 return true;
791} 794}
792 795
793static enum drm_connector_status 796static enum drm_connector_status
794intel_hdmi_detect(struct drm_connector *connector, bool force) 797intel_hdmi_detect(struct drm_connector *connector, bool force)
795{ 798{
799 struct drm_device *dev = connector->dev;
796 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); 800 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
797 struct intel_digital_port *intel_dig_port = 801 struct intel_digital_port *intel_dig_port =
798 hdmi_to_dig_port(intel_hdmi); 802 hdmi_to_dig_port(intel_hdmi);
799 struct intel_encoder *intel_encoder = &intel_dig_port->base; 803 struct intel_encoder *intel_encoder = &intel_dig_port->base;
800 struct drm_i915_private *dev_priv = connector->dev->dev_private; 804 struct drm_i915_private *dev_priv = dev->dev_private;
801 struct edid *edid; 805 struct edid *edid;
802 enum drm_connector_status status = connector_status_disconnected; 806 enum drm_connector_status status = connector_status_disconnected;
803 807
804 if (IS_G4X(connector->dev) && !g4x_hdmi_connected(intel_hdmi))
805 return status;
806
807 intel_hdmi->has_hdmi_sink = false; 808 intel_hdmi->has_hdmi_sink = false;
808 intel_hdmi->has_audio = false; 809 intel_hdmi->has_audio = false;
810 intel_hdmi->rgb_quant_range_selectable = false;
809 edid = drm_get_edid(connector, 811 edid = drm_get_edid(connector,
810 intel_gmbus_get_adapter(dev_priv, 812 intel_gmbus_get_adapter(dev_priv,
811 intel_hdmi->ddc_bus)); 813 intel_hdmi->ddc_bus));
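
The color_range_auto handling in intel_hdmi_mode_fixup() above encodes the CEA-861-E section 5.1 defaults: VIC 1, the 640x480 VGA mode, uses full-range RGB, while every other CEA mode defaults to limited range 16:235. drm_match_cea_mode() returns the CEA VIC of the mode, or 0 for a non-CEA mode, so the "> 1" test selects limited range exactly for CEA modes other than VIC 1.
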
@@ -817,6 +819,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
817 intel_hdmi->has_hdmi_sink = 819 intel_hdmi->has_hdmi_sink =
818 drm_detect_hdmi_monitor(edid); 820 drm_detect_hdmi_monitor(edid);
819 intel_hdmi->has_audio = drm_detect_monitor_audio(edid); 821 intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
822 intel_hdmi->rgb_quant_range_selectable =
823 drm_rgb_quant_range_selectable(edid);
820 } 824 }
821 kfree(edid); 825 kfree(edid);
822 } 826 }
@@ -902,21 +906,29 @@ intel_hdmi_set_property(struct drm_connector *connector,
902 } 906 }
903 907
904 if (property == dev_priv->broadcast_rgb_property) { 908 if (property == dev_priv->broadcast_rgb_property) {
905 if (val == !!intel_hdmi->color_range) 909 switch (val) {
906 return 0; 910 case INTEL_BROADCAST_RGB_AUTO:
907 911 intel_hdmi->color_range_auto = true;
908 intel_hdmi->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0; 912 break;
913 case INTEL_BROADCAST_RGB_FULL:
914 intel_hdmi->color_range_auto = false;
915 intel_hdmi->color_range = 0;
916 break;
917 case INTEL_BROADCAST_RGB_LIMITED:
918 intel_hdmi->color_range_auto = false;
919 intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235;
920 break;
921 default:
922 return -EINVAL;
923 }
909 goto done; 924 goto done;
910 } 925 }
911 926
912 return -EINVAL; 927 return -EINVAL;
913 928
914done: 929done:
915 if (intel_dig_port->base.base.crtc) { 930 if (intel_dig_port->base.base.crtc)
916 struct drm_crtc *crtc = intel_dig_port->base.base.crtc; 931 intel_crtc_restore_mode(intel_dig_port->base.base.crtc);
917 intel_set_mode(crtc, &crtc->mode,
918 crtc->x, crtc->y, crtc->fb);
919 }
920 932
921 return 0; 933 return 0;
922} 934}
@@ -931,7 +943,6 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
931static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { 943static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
932 .mode_fixup = intel_hdmi_mode_fixup, 944 .mode_fixup = intel_hdmi_mode_fixup,
933 .mode_set = intel_hdmi_mode_set, 945 .mode_set = intel_hdmi_mode_set,
934 .disable = intel_encoder_noop,
935}; 946};
936 947
937static const struct drm_connector_funcs intel_hdmi_connector_funcs = { 948static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
@@ -957,6 +968,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
957{ 968{
958 intel_attach_force_audio_property(connector); 969 intel_attach_force_audio_property(connector);
959 intel_attach_broadcast_rgb_property(connector); 970 intel_attach_broadcast_rgb_property(connector);
971 intel_hdmi->color_range_auto = true;
960} 972}
961 973
962void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, 974void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
@@ -980,15 +992,15 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
980 switch (port) { 992 switch (port) {
981 case PORT_B: 993 case PORT_B:
982 intel_hdmi->ddc_bus = GMBUS_PORT_DPB; 994 intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
983 dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; 995 dev_priv->hotplug_supported_mask |= PORTB_HOTPLUG_INT_STATUS;
984 break; 996 break;
985 case PORT_C: 997 case PORT_C:
986 intel_hdmi->ddc_bus = GMBUS_PORT_DPC; 998 intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
987 dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; 999 dev_priv->hotplug_supported_mask |= PORTC_HOTPLUG_INT_STATUS;
988 break; 1000 break;
989 case PORT_D: 1001 case PORT_D:
990 intel_hdmi->ddc_bus = GMBUS_PORT_DPD; 1002 intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
991 dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; 1003 dev_priv->hotplug_supported_mask |= PORTD_HOTPLUG_INT_STATUS;
992 break; 1004 break;
993 case PORT_A: 1005 case PORT_A:
994 /* Internal port only for eDP. */ 1006 /* Internal port only for eDP. */
@@ -1013,7 +1025,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
1013 intel_hdmi->set_infoframes = cpt_set_infoframes; 1025 intel_hdmi->set_infoframes = cpt_set_infoframes;
1014 } 1026 }
1015 1027
1016 if (IS_HASWELL(dev)) 1028 if (HAS_DDI(dev))
1017 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 1029 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
1018 else 1030 else
1019 intel_connector->get_hw_state = intel_connector_get_hw_state; 1031 intel_connector->get_hw_state = intel_connector_get_hw_state;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 3ef5af15b812..acf8aec9ada7 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -63,6 +63,7 @@ intel_i2c_reset(struct drm_device *dev)
63{ 63{
64 struct drm_i915_private *dev_priv = dev->dev_private; 64 struct drm_i915_private *dev_priv = dev->dev_private;
65 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0); 65 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
66 I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
66} 67}
67 68
68static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable) 69static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
@@ -202,6 +203,68 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
202 algo->data = bus; 203 algo->data = bus;
203} 204}
204 205
206#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 4)
207static int
208gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
209 u32 gmbus2_status,
210 u32 gmbus4_irq_en)
211{
212 int i;
213 int reg_offset = dev_priv->gpio_mmio_base;
214 u32 gmbus2 = 0;
215 DEFINE_WAIT(wait);
216
217 /* Important: The hw handles only the first bit, so set only one! Since
218 * we also need to check for NAKs besides the hw ready/idle signal, we
219 * need to wake up periodically and check that ourselves. */
220 I915_WRITE(GMBUS4 + reg_offset, gmbus4_irq_en);
221
222 for (i = 0; i < msecs_to_jiffies(50) + 1; i++) {
223 prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait,
224 TASK_UNINTERRUPTIBLE);
225
226 gmbus2 = I915_READ_NOTRACE(GMBUS2 + reg_offset);
227 if (gmbus2 & (GMBUS_SATOER | gmbus2_status))
228 break;
229
230 schedule_timeout(1);
231 }
232 finish_wait(&dev_priv->gmbus_wait_queue, &wait);
233
234 I915_WRITE(GMBUS4 + reg_offset, 0);
235
236 if (gmbus2 & GMBUS_SATOER)
237 return -ENXIO;
238 if (gmbus2 & gmbus2_status)
239 return 0;
240 return -ETIMEDOUT;
241}
242
243static int
244gmbus_wait_idle(struct drm_i915_private *dev_priv)
245{
246 int ret;
247 int reg_offset = dev_priv->gpio_mmio_base;
248
249#define C ((I915_READ_NOTRACE(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0)
250
251 if (!HAS_GMBUS_IRQ(dev_priv->dev))
252 return wait_for(C, 10);
253
254 /* Important: The hw handles only the first bit, so set only one! */
255 I915_WRITE(GMBUS4 + reg_offset, GMBUS_IDLE_EN);
256
257 ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10);
258
259 I915_WRITE(GMBUS4 + reg_offset, 0);
260
261 if (ret)
262 return 0;
263 else
264 return -ETIMEDOUT;
265#undef C
266}
267
205static int 268static int
206gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, 269gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
207 u32 gmbus1_index) 270 u32 gmbus1_index)
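
gmbus_wait_hw_status() open-codes an irq-assisted wait instead of using wait_event_timeout() because, as its comment notes, the hardware can raise an interrupt for only one status bit while a NAK (GMBUS_SATOER) must be caught as well; so it sleeps at most a jiffy at a time and rechecks both conditions itself. The underlying idiom, reduced to a sketch with made-up names (my_wq, hw_ready() and hw_nak() are stand-ins, not i915 symbols):

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);	/* i915 uses dev_priv->gmbus_wait_queue */
static bool hw_ready(void);		/* stand-in for the GMBUS2 ready check */
static bool hw_nak(void);		/* stand-in for the GMBUS_SATOER check */

static int wait_sketch(void)
{
	DEFINE_WAIT(wait);
	int i;

	for (i = 0; i < msecs_to_jiffies(50) + 1; i++) {
		prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hw_ready() || hw_nak())	/* the irq signals only one */
			break;
		schedule_timeout(1);	/* sleep up to a jiffy, then recheck */
	}
	finish_wait(&my_wq, &wait);

	if (hw_nak())
		return -ENXIO;
	return hw_ready() ? 0 : -ETIMEDOUT;
}
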
@@ -219,15 +282,11 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
219 while (len) { 282 while (len) {
220 int ret; 283 int ret;
221 u32 val, loop = 0; 284 u32 val, loop = 0;
222 u32 gmbus2;
223 285
224 ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) & 286 ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
225 (GMBUS_SATOER | GMBUS_HW_RDY), 287 GMBUS_HW_RDY_EN);
226 50);
227 if (ret) 288 if (ret)
228 return -ETIMEDOUT; 289 return ret;
229 if (gmbus2 & GMBUS_SATOER)
230 return -ENXIO;
231 290
232 val = I915_READ(GMBUS3 + reg_offset); 291 val = I915_READ(GMBUS3 + reg_offset);
233 do { 292 do {
@@ -261,7 +320,6 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
261 GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); 320 GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
262 while (len) { 321 while (len) {
263 int ret; 322 int ret;
264 u32 gmbus2;
265 323
266 val = loop = 0; 324 val = loop = 0;
267 do { 325 do {
@@ -270,13 +328,10 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
270 328
271 I915_WRITE(GMBUS3 + reg_offset, val); 329 I915_WRITE(GMBUS3 + reg_offset, val);
272 330
273 ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) & 331 ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
274 (GMBUS_SATOER | GMBUS_HW_RDY), 332 GMBUS_HW_RDY_EN);
275 50);
276 if (ret) 333 if (ret)
277 return -ETIMEDOUT; 334 return ret;
278 if (gmbus2 & GMBUS_SATOER)
279 return -ENXIO;
280 } 335 }
281 return 0; 336 return 0;
282} 337}
@@ -345,8 +400,6 @@ gmbus_xfer(struct i2c_adapter *adapter,
345 I915_WRITE(GMBUS0 + reg_offset, bus->reg0); 400 I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
346 401
347 for (i = 0; i < num; i++) { 402 for (i = 0; i < num; i++) {
348 u32 gmbus2;
349
350 if (gmbus_is_index_read(msgs, i, num)) { 403 if (gmbus_is_index_read(msgs, i, num)) {
351 ret = gmbus_xfer_index_read(dev_priv, &msgs[i]); 404 ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
352 i += 1; /* set i to the index of the read xfer */ 405 i += 1; /* set i to the index of the read xfer */
@@ -361,13 +414,12 @@ gmbus_xfer(struct i2c_adapter *adapter,
361 if (ret == -ENXIO) 414 if (ret == -ENXIO)
362 goto clear_err; 415 goto clear_err;
363 416
364 ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) & 417 ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
365 (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 418 GMBUS_HW_WAIT_EN);
366 50); 419 if (ret == -ENXIO)
420 goto clear_err;
367 if (ret) 421 if (ret)
368 goto timeout; 422 goto timeout;
369 if (gmbus2 & GMBUS_SATOER)
370 goto clear_err;
371 } 423 }
372 424
 373 /* Generate a STOP condition on the bus. Note that gmbus can't generate 425
@@ -380,8 +432,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
380 * We will re-enable it at the start of the next xfer, 432 * We will re-enable it at the start of the next xfer,
381 * till then let it sleep. 433 * till then let it sleep.
382 */ 434 */
383 if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, 435 if (gmbus_wait_idle(dev_priv)) {
384 10)) {
385 DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n", 436 DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
386 adapter->name); 437 adapter->name);
387 ret = -ETIMEDOUT; 438 ret = -ETIMEDOUT;
@@ -405,8 +456,7 @@ clear_err:
405 * it's slow responding and only answers on the 2nd retry. 456 * it's slow responding and only answers on the 2nd retry.
406 */ 457 */
407 ret = -ENXIO; 458 ret = -ENXIO;
408 if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, 459 if (gmbus_wait_idle(dev_priv)) {
409 10)) {
410 DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n", 460 DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
411 adapter->name); 461 adapter->name);
412 ret = -ETIMEDOUT; 462 ret = -ETIMEDOUT;
@@ -465,10 +515,13 @@ int intel_setup_gmbus(struct drm_device *dev)
465 515
466 if (HAS_PCH_SPLIT(dev)) 516 if (HAS_PCH_SPLIT(dev))
467 dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA; 517 dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA;
518 else if (IS_VALLEYVIEW(dev))
519 dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
468 else 520 else
469 dev_priv->gpio_mmio_base = 0; 521 dev_priv->gpio_mmio_base = 0;
470 522
471 mutex_init(&dev_priv->gmbus_mutex); 523 mutex_init(&dev_priv->gmbus_mutex);
524 init_waitqueue_head(&dev_priv->gmbus_wait_queue);
472 525
473 for (i = 0; i < GMBUS_NUM_PORTS; i++) { 526 for (i = 0; i < GMBUS_NUM_PORTS; i++) {
474 struct intel_gmbus *bus = &dev_priv->gmbus[i]; 527 struct intel_gmbus *bus = &dev_priv->gmbus[i];
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 17aee74258ad..3d1d97488cc9 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -51,7 +51,8 @@ struct intel_lvds_encoder {
51 51
52 u32 pfit_control; 52 u32 pfit_control;
53 u32 pfit_pgm_ratios; 53 u32 pfit_pgm_ratios;
54 bool pfit_dirty; 54 bool is_dual_link;
55 u32 reg;
55 56
56 struct intel_lvds_connector *attached_connector; 57 struct intel_lvds_connector *attached_connector;
57}; 58};
@@ -71,15 +72,10 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
71{ 72{
72 struct drm_device *dev = encoder->base.dev; 73 struct drm_device *dev = encoder->base.dev;
73 struct drm_i915_private *dev_priv = dev->dev_private; 74 struct drm_i915_private *dev_priv = dev->dev_private;
74 u32 lvds_reg, tmp; 75 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
75 76 u32 tmp;
76 if (HAS_PCH_SPLIT(dev)) {
77 lvds_reg = PCH_LVDS;
78 } else {
79 lvds_reg = LVDS;
80 }
81 77
82 tmp = I915_READ(lvds_reg); 78 tmp = I915_READ(lvds_encoder->reg);
83 79
84 if (!(tmp & LVDS_PORT_EN)) 80 if (!(tmp & LVDS_PORT_EN))
85 return false; 81 return false;
@@ -92,6 +88,91 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
92 return true; 88 return true;
93} 89}
94 90
91/* The LVDS pin pair needs to be on before the DPLLs are enabled.
92 * This is an exception to the general rule that mode_set doesn't turn
93 * things on.
94 */
95static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
96{
97 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
98 struct drm_device *dev = encoder->base.dev;
99 struct drm_i915_private *dev_priv = dev->dev_private;
100 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
101 struct drm_display_mode *fixed_mode =
102 lvds_encoder->attached_connector->base.panel.fixed_mode;
103 int pipe = intel_crtc->pipe;
104 u32 temp;
105
106 temp = I915_READ(lvds_encoder->reg);
107 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
108
109 if (HAS_PCH_CPT(dev)) {
110 temp &= ~PORT_TRANS_SEL_MASK;
111 temp |= PORT_TRANS_SEL_CPT(pipe);
112 } else {
113 if (pipe == 1) {
114 temp |= LVDS_PIPEB_SELECT;
115 } else {
116 temp &= ~LVDS_PIPEB_SELECT;
117 }
118 }
119
 120 /* set the corresponding LVDS_BORDER bit */
121 temp |= dev_priv->lvds_border_bits;
122 /* Set the B0-B3 data pairs corresponding to whether we're going to
123 * set the DPLLs for dual-channel mode or not.
124 */
125 if (lvds_encoder->is_dual_link)
126 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
127 else
128 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
129
130 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
131 * appropriately here, but we need to look more thoroughly into how
132 * panels behave in the two modes.
133 */
134
 135 /* Set the dithering flag on LVDS as needed. Note that there is no
 136 * special lvds dither control bit on pch-split platforms; dithering is
 137 * only controlled through the PIPECONF reg. */
138 if (INTEL_INFO(dev)->gen == 4) {
139 if (dev_priv->lvds_dither)
140 temp |= LVDS_ENABLE_DITHER;
141 else
142 temp &= ~LVDS_ENABLE_DITHER;
143 }
144 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
145 if (fixed_mode->flags & DRM_MODE_FLAG_NHSYNC)
146 temp |= LVDS_HSYNC_POLARITY;
147 if (fixed_mode->flags & DRM_MODE_FLAG_NVSYNC)
148 temp |= LVDS_VSYNC_POLARITY;
149
150 I915_WRITE(lvds_encoder->reg, temp);
151}
152
153static void intel_pre_enable_lvds(struct intel_encoder *encoder)
154{
155 struct drm_device *dev = encoder->base.dev;
156 struct intel_lvds_encoder *enc = to_lvds_encoder(&encoder->base);
157 struct drm_i915_private *dev_priv = dev->dev_private;
158
159 if (HAS_PCH_SPLIT(dev) || !enc->pfit_control)
160 return;
161
162 /*
163 * Enable automatic panel scaling so that non-native modes
164 * fill the screen. The panel fitter should only be
165 * adjusted whilst the pipe is disabled, according to
166 * register description and PRM.
167 */
168 DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
169 enc->pfit_control,
170 enc->pfit_pgm_ratios);
171
172 I915_WRITE(PFIT_PGM_RATIOS, enc->pfit_pgm_ratios);
173 I915_WRITE(PFIT_CONTROL, enc->pfit_control);
174}
175
95/** 176/**
96 * Sets the power state for the panel. 177 * Sets the power state for the panel.
97 */ 178 */
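
intel_pre_pll_enable_lvds() backs the new pre_pll_enable hook added to struct intel_encoder in intel_drv.h earlier in this diff. Roughly where the hooks fire during a modeset, as a simplified sketch of the crtc enable path (not the literal i915 code):

	encoder->pre_pll_enable(encoder);	/* intel_pre_pll_enable_lvds() */
	/* ... enable the DPLL ... */
	encoder->pre_enable(encoder);		/* intel_pre_enable_lvds() */
	/* ... enable pipe and planes ... */
	encoder->enable(encoder);		/* intel_enable_lvds() */
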
@@ -101,38 +182,20 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
101 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 182 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
102 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 183 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
103 struct drm_i915_private *dev_priv = dev->dev_private; 184 struct drm_i915_private *dev_priv = dev->dev_private;
104 u32 ctl_reg, lvds_reg, stat_reg; 185 u32 ctl_reg, stat_reg;
105 186
106 if (HAS_PCH_SPLIT(dev)) { 187 if (HAS_PCH_SPLIT(dev)) {
107 ctl_reg = PCH_PP_CONTROL; 188 ctl_reg = PCH_PP_CONTROL;
108 lvds_reg = PCH_LVDS;
109 stat_reg = PCH_PP_STATUS; 189 stat_reg = PCH_PP_STATUS;
110 } else { 190 } else {
111 ctl_reg = PP_CONTROL; 191 ctl_reg = PP_CONTROL;
112 lvds_reg = LVDS;
113 stat_reg = PP_STATUS; 192 stat_reg = PP_STATUS;
114 } 193 }
115 194
116 I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); 195 I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN);
117
118 if (lvds_encoder->pfit_dirty) {
119 /*
120 * Enable automatic panel scaling so that non-native modes
121 * fill the screen. The panel fitter should only be
122 * adjusted whilst the pipe is disabled, according to
123 * register description and PRM.
124 */
125 DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
126 lvds_encoder->pfit_control,
127 lvds_encoder->pfit_pgm_ratios);
128
129 I915_WRITE(PFIT_PGM_RATIOS, lvds_encoder->pfit_pgm_ratios);
130 I915_WRITE(PFIT_CONTROL, lvds_encoder->pfit_control);
131 lvds_encoder->pfit_dirty = false;
132 }
133 196
134 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); 197 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
135 POSTING_READ(lvds_reg); 198 POSTING_READ(lvds_encoder->reg);
136 if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000)) 199 if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
137 DRM_ERROR("timed out waiting for panel to power on\n"); 200 DRM_ERROR("timed out waiting for panel to power on\n");
138 201
@@ -144,15 +207,13 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
144 struct drm_device *dev = encoder->base.dev; 207 struct drm_device *dev = encoder->base.dev;
145 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 208 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
146 struct drm_i915_private *dev_priv = dev->dev_private; 209 struct drm_i915_private *dev_priv = dev->dev_private;
147 u32 ctl_reg, lvds_reg, stat_reg; 210 u32 ctl_reg, stat_reg;
148 211
149 if (HAS_PCH_SPLIT(dev)) { 212 if (HAS_PCH_SPLIT(dev)) {
150 ctl_reg = PCH_PP_CONTROL; 213 ctl_reg = PCH_PP_CONTROL;
151 lvds_reg = PCH_LVDS;
152 stat_reg = PCH_PP_STATUS; 214 stat_reg = PCH_PP_STATUS;
153 } else { 215 } else {
154 ctl_reg = PP_CONTROL; 216 ctl_reg = PP_CONTROL;
155 lvds_reg = LVDS;
156 stat_reg = PP_STATUS; 217 stat_reg = PP_STATUS;
157 } 218 }
158 219
@@ -162,13 +223,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
162 if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000)) 223 if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
163 DRM_ERROR("timed out waiting for panel to power off\n"); 224 DRM_ERROR("timed out waiting for panel to power off\n");
164 225
165 if (lvds_encoder->pfit_control) { 226 I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
166 I915_WRITE(PFIT_CONTROL, 0); 227 POSTING_READ(lvds_encoder->reg);
167 lvds_encoder->pfit_dirty = true;
168 }
169
170 I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
171 POSTING_READ(lvds_reg);
172} 228}
173 229
174static int intel_lvds_mode_valid(struct drm_connector *connector, 230static int intel_lvds_mode_valid(struct drm_connector *connector,
@@ -406,7 +462,6 @@ out:
406 pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) { 462 pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) {
407 lvds_encoder->pfit_control = pfit_control; 463 lvds_encoder->pfit_control = pfit_control;
408 lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios; 464 lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios;
409 lvds_encoder->pfit_dirty = true;
410 } 465 }
411 dev_priv->lvds_border_bits = border; 466 dev_priv->lvds_border_bits = border;
412 467
@@ -492,13 +547,14 @@ static const struct dmi_system_id intel_no_modeset_on_lid[] = {
492}; 547};
493 548
494/* 549/*
495 * Lid events. Note the use of 'modeset_on_lid': 550 * Lid events. Note the use of 'modeset':
496 * - we set it on lid close, and reset it on open 551 * - we set it to MODESET_ON_LID_OPEN on lid close,
552 * and set it to MODESET_DONE on open
497 * - we use it as a "only once" bit (ie we ignore 553 * - we use it as a "only once" bit (ie we ignore
498 * duplicate events where it was already properly 554 * duplicate events where it was already properly set)
499 * set/reset) 555 * - the suspend/resume paths will set it to
500 * - the suspend/resume paths will also set it to 556 * MODESET_SUSPENDED and ignore the lid open event,
501 * zero, since they restore the mode ("lid open"). 557 * because they restore the mode ("lid open").
502 */ 558 */
503static int intel_lid_notify(struct notifier_block *nb, unsigned long val, 559static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
504 void *unused) 560 void *unused)
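
The tri-state modeset_restore replaces the old modeset_on_lid flag. The enum itself is defined in i915_drv.h rather than in this hunk; its assumed shape, reconstructed from the states the comment names:

enum modeset_restore {
	MODESET_ON_LID_OPEN,	/* lid closed: redo the modeset on next open */
	MODESET_DONE,		/* nothing pending */
	MODESET_SUSPENDED,	/* suspend/resume path owns the restore */
};
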
@@ -512,6 +568,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
512 if (dev->switch_power_state != DRM_SWITCH_POWER_ON) 568 if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
513 return NOTIFY_OK; 569 return NOTIFY_OK;
514 570
571 mutex_lock(&dev_priv->modeset_restore_lock);
572 if (dev_priv->modeset_restore == MODESET_SUSPENDED)
573 goto exit;
515 /* 574 /*
516 * check and update the status of LVDS connector after receiving 575 * check and update the status of LVDS connector after receiving
 517 * the LID notification event. 576
@@ -520,21 +579,24 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
520 579
521 /* Don't force modeset on machines where it causes a GPU lockup */ 580 /* Don't force modeset on machines where it causes a GPU lockup */
522 if (dmi_check_system(intel_no_modeset_on_lid)) 581 if (dmi_check_system(intel_no_modeset_on_lid))
523 return NOTIFY_OK; 582 goto exit;
524 if (!acpi_lid_open()) { 583 if (!acpi_lid_open()) {
525 dev_priv->modeset_on_lid = 1; 584 /* do modeset on next lid open event */
526 return NOTIFY_OK; 585 dev_priv->modeset_restore = MODESET_ON_LID_OPEN;
586 goto exit;
527 } 587 }
528 588
529 if (!dev_priv->modeset_on_lid) 589 if (dev_priv->modeset_restore == MODESET_DONE)
530 return NOTIFY_OK; 590 goto exit;
531
532 dev_priv->modeset_on_lid = 0;
533 591
534 mutex_lock(&dev->mode_config.mutex); 592 drm_modeset_lock_all(dev);
535 intel_modeset_setup_hw_state(dev, true); 593 intel_modeset_setup_hw_state(dev, true);
536 mutex_unlock(&dev->mode_config.mutex); 594 drm_modeset_unlock_all(dev);
595
596 dev_priv->modeset_restore = MODESET_DONE;
537 597
598exit:
599 mutex_unlock(&dev_priv->modeset_restore_lock);
538 return NOTIFY_OK; 600 return NOTIFY_OK;
539} 601}
540 602
@@ -591,8 +653,7 @@ static int intel_lvds_set_property(struct drm_connector *connector,
591 * If the CRTC is enabled, the display will be changed 653 * If the CRTC is enabled, the display will be changed
592 * according to the new panel fitting mode. 654 * according to the new panel fitting mode.
593 */ 655 */
594 intel_set_mode(crtc, &crtc->mode, 656 intel_crtc_restore_mode(crtc);
595 crtc->x, crtc->y, crtc->fb);
596 } 657 }
597 } 658 }
598 659
@@ -602,7 +663,6 @@ static int intel_lvds_set_property(struct drm_connector *connector,
602static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = { 663static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
603 .mode_fixup = intel_lvds_mode_fixup, 664 .mode_fixup = intel_lvds_mode_fixup,
604 .mode_set = intel_lvds_mode_set, 665 .mode_set = intel_lvds_mode_set,
605 .disable = intel_encoder_noop,
606}; 666};
607 667
608static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { 668static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
@@ -895,6 +955,66 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
895 return false; 955 return false;
896} 956}
897 957
958static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
959{
960 DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
961 return 1;
962}
963
964static const struct dmi_system_id intel_dual_link_lvds[] = {
965 {
966 .callback = intel_dual_link_lvds_callback,
967 .ident = "Apple MacBook Pro (Core i5/i7 Series)",
968 .matches = {
969 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
970 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
971 },
972 },
973 { } /* terminating entry */
974};
975
976bool intel_is_dual_link_lvds(struct drm_device *dev)
977{
978 struct intel_encoder *encoder;
979 struct intel_lvds_encoder *lvds_encoder;
980
981 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
982 base.head) {
983 if (encoder->type == INTEL_OUTPUT_LVDS) {
984 lvds_encoder = to_lvds_encoder(&encoder->base);
985
986 return lvds_encoder->is_dual_link;
987 }
988 }
989
990 return false;
991}
992
993static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
994{
995 struct drm_device *dev = lvds_encoder->base.base.dev;
996 unsigned int val;
997 struct drm_i915_private *dev_priv = dev->dev_private;
998
999 /* use the module option value if specified */
1000 if (i915_lvds_channel_mode > 0)
1001 return i915_lvds_channel_mode == 2;
1002
1003 if (dmi_check_system(intel_dual_link_lvds))
1004 return true;
1005
 1006 /* The BIOS should set the proper LVDS register value at boot, but
 1007 * in reality it doesn't set the value when the lid is closed;
 1008 * fall back to the value the VBT intended to set when the LVDS
 1009 * register is uninitialized.
 1010 */
1011 val = I915_READ(lvds_encoder->reg);
1012 if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
1013 val = dev_priv->bios_lvds_val;
1014
1015 return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
1016}
1017
898static bool intel_lvds_supported(struct drm_device *dev) 1018static bool intel_lvds_supported(struct drm_device *dev)
899{ 1019{
900 /* With the introduction of the PCH we gained a dedicated 1020 /* With the introduction of the PCH we gained a dedicated
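
compute_is_dual_link_lvds() resolves the channel mode in a fixed order: the explicit module option wins, then the DMI quirk table, then the hardware register with the cached VBT value as fallback. Assuming the parameter keeps the name it has in i915_drv.c of this era (the definition is not part of this diff), dual link can be forced from the kernel command line:

	/* Assumed mapping of the option consulted first:
	 *	i915.lvds_channel_mode=0	autodetect (default)
	 *	i915.lvds_channel_mode=1	force single link
	 *	i915.lvds_channel_mode=2	force dual link
	 */
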
@@ -980,6 +1100,8 @@ bool intel_lvds_init(struct drm_device *dev)
980 DRM_MODE_ENCODER_LVDS); 1100 DRM_MODE_ENCODER_LVDS);
981 1101
982 intel_encoder->enable = intel_enable_lvds; 1102 intel_encoder->enable = intel_enable_lvds;
1103 intel_encoder->pre_enable = intel_pre_enable_lvds;
1104 intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds;
983 intel_encoder->disable = intel_disable_lvds; 1105 intel_encoder->disable = intel_disable_lvds;
984 intel_encoder->get_hw_state = intel_lvds_get_hw_state; 1106 intel_encoder->get_hw_state = intel_lvds_get_hw_state;
985 intel_connector->get_hw_state = intel_connector_get_hw_state; 1107 intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -1001,6 +1123,12 @@ bool intel_lvds_init(struct drm_device *dev)
1001 connector->interlace_allowed = false; 1123 connector->interlace_allowed = false;
1002 connector->doublescan_allowed = false; 1124 connector->doublescan_allowed = false;
1003 1125
1126 if (HAS_PCH_SPLIT(dev)) {
1127 lvds_encoder->reg = PCH_LVDS;
1128 } else {
1129 lvds_encoder->reg = LVDS;
1130 }
1131
1004 /* create the scaling mode property */ 1132 /* create the scaling mode property */
1005 drm_mode_create_scaling_mode_property(dev); 1133 drm_mode_create_scaling_mode_property(dev);
1006 drm_object_attach_property(&connector->base, 1134 drm_object_attach_property(&connector->base,
@@ -1101,6 +1229,10 @@ bool intel_lvds_init(struct drm_device *dev)
1101 goto failed; 1229 goto failed;
1102 1230
1103out: 1231out:
1232 lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
1233 DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
1234 lvds_encoder->is_dual_link ? "dual" : "single");
1235
1104 /* 1236 /*
1105 * Unlock registers and just 1237 * Unlock registers and just
1106 * leave them unlocked 1238 * leave them unlocked
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index b00f1c83adce..0e860f39933d 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -28,7 +28,6 @@
28#include <linux/fb.h> 28#include <linux/fb.h>
29#include <drm/drm_edid.h> 29#include <drm/drm_edid.h>
30#include <drm/drmP.h> 30#include <drm/drmP.h>
31#include <drm/drm_edid.h>
32#include "intel_drv.h" 31#include "intel_drv.h"
33#include "i915_drv.h" 32#include "i915_drv.h"
34 33
@@ -101,8 +100,9 @@ intel_attach_force_audio_property(struct drm_connector *connector)
101} 100}
102 101
103static const struct drm_prop_enum_list broadcast_rgb_names[] = { 102static const struct drm_prop_enum_list broadcast_rgb_names[] = {
104 { 0, "Full" }, 103 { INTEL_BROADCAST_RGB_AUTO, "Automatic" },
105 { 1, "Limited 16:235" }, 104 { INTEL_BROADCAST_RGB_FULL, "Full" },
105 { INTEL_BROADCAST_RGB_LIMITED, "Limited 16:235" },
106}; 106};
107 107
108void 108void
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 7741c22c934c..4d338740f2cb 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -347,7 +347,7 @@ static void intel_didl_outputs(struct drm_device *dev)
347 int i = 0; 347 int i = 0;
348 348
349 handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); 349 handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
350 if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) 350 if (!handle || acpi_bus_get_device(handle, &acpi_dev))
351 return; 351 return;
352 352
353 if (acpi_is_video_device(acpi_dev)) 353 if (acpi_is_video_device(acpi_dev))
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index d7bc817f51a0..67a2501d519d 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -195,7 +195,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
195 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 195 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
196 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr; 196 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
197 else 197 else
198 regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping, 198 regs = io_mapping_map_wc(dev_priv->gtt.mappable,
199 overlay->reg_bo->gtt_offset); 199 overlay->reg_bo->gtt_offset);
200 200
201 return regs; 201 return regs;
@@ -1045,13 +1045,13 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1045 } 1045 }
1046 1046
1047 if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) { 1047 if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) {
1048 mutex_lock(&dev->mode_config.mutex); 1048 drm_modeset_lock_all(dev);
1049 mutex_lock(&dev->struct_mutex); 1049 mutex_lock(&dev->struct_mutex);
1050 1050
1051 ret = intel_overlay_switch_off(overlay); 1051 ret = intel_overlay_switch_off(overlay);
1052 1052
1053 mutex_unlock(&dev->struct_mutex); 1053 mutex_unlock(&dev->struct_mutex);
1054 mutex_unlock(&dev->mode_config.mutex); 1054 drm_modeset_unlock_all(dev);
1055 1055
1056 return ret; 1056 return ret;
1057 } 1057 }
@@ -1075,7 +1075,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1075 goto out_free; 1075 goto out_free;
1076 } 1076 }
1077 1077
1078 mutex_lock(&dev->mode_config.mutex); 1078 drm_modeset_lock_all(dev);
1079 mutex_lock(&dev->struct_mutex); 1079 mutex_lock(&dev->struct_mutex);
1080 1080
1081 if (new_bo->tiling_mode) { 1081 if (new_bo->tiling_mode) {
@@ -1157,7 +1157,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1157 goto out_unlock; 1157 goto out_unlock;
1158 1158
1159 mutex_unlock(&dev->struct_mutex); 1159 mutex_unlock(&dev->struct_mutex);
1160 mutex_unlock(&dev->mode_config.mutex); 1160 drm_modeset_unlock_all(dev);
1161 1161
1162 kfree(params); 1162 kfree(params);
1163 1163
@@ -1165,7 +1165,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1165 1165
1166out_unlock: 1166out_unlock:
1167 mutex_unlock(&dev->struct_mutex); 1167 mutex_unlock(&dev->struct_mutex);
1168 mutex_unlock(&dev->mode_config.mutex); 1168 drm_modeset_unlock_all(dev);
1169 drm_gem_object_unreference_unlocked(&new_bo->base); 1169 drm_gem_object_unreference_unlocked(&new_bo->base);
1170out_free: 1170out_free:
1171 kfree(params); 1171 kfree(params);
@@ -1241,7 +1241,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
1241 return -ENODEV; 1241 return -ENODEV;
1242 } 1242 }
1243 1243
1244 mutex_lock(&dev->mode_config.mutex); 1244 drm_modeset_lock_all(dev);
1245 mutex_lock(&dev->struct_mutex); 1245 mutex_lock(&dev->struct_mutex);
1246 1246
1247 ret = -EINVAL; 1247 ret = -EINVAL;
@@ -1307,7 +1307,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
1307 ret = 0; 1307 ret = 0;
1308out_unlock: 1308out_unlock:
1309 mutex_unlock(&dev->struct_mutex); 1309 mutex_unlock(&dev->struct_mutex);
1310 mutex_unlock(&dev->mode_config.mutex); 1310 drm_modeset_unlock_all(dev);
1311 1311
1312 return ret; 1312 return ret;
1313} 1313}
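
The conversion above keeps the lock nesting unchanged: the modeset locks are taken outermost, the device struct_mutex inside them, and both are dropped in reverse order. A minimal sketch of that discipline with plain pthread mutexes (the names are placeholders, not DRM API):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t modeset_lock = PTHREAD_MUTEX_INITIALIZER; /* outer */
    static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER; /* inner */

    static int do_overlay_op(void)
    {
        int ret;

        pthread_mutex_lock(&modeset_lock);   /* like drm_modeset_lock_all() */
        pthread_mutex_lock(&struct_mutex);   /* like mutex_lock(&dev->struct_mutex) */

        ret = 0; /* ... the actual hardware work happens here ... */

        pthread_mutex_unlock(&struct_mutex); /* release in reverse order */
        pthread_mutex_unlock(&modeset_lock);
        return ret;
    }

    int main(void)
    {
        printf("%d\n", do_overlay_op());
        return 0;
    }
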
@@ -1333,8 +1333,10 @@ void intel_setup_overlay(struct drm_device *dev)
1333 1333
1334 overlay->dev = dev; 1334 overlay->dev = dev;
1335 1335
1336 reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE); 1336 reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
1337 if (!reg_bo) 1337 if (reg_bo == NULL)
1338 reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
1339 if (reg_bo == NULL)
1338 goto out_free; 1340 goto out_free;
1339 overlay->reg_bo = reg_bo; 1341 overlay->reg_bo = reg_bo;
1340 1342
@@ -1432,7 +1434,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
1432 regs = (struct overlay_registers __iomem *) 1434 regs = (struct overlay_registers __iomem *)
1433 overlay->reg_bo->phys_obj->handle->vaddr; 1435 overlay->reg_bo->phys_obj->handle->vaddr;
1434 else 1436 else
1435 regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, 1437 regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
1436 overlay->reg_bo->gtt_offset); 1438 overlay->reg_bo->gtt_offset);
1437 1439
1438 return regs; 1440 return regs;
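
intel_setup_overlay() above now tries stolen memory first and only falls back to a regular GEM allocation when that fails. A minimal sketch of that fallback shape, with hypothetical allocators standing in for the GEM calls:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-ins for i915_gem_object_create_stolen() and
     * i915_gem_alloc_object(); here the "stolen" pool is simply assumed
     * to be exhausted so the fallback path is exercised. */
    static void *alloc_stolen(size_t size) { (void)size; return NULL; }
    static void *alloc_normal(size_t size) { return malloc(size); }

    static void *alloc_reg_buffer(size_t size)
    {
        void *bo = alloc_stolen(size);  /* preferred: cheap stolen memory */
        if (bo == NULL)
            bo = alloc_normal(size);    /* fallback: ordinary allocation */
        return bo;                      /* NULL only if both paths fail */
    }

    int main(void)
    {
        void *bo = alloc_reg_buffer(4096);
        printf("%s\n", bo ? "allocated" : "failed");
        free(bo);
        return 0;
    }
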
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index bee8cb6108a7..a3730e0289e5 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -321,6 +321,9 @@ void intel_panel_enable_backlight(struct drm_device *dev,
321 if (dev_priv->backlight_level == 0) 321 if (dev_priv->backlight_level == 0)
322 dev_priv->backlight_level = intel_panel_get_max_backlight(dev); 322 dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
323 323
324 dev_priv->backlight_enabled = true;
325 intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
326
324 if (INTEL_INFO(dev)->gen >= 4) { 327 if (INTEL_INFO(dev)->gen >= 4) {
325 uint32_t reg, tmp; 328 uint32_t reg, tmp;
326 329
@@ -356,12 +359,12 @@ void intel_panel_enable_backlight(struct drm_device *dev,
356 } 359 }
357 360
358set_level: 361set_level:
359 /* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1. 362 /* Check the current backlight level and try to set again if it's zero.
360 * BLC_PWM_CPU_CTL may be cleared to zero automatically when these 363 * On some machines, BLC_PWM_CPU_CTL is cleared to zero automatically
361 * registers are set. 364 * when BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1 are written.
362 */ 365 */
363 dev_priv->backlight_enabled = true; 366 if (!intel_panel_get_backlight(dev))
364 intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); 367 intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
365} 368}
366 369
367static void intel_panel_init_backlight(struct drm_device *dev) 370static void intel_panel_init_backlight(struct drm_device *dev)
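
The reordering above sets the backlight level before programming the PWM control registers, then re-reads it afterwards, because some machines clear BLC_PWM_CPU_CTL as a side effect of those writes. The shape of that verify-and-retry in miniature, with the registers modeled as plain variables:

    #include <stdint.h>
    #include <stdio.h>

    /* Registers modeled as variables; writing "ctl2" clears the duty
     * register, imitating the quirky hardware described above. */
    static uint32_t pwm_duty, pwm_ctl2;

    static void set_backlight(uint32_t level) { pwm_duty = level; }
    static uint32_t get_backlight(void)       { return pwm_duty; }
    static void write_ctl2(uint32_t v)        { pwm_ctl2 = v; pwm_duty = 0; }

    static void enable_backlight(uint32_t level)
    {
        set_backlight(level);  /* set the level first */
        write_ctl2(1);         /* programming control clears the duty reg */

        /* Check the level again and restore it if the write clobbered it. */
        if (!get_backlight())
            set_backlight(level);
    }

    int main(void)
    {
        enable_backlight(200);
        printf("level=%u\n", get_backlight()); /* 200 despite the clobber */
        return 0;
    }
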
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 3280cffe50f4..61fee7fcdc2c 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -447,12 +447,6 @@ void intel_update_fbc(struct drm_device *dev)
447 dev_priv->no_fbc_reason = FBC_MODULE_PARAM; 447 dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
448 goto out_disable; 448 goto out_disable;
449 } 449 }
450 if (intel_fb->obj->base.size > dev_priv->cfb_size) {
451 DRM_DEBUG_KMS("framebuffer too large, disabling "
452 "compression\n");
453 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
454 goto out_disable;
455 }
456 if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) || 450 if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
457 (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) { 451 (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
458 DRM_DEBUG_KMS("mode incompatible with compression, " 452 DRM_DEBUG_KMS("mode incompatible with compression, "
@@ -486,6 +480,14 @@ void intel_update_fbc(struct drm_device *dev)
486 if (in_dbg_master()) 480 if (in_dbg_master())
487 goto out_disable; 481 goto out_disable;
488 482
483 if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
484 DRM_INFO("not enough stolen space for compressed buffer (need %zd bytes), disabling\n", intel_fb->obj->base.size);
485 DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
486 DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
487 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
488 goto out_disable;
489 }
490
489 /* If the scanout has not changed, don't modify the FBC settings. 491 /* If the scanout has not changed, don't modify the FBC settings.
490 * Note that we make the fundamental assumption that the fb->obj 492 * Note that we make the fundamental assumption that the fb->obj
491 * cannot be unpinned (and have its GTT offset and fence revoked) 493 * cannot be unpinned (and have its GTT offset and fence revoked)
@@ -533,6 +535,7 @@ out_disable:
533 DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); 535 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
534 intel_disable_fbc(dev); 536 intel_disable_fbc(dev);
535 } 537 }
538 i915_gem_stolen_cleanup_compression(dev);
536} 539}
537 540
538static void i915_pineview_get_mem_freq(struct drm_device *dev) 541static void i915_pineview_get_mem_freq(struct drm_device *dev)
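
The FBC change above replaces the static size check with an on-demand reservation from stolen memory; any failure records a reason code and funnels through a single disable path. A compact sketch of that reason-code-plus-single-exit pattern (the enum and the reservation helper are illustrative, not the driver's):

    #include <stdio.h>

    enum fbc_reason { FBC_OK, FBC_STOLEN_TOO_SMALL, FBC_BAD_MODE };

    /* Hypothetical reservation helper: nonzero means not enough stolen
     * memory was available for a compressed buffer of this size. */
    static int setup_compression(size_t size) { return size > 4096 ? -1 : 0; }

    static enum fbc_reason update_fbc(size_t fb_size, int interlaced)
    {
        enum fbc_reason reason = FBC_OK;

        if (interlaced) {
            reason = FBC_BAD_MODE;
            goto out_disable;
        }
        if (setup_compression(fb_size)) {
            reason = FBC_STOLEN_TOO_SMALL;
            goto out_disable;
        }
        return FBC_OK;  /* compression enabled */

    out_disable:
        /* single exit: disable FBC and release any compression setup */
        return reason;
    }

    int main(void)
    {
        printf("%d\n", update_fbc(8192, 0)); /* 1: stolen too small */
        return 0;
    }
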
@@ -2286,7 +2289,6 @@ err_unpin:
2286 i915_gem_object_unpin(ctx); 2289 i915_gem_object_unpin(ctx);
2287err_unref: 2290err_unref:
2288 drm_gem_object_unreference(&ctx->base); 2291 drm_gem_object_unreference(&ctx->base);
2289 mutex_unlock(&dev->struct_mutex);
2290 return NULL; 2292 return NULL;
2291} 2293}
2292 2294
@@ -3581,6 +3583,19 @@ static void cpt_init_clock_gating(struct drm_device *dev)
3581 } 3583 }
3582} 3584}
3583 3585
3586static void gen6_check_mch_setup(struct drm_device *dev)
3587{
3588 struct drm_i915_private *dev_priv = dev->dev_private;
3589 uint32_t tmp;
3590
3591 tmp = I915_READ(MCH_SSKPD);
3592 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) {
3593 DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp);
3594 DRM_INFO("This can cause pipe underruns and display issues.\n");
3595 DRM_INFO("Please upgrade your BIOS to fix this.\n");
3596 }
3597}
3598
3584static void gen6_init_clock_gating(struct drm_device *dev) 3599static void gen6_init_clock_gating(struct drm_device *dev)
3585{ 3600{
3586 struct drm_i915_private *dev_priv = dev->dev_private; 3601 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3673,6 +3688,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
3673 I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI)); 3688 I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
3674 3689
3675 cpt_init_clock_gating(dev); 3690 cpt_init_clock_gating(dev);
3691
3692 gen6_check_mch_setup(dev);
3676} 3693}
3677 3694
3678static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) 3695static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
@@ -3684,6 +3701,10 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
3684 reg |= GEN7_FF_VS_SCHED_HW; 3701 reg |= GEN7_FF_VS_SCHED_HW;
3685 reg |= GEN7_FF_DS_SCHED_HW; 3702 reg |= GEN7_FF_DS_SCHED_HW;
3686 3703
3704 /* WaVSRefCountFullforceMissDisable */
3705 if (IS_HASWELL(dev_priv->dev))
3706 reg &= ~GEN7_FF_VS_REF_CNT_FFME;
3707
3687 I915_WRITE(GEN7_FF_THREAD_MODE, reg); 3708 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
3688} 3709}
3689 3710
@@ -3854,6 +3875,8 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
3854 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 3875 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
3855 3876
3856 cpt_init_clock_gating(dev); 3877 cpt_init_clock_gating(dev);
3878
3879 gen6_check_mch_setup(dev);
3857} 3880}
3858 3881
3859static void valleyview_init_clock_gating(struct drm_device *dev) 3882static void valleyview_init_clock_gating(struct drm_device *dev)
@@ -4047,35 +4070,57 @@ void intel_init_clock_gating(struct drm_device *dev)
4047 dev_priv->display.init_clock_gating(dev); 4070 dev_priv->display.init_clock_gating(dev);
4048} 4071}
4049 4072
4050/* Starting with Haswell, we have different power wells for 4073void intel_set_power_well(struct drm_device *dev, bool enable)
4051 * different parts of the GPU. This attempts to enable them all.
4052 */
4053void intel_init_power_wells(struct drm_device *dev)
4054{ 4074{
4055 struct drm_i915_private *dev_priv = dev->dev_private; 4075 struct drm_i915_private *dev_priv = dev->dev_private;
4056 unsigned long power_wells[] = { 4076 bool is_enabled, enable_requested;
4057 HSW_PWR_WELL_CTL1, 4077 uint32_t tmp;
4058 HSW_PWR_WELL_CTL2,
4059 HSW_PWR_WELL_CTL4
4060 };
4061 int i;
4062 4078
4063 if (!IS_HASWELL(dev)) 4079 if (!IS_HASWELL(dev))
4064 return; 4080 return;
4065 4081
4066 mutex_lock(&dev->struct_mutex); 4082 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
4083 is_enabled = tmp & HSW_PWR_WELL_STATE;
4084 enable_requested = tmp & HSW_PWR_WELL_ENABLE;
4067 4085
4068 for (i = 0; i < ARRAY_SIZE(power_wells); i++) { 4086 if (enable) {
4069 int well = I915_READ(power_wells[i]); 4087 if (!enable_requested)
4088 I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE);
4070 4089
4071 if ((well & HSW_PWR_WELL_STATE) == 0) { 4090 if (!is_enabled) {
4072 I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE); 4091 DRM_DEBUG_KMS("Enabling power well\n");
4073 if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20)) 4092 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
4074 DRM_ERROR("Error enabling power well %lx\n", power_wells[i]); 4093 HSW_PWR_WELL_STATE), 20))
4094 DRM_ERROR("Timeout enabling power well\n");
4095 }
4096 } else {
4097 if (enable_requested) {
4098 I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
4099 DRM_DEBUG_KMS("Requesting to disable the power well\n");
4075 } 4100 }
4076 } 4101 }
4102}
4077 4103
4078 mutex_unlock(&dev->struct_mutex); 4104/*
4105 * Starting with Haswell, we have a "Power Down Well" that can be turned off
4106 * when not needed anymore. We have 4 registers that can request the power well
4107 * to be enabled, and it will only be disabled if none of the registers is
4108 * requesting it to be enabled.
4109 */
4110void intel_init_power_well(struct drm_device *dev)
4111{
4112 struct drm_i915_private *dev_priv = dev->dev_private;
4113
4114 if (!IS_HASWELL(dev))
4115 return;
4116
4117 /* For now, we need the power well to be always enabled. */
4118 intel_set_power_well(dev, true);
4119
4120 /* We're taking over the BIOS, so clear any requests made by it since
4121 * the driver is in charge now. */
4122 if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE)
4123 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
4079} 4124}
4080 4125
4081/* Set up chip specific power management-related functions */ 4126/* Set up chip specific power management-related functions */
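
intel_set_power_well() above distinguishes the software "enable requested" bit from the hardware-owned "state" bit, and polls the latter after raising the former. A standalone sketch of that handshake, with a bounded polling loop standing in for wait_for() and the register simulated in software:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PWR_WELL_ENABLE (1u << 31)  /* request bit, software-owned */
    #define PWR_WELL_STATE  (1u << 30)  /* status bit, hardware-owned  */

    static uint32_t pwr_reg;

    /* Simulated hardware: the state bit simply follows the request bit. */
    static uint32_t rd(void) { return pwr_reg; }
    static void wr(uint32_t v)
    {
        pwr_reg = v & PWR_WELL_ENABLE;
        if (pwr_reg & PWR_WELL_ENABLE)
            pwr_reg |= PWR_WELL_STATE;
    }

    static int set_power_well(bool enable)
    {
        uint32_t tmp = rd();
        bool requested = tmp & PWR_WELL_ENABLE;
        bool enabled   = tmp & PWR_WELL_STATE;
        int tries;

        if (enable) {
            if (!requested)
                wr(PWR_WELL_ENABLE);        /* raise the request */
            if (!enabled) {
                /* bounded poll standing in for wait_for(..., 20) */
                for (tries = 0; tries < 20; tries++)
                    if (rd() & PWR_WELL_STATE)
                        return 0;
                return -1;                  /* timeout */
            }
        } else if (requested) {
            wr(0);                          /* drop only our own request */
        }
        return 0;
    }

    int main(void)
    {
        printf("%d\n", set_power_well(true)); /* 0 on success */
        return 0;
    }
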
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 42ff97d667d2..1d5d613eb6be 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -318,6 +318,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
318 * TLB invalidate requires a post-sync write. 318 * TLB invalidate requires a post-sync write.
319 */ 319 */
320 flags |= PIPE_CONTROL_QW_WRITE; 320 flags |= PIPE_CONTROL_QW_WRITE;
321 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
321 322
322 /* Workaround: we must issue a pipe_control with CS-stall bit 323 /* Workaround: we must issue a pipe_control with CS-stall bit
323 * set before a pipe_control command that has the state cache 324 * set before a pipe_control command that has the state cache
@@ -331,7 +332,7 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
331 332
332 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); 333 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
333 intel_ring_emit(ring, flags); 334 intel_ring_emit(ring, flags);
334 intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); 335 intel_ring_emit(ring, scratch_addr);
335 intel_ring_emit(ring, 0); 336 intel_ring_emit(ring, 0);
336 intel_ring_advance(ring); 337 intel_ring_advance(ring);
337 338
@@ -467,6 +468,9 @@ init_pipe_control(struct intel_ring_buffer *ring)
467 if (pc->cpu_page == NULL) 468 if (pc->cpu_page == NULL)
468 goto err_unpin; 469 goto err_unpin;
469 470
471 DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
472 ring->name, pc->gtt_offset);
473
470 pc->obj = obj; 474 pc->obj = obj;
471 ring->private = pc; 475 ring->private = pc;
472 return 0; 476 return 0;
@@ -613,6 +617,13 @@ gen6_add_request(struct intel_ring_buffer *ring)
613 return 0; 617 return 0;
614} 618}
615 619
620static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
621 u32 seqno)
622{
623 struct drm_i915_private *dev_priv = dev->dev_private;
624 return dev_priv->last_seqno < seqno;
625}
626
616/** 627/**
617 * intel_ring_sync - sync the waiter to the signaller on seqno 628 * intel_ring_sync - sync the waiter to the signaller on seqno
618 * 629 *
@@ -643,11 +654,20 @@ gen6_ring_sync(struct intel_ring_buffer *waiter,
643 if (ret) 654 if (ret)
644 return ret; 655 return ret;
645 656
646 intel_ring_emit(waiter, 657 /* If seqno wrap happened, omit the wait with no-ops */
647 dw1 | signaller->semaphore_register[waiter->id]); 658 if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
648 intel_ring_emit(waiter, seqno); 659 intel_ring_emit(waiter,
649 intel_ring_emit(waiter, 0); 660 dw1 |
650 intel_ring_emit(waiter, MI_NOOP); 661 signaller->semaphore_register[waiter->id]);
662 intel_ring_emit(waiter, seqno);
663 intel_ring_emit(waiter, 0);
664 intel_ring_emit(waiter, MI_NOOP);
665 } else {
666 intel_ring_emit(waiter, MI_NOOP);
667 intel_ring_emit(waiter, MI_NOOP);
668 intel_ring_emit(waiter, MI_NOOP);
669 intel_ring_emit(waiter, MI_NOOP);
670 }
651 intel_ring_advance(waiter); 671 intel_ring_advance(waiter);
652 672
653 return 0; 673 return 0;
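
When the seqno has wrapped, the sync code above must still emit exactly four dwords because the caller has already reserved that much ring space, so it substitutes MI_NOOPs for the semaphore wait. The invariant in miniature (the opcode encodings are placeholders):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MI_NOOP      0x00000000u  /* placeholder encodings */
    #define MI_SEMA_WAIT 0x01000000u

    static uint32_t ring[4];
    static int tail;

    static void emit(uint32_t dw) { ring[tail++] = dw; }

    static void ring_sync(uint32_t seqno, bool wrapped)
    {
        tail = 0;
        if (!wrapped) {
            emit(MI_SEMA_WAIT); /* wait on the signaller's semaphore */
            emit(seqno);
            emit(0);
            emit(MI_NOOP);
        } else {
            /* Seqno wrapped: skip the wait, but keep the slot size fixed. */
            emit(MI_NOOP);
            emit(MI_NOOP);
            emit(MI_NOOP);
            emit(MI_NOOP);
        }
        assert(tail == 4); /* both paths consume the same reservation */
    }

    int main(void)
    {
        ring_sync(42, true);
        printf("emitted %d dwords\n", tail);
        return 0;
    }
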
@@ -728,6 +748,12 @@ ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
728 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 748 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
729} 749}
730 750
751static void
752ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
753{
754 intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
755}
756
731static u32 757static u32
732pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) 758pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
733{ 759{
@@ -735,6 +761,13 @@ pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
735 return pc->cpu_page[0]; 761 return pc->cpu_page[0];
736} 762}
737 763
764static void
765pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
766{
767 struct pipe_control *pc = ring->private;
768 pc->cpu_page[0] = seqno;
769}
770
738static bool 771static bool
739gen5_ring_get_irq(struct intel_ring_buffer *ring) 772gen5_ring_get_irq(struct intel_ring_buffer *ring)
740{ 773{
@@ -1164,7 +1197,11 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1164 return ret; 1197 return ret;
1165 } 1198 }
1166 1199
1167 obj = i915_gem_alloc_object(dev, ring->size); 1200 obj = NULL;
1201 if (!HAS_LLC(dev))
1202 obj = i915_gem_object_create_stolen(dev, ring->size);
1203 if (obj == NULL)
1204 obj = i915_gem_alloc_object(dev, ring->size);
1168 if (obj == NULL) { 1205 if (obj == NULL) {
1169 DRM_ERROR("Failed to allocate ringbuffer\n"); 1206 DRM_ERROR("Failed to allocate ringbuffer\n");
1170 ret = -ENOMEM; 1207 ret = -ENOMEM;
@@ -1182,7 +1219,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1182 goto err_unpin; 1219 goto err_unpin;
1183 1220
1184 ring->virtual_start = 1221 ring->virtual_start =
1185 ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset, 1222 ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
1186 ring->size); 1223 ring->size);
1187 if (ring->virtual_start == NULL) { 1224 if (ring->virtual_start == NULL) {
1188 DRM_ERROR("Failed to map ringbuffer.\n"); 1225 DRM_ERROR("Failed to map ringbuffer.\n");
@@ -1348,7 +1385,8 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
1348 1385
1349 msleep(1); 1386 msleep(1);
1350 1387
1351 ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible); 1388 ret = i915_gem_check_wedge(&dev_priv->gpu_error,
1389 dev_priv->mm.interruptible);
1352 if (ret) 1390 if (ret)
1353 return ret; 1391 return ret;
1354 } while (!time_after(jiffies, end)); 1392 } while (!time_after(jiffies, end));
@@ -1410,14 +1448,35 @@ intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
1410 return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request); 1448 return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
1411} 1449}
1412 1450
1451static int __intel_ring_begin(struct intel_ring_buffer *ring,
1452 int bytes)
1453{
1454 int ret;
1455
1456 if (unlikely(ring->tail + bytes > ring->effective_size)) {
1457 ret = intel_wrap_ring_buffer(ring);
1458 if (unlikely(ret))
1459 return ret;
1460 }
1461
1462 if (unlikely(ring->space < bytes)) {
1463 ret = ring_wait_for_space(ring, bytes);
1464 if (unlikely(ret))
1465 return ret;
1466 }
1467
1468 ring->space -= bytes;
1469 return 0;
1470}
1471
1413int intel_ring_begin(struct intel_ring_buffer *ring, 1472int intel_ring_begin(struct intel_ring_buffer *ring,
1414 int num_dwords) 1473 int num_dwords)
1415{ 1474{
1416 drm_i915_private_t *dev_priv = ring->dev->dev_private; 1475 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1417 int n = 4*num_dwords;
1418 int ret; 1476 int ret;
1419 1477
1420 ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible); 1478 ret = i915_gem_check_wedge(&dev_priv->gpu_error,
1479 dev_priv->mm.interruptible);
1421 if (ret) 1480 if (ret)
1422 return ret; 1481 return ret;
1423 1482
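
__intel_ring_begin() above centralizes the reservation logic: wrap back to the start when the request would cross the end of the usable area, wait until enough space frees up, then debit the space. The same logic on a toy ring (sizes are arbitrary and the space-wait is a stub that pretends the GPU caught up; in the real driver, emits advance the tail afterwards):

    #include <stdio.h>

    #define RING_SIZE      64
    #define EFFECTIVE_SIZE 56   /* usable bytes; the rest is wrap slack */

    static int tail, space = EFFECTIVE_SIZE;

    static int wrap_ring(void)           { tail = 0; return 0; }
    static int wait_for_space(int bytes) { space = bytes; return 0; } /* stub */

    static int ring_begin(int bytes)
    {
        int ret;

        if (tail + bytes > EFFECTIVE_SIZE) {  /* would run off the end */
            ret = wrap_ring();
            if (ret)
                return ret;
        }
        if (space < bytes) {                  /* not enough room yet */
            ret = wait_for_space(bytes);
            if (ret)
                return ret;
        }
        space -= bytes;                       /* reserve it */
        return 0;
    }

    int main(void)
    {
        tail = 50;
        printf("%d tail=%d\n", ring_begin(16), tail); /* wrapped to 0 */
        return 0;
    }
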
@@ -1426,20 +1485,21 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
1426 if (ret) 1485 if (ret)
1427 return ret; 1486 return ret;
1428 1487
1429 if (unlikely(ring->tail + n > ring->effective_size)) { 1488 return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
1430 ret = intel_wrap_ring_buffer(ring); 1489}
1431 if (unlikely(ret))
1432 return ret;
1433 }
1434 1490
1435 if (unlikely(ring->space < n)) { 1491void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
1436 ret = ring_wait_for_space(ring, n); 1492{
1437 if (unlikely(ret)) 1493 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1438 return ret; 1494
1495 BUG_ON(ring->outstanding_lazy_request);
1496
1497 if (INTEL_INFO(ring->dev)->gen >= 6) {
1498 I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
1499 I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
1439 } 1500 }
1440 1501
1441 ring->space -= n; 1502 ring->set_seqno(ring, seqno);
1442 return 0;
1443} 1503}
1444 1504
1445void intel_ring_advance(struct intel_ring_buffer *ring) 1505void intel_ring_advance(struct intel_ring_buffer *ring)
@@ -1447,7 +1507,7 @@ void intel_ring_advance(struct intel_ring_buffer *ring)
1447 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1507 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1448 1508
1449 ring->tail &= ring->size - 1; 1509 ring->tail &= ring->size - 1;
1450 if (dev_priv->stop_rings & intel_ring_flag(ring)) 1510 if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
1451 return; 1511 return;
1452 ring->write_tail(ring, ring->tail); 1512 ring->write_tail(ring, ring->tail);
1453} 1513}
@@ -1604,6 +1664,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1604 ring->irq_put = gen6_ring_put_irq; 1664 ring->irq_put = gen6_ring_put_irq;
1605 ring->irq_enable_mask = GT_USER_INTERRUPT; 1665 ring->irq_enable_mask = GT_USER_INTERRUPT;
1606 ring->get_seqno = gen6_ring_get_seqno; 1666 ring->get_seqno = gen6_ring_get_seqno;
1667 ring->set_seqno = ring_set_seqno;
1607 ring->sync_to = gen6_ring_sync; 1668 ring->sync_to = gen6_ring_sync;
1608 ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID; 1669 ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
1609 ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV; 1670 ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
@@ -1614,6 +1675,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1614 ring->add_request = pc_render_add_request; 1675 ring->add_request = pc_render_add_request;
1615 ring->flush = gen4_render_ring_flush; 1676 ring->flush = gen4_render_ring_flush;
1616 ring->get_seqno = pc_render_get_seqno; 1677 ring->get_seqno = pc_render_get_seqno;
1678 ring->set_seqno = pc_render_set_seqno;
1617 ring->irq_get = gen5_ring_get_irq; 1679 ring->irq_get = gen5_ring_get_irq;
1618 ring->irq_put = gen5_ring_put_irq; 1680 ring->irq_put = gen5_ring_put_irq;
1619 ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY; 1681 ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
@@ -1624,6 +1686,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1624 else 1686 else
1625 ring->flush = gen4_render_ring_flush; 1687 ring->flush = gen4_render_ring_flush;
1626 ring->get_seqno = ring_get_seqno; 1688 ring->get_seqno = ring_get_seqno;
1689 ring->set_seqno = ring_set_seqno;
1627 if (IS_GEN2(dev)) { 1690 if (IS_GEN2(dev)) {
1628 ring->irq_get = i8xx_ring_get_irq; 1691 ring->irq_get = i8xx_ring_get_irq;
1629 ring->irq_put = i8xx_ring_put_irq; 1692 ring->irq_put = i8xx_ring_put_irq;
@@ -1695,6 +1758,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1695 else 1758 else
1696 ring->flush = gen4_render_ring_flush; 1759 ring->flush = gen4_render_ring_flush;
1697 ring->get_seqno = ring_get_seqno; 1760 ring->get_seqno = ring_get_seqno;
1761 ring->set_seqno = ring_set_seqno;
1698 if (IS_GEN2(dev)) { 1762 if (IS_GEN2(dev)) {
1699 ring->irq_get = i8xx_ring_get_irq; 1763 ring->irq_get = i8xx_ring_get_irq;
1700 ring->irq_put = i8xx_ring_put_irq; 1764 ring->irq_put = i8xx_ring_put_irq;
@@ -1755,6 +1819,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
1755 ring->flush = gen6_ring_flush; 1819 ring->flush = gen6_ring_flush;
1756 ring->add_request = gen6_add_request; 1820 ring->add_request = gen6_add_request;
1757 ring->get_seqno = gen6_ring_get_seqno; 1821 ring->get_seqno = gen6_ring_get_seqno;
1822 ring->set_seqno = ring_set_seqno;
1758 ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT; 1823 ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
1759 ring->irq_get = gen6_ring_get_irq; 1824 ring->irq_get = gen6_ring_get_irq;
1760 ring->irq_put = gen6_ring_put_irq; 1825 ring->irq_put = gen6_ring_put_irq;
@@ -1770,6 +1835,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
1770 ring->flush = bsd_ring_flush; 1835 ring->flush = bsd_ring_flush;
1771 ring->add_request = i9xx_add_request; 1836 ring->add_request = i9xx_add_request;
1772 ring->get_seqno = ring_get_seqno; 1837 ring->get_seqno = ring_get_seqno;
1838 ring->set_seqno = ring_set_seqno;
1773 if (IS_GEN5(dev)) { 1839 if (IS_GEN5(dev)) {
1774 ring->irq_enable_mask = GT_BSD_USER_INTERRUPT; 1840 ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
1775 ring->irq_get = gen5_ring_get_irq; 1841 ring->irq_get = gen5_ring_get_irq;
@@ -1799,6 +1865,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
1799 ring->flush = blt_ring_flush; 1865 ring->flush = blt_ring_flush;
1800 ring->add_request = gen6_add_request; 1866 ring->add_request = gen6_add_request;
1801 ring->get_seqno = gen6_ring_get_seqno; 1867 ring->get_seqno = gen6_ring_get_seqno;
1868 ring->set_seqno = ring_set_seqno;
1802 ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT; 1869 ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
1803 ring->irq_get = gen6_ring_get_irq; 1870 ring->irq_get = gen6_ring_get_irq;
1804 ring->irq_put = gen6_ring_put_irq; 1871 ring->irq_put = gen6_ring_put_irq;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6af87cd05725..d66208c2c48b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -90,6 +90,8 @@ struct intel_ring_buffer {
90 */ 90 */
91 u32 (*get_seqno)(struct intel_ring_buffer *ring, 91 u32 (*get_seqno)(struct intel_ring_buffer *ring,
92 bool lazy_coherency); 92 bool lazy_coherency);
93 void (*set_seqno)(struct intel_ring_buffer *ring,
94 u32 seqno);
93 int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, 95 int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
94 u32 offset, u32 length, 96 u32 offset, u32 length,
95 unsigned flags); 97 unsigned flags);
@@ -178,6 +180,13 @@ intel_read_status_page(struct intel_ring_buffer *ring,
178 return ring->status_page.page_addr[reg]; 180 return ring->status_page.page_addr[reg];
179} 181}
180 182
183static inline void
184intel_write_status_page(struct intel_ring_buffer *ring,
185 int reg, u32 value)
186{
187 ring->status_page.page_addr[reg] = value;
188}
189
181/** 190/**
182 * Reads a dword out of the status page, which is written to from the command 191 * Reads a dword out of the status page, which is written to from the command
183 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or 192 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
@@ -208,7 +217,7 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
208} 217}
209void intel_ring_advance(struct intel_ring_buffer *ring); 218void intel_ring_advance(struct intel_ring_buffer *ring);
210int __must_check intel_ring_idle(struct intel_ring_buffer *ring); 219int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
211 220void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
212int intel_ring_flush_all_caches(struct intel_ring_buffer *ring); 221int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
213int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring); 222int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
214 223
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index c275bf0fa36d..d07a8cdf998e 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -103,6 +103,7 @@ struct intel_sdvo {
103 * It is only valid when using TMDS encoding and 8 bit per color mode. 103 * It is only valid when using TMDS encoding and 8 bit per color mode.
104 */ 104 */
105 uint32_t color_range; 105 uint32_t color_range;
106 bool color_range_auto;
106 107
107 /** 108 /**
108 * This is set if we're going to treat the device as TV-out. 109 * This is set if we're going to treat the device as TV-out.
@@ -125,6 +126,7 @@ struct intel_sdvo {
125 bool is_hdmi; 126 bool is_hdmi;
126 bool has_hdmi_monitor; 127 bool has_hdmi_monitor;
127 bool has_hdmi_audio; 128 bool has_hdmi_audio;
129 bool rgb_quant_range_selectable;
128 130
129 /** 131 /**
130 * This is set if we detect output of sdvo device as LVDS and 132 * This is set if we detect output of sdvo device as LVDS and
@@ -946,7 +948,8 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
946 &tx_rate, 1); 948 &tx_rate, 1);
947} 949}
948 950
949static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo) 951static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
952 const struct drm_display_mode *adjusted_mode)
950{ 953{
951 struct dip_infoframe avi_if = { 954 struct dip_infoframe avi_if = {
952 .type = DIP_TYPE_AVI, 955 .type = DIP_TYPE_AVI,
@@ -955,6 +958,13 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
955 }; 958 };
956 uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)]; 959 uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
957 960
961 if (intel_sdvo->rgb_quant_range_selectable) {
962 if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
963 avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
964 else
965 avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
966 }
967
958 intel_dip_infoframe_csum(&avi_if); 968 intel_dip_infoframe_csum(&avi_if);
959 969
960 /* sdvo spec says that the ecc is handled by the hw, and it looks like 970 /* sdvo spec says that the ecc is handled by the hw, and it looks like
@@ -1064,6 +1074,18 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
1064 multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode); 1074 multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode);
1065 intel_mode_set_pixel_multiplier(adjusted_mode, multiplier); 1075 intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
1066 1076
1077 if (intel_sdvo->color_range_auto) {
1078 /* See CEA-861-E - 5.1 Default Encoding Parameters */
1079 if (intel_sdvo->has_hdmi_monitor &&
1080 drm_match_cea_mode(adjusted_mode) > 1)
1081 intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235;
1082 else
1083 intel_sdvo->color_range = 0;
1084 }
1085
1086 if (intel_sdvo->color_range)
1087 adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;
1088
1067 return true; 1089 return true;
1068} 1090}
1069 1091
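
The "Automatic" policy added above follows CEA-861-E section 5.1: on an HDMI sink, CEA modes other than VIC 1 (640x480) default to limited-range RGB, and everything else stays full range. Reduced to a predicate, with a hypothetical VIC lookup standing in for drm_match_cea_mode():

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in for drm_match_cea_mode(): returns the CEA
     * VIC for the mode, or 0 if it is not a CEA mode at all. */
    static int cea_vic_of(int hdisplay, int vdisplay)
    {
        if (hdisplay == 640 && vdisplay == 480)   return 1;  /* VIC 1  */
        if (hdisplay == 1920 && vdisplay == 1080) return 16; /* VIC 16 */
        return 0;
    }

    static bool use_limited_range(bool hdmi_sink, int hdisp, int vdisp)
    {
        /* CEA-861-E 5.1: limited range for CEA modes except VIC 1. */
        return hdmi_sink && cea_vic_of(hdisp, vdisp) > 1;
    }

    int main(void)
    {
        printf("%d\n", use_limited_range(true, 1920, 1080)); /* 1: limited */
        printf("%d\n", use_limited_range(true, 640, 480));   /* 0: full   */
        return 0;
    }
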
@@ -1121,7 +1143,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1121 intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI); 1143 intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
1122 intel_sdvo_set_colorimetry(intel_sdvo, 1144 intel_sdvo_set_colorimetry(intel_sdvo,
1123 SDVO_COLORIMETRY_RGB256); 1145 SDVO_COLORIMETRY_RGB256);
1124 intel_sdvo_set_avi_infoframe(intel_sdvo); 1146 intel_sdvo_set_avi_infoframe(intel_sdvo, adjusted_mode);
1125 } else 1147 } else
1126 intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI); 1148 intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
1127 1149
@@ -1153,7 +1175,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1153 /* The real mode polarity is set by the SDVO commands, using 1175 /* The real mode polarity is set by the SDVO commands, using
1154 * struct intel_sdvo_dtd. */ 1176 * struct intel_sdvo_dtd. */
1155 sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH; 1177 sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
1156 if (intel_sdvo->is_hdmi) 1178 if (!HAS_PCH_SPLIT(dev) && intel_sdvo->is_hdmi)
1157 sdvox |= intel_sdvo->color_range; 1179 sdvox |= intel_sdvo->color_range;
1158 if (INTEL_INFO(dev)->gen < 5) 1180 if (INTEL_INFO(dev)->gen < 5)
1159 sdvox |= SDVO_BORDER_ENABLE; 1181 sdvox |= SDVO_BORDER_ENABLE;
@@ -1513,6 +1535,8 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
1513 if (intel_sdvo->is_hdmi) { 1535 if (intel_sdvo->is_hdmi) {
1514 intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid); 1536 intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
1515 intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid); 1537 intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
1538 intel_sdvo->rgb_quant_range_selectable =
1539 drm_rgb_quant_range_selectable(edid);
1516 } 1540 }
1517 } else 1541 } else
1518 status = connector_status_disconnected; 1542 status = connector_status_disconnected;
@@ -1564,6 +1588,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
1564 1588
1565 intel_sdvo->has_hdmi_monitor = false; 1589 intel_sdvo->has_hdmi_monitor = false;
1566 intel_sdvo->has_hdmi_audio = false; 1590 intel_sdvo->has_hdmi_audio = false;
1591 intel_sdvo->rgb_quant_range_selectable = false;
1567 1592
1568 if ((intel_sdvo_connector->output_flag & response) == 0) 1593 if ((intel_sdvo_connector->output_flag & response) == 0)
1569 ret = connector_status_disconnected; 1594 ret = connector_status_disconnected;
@@ -1897,10 +1922,21 @@ intel_sdvo_set_property(struct drm_connector *connector,
1897 } 1922 }
1898 1923
1899 if (property == dev_priv->broadcast_rgb_property) { 1924 if (property == dev_priv->broadcast_rgb_property) {
1900 if (val == !!intel_sdvo->color_range) 1925 switch (val) {
1901 return 0; 1926 case INTEL_BROADCAST_RGB_AUTO:
1902 1927 intel_sdvo->color_range_auto = true;
1903 intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0; 1928 break;
1929 case INTEL_BROADCAST_RGB_FULL:
1930 intel_sdvo->color_range_auto = false;
1931 intel_sdvo->color_range = 0;
1932 break;
1933 case INTEL_BROADCAST_RGB_LIMITED:
1934 intel_sdvo->color_range_auto = false;
1935 intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235;
1936 break;
1937 default:
1938 return -EINVAL;
1939 }
1904 goto done; 1940 goto done;
1905 } 1941 }
1906 1942
@@ -1997,11 +2033,8 @@ set_value:
1997 2033
1998 2034
1999done: 2035done:
2000 if (intel_sdvo->base.base.crtc) { 2036 if (intel_sdvo->base.base.crtc)
2001 struct drm_crtc *crtc = intel_sdvo->base.base.crtc; 2037 intel_crtc_restore_mode(intel_sdvo->base.base.crtc);
2002 intel_set_mode(crtc, &crtc->mode,
2003 crtc->x, crtc->y, crtc->fb);
2004 }
2005 2038
2006 return 0; 2039 return 0;
2007#undef CHECK_PROPERTY 2040#undef CHECK_PROPERTY
@@ -2010,7 +2043,6 @@ done:
2010static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = { 2043static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
2011 .mode_fixup = intel_sdvo_mode_fixup, 2044 .mode_fixup = intel_sdvo_mode_fixup,
2012 .mode_set = intel_sdvo_mode_set, 2045 .mode_set = intel_sdvo_mode_set,
2013 .disable = intel_encoder_noop,
2014}; 2046};
2015 2047
2016static const struct drm_connector_funcs intel_sdvo_connector_funcs = { 2048static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
@@ -2200,13 +2232,16 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
2200} 2232}
2201 2233
2202static void 2234static void
2203intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector) 2235intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
2236 struct intel_sdvo_connector *connector)
2204{ 2237{
2205 struct drm_device *dev = connector->base.base.dev; 2238 struct drm_device *dev = connector->base.base.dev;
2206 2239
2207 intel_attach_force_audio_property(&connector->base.base); 2240 intel_attach_force_audio_property(&connector->base.base);
2208 if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) 2241 if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) {
2209 intel_attach_broadcast_rgb_property(&connector->base.base); 2242 intel_attach_broadcast_rgb_property(&connector->base.base);
2243 intel_sdvo->color_range_auto = true;
2244 }
2210} 2245}
2211 2246
2212static bool 2247static bool
@@ -2254,7 +2289,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2254 2289
2255 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); 2290 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
2256 if (intel_sdvo->is_hdmi) 2291 if (intel_sdvo->is_hdmi)
2257 intel_sdvo_add_hdmi_properties(intel_sdvo_connector); 2292 intel_sdvo_add_hdmi_properties(intel_sdvo, intel_sdvo_connector);
2258 2293
2259 return true; 2294 return true;
2260} 2295}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index d7b060e0a231..1b6eb76beb7c 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -50,6 +50,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
50 u32 sprctl, sprscale = 0; 50 u32 sprctl, sprscale = 0;
51 unsigned long sprsurf_offset, linear_offset; 51 unsigned long sprsurf_offset, linear_offset;
52 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); 52 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
53 bool scaling_was_enabled = dev_priv->sprite_scaling_enabled;
53 54
54 sprctl = I915_READ(SPRCTL(pipe)); 55 sprctl = I915_READ(SPRCTL(pipe));
55 56
@@ -89,6 +90,9 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
89 sprctl |= SPRITE_TRICKLE_FEED_DISABLE; 90 sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
90 sprctl |= SPRITE_ENABLE; 91 sprctl |= SPRITE_ENABLE;
91 92
93 if (IS_HASWELL(dev))
94 sprctl |= SPRITE_PIPE_CSC_ENABLE;
95
92 /* Sizes are 0 based */ 96 /* Sizes are 0 based */
93 src_w--; 97 src_w--;
94 src_h--; 98 src_h--;
@@ -103,27 +107,23 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
103 * when scaling is disabled. 107 * when scaling is disabled.
104 */ 108 */
105 if (crtc_w != src_w || crtc_h != src_h) { 109 if (crtc_w != src_w || crtc_h != src_h) {
106 if (!dev_priv->sprite_scaling_enabled) { 110 dev_priv->sprite_scaling_enabled |= 1 << pipe;
107 dev_priv->sprite_scaling_enabled = true; 111
112 if (!scaling_was_enabled) {
108 intel_update_watermarks(dev); 113 intel_update_watermarks(dev);
109 intel_wait_for_vblank(dev, pipe); 114 intel_wait_for_vblank(dev, pipe);
110 } 115 }
111 sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; 116 sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
112 } else { 117 } else
113 if (dev_priv->sprite_scaling_enabled) { 118 dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
114 dev_priv->sprite_scaling_enabled = false;
115 /* potentially re-enable LP watermarks */
116 intel_update_watermarks(dev);
117 }
118 }
119 119
120 I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); 120 I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
121 I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x); 121 I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
122 122
123 linear_offset = y * fb->pitches[0] + x * pixel_size; 123 linear_offset = y * fb->pitches[0] + x * pixel_size;
124 sprsurf_offset = 124 sprsurf_offset =
125 intel_gen4_compute_offset_xtiled(&x, &y, 125 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
126 pixel_size, fb->pitches[0]); 126 pixel_size, fb->pitches[0]);
127 linear_offset -= sprsurf_offset; 127 linear_offset -= sprsurf_offset;
128 128
129 /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET 129 /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
@@ -141,6 +141,10 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
141 I915_WRITE(SPRCTL(pipe), sprctl); 141 I915_WRITE(SPRCTL(pipe), sprctl);
142 I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset); 142 I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
143 POSTING_READ(SPRSURF(pipe)); 143 POSTING_READ(SPRSURF(pipe));
144
145 /* potentially re-enable LP watermarks */
146 if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
147 intel_update_watermarks(dev);
144} 148}
145 149
146static void 150static void
@@ -150,6 +154,7 @@ ivb_disable_plane(struct drm_plane *plane)
150 struct drm_i915_private *dev_priv = dev->dev_private; 154 struct drm_i915_private *dev_priv = dev->dev_private;
151 struct intel_plane *intel_plane = to_intel_plane(plane); 155 struct intel_plane *intel_plane = to_intel_plane(plane);
152 int pipe = intel_plane->pipe; 156 int pipe = intel_plane->pipe;
157 bool scaling_was_enabled = dev_priv->sprite_scaling_enabled;
153 158
154 I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE); 159 I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
155 /* Can't leave the scaler enabled... */ 160 /* Can't leave the scaler enabled... */
@@ -159,8 +164,11 @@ ivb_disable_plane(struct drm_plane *plane)
159 I915_MODIFY_DISPBASE(SPRSURF(pipe), 0); 164 I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
160 POSTING_READ(SPRSURF(pipe)); 165 POSTING_READ(SPRSURF(pipe));
161 166
162 dev_priv->sprite_scaling_enabled = false; 167 dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
163 intel_update_watermarks(dev); 168
169 /* potentially re-enable LP watermarks */
170 if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
171 intel_update_watermarks(dev);
164} 172}
165 173
166static int 174static int
@@ -287,8 +295,8 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
287 295
288 linear_offset = y * fb->pitches[0] + x * pixel_size; 296 linear_offset = y * fb->pitches[0] + x * pixel_size;
289 dvssurf_offset = 297 dvssurf_offset =
290 intel_gen4_compute_offset_xtiled(&x, &y, 298 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
291 pixel_size, fb->pitches[0]); 299 pixel_size, fb->pitches[0]);
292 linear_offset -= dvssurf_offset; 300 linear_offset -= dvssurf_offset;
293 301
294 if (obj->tiling_mode != I915_TILING_NONE) 302 if (obj->tiling_mode != I915_TILING_NONE)
@@ -593,7 +601,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
593 if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) 601 if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
594 return -EINVAL; 602 return -EINVAL;
595 603
596 mutex_lock(&dev->mode_config.mutex); 604 drm_modeset_lock_all(dev);
597 605
598 obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE); 606 obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
599 if (!obj) { 607 if (!obj) {
@@ -606,7 +614,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
606 ret = intel_plane->update_colorkey(plane, set); 614 ret = intel_plane->update_colorkey(plane, set);
607 615
608out_unlock: 616out_unlock:
609 mutex_unlock(&dev->mode_config.mutex); 617 drm_modeset_unlock_all(dev);
610 return ret; 618 return ret;
611} 619}
612 620
@@ -622,7 +630,7 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
622 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 630 if (!drm_core_check_feature(dev, DRIVER_MODESET))
623 return -ENODEV; 631 return -ENODEV;
624 632
625 mutex_lock(&dev->mode_config.mutex); 633 drm_modeset_lock_all(dev);
626 634
627 obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE); 635 obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
628 if (!obj) { 636 if (!obj) {
@@ -635,7 +643,7 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
635 intel_plane->get_colorkey(plane, get); 643 intel_plane->get_colorkey(plane, get);
636 644
637out_unlock: 645out_unlock:
638 mutex_unlock(&dev->mode_config.mutex); 646 drm_modeset_unlock_all(dev);
639 return ret; 647 return ret;
640} 648}
641 649
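
The sprite change above turns the single scaling flag into a per-pipe bitmask, so the LP watermarks are recomputed only when scaling turns on for the first pipe or off for the last one. The transition logic in isolation (a sketch, not the driver's update path, which also waits for vblank on the enable edge):

    #include <stdio.h>

    static unsigned scaling_enabled; /* one bit per pipe */

    static void update_watermarks(void) { puts("watermarks recomputed"); }

    static void set_pipe_scaling(int pipe, int on)
    {
        unsigned was = scaling_enabled;

        if (on)
            scaling_enabled |= 1u << pipe;
        else
            scaling_enabled &= ~(1u << pipe);

        /* Only 0 <-> nonzero transitions affect the LP watermarks. */
        if (!was != !scaling_enabled)
            update_watermarks();
    }

    int main(void)
    {
        set_pipe_scaling(0, 1); /* recomputed: first scaler enabled */
        set_pipe_scaling(1, 1); /* silent: already nonzero          */
        set_pipe_scaling(0, 0); /* silent: pipe 1 still scaling     */
        set_pipe_scaling(1, 0); /* recomputed: last scaler disabled */
        return 0;
    }
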
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index ea93520c1278..d808421c1c80 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1479,8 +1479,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
1479 } 1479 }
1480 1480
1481 if (changed && crtc) 1481 if (changed && crtc)
1482 intel_set_mode(crtc, &crtc->mode, 1482 intel_crtc_restore_mode(crtc);
1483 crtc->x, crtc->y, crtc->fb);
1484out: 1483out:
1485 return ret; 1484 return ret;
1486} 1485}
@@ -1488,7 +1487,6 @@ out:
1488static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = { 1487static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
1489 .mode_fixup = intel_tv_mode_fixup, 1488 .mode_fixup = intel_tv_mode_fixup,
1490 .mode_set = intel_tv_mode_set, 1489 .mode_set = intel_tv_mode_set,
1491 .disable = intel_encoder_noop,
1492}; 1490};
1493 1491
1494static const struct drm_connector_funcs intel_tv_connector_funcs = { 1492static const struct drm_connector_funcs intel_tv_connector_funcs = {
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 2f486481d79a..d2253f639481 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -13,6 +13,7 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <drm/drmP.h> 14#include <drm/drmP.h>
15#include <drm/drm_fb_helper.h> 15#include <drm/drm_fb_helper.h>
16#include <drm/drm_crtc_helper.h>
16 17
17#include <linux/fb.h> 18#include <linux/fb.h>
18 19
@@ -120,9 +121,10 @@ static int mgag200fb_create_object(struct mga_fbdev *afbdev,
120 return ret; 121 return ret;
121} 122}
122 123
123static int mgag200fb_create(struct mga_fbdev *mfbdev, 124static int mgag200fb_create(struct drm_fb_helper *helper,
124 struct drm_fb_helper_surface_size *sizes) 125 struct drm_fb_helper_surface_size *sizes)
125{ 126{
127 struct mga_fbdev *mfbdev = (struct mga_fbdev *)helper;
126 struct drm_device *dev = mfbdev->helper.dev; 128 struct drm_device *dev = mfbdev->helper.dev;
127 struct drm_mode_fb_cmd2 mode_cmd; 129 struct drm_mode_fb_cmd2 mode_cmd;
128 struct mga_device *mdev = dev->dev_private; 130 struct mga_device *mdev = dev->dev_private;
@@ -209,23 +211,6 @@ out:
209 return ret; 211 return ret;
210} 212}
211 213
212static int mga_fb_find_or_create_single(struct drm_fb_helper *helper,
213 struct drm_fb_helper_surface_size
214 *sizes)
215{
216 struct mga_fbdev *mfbdev = (struct mga_fbdev *)helper;
217 int new_fb = 0;
218 int ret;
219
220 if (!helper->fb) {
221 ret = mgag200fb_create(mfbdev, sizes);
222 if (ret)
223 return ret;
224 new_fb = 1;
225 }
226 return new_fb;
227}
228
229static int mga_fbdev_destroy(struct drm_device *dev, 214static int mga_fbdev_destroy(struct drm_device *dev,
230 struct mga_fbdev *mfbdev) 215 struct mga_fbdev *mfbdev)
231{ 216{
@@ -247,6 +232,7 @@ static int mga_fbdev_destroy(struct drm_device *dev,
247 } 232 }
248 drm_fb_helper_fini(&mfbdev->helper); 233 drm_fb_helper_fini(&mfbdev->helper);
249 vfree(mfbdev->sysram); 234 vfree(mfbdev->sysram);
235 drm_framebuffer_unregister_private(&mfb->base);
250 drm_framebuffer_cleanup(&mfb->base); 236 drm_framebuffer_cleanup(&mfb->base);
251 237
252 return 0; 238 return 0;
@@ -255,7 +241,7 @@ static int mga_fbdev_destroy(struct drm_device *dev,
255static struct drm_fb_helper_funcs mga_fb_helper_funcs = { 241static struct drm_fb_helper_funcs mga_fb_helper_funcs = {
256 .gamma_set = mga_crtc_fb_gamma_set, 242 .gamma_set = mga_crtc_fb_gamma_set,
257 .gamma_get = mga_crtc_fb_gamma_get, 243 .gamma_get = mga_crtc_fb_gamma_get,
258 .fb_probe = mga_fb_find_or_create_single, 244 .fb_probe = mgag200fb_create,
259}; 245};
260 246
261int mgag200_fbdev_init(struct mga_device *mdev) 247int mgag200_fbdev_init(struct mga_device *mdev)
@@ -277,6 +263,10 @@ int mgag200_fbdev_init(struct mga_device *mdev)
277 return ret; 263 return ret;
278 } 264 }
279 drm_fb_helper_single_add_all_connectors(&mfbdev->helper); 265 drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
266
267 /* disable all the possible outputs/crtcs before entering KMS mode */
268 drm_helper_disable_unused_functions(mdev->dev);
269
280 drm_fb_helper_initial_config(&mfbdev->helper, 32); 270 drm_fb_helper_initial_config(&mfbdev->helper, 32);
281 271
282 return 0; 272 return 0;
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 70dd3c5529d4..64297c72464f 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -23,16 +23,8 @@ static void mga_user_framebuffer_destroy(struct drm_framebuffer *fb)
23 kfree(fb); 23 kfree(fb);
24} 24}
25 25
26static int mga_user_framebuffer_create_handle(struct drm_framebuffer *fb,
27 struct drm_file *file_priv,
28 unsigned int *handle)
29{
30 return 0;
31}
32
33static const struct drm_framebuffer_funcs mga_fb_funcs = { 26static const struct drm_framebuffer_funcs mga_fb_funcs = {
34 .destroy = mga_user_framebuffer_destroy, 27 .destroy = mga_user_framebuffer_destroy,
35 .create_handle = mga_user_framebuffer_create_handle,
36}; 28};
37 29
38int mgag200_framebuffer_init(struct drm_device *dev, 30int mgag200_framebuffer_init(struct drm_device *dev,
@@ -40,13 +32,15 @@ int mgag200_framebuffer_init(struct drm_device *dev,
40 struct drm_mode_fb_cmd2 *mode_cmd, 32 struct drm_mode_fb_cmd2 *mode_cmd,
41 struct drm_gem_object *obj) 33 struct drm_gem_object *obj)
42{ 34{
43 int ret = drm_framebuffer_init(dev, &gfb->base, &mga_fb_funcs); 35 int ret;
36
37 drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
38 gfb->obj = obj;
39 ret = drm_framebuffer_init(dev, &gfb->base, &mga_fb_funcs);
44 if (ret) { 40 if (ret) {
45 DRM_ERROR("drm_framebuffer_init failed: %d\n", ret); 41 DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
46 return ret; 42 return ret;
47 } 43 }
48 drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
49 gfb->obj = obj;
50 return 0; 44 return 0;
51} 45}
52 46
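
The mgag200 reordering above fills in the framebuffer fields before calling drm_framebuffer_init(), so the object is never visible half-initialized once it is registered. The general initialize-before-publish shape, as a toy sketch:

    #include <stdio.h>
    #include <string.h>

    struct fb { int width, height; void *obj; };

    static struct fb *registry;                 /* where lookups find it */
    static void publish(struct fb *f) { registry = f; }

    static int framebuffer_init(struct fb *f, int w, int h, void *obj)
    {
        /* Fill every field first ... */
        f->width = w;
        f->height = h;
        f->obj = obj;
        /* ... then publish; readers never see a partial object. */
        publish(f);
        return 0;
    }

    int main(void)
    {
        struct fb f;
        memset(&f, 0, sizeof(f));
        framebuffer_init(&f, 1024, 768, NULL);
        printf("%dx%d\n", registry->width, registry->height);
        return 0;
    }
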
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 8a55beeb8bdc..a7ff6d5a34b9 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -11,8 +11,9 @@ config DRM_NOUVEAU
11 select FRAMEBUFFER_CONSOLE if !EXPERT 11 select FRAMEBUFFER_CONSOLE if !EXPERT
12 select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT 12 select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
13 select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT 13 select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL && INPUT
14 select ACPI_WMI if ACPI 14 select X86_PLATFORM_DEVICES if ACPI && X86
15 select MXM_WMI if ACPI 15 select ACPI_WMI if ACPI && X86
16 select MXM_WMI if ACPI && X86
16 select POWER_SUPPLY 17 select POWER_SUPPLY
17 help 18 help
18 Choose this option for open-source nVidia support. 19 Choose this option for open-source nVidia support.
@@ -52,26 +53,3 @@ config DRM_NOUVEAU_BACKLIGHT
52 help 53 help
53 Say Y here if you want to control the backlight of your display 54 Say Y here if you want to control the backlight of your display
54 (e.g. a laptop panel). 55 (e.g. a laptop panel).
55
56menu "I2C encoder or helper chips"
57 depends on DRM && DRM_KMS_HELPER && I2C
58
59config DRM_I2C_CH7006
60 tristate "Chrontel ch7006 TV encoder"
61 default m if DRM_NOUVEAU
62 help
63 Support for Chrontel ch7006 and similar TV encoders, found
64 on some nVidia video cards.
65
66 This driver is currently only useful if you're also using
67 the nouveau driver.
68
69config DRM_I2C_SIL164
70 tristate "Silicon Image sil164 TMDS transmitter"
71 default m if DRM_NOUVEAU
72 help
73 Support for sil164 and similar single-link (or dual-link
74 when used in pairs) TMDS transmitters, used in some nVidia
75 video cards.
76
77endmenu
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index ab25752a0b1e..90f9140eeefd 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -11,6 +11,7 @@ nouveau-y := core/core/client.o
 nouveau-y += core/core/engctx.o
 nouveau-y += core/core/engine.o
 nouveau-y += core/core/enum.o
+nouveau-y += core/core/event.o
 nouveau-y += core/core/falcon.o
 nouveau-y += core/core/gpuobj.o
 nouveau-y += core/core/handle.o
@@ -40,6 +41,11 @@ nouveau-y += core/subdev/bios/mxm.o
 nouveau-y += core/subdev/bios/perf.o
 nouveau-y += core/subdev/bios/pll.o
 nouveau-y += core/subdev/bios/therm.o
+nouveau-y += core/subdev/bios/xpio.o
+nouveau-y += core/subdev/bus/nv04.o
+nouveau-y += core/subdev/bus/nv31.o
+nouveau-y += core/subdev/bus/nv50.o
+nouveau-y += core/subdev/bus/nvc0.o
 nouveau-y += core/subdev/clock/nv04.o
 nouveau-y += core/subdev/clock/nv40.o
 nouveau-y += core/subdev/clock/nv50.o
@@ -85,9 +91,16 @@ nouveau-y += core/subdev/gpio/base.o
 nouveau-y += core/subdev/gpio/nv10.o
 nouveau-y += core/subdev/gpio/nv50.o
 nouveau-y += core/subdev/gpio/nvd0.o
+nouveau-y += core/subdev/gpio/nve0.o
 nouveau-y += core/subdev/i2c/base.o
+nouveau-y += core/subdev/i2c/anx9805.o
 nouveau-y += core/subdev/i2c/aux.o
 nouveau-y += core/subdev/i2c/bit.o
+nouveau-y += core/subdev/i2c/nv04.o
+nouveau-y += core/subdev/i2c/nv4e.o
+nouveau-y += core/subdev/i2c/nv50.o
+nouveau-y += core/subdev/i2c/nv94.o
+nouveau-y += core/subdev/i2c/nvd0.o
 nouveau-y += core/subdev/ibus/nvc0.o
 nouveau-y += core/subdev/ibus/nve0.o
 nouveau-y += core/subdev/instmem/base.o
@@ -106,10 +119,15 @@ nouveau-y += core/subdev/mxm/mxms.o
 nouveau-y += core/subdev/mxm/nv50.o
 nouveau-y += core/subdev/therm/base.o
 nouveau-y += core/subdev/therm/fan.o
+nouveau-y += core/subdev/therm/fannil.o
+nouveau-y += core/subdev/therm/fanpwm.o
+nouveau-y += core/subdev/therm/fantog.o
 nouveau-y += core/subdev/therm/ic.o
+nouveau-y += core/subdev/therm/temp.o
 nouveau-y += core/subdev/therm/nv40.o
 nouveau-y += core/subdev/therm/nv50.o
-nouveau-y += core/subdev/therm/temp.o
+nouveau-y += core/subdev/therm/nva3.o
+nouveau-y += core/subdev/therm/nvd0.o
 nouveau-y += core/subdev/timer/base.o
 nouveau-y += core/subdev/timer/nv04.o
 nouveau-y += core/subdev/vm/base.o
@@ -132,6 +150,7 @@ nouveau-y += core/engine/copy/nvc0.o
 nouveau-y += core/engine/copy/nve0.o
 nouveau-y += core/engine/crypt/nv84.o
 nouveau-y += core/engine/crypt/nv98.o
+nouveau-y += core/engine/disp/base.o
 nouveau-y += core/engine/disp/nv04.o
 nouveau-y += core/engine/disp/nv50.o
 nouveau-y += core/engine/disp/nv84.o
@@ -141,11 +160,13 @@ nouveau-y += core/engine/disp/nva3.o
 nouveau-y += core/engine/disp/nvd0.o
 nouveau-y += core/engine/disp/nve0.o
 nouveau-y += core/engine/disp/dacnv50.o
+nouveau-y += core/engine/disp/dport.o
 nouveau-y += core/engine/disp/hdanva3.o
 nouveau-y += core/engine/disp/hdanvd0.o
 nouveau-y += core/engine/disp/hdminv84.o
 nouveau-y += core/engine/disp/hdminva3.o
 nouveau-y += core/engine/disp/hdminvd0.o
+nouveau-y += core/engine/disp/piornv50.o
 nouveau-y += core/engine/disp/sornv50.o
 nouveau-y += core/engine/disp/sornv94.o
 nouveau-y += core/engine/disp/sornvd0.o
@@ -194,7 +215,8 @@ nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
 nouveau-y += nouveau_irq.o nouveau_vga.o nouveau_agp.o
 nouveau-y += nouveau_ttm.o nouveau_sgdma.o nouveau_bo.o nouveau_gem.o
 nouveau-y += nouveau_prime.o nouveau_abi16.o
-nouveau-y += nv04_fence.o nv10_fence.o nv50_fence.o nv84_fence.o nvc0_fence.o
+nouveau-y += nv04_fence.o nv10_fence.o nv17_fence.o
+nouveau-y += nv50_fence.o nv84_fence.o nvc0_fence.o
 
 # drm/kms
 nouveau-y += nouveau_bios.o nouveau_fbcon.o nouveau_display.o
@@ -216,7 +238,10 @@ nouveau-y += nouveau_mem.o
 
 # other random bits
 nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
+ifdef CONFIG_X86
 nouveau-$(CONFIG_ACPI) += nouveau_acpi.o
+endif
 nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
+nouveau-$(CONFIG_DEBUG_FS) += nouveau_debugfs.o
 
 obj-$(CONFIG_DRM_NOUVEAU)+= nouveau.o
diff --git a/drivers/gpu/drm/nouveau/core/core/client.c b/drivers/gpu/drm/nouveau/core/core/client.c
index 8bbb58f94a19..295c22165eac 100644
--- a/drivers/gpu/drm/nouveau/core/core/client.c
+++ b/drivers/gpu/drm/nouveau/core/core/client.c
@@ -99,3 +99,13 @@ nouveau_client_fini(struct nouveau_client *client, bool suspend)
 	nv_debug(client, "%s completed with %d\n", name[suspend], ret);
 	return ret;
 }
+
+const char *
+nouveau_client_name(void *obj)
+{
+	const char *client_name = "unknown";
+	struct nouveau_client *client = nouveau_client(obj);
+	if (client)
+		client_name = client->name;
+	return client_name;
+}
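
The helper added above resolves an arbitrary engine/context object back to the nouveau_client that owns it, so low-level interrupt handlers can name the offending process in their error output. A minimal sketch of the intended call pattern, assuming an engctx pointer the handler has already looked up (the wrapper function below is illustrative, not part of the patch):

    /* Hypothetical example; only nouveau_client_name() is real here. */
    static void
    example_report_fault(struct nouveau_object *engctx, int chid,
                         u32 mthd, u32 data)
    {
        pr_cont(" ch %d [%s] mthd 0x%04x data 0x%08x\n",
                chid, nouveau_client_name(engctx), mthd, data);
    }
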
diff --git a/drivers/gpu/drm/nouveau/core/core/enum.c b/drivers/gpu/drm/nouveau/core/core/enum.c
index 7cc7133d82de..dd434790ccc4 100644
--- a/drivers/gpu/drm/nouveau/core/core/enum.c
+++ b/drivers/gpu/drm/nouveau/core/core/enum.c
@@ -40,14 +40,15 @@ nouveau_enum_find(const struct nouveau_enum *en, u32 value)
 	return NULL;
 }
 
-void
+const struct nouveau_enum *
 nouveau_enum_print(const struct nouveau_enum *en, u32 value)
 {
 	en = nouveau_enum_find(en, value);
 	if (en)
-		printk("%s", en->name);
+		pr_cont("%s", en->name);
 	else
-		printk("(unknown enum 0x%08x)", value);
+		pr_cont("(unknown enum 0x%08x)", value);
+	return en;
 }
 
 void
@@ -55,7 +56,7 @@ nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
 {
 	while (bf->name) {
 		if (value & bf->mask) {
-			printk(" %s", bf->name);
+			pr_cont(" %s", bf->name);
 			value &= ~bf->mask;
 		}
 
@@ -63,5 +64,5 @@ nouveau_bitfield_print(const struct nouveau_bitfield *bf, u32 value)
 	}
 
 	if (value)
-		printk(" (unknown bits 0x%08x)", value);
+		pr_cont(" (unknown bits 0x%08x)", value);
 }
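
Two things change here. A bare printk() without a KERN_* level no longer reliably continues the previous message, so these helpers switch to pr_cont(), which explicitly appends to the line the caller has just opened with nv_error(); nouveau_enum_print() also returns the matched entry so callers can branch on whether the value decoded. A hedged sketch of the resulting pattern (the table and priv are placeholders; the print helpers are the real API):

    static const struct nouveau_enum example_errors[] = {
        { 0x01, "ILLEGAL_MTHD" },
        {}
    };

    nv_error(priv, "DISPATCH_ERROR [");        /* opens the log line */
    nouveau_enum_print(example_errors, ssta);  /* appends via pr_cont() */
    pr_cont("]\n");                            /* closes the same line */
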
diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c
new file mode 100644
index 000000000000..6d01e0f0fc8a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/event.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <core/os.h>
+#include <core/event.h>
+
+static void
+nouveau_event_put_locked(struct nouveau_event *event, int index,
+                         struct nouveau_eventh *handler)
+{
+    if (!--event->index[index].refs)
+        event->disable(event, index);
+    list_del(&handler->head);
+}
+
+void
+nouveau_event_put(struct nouveau_event *event, int index,
+                  struct nouveau_eventh *handler)
+{
+    unsigned long flags;
+
+    spin_lock_irqsave(&event->lock, flags);
+    if (index < event->index_nr)
+        nouveau_event_put_locked(event, index, handler);
+    spin_unlock_irqrestore(&event->lock, flags);
+}
+
+void
+nouveau_event_get(struct nouveau_event *event, int index,
+                  struct nouveau_eventh *handler)
+{
+    unsigned long flags;
+
+    spin_lock_irqsave(&event->lock, flags);
+    if (index < event->index_nr) {
+        list_add(&handler->head, &event->index[index].list);
+        if (!event->index[index].refs++)
+            event->enable(event, index);
+    }
+    spin_unlock_irqrestore(&event->lock, flags);
+}
+
+void
+nouveau_event_trigger(struct nouveau_event *event, int index)
+{
+    struct nouveau_eventh *handler, *temp;
+    unsigned long flags;
+
+    if (index >= event->index_nr)
+        return;
+
+    spin_lock_irqsave(&event->lock, flags);
+    list_for_each_entry_safe(handler, temp, &event->index[index].list, head) {
+        if (handler->func(handler, index) == NVKM_EVENT_DROP) {
+            nouveau_event_put_locked(event, index, handler);
+        }
+    }
+    spin_unlock_irqrestore(&event->lock, flags);
+}
+
+void
+nouveau_event_destroy(struct nouveau_event **pevent)
+{
+    struct nouveau_event *event = *pevent;
+    if (event) {
+        kfree(event);
+        *pevent = NULL;
+    }
+}
+
+int
+nouveau_event_create(int index_nr, struct nouveau_event **pevent)
+{
+    struct nouveau_event *event;
+    int i;
+
+    event = *pevent = kzalloc(sizeof(*event) + index_nr *
+                              sizeof(event->index[0]), GFP_KERNEL);
+    if (!event)
+        return -ENOMEM;
+
+    spin_lock_init(&event->lock);
+    for (i = 0; i < index_nr; i++)
+        INIT_LIST_HEAD(&event->index[i].list);
+    event->index_nr = index_nr;
+    return 0;
+}
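
The new event core is deliberately small: handlers hang off a per-index list, the first nouveau_event_get() on an index calls the owner's enable() hook, the last nouveau_event_put() calls disable(), and nouveau_event_trigger() walks the list under the IRQ-safe spinlock, unregistering any handler that returns NVKM_EVENT_DROP. A hedged usage sketch; the handler layout and the NVKM_EVENT_KEEP return are assumptions taken from core/event.h, and the functions below are illustrative:

    static int
    example_handler_func(struct nouveau_eventh *handler, int index)
    {
        /* per-event work here; runs with event->lock held */
        return NVKM_EVENT_KEEP;  /* or NVKM_EVENT_DROP to self-remove */
    }

    static struct nouveau_eventh example_handler = {
        .func = example_handler_func,
    };

    static void
    example_use(struct nouveau_event *event, int index)
    {
        nouveau_event_get(event, index, &example_handler);  /* 0->1 refs: enable() */
        /* ... nouveau_event_trigger() now reaches example_handler_func ... */
        nouveau_event_put(event, index, &example_handler);  /* 1->0 refs: disable() */
    }
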
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
index 283248c7b050..d6dc2a65ccd1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
@@ -22,6 +22,7 @@
  * Authors: Ben Skeggs
  */
 
+#include <core/client.h>
 #include <core/falcon.h>
 #include <core/class.h>
 #include <core/enum.h>
@@ -100,8 +101,9 @@ nva3_copy_intr(struct nouveau_subdev *subdev)
 	if (stat & 0x00000040) {
 		nv_error(falcon, "DISPATCH_ERROR [");
 		nouveau_enum_print(nva3_copy_isr_error_name, ssta);
-		printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
-		       chid, inst << 12, subc, mthd, data);
+		pr_cont("] ch %d [0x%010llx %s] subc %d mthd 0x%04x data 0x%08x\n",
+			chid, inst << 12, nouveau_client_name(engctx), subc,
+			mthd, data);
 		nv_wo32(falcon, 0x004, 0x00000040);
 		stat &= ~0x00000040;
 	}
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
index b97490512723..5bc021f471f9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
@@ -22,6 +22,7 @@
  * Authors: Ben Skeggs
 */
 
+#include <core/client.h>
 #include <core/os.h>
 #include <core/enum.h>
 #include <core/class.h>
@@ -126,10 +127,11 @@ nv84_crypt_intr(struct nouveau_subdev *subdev)
 	chid = pfifo->chid(pfifo, engctx);
 
 	if (stat) {
-		nv_error(priv, "");
+		nv_error(priv, "%s", "");
 		nouveau_bitfield_print(nv84_crypt_intr_mask, stat);
-		printk(" ch %d [0x%010llx] mthd 0x%04x data 0x%08x\n",
-		       chid, (u64)inst << 12, mthd, data);
+		pr_cont(" ch %d [0x%010llx %s] mthd 0x%04x data 0x%08x\n",
+			chid, (u64)inst << 12, nouveau_client_name(engctx),
+			mthd, data);
 	}
 
 	nv_wr32(priv, 0x102130, stat);
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
index 21986f3bf0c8..8bf8955051d4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
@@ -22,6 +22,7 @@
  * Authors: Ben Skeggs
 */
 
+#include <core/client.h>
 #include <core/os.h>
 #include <core/enum.h>
 #include <core/class.h>
@@ -102,8 +103,9 @@ nv98_crypt_intr(struct nouveau_subdev *subdev)
 	if (stat & 0x00000040) {
 		nv_error(priv, "DISPATCH_ERROR [");
 		nouveau_enum_print(nv98_crypt_isr_error_name, ssta);
-		printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
-		       chid, (u64)inst << 12, subc, mthd, data);
+		pr_cont("] ch %d [0x%010llx %s] subc %d mthd 0x%04x data 0x%08x\n",
+			chid, (u64)inst << 12, nouveau_client_name(engctx),
+			subc, mthd, data);
 		nv_wr32(priv, 0x087004, 0x00000040);
 		stat &= ~0x00000040;
 	}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/base.c b/drivers/gpu/drm/nouveau/core/engine/disp/base.c
new file mode 100644
index 000000000000..7a5cae42834f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/base.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/disp.h>
+
+void
+_nouveau_disp_dtor(struct nouveau_object *object)
+{
+    struct nouveau_disp *disp = (void *)object;
+    nouveau_event_destroy(&disp->vblank);
+    nouveau_engine_destroy(&disp->base);
+}
+
+int
+nouveau_disp_create_(struct nouveau_object *parent,
+                     struct nouveau_object *engine,
+                     struct nouveau_oclass *oclass, int heads,
+                     const char *intname, const char *extname,
+                     int length, void **pobject)
+{
+    struct nouveau_disp *disp;
+    int ret;
+
+    ret = nouveau_engine_create_(parent, engine, oclass, true,
+                                 intname, extname, length, pobject);
+    disp = *pobject;
+    if (ret)
+        return ret;
+
+    return nouveau_event_create(heads, &disp->vblank);
+}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
new file mode 100644
index 000000000000..fa27b02ff829
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -0,0 +1,346 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/dp.h>
+#include <subdev/bios/init.h>
+#include <subdev/i2c.h>
+
+#include <engine/disp.h>
+
+#include "dport.h"
+
+#define DBG(fmt, args...) nv_debug(dp->disp, "DP:%04x:%04x: " fmt, \
+                                   dp->outp->hasht, dp->outp->hashm, ##args)
+#define ERR(fmt, args...) nv_error(dp->disp, "DP:%04x:%04x: " fmt, \
+                                   dp->outp->hasht, dp->outp->hashm, ##args)
+
+/******************************************************************************
+ * link training
+ *****************************************************************************/
+struct dp_state {
+    const struct nouveau_dp_func *func;
+    struct nouveau_disp *disp;
+    struct dcb_output *outp;
+    struct nvbios_dpout info;
+    u8 version;
+    struct nouveau_i2c_port *aux;
+    int head;
+    u8 dpcd[4];
+    int link_nr;
+    u32 link_bw;
+    u8 stat[6];
+    u8 conf[4];
+};
+
+static int
+dp_set_link_config(struct dp_state *dp)
+{
+    struct nouveau_disp *disp = dp->disp;
+    struct nouveau_bios *bios = nouveau_bios(disp);
+    struct nvbios_init init = {
+        .subdev = nv_subdev(dp->disp),
+        .bios = bios,
+        .offset = 0x0000,
+        .outp = dp->outp,
+        .crtc = dp->head,
+        .execute = 1,
+    };
+    u32 lnkcmp;
+    u8 sink[2];
+
+    DBG("%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
+
+    /* set desired link configuration on the sink */
+    sink[0] = dp->link_bw / 27000;
+    sink[1] = dp->link_nr;
+    if (dp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
+        sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
+
+    nv_wraux(dp->aux, DPCD_LC00, sink, 2);
+
+    /* set desired link configuration on the source */
+    if ((lnkcmp = dp->info.lnkcmp)) {
+        if (dp->version < 0x30) {
+            while ((dp->link_bw / 10) < nv_ro16(bios, lnkcmp))
+                lnkcmp += 4;
+            init.offset = nv_ro16(bios, lnkcmp + 2);
+        } else {
+            while ((dp->link_bw / 27000) < nv_ro08(bios, lnkcmp))
+                lnkcmp += 3;
+            init.offset = nv_ro16(bios, lnkcmp + 1);
+        }
+
+        nvbios_exec(&init);
+    }
+
+    return dp->func->lnk_ctl(dp->disp, dp->outp, dp->head,
+                             dp->link_nr, dp->link_bw / 27000,
+                             dp->dpcd[DPCD_RC02] &
+                                      DPCD_RC02_ENHANCED_FRAME_CAP);
+}
+
+static void
+dp_set_training_pattern(struct dp_state *dp, u8 pattern)
+{
+    u8 sink_tp;
+
+    DBG("training pattern %d\n", pattern);
+    dp->func->pattern(dp->disp, dp->outp, dp->head, pattern);
+
+    nv_rdaux(dp->aux, DPCD_LC02, &sink_tp, 1);
+    sink_tp &= ~DPCD_LC02_TRAINING_PATTERN_SET;
+    sink_tp |= pattern;
+    nv_wraux(dp->aux, DPCD_LC02, &sink_tp, 1);
+}
+
+static int
+dp_link_train_commit(struct dp_state *dp)
+{
+    int i;
+
+    for (i = 0; i < dp->link_nr; i++) {
+        u8 lane = (dp->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf;
+        u8 lpre = (lane & 0x0c) >> 2;
+        u8 lvsw = (lane & 0x03) >> 0;
+
+        dp->conf[i] = (lpre << 3) | lvsw;
+        if (lvsw == 3)
+            dp->conf[i] |= DPCD_LC03_MAX_SWING_REACHED;
+        if (lpre == 3)
+            dp->conf[i] |= DPCD_LC03_MAX_PRE_EMPHASIS_REACHED;
+
+        DBG("config lane %d %02x\n", i, dp->conf[i]);
+        dp->func->drv_ctl(dp->disp, dp->outp, dp->head, i, lvsw, lpre);
+    }
+
+    return nv_wraux(dp->aux, DPCD_LC03(0), dp->conf, 4);
+}
+
+static int
+dp_link_train_update(struct dp_state *dp, u32 delay)
+{
+    int ret;
+
+    udelay(delay);
+
+    ret = nv_rdaux(dp->aux, DPCD_LS02, dp->stat, 6);
+    if (ret)
+        return ret;
+
+    DBG("status %*ph\n", 6, dp->stat);
+    return 0;
+}
+
+static int
+dp_link_train_cr(struct dp_state *dp)
+{
+    bool cr_done = false, abort = false;
+    int voltage = dp->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
+    int tries = 0, i;
+
+    dp_set_training_pattern(dp, 1);
+
+    do {
+        if (dp_link_train_commit(dp) ||
+            dp_link_train_update(dp, 100))
+            break;
+
+        cr_done = true;
+        for (i = 0; i < dp->link_nr; i++) {
+            u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
+            if (!(lane & DPCD_LS02_LANE0_CR_DONE)) {
+                cr_done = false;
+                if (dp->conf[i] & DPCD_LC03_MAX_SWING_REACHED)
+                    abort = true;
+                break;
+            }
+        }
+
+        if ((dp->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET) != voltage) {
+            voltage = dp->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
+            tries = 0;
+        }
+    } while (!cr_done && !abort && ++tries < 5);
+
+    return cr_done ? 0 : -1;
+}
+
+static int
+dp_link_train_eq(struct dp_state *dp)
+{
+    bool eq_done, cr_done = true;
+    int tries = 0, i;
+
+    dp_set_training_pattern(dp, 2);
+
+    do {
+        if (dp_link_train_update(dp, 400))
+            break;
+
+        eq_done = !!(dp->stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE);
+        for (i = 0; i < dp->link_nr && eq_done; i++) {
+            u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
+            if (!(lane & DPCD_LS02_LANE0_CR_DONE))
+                cr_done = false;
+            if (!(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
+                !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED))
+                eq_done = false;
+        }
+
+        if (dp_link_train_commit(dp))
+            break;
+    } while (!eq_done && cr_done && ++tries <= 5);
+
+    return eq_done ? 0 : -1;
+}
+
+static void
+dp_link_train_init(struct dp_state *dp, bool spread)
+{
+    struct nvbios_init init = {
+        .subdev = nv_subdev(dp->disp),
+        .bios = nouveau_bios(dp->disp),
+        .outp = dp->outp,
+        .crtc = dp->head,
+        .execute = 1,
+    };
+
+    /* set desired spread */
+    if (spread)
+        init.offset = dp->info.script[2];
+    else
+        init.offset = dp->info.script[3];
+    nvbios_exec(&init);
+
+    /* pre-train script */
+    init.offset = dp->info.script[0];
+    nvbios_exec(&init);
+}
+
+static void
+dp_link_train_fini(struct dp_state *dp)
+{
+    struct nvbios_init init = {
+        .subdev = nv_subdev(dp->disp),
+        .bios = nouveau_bios(dp->disp),
+        .outp = dp->outp,
+        .crtc = dp->head,
+        .execute = 1,
+    };
+
+    /* post-train script */
+    init.offset = dp->info.script[1],
+    nvbios_exec(&init);
+}
+
+int
+nouveau_dp_train(struct nouveau_disp *disp, const struct nouveau_dp_func *func,
+                 struct dcb_output *outp, int head, u32 datarate)
+{
+    struct nouveau_bios *bios = nouveau_bios(disp);
+    struct nouveau_i2c *i2c = nouveau_i2c(disp);
+    struct dp_state _dp = {
+        .disp = disp,
+        .func = func,
+        .outp = outp,
+        .head = head,
+    }, *dp = &_dp;
+    const u32 bw_list[] = { 270000, 162000, 0 };
+    const u32 *link_bw = bw_list;
+    u8 hdr, cnt, len;
+    u32 data;
+    int ret;
+
+    /* find the bios displayport data relevant to this output */
+    data = nvbios_dpout_match(bios, outp->hasht, outp->hashm, &dp->version,
+                              &hdr, &cnt, &len, &dp->info);
+    if (!data) {
+        ERR("bios data not found\n");
+        return -EINVAL;
+    }
+
+    /* acquire the aux channel and fetch some info about the display */
+    if (outp->location)
+        dp->aux = i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(outp->extdev));
+    else
+        dp->aux = i2c->find(i2c, NV_I2C_TYPE_DCBI2C(outp->i2c_index));
+    if (!dp->aux) {
+        ERR("no aux channel?!\n");
+        return -ENODEV;
+    }
+
+    ret = nv_rdaux(dp->aux, 0x00000, dp->dpcd, sizeof(dp->dpcd));
+    if (ret) {
+        ERR("failed to read DPCD\n");
+        return ret;
+    }
+
+    /* adjust required bandwidth for 8B/10B coding overhead */
+    datarate = (datarate / 8) * 10;
+
+    /* enable down-spreading and execute pre-train script from vbios */
+    dp_link_train_init(dp, dp->dpcd[3] & 0x01);
+
+    /* start off at highest link rate supported by encoder and display */
+    while (*link_bw > (dp->dpcd[1] * 27000))
+        link_bw++;
+
+    while (link_bw[0]) {
+        /* find minimum required lane count at this link rate */
+        dp->link_nr = dp->dpcd[2] & DPCD_RC02_MAX_LANE_COUNT;
+        while ((dp->link_nr >> 1) * link_bw[0] > datarate)
+            dp->link_nr >>= 1;
+
+        /* drop link rate to minimum with this lane count */
+        while ((link_bw[1] * dp->link_nr) > datarate)
+            link_bw++;
+        dp->link_bw = link_bw[0];
+
+        /* program selected link configuration */
+        ret = dp_set_link_config(dp);
+        if (ret == 0) {
+            /* attempt to train the link at this configuration */
+            memset(dp->stat, 0x00, sizeof(dp->stat));
+            if (!dp_link_train_cr(dp) &&
+                !dp_link_train_eq(dp))
+                break;
+        } else
+        if (ret >= 1) {
+            /* dp_set_link_config() handled training */
+            break;
+        }
+
+        /* retry at lower rate */
+        link_bw++;
+    }
+
+    /* finish link training */
+    dp_set_training_pattern(dp, 0);
+
+    /* execute post-train script from vbios */
+    dp_link_train_fini(dp);
+    return true;
+}
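
nouveau_dp_train() replaces the per-chipset training code with one generic state machine: read the sink's DPCD, run the spread/pre-train VBIOS scripts, then walk configurations from the highest common link rate downwards, using the fewest lanes whose aggregate bandwidth still covers the (8B/10B-adjusted) data rate, attempting clock recovery (training pattern 1) followed by channel equalisation (pattern 2) at each configuration. A hedged sketch of just that fallback walk; every helper name here is hypothetical, and example_try_train() stands in for the dp_link_train_cr()/_eq() pair:

    /* Illustrative outline of the rate/lane fallback order only. */
    static int example_try_train(u32 lanes, u32 rate); /* CR + EQ attempt */

    static int
    example_dp_fallback(u32 max_lanes, const u32 *rates /* descending, 0-term */,
                        u32 datarate)
    {
        for (; rates[0]; rates++) {
            u32 lanes = max_lanes;
            /* fewest lanes that still carry the payload at this rate */
            while ((lanes >> 1) * rates[0] > datarate)
                lanes >>= 1;
            if (example_try_train(lanes, rates[0]) == 0)
                return 0;   /* both training phases passed */
        }
        return -1;          /* no working configuration */
    }
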
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.h b/drivers/gpu/drm/nouveau/core/engine/disp/dport.h
new file mode 100644
index 000000000000..0e1bbd18ff6c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.h
@@ -0,0 +1,78 @@
+#ifndef __NVKM_DISP_DPORT_H__
+#define __NVKM_DISP_DPORT_H__
+
+/* DPCD Receiver Capabilities */
+#define DPCD_RC00                                        0x00000
+#define DPCD_RC00_DPCD_REV                                  0xff
+#define DPCD_RC01                                        0x00001
+#define DPCD_RC01_MAX_LINK_RATE                             0xff
+#define DPCD_RC02                                        0x00002
+#define DPCD_RC02_ENHANCED_FRAME_CAP                        0x80
+#define DPCD_RC02_MAX_LANE_COUNT                            0x1f
+#define DPCD_RC03                                        0x00003
+#define DPCD_RC03_MAX_DOWNSPREAD                            0x01
+
+/* DPCD Link Configuration */
+#define DPCD_LC00                                        0x00100
+#define DPCD_LC00_LINK_BW_SET                               0xff
+#define DPCD_LC01                                        0x00101
+#define DPCD_LC01_ENHANCED_FRAME_EN                         0x80
+#define DPCD_LC01_LANE_COUNT_SET                            0x1f
+#define DPCD_LC02                                        0x00102
+#define DPCD_LC02_TRAINING_PATTERN_SET                      0x03
+#define DPCD_LC03(l)                             ((l) +  0x00103)
+#define DPCD_LC03_MAX_PRE_EMPHASIS_REACHED                  0x20
+#define DPCD_LC03_PRE_EMPHASIS_SET                          0x18
+#define DPCD_LC03_MAX_SWING_REACHED                         0x04
+#define DPCD_LC03_VOLTAGE_SWING_SET                         0x03
+
+/* DPCD Link/Sink Status */
+#define DPCD_LS02                                        0x00202
+#define DPCD_LS02_LANE1_SYMBOL_LOCKED                       0x40
+#define DPCD_LS02_LANE1_CHANNEL_EQ_DONE                     0x20
+#define DPCD_LS02_LANE1_CR_DONE                             0x10
+#define DPCD_LS02_LANE0_SYMBOL_LOCKED                       0x04
+#define DPCD_LS02_LANE0_CHANNEL_EQ_DONE                     0x02
+#define DPCD_LS02_LANE0_CR_DONE                             0x01
+#define DPCD_LS03                                        0x00203
+#define DPCD_LS03_LANE3_SYMBOL_LOCKED                       0x40
+#define DPCD_LS03_LANE3_CHANNEL_EQ_DONE                     0x20
+#define DPCD_LS03_LANE3_CR_DONE                             0x10
+#define DPCD_LS03_LANE2_SYMBOL_LOCKED                       0x04
+#define DPCD_LS03_LANE2_CHANNEL_EQ_DONE                     0x02
+#define DPCD_LS03_LANE2_CR_DONE                             0x01
+#define DPCD_LS04                                        0x00204
+#define DPCD_LS04_LINK_STATUS_UPDATED                       0x80
+#define DPCD_LS04_DOWNSTREAM_PORT_STATUS_CHANGED            0x40
+#define DPCD_LS04_INTERLANE_ALIGN_DONE                      0x01
+#define DPCD_LS06                                        0x00206
+#define DPCD_LS06_LANE1_PRE_EMPHASIS                        0xc0
+#define DPCD_LS06_LANE1_VOLTAGE_SWING                       0x30
+#define DPCD_LS06_LANE0_PRE_EMPHASIS                        0x0c
+#define DPCD_LS06_LANE0_VOLTAGE_SWING                       0x03
+#define DPCD_LS07                                        0x00207
+#define DPCD_LS07_LANE3_PRE_EMPHASIS                        0xc0
+#define DPCD_LS07_LANE3_VOLTAGE_SWING                       0x30
+#define DPCD_LS07_LANE2_PRE_EMPHASIS                        0x0c
+#define DPCD_LS07_LANE2_VOLTAGE_SWING                       0x03
+
+struct nouveau_disp;
+struct dcb_output;
+
+struct nouveau_dp_func {
+    int (*pattern)(struct nouveau_disp *, struct dcb_output *,
+                   int head, int pattern);
+    int (*lnk_ctl)(struct nouveau_disp *, struct dcb_output *, int head,
+                   int link_nr, int link_bw, bool enh_frame);
+    int (*drv_ctl)(struct nouveau_disp *, struct dcb_output *, int head,
+                   int lane, int swing, int preem);
+};
+
+extern const struct nouveau_dp_func nv94_sor_dp_func;
+extern const struct nouveau_dp_func nvd0_sor_dp_func;
+extern const struct nouveau_dp_func nv50_pior_dp_func;
+
+int nouveau_dp_train(struct nouveau_disp *, const struct nouveau_dp_func *,
+                     struct dcb_output *, int, u32);
+
+#endif
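
The DPCD_LS02/LS03 status bytes pack two lanes each, with lane 0/2 in the low nibble and lane 1/3 in the high nibble, which is why the training loops above index dp->stat[i >> 1] and shift by (i & 1) * 4. A small sketch of that extraction:

    /* Mirrors the lane-status indexing used by dp_link_train_cr()/_eq(). */
    static inline u8
    example_lane_status(const u8 *stat, int lane)
    {
        return (stat[lane >> 1] >> ((lane & 1) * 4)) & 0xf;
    }

A per-lane check then reads, e.g., (example_lane_status(dp->stat, i) & DPCD_LS02_LANE0_CR_DONE), reusing the lane-0 masks for every lane.
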
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
index 1c919f2af89f..05e903f08a36 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv04.c
@@ -24,21 +24,33 @@
 
 #include <engine/disp.h>
 
+#include <core/event.h>
+#include <core/class.h>
+
 struct nv04_disp_priv {
     struct nouveau_disp base;
 };
 
 static struct nouveau_oclass
 nv04_disp_sclass[] = {
+    { NV04_DISP_CLASS, &nouveau_object_ofuncs },
     {},
 };
 
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static void
+nv04_disp_vblank_enable(struct nouveau_event *event, int head)
+{
+    nv_wr32(event->priv, 0x600140 + (head * 0x2000) , 0x00000001);
+}
+
 static void
-nv04_disp_intr_vblank(struct nv04_disp_priv *priv, int crtc)
+nv04_disp_vblank_disable(struct nouveau_event *event, int head)
 {
-    struct nouveau_disp *disp = &priv->base;
-    if (disp->vblank.notify)
-        disp->vblank.notify(disp->vblank.data, crtc);
+    nv_wr32(event->priv, 0x600140 + (head * 0x2000) , 0x00000000);
 }
 
 static void
@@ -49,25 +61,25 @@ nv04_disp_intr(struct nouveau_subdev *subdev)
     u32 crtc1 = nv_rd32(priv, 0x602100);
 
     if (crtc0 & 0x00000001) {
-        nv04_disp_intr_vblank(priv, 0);
+        nouveau_event_trigger(priv->base.vblank, 0);
         nv_wr32(priv, 0x600100, 0x00000001);
     }
 
     if (crtc1 & 0x00000001) {
-        nv04_disp_intr_vblank(priv, 1);
+        nouveau_event_trigger(priv->base.vblank, 1);
         nv_wr32(priv, 0x602100, 0x00000001);
     }
 }
 
 static int
 nv04_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                struct nouveau_oclass *oclass, void *data, u32 size,
                struct nouveau_object **pobject)
 {
     struct nv04_disp_priv *priv;
     int ret;
 
-    ret = nouveau_disp_create(parent, engine, oclass, "DISPLAY",
+    ret = nouveau_disp_create(parent, engine, oclass, 2, "DISPLAY",
                               "display", &priv);
     *pobject = nv_object(priv);
     if (ret)
@@ -75,6 +87,9 @@ nv04_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 
     nv_engine(priv)->sclass = nv04_disp_sclass;
     nv_subdev(priv)->intr = nv04_disp_intr;
+    priv->base.vblank->priv = priv;
+    priv->base.vblank->enable = nv04_disp_vblank_enable;
+    priv->base.vblank->disable = nv04_disp_vblank_disable;
     return 0;
 }
 
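
With the vblank path converted to the event core, the interrupt handler shrinks to nouveau_event_trigger() calls, and the enable()/disable() hooks only touch the per-head CRTC interrupt-enable register on the 0-to-1 and 1-to-0 handler-count transitions, so unused heads stay quiet. On the consumer side, anything wanting vblanks now does a get/put pair instead of installing a single global notify callback. A hedged sketch (handler as in the event.c note above; "disp" is the nouveau_disp engine):

    static void
    example_wait_one_vblank(struct nouveau_disp *disp, int head,
                            struct nouveau_eventh *handler)
    {
        nouveau_event_get(disp->vblank, head, handler);   /* may enable() */
        /* ... handler->func() fires from nv04_disp_intr() ... */
        nouveau_event_put(disp->vblank, head, handler);   /* may disable() */
    }
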
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index ca1a7d76a95b..5fa13267bd9f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -27,7 +27,6 @@
 #include <core/handle.h>
 #include <core/class.h>
 
-#include <engine/software.h>
 #include <engine/disp.h>
 
 #include <subdev/bios.h>
@@ -37,7 +36,6 @@
 #include <subdev/bios/pll.h>
 #include <subdev/timer.h>
 #include <subdev/fb.h>
-#include <subdev/bar.h>
 #include <subdev/clock.h>
 
 #include "nv50.h"
@@ -335,7 +333,7 @@ nv50_disp_sync_ctor(struct nouveau_object *parent,
     struct nv50_disp_dmac *dmac;
     int ret;
 
-    if (size < sizeof(*data) || args->head > 1)
+    if (size < sizeof(*args) || args->head > 1)
         return -EINVAL;
 
     ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
@@ -374,7 +372,7 @@ nv50_disp_ovly_ctor(struct nouveau_object *parent,
     struct nv50_disp_dmac *dmac;
     int ret;
 
-    if (size < sizeof(*data) || args->head > 1)
+    if (size < sizeof(*args) || args->head > 1)
         return -EINVAL;
 
     ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
@@ -543,6 +541,18 @@
  * Base display object
  ******************************************************************************/
 
+static void
+nv50_disp_base_vblank_enable(struct nouveau_event *event, int head)
+{
+    nv_mask(event->priv, 0x61002c, (1 << head), (1 << head));
+}
+
+static void
+nv50_disp_base_vblank_disable(struct nouveau_event *event, int head)
+{
+    nv_mask(event->priv, 0x61002c, (1 << head), (0 << head));
+}
+
 static int
 nv50_disp_base_ctor(struct nouveau_object *parent,
                     struct nouveau_object *engine,
@@ -559,6 +569,9 @@ nv50_disp_base_ctor(struct nouveau_object *parent,
     if (ret)
         return ret;
 
+    priv->base.vblank->priv = priv;
+    priv->base.vblank->enable = nv50_disp_base_vblank_enable;
+    priv->base.vblank->disable = nv50_disp_base_vblank_disable;
     return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
 }
 
@@ -613,7 +626,7 @@ nv50_disp_base_init(struct nouveau_object *object)
         nv_wr32(priv, 0x6101e0 + (i * 0x04), tmp);
     }
 
-    /* ... EXT caps */
+    /* ... PIOR caps */
     for (i = 0; i < 3; i++) {
         tmp = nv_rd32(priv, 0x61e000 + (i * 0x800));
         nv_wr32(priv, 0x6101f0 + (i * 0x04), tmp);
@@ -665,6 +678,9 @@ nv50_disp_base_omthds[] = {
     { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
     { DAC_MTHD(NV50_DISP_DAC_PWR)         , nv50_dac_mthd },
     { DAC_MTHD(NV50_DISP_DAC_LOAD)        , nv50_dac_mthd },
+    { PIOR_MTHD(NV50_DISP_PIOR_PWR)       , nv50_pior_mthd },
+    { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR)  , nv50_pior_mthd },
+    { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR)    , nv50_pior_mthd },
     {},
 };
 
@@ -756,50 +772,6 @@ nv50_disp_intr_error(struct nv50_disp_priv *priv)
     }
 }
 
-static void
-nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
-{
-    struct nouveau_bar *bar = nouveau_bar(priv);
-    struct nouveau_disp *disp = &priv->base;
-    struct nouveau_software_chan *chan, *temp;
-    unsigned long flags;
-
-    spin_lock_irqsave(&disp->vblank.lock, flags);
-    list_for_each_entry_safe(chan, temp, &disp->vblank.list, vblank.head) {
-        if (chan->vblank.crtc != crtc)
-            continue;
-
-        if (nv_device(priv)->chipset >= 0xc0) {
-            nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
-            bar->flush(bar);
-            nv_wr32(priv, 0x06000c,
-                upper_32_bits(chan->vblank.offset));
-            nv_wr32(priv, 0x060010,
-                lower_32_bits(chan->vblank.offset));
-            nv_wr32(priv, 0x060014, chan->vblank.value);
-        } else {
-            nv_wr32(priv, 0x001704, chan->vblank.channel);
-            nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
-            bar->flush(bar);
-            if (nv_device(priv)->chipset == 0x50) {
-                nv_wr32(priv, 0x001570, chan->vblank.offset);
-                nv_wr32(priv, 0x001574, chan->vblank.value);
-            } else {
-                nv_wr32(priv, 0x060010, chan->vblank.offset);
-                nv_wr32(priv, 0x060014, chan->vblank.value);
-            }
-        }
-
-        list_del(&chan->vblank.head);
-        if (disp->vblank.put)
-            disp->vblank.put(disp->vblank.data, crtc);
-    }
-    spin_unlock_irqrestore(&disp->vblank.lock, flags);
-
-    if (disp->vblank.notify)
-        disp->vblank.notify(disp->vblank.data, crtc);
-}
-
 static u16
 exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
             struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
@@ -811,8 +783,8 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
     if (outp < 4) {
         type = DCB_OUTPUT_ANALOG;
         mask = 0;
-    } else {
-        outp -= 4;
+    } else
+    if (outp < 8) {
         switch (ctrl & 0x00000f00) {
         case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
         case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
@@ -824,6 +796,17 @@
             nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
             return 0x0000;
         }
+        outp -= 4;
+    } else {
+        outp = outp - 8;
+        type = 0x0010;
+        mask = 0;
+        switch (ctrl & 0x00000f00) {
+        case 0x00000000: type |= priv->pior.type[outp]; break;
+        default:
+            nv_error(priv, "unknown PIOR mc 0x%08x\n", ctrl);
+            return 0x0000;
+        }
     }
 
     mask = 0x00c0 & (mask << 6);
@@ -834,6 +817,10 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
     if (!data)
         return 0x0000;
 
+    /* off-chip encoders require matching the exact encoder type */
+    if (dcb->location != 0)
+        type |= dcb->extdev << 8;
+
     return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info);
 }
 
@@ -848,9 +835,11 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
     u32 ctrl = 0x00000000;
     int i;
 
+    /* DAC */
     for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
         ctrl = nv_rd32(priv, 0x610b5c + (i * 8));
 
+    /* SOR */
     if (!(ctrl & (1 << head))) {
         if (nv_device(priv)->chipset < 0x90 ||
             nv_device(priv)->chipset == 0x92 ||
@@ -865,6 +854,13 @@
         }
     }
 
+    /* PIOR */
+    if (!(ctrl & (1 << head))) {
+        for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+            ctrl = nv_rd32(priv, 0x610b84 + (i * 8));
+        i += 8;
+    }
+
     if (!(ctrl & (1 << head)))
         return false;
     i--;
@@ -894,13 +890,15 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
     struct nvbios_outp info1;
     struct nvbios_ocfg info2;
     u8 ver, hdr, cnt, len;
-    u16 data, conf;
     u32 ctrl = 0x00000000;
+    u32 data, conf = ~0;
     int i;
 
+    /* DAC */
     for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
         ctrl = nv_rd32(priv, 0x610b58 + (i * 8));
 
+    /* SOR */
     if (!(ctrl & (1 << head))) {
         if (nv_device(priv)->chipset < 0x90 ||
             nv_device(priv)->chipset == 0x92 ||
@@ -915,34 +913,46 @@
     }
 
+    /* PIOR */
+    if (!(ctrl & (1 << head))) {
+        for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+            ctrl = nv_rd32(priv, 0x610b80 + (i * 8));
+        i += 8;
+    }
+
     if (!(ctrl & (1 << head)))
-        return 0x0000;
+        return conf;
     i--;
 
     data = exec_lookup(priv, head, i, ctrl, outp, &ver, &hdr, &cnt, &len, &info1);
     if (!data)
-        return 0x0000;
+        return conf;
 
-    switch (outp->type) {
-    case DCB_OUTPUT_TMDS:
-        conf = (ctrl & 0x00000f00) >> 8;
-        if (pclk >= 165000)
-            conf |= 0x0100;
-        break;
-    case DCB_OUTPUT_LVDS:
-        conf = priv->sor.lvdsconf;
-        break;
-    case DCB_OUTPUT_DP:
+    if (outp->location == 0) {
+        switch (outp->type) {
+        case DCB_OUTPUT_TMDS:
+            conf = (ctrl & 0x00000f00) >> 8;
+            if (pclk >= 165000)
+                conf |= 0x0100;
+            break;
+        case DCB_OUTPUT_LVDS:
+            conf = priv->sor.lvdsconf;
+            break;
+        case DCB_OUTPUT_DP:
+            conf = (ctrl & 0x00000f00) >> 8;
+            break;
+        case DCB_OUTPUT_ANALOG:
+        default:
+            conf = 0x00ff;
+            break;
+        }
+    } else {
         conf = (ctrl & 0x00000f00) >> 8;
-        break;
-    case DCB_OUTPUT_ANALOG:
-    default:
-        conf = 0x00ff;
-        break;
+        pclk = pclk / 2;
     }
 
     data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
-    if (data) {
+    if (data && id < 0xff) {
         data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
         if (data) {
             struct nvbios_init init = {
@@ -954,32 +964,37 @@
                 .execute = 1,
             };
 
-            if (nvbios_exec(&init))
-                return 0x0000;
-            return conf;
+            nvbios_exec(&init);
         }
     }
 
-    return 0x0000;
+    return conf;
 }
 
 static void
-nv50_disp_intr_unk10(struct nv50_disp_priv *priv, u32 super)
+nv50_disp_intr_unk10_0(struct nv50_disp_priv *priv, int head)
 {
-    int head = ffs((super & 0x00000060) >> 5) - 1;
-    if (head >= 0) {
-        head = ffs((super & 0x00000180) >> 7) - 1;
-        if (head >= 0)
-            exec_script(priv, head, 1);
-    }
+    exec_script(priv, head, 1);
+}
 
-    nv_wr32(priv, 0x610024, 0x00000010);
-    nv_wr32(priv, 0x610030, 0x80000000);
+static void
+nv50_disp_intr_unk20_0(struct nv50_disp_priv *priv, int head)
+{
+    exec_script(priv, head, 2);
+}
+
+static void
+nv50_disp_intr_unk20_1(struct nv50_disp_priv *priv, int head)
+{
+    struct nouveau_clock *clk = nouveau_clock(priv);
+    u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+    if (pclk)
+        clk->pll_set(clk, PLL_VPLL0 + head, pclk);
 }
 
 static void
-nv50_disp_intr_unk20_dp(struct nv50_disp_priv *priv,
+nv50_disp_intr_unk20_2_dp(struct nv50_disp_priv *priv,
                         struct dcb_output *outp, u32 pclk)
 {
     const int link = !(outp->sorconf.link & 1);
     const int or = ffs(outp->or) - 1;
@@ -1085,53 +1100,54 @@
 }
 
 static void
-nv50_disp_intr_unk20(struct nv50_disp_priv *priv, u32 super)
+nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
 {
     struct dcb_output outp;
-    u32 addr, mask, data;
-    int head;
+    u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+    u32 hval, hreg = 0x614200 + (head * 0x800);
+    u32 oval, oreg;
+    u32 conf = exec_clkcmp(priv, head, 0xff, pclk, &outp);
+    if (conf != ~0) {
+        if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) {
+            u32 soff = (ffs(outp.or) - 1) * 0x08;
+            u32 ctrl = nv_rd32(priv, 0x610798 + soff);
+            u32 datarate;
+
+            switch ((ctrl & 0x000f0000) >> 16) {
+            case 6: datarate = pclk * 30 / 8; break;
+            case 5: datarate = pclk * 24 / 8; break;
+            case 2:
+            default:
+                datarate = pclk * 18 / 8;
+                break;
+            }
 
-    /* finish detaching encoder? */
-    head = ffs((super & 0x00000180) >> 7) - 1;
-    if (head >= 0)
-        exec_script(priv, head, 2);
-
-    /* check whether a vpll change is required */
-    head = ffs((super & 0x00000600) >> 9) - 1;
-    if (head >= 0) {
-        u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
-        if (pclk) {
-            struct nouveau_clock *clk = nouveau_clock(priv);
-            clk->pll_set(clk, PLL_VPLL0 + head, pclk);
+            nouveau_dp_train(&priv->base, priv->sor.dp,
+                             &outp, head, datarate);
         }
 
-        nv_mask(priv, 0x614200 + head * 0x800, 0x0000000f, 0x00000000);
-    }
-
-    /* (re)attach the relevant OR to the head */
-    head = ffs((super & 0x00000180) >> 7) - 1;
-    if (head >= 0) {
-        u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
-        u32 conf = exec_clkcmp(priv, head, 0, pclk, &outp);
-        if (conf) {
-            if (outp.type == DCB_OUTPUT_ANALOG) {
-                addr = 0x614280 + (ffs(outp.or) - 1) * 0x800;
-                mask = 0xffffffff;
-                data = 0x00000000;
+        exec_clkcmp(priv, head, 0, pclk, &outp);
+
+        if (!outp.location && outp.type == DCB_OUTPUT_ANALOG) {
+            oreg = 0x614280 + (ffs(outp.or) - 1) * 0x800;
+            oval = 0x00000000;
+            hval = 0x00000000;
+        } else
+        if (!outp.location) {
+            if (outp.type == DCB_OUTPUT_DP)
+                nv50_disp_intr_unk20_2_dp(priv, &outp, pclk);
+            oreg = 0x614300 + (ffs(outp.or) - 1) * 0x800;
+            oval = (conf & 0x0100) ? 0x00000101 : 0x00000000;
+            hval = 0x00000000;
         } else {
-            if (outp.type == DCB_OUTPUT_DP)
-                nv50_disp_intr_unk20_dp(priv, &outp, pclk);
-            addr = 0x614300 + (ffs(outp.or) - 1) * 0x800;
-            mask = 0x00000707;
-            data = (conf & 0x0100) ? 0x0101 : 0x0000;
-        }
-
-        nv_mask(priv, addr, mask, data);
+            oreg = 0x614380 + (ffs(outp.or) - 1) * 0x800;
+            oval = 0x00000001;
+            hval = 0x00000001;
         }
-    }
 
-    nv_wr32(priv, 0x610024, 0x00000020);
-    nv_wr32(priv, 0x610030, 0x80000000);
+        nv_mask(priv, hreg, 0x0000000f, hval);
+        nv_mask(priv, oreg, 0x00000707, oval);
+    }
 }
 
 /* If programming a TMDS output on a SOR that can also be configured for
@@ -1143,7 +1159,7 @@ nv50_disp_intr_unk20(struct nv50_disp_priv *priv, u32 super)
  * programmed for DisplayPort.
 */
 static void
-nv50_disp_intr_unk40_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp)
+nv50_disp_intr_unk40_0_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp)
 {
     struct nouveau_bios *bios = nouveau_bios(priv);
     const int link = !(outp->sorconf.link & 1);
@@ -1157,35 +1173,79 @@
 }
 
 static void
-nv50_disp_intr_unk40(struct nv50_disp_priv *priv, u32 super)
+nv50_disp_intr_unk40_0(struct nv50_disp_priv *priv, int head)
 {
-    int head = ffs((super & 0x00000180) >> 7) - 1;
-    if (head >= 0) {
-        struct dcb_output outp;
-        u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
-        if (pclk && exec_clkcmp(priv, head, 1, pclk, &outp)) {
-            if (outp.type == DCB_OUTPUT_TMDS)
-                nv50_disp_intr_unk40_tmds(priv, &outp);
+    struct dcb_output outp;
+    u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
+    if (exec_clkcmp(priv, head, 1, pclk, &outp) != ~0) {
+        if (outp.location == 0 && outp.type == DCB_OUTPUT_TMDS)
+            nv50_disp_intr_unk40_0_tmds(priv, &outp);
+        else
+        if (outp.location == 1 && outp.type == DCB_OUTPUT_DP) {
+            u32 soff = (ffs(outp.or) - 1) * 0x08;
+            u32 ctrl = nv_rd32(priv, 0x610b84 + soff);
+            u32 datarate;
+
+            switch ((ctrl & 0x000f0000) >> 16) {
+            case 6: datarate = pclk * 30 / 8; break;
+            case 5: datarate = pclk * 24 / 8; break;
+            case 2:
+            default:
+                datarate = pclk * 18 / 8;
+                break;
+            }
+
+            nouveau_dp_train(&priv->base, priv->pior.dp,
+                             &outp, head, datarate);
         }
     }
-
-    nv_wr32(priv, 0x610024, 0x00000040);
-    nv_wr32(priv, 0x610030, 0x80000000);
 }
 
-static void
-nv50_disp_intr_super(struct nv50_disp_priv *priv, u32 intr1)
+void
+nv50_disp_intr_supervisor(struct work_struct *work)
 {
+    struct nv50_disp_priv *priv =
+        container_of(work, struct nv50_disp_priv, supervisor);
     u32 super = nv_rd32(priv, 0x610030);
+    int head;
 
-    nv_debug(priv, "supervisor 0x%08x 0x%08x\n", intr1, super);
+    nv_debug(priv, "supervisor 0x%08x 0x%08x\n", priv->super, super);
 
-    if (intr1 & 0x00000010)
-        nv50_disp_intr_unk10(priv, super);
-    if (intr1 & 0x00000020)
-        nv50_disp_intr_unk20(priv, super);
-    if (intr1 & 0x00000040)
-        nv50_disp_intr_unk40(priv, super);
+    if (priv->super & 0x00000010) {
+        for (head = 0; head < priv->head.nr; head++) {
+            if (!(super & (0x00000020 << head)))
+                continue;
+            if (!(super & (0x00000080 << head)))
+                continue;
+            nv50_disp_intr_unk10_0(priv, head);
+        }
+    } else
+    if (priv->super & 0x00000020) {
+        for (head = 0; head < priv->head.nr; head++) {
+            if (!(super & (0x00000080 << head)))
+                continue;
+            nv50_disp_intr_unk20_0(priv, head);
+        }
+        for (head = 0; head < priv->head.nr; head++) {
+            if (!(super & (0x00000200 << head)))
+                continue;
+            nv50_disp_intr_unk20_1(priv, head);
+        }
+        for (head = 0; head < priv->head.nr; head++) {
+            if (!(super & (0x00000080 << head)))
+                continue;
+            nv50_disp_intr_unk20_2(priv, head);
+        }
+    } else
+    if (priv->super & 0x00000040) {
+        for (head = 0; head < priv->head.nr; head++) {
+            if (!(super & (0x00000080 << head)))
+                continue;
+            nv50_disp_intr_unk40_0(priv, head);
+        }
+    }
+
+    nv_wr32(priv, 0x610030, 0x80000000);
 }
 
 void
@@ -1201,19 +1261,21 @@ nv50_disp_intr(struct nouveau_subdev *subdev)
     }
 
     if (intr1 & 0x00000004) {
-        nv50_disp_intr_vblank(priv, 0);
+        nouveau_event_trigger(priv->base.vblank, 0);
         nv_wr32(priv, 0x610024, 0x00000004);
         intr1 &= ~0x00000004;
     }
 
     if (intr1 & 0x00000008) {
-        nv50_disp_intr_vblank(priv, 1);
+        nouveau_event_trigger(priv->base.vblank, 1);
        nv_wr32(priv, 0x610024, 0x00000008);
        intr1 &= ~0x00000008;
     }
 
     if (intr1 & 0x00000070) {
-        nv50_disp_intr_super(priv, intr1);
+        priv->super = (intr1 & 0x00000070);
+        schedule_work(&priv->supervisor);
+        nv_wr32(priv, 0x610024, priv->super);
         intr1 &= ~0x00000070;
     }
 }
@@ -1226,7 +1288,7 @@ nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
     struct nv50_disp_priv *priv;
     int ret;
 
-    ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
+    ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
                               "display", &priv);
     *pobject = nv_object(priv);
     if (ret)
@@ -1235,16 +1297,17 @@
     nv_engine(priv)->sclass = nv50_disp_base_oclass;
     nv_engine(priv)->cclass = &nv50_disp_cclass;
     nv_subdev(priv)->intr = nv50_disp_intr;
+    INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
     priv->sclass = nv50_disp_sclass;
     priv->head.nr = 2;
     priv->dac.nr = 3;
     priv->sor.nr = 2;
+    priv->pior.nr = 3;
    priv->dac.power = nv50_dac_power;
    priv->dac.sense = nv50_dac_sense;
    priv->sor.power = nv50_sor_power;
-
-    INIT_LIST_HEAD(&priv->base.vblank.list);
-    spin_lock_init(&priv->base.vblank.lock);
+    priv->pior.power = nv50_pior_power;
+    priv->pior.dp = &nv50_pior_dp_func;
     return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
index a6bb931450f1..1ae6ceb56704 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
@@ -3,16 +3,22 @@
3 3
4#include <core/parent.h> 4#include <core/parent.h>
5#include <core/namedb.h> 5#include <core/namedb.h>
6#include <core/engctx.h>
6#include <core/ramht.h> 7#include <core/ramht.h>
8#include <core/event.h>
7 9
8#include <engine/dmaobj.h> 10#include <engine/dmaobj.h>
9#include <engine/disp.h> 11#include <engine/disp.h>
10 12
11struct dcb_output; 13#include "dport.h"
12 14
13struct nv50_disp_priv { 15struct nv50_disp_priv {
14 struct nouveau_disp base; 16 struct nouveau_disp base;
15 struct nouveau_oclass *sclass; 17 struct nouveau_oclass *sclass;
18
19 struct work_struct supervisor;
20 u32 super;
21
16 struct { 22 struct {
17 int nr; 23 int nr;
18 } head; 24 } head;
@@ -26,23 +32,15 @@ struct nv50_disp_priv {
26 int (*power)(struct nv50_disp_priv *, int sor, u32 data); 32 int (*power)(struct nv50_disp_priv *, int sor, u32 data);
27 int (*hda_eld)(struct nv50_disp_priv *, int sor, u8 *, u32); 33 int (*hda_eld)(struct nv50_disp_priv *, int sor, u8 *, u32);
28 int (*hdmi)(struct nv50_disp_priv *, int head, int sor, u32); 34 int (*hdmi)(struct nv50_disp_priv *, int head, int sor, u32);
29 int (*dp_train_init)(struct nv50_disp_priv *, int sor, int link,
30 int head, u16 type, u16 mask, u32 data,
31 struct dcb_output *);
32 int (*dp_train_fini)(struct nv50_disp_priv *, int sor, int link,
33 int head, u16 type, u16 mask, u32 data,
34 struct dcb_output *);
35 int (*dp_train)(struct nv50_disp_priv *, int sor, int link,
36 u16 type, u16 mask, u32 data,
37 struct dcb_output *);
38 int (*dp_lnkctl)(struct nv50_disp_priv *, int sor, int link,
39 int head, u16 type, u16 mask, u32 data,
40 struct dcb_output *);
41 int (*dp_drvctl)(struct nv50_disp_priv *, int sor, int link,
42 int lane, u16 type, u16 mask, u32 data,
43 struct dcb_output *);
44 u32 lvdsconf; 35 u32 lvdsconf;
36 const struct nouveau_dp_func *dp;
45 } sor; 37 } sor;
38 struct {
39 int nr;
40 int (*power)(struct nv50_disp_priv *, int ext, u32 data);
41 u8 type[3];
42 const struct nouveau_dp_func *dp;
43 } pior;
46}; 44};
47 45
48#define DAC_MTHD(n) (n), (n) + 0x03 46#define DAC_MTHD(n) (n), (n) + 0x03
@@ -81,6 +79,11 @@ int nvd0_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
81int nvd0_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32, 79int nvd0_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
82 struct dcb_output *); 80 struct dcb_output *);
83 81
82#define PIOR_MTHD(n) (n), (n) + 0x03
83
84int nv50_pior_mthd(struct nouveau_object *, u32, void *, u32);
85int nv50_pior_power(struct nv50_disp_priv *, int, u32);
86
84struct nv50_disp_base { 87struct nv50_disp_base {
85 struct nouveau_parent base; 88 struct nouveau_parent base;
86 struct nouveau_ramht *ramht; 89 struct nouveau_ramht *ramht;
@@ -124,6 +127,7 @@ extern struct nouveau_ofuncs nv50_disp_oimm_ofuncs;
124extern struct nouveau_ofuncs nv50_disp_curs_ofuncs; 127extern struct nouveau_ofuncs nv50_disp_curs_ofuncs;
125extern struct nouveau_ofuncs nv50_disp_base_ofuncs; 128extern struct nouveau_ofuncs nv50_disp_base_ofuncs;
126extern struct nouveau_oclass nv50_disp_cclass; 129extern struct nouveau_oclass nv50_disp_cclass;
130void nv50_disp_intr_supervisor(struct work_struct *);
127void nv50_disp_intr(struct nouveau_subdev *); 131void nv50_disp_intr(struct nouveau_subdev *);
128 132
129extern struct nouveau_omthds nv84_disp_base_omthds[]; 133extern struct nouveau_omthds nv84_disp_base_omthds[];
@@ -137,6 +141,7 @@ extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs;
137extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs; 141extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs;
138extern struct nouveau_ofuncs nvd0_disp_base_ofuncs; 142extern struct nouveau_ofuncs nvd0_disp_base_ofuncs;
139extern struct nouveau_oclass nvd0_disp_cclass; 143extern struct nouveau_oclass nvd0_disp_cclass;
144void nvd0_disp_intr_supervisor(struct work_struct *);
140void nvd0_disp_intr(struct nouveau_subdev *); 145void nvd0_disp_intr(struct nouveau_subdev *);
141 146
142#endif 147#endif
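The header changes above replace five per-operation DP function pointers with a single const vtable pointer per output path (SOR, plus the new PIOR block), alongside the work_struct/super pair used by the interrupt rework. The vtable's shape, as inferred from its users later in this patch (the authoritative definition lives in the dport.h now included above and is not shown here):

    struct nouveau_dp_func {
            int (*pattern)(struct nouveau_disp *, struct dcb_output *,
                           int head, int pattern);
            int (*lnk_ctl)(struct nouveau_disp *, struct dcb_output *,
                           int head, int link_nr, int link_bw, bool enh_frame);
            int (*drv_ctl)(struct nouveau_disp *, struct dcb_output *,
                           int head, int lane, int swing, int preem);
    };

Common training code can then drive either encoder through the same three hooks, e.g. priv->sor.dp->lnk_ctl(...) or priv->pior.dp->lnk_ctl(...), without caring which register block sits behind them.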
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
index fc84eacdfbec..d8c74c0883a1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
@@ -46,6 +46,9 @@ nv84_disp_base_omthds[] = {
46 { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, 46 { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
47 { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, 47 { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
48 { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd }, 48 { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
49 { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
50 { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
51 { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
49 {}, 52 {},
50}; 53};
51 54
@@ -63,7 +66,7 @@ nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
63 struct nv50_disp_priv *priv; 66 struct nv50_disp_priv *priv;
64 int ret; 67 int ret;
65 68
66 ret = nouveau_disp_create(parent, engine, oclass, "PDISP", 69 ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
67 "display", &priv); 70 "display", &priv);
68 *pobject = nv_object(priv); 71 *pobject = nv_object(priv);
69 if (ret) 72 if (ret)
@@ -72,17 +75,18 @@ nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
72 nv_engine(priv)->sclass = nv84_disp_base_oclass; 75 nv_engine(priv)->sclass = nv84_disp_base_oclass;
73 nv_engine(priv)->cclass = &nv50_disp_cclass; 76 nv_engine(priv)->cclass = &nv50_disp_cclass;
74 nv_subdev(priv)->intr = nv50_disp_intr; 77 nv_subdev(priv)->intr = nv50_disp_intr;
78 INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
75 priv->sclass = nv84_disp_sclass; 79 priv->sclass = nv84_disp_sclass;
76 priv->head.nr = 2; 80 priv->head.nr = 2;
77 priv->dac.nr = 3; 81 priv->dac.nr = 3;
78 priv->sor.nr = 2; 82 priv->sor.nr = 2;
83 priv->pior.nr = 3;
79 priv->dac.power = nv50_dac_power; 84 priv->dac.power = nv50_dac_power;
80 priv->dac.sense = nv50_dac_sense; 85 priv->dac.sense = nv50_dac_sense;
81 priv->sor.power = nv50_sor_power; 86 priv->sor.power = nv50_sor_power;
82 priv->sor.hdmi = nv84_hdmi_ctrl; 87 priv->sor.hdmi = nv84_hdmi_ctrl;
83 88 priv->pior.power = nv50_pior_power;
84 INIT_LIST_HEAD(&priv->base.vblank.list); 89 priv->pior.dp = &nv50_pior_dp_func;
85 spin_lock_init(&priv->base.vblank.lock);
86 return 0; 90 return 0;
87} 91}
88 92
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
index ba9dfd4669a2..a66f949c1f84 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
@@ -44,14 +44,11 @@ nv94_disp_base_omthds[] = {
44 { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd }, 44 { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
45 { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd }, 45 { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
46 { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, 46 { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
47 { SOR_MTHD(NV94_DISP_SOR_DP_TRAIN) , nv50_sor_mthd },
48 { SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL) , nv50_sor_mthd },
49 { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd },
50 { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd },
51 { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd },
52 { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd },
53 { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, 47 { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
54 { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd }, 48 { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
49 { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
50 { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
51 { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
55 {}, 52 {},
56}; 53};
57 54
@@ -69,7 +66,7 @@ nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
69 struct nv50_disp_priv *priv; 66 struct nv50_disp_priv *priv;
70 int ret; 67 int ret;
71 68
72 ret = nouveau_disp_create(parent, engine, oclass, "PDISP", 69 ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
73 "display", &priv); 70 "display", &priv);
74 *pobject = nv_object(priv); 71 *pobject = nv_object(priv);
75 if (ret) 72 if (ret)
@@ -78,22 +75,19 @@ nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
78 nv_engine(priv)->sclass = nv94_disp_base_oclass; 75 nv_engine(priv)->sclass = nv94_disp_base_oclass;
79 nv_engine(priv)->cclass = &nv50_disp_cclass; 76 nv_engine(priv)->cclass = &nv50_disp_cclass;
80 nv_subdev(priv)->intr = nv50_disp_intr; 77 nv_subdev(priv)->intr = nv50_disp_intr;
78 INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
81 priv->sclass = nv94_disp_sclass; 79 priv->sclass = nv94_disp_sclass;
82 priv->head.nr = 2; 80 priv->head.nr = 2;
83 priv->dac.nr = 3; 81 priv->dac.nr = 3;
84 priv->sor.nr = 4; 82 priv->sor.nr = 4;
83 priv->pior.nr = 3;
85 priv->dac.power = nv50_dac_power; 84 priv->dac.power = nv50_dac_power;
86 priv->dac.sense = nv50_dac_sense; 85 priv->dac.sense = nv50_dac_sense;
87 priv->sor.power = nv50_sor_power; 86 priv->sor.power = nv50_sor_power;
88 priv->sor.hdmi = nv84_hdmi_ctrl; 87 priv->sor.hdmi = nv84_hdmi_ctrl;
89 priv->sor.dp_train = nv94_sor_dp_train; 88 priv->sor.dp = &nv94_sor_dp_func;
90 priv->sor.dp_train_init = nv94_sor_dp_train_init; 89 priv->pior.power = nv50_pior_power;
91 priv->sor.dp_train_fini = nv94_sor_dp_train_fini; 90 priv->pior.dp = &nv50_pior_dp_func;
92 priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl;
93 priv->sor.dp_drvctl = nv94_sor_dp_drvctl;
94
95 INIT_LIST_HEAD(&priv->base.vblank.list);
96 spin_lock_init(&priv->base.vblank.lock);
97 return 0; 91 return 0;
98} 92}
99 93
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
index 5d63902cdeda..6cf8eefac368 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
@@ -53,7 +53,7 @@ nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
53 struct nv50_disp_priv *priv; 53 struct nv50_disp_priv *priv;
54 int ret; 54 int ret;
55 55
56 ret = nouveau_disp_create(parent, engine, oclass, "PDISP", 56 ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
57 "display", &priv); 57 "display", &priv);
58 *pobject = nv_object(priv); 58 *pobject = nv_object(priv);
59 if (ret) 59 if (ret)
@@ -62,17 +62,18 @@ nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
62 nv_engine(priv)->sclass = nva0_disp_base_oclass; 62 nv_engine(priv)->sclass = nva0_disp_base_oclass;
63 nv_engine(priv)->cclass = &nv50_disp_cclass; 63 nv_engine(priv)->cclass = &nv50_disp_cclass;
64 nv_subdev(priv)->intr = nv50_disp_intr; 64 nv_subdev(priv)->intr = nv50_disp_intr;
65 INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
65 priv->sclass = nva0_disp_sclass; 66 priv->sclass = nva0_disp_sclass;
66 priv->head.nr = 2; 67 priv->head.nr = 2;
67 priv->dac.nr = 3; 68 priv->dac.nr = 3;
68 priv->sor.nr = 2; 69 priv->sor.nr = 2;
70 priv->pior.nr = 3;
69 priv->dac.power = nv50_dac_power; 71 priv->dac.power = nv50_dac_power;
70 priv->dac.sense = nv50_dac_sense; 72 priv->dac.sense = nv50_dac_sense;
71 priv->sor.power = nv50_sor_power; 73 priv->sor.power = nv50_sor_power;
72 priv->sor.hdmi = nv84_hdmi_ctrl; 74 priv->sor.hdmi = nv84_hdmi_ctrl;
73 75 priv->pior.power = nv50_pior_power;
74 INIT_LIST_HEAD(&priv->base.vblank.list); 76 priv->pior.dp = &nv50_pior_dp_func;
75 spin_lock_init(&priv->base.vblank.lock);
76 return 0; 77 return 0;
77} 78}
78 79
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
index e9192ca389fa..b75413169eae 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
@@ -45,14 +45,11 @@ nva3_disp_base_omthds[] = {
45 { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd }, 45 { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd },
46 { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd }, 46 { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
47 { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd }, 47 { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
48 { SOR_MTHD(NV94_DISP_SOR_DP_TRAIN) , nv50_sor_mthd },
49 { SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL) , nv50_sor_mthd },
50 { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd },
51 { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd },
52 { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd },
53 { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd },
54 { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd }, 48 { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
55 { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd }, 49 { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
50 { PIOR_MTHD(NV50_DISP_PIOR_PWR) , nv50_pior_mthd },
51 { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR) , nv50_pior_mthd },
52 { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR) , nv50_pior_mthd },
56 {}, 53 {},
57}; 54};
58 55
@@ -70,7 +67,7 @@ nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
70 struct nv50_disp_priv *priv; 67 struct nv50_disp_priv *priv;
71 int ret; 68 int ret;
72 69
73 ret = nouveau_disp_create(parent, engine, oclass, "PDISP", 70 ret = nouveau_disp_create(parent, engine, oclass, 2, "PDISP",
74 "display", &priv); 71 "display", &priv);
75 *pobject = nv_object(priv); 72 *pobject = nv_object(priv);
76 if (ret) 73 if (ret)
@@ -79,23 +76,20 @@ nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
79 nv_engine(priv)->sclass = nva3_disp_base_oclass; 76 nv_engine(priv)->sclass = nva3_disp_base_oclass;
80 nv_engine(priv)->cclass = &nv50_disp_cclass; 77 nv_engine(priv)->cclass = &nv50_disp_cclass;
81 nv_subdev(priv)->intr = nv50_disp_intr; 78 nv_subdev(priv)->intr = nv50_disp_intr;
79 INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
82 priv->sclass = nva3_disp_sclass; 80 priv->sclass = nva3_disp_sclass;
83 priv->head.nr = 2; 81 priv->head.nr = 2;
84 priv->dac.nr = 3; 82 priv->dac.nr = 3;
85 priv->sor.nr = 4; 83 priv->sor.nr = 4;
84 priv->pior.nr = 3;
86 priv->dac.power = nv50_dac_power; 85 priv->dac.power = nv50_dac_power;
87 priv->dac.sense = nv50_dac_sense; 86 priv->dac.sense = nv50_dac_sense;
88 priv->sor.power = nv50_sor_power; 87 priv->sor.power = nv50_sor_power;
89 priv->sor.hda_eld = nva3_hda_eld; 88 priv->sor.hda_eld = nva3_hda_eld;
90 priv->sor.hdmi = nva3_hdmi_ctrl; 89 priv->sor.hdmi = nva3_hdmi_ctrl;
91 priv->sor.dp_train = nv94_sor_dp_train; 90 priv->sor.dp = &nv94_sor_dp_func;
92 priv->sor.dp_train_init = nv94_sor_dp_train_init; 91 priv->pior.power = nv50_pior_power;
93 priv->sor.dp_train_fini = nv94_sor_dp_train_fini; 92 priv->pior.dp = &nv50_pior_dp_func;
94 priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl;
95 priv->sor.dp_drvctl = nv94_sor_dp_drvctl;
96
97 INIT_LIST_HEAD(&priv->base.vblank.list);
98 spin_lock_init(&priv->base.vblank.lock);
99 return 0; 93 return 0;
100} 94}
101 95
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 9e38ebff5fb3..788dd34ccb54 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -27,12 +27,10 @@
27#include <core/handle.h> 27#include <core/handle.h>
28#include <core/class.h> 28#include <core/class.h>
29 29
30#include <engine/software.h>
31#include <engine/disp.h> 30#include <engine/disp.h>
32 31
33#include <subdev/timer.h> 32#include <subdev/timer.h>
34#include <subdev/fb.h> 33#include <subdev/fb.h>
35#include <subdev/bar.h>
36#include <subdev/clock.h> 34#include <subdev/clock.h>
37 35
38#include <subdev/bios.h> 36#include <subdev/bios.h>
@@ -230,7 +228,7 @@ nvd0_disp_sync_ctor(struct nouveau_object *parent,
230 struct nv50_disp_dmac *dmac; 228 struct nv50_disp_dmac *dmac;
231 int ret; 229 int ret;
232 230
233 if (size < sizeof(*data) || args->head >= priv->head.nr) 231 if (size < sizeof(*args) || args->head >= priv->head.nr)
234 return -EINVAL; 232 return -EINVAL;
235 233
236 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, 234 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
@@ -270,7 +268,7 @@ nvd0_disp_ovly_ctor(struct nouveau_object *parent,
270 struct nv50_disp_dmac *dmac; 268 struct nv50_disp_dmac *dmac;
271 int ret; 269 int ret;
272 270
273 if (size < sizeof(*data) || args->head >= priv->head.nr) 271 if (size < sizeof(*args) || args->head >= priv->head.nr)
274 return -EINVAL; 272 return -EINVAL;
275 273
276 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf, 274 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
@@ -443,6 +441,18 @@ nvd0_disp_curs_ofuncs = {
443 * Base display object 441 * Base display object
444 ******************************************************************************/ 442 ******************************************************************************/
445 443
444static void
445nvd0_disp_base_vblank_enable(struct nouveau_event *event, int head)
446{
447 nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000001);
448}
449
450static void
451nvd0_disp_base_vblank_disable(struct nouveau_event *event, int head)
452{
453 nv_mask(event->priv, 0x6100c0 + (head * 0x800), 0x00000001, 0x00000000);
454}
455
446static int 456static int
447nvd0_disp_base_ctor(struct nouveau_object *parent, 457nvd0_disp_base_ctor(struct nouveau_object *parent,
448 struct nouveau_object *engine, 458 struct nouveau_object *engine,
@@ -459,6 +469,10 @@ nvd0_disp_base_ctor(struct nouveau_object *parent,
459 if (ret) 469 if (ret)
460 return ret; 470 return ret;
461 471
472 priv->base.vblank->priv = priv;
473 priv->base.vblank->enable = nvd0_disp_base_vblank_enable;
474 priv->base.vblank->disable = nvd0_disp_base_vblank_disable;
475
462 return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht); 476 return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
463} 477}
464 478
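These two hunks swap the driver-private vblank list for the new generic event object: the display code only supplies enable/disable callbacks that gate the per-head interrupt (bit 0 of 0x6100c0 + head * 0x800), and the interrupt handler later fires nouveau_event_trigger(). A rough model of the object being wired up, with hypothetical names where the real definition (core/event.h, not shown in this section) may differ:

    struct my_event {
            void *priv;                             /* backpointer for callbacks */
            void (*enable)(struct my_event *, int index);   /* unmask hw irq */
            void (*disable)(struct my_event *, int index);  /* mask hw irq */
    };

    /* producer side, from the interrupt handler (as later in this patch):
     *      nouveau_event_trigger(priv->base.vblank, head);
     * subscribing is what pulls ->enable(), unsubscribing pulls ->disable(),
     * so the vblank interrupt can stay masked while nobody is listening. */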
@@ -609,13 +623,24 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
609} 623}
610 624
611static bool 625static bool
612exec_script(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl, int id) 626exec_script(struct nv50_disp_priv *priv, int head, int id)
613{ 627{
614 struct nouveau_bios *bios = nouveau_bios(priv); 628 struct nouveau_bios *bios = nouveau_bios(priv);
615 struct nvbios_outp info; 629 struct nvbios_outp info;
616 struct dcb_output dcb; 630 struct dcb_output dcb;
617 u8 ver, hdr, cnt, len; 631 u8 ver, hdr, cnt, len;
632 u32 ctrl = 0x00000000;
618 u16 data; 633 u16 data;
634 int outp;
635
636 for (outp = 0; !(ctrl & (1 << head)) && outp < 8; outp++) {
637 ctrl = nv_rd32(priv, 0x640180 + (outp * 0x20));
638 if (ctrl & (1 << head))
639 break;
640 }
641
642 if (outp == 8)
643 return false;
619 644
620 data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info); 645 data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
621 if (data) { 646 if (data) {
@@ -635,21 +660,31 @@ exec_script(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl, int id)
635} 660}
636 661
637static u32 662static u32
638exec_clkcmp(struct nv50_disp_priv *priv, int head, int outp, 663exec_clkcmp(struct nv50_disp_priv *priv, int head, int id,
639 u32 ctrl, int id, u32 pclk) 664 u32 pclk, struct dcb_output *dcb)
640{ 665{
641 struct nouveau_bios *bios = nouveau_bios(priv); 666 struct nouveau_bios *bios = nouveau_bios(priv);
642 struct nvbios_outp info1; 667 struct nvbios_outp info1;
643 struct nvbios_ocfg info2; 668 struct nvbios_ocfg info2;
644 struct dcb_output dcb;
645 u8 ver, hdr, cnt, len; 669 u8 ver, hdr, cnt, len;
646 u16 data, conf; 670 u32 ctrl = 0x00000000;
671 u32 data, conf = ~0;
672 int outp;
647 673
648 data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info1); 674 for (outp = 0; !(ctrl & (1 << head)) && outp < 8; outp++) {
649 if (data == 0x0000) 675 ctrl = nv_rd32(priv, 0x660180 + (outp * 0x20));
676 if (ctrl & (1 << head))
677 break;
678 }
679
680 if (outp == 8)
650 return false; 681 return false;
651 682
652 switch (dcb.type) { 683 data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1);
684 if (data == 0x0000)
685 return conf;
686
687 switch (dcb->type) {
653 case DCB_OUTPUT_TMDS: 688 case DCB_OUTPUT_TMDS:
654 conf = (ctrl & 0x00000f00) >> 8; 689 conf = (ctrl & 0x00000f00) >> 8;
655 if (pclk >= 165000) 690 if (pclk >= 165000)
@@ -668,46 +703,52 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int outp,
668 } 703 }
669 704
670 data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2); 705 data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
671 if (data) { 706 if (data && id < 0xff) {
672 data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); 707 data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
673 if (data) { 708 if (data) {
674 struct nvbios_init init = { 709 struct nvbios_init init = {
675 .subdev = nv_subdev(priv), 710 .subdev = nv_subdev(priv),
676 .bios = bios, 711 .bios = bios,
677 .offset = data, 712 .offset = data,
678 .outp = &dcb, 713 .outp = dcb,
679 .crtc = head, 714 .crtc = head,
680 .execute = 1, 715 .execute = 1,
681 }; 716 };
682 717
683 if (nvbios_exec(&init)) 718 nvbios_exec(&init);
684 return 0x0000;
685 return conf;
686 } 719 }
687 } 720 }
688 721
689 return 0x0000; 722 return conf;
690} 723}
691 724
692static void 725static void
693nvd0_display_unk1_handler(struct nv50_disp_priv *priv, u32 head, u32 mask) 726nvd0_disp_intr_unk1_0(struct nv50_disp_priv *priv, int head)
694{ 727{
695 int i; 728 exec_script(priv, head, 1);
729}
696 730
697 for (i = 0; mask && i < 8; i++) { 731static void
698 u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20)); 732nvd0_disp_intr_unk2_0(struct nv50_disp_priv *priv, int head)
699 if (mcc & (1 << head)) 733{
700 exec_script(priv, head, i, mcc, 1); 734 exec_script(priv, head, 2);
701 } 735}
702 736
703 nv_wr32(priv, 0x6101d4, 0x00000000); 737static void
704 nv_wr32(priv, 0x6109d4, 0x00000000); 738nvd0_disp_intr_unk2_1(struct nv50_disp_priv *priv, int head)
705 nv_wr32(priv, 0x6101d0, 0x80000000); 739{
740 struct nouveau_clock *clk = nouveau_clock(priv);
741 u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
742 if (pclk)
743 clk->pll_set(clk, PLL_VPLL0 + head, pclk);
744 nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000);
706} 745}
707 746
708static void 747static void
709nvd0_display_unk2_calc_tu(struct nv50_disp_priv *priv, int head, int or) 748nvd0_disp_intr_unk2_2_tu(struct nv50_disp_priv *priv, int head,
749 struct dcb_output *outp)
710{ 750{
751 const int or = ffs(outp->or) - 1;
711 const u32 ctrl = nv_rd32(priv, 0x660200 + (or * 0x020)); 752 const u32 ctrl = nv_rd32(priv, 0x660200 + (or * 0x020));
712 const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300)); 753 const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300));
713 const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000; 754 const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
@@ -750,105 +791,102 @@ nvd0_display_unk2_calc_tu(struct nv50_disp_priv *priv, int head, int or)
750} 791}
751 792
752static void 793static void
753nvd0_display_unk2_handler(struct nv50_disp_priv *priv, u32 head, u32 mask) 794nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
754{ 795{
755 u32 pclk; 796 struct dcb_output outp;
756 int i; 797 u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
757 798 u32 conf = exec_clkcmp(priv, head, 0xff, pclk, &outp);
758 for (i = 0; mask && i < 8; i++) { 799 if (conf != ~0) {
759 u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20)); 800 u32 addr, data;
760 if (mcc & (1 << head)) 801
761 exec_script(priv, head, i, mcc, 2); 802 if (outp.type == DCB_OUTPUT_DP) {
762 } 803 u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300));
804 switch ((sync & 0x000003c0) >> 6) {
805 case 6: pclk = pclk * 30 / 8; break;
806 case 5: pclk = pclk * 24 / 8; break;
807 case 2:
808 default:
809 pclk = pclk * 18 / 8;
810 break;
811 }
763 812
764 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000; 813 nouveau_dp_train(&priv->base, priv->sor.dp,
765 nv_debug(priv, "head %d pclk %d mask 0x%08x\n", head, pclk, mask); 814 &outp, head, pclk);
766 if (pclk && (mask & 0x00010000)) { 815 }
767 struct nouveau_clock *clk = nouveau_clock(priv);
768 clk->pll_set(clk, PLL_VPLL0 + head, pclk);
769 }
770 816
771 nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000); 817 exec_clkcmp(priv, head, 0, pclk, &outp);
772 818
773 for (i = 0; mask && i < 8; i++) { 819 if (outp.type == DCB_OUTPUT_ANALOG) {
774 u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20)), cfg; 820 addr = 0x612280 + (ffs(outp.or) - 1) * 0x800;
775 if (mcp & (1 << head)) { 821 data = 0x00000000;
776 if ((cfg = exec_clkcmp(priv, head, i, mcp, 0, pclk))) { 822 } else {
777 u32 addr, mask, data = 0x00000000; 823 if (outp.type == DCB_OUTPUT_DP)
778 if (i < 4) { 824 nvd0_disp_intr_unk2_2_tu(priv, head, &outp);
779 addr = 0x612280 + ((i - 0) * 0x800); 825 addr = 0x612300 + (ffs(outp.or) - 1) * 0x800;
780 mask = 0xffffffff; 826 data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
781 } else {
782 switch (mcp & 0x00000f00) {
783 case 0x00000800:
784 case 0x00000900:
785 nvd0_display_unk2_calc_tu(priv, head, i - 4);
786 break;
787 default:
788 break;
789 }
790
791 addr = 0x612300 + ((i - 4) * 0x800);
792 mask = 0x00000707;
793 if (cfg & 0x00000100)
794 data = 0x00000101;
795 }
796 nv_mask(priv, addr, mask, data);
797 }
798 break;
799 } 827 }
800 }
801 828
802 nv_wr32(priv, 0x6101d4, 0x00000000); 829 nv_mask(priv, addr, 0x00000707, data);
803 nv_wr32(priv, 0x6109d4, 0x00000000); 830 }
804 nv_wr32(priv, 0x6101d0, 0x80000000);
805} 831}
806 832
807static void 833static void
808nvd0_display_unk4_handler(struct nv50_disp_priv *priv, u32 head, u32 mask) 834nvd0_disp_intr_unk4_0(struct nv50_disp_priv *priv, int head)
809{ 835{
810 int pclk, i; 836 struct dcb_output outp;
811 837 u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
812 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000; 838 exec_clkcmp(priv, head, 1, pclk, &outp);
839}
813 840
814 for (i = 0; mask && i < 8; i++) { 841void
815 u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20)); 842nvd0_disp_intr_supervisor(struct work_struct *work)
816 if (mcp & (1 << head)) 843{
817 exec_clkcmp(priv, head, i, mcp, 1, pclk); 844 struct nv50_disp_priv *priv =
845 container_of(work, struct nv50_disp_priv, supervisor);
846 u32 mask[4];
847 int head;
848
849 nv_debug(priv, "supervisor %08x\n", priv->super);
850 for (head = 0; head < priv->head.nr; head++) {
851 mask[head] = nv_rd32(priv, 0x6101d4 + (head * 0x800));
852 nv_debug(priv, "head %d: 0x%08x\n", head, mask[head]);
818 } 853 }
819 854
820 nv_wr32(priv, 0x6101d4, 0x00000000); 855 if (priv->super & 0x00000001) {
821 nv_wr32(priv, 0x6109d4, 0x00000000); 856 for (head = 0; head < priv->head.nr; head++) {
822 nv_wr32(priv, 0x6101d0, 0x80000000); 857 if (!(mask[head] & 0x00001000))
823} 858 continue;
824 859 nvd0_disp_intr_unk1_0(priv, head);
825static void 860 }
826nvd0_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc) 861 } else
827{ 862 if (priv->super & 0x00000002) {
828 struct nouveau_bar *bar = nouveau_bar(priv); 863 for (head = 0; head < priv->head.nr; head++) {
829 struct nouveau_disp *disp = &priv->base; 864 if (!(mask[head] & 0x00001000))
830 struct nouveau_software_chan *chan, *temp; 865 continue;
831 unsigned long flags; 866 nvd0_disp_intr_unk2_0(priv, head);
832 867 }
833 spin_lock_irqsave(&disp->vblank.lock, flags); 868 for (head = 0; head < priv->head.nr; head++) {
834 list_for_each_entry_safe(chan, temp, &disp->vblank.list, vblank.head) { 869 if (!(mask[head] & 0x00010000))
835 if (chan->vblank.crtc != crtc) 870 continue;
836 continue; 871 nvd0_disp_intr_unk2_1(priv, head);
837 872 }
838 nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel); 873 for (head = 0; head < priv->head.nr; head++) {
839 bar->flush(bar); 874 if (!(mask[head] & 0x00001000))
840 nv_wr32(priv, 0x06000c, upper_32_bits(chan->vblank.offset)); 875 continue;
841 nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset)); 876 nvd0_disp_intr_unk2_2(priv, head);
842 nv_wr32(priv, 0x060014, chan->vblank.value); 877 }
843 878 } else
844 list_del(&chan->vblank.head); 879 if (priv->super & 0x00000004) {
845 if (disp->vblank.put) 880 for (head = 0; head < priv->head.nr; head++) {
846 disp->vblank.put(disp->vblank.data, crtc); 881 if (!(mask[head] & 0x00001000))
882 continue;
883 nvd0_disp_intr_unk4_0(priv, head);
884 }
847 } 885 }
848 spin_unlock_irqrestore(&disp->vblank.lock, flags);
849 886
850 if (disp->vblank.notify) 887 for (head = 0; head < priv->head.nr; head++)
851 disp->vblank.notify(disp->vblank.data, crtc); 888 nv_wr32(priv, 0x6101d4 + (head * 0x800), 0x00000000);
889 nv_wr32(priv, 0x6101d0, 0x80000000);
852} 890}
853 891
854void 892void
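Two details in the hunk above are worth spelling out. First, before DP link training the pixel clock is scaled by bits per pixel over the 8 bits carried per byte of link payload; the (sync & 0x000003c0) >> 6 field plausibly selects 6/8/10 bits per component, i.e. 18/24/30 bpp (an inference, the patch itself only shows the multipliers). Second, the supervisor stages 1/2/4 each walk every head, keyed off the per-head 0x6101d4 mask, instead of the old single-crtc handlers. The scaling, written out as a helper with a worked number:

    /* assumed mapping: sync code 6 -> 30 bpp, 5 -> 24 bpp, else 18 bpp */
    static u32 dp_payload_khz(u32 pclk_khz, u32 sync)
    {
            switch ((sync & 0x000003c0) >> 6) {
            case 6:  return pclk_khz * 30 / 8;      /* 10 bpc */
            case 5:  return pclk_khz * 24 / 8;      /*  8 bpc */
            case 2:                                 /*  6 bpc */
            default: return pclk_khz * 18 / 8;
            }
    }
    /* e.g. 1080p60: 148500 kHz * 24 / 8 = 445500, the payload rate the
     * link must sustain, which is what nouveau_dp_train() receives. */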
@@ -884,27 +922,11 @@ nvd0_disp_intr(struct nouveau_subdev *subdev)
884 922
885 if (intr & 0x00100000) { 923 if (intr & 0x00100000) {
886 u32 stat = nv_rd32(priv, 0x6100ac); 924 u32 stat = nv_rd32(priv, 0x6100ac);
887 u32 mask = 0, crtc = ~0; 925 if (stat & 0x00000007) {
888 926 priv->super = (stat & 0x00000007);
889 while (!mask && ++crtc < priv->head.nr) 927 schedule_work(&priv->supervisor);
890 mask = nv_rd32(priv, 0x6101d4 + (crtc * 0x800)); 928 nv_wr32(priv, 0x6100ac, priv->super);
891 929 stat &= ~0x00000007;
892 if (stat & 0x00000001) {
893 nv_wr32(priv, 0x6100ac, 0x00000001);
894 nvd0_display_unk1_handler(priv, crtc, mask);
895 stat &= ~0x00000001;
896 }
897
898 if (stat & 0x00000002) {
899 nv_wr32(priv, 0x6100ac, 0x00000002);
900 nvd0_display_unk2_handler(priv, crtc, mask);
901 stat &= ~0x00000002;
902 }
903
904 if (stat & 0x00000004) {
905 nv_wr32(priv, 0x6100ac, 0x00000004);
906 nvd0_display_unk4_handler(priv, crtc, mask);
907 stat &= ~0x00000004;
908 } 930 }
909 931
910 if (stat) { 932 if (stat) {
@@ -920,7 +942,7 @@ nvd0_disp_intr(struct nouveau_subdev *subdev)
920 if (mask & intr) { 942 if (mask & intr) {
921 u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800)); 943 u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
922 if (stat & 0x00000001) 944 if (stat & 0x00000001)
923 nvd0_disp_intr_vblank(priv, i); 945 nouveau_event_trigger(priv->base.vblank, i);
924 nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0); 946 nv_mask(priv, 0x6100bc + (i * 0x800), 0, 0);
925 nv_rd32(priv, 0x6100c0 + (i * 0x800)); 947 nv_rd32(priv, 0x6100c0 + (i * 0x800));
926 } 948 }
@@ -933,10 +955,11 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
933 struct nouveau_object **pobject) 955 struct nouveau_object **pobject)
934{ 956{
935 struct nv50_disp_priv *priv; 957 struct nv50_disp_priv *priv;
958 int heads = nv_rd32(parent, 0x022448);
936 int ret; 959 int ret;
937 960
938 ret = nouveau_disp_create(parent, engine, oclass, "PDISP", 961 ret = nouveau_disp_create(parent, engine, oclass, heads,
939 "display", &priv); 962 "PDISP", "display", &priv);
940 *pobject = nv_object(priv); 963 *pobject = nv_object(priv);
941 if (ret) 964 if (ret)
942 return ret; 965 return ret;
@@ -944,8 +967,9 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
944 nv_engine(priv)->sclass = nvd0_disp_base_oclass; 967 nv_engine(priv)->sclass = nvd0_disp_base_oclass;
945 nv_engine(priv)->cclass = &nv50_disp_cclass; 968 nv_engine(priv)->cclass = &nv50_disp_cclass;
946 nv_subdev(priv)->intr = nvd0_disp_intr; 969 nv_subdev(priv)->intr = nvd0_disp_intr;
970 INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
947 priv->sclass = nvd0_disp_sclass; 971 priv->sclass = nvd0_disp_sclass;
948 priv->head.nr = nv_rd32(priv, 0x022448); 972 priv->head.nr = heads;
949 priv->dac.nr = 3; 973 priv->dac.nr = 3;
950 priv->sor.nr = 4; 974 priv->sor.nr = 4;
951 priv->dac.power = nv50_dac_power; 975 priv->dac.power = nv50_dac_power;
@@ -953,14 +977,7 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
953 priv->sor.power = nv50_sor_power; 977 priv->sor.power = nv50_sor_power;
954 priv->sor.hda_eld = nvd0_hda_eld; 978 priv->sor.hda_eld = nvd0_hda_eld;
955 priv->sor.hdmi = nvd0_hdmi_ctrl; 979 priv->sor.hdmi = nvd0_hdmi_ctrl;
956 priv->sor.dp_train = nvd0_sor_dp_train; 980 priv->sor.dp = &nvd0_sor_dp_func;
957 priv->sor.dp_train_init = nv94_sor_dp_train_init;
958 priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
959 priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl;
960 priv->sor.dp_drvctl = nvd0_sor_dp_drvctl;
961
962 INIT_LIST_HEAD(&priv->base.vblank.list);
963 spin_lock_init(&priv->base.vblank.lock);
964 return 0; 981 return 0;
965} 982}
966 983
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
index 259537c4587e..20725b363d58 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
@@ -51,10 +51,11 @@ nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
51 struct nouveau_object **pobject) 51 struct nouveau_object **pobject)
52{ 52{
53 struct nv50_disp_priv *priv; 53 struct nv50_disp_priv *priv;
54 int heads = nv_rd32(parent, 0x022448);
54 int ret; 55 int ret;
55 56
56 ret = nouveau_disp_create(parent, engine, oclass, "PDISP", 57 ret = nouveau_disp_create(parent, engine, oclass, heads,
57 "display", &priv); 58 "PDISP", "display", &priv);
58 *pobject = nv_object(priv); 59 *pobject = nv_object(priv);
59 if (ret) 60 if (ret)
60 return ret; 61 return ret;
@@ -62,8 +63,9 @@ nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
62 nv_engine(priv)->sclass = nve0_disp_base_oclass; 63 nv_engine(priv)->sclass = nve0_disp_base_oclass;
63 nv_engine(priv)->cclass = &nv50_disp_cclass; 64 nv_engine(priv)->cclass = &nv50_disp_cclass;
64 nv_subdev(priv)->intr = nvd0_disp_intr; 65 nv_subdev(priv)->intr = nvd0_disp_intr;
66 INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
65 priv->sclass = nve0_disp_sclass; 67 priv->sclass = nve0_disp_sclass;
66 priv->head.nr = nv_rd32(priv, 0x022448); 68 priv->head.nr = heads;
67 priv->dac.nr = 3; 69 priv->dac.nr = 3;
68 priv->sor.nr = 4; 70 priv->sor.nr = 4;
69 priv->dac.power = nv50_dac_power; 71 priv->dac.power = nv50_dac_power;
@@ -71,14 +73,7 @@ nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
71 priv->sor.power = nv50_sor_power; 73 priv->sor.power = nv50_sor_power;
72 priv->sor.hda_eld = nvd0_hda_eld; 74 priv->sor.hda_eld = nvd0_hda_eld;
73 priv->sor.hdmi = nvd0_hdmi_ctrl; 75 priv->sor.hdmi = nvd0_hdmi_ctrl;
74 priv->sor.dp_train = nvd0_sor_dp_train; 76 priv->sor.dp = &nvd0_sor_dp_func;
75 priv->sor.dp_train_init = nv94_sor_dp_train_init;
76 priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
77 priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl;
78 priv->sor.dp_drvctl = nvd0_sor_dp_drvctl;
79
80 INIT_LIST_HEAD(&priv->base.vblank.list);
81 spin_lock_init(&priv->base.vblank.lock);
82 return 0; 77 return 0;
83} 78}
84 79
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c
new file mode 100644
index 000000000000..2c8ce351b52d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/piornv50.c
@@ -0,0 +1,140 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27
28#include <subdev/bios.h>
29#include <subdev/bios/dcb.h>
30#include <subdev/timer.h>
31#include <subdev/i2c.h>
32
33#include "nv50.h"
34
35/******************************************************************************
36 * DisplayPort
37 *****************************************************************************/
38static struct nouveau_i2c_port *
39nv50_pior_dp_find(struct nouveau_disp *disp, struct dcb_output *outp)
40{
41 struct nouveau_i2c *i2c = nouveau_i2c(disp);
42 return i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(outp->extdev));
43}
44
45static int
46nv50_pior_dp_pattern(struct nouveau_disp *disp, struct dcb_output *outp,
47 int head, int pattern)
48{
49 struct nouveau_i2c_port *port;
50 int ret = -EINVAL;
51
52 port = nv50_pior_dp_find(disp, outp);
53 if (port) {
54 if (port->func->pattern)
55 ret = port->func->pattern(port, pattern);
56 else
57 ret = 0;
58 }
59
60 return ret;
61}
62
63static int
64nv50_pior_dp_lnk_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
65 int head, int lane_nr, int link_bw, bool enh)
66{
67 struct nouveau_i2c_port *port;
68 int ret = -EINVAL;
69
70 port = nv50_pior_dp_find(disp, outp);
71 if (port && port->func->lnk_ctl)
72 ret = port->func->lnk_ctl(port, lane_nr, link_bw, enh);
73
74 return ret;
75}
76
77static int
78nv50_pior_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
79 int head, int lane, int vsw, int pre)
80{
81 struct nouveau_i2c_port *port;
82 int ret = -EINVAL;
83
84 port = nv50_pior_dp_find(disp, outp);
85 if (port) {
86 if (port->func->drv_ctl)
87 ret = port->func->drv_ctl(port, lane, vsw, pre);
88 else
89 ret = 0;
90 }
91
92 return ret;
93}
94
95const struct nouveau_dp_func
96nv50_pior_dp_func = {
97 .pattern = nv50_pior_dp_pattern,
98 .lnk_ctl = nv50_pior_dp_lnk_ctl,
99 .drv_ctl = nv50_pior_dp_drv_ctl,
100};
101
102/******************************************************************************
103 * General PIOR handling
104 *****************************************************************************/
105int
106nv50_pior_power(struct nv50_disp_priv *priv, int or, u32 data)
107{
108 const u32 stat = data & NV50_DISP_PIOR_PWR_STATE;
109 const u32 soff = (or * 0x800);
110 nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000);
111 nv_mask(priv, 0x61e004 + soff, 0x80000101, 0x80000000 | stat);
112 nv_wait(priv, 0x61e004 + soff, 0x80000000, 0x00000000);
113 return 0;
114}
115
116int
117nv50_pior_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
118{
119 struct nv50_disp_priv *priv = (void *)object->engine;
120 const u8 type = (mthd & NV50_DISP_PIOR_MTHD_TYPE) >> 12;
121 const u8 or = (mthd & NV50_DISP_PIOR_MTHD_OR);
122 u32 *data = args;
123 int ret;
124
125 if (size < sizeof(u32))
126 return -EINVAL;
127
128 mthd &= ~NV50_DISP_PIOR_MTHD_TYPE;
129 mthd &= ~NV50_DISP_PIOR_MTHD_OR;
130 switch (mthd) {
131 case NV50_DISP_PIOR_PWR:
132 ret = priv->pior.power(priv, or, data[0]);
133 priv->pior.type[or] = type;
134 break;
135 default:
136 return -EINVAL;
137 }
138
139 return ret;
140}
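One design point in the new file above: the three DP hooks treat their underlying i2c port callbacks differently. pattern and drv_ctl are best effort, a missing port callback counts as success, presumably because the external encoder can handle those steps itself; lnk_ctl is mandatory and a missing callback stays -EINVAL, since the link cannot be configured at all without it. In outline:

    int ret = -EINVAL;
    if (port) {
            if (port->func->pattern)                /* optional step */
                    ret = port->func->pattern(port, pattern);
            else
                    ret = 0;                        /* encoder copes on its own */
    }
    /* vs. lnk_ctl, where the callback must exist:
     *      if (port && port->func->lnk_ctl)
     *              ret = port->func->lnk_ctl(port, lane_nr, link_bw, enh);
     */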
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
index 39b6b67732d0..ab1e918469a8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
@@ -79,31 +79,6 @@ nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
79 priv->sor.lvdsconf = data & NV50_DISP_SOR_LVDS_SCRIPT_ID; 79 priv->sor.lvdsconf = data & NV50_DISP_SOR_LVDS_SCRIPT_ID;
80 ret = 0; 80 ret = 0;
81 break; 81 break;
82 case NV94_DISP_SOR_DP_TRAIN:
83 switch (data & NV94_DISP_SOR_DP_TRAIN_OP) {
84 case NV94_DISP_SOR_DP_TRAIN_OP_PATTERN:
85 ret = priv->sor.dp_train(priv, or, link, type, mask, data, &outp);
86 break;
87 case NV94_DISP_SOR_DP_TRAIN_OP_INIT:
88 ret = priv->sor.dp_train_init(priv, or, link, head, type, mask, data, &outp);
89 break;
90 case NV94_DISP_SOR_DP_TRAIN_OP_FINI:
91 ret = priv->sor.dp_train_fini(priv, or, link, head, type, mask, data, &outp);
92 break;
93 default:
94 break;
95 }
96 break;
97 case NV94_DISP_SOR_DP_LNKCTL:
98 ret = priv->sor.dp_lnkctl(priv, or, link, head, type, mask, data, &outp);
99 break;
100 case NV94_DISP_SOR_DP_DRVCTL(0):
101 case NV94_DISP_SOR_DP_DRVCTL(1):
102 case NV94_DISP_SOR_DP_DRVCTL(2):
103 case NV94_DISP_SOR_DP_DRVCTL(3):
104 ret = priv->sor.dp_drvctl(priv, or, link, (mthd & 0xc0) >> 6,
105 type, mask, data, &outp);
106 break;
107 default: 82 default:
108 BUG_ON(1); 83 BUG_ON(1);
109 } 84 }
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
index f6edd009762e..7ec4ee83fb64 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
@@ -33,124 +33,53 @@
33#include "nv50.h" 33#include "nv50.h"
34 34
35static inline u32 35static inline u32
36nv94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane) 36nv94_sor_soff(struct dcb_output *outp)
37{ 37{
38 static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */ 38 return (ffs(outp->or) - 1) * 0x800;
39 static const u8 nv94[] = { 16, 8, 0, 24 };
40 if (nv_device(priv)->chipset == 0xaf)
41 return nvaf[lane];
42 return nv94[lane];
43} 39}
44 40
45int 41static inline u32
46nv94_sor_dp_train_init(struct nv50_disp_priv *priv, int or, int link, int head, 42nv94_sor_loff(struct dcb_output *outp)
47 u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
48{ 43{
49 struct nouveau_bios *bios = nouveau_bios(priv); 44 return nv94_sor_soff(outp) + !(outp->sorconf.link & 1) * 0x80;
50 struct nvbios_dpout info;
51 u8 ver, hdr, cnt, len;
52 u16 outp;
53
54 outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
55 if (outp) {
56 struct nvbios_init init = {
57 .subdev = nv_subdev(priv),
58 .bios = bios,
59 .outp = dcbo,
60 .crtc = head,
61 .execute = 1,
62 };
63
64 if (data & NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON)
65 init.offset = info.script[2];
66 else
67 init.offset = info.script[3];
68 nvbios_exec(&init);
69
70 init.offset = info.script[0];
71 nvbios_exec(&init);
72 }
73
74 return 0;
75} 45}
76 46
77int 47static inline u32
78nv94_sor_dp_train_fini(struct nv50_disp_priv *priv, int or, int link, int head, 48nv94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
79 u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
80{ 49{
81 struct nouveau_bios *bios = nouveau_bios(priv); 50 static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
82 struct nvbios_dpout info; 51 static const u8 nv94[] = { 16, 8, 0, 24 };
83 u8 ver, hdr, cnt, len; 52 if (nv_device(priv)->chipset == 0xaf)
84 u16 outp; 53 return nvaf[lane];
85 54 return nv94[lane];
86 outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
87 if (outp) {
88 struct nvbios_init init = {
89 .subdev = nv_subdev(priv),
90 .bios = bios,
91 .offset = info.script[1],
92 .outp = dcbo,
93 .crtc = head,
94 .execute = 1,
95 };
96
97 nvbios_exec(&init);
98 }
99
100 return 0;
101} 55}
102 56
103int 57static int
104nv94_sor_dp_train(struct nv50_disp_priv *priv, int or, int link, 58nv94_sor_dp_pattern(struct nouveau_disp *disp, struct dcb_output *outp,
105 u16 type, u16 mask, u32 data, struct dcb_output *info) 59 int head, int pattern)
106{ 60{
107 const u32 loff = (or * 0x800) + (link * 0x80); 61 struct nv50_disp_priv *priv = (void *)disp;
108 const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN); 62 const u32 loff = nv94_sor_loff(outp);
109 nv_mask(priv, 0x61c10c + loff, 0x0f000000, patt << 24); 63 nv_mask(priv, 0x61c10c + loff, 0x0f000000, pattern << 24);
110 return 0; 64 return 0;
111} 65}
112 66
113int 67static int
114nv94_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head, 68nv94_sor_dp_lnk_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
115 u16 type, u16 mask, u32 data, struct dcb_output *dcbo) 69 int head, int link_nr, int link_bw, bool enh_frame)
116{ 70{
117 struct nouveau_bios *bios = nouveau_bios(priv); 71 struct nv50_disp_priv *priv = (void *)disp;
118 const u32 loff = (or * 0x800) + (link * 0x80); 72 const u32 soff = nv94_sor_soff(outp);
119 const u32 soff = (or * 0x800); 73 const u32 loff = nv94_sor_loff(outp);
120 u16 link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8;
121 u8 link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT);
122 u32 dpctrl = 0x00000000; 74 u32 dpctrl = 0x00000000;
123 u32 clksor = 0x00000000; 75 u32 clksor = 0x00000000;
124 u32 outp, lane = 0; 76 u32 lane = 0;
125 u8 ver, hdr, cnt, len;
126 struct nvbios_dpout info;
127 int i; 77 int i;
128 78
129 /* -> 10Khz units */
130 link_bw *= 2700;
131
132 outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
133 if (outp && info.lnkcmp) {
134 struct nvbios_init init = {
135 .subdev = nv_subdev(priv),
136 .bios = bios,
137 .offset = 0x0000,
138 .outp = dcbo,
139 .crtc = head,
140 .execute = 1,
141 };
142
143 while (link_bw < nv_ro16(bios, info.lnkcmp))
144 info.lnkcmp += 4;
145 init.offset = nv_ro16(bios, info.lnkcmp + 2);
146
147 nvbios_exec(&init);
148 }
149
150 dpctrl |= ((1 << link_nr) - 1) << 16; 79 dpctrl |= ((1 << link_nr) - 1) << 16;
151 if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH) 80 if (enh_frame)
152 dpctrl |= 0x00004000; 81 dpctrl |= 0x00004000;
153 if (link_bw > 16200) 82 if (link_bw > 0x06)
154 clksor |= 0x00040000; 83 clksor |= 0x00040000;
155 84
156 for (i = 0; i < link_nr; i++) 85 for (i = 0; i < link_nr; i++)
@@ -162,24 +91,25 @@ nv94_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
162 return 0; 91 return 0;
163} 92}
164 93
165int 94static int
166nv94_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane, 95nv94_sor_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
167 u16 type, u16 mask, u32 data, struct dcb_output *dcbo) 96 int head, int lane, int swing, int preem)
168{ 97{
169 struct nouveau_bios *bios = nouveau_bios(priv); 98 struct nouveau_bios *bios = nouveau_bios(disp);
170 const u32 loff = (or * 0x800) + (link * 0x80); 99 struct nv50_disp_priv *priv = (void *)disp;
171 const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8; 100 const u32 loff = nv94_sor_loff(outp);
172 const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE);
173 u32 addr, shift = nv94_sor_dp_lane_map(priv, lane); 101 u32 addr, shift = nv94_sor_dp_lane_map(priv, lane);
174 u8 ver, hdr, cnt, len; 102 u8 ver, hdr, cnt, len;
175 struct nvbios_dpout outp; 103 struct nvbios_dpout info;
176 struct nvbios_dpcfg ocfg; 104 struct nvbios_dpcfg ocfg;
177 105
178 addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp); 106 addr = nvbios_dpout_match(bios, outp->hasht, outp->hashm,
107 &ver, &hdr, &cnt, &len, &info);
179 if (!addr) 108 if (!addr)
180 return -ENODEV; 109 return -ENODEV;
181 110
182 addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg); 111 addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem,
112 &ver, &hdr, &cnt, &len, &ocfg);
183 if (!addr) 113 if (!addr)
184 return -EINVAL; 114 return -EINVAL;
185 115
@@ -188,3 +118,10 @@ nv94_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
188 nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8); 118 nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
189 return 0; 119 return 0;
190} 120}
121
122const struct nouveau_dp_func
123nv94_sor_dp_func = {
124 .pattern = nv94_sor_dp_pattern,
125 .lnk_ctl = nv94_sor_dp_lnk_ctl,
126 .drv_ctl = nv94_sor_dp_drv_ctl,
127};
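The lnk_ctl conversion above also changes what link_bw means: it is now the raw DPCD LINK_BW_SET code rather than a pre-scaled rate. The codes count in 0.27 Gb/s steps, and with 8b/10b encoding the symbol clock is one tenth of the bit rate, which is exactly what the deleted "-> 10Khz units" conversion computed:

    /* DPCD code -> symbol clock in the old 10 kHz units */
    u32 symclk_10khz = link_bw * 2700;
    /* 0x06 -> 16200 (162 MHz symbol clock, 1.62 Gb/s per lane)
     * 0x0a -> 27000 (270 MHz symbol clock, 2.70 Gb/s per lane) */

So the old "link_bw > 16200" threshold and the new "link_bw > 0x06" test select the high-rate clock source for exactly the same inputs.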
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
index c37ce7e29f5d..9e1d435d7282 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
@@ -33,59 +33,49 @@
33#include "nv50.h" 33#include "nv50.h"
34 34
35static inline u32 35static inline u32
36nvd0_sor_soff(struct dcb_output *outp)
37{
38 return (ffs(outp->or) - 1) * 0x800;
39}
40
41static inline u32
42nvd0_sor_loff(struct dcb_output *outp)
43{
44 return nvd0_sor_soff(outp) + !(outp->sorconf.link & 1) * 0x80;
45}
46
47static inline u32
36nvd0_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane) 48nvd0_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
37{ 49{
38 static const u8 nvd0[] = { 16, 8, 0, 24 }; 50 static const u8 nvd0[] = { 16, 8, 0, 24 };
39 return nvd0[lane]; 51 return nvd0[lane];
40} 52}
41 53
42int 54static int
43nvd0_sor_dp_train(struct nv50_disp_priv *priv, int or, int link, 55nvd0_sor_dp_pattern(struct nouveau_disp *disp, struct dcb_output *outp,
44 u16 type, u16 mask, u32 data, struct dcb_output *info) 56 int head, int pattern)
45{ 57{
46 const u32 loff = (or * 0x800) + (link * 0x80); 58 struct nv50_disp_priv *priv = (void *)disp;
47 const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN); 59 const u32 loff = nvd0_sor_loff(outp);
48 nv_mask(priv, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * patt); 60 nv_mask(priv, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
49 return 0; 61 return 0;
50} 62}
51 63
52int 64static int
53nvd0_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head, 65nvd0_sor_dp_lnk_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
54 u16 type, u16 mask, u32 data, struct dcb_output *dcbo) 66 int head, int link_nr, int link_bw, bool enh_frame)
55{ 67{
56 struct nouveau_bios *bios = nouveau_bios(priv); 68 struct nv50_disp_priv *priv = (void *)disp;
57 const u32 loff = (or * 0x800) + (link * 0x80); 69 const u32 soff = nvd0_sor_soff(outp);
58 const u32 soff = (or * 0x800); 70 const u32 loff = nvd0_sor_loff(outp);
59 const u8 link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8;
60 const u8 link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT);
61 u32 dpctrl = 0x00000000; 71 u32 dpctrl = 0x00000000;
62 u32 clksor = 0x00000000; 72 u32 clksor = 0x00000000;
63 u32 outp, lane = 0; 73 u32 lane = 0;
64 u8 ver, hdr, cnt, len;
65 struct nvbios_dpout info;
66 int i; 74 int i;
67 75
68 outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
69 if (outp && info.lnkcmp) {
70 struct nvbios_init init = {
71 .subdev = nv_subdev(priv),
72 .bios = bios,
73 .offset = 0x0000,
74 .outp = dcbo,
75 .crtc = head,
76 .execute = 1,
77 };
78
79 while (nv_ro08(bios, info.lnkcmp) < link_bw)
80 info.lnkcmp += 3;
81 init.offset = nv_ro16(bios, info.lnkcmp + 1);
82
83 nvbios_exec(&init);
84 }
85
86 clksor |= link_bw << 18; 76 clksor |= link_bw << 18;
87 dpctrl |= ((1 << link_nr) - 1) << 16; 77 dpctrl |= ((1 << link_nr) - 1) << 16;
88 if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH) 78 if (enh_frame)
89 dpctrl |= 0x00004000; 79 dpctrl |= 0x00004000;
90 80
91 for (i = 0; i < link_nr; i++) 81 for (i = 0; i < link_nr; i++)
@@ -97,24 +87,25 @@ nvd0_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
97 return 0; 87 return 0;
98} 88}
99 89
100int 90static int
101nvd0_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane, 91nvd0_sor_dp_drv_ctl(struct nouveau_disp *disp, struct dcb_output *outp,
102 u16 type, u16 mask, u32 data, struct dcb_output *dcbo) 92 int head, int lane, int swing, int preem)
103{ 93{
104 struct nouveau_bios *bios = nouveau_bios(priv); 94 struct nouveau_bios *bios = nouveau_bios(disp);
105 const u32 loff = (or * 0x800) + (link * 0x80); 95 struct nv50_disp_priv *priv = (void *)disp;
106 const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8; 96 const u32 loff = nvd0_sor_loff(outp);
107 const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE);
108 u32 addr, shift = nvd0_sor_dp_lane_map(priv, lane); 97 u32 addr, shift = nvd0_sor_dp_lane_map(priv, lane);
109 u8 ver, hdr, cnt, len; 98 u8 ver, hdr, cnt, len;
110 struct nvbios_dpout outp; 99 struct nvbios_dpout info;
111 struct nvbios_dpcfg ocfg; 100 struct nvbios_dpcfg ocfg;
112 101
113 addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp); 102 addr = nvbios_dpout_match(bios, outp->hasht, outp->hashm,
103 &ver, &hdr, &cnt, &len, &info);
114 if (!addr) 104 if (!addr)
115 return -ENODEV; 105 return -ENODEV;
116 106
117 addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg); 107 addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem,
108 &ver, &hdr, &cnt, &len, &ocfg);
118 if (!addr) 109 if (!addr)
119 return -EINVAL; 110 return -EINVAL;
120 111
@@ -124,3 +115,10 @@ nvd0_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
124 nv_mask(priv, 0x61c13c + loff, 0x00000000, 0x00000000); 115 nv_mask(priv, 0x61c13c + loff, 0x00000000, 0x00000000);
125 return 0; 116 return 0;
126} 117}
118
119const struct nouveau_dp_func
120nvd0_sor_dp_func = {
121 .pattern = nvd0_sor_dp_pattern,
122 .lnk_ctl = nvd0_sor_dp_lnk_ctl,
123 .drv_ctl = nvd0_sor_dp_drv_ctl,
124};
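Both sornv94.c and sornvd0.c gain the same pair of helpers for locating an OR's registers from the DCB entry, replacing the or/link parameters that used to be threaded through every call. How the arithmetic reads, with a worked value (the sublink interpretation is inferred from the "& 1" test):

    int or   = ffs(outp->or) - 1;       /* or is a one-hot mask: 0x4 -> SOR2 */
    u32 soff = or * 0x800;              /* each SOR owns a 0x800 window: 0x1000 */
    u32 loff = soff + !(outp->sorconf.link & 1) * 0x80;
                                        /* two sublinks 0x80 apart; link bit 0
                                         * set selects the first, otherwise
                                         * the second (+0x80) */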
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
index c2b9db335816..7341ebe131fa 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
@@ -22,8 +22,10 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/client.h>
25#include <core/object.h> 26#include <core/object.h>
26#include <core/handle.h> 27#include <core/handle.h>
28#include <core/event.h>
27#include <core/class.h> 29#include <core/class.h>
28 30
29#include <engine/dmaobj.h> 31#include <engine/dmaobj.h>
@@ -146,10 +148,25 @@ nouveau_fifo_chid(struct nouveau_fifo *priv, struct nouveau_object *object)
146 return -1; 148 return -1;
147} 149}
148 150
151const char *
152nouveau_client_name_for_fifo_chid(struct nouveau_fifo *fifo, u32 chid)
153{
154 struct nouveau_fifo_chan *chan = NULL;
155 unsigned long flags;
156
157 spin_lock_irqsave(&fifo->lock, flags);
158 if (chid >= fifo->min && chid <= fifo->max)
159 chan = (void *)fifo->channel[chid];
160 spin_unlock_irqrestore(&fifo->lock, flags);
161
162 return nouveau_client_name(chan);
163}
164
149void 165void
150nouveau_fifo_destroy(struct nouveau_fifo *priv) 166nouveau_fifo_destroy(struct nouveau_fifo *priv)
151{ 167{
152 kfree(priv->channel); 168 kfree(priv->channel);
169 nouveau_event_destroy(&priv->uevent);
153 nouveau_engine_destroy(&priv->base); 170 nouveau_engine_destroy(&priv->base);
154} 171}
155 172
@@ -174,6 +191,10 @@ nouveau_fifo_create_(struct nouveau_object *parent,
174 if (!priv->channel) 191 if (!priv->channel)
175 return -ENOMEM; 192 return -ENOMEM;
176 193
194 ret = nouveau_event_create(1, &priv->uevent);
195 if (ret)
196 return ret;
197
177 priv->chid = nouveau_fifo_chid; 198 priv->chid = nouveau_fifo_chid;
178 spin_lock_init(&priv->lock); 199 spin_lock_init(&priv->lock);
179 return 0; 200 return 0;
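The fifo base now owns a one-index event (uevent, torn down in destroy above) and a helper that resolves a channel id to its owner's name for diagnostics, reading the channel table under fifo->lock. Its typical caller looks like the nv04 handlers below:

    const char *name = nouveau_client_name_for_fifo_chid(&priv->base, chid);
    nv_error(priv, "CACHE_ERROR - ch %d [%s] ...\n", chid, name);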
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index a47a8548f9e0..f877bd524a92 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -28,6 +28,7 @@
28#include <core/namedb.h> 28#include <core/namedb.h>
29#include <core/handle.h> 29#include <core/handle.h>
30#include <core/ramht.h> 30#include <core/ramht.h>
31#include <core/event.h>
31 32
32#include <subdev/instmem.h> 33#include <subdev/instmem.h>
33#include <subdev/instmem/nv04.h> 34#include <subdev/instmem/nv04.h>
@@ -398,6 +399,98 @@ out:
398 return handled; 399 return handled;
399} 400}
400 401
402static void
403nv04_fifo_cache_error(struct nouveau_device *device,
404 struct nv04_fifo_priv *priv, u32 chid, u32 get)
405{
406 u32 mthd, data;
407 int ptr;
408
409 /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on my
410 * G80 chips, but CACHE1 isn't big enough for this much data.. Tests
411 * show that it wraps around to the start at GET=0x800.. No clue as to
412 * why..
413 */
414 ptr = (get & 0x7ff) >> 2;
415
416 if (device->card_type < NV_40) {
417 mthd = nv_rd32(priv, NV04_PFIFO_CACHE1_METHOD(ptr));
418 data = nv_rd32(priv, NV04_PFIFO_CACHE1_DATA(ptr));
419 } else {
420 mthd = nv_rd32(priv, NV40_PFIFO_CACHE1_METHOD(ptr));
421 data = nv_rd32(priv, NV40_PFIFO_CACHE1_DATA(ptr));
422 }
423
424 if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
425 const char *client_name =
426 nouveau_client_name_for_fifo_chid(&priv->base, chid);
427 nv_error(priv,
428 "CACHE_ERROR - ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
429 chid, client_name, (mthd >> 13) & 7, mthd & 0x1ffc,
430 data);
431 }
432
433 nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
434 nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
435
436 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
437 nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) & ~1);
438 nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
439 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
440 nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) | 1);
441 nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0);
442
443 nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH,
444 nv_rd32(priv, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
445 nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
446}
447
448static void
449nv04_fifo_dma_pusher(struct nouveau_device *device, struct nv04_fifo_priv *priv,
450 u32 chid)
451{
452 const char *client_name;
453 u32 dma_get = nv_rd32(priv, 0x003244);
454 u32 dma_put = nv_rd32(priv, 0x003240);
455 u32 push = nv_rd32(priv, 0x003220);
456 u32 state = nv_rd32(priv, 0x003228);
457
458 client_name = nouveau_client_name_for_fifo_chid(&priv->base, chid);
459
460 if (device->card_type == NV_50) {
461 u32 ho_get = nv_rd32(priv, 0x003328);
462 u32 ho_put = nv_rd32(priv, 0x003320);
463 u32 ib_get = nv_rd32(priv, 0x003334);
464 u32 ib_put = nv_rd32(priv, 0x003330);
465
466 nv_error(priv,
467 "DMA_PUSHER - ch %d [%s] get 0x%02x%08x put 0x%02x%08x ib_get 0x%08x ib_put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
468 chid, client_name, ho_get, dma_get, ho_put, dma_put,
469 ib_get, ib_put, state, nv_dma_state_err(state), push);
470
471 /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
472 nv_wr32(priv, 0x003364, 0x00000000);
473 if (dma_get != dma_put || ho_get != ho_put) {
474 nv_wr32(priv, 0x003244, dma_put);
475 nv_wr32(priv, 0x003328, ho_put);
476 } else
477 if (ib_get != ib_put)
478 nv_wr32(priv, 0x003334, ib_put);
479 } else {
480 nv_error(priv,
481 "DMA_PUSHER - ch %d [%s] get 0x%08x put 0x%08x state 0x%08x (err: %s) push 0x%08x\n",
482 chid, client_name, dma_get, dma_put, state,
483 nv_dma_state_err(state), push);
484
485 if (dma_get != dma_put)
486 nv_wr32(priv, 0x003244, dma_put);
487 }
488
489 nv_wr32(priv, 0x003228, 0x00000000);
490 nv_wr32(priv, 0x003220, 0x00000001);
491 nv_wr32(priv, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
492}
493
401void 494void
402nv04_fifo_intr(struct nouveau_subdev *subdev) 495nv04_fifo_intr(struct nouveau_subdev *subdev)
403{ 496{
@@ -416,96 +509,12 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
416 get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET); 509 get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET);
417 510
418 if (status & NV_PFIFO_INTR_CACHE_ERROR) { 511 if (status & NV_PFIFO_INTR_CACHE_ERROR) {
419 uint32_t mthd, data; 512 nv04_fifo_cache_error(device, priv, chid, get);
420 int ptr;
421
422 /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
423 * wrapping on my G80 chips, but CACHE1 isn't big
424 * enough for this much data.. Tests show that it
425 * wraps around to the start at GET=0x800.. No clue
426 * as to why..
427 */
428 ptr = (get & 0x7ff) >> 2;
429
430 if (device->card_type < NV_40) {
431 mthd = nv_rd32(priv,
432 NV04_PFIFO_CACHE1_METHOD(ptr));
433 data = nv_rd32(priv,
434 NV04_PFIFO_CACHE1_DATA(ptr));
435 } else {
436 mthd = nv_rd32(priv,
437 NV40_PFIFO_CACHE1_METHOD(ptr));
438 data = nv_rd32(priv,
439 NV40_PFIFO_CACHE1_DATA(ptr));
440 }
441
442 if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
443 nv_error(priv, "CACHE_ERROR - Ch %d/%d "
444 "Mthd 0x%04x Data 0x%08x\n",
445 chid, (mthd >> 13) & 7, mthd & 0x1ffc,
446 data);
447 }
448
449 nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
450 nv_wr32(priv, NV03_PFIFO_INTR_0,
451 NV_PFIFO_INTR_CACHE_ERROR);
452
453 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
454 nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) & ~1);
455 nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4);
456 nv_wr32(priv, NV03_PFIFO_CACHE1_PUSH0,
457 nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH0) | 1);
458 nv_wr32(priv, NV04_PFIFO_CACHE1_HASH, 0);
459
460 nv_wr32(priv, NV04_PFIFO_CACHE1_DMA_PUSH,
461 nv_rd32(priv, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
462 nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1);
463
464 status &= ~NV_PFIFO_INTR_CACHE_ERROR; 513 status &= ~NV_PFIFO_INTR_CACHE_ERROR;
465 } 514 }
466 515
467 if (status & NV_PFIFO_INTR_DMA_PUSHER) { 516 if (status & NV_PFIFO_INTR_DMA_PUSHER) {
468 u32 dma_get = nv_rd32(priv, 0x003244); 517 nv04_fifo_dma_pusher(device, priv, chid);
469 u32 dma_put = nv_rd32(priv, 0x003240);
470 u32 push = nv_rd32(priv, 0x003220);
471 u32 state = nv_rd32(priv, 0x003228);
472
473 if (device->card_type == NV_50) {
474 u32 ho_get = nv_rd32(priv, 0x003328);
475 u32 ho_put = nv_rd32(priv, 0x003320);
476 u32 ib_get = nv_rd32(priv, 0x003334);
477 u32 ib_put = nv_rd32(priv, 0x003330);
478
479 nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
480 "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
481 "State 0x%08x (err: %s) Push 0x%08x\n",
482 chid, ho_get, dma_get, ho_put,
483 dma_put, ib_get, ib_put, state,
484 nv_dma_state_err(state),
485 push);
486
487 /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
488 nv_wr32(priv, 0x003364, 0x00000000);
489 if (dma_get != dma_put || ho_get != ho_put) {
490 nv_wr32(priv, 0x003244, dma_put);
491 nv_wr32(priv, 0x003328, ho_put);
492 } else
493 if (ib_get != ib_put) {
494 nv_wr32(priv, 0x003334, ib_put);
495 }
496 } else {
497 nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%08x "
498 "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
499 chid, dma_get, dma_put, state,
500 nv_dma_state_err(state), push);
501
502 if (dma_get != dma_put)
503 nv_wr32(priv, 0x003244, dma_put);
504 }
505
506 nv_wr32(priv, 0x003228, 0x00000000);
507 nv_wr32(priv, 0x003220, 0x00000001);
508 nv_wr32(priv, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
509 status &= ~NV_PFIFO_INTR_DMA_PUSHER; 518 status &= ~NV_PFIFO_INTR_DMA_PUSHER;
510 } 519 }
511 520
@@ -528,6 +537,12 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
528 status &= ~0x00000010; 537 status &= ~0x00000010;
529 nv_wr32(priv, 0x002100, 0x00000010); 538 nv_wr32(priv, 0x002100, 0x00000010);
530 } 539 }
540
541 if (status & 0x40000000) {
542 nouveau_event_trigger(priv->base.uevent, 0);
543 nv_wr32(priv, 0x002100, 0x40000000);
544 status &= ~0x40000000;
545 }
531 } 546 }
532 547
533 if (status) { 548 if (status) {
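
The nv04.c hunks pull the CACHE_ERROR and DMA_PUSHER recovery paths out of nv04_fifo_intr() into nv04_fifo_cache_error() and nv04_fifo_dma_pusher(), and teach the handler one new bit: 0x40000000 now fires nouveau_event_trigger() on the FIFO uevent. The skeleton below models the decode/ack/strip shape all of these interrupt handlers share; constants and names are illustrative, not the hardware's.

#include <stdio.h>

#define INTR_DMA_PUSHER 0x00000010u
#define INTR_UEVENT     0x40000000u

static unsigned int intr_reg = INTR_DMA_PUSHER | INTR_UEVENT | 0x00800000u;

static void ack(unsigned int bit) { intr_reg &= ~bit; /* cf. nv_wr32 0x002100 */ }

static void fifo_intr(void)
{
	unsigned int stat = intr_reg;   /* cf. nv_rd32(0x002100) & enable mask */

	if (stat & INTR_DMA_PUSHER) {
		puts("DMA_PUSHER: log state, skip past the bad fetch");
		ack(INTR_DMA_PUSHER);
		stat &= ~INTR_DMA_PUSHER;  /* strip what we understood */
	}
	if (stat & INTR_UEVENT) {
		puts("uevent: trigger event, wake sleepers");
		ack(INTR_UEVENT);
		stat &= ~INTR_UEVENT;
	}
	if (stat)                       /* anything left is a surprise */
		printf("unhandled status 0x%08x\n", stat);
}

int main(void) { fifo_intr(); return 0; }
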
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index bd096364f680..840af6172788 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -129,7 +129,8 @@ nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
129 /* do the kickoff... */ 129 /* do the kickoff... */
130 nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12); 130 nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
131 if (!nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff)) { 131 if (!nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff)) {
132 nv_error(priv, "channel %d unload timeout\n", chan->base.chid); 132 nv_error(priv, "channel %d [%s] unload timeout\n",
133 chan->base.chid, nouveau_client_name(chan));
133 if (suspend) 134 if (suspend)
134 ret = -EBUSY; 135 ret = -EBUSY;
135 } 136 }
@@ -480,7 +481,7 @@ nv50_fifo_init(struct nouveau_object *object)
480 nv_wr32(priv, 0x002044, 0x01003fff); 481 nv_wr32(priv, 0x002044, 0x01003fff);
481 482
482 nv_wr32(priv, 0x002100, 0xffffffff); 483 nv_wr32(priv, 0x002100, 0xffffffff);
483 nv_wr32(priv, 0x002140, 0xffffffff); 484 nv_wr32(priv, 0x002140, 0xbfffffff);
484 485
485 for (i = 0; i < 128; i++) 486 for (i = 0; i < 128; i++)
486 nv_wr32(priv, 0x002600 + (i * 4), 0x00000000); 487 nv_wr32(priv, 0x002600 + (i * 4), 0x00000000);
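
Besides the richer unload-timeout message, the one functional change to nv50.c is the init value of the interrupt-enable register 0x002140: 0xffffffff becomes 0xbfffffff, i.e. everything except bit 30. Bit 30 is the uevent interrupt handled above, so it now stays masked until a listener asks for it through the enable hook (likely the fence wait path). A two-line check of the arithmetic, purely illustrative:

#include <stdio.h>

int main(void)
{
	unsigned int all = 0xffffffffu;
	unsigned int uevent = 0x40000000u;       /* bit 30 */
	printf("0x%08x\n", all & ~uevent);       /* prints 0xbfffffff */
	return 0;
}
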
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index 1eb1c512f503..094000e87871 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -26,6 +26,7 @@
26#include <core/client.h> 26#include <core/client.h>
27#include <core/engctx.h> 27#include <core/engctx.h>
28#include <core/ramht.h> 28#include <core/ramht.h>
29#include <core/event.h>
29#include <core/class.h> 30#include <core/class.h>
30#include <core/math.h> 31#include <core/math.h>
31 32
@@ -100,7 +101,8 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
100 done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff); 101 done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff);
101 nv_wr32(priv, 0x002520, save); 102 nv_wr32(priv, 0x002520, save);
102 if (!done) { 103 if (!done) {
103 nv_error(priv, "channel %d unload timeout\n", chan->base.chid); 104 nv_error(priv, "channel %d [%s] unload timeout\n",
105 chan->base.chid, nouveau_client_name(chan));
104 if (suspend) 106 if (suspend)
105 return -EBUSY; 107 return -EBUSY;
106 } 108 }
@@ -378,6 +380,20 @@ nv84_fifo_cclass = {
378 * PFIFO engine 380 * PFIFO engine
379 ******************************************************************************/ 381 ******************************************************************************/
380 382
383static void
384nv84_fifo_uevent_enable(struct nouveau_event *event, int index)
385{
386 struct nv84_fifo_priv *priv = event->priv;
387 nv_mask(priv, 0x002140, 0x40000000, 0x40000000);
388}
389
390static void
391nv84_fifo_uevent_disable(struct nouveau_event *event, int index)
392{
393 struct nv84_fifo_priv *priv = event->priv;
394 nv_mask(priv, 0x002140, 0x40000000, 0x00000000);
395}
396
381static int 397static int
382nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 398nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
383 struct nouveau_oclass *oclass, void *data, u32 size, 399 struct nouveau_oclass *oclass, void *data, u32 size,
@@ -401,6 +417,10 @@ nv84_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
401 if (ret) 417 if (ret)
402 return ret; 418 return ret;
403 419
420 priv->base.uevent->enable = nv84_fifo_uevent_enable;
421 priv->base.uevent->disable = nv84_fifo_uevent_disable;
422 priv->base.uevent->priv = priv;
423
404 nv_subdev(priv)->unit = 0x00000100; 424 nv_subdev(priv)->unit = 0x00000100;
405 nv_subdev(priv)->intr = nv04_fifo_intr; 425 nv_subdev(priv)->intr = nv04_fifo_intr;
406 nv_engine(priv)->cclass = &nv84_fifo_cclass; 426 nv_engine(priv)->cclass = &nv84_fifo_cclass;
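
nv84.c supplies the first concrete enable/disable pair for the uevent: each is a single nv_mask() on bit 30 of 0x002140. nv_mask() is a read-modify-write that returns the previous register value. A stand-alone model of that helper and of the two hooks, with the register replaced by a plain variable; names are ours:

#include <stdio.h>

/* toy nv_mask(): read-modify-write, returns the previous value */
static unsigned int toy_mask(unsigned int *reg, unsigned int mask,
			     unsigned int val)
{
	unsigned int old = *reg;
	*reg = (old & ~mask) | val;
	return old;
}

int main(void)
{
	unsigned int intr_en = 0xbfffffff;   /* post-init: uevent masked */

	toy_mask(&intr_en, 0x40000000, 0x40000000);   /* uevent_enable  */
	printf("enabled:  0x%08x\n", intr_en);        /* 0xffffffff */

	toy_mask(&intr_en, 0x40000000, 0x00000000);   /* uevent_disable */
	printf("disabled: 0x%08x\n", intr_en);        /* 0xbfffffff */
	return 0;
}
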
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index b4365dde1859..4f226afb5591 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -27,6 +27,7 @@
27#include <core/namedb.h> 27#include <core/namedb.h>
28#include <core/gpuobj.h> 28#include <core/gpuobj.h>
29#include <core/engctx.h> 29#include <core/engctx.h>
30#include <core/event.h>
30#include <core/class.h> 31#include <core/class.h>
31#include <core/math.h> 32#include <core/math.h>
32#include <core/enum.h> 33#include <core/enum.h>
@@ -149,7 +150,8 @@ nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
149 150
150 nv_wr32(priv, 0x002634, chan->base.chid); 151 nv_wr32(priv, 0x002634, chan->base.chid);
151 if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) { 152 if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
152 nv_error(priv, "channel %d kick timeout\n", chan->base.chid); 153 nv_error(priv, "channel %d [%s] kick timeout\n",
154 chan->base.chid, nouveau_client_name(chan));
153 if (suspend) 155 if (suspend)
154 return -EBUSY; 156 return -EBUSY;
155 } 157 }
@@ -333,17 +335,17 @@ nvc0_fifo_cclass = {
333 ******************************************************************************/ 335 ******************************************************************************/
334 336
335static const struct nouveau_enum nvc0_fifo_fault_unit[] = { 337static const struct nouveau_enum nvc0_fifo_fault_unit[] = {
336 { 0x00, "PGRAPH" }, 338 { 0x00, "PGRAPH", NULL, NVDEV_ENGINE_GR },
337 { 0x03, "PEEPHOLE" }, 339 { 0x03, "PEEPHOLE" },
338 { 0x04, "BAR1" }, 340 { 0x04, "BAR1" },
339 { 0x05, "BAR3" }, 341 { 0x05, "BAR3" },
340 { 0x07, "PFIFO" }, 342 { 0x07, "PFIFO", NULL, NVDEV_ENGINE_FIFO },
341 { 0x10, "PBSP" }, 343 { 0x10, "PBSP", NULL, NVDEV_ENGINE_BSP },
342 { 0x11, "PPPP" }, 344 { 0x11, "PPPP", NULL, NVDEV_ENGINE_PPP },
343 { 0x13, "PCOUNTER" }, 345 { 0x13, "PCOUNTER" },
344 { 0x14, "PVP" }, 346 { 0x14, "PVP", NULL, NVDEV_ENGINE_VP },
345 { 0x15, "PCOPY0" }, 347 { 0x15, "PCOPY0", NULL, NVDEV_ENGINE_COPY0 },
346 { 0x16, "PCOPY1" }, 348 { 0x16, "PCOPY1", NULL, NVDEV_ENGINE_COPY1 },
347 { 0x17, "PDAEMON" }, 349 { 0x17, "PDAEMON" },
348 {} 350 {}
349}; 351};
@@ -402,6 +404,9 @@ nvc0_fifo_isr_vm_fault(struct nvc0_fifo_priv *priv, int unit)
402 u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10)); 404 u32 vahi = nv_rd32(priv, 0x002808 + (unit * 0x10));
403 u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10)); 405 u32 stat = nv_rd32(priv, 0x00280c + (unit * 0x10));
404 u32 client = (stat & 0x00001f00) >> 8; 406 u32 client = (stat & 0x00001f00) >> 8;
407 const struct nouveau_enum *en;
408 struct nouveau_engine *engine;
409 struct nouveau_object *engctx = NULL;
405 410
406 switch (unit) { 411 switch (unit) {
407 case 3: /* PEEPHOLE */ 412 case 3: /* PEEPHOLE */
@@ -420,16 +425,26 @@ nvc0_fifo_isr_vm_fault(struct nvc0_fifo_priv *priv, int unit)
420 nv_error(priv, "%s fault at 0x%010llx [", (stat & 0x00000080) ? 425 nv_error(priv, "%s fault at 0x%010llx [", (stat & 0x00000080) ?
421 "write" : "read", (u64)vahi << 32 | valo); 426 "write" : "read", (u64)vahi << 32 | valo);
422 nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f); 427 nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
423 printk("] from "); 428 pr_cont("] from ");
424 nouveau_enum_print(nvc0_fifo_fault_unit, unit); 429 en = nouveau_enum_print(nvc0_fifo_fault_unit, unit);
425 if (stat & 0x00000040) { 430 if (stat & 0x00000040) {
426 printk("/"); 431 pr_cont("/");
427 nouveau_enum_print(nvc0_fifo_fault_hubclient, client); 432 nouveau_enum_print(nvc0_fifo_fault_hubclient, client);
428 } else { 433 } else {
429 printk("/GPC%d/", (stat & 0x1f000000) >> 24); 434 pr_cont("/GPC%d/", (stat & 0x1f000000) >> 24);
430 nouveau_enum_print(nvc0_fifo_fault_gpcclient, client); 435 nouveau_enum_print(nvc0_fifo_fault_gpcclient, client);
431 } 436 }
432 printk(" on channel 0x%010llx\n", (u64)inst << 12); 437
438 if (en && en->data2) {
439 engine = nouveau_engine(priv, en->data2);
440 if (engine)
441 engctx = nouveau_engctx_get(engine, inst);
442
443 }
444 pr_cont(" on channel 0x%010llx [%s]\n", (u64)inst << 12,
445 nouveau_client_name(engctx));
446
447 nouveau_engctx_put(engctx);
433} 448}
434 449
435static int 450static int
@@ -484,10 +499,12 @@ nvc0_fifo_isr_subfifo_intr(struct nvc0_fifo_priv *priv, int unit)
484 if (show) { 499 if (show) {
485 nv_error(priv, "SUBFIFO%d:", unit); 500 nv_error(priv, "SUBFIFO%d:", unit);
486 nouveau_bitfield_print(nvc0_fifo_subfifo_intr, show); 501 nouveau_bitfield_print(nvc0_fifo_subfifo_intr, show);
487 printk("\n"); 502 pr_cont("\n");
488 nv_error(priv, "SUBFIFO%d: ch %d subc %d mthd 0x%04x " 503 nv_error(priv,
489 "data 0x%08x\n", 504 "SUBFIFO%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
490 unit, chid, subc, mthd, data); 505 unit, chid,
506 nouveau_client_name_for_fifo_chid(&priv->base, chid),
507 subc, mthd, data);
491 } 508 }
492 509
493 nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008); 510 nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
@@ -501,12 +518,34 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
501 u32 mask = nv_rd32(priv, 0x002140); 518 u32 mask = nv_rd32(priv, 0x002140);
502 u32 stat = nv_rd32(priv, 0x002100) & mask; 519 u32 stat = nv_rd32(priv, 0x002100) & mask;
503 520
521 if (stat & 0x00000001) {
522 u32 intr = nv_rd32(priv, 0x00252c);
523 nv_warn(priv, "INTR 0x00000001: 0x%08x\n", intr);
524 nv_wr32(priv, 0x002100, 0x00000001);
525 stat &= ~0x00000001;
526 }
527
504 if (stat & 0x00000100) { 528 if (stat & 0x00000100) {
505 nv_warn(priv, "unknown status 0x00000100\n"); 529 u32 intr = nv_rd32(priv, 0x00254c);
530 nv_warn(priv, "INTR 0x00000100: 0x%08x\n", intr);
506 nv_wr32(priv, 0x002100, 0x00000100); 531 nv_wr32(priv, 0x002100, 0x00000100);
507 stat &= ~0x00000100; 532 stat &= ~0x00000100;
508 } 533 }
509 534
535 if (stat & 0x00010000) {
536 u32 intr = nv_rd32(priv, 0x00256c);
537 nv_warn(priv, "INTR 0x00010000: 0x%08x\n", intr);
538 nv_wr32(priv, 0x002100, 0x00010000);
539 stat &= ~0x00010000;
540 }
541
542 if (stat & 0x01000000) {
543 u32 intr = nv_rd32(priv, 0x00258c);
544 nv_warn(priv, "INTR 0x01000000: 0x%08x\n", intr);
545 nv_wr32(priv, 0x002100, 0x01000000);
546 stat &= ~0x01000000;
547 }
548
510 if (stat & 0x10000000) { 549 if (stat & 0x10000000) {
511 u32 units = nv_rd32(priv, 0x00259c); 550 u32 units = nv_rd32(priv, 0x00259c);
512 u32 u = units; 551 u32 u = units;
@@ -536,11 +575,20 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
536 } 575 }
537 576
538 if (stat & 0x40000000) { 577 if (stat & 0x40000000) {
539 nv_warn(priv, "unknown status 0x40000000\n"); 578 u32 intr0 = nv_rd32(priv, 0x0025a4);
540 nv_mask(priv, 0x002a00, 0x00000000, 0x00000000); 579 u32 intr1 = nv_mask(priv, 0x002a00, 0x00000000, 0x00000000);
580 nv_debug(priv, "INTR 0x40000000: 0x%08x 0x%08x\n",
581 intr0, intr1);
541 stat &= ~0x40000000; 582 stat &= ~0x40000000;
542 } 583 }
543 584
585 if (stat & 0x80000000) {
586 u32 intr = nv_mask(priv, 0x0025a8, 0x00000000, 0x00000000);
587 nouveau_event_trigger(priv->base.uevent, 0);
588 nv_debug(priv, "INTR 0x80000000: 0x%08x\n", intr);
589 stat &= ~0x80000000;
590 }
591
544 if (stat) { 592 if (stat) {
545 nv_fatal(priv, "unhandled status 0x%08x\n", stat); 593 nv_fatal(priv, "unhandled status 0x%08x\n", stat);
546 nv_wr32(priv, 0x002100, stat); 594 nv_wr32(priv, 0x002100, stat);
@@ -548,6 +596,20 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
548 } 596 }
549} 597}
550 598
599static void
600nvc0_fifo_uevent_enable(struct nouveau_event *event, int index)
601{
602 struct nvc0_fifo_priv *priv = event->priv;
603 nv_mask(priv, 0x002140, 0x80000000, 0x80000000);
604}
605
606static void
607nvc0_fifo_uevent_disable(struct nouveau_event *event, int index)
608{
609 struct nvc0_fifo_priv *priv = event->priv;
610 nv_mask(priv, 0x002140, 0x80000000, 0x00000000);
611}
612
551static int 613static int
552nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 614nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
553 struct nouveau_oclass *oclass, void *data, u32 size, 615 struct nouveau_oclass *oclass, void *data, u32 size,
@@ -581,6 +643,10 @@ nvc0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
581 if (ret) 643 if (ret)
582 return ret; 644 return ret;
583 645
646 priv->base.uevent->enable = nvc0_fifo_uevent_enable;
647 priv->base.uevent->disable = nvc0_fifo_uevent_disable;
648 priv->base.uevent->priv = priv;
649
584 nv_subdev(priv)->unit = 0x00000100; 650 nv_subdev(priv)->unit = 0x00000100;
585 nv_subdev(priv)->intr = nvc0_fifo_intr; 651 nv_subdev(priv)->intr = nvc0_fifo_intr;
586 nv_engine(priv)->cclass = &nvc0_fifo_cclass; 652 nv_engine(priv)->cclass = &nvc0_fifo_cclass;
@@ -639,7 +705,8 @@ nvc0_fifo_init(struct nouveau_object *object)
639 705
640 nv_wr32(priv, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */ 706 nv_wr32(priv, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
641 nv_wr32(priv, 0x002100, 0xffffffff); 707 nv_wr32(priv, 0x002100, 0xffffffff);
642 nv_wr32(priv, 0x002140, 0xbfffffff); 708 nv_wr32(priv, 0x002140, 0x3fffffff);
709 nv_wr32(priv, 0x002628, 0x00000001); /* makes mthd 0x20 work */
643 return 0; 710 return 0;
644} 711}
645 712
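
Three themes run through the nvc0.c hunks: bare printk() continuations become pr_cont(); several "unknown status" branches now read and report their per-bit interrupt registers instead of guessing; and the fault-unit enum entries grow an engine identifier (the NVDEV_ENGINE_* value stored in data2), which lets the fault handler resolve the engine context and name the offending client. A sketch of that annotated-enum lookup, with invented table contents:

#include <stdio.h>

struct toy_enum {
	unsigned int value;
	const char *name;
	int engine;                /* 0 = none; stands in for en->data2 */
};

static const struct toy_enum fault_unit[] = {
	{ 0x00, "PGRAPH",   7 },
	{ 0x07, "PFIFO",    3 },
	{ 0x03, "PEEPHOLE", 0 },   /* no engine to look up */
	{ 0, NULL, 0 }             /* sentinel */
};

static const struct toy_enum *enum_find(const struct toy_enum *en,
					unsigned int value)
{
	for (; en->name; en++)
		if (en->value == value)
			return en;
	return NULL;
}

int main(void)
{
	const struct toy_enum *en = enum_find(fault_unit, 0x00);
	if (en) {
		printf("fault from %s", en->name);
		if (en->engine)   /* resolve engctx, then client name */
			printf(" (engine %d: look up context)", en->engine);
		printf("\n");
	}
	return 0;
}
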
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index c930da99c2c1..4419e40d88e9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -27,6 +27,7 @@
27#include <core/namedb.h> 27#include <core/namedb.h>
28#include <core/gpuobj.h> 28#include <core/gpuobj.h>
29#include <core/engctx.h> 29#include <core/engctx.h>
30#include <core/event.h>
30#include <core/class.h> 31#include <core/class.h>
31#include <core/math.h> 32#include <core/math.h>
32#include <core/enum.h> 33#include <core/enum.h>
@@ -184,7 +185,8 @@ nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
184 185
185 nv_wr32(priv, 0x002634, chan->base.chid); 186 nv_wr32(priv, 0x002634, chan->base.chid);
186 if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) { 187 if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
187 nv_error(priv, "channel %d kick timeout\n", chan->base.chid); 188 nv_error(priv, "channel %d [%s] kick timeout\n",
189 chan->base.chid, nouveau_client_name(chan));
188 if (suspend) 190 if (suspend)
189 return -EBUSY; 191 return -EBUSY;
190 } 192 }
@@ -412,20 +414,34 @@ nve0_fifo_isr_vm_fault(struct nve0_fifo_priv *priv, int unit)
412 u32 vahi = nv_rd32(priv, 0x2808 + (unit * 0x10)); 414 u32 vahi = nv_rd32(priv, 0x2808 + (unit * 0x10));
413 u32 stat = nv_rd32(priv, 0x280c + (unit * 0x10)); 415 u32 stat = nv_rd32(priv, 0x280c + (unit * 0x10));
414 u32 client = (stat & 0x00001f00) >> 8; 416 u32 client = (stat & 0x00001f00) >> 8;
417 const struct nouveau_enum *en;
418 struct nouveau_engine *engine;
419 struct nouveau_object *engctx = NULL;
415 420
416 nv_error(priv, "PFIFO: %s fault at 0x%010llx [", (stat & 0x00000080) ? 421 nv_error(priv, "PFIFO: %s fault at 0x%010llx [", (stat & 0x00000080) ?
417 "write" : "read", (u64)vahi << 32 | valo); 422 "write" : "read", (u64)vahi << 32 | valo);
418 nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f); 423 nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
419 printk("] from "); 424 pr_cont("] from ");
420 nouveau_enum_print(nve0_fifo_fault_unit, unit); 425 en = nouveau_enum_print(nve0_fifo_fault_unit, unit);
421 if (stat & 0x00000040) { 426 if (stat & 0x00000040) {
422 printk("/"); 427 pr_cont("/");
423 nouveau_enum_print(nve0_fifo_fault_hubclient, client); 428 nouveau_enum_print(nve0_fifo_fault_hubclient, client);
424 } else { 429 } else {
425 printk("/GPC%d/", (stat & 0x1f000000) >> 24); 430 pr_cont("/GPC%d/", (stat & 0x1f000000) >> 24);
426 nouveau_enum_print(nve0_fifo_fault_gpcclient, client); 431 nouveau_enum_print(nve0_fifo_fault_gpcclient, client);
427 } 432 }
428 printk(" on channel 0x%010llx\n", (u64)inst << 12); 433
434 if (en && en->data2) {
435 engine = nouveau_engine(priv, en->data2);
436 if (engine)
437 engctx = nouveau_engctx_get(engine, inst);
438
439 }
440
441 pr_cont(" on channel 0x%010llx [%s]\n", (u64)inst << 12,
442 nouveau_client_name(engctx));
443
444 nouveau_engctx_put(engctx);
429} 445}
430 446
431static int 447static int
@@ -480,10 +496,12 @@ nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv *priv, int unit)
480 if (show) { 496 if (show) {
481 nv_error(priv, "SUBFIFO%d:", unit); 497 nv_error(priv, "SUBFIFO%d:", unit);
482 nouveau_bitfield_print(nve0_fifo_subfifo_intr, show); 498 nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
483 printk("\n"); 499 pr_cont("\n");
484 nv_error(priv, "SUBFIFO%d: ch %d subc %d mthd 0x%04x " 500 nv_error(priv,
485 "data 0x%08x\n", 501 "SUBFIFO%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
486 unit, chid, subc, mthd, data); 502 unit, chid,
503 nouveau_client_name_for_fifo_chid(&priv->base, chid),
504 subc, mthd, data);
487 } 505 }
488 506
489 nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008); 507 nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
@@ -537,6 +555,12 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
537 stat &= ~0x40000000; 555 stat &= ~0x40000000;
538 } 556 }
539 557
558 if (stat & 0x80000000) {
559 nouveau_event_trigger(priv->base.uevent, 0);
560 nv_wr32(priv, 0x002100, 0x80000000);
561 stat &= ~0x80000000;
562 }
563
540 if (stat) { 564 if (stat) {
541 nv_fatal(priv, "unhandled status 0x%08x\n", stat); 565 nv_fatal(priv, "unhandled status 0x%08x\n", stat);
542 nv_wr32(priv, 0x002100, stat); 566 nv_wr32(priv, 0x002100, stat);
@@ -544,6 +568,20 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
544 } 568 }
545} 569}
546 570
571static void
572nve0_fifo_uevent_enable(struct nouveau_event *event, int index)
573{
574 struct nve0_fifo_priv *priv = event->priv;
575 nv_mask(priv, 0x002140, 0x80000000, 0x80000000);
576}
577
578static void
579nve0_fifo_uevent_disable(struct nouveau_event *event, int index)
580{
581 struct nve0_fifo_priv *priv = event->priv;
582 nv_mask(priv, 0x002140, 0x80000000, 0x00000000);
583}
584
547static int 585static int
548nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 586nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
549 struct nouveau_oclass *oclass, void *data, u32 size, 587 struct nouveau_oclass *oclass, void *data, u32 size,
@@ -567,6 +605,10 @@ nve0_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
567 if (ret) 605 if (ret)
568 return ret; 606 return ret;
569 607
608 priv->base.uevent->enable = nve0_fifo_uevent_enable;
609 priv->base.uevent->disable = nve0_fifo_uevent_disable;
610 priv->base.uevent->priv = priv;
611
570 nv_subdev(priv)->unit = 0x00000100; 612 nv_subdev(priv)->unit = 0x00000100;
571 nv_subdev(priv)->intr = nve0_fifo_intr; 613 nv_subdev(priv)->intr = nve0_fifo_intr;
572 nv_engine(priv)->cclass = &nve0_fifo_cclass; 614 nv_engine(priv)->cclass = &nve0_fifo_cclass;
@@ -617,7 +659,7 @@ nve0_fifo_init(struct nouveau_object *object)
617 659
618 nv_wr32(priv, 0x002a00, 0xffffffff); 660 nv_wr32(priv, 0x002a00, 0xffffffff);
619 nv_wr32(priv, 0x002100, 0xffffffff); 661 nv_wr32(priv, 0x002100, 0xffffffff);
620 nv_wr32(priv, 0x002140, 0xbfffffff); 662 nv_wr32(priv, 0x002140, 0x3fffffff);
621 return 0; 663 return 0;
622} 664}
623 665
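
nve0.c mirrors the nvc0.c changes: pr_cont() for continuations, engine-context lookup in the VM fault path, and a uevent trigger on bit 31 (note that init now masks it with 0x3fffffff). The point of pr_cont() is that every ordinary printk() starts a new record with a log-level prefix; continuing the previous line needs the explicit continuation call. A toy record-oriented logger showing the distinction; names are ours, not the kernel's:

#include <stdio.h>

static void toy_printk(const char *msg)  { printf("<3>%s", msg); }
static void toy_pr_cont(const char *msg) { printf("%s", msg); }

int main(void)
{
	toy_printk("DATA_ERROR [");     /* opens a new log record */
	toy_pr_cont("INVALID_VALUE");   /* appends, no fresh "<3>" prefix */
	toy_pr_cont("] ch 2\n");
	return 0;
}
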
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
index e30a9c5ff1fc..ad13dcdd15f9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
@@ -22,6 +22,7 @@
22 * DEALINGS IN THE SOFTWARE. 22 * DEALINGS IN THE SOFTWARE.
23 */ 23 */
24 24
25#include <core/client.h>
25#include <core/os.h> 26#include <core/os.h>
26#include <core/class.h> 27#include <core/class.h>
27#include <core/handle.h> 28#include <core/handle.h>
@@ -1297,16 +1298,17 @@ nv04_graph_intr(struct nouveau_subdev *subdev)
1297 nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001); 1298 nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
1298 1299
1299 if (show) { 1300 if (show) {
1300 nv_error(priv, ""); 1301 nv_error(priv, "%s", "");
1301 nouveau_bitfield_print(nv04_graph_intr_name, show); 1302 nouveau_bitfield_print(nv04_graph_intr_name, show);
1302 printk(" nsource:"); 1303 pr_cont(" nsource:");
1303 nouveau_bitfield_print(nv04_graph_nsource, nsource); 1304 nouveau_bitfield_print(nv04_graph_nsource, nsource);
1304 printk(" nstatus:"); 1305 pr_cont(" nstatus:");
1305 nouveau_bitfield_print(nv04_graph_nstatus, nstatus); 1306 nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
1306 printk("\n"); 1307 pr_cont("\n");
1307 nv_error(priv, "ch %d/%d class 0x%04x " 1308 nv_error(priv,
1308 "mthd 0x%04x data 0x%08x\n", 1309 "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
1309 chid, subc, class, mthd, data); 1310 chid, nouveau_client_name(chan), subc, class, mthd,
1311 data);
1310 } 1312 }
1311 1313
1312 nouveau_namedb_put(handle); 1314 nouveau_namedb_put(handle);
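
In graph/nv04.c the curious-looking nv_error(priv, "%s", "") replaces nv_error(priv, ""): the goal is to emit just the "nouveau E[...]" prefix so the following pr_cont() calls can append the decoded bitfields, and routing the empty payload through %s avoids gcc's -Wformat-zero-length warning on format-checked functions. A compact illustration with a printf-attributed stand-in for nv_error() (gcc/clang attribute syntax assumed):

#include <stdarg.h>
#include <stdio.h>

static void __attribute__((format(printf, 1, 2)))
toy_nv_error(const char *fmt, ...)
{
	va_list ap;
	printf("nouveau E[PGRAPH]: ");   /* prefix added at run time */
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	/* toy_nv_error(""); would warn: zero-length gnu_printf format */
	toy_nv_error("%s", "");           /* prefix only, warning-free */
	printf("ERROR NOTIFY");           /* pr_cont()-style append */
	printf(" nsource: DATA_ERROR\n");
	return 0;
}
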
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
index 5c0f843ea249..23c143aaa556 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
@@ -22,6 +22,7 @@
22 * DEALINGS IN THE SOFTWARE. 22 * DEALINGS IN THE SOFTWARE.
23 */ 23 */
24 24
25#include <core/client.h>
25#include <core/os.h> 26#include <core/os.h>
26#include <core/class.h> 27#include <core/class.h>
27#include <core/handle.h> 28#include <core/handle.h>
@@ -1193,16 +1194,17 @@ nv10_graph_intr(struct nouveau_subdev *subdev)
1193 nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001); 1194 nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
1194 1195
1195 if (show) { 1196 if (show) {
1196 nv_error(priv, ""); 1197 nv_error(priv, "%s", "");
1197 nouveau_bitfield_print(nv10_graph_intr_name, show); 1198 nouveau_bitfield_print(nv10_graph_intr_name, show);
1198 printk(" nsource:"); 1199 pr_cont(" nsource:");
1199 nouveau_bitfield_print(nv04_graph_nsource, nsource); 1200 nouveau_bitfield_print(nv04_graph_nsource, nsource);
1200 printk(" nstatus:"); 1201 pr_cont(" nstatus:");
1201 nouveau_bitfield_print(nv10_graph_nstatus, nstatus); 1202 nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
1202 printk("\n"); 1203 pr_cont("\n");
1203 nv_error(priv, "ch %d/%d class 0x%04x " 1204 nv_error(priv,
1204 "mthd 0x%04x data 0x%08x\n", 1205 "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
1205 chid, subc, class, mthd, data); 1206 chid, nouveau_client_name(chan), subc, class, mthd,
1207 data);
1206 } 1208 }
1207 1209
1208 nouveau_namedb_put(handle); 1210 nouveau_namedb_put(handle);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
index 5b20401bf911..0607b9801748 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
@@ -1,3 +1,4 @@
1#include <core/client.h>
1#include <core/os.h> 2#include <core/os.h>
2#include <core/class.h> 3#include <core/class.h>
3#include <core/engctx.h> 4#include <core/engctx.h>
@@ -224,15 +225,17 @@ nv20_graph_intr(struct nouveau_subdev *subdev)
224 nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001); 225 nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
225 226
226 if (show) { 227 if (show) {
227 nv_error(priv, ""); 228 nv_error(priv, "%s", "");
228 nouveau_bitfield_print(nv10_graph_intr_name, show); 229 nouveau_bitfield_print(nv10_graph_intr_name, show);
229 printk(" nsource:"); 230 pr_cont(" nsource:");
230 nouveau_bitfield_print(nv04_graph_nsource, nsource); 231 nouveau_bitfield_print(nv04_graph_nsource, nsource);
231 printk(" nstatus:"); 232 pr_cont(" nstatus:");
232 nouveau_bitfield_print(nv10_graph_nstatus, nstatus); 233 nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
233 printk("\n"); 234 pr_cont("\n");
234 nv_error(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n", 235 nv_error(priv,
235 chid, subc, class, mthd, data); 236 "ch %d [%s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
237 chid, nouveau_client_name(engctx), subc, class, mthd,
238 data);
236 } 239 }
237 240
238 nouveau_engctx_put(engctx); 241 nouveau_engctx_put(engctx);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
index 0b36dd3deebd..17049d5c723d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
@@ -22,6 +22,7 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/client.h>
25#include <core/os.h> 26#include <core/os.h>
26#include <core/class.h> 27#include <core/class.h>
27#include <core/handle.h> 28#include <core/handle.h>
@@ -321,16 +322,17 @@ nv40_graph_intr(struct nouveau_subdev *subdev)
321 nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001); 322 nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
322 323
323 if (show) { 324 if (show) {
324 nv_error(priv, ""); 325 nv_error(priv, "%s", "");
325 nouveau_bitfield_print(nv10_graph_intr_name, show); 326 nouveau_bitfield_print(nv10_graph_intr_name, show);
326 printk(" nsource:"); 327 pr_cont(" nsource:");
327 nouveau_bitfield_print(nv04_graph_nsource, nsource); 328 nouveau_bitfield_print(nv04_graph_nsource, nsource);
328 printk(" nstatus:"); 329 pr_cont(" nstatus:");
329 nouveau_bitfield_print(nv10_graph_nstatus, nstatus); 330 nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
330 printk("\n"); 331 pr_cont("\n");
331 nv_error(priv, "ch %d [0x%08x] subc %d class 0x%04x " 332 nv_error(priv,
332 "mthd 0x%04x data 0x%08x\n", 333 "ch %d [0x%08x %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
333 chid, inst << 4, subc, class, mthd, data); 334 chid, inst << 4, nouveau_client_name(engctx), subc,
335 class, mthd, data);
334 } 336 }
335 337
336 nouveau_engctx_put(engctx); 338 nouveau_engctx_put(engctx);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index b1c3d835b4c2..f2b1a7a124f2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -24,6 +24,7 @@
24 24
25#include <core/os.h> 25#include <core/os.h>
26#include <core/class.h> 26#include <core/class.h>
27#include <core/client.h>
27#include <core/handle.h> 28#include <core/handle.h>
28#include <core/engctx.h> 29#include <core/engctx.h>
29#include <core/enum.h> 30#include <core/enum.h>
@@ -418,7 +419,7 @@ nv50_priv_mp_trap(struct nv50_graph_priv *priv, int tpid, int display)
418 nv_error(priv, "TRAP_MP_EXEC - " 419 nv_error(priv, "TRAP_MP_EXEC - "
419 "TP %d MP %d: ", tpid, i); 420 "TP %d MP %d: ", tpid, i);
420 nouveau_enum_print(nv50_mp_exec_error_names, status); 421 nouveau_enum_print(nv50_mp_exec_error_names, status);
421 printk(" at %06x warp %d, opcode %08x %08x\n", 422 pr_cont(" at %06x warp %d, opcode %08x %08x\n",
422 pc&0xffffff, pc >> 24, 423 pc&0xffffff, pc >> 24,
423 oplow, ophigh); 424 oplow, ophigh);
424 } 425 }
@@ -532,7 +533,7 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
532 533
533static int 534static int
534nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display, 535nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
535 int chid, u64 inst) 536 int chid, u64 inst, struct nouveau_object *engctx)
536{ 537{
537 u32 status = nv_rd32(priv, 0x400108); 538 u32 status = nv_rd32(priv, 0x400108);
538 u32 ustatus; 539 u32 ustatus;
@@ -565,12 +566,11 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
565 566
566 nv_error(priv, "TRAP DISPATCH_FAULT\n"); 567 nv_error(priv, "TRAP DISPATCH_FAULT\n");
567 if (display && (addr & 0x80000000)) { 568 if (display && (addr & 0x80000000)) {
568 nv_error(priv, "ch %d [0x%010llx] " 569 nv_error(priv,
569 "subc %d class 0x%04x mthd 0x%04x " 570 "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x%08x 400808 0x%08x 400848 0x%08x\n",
570 "data 0x%08x%08x " 571 chid, inst,
571 "400808 0x%08x 400848 0x%08x\n", 572 nouveau_client_name(engctx), subc,
572 chid, inst, subc, class, mthd, datah, 573 class, mthd, datah, datal, addr, r848);
573 datal, addr, r848);
574 } else 574 } else
575 if (display) { 575 if (display) {
576 nv_error(priv, "no stuck command?\n"); 576 nv_error(priv, "no stuck command?\n");
@@ -591,11 +591,11 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
591 591
592 nv_error(priv, "TRAP DISPATCH_QUERY\n"); 592 nv_error(priv, "TRAP DISPATCH_QUERY\n");
593 if (display && (addr & 0x80000000)) { 593 if (display && (addr & 0x80000000)) {
594 nv_error(priv, "ch %d [0x%010llx] " 594 nv_error(priv,
595 "subc %d class 0x%04x mthd 0x%04x " 595 "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x 40084c 0x%08x\n",
596 "data 0x%08x 40084c 0x%08x\n", 596 chid, inst,
597 chid, inst, subc, class, mthd, 597 nouveau_client_name(engctx), subc,
598 data, addr); 598 class, mthd, data, addr);
599 } else 599 } else
600 if (display) { 600 if (display) {
601 nv_error(priv, "no stuck command?\n"); 601 nv_error(priv, "no stuck command?\n");
@@ -623,7 +623,7 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
623 if (display) { 623 if (display) {
624 nv_error(priv, "TRAP_M2MF"); 624 nv_error(priv, "TRAP_M2MF");
625 nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus); 625 nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus);
626 printk("\n"); 626 pr_cont("\n");
627 nv_error(priv, "TRAP_M2MF %08x %08x %08x %08x\n", 627 nv_error(priv, "TRAP_M2MF %08x %08x %08x %08x\n",
628 nv_rd32(priv, 0x406804), nv_rd32(priv, 0x406808), 628 nv_rd32(priv, 0x406804), nv_rd32(priv, 0x406808),
629 nv_rd32(priv, 0x40680c), nv_rd32(priv, 0x406810)); 629 nv_rd32(priv, 0x40680c), nv_rd32(priv, 0x406810));
@@ -644,7 +644,7 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
644 if (display) { 644 if (display) {
645 nv_error(priv, "TRAP_VFETCH"); 645 nv_error(priv, "TRAP_VFETCH");
646 nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus); 646 nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus);
647 printk("\n"); 647 pr_cont("\n");
648 nv_error(priv, "TRAP_VFETCH %08x %08x %08x %08x\n", 648 nv_error(priv, "TRAP_VFETCH %08x %08x %08x %08x\n",
649 nv_rd32(priv, 0x400c00), nv_rd32(priv, 0x400c08), 649 nv_rd32(priv, 0x400c00), nv_rd32(priv, 0x400c08),
650 nv_rd32(priv, 0x400c0c), nv_rd32(priv, 0x400c10)); 650 nv_rd32(priv, 0x400c0c), nv_rd32(priv, 0x400c10));
@@ -661,7 +661,7 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
661 if (display) { 661 if (display) {
662 nv_error(priv, "TRAP_STRMOUT"); 662 nv_error(priv, "TRAP_STRMOUT");
663 nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus); 663 nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus);
664 printk("\n"); 664 pr_cont("\n");
665 nv_error(priv, "TRAP_STRMOUT %08x %08x %08x %08x\n", 665 nv_error(priv, "TRAP_STRMOUT %08x %08x %08x %08x\n",
666 nv_rd32(priv, 0x401804), nv_rd32(priv, 0x401808), 666 nv_rd32(priv, 0x401804), nv_rd32(priv, 0x401808),
667 nv_rd32(priv, 0x40180c), nv_rd32(priv, 0x401810)); 667 nv_rd32(priv, 0x40180c), nv_rd32(priv, 0x401810));
@@ -682,7 +682,7 @@ nv50_graph_trap_handler(struct nv50_graph_priv *priv, u32 display,
682 if (display) { 682 if (display) {
683 nv_error(priv, "TRAP_CCACHE"); 683 nv_error(priv, "TRAP_CCACHE");
684 nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus); 684 nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
685 printk("\n"); 685 pr_cont("\n");
686 nv_error(priv, "TRAP_CCACHE %08x %08x %08x %08x" 686 nv_error(priv, "TRAP_CCACHE %08x %08x %08x %08x"
687 " %08x %08x %08x\n", 687 " %08x %08x %08x\n",
688 nv_rd32(priv, 0x405000), nv_rd32(priv, 0x405004), 688 nv_rd32(priv, 0x405000), nv_rd32(priv, 0x405004),
@@ -774,11 +774,12 @@ nv50_graph_intr(struct nouveau_subdev *subdev)
774 u32 ecode = nv_rd32(priv, 0x400110); 774 u32 ecode = nv_rd32(priv, 0x400110);
775 nv_error(priv, "DATA_ERROR "); 775 nv_error(priv, "DATA_ERROR ");
776 nouveau_enum_print(nv50_data_error_names, ecode); 776 nouveau_enum_print(nv50_data_error_names, ecode);
777 printk("\n"); 777 pr_cont("\n");
778 } 778 }
779 779
780 if (stat & 0x00200000) { 780 if (stat & 0x00200000) {
781 if (!nv50_graph_trap_handler(priv, show, chid, (u64)inst << 12)) 781 if (!nv50_graph_trap_handler(priv, show, chid, (u64)inst << 12,
782 engctx))
782 show &= ~0x00200000; 783 show &= ~0x00200000;
783 } 784 }
784 785
@@ -786,12 +787,13 @@ nv50_graph_intr(struct nouveau_subdev *subdev)
786 nv_wr32(priv, 0x400500, 0x00010001); 787 nv_wr32(priv, 0x400500, 0x00010001);
787 788
788 if (show) { 789 if (show) {
789 nv_error(priv, ""); 790 nv_error(priv, "%s", "");
790 nouveau_bitfield_print(nv50_graph_intr_name, show); 791 nouveau_bitfield_print(nv50_graph_intr_name, show);
791 printk("\n"); 792 pr_cont("\n");
792 nv_error(priv, "ch %d [0x%010llx] subc %d class 0x%04x " 793 nv_error(priv,
793 "mthd 0x%04x data 0x%08x\n", 794 "ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
794 chid, (u64)inst << 12, subc, class, mthd, data); 795 chid, (u64)inst << 12, nouveau_client_name(engctx),
796 subc, class, mthd, data);
795 } 797 }
796 798
797 if (nv_rd32(priv, 0x400824) & (1 << 31)) 799 if (nv_rd32(priv, 0x400824) & (1 << 31))
@@ -907,9 +909,8 @@ nv50_graph_init(struct nouveau_object *object)
907 nv_wr32(priv, 0x400828, 0x00000000); 909 nv_wr32(priv, 0x400828, 0x00000000);
908 nv_wr32(priv, 0x40082c, 0x00000000); 910 nv_wr32(priv, 0x40082c, 0x00000000);
909 nv_wr32(priv, 0x400830, 0x00000000); 911 nv_wr32(priv, 0x400830, 0x00000000);
910 nv_wr32(priv, 0x400724, 0x00000000);
911 nv_wr32(priv, 0x40032c, 0x00000000); 912 nv_wr32(priv, 0x40032c, 0x00000000);
912 nv_wr32(priv, 0x400320, 4); /* CTXCTL_CMD = NEWCTXDMA */ 913 nv_wr32(priv, 0x400330, 0x00000000);
913 914
914 /* some unknown zcull magic */ 915 /* some unknown zcull magic */
915 switch (nv_device(priv)->chipset & 0xf0) { 916 switch (nv_device(priv)->chipset & 0xf0) {
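
graph/nv50.c threads the engine context into nv50_graph_trap_handler() so the DISPATCH_FAULT and DISPATCH_QUERY paths can print which client owned the channel, not just the channel id. A toy version of that plumbing, with invented names and an assumed "unknown" fallback for an unresolved context:

#include <stdio.h>

struct toy_engctx { const char *client; };

static const char *toy_client_name(const struct toy_engctx *ectx)
{
	/* NULL-safe fallback; assumed to match the kernel helper */
	return ectx ? ectx->client : "unknown";
}

static void toy_trap(int chid, unsigned long long inst,
		     const struct toy_engctx *ectx)
{
	printf("TRAP ch %d [0x%010llx %s]\n",
	       chid, inst << 12, toy_client_name(ectx));
}

int main(void)
{
	struct toy_engctx ectx = { "Xorg[1234]" };
	toy_trap(2, 0x1f000ULL, &ectx);  /* context resolved */
	toy_trap(2, 0x1f000ULL, NULL);   /* lookup failed, still safe */
	return 0;
}
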
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 45aff5f5085a..0de0dd724aff 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -433,10 +433,10 @@ nvc0_graph_intr(struct nouveau_subdev *subdev)
433 if (stat & 0x00000010) { 433 if (stat & 0x00000010) {
434 handle = nouveau_handle_get_class(engctx, class); 434 handle = nouveau_handle_get_class(engctx, class);
435 if (!handle || nv_call(handle->object, mthd, data)) { 435 if (!handle || nv_call(handle->object, mthd, data)) {
436 nv_error(priv, "ILLEGAL_MTHD ch %d [0x%010llx] " 436 nv_error(priv,
437 "subc %d class 0x%04x mthd 0x%04x " 437 "ILLEGAL_MTHD ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
438 "data 0x%08x\n", 438 chid, inst << 12, nouveau_client_name(engctx),
439 chid, inst << 12, subc, class, mthd, data); 439 subc, class, mthd, data);
440 } 440 }
441 nouveau_handle_put(handle); 441 nouveau_handle_put(handle);
442 nv_wr32(priv, 0x400100, 0x00000010); 442 nv_wr32(priv, 0x400100, 0x00000010);
@@ -444,9 +444,10 @@ nvc0_graph_intr(struct nouveau_subdev *subdev)
444 } 444 }
445 445
446 if (stat & 0x00000020) { 446 if (stat & 0x00000020) {
447 nv_error(priv, "ILLEGAL_CLASS ch %d [0x%010llx] subc %d " 447 nv_error(priv,
448 "class 0x%04x mthd 0x%04x data 0x%08x\n", 448 "ILLEGAL_CLASS ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
449 chid, inst << 12, subc, class, mthd, data); 449 chid, inst << 12, nouveau_client_name(engctx), subc,
450 class, mthd, data);
450 nv_wr32(priv, 0x400100, 0x00000020); 451 nv_wr32(priv, 0x400100, 0x00000020);
451 stat &= ~0x00000020; 452 stat &= ~0x00000020;
452 } 453 }
@@ -454,15 +455,16 @@ nvc0_graph_intr(struct nouveau_subdev *subdev)
454 if (stat & 0x00100000) { 455 if (stat & 0x00100000) {
455 nv_error(priv, "DATA_ERROR ["); 456 nv_error(priv, "DATA_ERROR [");
456 nouveau_enum_print(nv50_data_error_names, code); 457 nouveau_enum_print(nv50_data_error_names, code);
457 printk("] ch %d [0x%010llx] subc %d class 0x%04x " 458 pr_cont("] ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
458 "mthd 0x%04x data 0x%08x\n", 459 chid, inst << 12, nouveau_client_name(engctx), subc,
459 chid, inst << 12, subc, class, mthd, data); 460 class, mthd, data);
460 nv_wr32(priv, 0x400100, 0x00100000); 461 nv_wr32(priv, 0x400100, 0x00100000);
461 stat &= ~0x00100000; 462 stat &= ~0x00100000;
462 } 463 }
463 464
464 if (stat & 0x00200000) { 465 if (stat & 0x00200000) {
465 nv_error(priv, "TRAP ch %d [0x%010llx]\n", chid, inst << 12); 466 nv_error(priv, "TRAP ch %d [0x%010llx %s]\n", chid, inst << 12,
467 nouveau_client_name(engctx));
466 nvc0_graph_trap_intr(priv); 468 nvc0_graph_trap_intr(priv);
467 nv_wr32(priv, 0x400100, 0x00200000); 469 nv_wr32(priv, 0x400100, 0x00200000);
468 stat &= ~0x00200000; 470 stat &= ~0x00200000;
@@ -611,10 +613,8 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
611static void 613static void
612nvc0_graph_dtor_fw(struct nvc0_graph_fuc *fuc) 614nvc0_graph_dtor_fw(struct nvc0_graph_fuc *fuc)
613{ 615{
614 if (fuc->data) { 616 kfree(fuc->data);
615 kfree(fuc->data); 617 fuc->data = NULL;
616 fuc->data = NULL;
617 }
618} 618}
619 619
620void 620void
@@ -622,8 +622,7 @@ nvc0_graph_dtor(struct nouveau_object *object)
622{ 622{
623 struct nvc0_graph_priv *priv = (void *)object; 623 struct nvc0_graph_priv *priv = (void *)object;
624 624
625 if (priv->data) 625 kfree(priv->data);
626 kfree(priv->data);
627 626
628 nvc0_graph_dtor_fw(&priv->fuc409c); 627 nvc0_graph_dtor_fw(&priv->fuc409c);
629 nvc0_graph_dtor_fw(&priv->fuc409d); 628 nvc0_graph_dtor_fw(&priv->fuc409d);
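
The dtor cleanups above rely on kfree(NULL) being a defined no-op, so the surrounding if (ptr) guards were dead weight. Standard C's free() gives the same guarantee, which this sketch leans on; resetting the pointer afterwards keeps the destructor idempotent:

#include <stdlib.h>

struct toy_fuc { unsigned char *data; };

static void toy_fuc_dtor(struct toy_fuc *fuc)
{
	free(fuc->data);    /* free(NULL) is a no-op, no guard needed */
	fuc->data = NULL;   /* safe to call the dtor again later */
}

int main(void)
{
	struct toy_fuc fuc = { 0 };
	toy_fuc_dtor(&fuc);           /* nothing allocated: fine */
	fuc.data = malloc(64);
	toy_fuc_dtor(&fuc);
	toy_fuc_dtor(&fuc);           /* idempotent thanks to the reset */
	return 0;
}
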
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
index 9f82e9702b46..61cec0f6ff1c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
@@ -78,15 +78,16 @@ nve0_graph_ctxctl_isr(struct nvc0_graph_priv *priv)
78} 78}
79 79
80static void 80static void
81nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst) 81nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst,
82 struct nouveau_object *engctx)
82{ 83{
83 u32 trap = nv_rd32(priv, 0x400108); 84 u32 trap = nv_rd32(priv, 0x400108);
84 int rop; 85 int rop;
85 86
86 if (trap & 0x00000001) { 87 if (trap & 0x00000001) {
87 u32 stat = nv_rd32(priv, 0x404000); 88 u32 stat = nv_rd32(priv, 0x404000);
88 nv_error(priv, "DISPATCH ch %d [0x%010llx] 0x%08x\n", 89 nv_error(priv, "DISPATCH ch %d [0x%010llx %s] 0x%08x\n",
89 chid, inst, stat); 90 chid, inst, nouveau_client_name(engctx), stat);
90 nv_wr32(priv, 0x404000, 0xc0000000); 91 nv_wr32(priv, 0x404000, 0xc0000000);
91 nv_wr32(priv, 0x400108, 0x00000001); 92 nv_wr32(priv, 0x400108, 0x00000001);
92 trap &= ~0x00000001; 93 trap &= ~0x00000001;
@@ -94,8 +95,8 @@ nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst)
94 95
95 if (trap & 0x00000010) { 96 if (trap & 0x00000010) {
96 u32 stat = nv_rd32(priv, 0x405840); 97 u32 stat = nv_rd32(priv, 0x405840);
97 nv_error(priv, "SHADER ch %d [0x%010llx] 0x%08x\n", 98 nv_error(priv, "SHADER ch %d [0x%010llx %s] 0x%08x\n",
98 chid, inst, stat); 99 chid, inst, nouveau_client_name(engctx), stat);
99 nv_wr32(priv, 0x405840, 0xc0000000); 100 nv_wr32(priv, 0x405840, 0xc0000000);
100 nv_wr32(priv, 0x400108, 0x00000010); 101 nv_wr32(priv, 0x400108, 0x00000010);
101 trap &= ~0x00000010; 102 trap &= ~0x00000010;
@@ -105,8 +106,10 @@ nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst)
105 for (rop = 0; rop < priv->rop_nr; rop++) { 106 for (rop = 0; rop < priv->rop_nr; rop++) {
106 u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070)); 107 u32 statz = nv_rd32(priv, ROP_UNIT(rop, 0x070));
107 u32 statc = nv_rd32(priv, ROP_UNIT(rop, 0x144)); 108 u32 statc = nv_rd32(priv, ROP_UNIT(rop, 0x144));
108 nv_error(priv, "ROP%d ch %d [0x%010llx] 0x%08x 0x%08x\n", 109 nv_error(priv,
109 rop, chid, inst, statz, statc); 110 "ROP%d ch %d [0x%010llx %s] 0x%08x 0x%08x\n",
111 rop, chid, inst, nouveau_client_name(engctx),
112 statz, statc);
110 nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000); 113 nv_wr32(priv, ROP_UNIT(rop, 0x070), 0xc0000000);
111 nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000); 114 nv_wr32(priv, ROP_UNIT(rop, 0x144), 0xc0000000);
112 } 115 }
@@ -115,8 +118,8 @@ nve0_graph_trap_isr(struct nvc0_graph_priv *priv, int chid, u64 inst)
115 } 118 }
116 119
117 if (trap) { 120 if (trap) {
118 nv_error(priv, "TRAP ch %d [0x%010llx] 0x%08x\n", 121 nv_error(priv, "TRAP ch %d [0x%010llx %s] 0x%08x\n",
119 chid, inst, trap); 122 chid, inst, nouveau_client_name(engctx), trap);
120 nv_wr32(priv, 0x400108, trap); 123 nv_wr32(priv, 0x400108, trap);
121 } 124 }
122} 125}
@@ -145,10 +148,10 @@ nve0_graph_intr(struct nouveau_subdev *subdev)
145 if (stat & 0x00000010) { 148 if (stat & 0x00000010) {
146 handle = nouveau_handle_get_class(engctx, class); 149 handle = nouveau_handle_get_class(engctx, class);
147 if (!handle || nv_call(handle->object, mthd, data)) { 150 if (!handle || nv_call(handle->object, mthd, data)) {
148 nv_error(priv, "ILLEGAL_MTHD ch %d [0x%010llx] " 151 nv_error(priv,
149 "subc %d class 0x%04x mthd 0x%04x " 152 "ILLEGAL_MTHD ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
150 "data 0x%08x\n", 153 chid, inst, nouveau_client_name(engctx), subc,
151 chid, inst, subc, class, mthd, data); 154 class, mthd, data);
152 } 155 }
153 nouveau_handle_put(handle); 156 nouveau_handle_put(handle);
154 nv_wr32(priv, 0x400100, 0x00000010); 157 nv_wr32(priv, 0x400100, 0x00000010);
@@ -156,9 +159,10 @@ nve0_graph_intr(struct nouveau_subdev *subdev)
156 } 159 }
157 160
158 if (stat & 0x00000020) { 161 if (stat & 0x00000020) {
159 nv_error(priv, "ILLEGAL_CLASS ch %d [0x%010llx] subc %d " 162 nv_error(priv,
160 "class 0x%04x mthd 0x%04x data 0x%08x\n", 163 "ILLEGAL_CLASS ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
161 chid, inst, subc, class, mthd, data); 164 chid, inst, nouveau_client_name(engctx), subc, class,
165 mthd, data);
162 nv_wr32(priv, 0x400100, 0x00000020); 166 nv_wr32(priv, 0x400100, 0x00000020);
163 stat &= ~0x00000020; 167 stat &= ~0x00000020;
164 } 168 }
@@ -166,15 +170,15 @@ nve0_graph_intr(struct nouveau_subdev *subdev)
166 if (stat & 0x00100000) { 170 if (stat & 0x00100000) {
167 nv_error(priv, "DATA_ERROR ["); 171 nv_error(priv, "DATA_ERROR [");
168 nouveau_enum_print(nv50_data_error_names, code); 172 nouveau_enum_print(nv50_data_error_names, code);
169 printk("] ch %d [0x%010llx] subc %d class 0x%04x " 173 pr_cont("] ch %d [0x%010llx %s] subc %d class 0x%04x mthd 0x%04x data 0x%08x\n",
170 "mthd 0x%04x data 0x%08x\n", 174 chid, inst, nouveau_client_name(engctx), subc, class,
171 chid, inst, subc, class, mthd, data); 175 mthd, data);
172 nv_wr32(priv, 0x400100, 0x00100000); 176 nv_wr32(priv, 0x400100, 0x00100000);
173 stat &= ~0x00100000; 177 stat &= ~0x00100000;
174 } 178 }
175 179
176 if (stat & 0x00200000) { 180 if (stat & 0x00200000) {
177 nve0_graph_trap_isr(priv, chid, inst); 181 nve0_graph_trap_isr(priv, chid, inst, engctx);
178 nv_wr32(priv, 0x400100, 0x00200000); 182 nv_wr32(priv, 0x400100, 0x00200000);
179 stat &= ~0x00200000; 183 stat &= ~0x00200000;
180 } 184 }
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
index 9fd86375f4c4..49ecbb859b25 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -22,6 +22,7 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/client.h>
25#include <core/os.h> 26#include <core/os.h>
26#include <core/class.h> 27#include <core/class.h>
27#include <core/engctx.h> 28#include <core/engctx.h>
@@ -231,8 +232,10 @@ nv31_mpeg_intr(struct nouveau_subdev *subdev)
231 nv_wr32(priv, 0x00b230, 0x00000001); 232 nv_wr32(priv, 0x00b230, 0x00000001);
232 233
233 if (show) { 234 if (show) {
234 nv_error(priv, "ch %d [0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n", 235 nv_error(priv,
235 chid, inst << 4, stat, type, mthd, data); 236 "ch %d [0x%08x %s] 0x%08x 0x%08x 0x%08x 0x%08x\n",
237 chid, inst << 4, nouveau_client_name(engctx), stat,
238 type, mthd, data);
236 } 239 }
237 240
238 nouveau_engctx_put(engctx); 241 nouveau_engctx_put(engctx);
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
index b0e7e1c01ce6..c48e74953771 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -28,6 +28,9 @@
28#include <core/namedb.h> 28#include <core/namedb.h>
29#include <core/handle.h> 29#include <core/handle.h>
30#include <core/gpuobj.h> 30#include <core/gpuobj.h>
31#include <core/event.h>
32
33#include <subdev/bar.h>
31 34
32#include <engine/software.h> 35#include <engine/software.h>
33#include <engine/disp.h> 36#include <engine/disp.h>
@@ -90,18 +93,11 @@ nv50_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
90{ 93{
91 struct nv50_software_chan *chan = (void *)nv_engctx(object->parent); 94 struct nv50_software_chan *chan = (void *)nv_engctx(object->parent);
92 struct nouveau_disp *disp = nouveau_disp(object); 95 struct nouveau_disp *disp = nouveau_disp(object);
93 unsigned long flags;
94 u32 crtc = *(u32 *)args; 96 u32 crtc = *(u32 *)args;
95
96 if (crtc > 1) 97 if (crtc > 1)
97 return -EINVAL; 98 return -EINVAL;
98 99
99 disp->vblank.get(disp->vblank.data, crtc); 100 nouveau_event_get(disp->vblank, crtc, &chan->base.vblank.event);
100
101 spin_lock_irqsave(&disp->vblank.lock, flags);
102 list_add(&chan->base.vblank.head, &disp->vblank.list);
103 chan->base.vblank.crtc = crtc;
104 spin_unlock_irqrestore(&disp->vblank.lock, flags);
105 return 0; 101 return 0;
106} 102}
107 103
@@ -136,6 +132,29 @@ nv50_software_sclass[] = {
136 ******************************************************************************/ 132 ******************************************************************************/
137 133
138static int 134static int
135nv50_software_vblsem_release(struct nouveau_eventh *event, int head)
136{
137 struct nouveau_software_chan *chan =
138 container_of(event, struct nouveau_software_chan, vblank.event);
139 struct nv50_software_priv *priv = (void *)nv_object(chan)->engine;
140 struct nouveau_bar *bar = nouveau_bar(priv);
141
142 nv_wr32(priv, 0x001704, chan->vblank.channel);
143 nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
144 bar->flush(bar);
145
146 if (nv_device(priv)->chipset == 0x50) {
147 nv_wr32(priv, 0x001570, chan->vblank.offset);
148 nv_wr32(priv, 0x001574, chan->vblank.value);
149 } else {
150 nv_wr32(priv, 0x060010, chan->vblank.offset);
151 nv_wr32(priv, 0x060014, chan->vblank.value);
152 }
153
154 return NVKM_EVENT_DROP;
155}
156
157static int
139nv50_software_context_ctor(struct nouveau_object *parent, 158nv50_software_context_ctor(struct nouveau_object *parent,
140 struct nouveau_object *engine, 159 struct nouveau_object *engine,
141 struct nouveau_oclass *oclass, void *data, u32 size, 160 struct nouveau_oclass *oclass, void *data, u32 size,
@@ -150,6 +169,7 @@ nv50_software_context_ctor(struct nouveau_object *parent,
150 return ret; 169 return ret;
151 170
152 chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12; 171 chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
172 chan->base.vblank.event.func = nv50_software_vblsem_release;
153 return 0; 173 return 0;
154} 174}
155 175
@@ -170,8 +190,8 @@ nv50_software_cclass = {
170 190
171static int 191static int
172nv50_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 192nv50_software_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
173 struct nouveau_oclass *oclass, void *data, u32 size, 193 struct nouveau_oclass *oclass, void *data, u32 size,
174 struct nouveau_object **pobject) 194 struct nouveau_object **pobject)
175{ 195{
176 struct nv50_software_priv *priv; 196 struct nv50_software_priv *priv;
177 int ret; 197 int ret;
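
software/nv50.c swaps the hand-rolled vblank list (lock, list_add, crtc field) for the new event interface: the channel embeds an event handler, nouveau_event_get() arms it on the requested head, and when it fires it recovers its channel with container_of() before writing the semaphore-release registers. Returning NVKM_EVENT_DROP makes it one-shot, so each VBLANK_SEM_RELEASE method arms exactly one wakeup. A self-contained model of the container_of idiom; all names are invented:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_eventh {
	int (*func)(struct toy_eventh *, int head);  /* returns drop/keep */
};

struct toy_chan {
	int channel;
	struct toy_eventh vblank;   /* handler embedded in the channel */
};

static int toy_vblsem_release(struct toy_eventh *event, int head)
{
	struct toy_chan *chan = container_of(event, struct toy_chan, vblank);
	printf("ch %d: release vblank semaphore on head %d\n",
	       chan->channel, head);
	return 1;   /* "drop": one-shot, handler is disarmed after firing */
}

int main(void)
{
	struct toy_chan chan = { 42, { toy_vblsem_release } };
	chan.vblank.func(&chan.vblank, 0);   /* what the vblank IRQ does */
	return 0;
}
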
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
index 282a1cd1bc2f..a523eaad47e3 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -25,6 +25,9 @@
25#include <core/os.h> 25#include <core/os.h>
26#include <core/class.h> 26#include <core/class.h>
27#include <core/engctx.h> 27#include <core/engctx.h>
28#include <core/event.h>
29
30#include <subdev/bar.h>
28 31
29#include <engine/software.h> 32#include <engine/software.h>
30#include <engine/disp.h> 33#include <engine/disp.h>
@@ -72,18 +75,12 @@ nvc0_software_mthd_vblsem_release(struct nouveau_object *object, u32 mthd,
72{ 75{
73 struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent); 76 struct nvc0_software_chan *chan = (void *)nv_engctx(object->parent);
74 struct nouveau_disp *disp = nouveau_disp(object); 77 struct nouveau_disp *disp = nouveau_disp(object);
75 unsigned long flags;
76 u32 crtc = *(u32 *)args; 78 u32 crtc = *(u32 *)args;
77 79
78 if ((nv_device(object)->card_type < NV_E0 && crtc > 1) || crtc > 3) 80 if ((nv_device(object)->card_type < NV_E0 && crtc > 1) || crtc > 3)
79 return -EINVAL; 81 return -EINVAL;
80 82
81 disp->vblank.get(disp->vblank.data, crtc); 83 nouveau_event_get(disp->vblank, crtc, &chan->base.vblank.event);
82
83 spin_lock_irqsave(&disp->vblank.lock, flags);
84 list_add(&chan->base.vblank.head, &disp->vblank.list);
85 chan->base.vblank.crtc = crtc;
86 spin_unlock_irqrestore(&disp->vblank.lock, flags);
87 return 0; 84 return 0;
88} 85}
89 86
@@ -118,6 +115,23 @@ nvc0_software_sclass[] = {
118 ******************************************************************************/ 115 ******************************************************************************/
119 116
120static int 117static int
118nvc0_software_vblsem_release(struct nouveau_eventh *event, int head)
119{
120 struct nouveau_software_chan *chan =
121 container_of(event, struct nouveau_software_chan, vblank.event);
122 struct nvc0_software_priv *priv = (void *)nv_object(chan)->engine;
123 struct nouveau_bar *bar = nouveau_bar(priv);
124
125 nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
126 bar->flush(bar);
127 nv_wr32(priv, 0x06000c, upper_32_bits(chan->vblank.offset));
128 nv_wr32(priv, 0x060010, lower_32_bits(chan->vblank.offset));
129 nv_wr32(priv, 0x060014, chan->vblank.value);
130
131 return NVKM_EVENT_DROP;
132}
133
134static int
121nvc0_software_context_ctor(struct nouveau_object *parent, 135nvc0_software_context_ctor(struct nouveau_object *parent,
122 struct nouveau_object *engine, 136 struct nouveau_object *engine,
123 struct nouveau_oclass *oclass, void *data, u32 size, 137 struct nouveau_oclass *oclass, void *data, u32 size,
@@ -132,6 +146,7 @@ nvc0_software_context_ctor(struct nouveau_object *parent,
132 return ret; 146 return ret;
133 147
134 chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12; 148 chan->base.vblank.channel = nv_gpuobj(parent->parent)->addr >> 12;
149 chan->base.vblank.event.func = nvc0_software_vblsem_release;
135 return 0; 150 return 0;
136} 151}
137 152
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index 47c4b3a5bd3a..92d3ab11d962 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -154,6 +154,14 @@ struct nve0_channel_ind_class {
154 u32 engine; 154 u32 engine;
155}; 155};
156 156
157/* 0046: NV04_DISP
158 */
159
160#define NV04_DISP_CLASS 0x00000046
161
162struct nv04_display_class {
163};
164
157/* 5070: NV50_DISP 165/* 5070: NV50_DISP
158 * 8270: NV84_DISP 166 * 8270: NV84_DISP
159 * 8370: NVA0_DISP 167 * 8370: NVA0_DISP
@@ -190,25 +198,6 @@ struct nve0_channel_ind_class {
190#define NV84_DISP_SOR_HDMI_PWR_REKEY 0x0000007f 198#define NV84_DISP_SOR_HDMI_PWR_REKEY 0x0000007f
191#define NV50_DISP_SOR_LVDS_SCRIPT 0x00013000 199#define NV50_DISP_SOR_LVDS_SCRIPT 0x00013000
192#define NV50_DISP_SOR_LVDS_SCRIPT_ID 0x0000ffff 200#define NV50_DISP_SOR_LVDS_SCRIPT_ID 0x0000ffff
193#define NV94_DISP_SOR_DP_TRAIN 0x00016000
194#define NV94_DISP_SOR_DP_TRAIN_OP 0xf0000000
195#define NV94_DISP_SOR_DP_TRAIN_OP_PATTERN 0x00000000
196#define NV94_DISP_SOR_DP_TRAIN_OP_INIT 0x10000000
197#define NV94_DISP_SOR_DP_TRAIN_OP_FINI 0x20000000
198#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD 0x00000001
199#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF 0x00000000
200#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON 0x00000001
201#define NV94_DISP_SOR_DP_TRAIN_PATTERN 0x00000003
202#define NV94_DISP_SOR_DP_TRAIN_PATTERN_DISABLED 0x00000000
203#define NV94_DISP_SOR_DP_LNKCTL 0x00016040
204#define NV94_DISP_SOR_DP_LNKCTL_FRAME 0x80000000
205#define NV94_DISP_SOR_DP_LNKCTL_FRAME_STD 0x00000000
206#define NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH 0x80000000
207#define NV94_DISP_SOR_DP_LNKCTL_WIDTH 0x00001f00
208#define NV94_DISP_SOR_DP_LNKCTL_COUNT 0x00000007
209#define NV94_DISP_SOR_DP_DRVCTL(l) ((l) * 0x40 + 0x00016100)
210#define NV94_DISP_SOR_DP_DRVCTL_VS 0x00000300
211#define NV94_DISP_SOR_DP_DRVCTL_PE 0x00000003
212 201
213#define NV50_DISP_DAC_MTHD 0x00020000 202#define NV50_DISP_DAC_MTHD 0x00020000
214#define NV50_DISP_DAC_MTHD_TYPE 0x0000f000 203#define NV50_DISP_DAC_MTHD_TYPE 0x0000f000
@@ -230,6 +219,23 @@ struct nve0_channel_ind_class {
230#define NV50_DISP_DAC_LOAD 0x0002000c 219#define NV50_DISP_DAC_LOAD 0x0002000c
231#define NV50_DISP_DAC_LOAD_VALUE 0x00000007 220#define NV50_DISP_DAC_LOAD_VALUE 0x00000007
232 221
222#define NV50_DISP_PIOR_MTHD 0x00030000
223#define NV50_DISP_PIOR_MTHD_TYPE 0x0000f000
224#define NV50_DISP_PIOR_MTHD_OR 0x00000003
225
226#define NV50_DISP_PIOR_PWR 0x00030000
227#define NV50_DISP_PIOR_PWR_STATE 0x00000001
228#define NV50_DISP_PIOR_PWR_STATE_ON 0x00000001
229#define NV50_DISP_PIOR_PWR_STATE_OFF 0x00000000
230#define NV50_DISP_PIOR_TMDS_PWR 0x00032000
231#define NV50_DISP_PIOR_TMDS_PWR_STATE 0x00000001
232#define NV50_DISP_PIOR_TMDS_PWR_STATE_ON 0x00000001
233#define NV50_DISP_PIOR_TMDS_PWR_STATE_OFF 0x00000000
234#define NV50_DISP_PIOR_DP_PWR 0x00036000
235#define NV50_DISP_PIOR_DP_PWR_STATE 0x00000001
236#define NV50_DISP_PIOR_DP_PWR_STATE_ON 0x00000001
237#define NV50_DISP_PIOR_DP_PWR_STATE_OFF 0x00000000
238
233struct nv50_display_class { 239struct nv50_display_class {
234}; 240};
235 241
diff --git a/drivers/gpu/drm/nouveau/core/include/core/client.h b/drivers/gpu/drm/nouveau/core/include/core/client.h
index 63acc0346ff2..c66eac513803 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/client.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/client.h
@@ -7,7 +7,7 @@ struct nouveau_client {
7 struct nouveau_namedb base; 7 struct nouveau_namedb base;
8 struct nouveau_handle *root; 8 struct nouveau_handle *root;
9 struct nouveau_object *device; 9 struct nouveau_object *device;
10 char name[16]; 10 char name[32];
11 u32 debug; 11 u32 debug;
12 struct nouveau_vm *vm; 12 struct nouveau_vm *vm;
13}; 13};
@@ -41,5 +41,6 @@ int nouveau_client_create_(const char *name, u64 device, const char *cfg,
41 41
42int nouveau_client_init(struct nouveau_client *); 42int nouveau_client_init(struct nouveau_client *);
43int nouveau_client_fini(struct nouveau_client *, bool suspend); 43int nouveau_client_fini(struct nouveau_client *, bool suspend);
44const char *nouveau_client_name(void *obj);
44 45
45#endif 46#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h
index e58b6f0984c1..d351a4e5819c 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/device.h
@@ -26,6 +26,7 @@ enum nv_subdev_type {
26 */ 26 */
27 NVDEV_SUBDEV_MXM, 27 NVDEV_SUBDEV_MXM,
28 NVDEV_SUBDEV_MC, 28 NVDEV_SUBDEV_MC,
29 NVDEV_SUBDEV_BUS,
29 NVDEV_SUBDEV_TIMER, 30 NVDEV_SUBDEV_TIMER,
30 NVDEV_SUBDEV_FB, 31 NVDEV_SUBDEV_FB,
31 NVDEV_SUBDEV_LTCG, 32 NVDEV_SUBDEV_LTCG,
diff --git a/drivers/gpu/drm/nouveau/core/include/core/enum.h b/drivers/gpu/drm/nouveau/core/include/core/enum.h
index e7b1e181943b..4fc62bb8c1f0 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/enum.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/enum.h
@@ -5,12 +5,13 @@ struct nouveau_enum {
5 u32 value; 5 u32 value;
6 const char *name; 6 const char *name;
7 const void *data; 7 const void *data;
8 u32 data2;
8}; 9};
9 10
10const struct nouveau_enum * 11const struct nouveau_enum *
11nouveau_enum_find(const struct nouveau_enum *, u32 value); 12nouveau_enum_find(const struct nouveau_enum *, u32 value);
12 13
13void 14const struct nouveau_enum *
14nouveau_enum_print(const struct nouveau_enum *en, u32 value); 15nouveau_enum_print(const struct nouveau_enum *en, u32 value);
15 16
16struct nouveau_bitfield { 17struct nouveau_bitfield {
diff --git a/drivers/gpu/drm/nouveau/core/include/core/event.h b/drivers/gpu/drm/nouveau/core/include/core/event.h
new file mode 100644
index 000000000000..9e094408f14e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/event.h
@@ -0,0 +1,36 @@
1#ifndef __NVKM_EVENT_H__
2#define __NVKM_EVENT_H__
3
4/* return codes from event handlers */
5#define NVKM_EVENT_DROP 0
6#define NVKM_EVENT_KEEP 1
7
8struct nouveau_eventh {
9 struct list_head head;
10 int (*func)(struct nouveau_eventh *, int index);
11};
12
13struct nouveau_event {
14 spinlock_t lock;
15
16 void *priv;
17 void (*enable)(struct nouveau_event *, int index);
18 void (*disable)(struct nouveau_event *, int index);
19
20 int index_nr;
21 struct {
22 struct list_head list;
23 int refs;
24 } index[];
25};
26
27int nouveau_event_create(int index_nr, struct nouveau_event **);
28void nouveau_event_destroy(struct nouveau_event **);
29void nouveau_event_trigger(struct nouveau_event *, int index);
30
31void nouveau_event_get(struct nouveau_event *, int index,
32 struct nouveau_eventh *);
33void nouveau_event_put(struct nouveau_event *, int index,
34 struct nouveau_eventh *);
35
36#endif
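The new event interface generalises the old vblank notifier list: a consumer embeds a struct nouveau_eventh, points func at its callback, and registers against one of the event's indices. nouveau_event_get() is expected to add the handler to index[i].list under the event lock and enable() the source on the first reference; nouveau_event_trigger() walks the list and unregisters any handler whose callback returns NVKM_EVENT_DROP, while NVKM_EVENT_KEEP leaves it armed. A minimal consumer sketch, assuming that behaviour (my_waiter and my_vblank_handler are hypothetical; the real pattern is the nvc0_software vblank handler earlier in this diff):

    /* hedged sketch: one-shot vblank wait on head 0 of disp->vblank */
    struct my_waiter {
        struct nouveau_eventh event;  /* embedded, so container_of() works */
        /* ... consumer state ... */
    };

    static int
    my_vblank_handler(struct nouveau_eventh *event, int head)
    {
        struct my_waiter *wait = container_of(event, struct my_waiter, event);
        /* ... react to the vblank on 'head' ... */
        return NVKM_EVENT_DROP;  /* one-shot: unregister after delivery */
    }

    struct my_waiter *wait = /* allocated by the caller */;
    wait->event.func = my_vblank_handler;  /* set before registering */
    nouveau_event_get(disp->vblank, 0, &wait->event);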
diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h
index 106bb19fdd9a..62e68baef087 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/object.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/object.h
@@ -136,7 +136,7 @@ static inline u8
136nv_ro08(void *obj, u64 addr) 136nv_ro08(void *obj, u64 addr)
137{ 137{
138 u8 data = nv_ofuncs(obj)->rd08(obj, addr); 138 u8 data = nv_ofuncs(obj)->rd08(obj, addr);
139 nv_spam(obj, "nv_ro08 0x%08x 0x%02x\n", addr, data); 139 nv_spam(obj, "nv_ro08 0x%08llx 0x%02x\n", addr, data);
140 return data; 140 return data;
141} 141}
142 142
@@ -144,7 +144,7 @@ static inline u16
144nv_ro16(void *obj, u64 addr) 144nv_ro16(void *obj, u64 addr)
145{ 145{
146 u16 data = nv_ofuncs(obj)->rd16(obj, addr); 146 u16 data = nv_ofuncs(obj)->rd16(obj, addr);
147 nv_spam(obj, "nv_ro16 0x%08x 0x%04x\n", addr, data); 147 nv_spam(obj, "nv_ro16 0x%08llx 0x%04x\n", addr, data);
148 return data; 148 return data;
149} 149}
150 150
@@ -152,28 +152,28 @@ static inline u32
152nv_ro32(void *obj, u64 addr) 152nv_ro32(void *obj, u64 addr)
153{ 153{
154 u32 data = nv_ofuncs(obj)->rd32(obj, addr); 154 u32 data = nv_ofuncs(obj)->rd32(obj, addr);
155 nv_spam(obj, "nv_ro32 0x%08x 0x%08x\n", addr, data); 155 nv_spam(obj, "nv_ro32 0x%08llx 0x%08x\n", addr, data);
156 return data; 156 return data;
157} 157}
158 158
159static inline void 159static inline void
160nv_wo08(void *obj, u64 addr, u8 data) 160nv_wo08(void *obj, u64 addr, u8 data)
161{ 161{
162 nv_spam(obj, "nv_wo08 0x%08x 0x%02x\n", addr, data); 162 nv_spam(obj, "nv_wo08 0x%08llx 0x%02x\n", addr, data);
163 nv_ofuncs(obj)->wr08(obj, addr, data); 163 nv_ofuncs(obj)->wr08(obj, addr, data);
164} 164}
165 165
166static inline void 166static inline void
167nv_wo16(void *obj, u64 addr, u16 data) 167nv_wo16(void *obj, u64 addr, u16 data)
168{ 168{
169 nv_spam(obj, "nv_wo16 0x%08x 0x%04x\n", addr, data); 169 nv_spam(obj, "nv_wo16 0x%08llx 0x%04x\n", addr, data);
170 nv_ofuncs(obj)->wr16(obj, addr, data); 170 nv_ofuncs(obj)->wr16(obj, addr, data);
171} 171}
172 172
173static inline void 173static inline void
174nv_wo32(void *obj, u64 addr, u32 data) 174nv_wo32(void *obj, u64 addr, u32 data)
175{ 175{
176 nv_spam(obj, "nv_wo32 0x%08x 0x%08x\n", addr, data); 176 nv_spam(obj, "nv_wo32 0x%08llx 0x%08x\n", addr, data);
177 nv_ofuncs(obj)->wr32(obj, addr, data); 177 nv_ofuncs(obj)->wr32(obj, addr, data);
178} 178}
179 179
diff --git a/drivers/gpu/drm/nouveau/core/include/core/printk.h b/drivers/gpu/drm/nouveau/core/include/core/printk.h
index 1d629664f32d..febed2ea5c80 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/printk.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/printk.h
@@ -15,7 +15,8 @@ struct nouveau_object;
15#define NV_PRINTK_TRACE KERN_DEBUG 15#define NV_PRINTK_TRACE KERN_DEBUG
16#define NV_PRINTK_SPAM KERN_DEBUG 16#define NV_PRINTK_SPAM KERN_DEBUG
17 17
18void nv_printk_(struct nouveau_object *, const char *, int, const char *, ...); 18void __printf(4, 5)
19nv_printk_(struct nouveau_object *, const char *, int, const char *, ...);
19 20
20#define nv_printk(o,l,f,a...) do { \ 21#define nv_printk(o,l,f,a...) do { \
21 if (NV_DBG_##l <= CONFIG_NOUVEAU_DEBUG) \ 22 if (NV_DBG_##l <= CONFIG_NOUVEAU_DEBUG) \
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
index 46948285f3e7..28da6772c095 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -4,18 +4,11 @@
4#include <core/object.h> 4#include <core/object.h>
5#include <core/engine.h> 5#include <core/engine.h>
6#include <core/device.h> 6#include <core/device.h>
7#include <core/event.h>
7 8
8struct nouveau_disp { 9struct nouveau_disp {
9 struct nouveau_engine base; 10 struct nouveau_engine base;
10 11 struct nouveau_event *vblank;
11 struct {
12 struct list_head list;
13 spinlock_t lock;
14 void (*notify)(void *, int);
15 void (*get)(void *, int);
16 void (*put)(void *, int);
17 void *data;
18 } vblank;
19}; 12};
20 13
21static inline struct nouveau_disp * 14static inline struct nouveau_disp *
@@ -24,16 +17,22 @@ nouveau_disp(void *obj)
24 return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_DISP]; 17 return (void *)nv_device(obj)->subdev[NVDEV_ENGINE_DISP];
25} 18}
26 19
27#define nouveau_disp_create(p,e,c,i,x,d) \ 20#define nouveau_disp_create(p,e,c,h,i,x,d) \
28 nouveau_engine_create((p), (e), (c), true, (i), (x), (d)) 21 nouveau_disp_create_((p), (e), (c), (h), (i), (x), \
29#define nouveau_disp_destroy(d) \ 22 sizeof(**d), (void **)d)
30 nouveau_engine_destroy(&(d)->base) 23#define nouveau_disp_destroy(d) ({ \
24 struct nouveau_disp *disp = (d); \
25 _nouveau_disp_dtor(nv_object(disp)); \
26})
31#define nouveau_disp_init(d) \ 27#define nouveau_disp_init(d) \
32 nouveau_engine_init(&(d)->base) 28 nouveau_engine_init(&(d)->base)
33#define nouveau_disp_fini(d,s) \ 29#define nouveau_disp_fini(d,s) \
34 nouveau_engine_fini(&(d)->base, (s)) 30 nouveau_engine_fini(&(d)->base, (s))
35 31
36#define _nouveau_disp_dtor _nouveau_engine_dtor 32int nouveau_disp_create_(struct nouveau_object *, struct nouveau_object *,
33 struct nouveau_oclass *, int heads,
34 const char *, const char *, int, void **);
35void _nouveau_disp_dtor(struct nouveau_object *);
37#define _nouveau_disp_init _nouveau_engine_init 36#define _nouveau_disp_init _nouveau_engine_init
38#define _nouveau_disp_fini _nouveau_engine_fini 37#define _nouveau_disp_fini _nouveau_engine_fini
39 38
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index f18846c8c6fe..b46c197709f3 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -65,6 +65,8 @@ struct nouveau_fifo_base {
65struct nouveau_fifo { 65struct nouveau_fifo {
66 struct nouveau_engine base; 66 struct nouveau_engine base;
67 67
68 struct nouveau_event *uevent;
69
68 struct nouveau_object **channel; 70 struct nouveau_object **channel;
69 spinlock_t lock; 71 spinlock_t lock;
70 u16 min; 72 u16 min;
@@ -92,6 +94,8 @@ int nouveau_fifo_create_(struct nouveau_object *, struct nouveau_object *,
92 struct nouveau_oclass *, int min, int max, 94 struct nouveau_oclass *, int min, int max,
93 int size, void **); 95 int size, void **);
94void nouveau_fifo_destroy(struct nouveau_fifo *); 96void nouveau_fifo_destroy(struct nouveau_fifo *);
97const char *
98nouveau_client_name_for_fifo_chid(struct nouveau_fifo *fifo, u32 chid);
95 99
96#define _nouveau_fifo_init _nouveau_engine_init 100#define _nouveau_fifo_init _nouveau_engine_init
97#define _nouveau_fifo_fini _nouveau_engine_fini 101#define _nouveau_fifo_fini _nouveau_engine_fini
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/software.h b/drivers/gpu/drm/nouveau/core/include/engine/software.h
index c945691c8564..45799487e573 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/software.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/software.h
@@ -3,17 +3,17 @@
3 3
4#include <core/engine.h> 4#include <core/engine.h>
5#include <core/engctx.h> 5#include <core/engctx.h>
6#include <core/event.h>
6 7
7struct nouveau_software_chan { 8struct nouveau_software_chan {
8 struct nouveau_engctx base; 9 struct nouveau_engctx base;
9 10
10 struct { 11 struct {
11 struct list_head head; 12 struct nouveau_eventh event;
12 u32 channel; 13 u32 channel;
13 u32 ctxdma; 14 u32 ctxdma;
14 u64 offset; 15 u64 offset;
15 u32 value; 16 u32 value;
16 u32 crtc;
17 } vblank; 17 } vblank;
18 18
19 int (*flip)(void *); 19 int (*flip)(void *);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
index b79025da581e..123270e9813a 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
@@ -16,6 +16,8 @@ enum dcb_output_type {
16 16
17struct dcb_output { 17struct dcb_output {
18 int index; /* may not be raw dcb index if merging has happened */ 18 int index; /* may not be raw dcb index if merging has happened */
19 u16 hasht;
20 u16 hashm;
19 enum dcb_output_type type; 21 enum dcb_output_type type;
20 uint8_t i2c_index; 22 uint8_t i2c_index;
21 uint8_t heads; 23 uint8_t heads;
@@ -25,6 +27,7 @@ struct dcb_output {
25 uint8_t or; 27 uint8_t or;
26 uint8_t link; 28 uint8_t link;
27 bool duallink_possible; 29 bool duallink_possible;
30 uint8_t extdev;
28 union { 31 union {
29 struct sor_conf { 32 struct sor_conf {
30 int link; 33 int link;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
index e6563b5cb08e..96d3364f6db3 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
@@ -1,17 +1,22 @@
1#ifndef __NVBIOS_GPIO_H__ 1#ifndef __NVBIOS_GPIO_H__
2#define __NVBIOS_GPIO_H__ 2#define __NVBIOS_GPIO_H__
3 3
4struct nouveau_bios;
5
6enum dcb_gpio_func_name { 4enum dcb_gpio_func_name {
7 DCB_GPIO_PANEL_POWER = 0x01, 5 DCB_GPIO_PANEL_POWER = 0x01,
8 DCB_GPIO_TVDAC0 = 0x0c, 6 DCB_GPIO_TVDAC0 = 0x0c,
9 DCB_GPIO_TVDAC1 = 0x2d, 7 DCB_GPIO_TVDAC1 = 0x2d,
10 DCB_GPIO_PWM_FAN = 0x09, 8 DCB_GPIO_FAN = 0x09,
11 DCB_GPIO_FAN_SENSE = 0x3d, 9 DCB_GPIO_FAN_SENSE = 0x3d,
12 DCB_GPIO_UNUSED = 0xff 10 DCB_GPIO_UNUSED = 0xff
13}; 11};
14 12
13#define DCB_GPIO_LOG_DIR 0x02
14#define DCB_GPIO_LOG_DIR_OUT 0x00
15#define DCB_GPIO_LOG_DIR_IN 0x02
16#define DCB_GPIO_LOG_VAL 0x01
17#define DCB_GPIO_LOG_VAL_LO 0x00
18#define DCB_GPIO_LOG_VAL_HI 0x01
19
15struct dcb_gpio_func { 20struct dcb_gpio_func {
16 u8 func; 21 u8 func;
17 u8 line; 22 u8 line;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
index 5079bedfd985..10b57a19a7de 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
@@ -15,7 +15,7 @@ struct dcb_i2c_entry {
15 enum dcb_i2c_type type; 15 enum dcb_i2c_type type;
16 u8 drive; 16 u8 drive;
17 u8 sense; 17 u8 sense;
18 u32 data; 18 u8 share;
19}; 19};
20 20
21u16 dcb_i2c_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len); 21u16 dcb_i2c_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
index a2c4296fc5f6..083541dbe9c8 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/therm.h
@@ -23,11 +23,27 @@ struct nvbios_therm_sensor {
23 struct nvbios_therm_threshold thrs_shutdown; 23 struct nvbios_therm_threshold thrs_shutdown;
24}; 24};
25 25
26/* no vbios have more than 6 */
27#define NOUVEAU_TEMP_FAN_TRIP_MAX 10
28struct nouveau_therm_trip_point {
29 int fan_duty;
30 int temp;
31 int hysteresis;
32};
33
26struct nvbios_therm_fan { 34struct nvbios_therm_fan {
27 u16 pwm_freq; 35 u16 pwm_freq;
28 36
29 u8 min_duty; 37 u8 min_duty;
30 u8 max_duty; 38 u8 max_duty;
39
40 u16 bump_period;
41 u16 slow_down_period;
42
43 struct nouveau_therm_trip_point trip[NOUVEAU_TEMP_FAN_TRIP_MAX];
44 u8 nr_fan_trip;
45 u8 linear_min_temp;
46 u8 linear_max_temp;
31}; 47};
32 48
33enum nvbios_therm_domain { 49enum nvbios_therm_domain {
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/xpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/xpio.h
new file mode 100644
index 000000000000..360baab52e4c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/xpio.h
@@ -0,0 +1,19 @@
1#ifndef __NVBIOS_XPIO_H__
2#define __NVBIOS_XPIO_H__
3
4#define NVBIOS_XPIO_FLAG_AUX 0x10
5#define NVBIOS_XPIO_FLAG_AUX0 0x00
6#define NVBIOS_XPIO_FLAG_AUX1 0x10
7
8struct nvbios_xpio {
9 u8 type;
10 u8 addr;
11 u8 flags;
12};
13
14u16 dcb_xpio_table(struct nouveau_bios *, u8 idx,
15 u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
16u16 dcb_xpio_parse(struct nouveau_bios *, u8 idx,
17 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_xpio *);
18
19#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bus.h b/drivers/gpu/drm/nouveau/core/include/subdev/bus.h
new file mode 100644
index 000000000000..7d88ec4a6d06
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bus.h
@@ -0,0 +1,41 @@
1#ifndef __NOUVEAU_BUS_H__
2#define __NOUVEAU_BUS_H__
3
4#include <core/subdev.h>
5#include <core/device.h>
6
7struct nouveau_bus_intr {
8 u32 stat;
9 u32 unit;
10};
11
12struct nouveau_bus {
13 struct nouveau_subdev base;
14};
15
16static inline struct nouveau_bus *
17nouveau_bus(void *obj)
18{
19 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_BUS];
20}
21
22#define nouveau_bus_create(p, e, o, d) \
23 nouveau_subdev_create_((p), (e), (o), 0, "PBUS", "master", \
24 sizeof(**d), (void **)d)
25#define nouveau_bus_destroy(p) \
26 nouveau_subdev_destroy(&(p)->base)
27#define nouveau_bus_init(p) \
28 nouveau_subdev_init(&(p)->base)
29#define nouveau_bus_fini(p, s) \
30 nouveau_subdev_fini(&(p)->base, (s))
31
32#define _nouveau_bus_dtor _nouveau_subdev_dtor
33#define _nouveau_bus_init _nouveau_subdev_init
34#define _nouveau_bus_fini _nouveau_subdev_fini
35
36extern struct nouveau_oclass nv04_bus_oclass;
37extern struct nouveau_oclass nv31_bus_oclass;
38extern struct nouveau_oclass nv50_bus_oclass;
39extern struct nouveau_oclass nvc0_bus_oclass;
40
41#endif
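PBUS becomes its own subdev so the 0x001100/0x001140 interrupt handling no longer has to live in per-chipset MC code. Wiring it up is expected to follow the same pattern as every other subdev in the per-device tables (those files sit outside this hunk; the line below is a sketch of the convention, not part of this patch):

    /* hedged sketch: per-device table entry, as done for MC, GPIO, etc. */
    device->oclass[NVDEV_SUBDEV_BUS] = &nv50_bus_oclass;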
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
index b75e8f18e52c..c85b9f1579ad 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
@@ -3,6 +3,7 @@
3 3
4#include <core/subdev.h> 4#include <core/subdev.h>
5#include <core/device.h> 5#include <core/device.h>
6#include <core/event.h>
6 7
7#include <subdev/bios.h> 8#include <subdev/bios.h>
8#include <subdev/bios/gpio.h> 9#include <subdev/bios/gpio.h>
@@ -10,28 +11,18 @@
10struct nouveau_gpio { 11struct nouveau_gpio {
11 struct nouveau_subdev base; 12 struct nouveau_subdev base;
12 13
14 struct nouveau_event *events;
15
13 /* hardware interfaces */ 16 /* hardware interfaces */
14 void (*reset)(struct nouveau_gpio *, u8 func); 17 void (*reset)(struct nouveau_gpio *, u8 func);
15 int (*drive)(struct nouveau_gpio *, int line, int dir, int out); 18 int (*drive)(struct nouveau_gpio *, int line, int dir, int out);
16 int (*sense)(struct nouveau_gpio *, int line); 19 int (*sense)(struct nouveau_gpio *, int line);
17 void (*irq_enable)(struct nouveau_gpio *, int line, bool);
18 20
19 /* software interfaces */ 21 /* software interfaces */
20 int (*find)(struct nouveau_gpio *, int idx, u8 tag, u8 line, 22 int (*find)(struct nouveau_gpio *, int idx, u8 tag, u8 line,
21 struct dcb_gpio_func *); 23 struct dcb_gpio_func *);
22 int (*set)(struct nouveau_gpio *, int idx, u8 tag, u8 line, int state); 24 int (*set)(struct nouveau_gpio *, int idx, u8 tag, u8 line, int state);
23 int (*get)(struct nouveau_gpio *, int idx, u8 tag, u8 line); 25 int (*get)(struct nouveau_gpio *, int idx, u8 tag, u8 line);
24 int (*irq)(struct nouveau_gpio *, int idx, u8 tag, u8 line, bool on);
25
26 /* interrupt handling */
27 struct list_head isr;
28 spinlock_t lock;
29
30 void (*isr_run)(struct nouveau_gpio *, int idx, u32 mask);
31 int (*isr_add)(struct nouveau_gpio *, int idx, u8 tag, u8 line,
32 void (*)(void *, int state), void *data);
33 void (*isr_del)(struct nouveau_gpio *, int idx, u8 tag, u8 line,
34 void (*)(void *, int state), void *data);
35}; 26};
36 27
37static inline struct nouveau_gpio * 28static inline struct nouveau_gpio *
@@ -40,25 +31,23 @@ nouveau_gpio(void *obj)
40 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_GPIO]; 31 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_GPIO];
41} 32}
42 33
43#define nouveau_gpio_create(p,e,o,d) \ 34#define nouveau_gpio_create(p,e,o,l,d) \
44 nouveau_gpio_create_((p), (e), (o), sizeof(**d), (void **)d) 35 nouveau_gpio_create_((p), (e), (o), (l), sizeof(**d), (void **)d)
45#define nouveau_gpio_destroy(p) \ 36#define nouveau_gpio_destroy(p) ({ \
46 nouveau_subdev_destroy(&(p)->base) 37 struct nouveau_gpio *gpio = (p); \
38 _nouveau_gpio_dtor(nv_object(gpio)); \
39})
47#define nouveau_gpio_fini(p,s) \ 40#define nouveau_gpio_fini(p,s) \
48 nouveau_subdev_fini(&(p)->base, (s)) 41 nouveau_subdev_fini(&(p)->base, (s))
49 42
50int nouveau_gpio_create_(struct nouveau_object *, struct nouveau_object *, 43int nouveau_gpio_create_(struct nouveau_object *, struct nouveau_object *,
51 struct nouveau_oclass *, int, void **); 44 struct nouveau_oclass *, int, int, void **);
52int nouveau_gpio_init(struct nouveau_gpio *); 45void _nouveau_gpio_dtor(struct nouveau_object *);
46int nouveau_gpio_init(struct nouveau_gpio *);
53 47
54extern struct nouveau_oclass nv10_gpio_oclass; 48extern struct nouveau_oclass nv10_gpio_oclass;
55extern struct nouveau_oclass nv50_gpio_oclass; 49extern struct nouveau_oclass nv50_gpio_oclass;
56extern struct nouveau_oclass nvd0_gpio_oclass; 50extern struct nouveau_oclass nvd0_gpio_oclass;
57 51extern struct nouveau_oclass nve0_gpio_oclass;
58void nv50_gpio_dtor(struct nouveau_object *);
59int nv50_gpio_init(struct nouveau_object *);
60int nv50_gpio_fini(struct nouveau_object *, bool);
61void nv50_gpio_intr(struct nouveau_subdev *);
62void nv50_gpio_irq_enable(struct nouveau_gpio *, int line, bool);
63 52
64#endif 53#endif
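With isr_add()/isr_del() gone, GPIO change notifications flow through the generic event mechanism, indexed by GPIO line. Assuming the implementation triggers gpio->events with the line number when a watched line changes state, a consumer registers much like the vblank case (watch and my_gpio_handler are hypothetical; line would come from gpio->find() in real code):

    static int
    my_gpio_handler(struct nouveau_eventh *event, int line)
    {
        /* re-sample with gpio->get()/sense() and react */
        return NVKM_EVENT_KEEP;  /* persistent: stay registered */
    }

    struct nouveau_eventh watch = { .func = my_gpio_handler };
    nouveau_event_get(gpio->events, line, &watch);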
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index b93ab01e3785..888384c0bed8 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -10,23 +10,59 @@
10#define NV_I2C_PORT(n) (0x00 + (n)) 10#define NV_I2C_PORT(n) (0x00 + (n))
11#define NV_I2C_DEFAULT(n) (0x80 + (n)) 11#define NV_I2C_DEFAULT(n) (0x80 + (n))
12 12
13#define NV_I2C_TYPE_DCBI2C(n) (0x0000 | (n))
14#define NV_I2C_TYPE_EXTDDC(e) (0x0005 | (e) << 8)
15#define NV_I2C_TYPE_EXTAUX(e) (0x0006 | (e) << 8)
16
13struct nouveau_i2c_port { 17struct nouveau_i2c_port {
18 struct nouveau_object base;
14 struct i2c_adapter adapter; 19 struct i2c_adapter adapter;
15 struct nouveau_i2c *i2c; 20
16 struct i2c_algo_bit_data bit;
17 struct list_head head; 21 struct list_head head;
18 u8 index; 22 u8 index;
19 u8 type; 23
20 u32 dcb; 24 const struct nouveau_i2c_func *func;
21 u32 drive; 25};
22 u32 sense; 26
23 u32 state; 27struct nouveau_i2c_func {
28 void (*acquire)(struct nouveau_i2c_port *);
29 void (*release)(struct nouveau_i2c_port *);
30
31 void (*drive_scl)(struct nouveau_i2c_port *, int);
32 void (*drive_sda)(struct nouveau_i2c_port *, int);
33 int (*sense_scl)(struct nouveau_i2c_port *);
34 int (*sense_sda)(struct nouveau_i2c_port *);
35
36 int (*aux)(struct nouveau_i2c_port *, u8, u32, u8 *, u8);
37 int (*pattern)(struct nouveau_i2c_port *, int pattern);
38 int (*lnk_ctl)(struct nouveau_i2c_port *, int nr, int bw, bool enh);
39 int (*drv_ctl)(struct nouveau_i2c_port *, int lane, int sw, int pe);
24}; 40};
25 41
42#define nouveau_i2c_port_create(p,e,o,i,a,d) \
43 nouveau_i2c_port_create_((p), (e), (o), (i), (a), \
44 sizeof(**d), (void **)d)
45#define nouveau_i2c_port_destroy(p) ({ \
46 struct nouveau_i2c_port *port = (p); \
47 _nouveau_i2c_port_dtor(nv_object(port)); \
48})
49#define nouveau_i2c_port_init(p) \
50 nouveau_object_init(&(p)->base)
51#define nouveau_i2c_port_fini(p,s) \
52 nouveau_object_fini(&(p)->base, (s))
53
54int nouveau_i2c_port_create_(struct nouveau_object *, struct nouveau_object *,
55 struct nouveau_oclass *, u8,
56 const struct i2c_algorithm *, int, void **);
57void _nouveau_i2c_port_dtor(struct nouveau_object *);
58#define _nouveau_i2c_port_init nouveau_object_init
59#define _nouveau_i2c_port_fini nouveau_object_fini
60
26struct nouveau_i2c { 61struct nouveau_i2c {
27 struct nouveau_subdev base; 62 struct nouveau_subdev base;
28 63
29 struct nouveau_i2c_port *(*find)(struct nouveau_i2c *, u8 index); 64 struct nouveau_i2c_port *(*find)(struct nouveau_i2c *, u8 index);
65 struct nouveau_i2c_port *(*find_type)(struct nouveau_i2c *, u16 type);
30 int (*identify)(struct nouveau_i2c *, int index, 66 int (*identify)(struct nouveau_i2c *, int index,
31 const char *what, struct i2c_board_info *, 67 const char *what, struct i2c_board_info *,
32 bool (*match)(struct nouveau_i2c_port *, 68 bool (*match)(struct nouveau_i2c_port *,
@@ -40,21 +76,76 @@ nouveau_i2c(void *obj)
40 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_I2C]; 76 return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_I2C];
41} 77}
42 78
43extern struct nouveau_oclass nouveau_i2c_oclass; 79#define nouveau_i2c_create(p,e,o,s,d) \
80 nouveau_i2c_create_((p), (e), (o), (s), sizeof(**d), (void **)d)
81#define nouveau_i2c_destroy(p) ({ \
82 struct nouveau_i2c *i2c = (p); \
83 _nouveau_i2c_dtor(nv_object(i2c)); \
84})
85#define nouveau_i2c_init(p) ({ \
86 struct nouveau_i2c *i2c = (p); \
87 _nouveau_i2c_init(nv_object(i2c)); \
88})
89#define nouveau_i2c_fini(p,s) ({ \
90 struct nouveau_i2c *i2c = (p); \
91 _nouveau_i2c_fini(nv_object(i2c), (s)); \
92})
44 93
45void nouveau_i2c_drive_scl(void *, int); 94int nouveau_i2c_create_(struct nouveau_object *, struct nouveau_object *,
46void nouveau_i2c_drive_sda(void *, int); 95 struct nouveau_oclass *, struct nouveau_oclass *,
47int nouveau_i2c_sense_scl(void *); 96 int, void **);
48int nouveau_i2c_sense_sda(void *); 97void _nouveau_i2c_dtor(struct nouveau_object *);
98int _nouveau_i2c_init(struct nouveau_object *);
99int _nouveau_i2c_fini(struct nouveau_object *, bool);
49 100
50int nv_rdi2cr(struct nouveau_i2c_port *, u8 addr, u8 reg); 101extern struct nouveau_oclass nv04_i2c_oclass;
51int nv_wri2cr(struct nouveau_i2c_port *, u8 addr, u8 reg, u8 val); 102extern struct nouveau_oclass nv4e_i2c_oclass;
52bool nv_probe_i2c(struct nouveau_i2c_port *, u8 addr); 103extern struct nouveau_oclass nv50_i2c_oclass;
53 104extern struct nouveau_oclass nv94_i2c_oclass;
54int nv_rdaux(struct nouveau_i2c_port *, u32 addr, u8 *data, u8 size); 105extern struct nouveau_oclass nvd0_i2c_oclass;
55int nv_wraux(struct nouveau_i2c_port *, u32 addr, u8 *data, u8 size); 106extern struct nouveau_oclass nouveau_anx9805_sclass[];
56 107
57extern const struct i2c_algorithm nouveau_i2c_bit_algo; 108extern const struct i2c_algorithm nouveau_i2c_bit_algo;
58extern const struct i2c_algorithm nouveau_i2c_aux_algo; 109extern const struct i2c_algorithm nouveau_i2c_aux_algo;
59 110
111static inline int
112nv_rdi2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg)
113{
114 u8 val;
115 struct i2c_msg msgs[] = {
116 { .addr = addr, .flags = 0, .len = 1, .buf = &reg },
117 { .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = &val },
118 };
119
120 int ret = i2c_transfer(&port->adapter, msgs, 2);
121 if (ret != 2)
122 return -EIO;
123
124 return val;
125}
126
127static inline int
128nv_wri2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg, u8 val)
129{
130 u8 buf[2] = { reg, val };
131 struct i2c_msg msgs[] = {
132 { .addr = addr, .flags = 0, .len = 2, .buf = buf },
133 };
134
135 int ret = i2c_transfer(&port->adapter, msgs, 1);
136 if (ret != 1)
137 return -EIO;
138
139 return 0;
140}
141
142static inline bool
143nv_probe_i2c(struct nouveau_i2c_port *port, u8 addr)
144{
145 return nv_rdi2cr(port, addr, 0) >= 0;
146}
147
148int nv_rdaux(struct nouveau_i2c_port *, u32 addr, u8 *data, u8 size);
149int nv_wraux(struct nouveau_i2c_port *, u32 addr, u8 *data, u8 size);
150
60#endif 151#endif
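nv_rdi2cr() and nv_wri2cr() are now inline: a read is a combined register-write/byte-read transfer returning the byte (or -EIO), a write is a single two-byte transfer, and nv_probe_i2c() simply checks that register 0 is readable. Typical use (the device address and port type below are illustrative, not taken from this patch):

    /* hedged sketch: probe a device on an external DDC port, read reg 0 */
    struct nouveau_i2c_port *port = i2c->find_type(i2c, NV_I2C_TYPE_EXTDDC(0));
    if (port && nv_probe_i2c(port, 0x38)) {
        int id = nv_rdi2cr(port, 0x38, 0x00);
        if (id >= 0) {
            /* id holds the register contents */
        }
    }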
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
index faee569fd458..6b17b614629f 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/therm.h
@@ -4,10 +4,10 @@
4#include <core/device.h> 4#include <core/device.h>
5#include <core/subdev.h> 5#include <core/subdev.h>
6 6
7enum nouveau_therm_fan_mode { 7enum nouveau_therm_mode {
8 FAN_CONTROL_NONE = 0, 8 NOUVEAU_THERM_CTRL_NONE = 0,
9 FAN_CONTROL_MANUAL = 1, 9 NOUVEAU_THERM_CTRL_MANUAL = 1,
10 FAN_CONTROL_NR, 10 NOUVEAU_THERM_CTRL_AUTO = 2,
11}; 11};
12 12
13enum nouveau_therm_attr_type { 13enum nouveau_therm_attr_type {
@@ -28,6 +28,11 @@ enum nouveau_therm_attr_type {
28struct nouveau_therm { 28struct nouveau_therm {
29 struct nouveau_subdev base; 29 struct nouveau_subdev base;
30 30
31 int (*pwm_ctrl)(struct nouveau_therm *, int line, bool);
32 int (*pwm_get)(struct nouveau_therm *, int line, u32 *, u32 *);
33 int (*pwm_set)(struct nouveau_therm *, int line, u32, u32);
34 int (*pwm_clock)(struct nouveau_therm *);
35
31 int (*fan_get)(struct nouveau_therm *); 36 int (*fan_get)(struct nouveau_therm *);
32 int (*fan_set)(struct nouveau_therm *, int); 37 int (*fan_set)(struct nouveau_therm *, int);
33 int (*fan_sense)(struct nouveau_therm *); 38 int (*fan_sense)(struct nouveau_therm *);
@@ -46,13 +51,29 @@ nouveau_therm(void *obj)
46} 51}
47 52
48#define nouveau_therm_create(p,e,o,d) \ 53#define nouveau_therm_create(p,e,o,d) \
49 nouveau_subdev_create((p), (e), (o), 0, "THERM", "therm", d) 54 nouveau_therm_create_((p), (e), (o), sizeof(**d), (void **)d)
50#define nouveau_therm_destroy(p) \ 55#define nouveau_therm_destroy(p) ({ \
51 nouveau_subdev_destroy(&(p)->base) 56 struct nouveau_therm *therm = (p); \
57 _nouveau_therm_dtor(nv_object(therm)); \
58})
59#define nouveau_therm_init(p) ({ \
60 struct nouveau_therm *therm = (p); \
61 _nouveau_therm_init(nv_object(therm)); \
62})
63#define nouveau_therm_fini(p,s) ({ \
64 struct nouveau_therm *therm = (p); \
65 _nouveau_therm_fini(nv_object(therm), (s)); \
66})
52 67
53#define _nouveau_therm_dtor _nouveau_subdev_dtor 68int nouveau_therm_create_(struct nouveau_object *, struct nouveau_object *,
69 struct nouveau_oclass *, int, void **);
70void _nouveau_therm_dtor(struct nouveau_object *);
71int _nouveau_therm_init(struct nouveau_object *);
72int _nouveau_therm_fini(struct nouveau_object *, bool);
54 73
55extern struct nouveau_oclass nv40_therm_oclass; 74extern struct nouveau_oclass nv40_therm_oclass;
56extern struct nouveau_oclass nv50_therm_oclass; 75extern struct nouveau_oclass nv50_therm_oclass;
76extern struct nouveau_oclass nva3_therm_oclass;
77extern struct nouveau_oclass nvd0_therm_oclass;
57 78
58#endif 79#endif
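The new pwm_ctrl()/pwm_get()/pwm_set()/pwm_clock() hooks split raw fan-PWM access out from fan policy: chipset code provides the register accesses while common therm code owns the control modes above them. A chipset constructor would be expected to fill them in roughly as below (the nv50_fan_* names are assumptions, not visible in this hunk):

    priv->base.pwm_ctrl  = nv50_fan_pwm_ctrl;
    priv->base.pwm_get   = nv50_fan_pwm_get;
    priv->base.pwm_set   = nv50_fan_pwm_set;
    priv->base.pwm_clock = nv50_fan_pwm_clock;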
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/timer.h b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
index c24ec8ab3db4..e465d158d352 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
@@ -10,6 +10,14 @@ struct nouveau_alarm {
10 void (*func)(struct nouveau_alarm *); 10 void (*func)(struct nouveau_alarm *);
11}; 11};
12 12
13static inline void
14nouveau_alarm_init(struct nouveau_alarm *alarm,
15 void (*func)(struct nouveau_alarm *))
16{
17 INIT_LIST_HEAD(&alarm->head);
18 alarm->func = func;
19}
20
13bool nouveau_timer_wait_eq(void *, u64 nsec, u32 addr, u32 mask, u32 data); 21bool nouveau_timer_wait_eq(void *, u64 nsec, u32 addr, u32 mask, u32 data);
14bool nouveau_timer_wait_ne(void *, u64 nsec, u32 addr, u32 mask, u32 data); 22bool nouveau_timer_wait_ne(void *, u64 nsec, u32 addr, u32 mask, u32 data);
15bool nouveau_timer_wait_cb(void *, u64 nsec, bool (*func)(void *), void *data); 23bool nouveau_timer_wait_cb(void *, u64 nsec, bool (*func)(void *), void *data);
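nouveau_alarm_init() just pairs the callback with an initialised list head so the alarm can be queued safely. Assuming the nouveau_timer_alarm() entry point declared further down this header, usage looks like:

    /* hedged sketch: run my_alarm_func roughly 100ms from now */
    static void my_alarm_func(struct nouveau_alarm *alarm) { /* ... */ }

    struct nouveau_alarm alarm;
    nouveau_alarm_init(&alarm, my_alarm_func);
    nouveau_timer_alarm(priv, 100000000, &alarm);  /* delay in nsec */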
diff --git a/drivers/gpu/drm/nouveau/core/os.h b/drivers/gpu/drm/nouveau/core/os.h
index cfe3b9cad156..eb496033b55c 100644
--- a/drivers/gpu/drm/nouveau/core/os.h
+++ b/drivers/gpu/drm/nouveau/core/os.h
@@ -16,6 +16,7 @@
16#include <linux/vmalloc.h> 16#include <linux/vmalloc.h>
17#include <linux/acpi.h> 17#include <linux/acpi.h>
18#include <linux/dmi.h> 18#include <linux/dmi.h>
19#include <linux/reboot.h>
19 20
20#include <asm/unaligned.h> 21#include <asm/unaligned.h>
21 22
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index f621f69fa1a2..e816f06637a7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -172,7 +172,7 @@ out:
172 nv_wr32(bios, pcireg, access); 172 nv_wr32(bios, pcireg, access);
173} 173}
174 174
175#if defined(CONFIG_ACPI) 175#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
176int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len); 176int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
177bool nouveau_acpi_rom_supported(struct pci_dev *pdev); 177bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
178#else 178#else
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
index 0fd87df99dd6..2d9b9d7a7992 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
@@ -107,6 +107,18 @@ dcb_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
107 return 0x0000; 107 return 0x0000;
108} 108}
109 109
110static inline u16
111dcb_outp_hasht(struct dcb_output *outp)
112{
113 return (outp->extdev << 8) | (outp->location << 4) | outp->type;
114}
115
116static inline u16
117dcb_outp_hashm(struct dcb_output *outp)
118{
119 return (outp->heads << 8) | (outp->link << 6) | outp->or;
120}
121
110u16 122u16
111dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len, 123dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
112 struct dcb_output *outp) 124 struct dcb_output *outp)
@@ -135,34 +147,28 @@ dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
135 case DCB_OUTPUT_DP: 147 case DCB_OUTPUT_DP:
136 outp->link = (conf & 0x00000030) >> 4; 148 outp->link = (conf & 0x00000030) >> 4;
137 outp->sorconf.link = outp->link; /*XXX*/ 149 outp->sorconf.link = outp->link; /*XXX*/
150 outp->extdev = 0x00;
151 if (outp->location != 0)
152 outp->extdev = (conf & 0x0000ff00) >> 8;
138 break; 153 break;
139 default: 154 default:
140 break; 155 break;
141 } 156 }
142 } 157 }
158
159 outp->hasht = dcb_outp_hasht(outp);
160 outp->hashm = dcb_outp_hashm(outp);
143 } 161 }
144 return dcb; 162 return dcb;
145} 163}
146 164
147static inline u16
148dcb_outp_hasht(struct dcb_output *outp)
149{
150 return outp->type;
151}
152
153static inline u16
154dcb_outp_hashm(struct dcb_output *outp)
155{
156 return (outp->heads << 8) | (outp->link << 6) | outp->or;
157}
158
159u16 165u16
160dcb_outp_match(struct nouveau_bios *bios, u16 type, u16 mask, 166dcb_outp_match(struct nouveau_bios *bios, u16 type, u16 mask,
161 u8 *ver, u8 *len, struct dcb_output *outp) 167 u8 *ver, u8 *len, struct dcb_output *outp)
162{ 168{
163 u16 dcb, idx = 0; 169 u16 dcb, idx = 0;
164 while ((dcb = dcb_outp_parse(bios, idx++, ver, len, outp))) { 170 while ((dcb = dcb_outp_parse(bios, idx++, ver, len, outp))) {
165 if (dcb_outp_hasht(outp) == type) { 171 if ((dcb_outp_hasht(outp) & 0x00ff) == (type & 0x00ff)) {
166 if ((dcb_outp_hashm(outp) & mask) == mask) 172 if ((dcb_outp_hashm(outp) & mask) == mask)
167 break; 173 break;
168 } 174 }
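To make the relocated hash helpers concrete: hasht packs (extdev << 8) | (location << 4) | type, and hashm packs (heads << 8) | (link << 6) | or. For an off-chip DP output (type 0x6, location 1) behind external-encoder entry 0x01, driven from head 1 on link 1, OR 1 (values illustrative):

    hasht = (0x01 << 8) | (1 << 4) | 0x6;  /* = 0x0116 */
    hashm = (0x1 << 8) | (0x1 << 6) | 0x1; /* = 0x0141 */

dcb_outp_match() now compares only the low byte of hasht, so a lookup by DCB type matches whether the encoder is on- or off-chip; the mask argument still filters on head/link/OR.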
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c b/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
index 5afb568b2d69..b2a676e53580 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
@@ -48,7 +48,7 @@ extdev_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
48 return extdev + *hdr; 48 return extdev + *hdr;
49} 49}
50 50
51u16 51static u16
52nvbios_extdev_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len) 52nvbios_extdev_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
53{ 53{
54 u8 hdr, cnt; 54 u8 hdr, cnt;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
index c84e93fa6d95..172a4f999990 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
@@ -25,6 +25,7 @@
25#include <subdev/bios.h> 25#include <subdev/bios.h>
26#include <subdev/bios/dcb.h> 26#include <subdev/bios/dcb.h>
27#include <subdev/bios/gpio.h> 27#include <subdev/bios/gpio.h>
28#include <subdev/bios/xpio.h>
28 29
29u16 30u16
30dcb_gpio_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) 31dcb_gpio_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
@@ -60,8 +61,14 @@ dcb_gpio_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
60u16 61u16
61dcb_gpio_entry(struct nouveau_bios *bios, int idx, int ent, u8 *ver, u8 *len) 62dcb_gpio_entry(struct nouveau_bios *bios, int idx, int ent, u8 *ver, u8 *len)
62{ 63{
63 u8 hdr, cnt; 64 u8 hdr, cnt, xver; /* use gpio version for xpio entry parsing */
64 u16 gpio = !idx ? dcb_gpio_table(bios, ver, &hdr, &cnt, len) : 0x0000; 65 u16 gpio;
66
67 if (!idx--)
68 gpio = dcb_gpio_table(bios, ver, &hdr, &cnt, len);
69 else
70 gpio = dcb_xpio_table(bios, idx, &xver, &hdr, &cnt, len);
71
65 if (gpio && ent < cnt) 72 if (gpio && ent < cnt)
66 return gpio + hdr + (ent * *len); 73 return gpio + hdr + (ent * *len);
67 return 0x0000; 74 return 0x0000;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c b/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
index ad577db83766..cfb9288c6d28 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
@@ -70,12 +70,12 @@ dcb_i2c_parse(struct nouveau_bios *bios, u8 idx, struct dcb_i2c_entry *info)
70 u8 ver, len; 70 u8 ver, len;
71 u16 ent = dcb_i2c_entry(bios, idx, &ver, &len); 71 u16 ent = dcb_i2c_entry(bios, idx, &ver, &len);
72 if (ent) { 72 if (ent) {
73 info->data = nv_ro32(bios, ent + 0); 73 info->type = nv_ro08(bios, ent + 3);
74 info->type = nv_ro08(bios, ent + 3); 74 info->share = DCB_I2C_UNUSED;
75 if (ver < 0x30) { 75 if (ver < 0x30) {
76 info->type &= 0x07; 76 info->type &= 0x07;
77 if (info->type == 0x07) 77 if (info->type == 0x07)
78 info->type = 0xff; 78 info->type = DCB_I2C_UNUSED;
79 } 79 }
80 80
81 switch (info->type) { 81 switch (info->type) {
@@ -88,7 +88,11 @@ dcb_i2c_parse(struct nouveau_bios *bios, u8 idx, struct dcb_i2c_entry *info)
88 return 0; 88 return 0;
89 case DCB_I2C_NVIO_BIT: 89 case DCB_I2C_NVIO_BIT:
90 case DCB_I2C_NVIO_AUX: 90 case DCB_I2C_NVIO_AUX:
91 info->drive = nv_ro08(bios, ent + 0); 91 info->drive = nv_ro08(bios, ent + 0) & 0x0f;
92 if (nv_ro08(bios, ent + 1) & 0x01) {
93 info->share = nv_ro08(bios, ent + 1) >> 1;
94 info->share &= 0x0f;
95 }
92 return 0; 96 return 0;
93 case DCB_I2C_UNUSED: 97 case DCB_I2C_UNUSED:
94 return 0; 98 return 0;
@@ -121,7 +125,8 @@ dcb_i2c_parse(struct nouveau_bios *bios, u8 idx, struct dcb_i2c_entry *info)
121 if (!info->sense) info->sense = 0x36; 125 if (!info->sense) info->sense = 0x36;
122 } 126 }
123 127
124 info->type = DCB_I2C_NV04_BIT; 128 info->type = DCB_I2C_NV04_BIT;
129 info->share = DCB_I2C_UNUSED;
125 return 0; 130 return 0;
126 } 131 }
127 132
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 690ed438b2ad..2cc1e6a5eb6a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -231,6 +231,11 @@ init_i2c(struct nvbios_init *init, int index)
231 return NULL; 231 return NULL;
232 } 232 }
233 233
234 if (index == -2 && init->outp->location) {
235 index = NV_I2C_TYPE_EXTAUX(init->outp->extdev);
236 return i2c->find_type(i2c, index);
237 }
238
234 index = init->outp->i2c_index; 239 index = init->outp->i2c_index;
235 } 240 }
236 241
@@ -258,7 +263,7 @@ init_wri2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg, u8 val)
258static int 263static int
259init_rdauxr(struct nvbios_init *init, u32 addr) 264init_rdauxr(struct nvbios_init *init, u32 addr)
260{ 265{
261 struct nouveau_i2c_port *port = init_i2c(init, -1); 266 struct nouveau_i2c_port *port = init_i2c(init, -2);
262 u8 data; 267 u8 data;
263 268
264 if (port && init_exec(init)) { 269 if (port && init_exec(init)) {
@@ -274,7 +279,7 @@ init_rdauxr(struct nvbios_init *init, u32 addr)
274static int 279static int
275init_wrauxr(struct nvbios_init *init, u32 addr, u8 data) 280init_wrauxr(struct nvbios_init *init, u32 addr, u8 data)
276{ 281{
277 struct nouveau_i2c_port *port = init_i2c(init, -1); 282 struct nouveau_i2c_port *port = init_i2c(init, -2);
278 if (port && init_exec(init)) 283 if (port && init_exec(init))
279 return nv_wraux(port, addr, &data, 1); 284 return nv_wraux(port, addr, &data, 1);
280 return -ENODEV; 285 return -ENODEV;
@@ -1816,7 +1821,7 @@ init_ram_restrict_zm_reg_group(struct nvbios_init *init)
1816 u8 i, j; 1821 u8 i, j;
1817 1822
1818 trace("RAM_RESTRICT_ZM_REG_GROUP\t" 1823 trace("RAM_RESTRICT_ZM_REG_GROUP\t"
1819 "R[%08x] 0x%02x 0x%02x\n", addr, incr, num); 1824 "R[0x%08x] 0x%02x 0x%02x\n", addr, incr, num);
1820 init->offset += 7; 1825 init->offset += 7;
1821 1826
1822 for (i = 0; i < num; i++) { 1827 for (i = 0; i < num; i++) {
@@ -1849,7 +1854,7 @@ init_copy_zm_reg(struct nvbios_init *init)
1849 u32 sreg = nv_ro32(bios, init->offset + 1); 1854 u32 sreg = nv_ro32(bios, init->offset + 1);
1850 u32 dreg = nv_ro32(bios, init->offset + 5); 1855 u32 dreg = nv_ro32(bios, init->offset + 5);
1851 1856
1852 trace("COPY_ZM_REG\tR[0x%06x] = R[0x%06x]\n", sreg, dreg); 1857 trace("COPY_ZM_REG\tR[0x%06x] = R[0x%06x]\n", dreg, sreg);
1853 init->offset += 9; 1858 init->offset += 9;
1854 1859
1855 init_wr32(init, dreg, init_rd32(init, sreg)); 1860 init_wr32(init, dreg, init_rd32(init, sreg));
@@ -1866,7 +1871,7 @@ init_zm_reg_group(struct nvbios_init *init)
1866 u32 addr = nv_ro32(bios, init->offset + 1); 1871 u32 addr = nv_ro32(bios, init->offset + 1);
1867 u8 count = nv_ro08(bios, init->offset + 5); 1872 u8 count = nv_ro08(bios, init->offset + 5);
1868 1873
1869 trace("ZM_REG_GROUP\tR[0x%06x] =\n"); 1874 trace("ZM_REG_GROUP\tR[0x%06x] =\n", addr);
1870 init->offset += 6; 1875 init->offset += 6;
1871 1876
1872 while (count--) { 1877 while (count--) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c b/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
index 862a08a2ae27..22a20573ed1b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
@@ -55,7 +55,7 @@ therm_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *len, u8 *cnt)
55 return therm + nv_ro08(bios, therm + 1); 55 return therm + nv_ro08(bios, therm + 1);
56} 56}
57 57
58u16 58static u16
59nvbios_therm_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len) 59nvbios_therm_entry(struct nouveau_bios *bios, int idx, u8 *ver, u8 *len)
60{ 60{
61 u8 hdr, cnt; 61 u8 hdr, cnt;
@@ -155,10 +155,15 @@ int
155nvbios_therm_fan_parse(struct nouveau_bios *bios, 155nvbios_therm_fan_parse(struct nouveau_bios *bios,
156 struct nvbios_therm_fan *fan) 156 struct nvbios_therm_fan *fan)
157{ 157{
158 struct nouveau_therm_trip_point *cur_trip = NULL;
158 u8 ver, len, i; 159 u8 ver, len, i;
159 u16 entry; 160 u16 entry;
160 161
162 uint8_t duty_lut[] = { 0, 0, 25, 0, 40, 0, 50, 0,
163 75, 0, 85, 0, 100, 0, 100, 0 };
164
161 i = 0; 165 i = 0;
166 fan->nr_fan_trip = 0;
162 while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) { 167 while ((entry = nvbios_therm_entry(bios, i++, &ver, &len))) {
163 s16 value = nv_ro16(bios, entry + 1); 168 s16 value = nv_ro16(bios, entry + 1);
164 169
@@ -167,9 +172,30 @@ nvbios_therm_fan_parse(struct nouveau_bios *bios,
167 fan->min_duty = value & 0xff; 172 fan->min_duty = value & 0xff;
168 fan->max_duty = (value & 0xff00) >> 8; 173 fan->max_duty = (value & 0xff00) >> 8;
169 break; 174 break;
175 case 0x24:
176 fan->nr_fan_trip++;
177 cur_trip = &fan->trip[fan->nr_fan_trip - 1];
178 cur_trip->hysteresis = value & 0xf;
179 cur_trip->temp = (value & 0xff0) >> 4;
180 cur_trip->fan_duty = duty_lut[(value & 0xf000) >> 12];
181 break;
182 case 0x25:
183 cur_trip = &fan->trip[fan->nr_fan_trip - 1];
184 cur_trip->fan_duty = value;
185 break;
170 case 0x26: 186 case 0x26:
171 fan->pwm_freq = value; 187 fan->pwm_freq = value;
172 break; 188 break;
189 case 0x3b:
190 fan->bump_period = value;
191 break;
192 case 0x3c:
193 fan->slow_down_period = value;
194 break;
195 case 0x46:
196 fan->linear_min_temp = nv_ro08(bios, entry + 1);
197 fan->linear_max_temp = nv_ro08(bios, entry + 2);
198 break;
173 } 199 }
174 } 200 }
175 201
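Each type 0x24 entry packs a whole trip point into its 16-bit value, with duty_lut mapping the top nibble onto a duty percentage. Decoding an illustrative value of 0x6320:

    hysteresis = 0x6320 & 0x000f;                   /* = 0             */
    temp       = (0x6320 & 0x0ff0) >> 4;            /* = 0x32 = 50 C   */
    duty       = duty_lut[(0x6320 & 0xf000) >> 12]; /* lut[6] = 50%    */

Type 0x25 entries then override the previous trip's duty with an exact value rather than going through the lookup table.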
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/xpio.c b/drivers/gpu/drm/nouveau/core/subdev/bios/xpio.c
new file mode 100644
index 000000000000..e9b8e5d30a7a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/xpio.c
@@ -0,0 +1,76 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/gpio.h>
27#include <subdev/bios/xpio.h>
28
29static u16
30dcb_xpiod_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
31{
32 u16 data = dcb_gpio_table(bios, ver, hdr, cnt, len);
33 if (data && *ver >= 0x40 && *hdr >= 0x06) {
34 u16 xpio = nv_ro16(bios, data + 0x04);
35 if (xpio) {
36 *ver = nv_ro08(bios, data + 0x00);
37 *hdr = nv_ro08(bios, data + 0x01);
38 *cnt = nv_ro08(bios, data + 0x02);
39 *len = nv_ro08(bios, data + 0x03);
40 return xpio;
41 }
42 }
43 return 0x0000;
44}
45
46u16
47dcb_xpio_table(struct nouveau_bios *bios, u8 idx,
48 u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
49{
50 u16 data = dcb_xpiod_table(bios, ver, hdr, cnt, len);
51 if (data && idx < *cnt) {
52 u16 xpio = nv_ro16(bios, data + *hdr + (idx * *len));
53 if (xpio) {
54 *ver = nv_ro08(bios, data + 0x00);
55 *hdr = nv_ro08(bios, data + 0x01);
56 *cnt = nv_ro08(bios, data + 0x02);
57 *len = nv_ro08(bios, data + 0x03);
58 return xpio;
59 }
60 }
61 return 0x0000;
62}
63
64u16
65dcb_xpio_parse(struct nouveau_bios *bios, u8 idx,
66 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
67 struct nvbios_xpio *info)
68{
69 u16 data = dcb_xpio_table(bios, idx, ver, hdr, cnt, len);
70 if (data && *len >= 6) {
71 info->type = nv_ro08(bios, data + 0x04);
72 info->addr = nv_ro08(bios, data + 0x05);
73 info->flags = nv_ro08(bios, data + 0x06);
74 }
75 return 0x0000;
76}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c
new file mode 100644
index 000000000000..8c7f8057a185
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv04.c
@@ -0,0 +1,95 @@
1/*
2 * Copyright 2012 Nouveau Community
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres <martin.peres@labri.fr>
23 * Ben Skeggs
24 */
25
26#include <subdev/bus.h>
27
28struct nv04_bus_priv {
29 struct nouveau_bus base;
30};
31
32static void
33nv04_bus_intr(struct nouveau_subdev *subdev)
34{
35 struct nouveau_bus *pbus = nouveau_bus(subdev);
36 u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
37
38 if (stat & 0x00000001) {
39 nv_error(pbus, "BUS ERROR\n");
40 stat &= ~0x00000001;
41 nv_wr32(pbus, 0x001100, 0x00000001);
42 }
43
44 if (stat & 0x00000110) {
45 subdev = nouveau_subdev(subdev, NVDEV_SUBDEV_GPIO);
46 if (subdev && subdev->intr)
47 subdev->intr(subdev);
48 stat &= ~0x00000110;
49 nv_wr32(pbus, 0x001100, 0x00000110);
50 }
51
52 if (stat) {
53 nv_error(pbus, "unknown intr 0x%08x\n", stat);
54 nv_mask(pbus, 0x001140, stat, 0x00000000);
55 }
56}
57
58static int
59nv04_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
60 struct nouveau_oclass *oclass, void *data, u32 size,
61 struct nouveau_object **pobject)
62{
63 struct nv04_bus_priv *priv;
64 int ret;
65
66 ret = nouveau_bus_create(parent, engine, oclass, &priv);
67 *pobject = nv_object(priv);
68 if (ret)
69 return ret;
70
71 nv_subdev(priv)->intr = nv04_bus_intr;
72 return 0;
73}
74
75static int
76nv04_bus_init(struct nouveau_object *object)
77{
78 struct nv04_bus_priv *priv = (void *)object;
79
80 nv_wr32(priv, 0x001100, 0xffffffff);
81 nv_wr32(priv, 0x001140, 0x00000111);
82
83 return nouveau_bus_init(&priv->base);
84}
85
86struct nouveau_oclass
87nv04_bus_oclass = {
88 .handle = NV_SUBDEV(BUS, 0x04),
89 .ofuncs = &(struct nouveau_ofuncs) {
90 .ctor = nv04_bus_ctor,
91 .dtor = _nouveau_bus_dtor,
92 .init = nv04_bus_init,
93 .fini = _nouveau_bus_fini,
94 },
95};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c
new file mode 100644
index 000000000000..34132aef34e1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv31.c
@@ -0,0 +1,112 @@
1/*
2 * Copyright 2012 Nouveau Community
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Martin Peres <martin.peres@labri.fr>
23 * Ben Skeggs
24 */
25
26#include <subdev/bus.h>
27
28struct nv31_bus_priv {
29 struct nouveau_bus base;
30};
31
32static void
33nv31_bus_intr(struct nouveau_subdev *subdev)
34{
35 struct nouveau_bus *pbus = nouveau_bus(subdev);
36 u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
37 u32 gpio = nv_rd32(pbus, 0x001104) & nv_rd32(pbus, 0x001144);
38
39 if (gpio) {
40 subdev = nouveau_subdev(pbus, NVDEV_SUBDEV_GPIO);
41 if (subdev && subdev->intr)
42 subdev->intr(subdev);
43 }
44
45 if (stat & 0x00000008) { /* NV41- */
46 u32 addr = nv_rd32(pbus, 0x009084);
47 u32 data = nv_rd32(pbus, 0x009088);
48
49 nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x\n",
50 (addr & 0x00000002) ? "write" : "read", data,
51 (addr & 0x00fffffc));
52
53 stat &= ~0x00000008;
54 nv_wr32(pbus, 0x001100, 0x00000008);
55 }
56
57 if (stat & 0x00070000) {
58 subdev = nouveau_subdev(pbus, NVDEV_SUBDEV_THERM);
59 if (subdev && subdev->intr)
60 subdev->intr(subdev);
61 stat &= ~0x00070000;
62 nv_wr32(pbus, 0x001100, 0x00070000);
63 }
64
65 if (stat) {
66 nv_error(pbus, "unknown intr 0x%08x\n", stat);
67 nv_mask(pbus, 0x001140, stat, 0x00000000);
68 }
69}
70
71static int
72nv31_bus_init(struct nouveau_object *object)
73{
74 struct nv31_bus_priv *priv = (void *)object;
75 int ret;
76
77 ret = nouveau_bus_init(&priv->base);
78 if (ret)
79 return ret;
80
81 nv_wr32(priv, 0x001100, 0xffffffff);
82 nv_wr32(priv, 0x001140, 0x00070008);
83 return 0;
84}
85
86static int
87nv31_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
88 struct nouveau_oclass *oclass, void *data, u32 size,
89 struct nouveau_object **pobject)
90{
91 struct nv31_bus_priv *priv;
92 int ret;
93
94 ret = nouveau_bus_create(parent, engine, oclass, &priv);
95 *pobject = nv_object(priv);
96 if (ret)
97 return ret;
98
99 nv_subdev(priv)->intr = nv31_bus_intr;
100 return 0;
101}
102
103struct nouveau_oclass
104nv31_bus_oclass = {
105 .handle = NV_SUBDEV(BUS, 0x31),
106 .ofuncs = &(struct nouveau_ofuncs) {
107 .ctor = nv31_bus_ctor,
108 .dtor = _nouveau_bus_dtor,
109 .init = nv31_bus_init,
110 .fini = _nouveau_bus_fini,
111 },
112};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c
new file mode 100644
index 000000000000..f5b2117fa8c6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nv50.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres <martin.peres@labri.fr>
+ *          Ben Skeggs
+ */
+
+#include <subdev/bus.h>
+
+struct nv50_bus_priv {
+	struct nouveau_bus base;
+};
+
+static void
+nv50_bus_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_bus *pbus = nouveau_bus(subdev);
+	u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
+
+	if (stat & 0x00000008) {
+		u32 addr = nv_rd32(pbus, 0x009084);
+		u32 data = nv_rd32(pbus, 0x009088);
+
+		nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x\n",
+			 (addr & 0x00000002) ? "write" : "read", data,
+			 (addr & 0x00fffffc));
+
+		stat &= ~0x00000008;
+		nv_wr32(pbus, 0x001100, 0x00000008);
+	}
+
+	if (stat & 0x00010000) {
+		subdev = nouveau_subdev(pbus, NVDEV_SUBDEV_THERM);
+		if (subdev && subdev->intr)
+			subdev->intr(subdev);
+		stat &= ~0x00010000;
+		nv_wr32(pbus, 0x001100, 0x00010000);
+	}
+
+	if (stat) {
+		nv_error(pbus, "unknown intr 0x%08x\n", stat);
+		nv_mask(pbus, 0x001140, stat, 0);
+	}
+}
+
+static int
+nv50_bus_init(struct nouveau_object *object)
+{
+	struct nv50_bus_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_bus_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x001100, 0xffffffff);
+	nv_wr32(priv, 0x001140, 0x00010008);
+	return 0;
+}
+
+static int
+nv50_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nv50_bus_priv *priv;
+	int ret;
+
+	ret = nouveau_bus_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->intr = nv50_bus_intr;
+	return 0;
+}
+
+struct nouveau_oclass
+nv50_bus_oclass = {
+	.handle = NV_SUBDEV(BUS, 0x50),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv50_bus_ctor,
+		.dtor = _nouveau_bus_dtor,
+		.init = nv50_bus_init,
+		.fini = _nouveau_bus_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c
new file mode 100644
index 000000000000..b192d6246363
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bus/nvc0.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2012 Nouveau Community
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Martin Peres <martin.peres@labri.fr>
+ *          Ben Skeggs
+ */
+
+#include <subdev/bus.h>
+
+struct nvc0_bus_priv {
+	struct nouveau_bus base;
+};
+
+static void
+nvc0_bus_intr(struct nouveau_subdev *subdev)
+{
+	struct nouveau_bus *pbus = nouveau_bus(subdev);
+	u32 stat = nv_rd32(pbus, 0x001100) & nv_rd32(pbus, 0x001140);
+
+	if (stat & 0x0000000e) {
+		u32 addr = nv_rd32(pbus, 0x009084);
+		u32 data = nv_rd32(pbus, 0x009088);
+
+		nv_error(pbus, "MMIO %s of 0x%08x FAULT at 0x%06x [ %s%s%s]\n",
+			 (addr & 0x00000002) ? "write" : "read", data,
+			 (addr & 0x00fffffc),
+			 (stat & 0x00000002) ? "!ENGINE " : "",
+			 (stat & 0x00000004) ? "IBUS " : "",
+			 (stat & 0x00000008) ? "TIMEOUT " : "");
+
+		nv_wr32(pbus, 0x009084, 0x00000000);
+		nv_wr32(pbus, 0x001100, (stat & 0x0000000e));
+		stat &= ~0x0000000e;
+	}
+
+	if (stat) {
+		nv_error(pbus, "unknown intr 0x%08x\n", stat);
+		nv_mask(pbus, 0x001140, stat, 0x00000000);
+	}
+}
+
+static int
+nvc0_bus_init(struct nouveau_object *object)
+{
+	struct nvc0_bus_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_bus_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x001100, 0xffffffff);
+	nv_wr32(priv, 0x001140, 0x0000000e);
+	return 0;
+}
+
+static int
+nvc0_bus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	      struct nouveau_oclass *oclass, void *data, u32 size,
+	      struct nouveau_object **pobject)
+{
+	struct nvc0_bus_priv *priv;
+	int ret;
+
+	ret = nouveau_bus_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	nv_subdev(priv)->intr = nvc0_bus_intr;
+	return 0;
+}
+
+struct nouveau_oclass
+nvc0_bus_oclass = {
+	.handle = NV_SUBDEV(BUS, 0xc0),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nvc0_bus_ctor,
+		.dtor = _nouveau_bus_dtor,
+		.init = nvc0_bus_init,
+		.fini = _nouveau_bus_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/base.c b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
index f8a7ed4166cf..3937ced5c753 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
@@ -66,6 +66,7 @@ static const u64 disable_map[] = {
 	[NVDEV_SUBDEV_CLOCK]	= NV_DEVICE_DISABLE_CORE,
 	[NVDEV_SUBDEV_MXM]	= NV_DEVICE_DISABLE_CORE,
 	[NVDEV_SUBDEV_MC]	= NV_DEVICE_DISABLE_CORE,
+	[NVDEV_SUBDEV_BUS]	= NV_DEVICE_DISABLE_CORE,
 	[NVDEV_SUBDEV_TIMER]	= NV_DEVICE_DISABLE_CORE,
 	[NVDEV_SUBDEV_FB]	= NV_DEVICE_DISABLE_CORE,
 	[NVDEV_SUBDEV_LTCG]	= NV_DEVICE_DISABLE_CORE,
@@ -103,8 +104,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
 	struct nouveau_device *device;
 	struct nouveau_devobj *devobj;
 	struct nv_device_class *args = data;
-	u64 disable, boot0, strap;
-	u64 mmio_base, mmio_size;
+	u32 boot0, strap;
+	u64 disable, mmio_base, mmio_size;
 	void __iomem *map;
 	int ret, i, c;
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
index 8626d0d6cbbc..473c5c03d3c9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv04.c
@@ -24,6 +24,7 @@
 
 #include <subdev/device.h>
 #include <subdev/bios.h>
+#include <subdev/bus.h>
 #include <subdev/i2c.h>
 #include <subdev/clock.h>
 #include <subdev/devinit.h>
@@ -46,10 +47,11 @@ nv04_identify(struct nouveau_device *device)
 	case 0x04:
 		device->cname = "NV04";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv04_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv04_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -63,10 +65,11 @@ nv04_identify(struct nouveau_device *device)
 	case 0x05:
 		device->cname = "NV05";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv05_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv04_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
index 9c40b0fb23f6..d0774f5bebe1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
@@ -24,6 +24,7 @@
 
 #include <subdev/device.h>
 #include <subdev/bios.h>
+#include <subdev/bus.h>
 #include <subdev/gpio.h>
 #include <subdev/i2c.h>
 #include <subdev/clock.h>
@@ -48,10 +49,11 @@ nv10_identify(struct nouveau_device *device)
 		device->cname = "NV10";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -64,10 +66,11 @@ nv10_identify(struct nouveau_device *device)
 		device->cname = "NV15";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -82,10 +85,11 @@ nv10_identify(struct nouveau_device *device)
 		device->cname = "NV16";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -100,10 +104,11 @@ nv10_identify(struct nouveau_device *device)
 		device->cname = "nForce";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv1a_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -118,10 +123,11 @@ nv10_identify(struct nouveau_device *device)
 		device->cname = "NV11";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -136,10 +142,11 @@ nv10_identify(struct nouveau_device *device)
 		device->cname = "NV17";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -154,10 +161,11 @@ nv10_identify(struct nouveau_device *device)
 		device->cname = "nForce2";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv1a_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -172,10 +180,11 @@ nv10_identify(struct nouveau_device *device)
 		device->cname = "NV18";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
index 74f88f48e1c2..ab920e0dc45b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
@@ -24,6 +24,7 @@
 
 #include <subdev/device.h>
 #include <subdev/bios.h>
+#include <subdev/bus.h>
 #include <subdev/gpio.h>
 #include <subdev/i2c.h>
 #include <subdev/clock.h>
@@ -49,10 +50,11 @@ nv20_identify(struct nouveau_device *device)
 		device->cname = "NV20";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv20_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -67,10 +69,11 @@ nv20_identify(struct nouveau_device *device)
 		device->cname = "NV25";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv25_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -85,10 +88,11 @@ nv20_identify(struct nouveau_device *device)
 		device->cname = "NV28";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv25_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -103,10 +107,11 @@ nv20_identify(struct nouveau_device *device)
 		device->cname = "NV2A";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv25_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
index 0ac1b2c4f61d..5f2110261b04 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
@@ -24,6 +24,7 @@
 
 #include <subdev/device.h>
 #include <subdev/bios.h>
+#include <subdev/bus.h>
 #include <subdev/gpio.h>
 #include <subdev/i2c.h>
 #include <subdev/clock.h>
@@ -49,10 +50,11 @@ nv30_identify(struct nouveau_device *device)
 		device->cname = "NV30";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -67,10 +69,11 @@ nv30_identify(struct nouveau_device *device)
 		device->cname = "NV35";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv04_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv35_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -85,10 +88,11 @@ nv30_identify(struct nouveau_device *device)
 		device->cname = "NV31";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv30_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -104,10 +108,11 @@ nv30_identify(struct nouveau_device *device)
 		device->cname = "NV36";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv36_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
@@ -123,10 +128,11 @@ nv30_identify(struct nouveau_device *device)
 		device->cname = "NV34";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv04_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv10_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
index 41d59689a021..f3d55efe9ac9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
@@ -24,6 +24,8 @@
 
 #include <subdev/device.h>
 #include <subdev/bios.h>
+#include <subdev/bus.h>
+#include <subdev/vm.h>
 #include <subdev/gpio.h>
 #include <subdev/i2c.h>
 #include <subdev/clock.h>
@@ -50,11 +52,12 @@ nv40_identify(struct nouveau_device *device)
 		device->cname = "NV40";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -70,11 +73,12 @@ nv40_identify(struct nouveau_device *device)
 		device->cname = "NV41";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv41_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -90,11 +94,12 @@ nv40_identify(struct nouveau_device *device)
 		device->cname = "NV42";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv41_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -110,11 +115,12 @@ nv40_identify(struct nouveau_device *device)
 		device->cname = "NV43";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv41_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -130,11 +136,12 @@ nv40_identify(struct nouveau_device *device)
 		device->cname = "NV45";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv40_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -150,11 +157,12 @@ nv40_identify(struct nouveau_device *device)
 		device->cname = "G70";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv47_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -170,11 +178,12 @@ nv40_identify(struct nouveau_device *device)
 		device->cname = "G71";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv49_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -190,11 +199,12 @@ nv40_identify(struct nouveau_device *device)
 		device->cname = "G73";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv04_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv49_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -210,11 +220,12 @@ nv40_identify(struct nouveau_device *device)
 		device->cname = "NV44";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv44_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -230,11 +241,12 @@ nv40_identify(struct nouveau_device *device)
 		device->cname = "G72";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -250,11 +262,12 @@ nv40_identify(struct nouveau_device *device)
 		device->cname = "NV44A";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv44_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -270,11 +283,12 @@ nv40_identify(struct nouveau_device *device)
 		device->cname = "C61";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -290,11 +304,12 @@ nv40_identify(struct nouveau_device *device)
 		device->cname = "C51";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv4e_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv4e_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -310,11 +325,12 @@ nv40_identify(struct nouveau_device *device)
 		device->cname = "C73";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -330,11 +346,12 @@ nv40_identify(struct nouveau_device *device)
 		device->cname = "C67";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
@@ -350,11 +367,12 @@ nv40_identify(struct nouveau_device *device)
 		device->cname = "C68";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv10_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv04_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv40_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv40_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv44_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv31_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv46_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
index 6ccfd8585ba2..5ed2fa51ddc2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
@@ -24,6 +24,7 @@
 
 #include <subdev/device.h>
 #include <subdev/bios.h>
+#include <subdev/bus.h>
 #include <subdev/gpio.h>
 #include <subdev/i2c.h>
 #include <subdev/clock.h>
@@ -57,12 +58,13 @@ nv50_identify(struct nouveau_device *device)
 		device->cname = "G80";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -79,12 +81,13 @@ nv50_identify(struct nouveau_device *device)
 		device->cname = "G84";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -104,12 +107,13 @@ nv50_identify(struct nouveau_device *device)
 		device->cname = "G86";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -129,12 +133,13 @@ nv50_identify(struct nouveau_device *device)
 		device->cname = "G92";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -154,12 +159,13 @@ nv50_identify(struct nouveau_device *device)
 		device->cname = "G94";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -179,12 +185,13 @@ nv50_identify(struct nouveau_device *device)
 		device->cname = "G96";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv50_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -204,12 +211,13 @@ nv50_identify(struct nouveau_device *device)
 		device->cname = "G98";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -229,12 +237,13 @@ nv50_identify(struct nouveau_device *device)
 		device->cname = "G200";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv50_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -254,12 +263,13 @@ nv50_identify(struct nouveau_device *device)
 		device->cname = "MCP77/MCP78";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -279,12 +289,13 @@ nv50_identify(struct nouveau_device *device)
 		device->cname = "MCP79/MCP7A";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nv50_clock_oclass;
 		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -304,12 +315,13 @@ nv50_identify(struct nouveau_device *device)
 		device->cname = "GT215";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nva3_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -330,12 +342,13 @@ nv50_identify(struct nouveau_device *device)
 		device->cname = "GT216";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nva3_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -355,12 +368,13 @@ nv50_identify(struct nouveau_device *device)
 		device->cname = "GT218";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nva3_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
@@ -380,12 +394,13 @@ nv50_identify(struct nouveau_device *device)
 		device->cname = "MCP89";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nva3_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nv98_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nv50_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nv50_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
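
The fourteen hunks above are mechanical: every chipset case in nv50_identify() gains an NVDEV_SUBDEV_BUS entry and trades the generic nouveau_i2c/nv50_therm classes for chip-specific ones. The identify functions all follow the same dispatch pattern; the stand-alone C sketch below models it with hypothetical enum values, struct fields and class names, not the actual nouveau definitions.

/* Minimal sketch of the identify-table pattern: a per-chipset branch
 * fills an array of class pointers indexed by a subdev enum, and the
 * core instantiates whatever is present.  All names are stand-ins. */
#include <stdio.h>

enum subdev { SUBDEV_I2C, SUBDEV_THERM, SUBDEV_BUS, SUBDEV_NR };

struct oclass { const char *name; };

static const struct oclass nv50_i2c   = { "nv50_i2c"   };
static const struct oclass nva3_therm = { "nva3_therm" };
static const struct oclass nv50_bus   = { "nv50_bus"   };

int main(void)
{
	const struct oclass *oclass[SUBDEV_NR] = { 0 };
	int i;

	/* what a single "case 0xXX:" branch does */
	oclass[SUBDEV_I2C]   = &nv50_i2c;
	oclass[SUBDEV_THERM] = &nva3_therm;
	oclass[SUBDEV_BUS]   = &nv50_bus;	/* the subdev added by this series */

	/* what the core does with the table afterwards */
	for (i = 0; i < SUBDEV_NR; i++)
		if (oclass[i])
			printf("instantiate %s\n", oclass[i]->name);
	return 0;
}

Because the table is indexed by a fixed enum, wiring a whole new subsystem into a chipset is a one-line hunk, which is exactly what the repetitive diff above shows.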
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
index f0461685a422..4393eb4d6564 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
@@ -24,6 +24,7 @@
 
 #include <subdev/device.h>
 #include <subdev/bios.h>
+#include <subdev/bus.h>
 #include <subdev/gpio.h>
 #include <subdev/i2c.h>
 #include <subdev/clock.h>
@@ -57,12 +58,13 @@ nvc0_identify(struct nouveau_device *device)
 		device->cname = "GF100";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
@@ -85,12 +87,13 @@ nvc0_identify(struct nouveau_device *device)
 		device->cname = "GF104";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
@@ -113,12 +116,13 @@ nvc0_identify(struct nouveau_device *device)
 		device->cname = "GF106";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
@@ -141,12 +145,13 @@ nvc0_identify(struct nouveau_device *device)
 		device->cname = "GF114";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
@@ -169,12 +174,13 @@ nvc0_identify(struct nouveau_device *device)
 		device->cname = "GF116";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
@@ -197,12 +203,13 @@ nvc0_identify(struct nouveau_device *device)
 		device->cname = "GF108";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
@@ -225,12 +232,13 @@ nvc0_identify(struct nouveau_device *device)
 		device->cname = "GF110";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nv50_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nv94_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nva3_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
@@ -253,12 +261,13 @@ nvc0_identify(struct nouveau_device *device)
 		device->cname = "GF119";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
 		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nvd0_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
@@ -282,4 +291,4 @@ nvc0_identify(struct nouveau_device *device)
 	}
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
index 03a652876e73..5c12391619fd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
@@ -24,6 +24,7 @@
 
 #include <subdev/device.h>
 #include <subdev/bios.h>
+#include <subdev/bus.h>
 #include <subdev/gpio.h>
 #include <subdev/i2c.h>
 #include <subdev/clock.h>
@@ -56,13 +57,14 @@ nve0_identify(struct nouveau_device *device)
 	case 0xe4:
 		device->cname = "GK104";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nvd0_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nve0_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
@@ -84,13 +86,14 @@ nve0_identify(struct nouveau_device *device)
 	case 0xe7:
 		device->cname = "GK107";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nvd0_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nve0_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
@@ -112,13 +115,14 @@ nve0_identify(struct nouveau_device *device)
 	case 0xe6:
 		device->cname = "GK106";
 		device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
-		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nvd0_gpio_oclass;
-		device->oclass[NVDEV_SUBDEV_I2C    ] = &nouveau_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO   ] = &nve0_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C    ] = &nvd0_i2c_oclass;
 		device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
-		device->oclass[NVDEV_SUBDEV_THERM  ] = &nv50_therm_oclass;
+		device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
 		device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
 		device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
 		device->oclass[NVDEV_SUBDEV_MC     ] = &nvc0_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS    ] = &nvc0_bus_oclass;
 		device->oclass[NVDEV_SUBDEV_TIMER  ] = &nv04_timer_oclass;
 		device->oclass[NVDEV_SUBDEV_FB     ] = &nvc0_fb_oclass;
 		device->oclass[NVDEV_SUBDEV_LTCG   ] = &nvc0_ltcg_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
index ae7249b09797..4a8577838417 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
@@ -78,12 +78,13 @@ nv50_devinit_init(struct nouveau_object *object)
 	if (ret)
 		return ret;
 
-	/* if we ran the init tables, execute first script pointer for each
-	 * display table output entry that has a matching dcb entry.
+	/* if we ran the init tables, we have to execute the first script
+	 * pointer of each dcb entry's display encoder table in order
+	 * to properly initialise each encoder.
 	 */
-	while (priv->base.post && ver) {
-		u16 data = nvbios_outp_parse(bios, i++, &ver, &hdr, &cnt, &len, &info);
-		if (data && dcb_outp_match(bios, info.type, info.mask, &ver, &len, &outp)) {
+	while (priv->base.post && dcb_outp_parse(bios, i, &ver, &hdr, &outp)) {
+		if (nvbios_outp_match(bios, outp.hasht, outp.hashm,
+				      &ver, &hdr, &cnt, &len, &info)) {
 			struct nvbios_init init = {
 				.subdev = nv_subdev(priv),
 				.bios = bios,
@@ -95,7 +96,8 @@ nv50_devinit_init(struct nouveau_object *object)
 
 			nvbios_exec(&init);
 		}
-	};
+		i++;
+	}
 
 	return 0;
 }
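
Besides the comment rewrite, the hunks above invert the walk in nv50_devinit_init(): the loop now iterates the DCB (display configuration block) entries and looks up the matching display-output table entry for each, terminating on the parser's return value rather than a version byte left over from the previous call, with the index advanced explicitly at the bottom of the loop (and the stray semicolon after the closing brace removed). A stand-alone sketch of the corrected pattern, using hypothetical stand-ins for the bios helpers:

/* Illustration only: dcb_parse() and outp_match() are fake stand-ins
 * for dcb_outp_parse()/nvbios_outp_match(), modelling the walk above. */
#include <stdbool.h>
#include <stdio.h>

static bool dcb_parse(int i, int *hash)
{
	if (i >= 3)			/* pretend the DCB has three entries */
		return false;
	*hash = 100 + i;
	return true;
}

static bool outp_match(int hash)
{
	return hash != 101;		/* pretend entry 1 has no display table entry */
}

int main(void)
{
	int i = 0, hash;

	/* walk every dcb entry; run a script only when a match exists,
	 * but advance the index unconditionally so a miss cannot stall */
	while (dcb_parse(i, &hash)) {
		if (outp_match(hash))
			printf("exec init script for dcb entry %d\n", i);
		i++;
	}
	return 0;
}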
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
index eac236ed19b2..0772ec978165 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -22,8 +22,10 @@
  * Authors: Ben Skeggs
  */
 
-#include <core/object.h>
+#include <core/client.h>
 #include <core/enum.h>
+#include <core/engctx.h>
+#include <core/object.h>
 
 #include <subdev/fb.h>
 #include <subdev/bios.h>
@@ -303,17 +305,18 @@ static const struct nouveau_enum vm_client[] = {
 };
 
 static const struct nouveau_enum vm_engine[] = {
-	{ 0x00000000, "PGRAPH", NULL },
-	{ 0x00000001, "PVP", NULL },
+	{ 0x00000000, "PGRAPH", NULL, NVDEV_ENGINE_GR },
+	{ 0x00000001, "PVP", NULL, NVDEV_ENGINE_VP },
 	{ 0x00000004, "PEEPHOLE", NULL },
-	{ 0x00000005, "PFIFO", vm_pfifo_subclients },
+	{ 0x00000005, "PFIFO", vm_pfifo_subclients, NVDEV_ENGINE_FIFO },
 	{ 0x00000006, "BAR", vm_bar_subclients },
-	{ 0x00000008, "PPPP", NULL },
-	{ 0x00000009, "PBSP", NULL },
-	{ 0x0000000a, "PCRYPT", NULL },
+	{ 0x00000008, "PPPP", NULL, NVDEV_ENGINE_PPP },
+	{ 0x00000008, "PMPEG", NULL, NVDEV_ENGINE_MPEG },
+	{ 0x00000009, "PBSP", NULL, NVDEV_ENGINE_BSP },
+	{ 0x0000000a, "PCRYPT", NULL, NVDEV_ENGINE_CRYPT },
 	{ 0x0000000b, "PCOUNTER", NULL },
 	{ 0x0000000c, "SEMAPHORE_BG", NULL },
-	{ 0x0000000d, "PCOPY", NULL },
+	{ 0x0000000d, "PCOPY", NULL, NVDEV_ENGINE_COPY0 },
 	{ 0x0000000e, "PDAEMON", NULL },
 	{}
 };
@@ -335,8 +338,10 @@ static void
 nv50_fb_intr(struct nouveau_subdev *subdev)
 {
 	struct nouveau_device *device = nv_device(subdev);
+	struct nouveau_engine *engine;
 	struct nv50_fb_priv *priv = (void *)subdev;
 	const struct nouveau_enum *en, *cl;
+	struct nouveau_object *engctx = NULL;
 	u32 trap[6], idx, chan;
 	u8 st0, st1, st2, st3;
 	int i;
@@ -367,36 +372,55 @@ nv50_fb_intr(struct nouveau_subdev *subdev)
 	}
 	chan = (trap[2] << 16) | trap[1];
 
-	nv_error(priv, "trapped %s at 0x%02x%04x%04x on channel 0x%08x ",
+	en = nouveau_enum_find(vm_engine, st0);
+
+	if (en && en->data2) {
+		const struct nouveau_enum *orig_en = en;
+		while (en->name && en->value == st0 && en->data2) {
+			engine = nouveau_engine(subdev, en->data2);
+			if (engine) {
+				engctx = nouveau_engctx_get(engine, chan);
+				if (engctx)
+					break;
+			}
+			en++;
+		}
+		if (!engctx)
+			en = orig_en;
+	}
+
+	nv_error(priv, "trapped %s at 0x%02x%04x%04x on channel 0x%08x [%s] ",
 		 (trap[5] & 0x00000100) ? "read" : "write",
-		 trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, chan);
+		 trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, chan,
+		 nouveau_client_name(engctx));
+
+	nouveau_engctx_put(engctx);
 
-	en = nouveau_enum_find(vm_engine, st0);
 	if (en)
-		printk("%s/", en->name);
+		pr_cont("%s/", en->name);
 	else
-		printk("%02x/", st0);
+		pr_cont("%02x/", st0);
 
 	cl = nouveau_enum_find(vm_client, st2);
 	if (cl)
-		printk("%s/", cl->name);
+		pr_cont("%s/", cl->name);
 	else
-		printk("%02x/", st2);
+		pr_cont("%02x/", st2);
 
 	if (cl && cl->data) cl = nouveau_enum_find(cl->data, st3);
 	else if (en && en->data) cl = nouveau_enum_find(en->data, st3);
 	else cl = NULL;
 	if (cl)
-		printk("%s", cl->name);
+		pr_cont("%s", cl->name);
 	else
-		printk("%02x", st3);
+		pr_cont("%02x", st3);
 
-	printk(" reason: ");
+	pr_cont(" reason: ");
 	en = nouveau_enum_find(vm_fault, st1);
 	if (en)
-		printk("%s\n", en->name);
+		pr_cont("%s\n", en->name);
 	else
-		printk("0x%08x\n", st1);
+		pr_cont("0x%08x\n", st1);
 }
 
 static int
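
These fb/nv50.c changes teach the VM fault handler to name the guilty client: each vm_engine entry may now carry an engine index in its fourth field, the handler resolves that engine and asks it for the context bound to the faulting channel, and the message continuation switches from bare printk() to pr_cont() so it stays on one line. Two entries share the source id 0x00000008 (PPPP and PMPEG), which is why the lookup keeps walking consecutive rows with the same value until one engine yields a context. A stand-alone model of that duplicate-key walk, with illustrative names and values only:

#include <stdio.h>

/* Stand-in for the nouveau_enum table: duplicate values are allowed,
 * so a lookup may try several rows before one resolves to a context. */
struct row { int value; const char *name; int engine; };

static const struct row vm_engine[] = {
	{ 0x08, "PPPP",  1 },
	{ 0x08, "PMPEG", 2 },
	{ 0x09, "PBSP",  3 },
	{ 0 }
};

static const char *ctx_for(int engine, int chan)
{
	/* pretend only engine 2 has a context on this channel */
	return (engine == 2 && chan == 7) ? "client-X" : NULL;
}

int main(void)
{
	const struct row *en;
	const char *ctx = NULL;
	int st0 = 0x08, chan = 7;

	for (en = vm_engine; en->name; en++) {
		if (en->value != st0)
			continue;
		ctx = ctx_for(en->engine, chan);
		if (ctx)
			break;
	}
	printf("fault on %s [%s]\n", en->name ? en->name : "?",
	       ctx ? ctx : "unknown");
	return 0;
}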
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
index 9fb0f9b92d49..d422acc9af15 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
@@ -102,135 +102,19 @@ nouveau_gpio_get(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line)
 	return ret;
 }
 
-static int
-nouveau_gpio_irq(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line, bool on)
-{
-	struct dcb_gpio_func func;
-	int ret;
-
-	ret = nouveau_gpio_find(gpio, idx, tag, line, &func);
-	if (ret == 0) {
-		if (idx == 0 && gpio->irq_enable)
-			gpio->irq_enable(gpio, func.line, on);
-		else
-			ret = -ENODEV;
-	}
-
-	return ret;
-}
-
-struct gpio_isr {
-	struct nouveau_gpio *gpio;
-	struct list_head head;
-	struct work_struct work;
-	int idx;
-	struct dcb_gpio_func func;
-	void (*handler)(void *, int);
-	void *data;
-	bool inhibit;
-};
-
-static void
-nouveau_gpio_isr_bh(struct work_struct *work)
-{
-	struct gpio_isr *isr = container_of(work, struct gpio_isr, work);
-	struct nouveau_gpio *gpio = isr->gpio;
-	unsigned long flags;
-	int state;
-
-	state = nouveau_gpio_get(gpio, isr->idx, isr->func.func,
-				 isr->func.line);
-	if (state >= 0)
-		isr->handler(isr->data, state);
-
-	spin_lock_irqsave(&gpio->lock, flags);
-	isr->inhibit = false;
-	spin_unlock_irqrestore(&gpio->lock, flags);
-}
-
-static void
-nouveau_gpio_isr_run(struct nouveau_gpio *gpio, int idx, u32 line_mask)
-{
-	struct gpio_isr *isr;
-
-	if (idx != 0)
-		return;
-
-	spin_lock(&gpio->lock);
-	list_for_each_entry(isr, &gpio->isr, head) {
-		if (line_mask & (1 << isr->func.line)) {
-			if (isr->inhibit)
-				continue;
-			isr->inhibit = true;
-			schedule_work(&isr->work);
-		}
-	}
-	spin_unlock(&gpio->lock);
-}
-
-static int
-nouveau_gpio_isr_add(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
-		     void (*handler)(void *, int), void *data)
-{
-	struct gpio_isr *isr;
-	unsigned long flags;
-	int ret;
-
-	isr = kzalloc(sizeof(*isr), GFP_KERNEL);
-	if (!isr)
-		return -ENOMEM;
-
-	ret = nouveau_gpio_find(gpio, idx, tag, line, &isr->func);
-	if (ret) {
-		kfree(isr);
-		return ret;
-	}
-
-	INIT_WORK(&isr->work, nouveau_gpio_isr_bh);
-	isr->gpio = gpio;
-	isr->handler = handler;
-	isr->data = data;
-	isr->idx = idx;
-
-	spin_lock_irqsave(&gpio->lock, flags);
-	list_add(&isr->head, &gpio->isr);
-	spin_unlock_irqrestore(&gpio->lock, flags);
-	return 0;
-}
-
-static void
-nouveau_gpio_isr_del(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
-		     void (*handler)(void *, int), void *data)
+void
+_nouveau_gpio_dtor(struct nouveau_object *object)
 {
-	struct gpio_isr *isr, *tmp;
-	struct dcb_gpio_func func;
-	unsigned long flags;
-	LIST_HEAD(tofree);
-	int ret;
-
-	ret = nouveau_gpio_find(gpio, idx, tag, line, &func);
-	if (ret == 0) {
-		spin_lock_irqsave(&gpio->lock, flags);
-		list_for_each_entry_safe(isr, tmp, &gpio->isr, head) {
-			if (memcmp(&isr->func, &func, sizeof(func)) ||
-			    isr->idx != idx ||
-			    isr->handler != handler || isr->data != data)
-				continue;
-			list_move_tail(&isr->head, &tofree);
-		}
-		spin_unlock_irqrestore(&gpio->lock, flags);
-
-		list_for_each_entry_safe(isr, tmp, &tofree, head) {
-			flush_work(&isr->work);
-			kfree(isr);
-		}
-	}
+	struct nouveau_gpio *gpio = (void *)object;
+	nouveau_event_destroy(&gpio->events);
+	nouveau_subdev_destroy(&gpio->base);
 }
 
 int
 nouveau_gpio_create_(struct nouveau_object *parent,
 		     struct nouveau_object *engine,
-		     struct nouveau_oclass *oclass, int length, void **pobject)
+		     struct nouveau_oclass *oclass, int lines,
+		     int length, void **pobject)
 {
 	struct nouveau_gpio *gpio;
 	int ret;
@@ -241,15 +125,13 @@ nouveau_gpio_create_(struct nouveau_object *parent,
 	if (ret)
 		return ret;
 
+	ret = nouveau_event_create(lines, &gpio->events);
+	if (ret)
+		return ret;
+
 	gpio->find = nouveau_gpio_find;
 	gpio->set = nouveau_gpio_set;
 	gpio->get = nouveau_gpio_get;
-	gpio->irq = nouveau_gpio_irq;
-	gpio->isr_run = nouveau_gpio_isr_run;
-	gpio->isr_add = nouveau_gpio_isr_add;
-	gpio->isr_del = nouveau_gpio_isr_del;
-	INIT_LIST_HEAD(&gpio->isr);
-	spin_lock_init(&gpio->lock);
 	return 0;
 }
 
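
This is the heart of the series for GPIO: the hand-rolled interrupt plumbing (struct gpio_isr, its workqueue bottom half, the isr_add/isr_del list bookkeeping) is deleted in favour of a generic nouveau_event object created with one slot per GPIO line, leaving each chip only an enable/disable pair and a trigger call from its interrupt handler. The nouveau_event API itself is introduced elsewhere in this merge, so the sketch below only models the shape implied by the calls visible here (create/trigger/destroy plus per-line handler slots); it is an illustration, not the in-tree implementation.

#include <stdio.h>
#include <stdlib.h>

/* Toy model of the event object: one handler slot per line, triggered
 * by index.  All names and fields here are hypothetical stand-ins. */
struct event {
	int lines;
	void (**handler)(void *, int);	/* one handler slot per line */
	void **data;
};

static struct event *event_create(int lines)
{
	struct event *ev = calloc(1, sizeof(*ev));
	ev->lines = lines;
	ev->handler = calloc(lines, sizeof(*ev->handler));
	ev->data = calloc(lines, sizeof(*ev->data));
	return ev;
}

static void event_trigger(struct event *ev, int line)
{
	if (line < ev->lines && ev->handler[line])
		ev->handler[line](ev->data[line], line);
}

static void hello(void *data, int line)
{
	printf("%s: gpio line %d changed\n", (const char *)data, line);
}

int main(void)
{
	struct event *ev = event_create(32);
	ev->handler[9] = hello;			/* what a consumer registers */
	ev->data[9] = (void *)"hpd";
	event_trigger(ev, 9);			/* what the intr handler does */
	return 0;
}

The design pay-off visible in the diff: ~120 lines of per-subsystem locking and workqueue code collapse into three function-pointer assignments in each ctor.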
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c
index 168d16a9a8e9..76d5d5465ddd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv10.c
@@ -24,7 +24,7 @@
  *
  */
 
-#include <subdev/gpio.h>
+#include "priv.h"
 
 struct nv10_gpio_priv {
 	struct nouveau_gpio base;
@@ -83,27 +83,36 @@ nv10_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
 }
 
 static void
-nv10_gpio_irq_enable(struct nouveau_gpio *gpio, int line, bool on)
-{
-	u32 mask = 0x00010001 << line;
-
-	nv_wr32(gpio, 0x001104, mask);
-	nv_mask(gpio, 0x001144, mask, on ? mask : 0);
-}
-
-static void
 nv10_gpio_intr(struct nouveau_subdev *subdev)
 {
 	struct nv10_gpio_priv *priv = (void *)subdev;
 	u32 intr = nv_rd32(priv, 0x001104);
 	u32 hi = (intr & 0x0000ffff) >> 0;
 	u32 lo = (intr & 0xffff0000) >> 16;
+	int i;
 
-	priv->base.isr_run(&priv->base, 0, hi | lo);
+	for (i = 0; (hi | lo) && i < 32; i++) {
+		if ((hi | lo) & (1 << i))
+			nouveau_event_trigger(priv->base.events, i);
+	}
 
 	nv_wr32(priv, 0x001104, intr);
 }
 
+static void
+nv10_gpio_intr_enable(struct nouveau_event *event, int line)
+{
+	nv_wr32(event->priv, 0x001104, 0x00010001 << line);
+	nv_mask(event->priv, 0x001144, 0x00010001 << line, 0x00010001 << line);
+}
+
+static void
+nv10_gpio_intr_disable(struct nouveau_event *event, int line)
+{
+	nv_wr32(event->priv, 0x001104, 0x00010001 << line);
+	nv_mask(event->priv, 0x001144, 0x00010001 << line, 0x00000000);
+}
+
 static int
 nv10_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	       struct nouveau_oclass *oclass, void *data, u32 size,
@@ -112,14 +121,16 @@ nv10_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nv10_gpio_priv *priv;
 	int ret;
 
-	ret = nouveau_gpio_create(parent, engine, oclass, &priv);
+	ret = nouveau_gpio_create(parent, engine, oclass, 16, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
 
 	priv->base.drive = nv10_gpio_drive;
 	priv->base.sense = nv10_gpio_sense;
-	priv->base.irq_enable = nv10_gpio_irq_enable;
+	priv->base.events->priv = priv;
+	priv->base.events->enable = nv10_gpio_intr_enable;
+	priv->base.events->disable = nv10_gpio_intr_disable;
 	nv_subdev(priv)->intr = nv10_gpio_intr;
 	return 0;
 }
@@ -141,8 +152,6 @@ nv10_gpio_init(struct nouveau_object *object)
 	if (ret)
 		return ret;
 
-	nv_wr32(priv, 0x001140, 0x00000000);
-	nv_wr32(priv, 0x001100, 0xffffffff);
 	nv_wr32(priv, 0x001144, 0x00000000);
 	nv_wr32(priv, 0x001104, 0xffffffff);
 	return 0;
@@ -152,7 +161,6 @@ static int
 nv10_gpio_fini(struct nouveau_object *object, bool suspend)
 {
 	struct nv10_gpio_priv *priv = (void *)object;
-	nv_wr32(priv, 0x001140, 0x00000000);
 	nv_wr32(priv, 0x001144, 0x00000000);
 	return nouveau_gpio_fini(&priv->base, suspend);
 }
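
nv10_gpio_intr() shows the consumer side of the new event object: the hi and lo halves of register 0x001104 latch the two edge directions for sixteen lines, and OR-ing them yields a per-line "changed" mask that is fanned out as one nouveau_event_trigger() call per set bit. The same fold-and-iterate idiom, runnable in isolation (the register values are made up for the demo):

#include <stdio.h>

int main(void)
{
	/* hi/lo carry the two edge directions; their OR says "this line
	 * changed somehow", which is all the event consumer needs */
	unsigned hi = 0x0005, lo = 0x0100, pend = hi | lo;
	int i;

	for (i = 0; pend && i < 32; i++)
		if (pend & (1u << i))
			printf("trigger event for line %d\n", i);
	return 0;
}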
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
index bf13a1200f26..bf489dcf46e2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
@@ -22,7 +22,7 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/gpio.h> 25#include "priv.h"
26 26
27struct nv50_gpio_priv { 27struct nv50_gpio_priv {
28 struct nouveau_gpio base; 28 struct nouveau_gpio base;
@@ -95,21 +95,12 @@ nv50_gpio_sense(struct nouveau_gpio *gpio, int line)
95} 95}
96 96
97void 97void
98nv50_gpio_irq_enable(struct nouveau_gpio *gpio, int line, bool on)
99{
100 u32 reg = line < 16 ? 0xe050 : 0xe070;
101 u32 mask = 0x00010001 << (line & 0xf);
102
103 nv_wr32(gpio, reg + 4, mask);
104 nv_mask(gpio, reg + 0, mask, on ? mask : 0);
105}
106
107void
108nv50_gpio_intr(struct nouveau_subdev *subdev) 98nv50_gpio_intr(struct nouveau_subdev *subdev)
109{ 99{
110 struct nv50_gpio_priv *priv = (void *)subdev; 100 struct nv50_gpio_priv *priv = (void *)subdev;
111 u32 intr0, intr1 = 0; 101 u32 intr0, intr1 = 0;
112 u32 hi, lo; 102 u32 hi, lo;
103 int i;
113 104
114 intr0 = nv_rd32(priv, 0xe054) & nv_rd32(priv, 0xe050); 105 intr0 = nv_rd32(priv, 0xe054) & nv_rd32(priv, 0xe050);
115 if (nv_device(priv)->chipset >= 0x90) 106 if (nv_device(priv)->chipset >= 0x90)
@@ -117,13 +108,35 @@ nv50_gpio_intr(struct nouveau_subdev *subdev)
117 108
118 hi = (intr0 & 0x0000ffff) | (intr1 << 16); 109 hi = (intr0 & 0x0000ffff) | (intr1 << 16);
119 lo = (intr0 >> 16) | (intr1 & 0xffff0000); 110 lo = (intr0 >> 16) | (intr1 & 0xffff0000);
120 priv->base.isr_run(&priv->base, 0, hi | lo); 111
112 for (i = 0; (hi | lo) && i < 32; i++) {
113 if ((hi | lo) & (1 << i))
114 nouveau_event_trigger(priv->base.events, i);
115 }
121 116
122 nv_wr32(priv, 0xe054, intr0); 117 nv_wr32(priv, 0xe054, intr0);
123 if (nv_device(priv)->chipset >= 0x90) 118 if (nv_device(priv)->chipset >= 0x90)
124 nv_wr32(priv, 0xe074, intr1); 119 nv_wr32(priv, 0xe074, intr1);
125} 120}
126 121
122void
123nv50_gpio_intr_enable(struct nouveau_event *event, int line)
124{
125 const u32 addr = line < 16 ? 0xe050 : 0xe070;
126 const u32 mask = 0x00010001 << (line & 0xf);
127 nv_wr32(event->priv, addr + 0x04, mask);
128 nv_mask(event->priv, addr + 0x00, mask, mask);
129}
130
131void
132nv50_gpio_intr_disable(struct nouveau_event *event, int line)
133{
134 const u32 addr = line < 16 ? 0xe050 : 0xe070;
135 const u32 mask = 0x00010001 << (line & 0xf);
136 nv_wr32(event->priv, addr + 0x04, mask);
137 nv_mask(event->priv, addr + 0x00, mask, 0x00000000);
138}
139
127static int 140static int
128nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 141nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
129 struct nouveau_oclass *oclass, void *data, u32 size, 142 struct nouveau_oclass *oclass, void *data, u32 size,
@@ -132,7 +145,9 @@ nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
132 struct nv50_gpio_priv *priv; 145 struct nv50_gpio_priv *priv;
133 int ret; 146 int ret;
134 147
135 ret = nouveau_gpio_create(parent, engine, oclass, &priv); 148 ret = nouveau_gpio_create(parent, engine, oclass,
149 nv_device(parent)->chipset >= 0x90 ? 32 : 16,
150 &priv);
136 *pobject = nv_object(priv); 151 *pobject = nv_object(priv);
137 if (ret) 152 if (ret)
138 return ret; 153 return ret;
@@ -140,7 +155,9 @@ nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
140 priv->base.reset = nv50_gpio_reset; 155 priv->base.reset = nv50_gpio_reset;
141 priv->base.drive = nv50_gpio_drive; 156 priv->base.drive = nv50_gpio_drive;
142 priv->base.sense = nv50_gpio_sense; 157 priv->base.sense = nv50_gpio_sense;
143 priv->base.irq_enable = nv50_gpio_irq_enable; 158 priv->base.events->priv = priv;
159 priv->base.events->enable = nv50_gpio_intr_enable;
160 priv->base.events->disable = nv50_gpio_intr_disable;
144 nv_subdev(priv)->intr = nv50_gpio_intr; 161 nv_subdev(priv)->intr = nv50_gpio_intr;
145 return 0; 162 return 0;
146} 163}
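
With the old isr chain gone, nv50_gpio_intr() above folds the two 16-line status banks into one 32-bit pending word and fires a nouveau_event per set bit. Restated as a self-contained sketch (illustrative only; nouveau_event_trigger() is the call actually used in the hunk):

/* bit N of (hi | lo) means "GPIO line N changed state" */
static void
example_gpio_dispatch(struct nouveau_event *events, u32 hi, u32 lo)
{
	const u32 pending = hi | lo;
	int i;

	for (i = 0; pending && i < 32; i++) {
		if (pending & (1 << i))
			nouveau_event_trigger(events, i);
	}
}
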
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
index 83e8b8f16e6a..010431e3acec 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
@@ -22,13 +22,13 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/gpio.h> 25#include "priv.h"
26 26
27struct nvd0_gpio_priv { 27struct nvd0_gpio_priv {
28 struct nouveau_gpio base; 28 struct nouveau_gpio base;
29}; 29};
30 30
31static void 31void
32nvd0_gpio_reset(struct nouveau_gpio *gpio, u8 match) 32nvd0_gpio_reset(struct nouveau_gpio *gpio, u8 match)
33{ 33{
34 struct nouveau_bios *bios = nouveau_bios(gpio); 34 struct nouveau_bios *bios = nouveau_bios(gpio);
@@ -57,7 +57,7 @@ nvd0_gpio_reset(struct nouveau_gpio *gpio, u8 match)
57 } 57 }
58} 58}
59 59
60static int 60int
61nvd0_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out) 61nvd0_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
62{ 62{
63 u32 data = ((dir ^ 1) << 13) | (out << 12); 63 u32 data = ((dir ^ 1) << 13) | (out << 12);
@@ -66,7 +66,7 @@ nvd0_gpio_drive(struct nouveau_gpio *gpio, int line, int dir, int out)
66 return 0; 66 return 0;
67} 67}
68 68
69static int 69int
70nvd0_gpio_sense(struct nouveau_gpio *gpio, int line) 70nvd0_gpio_sense(struct nouveau_gpio *gpio, int line)
71{ 71{
72 return !!(nv_rd32(gpio, 0x00d610 + (line * 4)) & 0x00004000); 72 return !!(nv_rd32(gpio, 0x00d610 + (line * 4)) & 0x00004000);
@@ -80,7 +80,7 @@ nvd0_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
80 struct nvd0_gpio_priv *priv; 80 struct nvd0_gpio_priv *priv;
81 int ret; 81 int ret;
82 82
83 ret = nouveau_gpio_create(parent, engine, oclass, &priv); 83 ret = nouveau_gpio_create(parent, engine, oclass, 32, &priv);
84 *pobject = nv_object(priv); 84 *pobject = nv_object(priv);
85 if (ret) 85 if (ret)
86 return ret; 86 return ret;
@@ -88,7 +88,9 @@ nvd0_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
88 priv->base.reset = nvd0_gpio_reset; 88 priv->base.reset = nvd0_gpio_reset;
89 priv->base.drive = nvd0_gpio_drive; 89 priv->base.drive = nvd0_gpio_drive;
90 priv->base.sense = nvd0_gpio_sense; 90 priv->base.sense = nvd0_gpio_sense;
91 priv->base.irq_enable = nv50_gpio_irq_enable; 91 priv->base.events->priv = priv;
92 priv->base.events->enable = nv50_gpio_intr_enable;
93 priv->base.events->disable = nv50_gpio_intr_disable;
92 nv_subdev(priv)->intr = nv50_gpio_intr; 94 nv_subdev(priv)->intr = nv50_gpio_intr;
93 return 0; 95 return 0;
94} 96}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c
new file mode 100644
index 000000000000..16b8c5bf5efa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nve0.c
@@ -0,0 +1,131 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "priv.h"
26
27struct nve0_gpio_priv {
28 struct nouveau_gpio base;
29};
30
31void
32nve0_gpio_intr(struct nouveau_subdev *subdev)
33{
34 struct nve0_gpio_priv *priv = (void *)subdev;
35 u32 intr0 = nv_rd32(priv, 0xdc00) & nv_rd32(priv, 0xdc08);
36 u32 intr1 = nv_rd32(priv, 0xdc80) & nv_rd32(priv, 0xdc88);
37 u32 hi = (intr0 & 0x0000ffff) | (intr1 << 16);
38 u32 lo = (intr0 >> 16) | (intr1 & 0xffff0000);
39 int i;
40
41 for (i = 0; (hi | lo) && i < 32; i++) {
42 if ((hi | lo) & (1 << i))
43 nouveau_event_trigger(priv->base.events, i);
44 }
45
46 nv_wr32(priv, 0xdc00, intr0);
47 nv_wr32(priv, 0xdc88, intr1);
48}
49
50void
51nve0_gpio_intr_enable(struct nouveau_event *event, int line)
52{
53 const u32 addr = line < 16 ? 0xdc00 : 0xdc80;
54 const u32 mask = 0x00010001 << (line & 0xf);
55 nv_wr32(event->priv, addr + 0x08, mask);
56 nv_mask(event->priv, addr + 0x00, mask, mask);
57}
58
59void
60nve0_gpio_intr_disable(struct nouveau_event *event, int line)
61{
62 const u32 addr = line < 16 ? 0xdc00 : 0xdc80;
63 const u32 mask = 0x00010001 << (line & 0xf);
64 nv_wr32(event->priv, addr + 0x08, mask);
65 nv_mask(event->priv, addr + 0x00, mask, 0x00000000);
66}
67
68int
69nve0_gpio_fini(struct nouveau_object *object, bool suspend)
70{
71 struct nve0_gpio_priv *priv = (void *)object;
72 nv_wr32(priv, 0xdc08, 0x00000000);
73 nv_wr32(priv, 0xdc88, 0x00000000);
74 return nouveau_gpio_fini(&priv->base, suspend);
75}
76
77int
78nve0_gpio_init(struct nouveau_object *object)
79{
80 struct nve0_gpio_priv *priv = (void *)object;
81 int ret;
82
83 ret = nouveau_gpio_init(&priv->base);
84 if (ret)
85 return ret;
86
87 nv_wr32(priv, 0xdc00, 0xffffffff);
88 nv_wr32(priv, 0xdc80, 0xffffffff);
89 return 0;
90}
91
92void
93nve0_gpio_dtor(struct nouveau_object *object)
94{
95 struct nve0_gpio_priv *priv = (void *)object;
96 nouveau_gpio_destroy(&priv->base);
97}
98
99static int
100nve0_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
101 struct nouveau_oclass *oclass, void *data, u32 size,
102 struct nouveau_object **pobject)
103{
104 struct nve0_gpio_priv *priv;
105 int ret;
106
107 ret = nouveau_gpio_create(parent, engine, oclass, 32, &priv);
108 *pobject = nv_object(priv);
109 if (ret)
110 return ret;
111
112 priv->base.reset = nvd0_gpio_reset;
113 priv->base.drive = nvd0_gpio_drive;
114 priv->base.sense = nvd0_gpio_sense;
115 priv->base.events->priv = priv;
116 priv->base.events->enable = nve0_gpio_intr_enable;
117 priv->base.events->disable = nve0_gpio_intr_disable;
118 nv_subdev(priv)->intr = nve0_gpio_intr;
119 return 0;
120}
121
122struct nouveau_oclass
123nve0_gpio_oclass = {
124 .handle = NV_SUBDEV(GPIO, 0xe0),
125 .ofuncs = &(struct nouveau_ofuncs) {
126 .ctor = nve0_gpio_ctor,
127 .dtor = nv50_gpio_dtor,
128 .init = nve0_gpio_init,
129 .fini = nve0_gpio_fini,
130 },
131};
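
nve0.c is new with this series: Kepler keeps the two-banks-of-16 line layout but moves the GPIO interrupt block from 0xe050/0xe070 to 0xdc00/0xdc80, reusing the nvd0 reset/drive/sense hooks through priv.h. The line-to-register arithmetic shared by the nv50 and nve0 hooks, pulled out as hypothetical helpers (not part of the patch):

/* a line's "hi" and "lo" transition bits sit 16 bits apart in the same
 * register, hence the 0x00010001 mask pattern
 */
static inline u32
example_gpio_bank(int line, u32 bank0, u32 bank1)
{
	return line < 16 ? bank0 : bank1;    /* 0xdc00/0xdc80 on nve0 */
}

static inline u32
example_gpio_line_mask(int line)
{
	return 0x00010001 << (line & 0xf);
}
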
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h b/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h
new file mode 100644
index 000000000000..2ee1c895c782
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/priv.h
@@ -0,0 +1,17 @@
1#ifndef __NVKM_GPIO_H__
2#define __NVKM_GPIO_H__
3
4#include <subdev/gpio.h>
5
6void nv50_gpio_dtor(struct nouveau_object *);
7int nv50_gpio_init(struct nouveau_object *);
8int nv50_gpio_fini(struct nouveau_object *, bool);
9void nv50_gpio_intr(struct nouveau_subdev *);
10void nv50_gpio_intr_enable(struct nouveau_event *, int line);
11void nv50_gpio_intr_disable(struct nouveau_event *, int line);
12
13void nvd0_gpio_reset(struct nouveau_gpio *, u8);
14int nvd0_gpio_drive(struct nouveau_gpio *, int, int, int);
15int nvd0_gpio_sense(struct nouveau_gpio *, int);
16
17#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c
new file mode 100644
index 000000000000..dec94e9d776a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c
@@ -0,0 +1,279 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include <subdev/i2c.h>
26
27struct anx9805_i2c_port {
28 struct nouveau_i2c_port base;
29 u32 addr;
30 u32 ctrl;
31};
32
33static int
34anx9805_train(struct nouveau_i2c_port *port, int link_nr, int link_bw, bool enh)
35{
36 struct anx9805_i2c_port *chan = (void *)port;
37 struct nouveau_i2c_port *mast = (void *)nv_object(chan)->parent;
38 u8 tmp, i;
39
40 nv_wri2cr(mast, chan->addr, 0xa0, link_bw);
41 nv_wri2cr(mast, chan->addr, 0xa1, link_nr | (enh ? 0x80 : 0x00));
42 nv_wri2cr(mast, chan->addr, 0xa2, 0x01);
43 nv_wri2cr(mast, chan->addr, 0xa8, 0x01);
44
45 i = 0;
46 while ((tmp = nv_rdi2cr(mast, chan->addr, 0xa8)) & 0x01) {
47 mdelay(5);
48 if (i++ == 100) {
49 nv_error(port, "link training timed out\n");
50 return -ETIMEDOUT;
51 }
52 }
53
54 if (tmp & 0x70) {
55 nv_error(port, "link training failed: 0x%02x\n", tmp);
56 return -EIO;
57 }
58
59 return 1;
60}
61
62static int
63anx9805_aux(struct nouveau_i2c_port *port, u8 type, u32 addr, u8 *data, u8 size)
64{
65 struct anx9805_i2c_port *chan = (void *)port;
66 struct nouveau_i2c_port *mast = (void *)nv_object(chan)->parent;
67 int i, ret = -ETIMEDOUT;
68 u8 tmp;
69
70 tmp = nv_rdi2cr(mast, chan->ctrl, 0x07) & ~0x04;
71 nv_wri2cr(mast, chan->ctrl, 0x07, tmp | 0x04);
72 nv_wri2cr(mast, chan->ctrl, 0x07, tmp);
73 nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01);
74
75 nv_wri2cr(mast, chan->addr, 0xe4, 0x80);
76 for (i = 0; !(type & 1) && i < size; i++)
77 nv_wri2cr(mast, chan->addr, 0xf0 + i, data[i]);
78 nv_wri2cr(mast, chan->addr, 0xe5, ((size - 1) << 4) | type);
79 nv_wri2cr(mast, chan->addr, 0xe6, (addr & 0x000ff) >> 0);
80 nv_wri2cr(mast, chan->addr, 0xe7, (addr & 0x0ff00) >> 8);
81 nv_wri2cr(mast, chan->addr, 0xe8, (addr & 0xf0000) >> 16);
82 nv_wri2cr(mast, chan->addr, 0xe9, 0x01);
83
84 i = 0;
85 while ((tmp = nv_rdi2cr(mast, chan->addr, 0xe9)) & 0x01) {
86 mdelay(5);
87 if (i++ == 32)
88 goto done;
89 }
90
91 if ((tmp = nv_rdi2cr(mast, chan->ctrl, 0xf7)) & 0x01) {
92 ret = -EIO;
93 goto done;
94 }
95
96 for (i = 0; (type & 1) && i < size; i++)
97 data[i] = nv_rdi2cr(mast, chan->addr, 0xf0 + i);
98 ret = 0;
99done:
100 nv_wri2cr(mast, chan->ctrl, 0xf7, 0x01);
101 return ret;
102}
103
104static const struct nouveau_i2c_func
105anx9805_aux_func = {
106 .aux = anx9805_aux,
107 .lnk_ctl = anx9805_train,
108};
109
110static int
111anx9805_aux_chan_ctor(struct nouveau_object *parent,
112 struct nouveau_object *engine,
113 struct nouveau_oclass *oclass, void *data, u32 index,
114 struct nouveau_object **pobject)
115{
116 struct nouveau_i2c_port *mast = (void *)parent;
117 struct anx9805_i2c_port *chan;
118 int ret;
119
120 ret = nouveau_i2c_port_create(parent, engine, oclass, index,
121 &nouveau_i2c_aux_algo, &chan);
122 *pobject = nv_object(chan);
123 if (ret)
124 return ret;
125
126 switch ((oclass->handle & 0xff00) >> 8) {
127 case 0x0d:
128 chan->addr = 0x38;
129 chan->ctrl = 0x39;
130 break;
131 case 0x0e:
132 chan->addr = 0x3c;
133 chan->ctrl = 0x3b;
134 break;
135 default:
136 BUG_ON(1);
137 }
138
139 if (mast->adapter.algo == &i2c_bit_algo) {
140 struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
141 algo->udelay = max(algo->udelay, 40);
142 }
143
144 chan->base.func = &anx9805_aux_func;
145 return 0;
146}
147
148static struct nouveau_ofuncs
149anx9805_aux_ofuncs = {
150 .ctor = anx9805_aux_chan_ctor,
151 .dtor = _nouveau_i2c_port_dtor,
152 .init = _nouveau_i2c_port_init,
153 .fini = _nouveau_i2c_port_fini,
154};
155
156static int
157anx9805_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
158{
159 struct anx9805_i2c_port *port = adap->algo_data;
160 struct nouveau_i2c_port *mast = (void *)nv_object(port)->parent;
161 struct i2c_msg *msg = msgs;
162 int ret = -ETIMEDOUT;
163 int i, j, cnt = num;
164 u8 seg = 0x00, off = 0x00, tmp;
165
166 tmp = nv_rdi2cr(mast, port->ctrl, 0x07) & ~0x10;
167 nv_wri2cr(mast, port->ctrl, 0x07, tmp | 0x10);
168 nv_wri2cr(mast, port->ctrl, 0x07, tmp);
169 nv_wri2cr(mast, port->addr, 0x43, 0x05);
170 mdelay(5);
171
172 while (cnt--) {
173 if ( (msg->flags & I2C_M_RD) && msg->addr == 0x50) {
174 nv_wri2cr(mast, port->addr, 0x40, msg->addr << 1);
175 nv_wri2cr(mast, port->addr, 0x41, seg);
176 nv_wri2cr(mast, port->addr, 0x42, off);
177 nv_wri2cr(mast, port->addr, 0x44, msg->len);
178 nv_wri2cr(mast, port->addr, 0x45, 0x00);
179 nv_wri2cr(mast, port->addr, 0x43, 0x01);
180 for (i = 0; i < msg->len; i++) {
181 j = 0;
182 while (nv_rdi2cr(mast, port->addr, 0x46) & 0x10) {
183 mdelay(5);
184 if (j++ == 32)
185 goto done;
186 }
187 msg->buf[i] = nv_rdi2cr(mast, port->addr, 0x47);
188 }
189 } else
190 if (!(msg->flags & I2C_M_RD)) {
191 if (msg->addr == 0x50 && msg->len == 0x01) {
192 off = msg->buf[0];
193 } else
194 if (msg->addr == 0x30 && msg->len == 0x01) {
195 seg = msg->buf[0];
196 } else
197 goto done;
198 } else {
199 goto done;
200 }
201 msg++;
202 }
203
204 ret = num;
205done:
206 nv_wri2cr(mast, port->addr, 0x43, 0x00);
207 return ret;
208}
209
210static u32
211anx9805_func(struct i2c_adapter *adap)
212{
213 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
214}
215
216static const struct i2c_algorithm
217anx9805_i2c_algo = {
218 .master_xfer = anx9805_xfer,
219 .functionality = anx9805_func
220};
221
222static const struct nouveau_i2c_func
223anx9805_i2c_func = {
224};
225
226static int
227anx9805_ddc_port_ctor(struct nouveau_object *parent,
228 struct nouveau_object *engine,
229 struct nouveau_oclass *oclass, void *data, u32 index,
230 struct nouveau_object **pobject)
231{
232 struct nouveau_i2c_port *mast = (void *)parent;
233 struct anx9805_i2c_port *port;
234 int ret;
235
236 ret = nouveau_i2c_port_create(parent, engine, oclass, index,
237 &anx9805_i2c_algo, &port);
238 *pobject = nv_object(port);
239 if (ret)
240 return ret;
241
242 switch ((oclass->handle & 0xff00) >> 8) {
243 case 0x0d:
244 port->addr = 0x3d;
245 port->ctrl = 0x39;
246 break;
247 case 0x0e:
248 port->addr = 0x3f;
249 port->ctrl = 0x3b;
250 break;
251 default:
252 BUG_ON(1);
253 }
254
255 if (mast->adapter.algo == &i2c_bit_algo) {
256 struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
257 algo->udelay = max(algo->udelay, 40);
258 }
259
260 port->base.func = &anx9805_i2c_func;
261 return 0;
262}
263
264static struct nouveau_ofuncs
265anx9805_ddc_ofuncs = {
266 .ctor = anx9805_ddc_port_ctor,
267 .dtor = _nouveau_i2c_port_dtor,
268 .init = _nouveau_i2c_port_init,
269 .fini = _nouveau_i2c_port_fini,
270};
271
272struct nouveau_oclass
273nouveau_anx9805_sclass[] = {
274 { .handle = NV_I2C_TYPE_EXTDDC(0x0d), .ofuncs = &anx9805_ddc_ofuncs },
275 { .handle = NV_I2C_TYPE_EXTAUX(0x0d), .ofuncs = &anx9805_aux_ofuncs },
276 { .handle = NV_I2C_TYPE_EXTDDC(0x0e), .ofuncs = &anx9805_ddc_ofuncs },
277 { .handle = NV_I2C_TYPE_EXTAUX(0x0e), .ofuncs = &anx9805_aux_ofuncs },
278 {}
279};
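
anx9805.c adds support for an external DisplayPort/TMDS encoder that hangs off a regular i2c bus: its AUX channel and an EDID/DDC passthrough are exposed as ordinary nouveau i2c ports layered on the parent port (hence the `mast` casts), with register pairs selected by which of the two supported encoder ids (0x0d/0x0e) appears in the oclass handle. anx9805_xfer() implements only the minimal E-DDC shape needed for EDID; as a usage sketch, the message sequence it accepts (0x50/0x30 are the standard DDC data/segment addresses; anything else falls through to `done` and fails):

/* illustrative caller only; i2c_transfer() returns the number of
 * messages completed on success
 */
static int
example_read_edid(struct i2c_adapter *adap, u8 seg, u8 off,
		  u8 *buf, u8 len)
{
	struct i2c_msg msgs[] = {
		{ .addr = 0x30, .flags = 0, .len = 1, .buf = &seg },
		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &off },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = len, .buf = buf },
	};
	return i2c_transfer(adap, msgs, 3) == 3 ? 0 : -EIO;
}
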
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
index dc27e794a851..5de074ad170b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
@@ -24,151 +24,40 @@
24 24
25#include <subdev/i2c.h> 25#include <subdev/i2c.h>
26 26
27/******************************************************************************
28 * aux channel util functions
29 *****************************************************************************/
30#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
31#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
32
33static void
34auxch_fini(struct nouveau_i2c *aux, int ch)
35{
36 nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00310000, 0x00000000);
37}
38
39static int
40auxch_init(struct nouveau_i2c *aux, int ch)
41{
42 const u32 unksel = 1; /* nfi which to use, or if it matters.. */
43 const u32 ureq = unksel ? 0x00100000 : 0x00200000;
44 const u32 urep = unksel ? 0x01000000 : 0x02000000;
45 u32 ctrl, timeout;
46
47 /* wait up to 1ms for any previous transaction to be done... */
48 timeout = 1000;
49 do {
50 ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
51 udelay(1);
52 if (!timeout--) {
53 AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
54 return -EBUSY;
55 }
56 } while (ctrl & 0x03010000);
57
58 /* set some magic, and wait up to 1ms for it to appear */
59 nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00300000, ureq);
60 timeout = 1000;
61 do {
62 ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
63 udelay(1);
64 if (!timeout--) {
65 AUX_ERR("magic wait 0x%08x\n", ctrl);
66 auxch_fini(aux, ch);
67 return -EBUSY;
68 }
69 } while ((ctrl & 0x03000000) != urep);
70
71 return 0;
72}
73
74static int
75auxch_tx(struct nouveau_i2c *aux, int ch, u8 type, u32 addr, u8 *data, u8 size)
76{
77 u32 ctrl, stat, timeout, retries;
78 u32 xbuf[4] = {};
79 int ret, i;
80
81 AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
82
83 ret = auxch_init(aux, ch);
84 if (ret)
85 goto out;
86
87 stat = nv_rd32(aux, 0x00e4e8 + (ch * 0x50));
88 if (!(stat & 0x10000000)) {
89 AUX_DBG("sink not detected\n");
90 ret = -ENXIO;
91 goto out;
92 }
93
94 if (!(type & 1)) {
95 memcpy(xbuf, data, size);
96 for (i = 0; i < 16; i += 4) {
97 AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
98 nv_wr32(aux, 0x00e4c0 + (ch * 0x50) + i, xbuf[i / 4]);
99 }
100 }
101
102 ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
103 ctrl &= ~0x0001f0ff;
104 ctrl |= type << 12;
105 ctrl |= size - 1;
106 nv_wr32(aux, 0x00e4e0 + (ch * 0x50), addr);
107
108 /* retry transaction a number of times on failure... */
109 ret = -EREMOTEIO;
110 for (retries = 0; retries < 32; retries++) {
111 /* reset, and delay a while if this is a retry */
112 nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl);
113 nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl);
114 if (retries)
115 udelay(400);
116
117 /* transaction request, wait up to 1ms for it to complete */
118 nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00010000 | ctrl);
119
120 timeout = 1000;
121 do {
122 ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
123 udelay(1);
124 if (!timeout--) {
125 AUX_ERR("tx req timeout 0x%08x\n", ctrl);
126 goto out;
127 }
128 } while (ctrl & 0x00010000);
129
130 /* read status, and check if transaction completed ok */
131 stat = nv_mask(aux, 0x00e4e8 + (ch * 0x50), 0, 0);
132 if (!(stat & 0x000f0f00)) {
133 ret = 0;
134 break;
135 }
136
137 AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
138 }
139
140 if (type & 1) {
141 for (i = 0; i < 16; i += 4) {
142 xbuf[i / 4] = nv_rd32(aux, 0x00e4d0 + (ch * 0x50) + i);
143 AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
144 }
145 memcpy(data, xbuf, size);
146 }
147
148out:
149 auxch_fini(aux, ch);
150 return ret;
151}
152
153int 27int
154nv_rdaux(struct nouveau_i2c_port *auxch, u32 addr, u8 *data, u8 size) 28nv_rdaux(struct nouveau_i2c_port *port, u32 addr, u8 *data, u8 size)
155{ 29{
156 return auxch_tx(auxch->i2c, auxch->drive, 9, addr, data, size); 30 if (port->func->aux) {
31 if (port->func->acquire)
32 port->func->acquire(port);
33 return port->func->aux(port, 9, addr, data, size);
34 }
35 return -ENODEV;
157} 36}
158 37
159int 38int
160nv_wraux(struct nouveau_i2c_port *auxch, u32 addr, u8 *data, u8 size) 39nv_wraux(struct nouveau_i2c_port *port, u32 addr, u8 *data, u8 size)
161{ 40{
162 return auxch_tx(auxch->i2c, auxch->drive, 8, addr, data, size); 41 if (port->func->aux) {
42 if (port->func->acquire)
43 port->func->acquire(port);
44 return port->func->aux(port, 8, addr, data, size);
45 }
46 return -ENODEV;
163} 47}
164 48
165static int 49static int
166aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) 50aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
167{ 51{
168 struct nouveau_i2c_port *auxch = (struct nouveau_i2c_port *)adap; 52 struct nouveau_i2c_port *port = adap->algo_data;
169 struct i2c_msg *msg = msgs; 53 struct i2c_msg *msg = msgs;
170 int ret, mcnt = num; 54 int ret, mcnt = num;
171 55
56 if (!port->func->aux)
57 return -ENODEV;
58 if ( port->func->acquire)
59 port->func->acquire(port);
60
172 while (mcnt--) { 61 while (mcnt--) {
173 u8 remaining = msg->len; 62 u8 remaining = msg->len;
174 u8 *ptr = msg->buf; 63 u8 *ptr = msg->buf;
@@ -185,8 +74,7 @@ aux_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
185 if (mcnt || remaining > 16) 74 if (mcnt || remaining > 16)
186 cmd |= 4; /* MOT */ 75 cmd |= 4; /* MOT */
187 76
188 ret = auxch_tx(auxch->i2c, auxch->drive, cmd, 77 ret = port->func->aux(port, cmd, msg->addr, ptr, cnt);
189 msg->addr, ptr, cnt);
190 if (ret < 0) 78 if (ret < 0)
191 return ret; 79 return ret;
192 80
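
The aux.c rewrite moves the NV50-family AUX channel code out of the common layer (it resurfaces in nv94.c below) and dispatches through the per-port function table instead: type 9 is still a read and 8 a write, ports without an ->aux hook now fail cleanly with -ENODEV, and an optional ->acquire hook runs before each transaction. A usage sketch of the resulting interface, reading the DPCD revision register (address 0x00000 in the DisplayPort spec):

static int
example_dpcd_rev(struct nouveau_i2c_port *port, u8 *rev)
{
	/* ends up in port->func->aux(), e.g. anx9805_aux() above */
	return nv_rdaux(port, 0x00000, rev, 1);
}
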
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index dbfc2abf0cfe..a114a0ed7e98 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2012 Red Hat Inc. 2 * Copyright 2013 Red Hat Inc.
3 * 3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"), 5 * copy of this software and associated documentation files (the "Software"),
@@ -22,64 +22,136 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include "core/option.h" 25#include <core/option.h>
26 26
27#include "subdev/i2c.h" 27#include <subdev/bios.h>
28#include "subdev/vga.h" 28#include <subdev/bios/dcb.h>
29#include <subdev/bios/i2c.h>
30#include <subdev/i2c.h>
31#include <subdev/vga.h>
29 32
30int 33/******************************************************************************
31nv_rdi2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg) 34 * interface to linux i2c bit-banging algorithm
35 *****************************************************************************/
36
37#ifdef CONFIG_NOUVEAU_I2C_INTERNAL_DEFAULT
38#define CSTMSEL true
39#else
40#define CSTMSEL false
41#endif
42
43static int
44nouveau_i2c_pre_xfer(struct i2c_adapter *adap)
32{ 45{
33 u8 val; 46 struct i2c_algo_bit_data *bit = adap->algo_data;
34 struct i2c_msg msgs[] = { 47 struct nouveau_i2c_port *port = bit->data;
35 { .addr = addr, .flags = 0, .len = 1, .buf = &reg }, 48 if (port->func->acquire)
36 { .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = &val }, 49 port->func->acquire(port);
37 }; 50 return 0;
51}
38 52
39 int ret = i2c_transfer(&port->adapter, msgs, 2); 53static void
40 if (ret != 2) 54nouveau_i2c_setscl(void *data, int state)
41 return -EIO; 55{
56 struct nouveau_i2c_port *port = data;
57 port->func->drive_scl(port, state);
58}
42 59
43 return val; 60static void
61nouveau_i2c_setsda(void *data, int state)
62{
63 struct nouveau_i2c_port *port = data;
64 port->func->drive_sda(port, state);
44} 65}
45 66
46int 67static int
47nv_wri2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg, u8 val) 68nouveau_i2c_getscl(void *data)
48{ 69{
49 struct i2c_msg msgs[] = { 70 struct nouveau_i2c_port *port = data;
50 { .addr = addr, .flags = 0, .len = 1, .buf = &reg }, 71 return port->func->sense_scl(port);
51 { .addr = addr, .flags = 0, .len = 1, .buf = &val }, 72}
52 };
53 73
54 int ret = i2c_transfer(&port->adapter, msgs, 2); 74static int
55 if (ret != 2) 75nouveau_i2c_getsda(void *data)
56 return -EIO; 76{
77 struct nouveau_i2c_port *port = data;
78 return port->func->sense_sda(port);
79}
57 80
58 return 0; 81/******************************************************************************
82 * base i2c "port" class implementation
83 *****************************************************************************/
84
85void
86_nouveau_i2c_port_dtor(struct nouveau_object *object)
87{
88 struct nouveau_i2c_port *port = (void *)object;
89 i2c_del_adapter(&port->adapter);
90 nouveau_object_destroy(&port->base);
59} 91}
60 92
61bool 93int
62nv_probe_i2c(struct nouveau_i2c_port *port, u8 addr) 94nouveau_i2c_port_create_(struct nouveau_object *parent,
95 struct nouveau_object *engine,
96 struct nouveau_oclass *oclass, u8 index,
97 const struct i2c_algorithm *algo,
98 int size, void **pobject)
63{ 99{
64 u8 buf[] = { 0 }; 100 struct nouveau_device *device = nv_device(parent);
65 struct i2c_msg msgs[] = { 101 struct nouveau_i2c *i2c = (void *)engine;
66 { 102 struct nouveau_i2c_port *port;
67 .addr = addr, 103 int ret;
68 .flags = 0,
69 .len = 1,
70 .buf = buf,
71 },
72 {
73 .addr = addr,
74 .flags = I2C_M_RD,
75 .len = 1,
76 .buf = buf,
77 }
78 };
79 104
80 return i2c_transfer(&port->adapter, msgs, 2) == 2; 105 ret = nouveau_object_create_(parent, engine, oclass, 0, size, pobject);
106 port = *pobject;
107 if (ret)
108 return ret;
109
110 snprintf(port->adapter.name, sizeof(port->adapter.name),
111 "nouveau-%s-%d", device->name, index);
112 port->adapter.owner = THIS_MODULE;
113 port->adapter.dev.parent = &device->pdev->dev;
114 port->index = index;
115 i2c_set_adapdata(&port->adapter, i2c);
116
117 if ( algo == &nouveau_i2c_bit_algo &&
118 !nouveau_boolopt(device->cfgopt, "NvI2C", CSTMSEL)) {
119 struct i2c_algo_bit_data *bit;
120
121 bit = kzalloc(sizeof(*bit), GFP_KERNEL);
122 if (!bit)
123 return -ENOMEM;
124
125 bit->udelay = 10;
126 bit->timeout = usecs_to_jiffies(2200);
127 bit->data = port;
128 bit->pre_xfer = nouveau_i2c_pre_xfer;
129 bit->setsda = nouveau_i2c_setsda;
130 bit->setscl = nouveau_i2c_setscl;
131 bit->getsda = nouveau_i2c_getsda;
132 bit->getscl = nouveau_i2c_getscl;
133
134 port->adapter.algo_data = bit;
135 ret = i2c_bit_add_bus(&port->adapter);
136 } else {
137 port->adapter.algo_data = port;
138 port->adapter.algo = algo;
139 ret = i2c_add_adapter(&port->adapter);
140 }
141
142 /* drop port's i2c subdev refcount, i2c handles this itself */
143 if (ret == 0) {
144 list_add_tail(&port->head, &i2c->ports);
145 atomic_dec(&engine->refcount);
146 }
147
148 return ret;
81} 149}
82 150
151/******************************************************************************
152 * base i2c subdev class implementation
153 *****************************************************************************/
154
83static struct nouveau_i2c_port * 155static struct nouveau_i2c_port *
84nouveau_i2c_find(struct nouveau_i2c *i2c, u8 index) 156nouveau_i2c_find(struct nouveau_i2c *i2c, u8 index)
85{ 157{
@@ -103,29 +175,23 @@ nouveau_i2c_find(struct nouveau_i2c *i2c, u8 index)
103 175
104 list_for_each_entry(port, &i2c->ports, head) { 176 list_for_each_entry(port, &i2c->ports, head) {
105 if (port->index == index) 177 if (port->index == index)
106 break; 178 return port;
107 } 179 }
108 180
109 if (&port->head == &i2c->ports) 181 return NULL;
110 return NULL; 182}
111 183
112 if (nv_device(i2c)->card_type >= NV_50 && (port->dcb & 0x00000100)) { 184static struct nouveau_i2c_port *
113 u32 reg = 0x00e500, val; 185nouveau_i2c_find_type(struct nouveau_i2c *i2c, u16 type)
114 if (port->type == 6) { 186{
115 reg += port->drive * 0x50; 187 struct nouveau_i2c_port *port;
116 val = 0x2002;
117 } else {
118 reg += ((port->dcb & 0x1e00) >> 9) * 0x50;
119 val = 0xe001;
120 }
121 188
122 /* nfi, but neither auxch or i2c work if it's 1 */ 189 list_for_each_entry(port, &i2c->ports, head) {
123 nv_mask(i2c, reg + 0x0c, 0x00000001, 0x00000000); 190 if (nv_hclass(port) == type)
124 /* nfi, but switches auxch vs normal i2c */ 191 return port;
125 nv_mask(i2c, reg + 0x00, 0x0000f003, val);
126 } 192 }
127 193
128 return port; 194 return NULL;
129} 195}
130 196
131static int 197static int
@@ -155,109 +221,86 @@ nouveau_i2c_identify(struct nouveau_i2c *i2c, int index, const char *what,
155 return -ENODEV; 221 return -ENODEV;
156} 222}
157 223
158void 224int
159nouveau_i2c_drive_scl(void *data, int state) 225_nouveau_i2c_fini(struct nouveau_object *object, bool suspend)
160{ 226{
161 struct nouveau_i2c_port *port = data; 227 struct nouveau_i2c *i2c = (void *)object;
228 struct nouveau_i2c_port *port;
229 int ret;
162 230
163 if (port->type == DCB_I2C_NV04_BIT) { 231 list_for_each_entry(port, &i2c->ports, head) {
164 u8 val = nv_rdvgac(port->i2c, 0, port->drive); 232 ret = nv_ofuncs(port)->fini(nv_object(port), suspend);
165 if (state) val |= 0x20; 233 if (ret && suspend)
166 else val &= 0xdf; 234 goto fail;
167 nv_wrvgac(port->i2c, 0, port->drive, val | 0x01);
168 } else
169 if (port->type == DCB_I2C_NV4E_BIT) {
170 nv_mask(port->i2c, port->drive, 0x2f, state ? 0x21 : 0x01);
171 } else
172 if (port->type == DCB_I2C_NVIO_BIT) {
173 if (state) port->state |= 0x01;
174 else port->state &= 0xfe;
175 nv_wr32(port->i2c, port->drive, 4 | port->state);
176 } 235 }
177}
178
179void
180nouveau_i2c_drive_sda(void *data, int state)
181{
182 struct nouveau_i2c_port *port = data;
183 236
184 if (port->type == DCB_I2C_NV04_BIT) { 237 return nouveau_subdev_fini(&i2c->base, suspend);
185 u8 val = nv_rdvgac(port->i2c, 0, port->drive); 238fail:
186 if (state) val |= 0x10; 239 list_for_each_entry_continue_reverse(port, &i2c->ports, head) {
187 else val &= 0xef; 240 nv_ofuncs(port)->init(nv_object(port));
188 nv_wrvgac(port->i2c, 0, port->drive, val | 0x01);
189 } else
190 if (port->type == DCB_I2C_NV4E_BIT) {
191 nv_mask(port->i2c, port->drive, 0x1f, state ? 0x11 : 0x01);
192 } else
193 if (port->type == DCB_I2C_NVIO_BIT) {
194 if (state) port->state |= 0x02;
195 else port->state &= 0xfd;
196 nv_wr32(port->i2c, port->drive, 4 | port->state);
197 } 241 }
242
243 return ret;
198} 244}
199 245
200int 246int
201nouveau_i2c_sense_scl(void *data) 247_nouveau_i2c_init(struct nouveau_object *object)
202{ 248{
203 struct nouveau_i2c_port *port = data; 249 struct nouveau_i2c *i2c = (void *)object;
204 struct nouveau_device *device = nv_device(port->i2c); 250 struct nouveau_i2c_port *port;
205 251 int ret;
206 if (port->type == DCB_I2C_NV04_BIT) { 252
207 return !!(nv_rdvgac(port->i2c, 0, port->sense) & 0x04); 253 ret = nouveau_subdev_init(&i2c->base);
208 } else 254 if (ret == 0) {
209 if (port->type == DCB_I2C_NV4E_BIT) { 255 list_for_each_entry(port, &i2c->ports, head) {
210 return !!(nv_rd32(port->i2c, port->sense) & 0x00040000); 256 ret = nv_ofuncs(port)->init(nv_object(port));
211 } else 257 if (ret)
212 if (port->type == DCB_I2C_NVIO_BIT) { 258 goto fail;
213 if (device->card_type < NV_D0) 259 }
214 return !!(nv_rd32(port->i2c, port->sense) & 0x01);
215 else
216 return !!(nv_rd32(port->i2c, port->sense) & 0x10);
217 } 260 }
218 261
219 return 0; 262 return ret;
263fail:
264 list_for_each_entry_continue_reverse(port, &i2c->ports, head) {
265 nv_ofuncs(port)->fini(nv_object(port), false);
266 }
267
268 return ret;
220} 269}
221 270
222int 271void
223nouveau_i2c_sense_sda(void *data) 272_nouveau_i2c_dtor(struct nouveau_object *object)
224{ 273{
225 struct nouveau_i2c_port *port = data; 274 struct nouveau_i2c *i2c = (void *)object;
226 struct nouveau_device *device = nv_device(port->i2c); 275 struct nouveau_i2c_port *port, *temp;
227 276
228 if (port->type == DCB_I2C_NV04_BIT) { 277 list_for_each_entry_safe(port, temp, &i2c->ports, head) {
229 return !!(nv_rdvgac(port->i2c, 0, port->sense) & 0x08); 278 nouveau_object_ref(NULL, (struct nouveau_object **)&port);
230 } else
231 if (port->type == DCB_I2C_NV4E_BIT) {
232 return !!(nv_rd32(port->i2c, port->sense) & 0x00080000);
233 } else
234 if (port->type == DCB_I2C_NVIO_BIT) {
235 if (device->card_type < NV_D0)
236 return !!(nv_rd32(port->i2c, port->sense) & 0x02);
237 else
238 return !!(nv_rd32(port->i2c, port->sense) & 0x20);
239 } 279 }
240 280
241 return 0; 281 nouveau_subdev_destroy(&i2c->base);
242} 282}
243 283
244static const u32 nv50_i2c_port[] = { 284static struct nouveau_oclass *
245 0x00e138, 0x00e150, 0x00e168, 0x00e180, 285nouveau_i2c_extdev_sclass[] = {
246 0x00e254, 0x00e274, 0x00e764, 0x00e780, 286 nouveau_anx9805_sclass,
247 0x00e79c, 0x00e7b8
248}; 287};
249 288
250static int 289int
251nouveau_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 290nouveau_i2c_create_(struct nouveau_object *parent,
252 struct nouveau_oclass *oclass, void *data, u32 size, 291 struct nouveau_object *engine,
253 struct nouveau_object **pobject) 292 struct nouveau_oclass *oclass,
293 struct nouveau_oclass *sclass,
294 int length, void **pobject)
254{ 295{
255 struct nouveau_device *device = nv_device(parent);
256 struct nouveau_bios *bios = nouveau_bios(parent); 296 struct nouveau_bios *bios = nouveau_bios(parent);
257 struct nouveau_i2c_port *port;
258 struct nouveau_i2c *i2c; 297 struct nouveau_i2c *i2c;
298 struct nouveau_object *object;
259 struct dcb_i2c_entry info; 299 struct dcb_i2c_entry info;
260 int ret, i = -1; 300 int ret, i, j, index = -1;
301 struct dcb_output outp;
302 u8 ver, hdr;
303 u32 data;
261 304
262 ret = nouveau_subdev_create(parent, engine, oclass, 0, 305 ret = nouveau_subdev_create(parent, engine, oclass, 0,
263 "I2C", "i2c", &i2c); 306 "I2C", "i2c", &i2c);
@@ -266,142 +309,60 @@ nouveau_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
266 return ret; 309 return ret;
267 310
268 i2c->find = nouveau_i2c_find; 311 i2c->find = nouveau_i2c_find;
312 i2c->find_type = nouveau_i2c_find_type;
269 i2c->identify = nouveau_i2c_identify; 313 i2c->identify = nouveau_i2c_identify;
270 INIT_LIST_HEAD(&i2c->ports); 314 INIT_LIST_HEAD(&i2c->ports);
271 315
272 while (!dcb_i2c_parse(bios, ++i, &info)) { 316 while (!dcb_i2c_parse(bios, ++index, &info)) {
273 if (info.type == DCB_I2C_UNUSED) 317 if (info.type == DCB_I2C_UNUSED)
274 continue; 318 continue;
275 319
276 port = kzalloc(sizeof(*port), GFP_KERNEL); 320 oclass = sclass;
277 if (!port) { 321 do {
278 nv_error(i2c, "failed port memory alloc at %d\n", i); 322 ret = -EINVAL;
279 break; 323 if (oclass->handle == info.type) {
280 } 324 ret = nouveau_object_ctor(*pobject, *pobject,
281 325 oclass, &info,
282 port->type = info.type; 326 index, &object);
283 switch (port->type) {
284 case DCB_I2C_NV04_BIT:
285 port->drive = info.drive;
286 port->sense = info.sense;
287 break;
288 case DCB_I2C_NV4E_BIT:
289 port->drive = 0x600800 + info.drive;
290 port->sense = port->drive;
291 break;
292 case DCB_I2C_NVIO_BIT:
293 port->drive = info.drive & 0x0f;
294 if (device->card_type < NV_D0) {
295 if (port->drive >= ARRAY_SIZE(nv50_i2c_port))
296 break;
297 port->drive = nv50_i2c_port[port->drive];
298 port->sense = port->drive;
299 } else {
300 port->drive = 0x00d014 + (port->drive * 0x20);
301 port->sense = port->drive;
302 } 327 }
328 } while (ret && (++oclass)->handle);
329 }
330
331 /* in addition to the busses specified in the i2c table, there
332 * may be ddc/aux channels hiding behind external tmds/dp/etc
333 * transmitters.
334 */
335 index = ((index + 0x0f) / 0x10) * 0x10;
336 i = -1;
337 while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &outp))) {
338 if (!outp.location || !outp.extdev)
339 continue;
340
341 switch (outp.type) {
342 case DCB_OUTPUT_TMDS:
343 info.type = NV_I2C_TYPE_EXTDDC(outp.extdev);
303 break; 344 break;
304 case DCB_I2C_NVIO_AUX: 345 case DCB_OUTPUT_DP:
305 port->drive = info.drive & 0x0f; 346 info.type = NV_I2C_TYPE_EXTAUX(outp.extdev);
306 port->sense = port->drive;
307 port->adapter.algo = &nouveau_i2c_aux_algo;
308 break; 347 break;
309 default: 348 default:
310 break;
311 }
312
313 if (!port->adapter.algo && !port->drive) {
314 nv_error(i2c, "I2C%d: type %d index %x/%x unknown\n",
315 i, port->type, port->drive, port->sense);
316 kfree(port);
317 continue; 349 continue;
318 } 350 }
319 351
320 snprintf(port->adapter.name, sizeof(port->adapter.name), 352 ret = -ENODEV;
321 "nouveau-%s-%d", device->name, i); 353 j = -1;
322 port->adapter.owner = THIS_MODULE; 354 while (ret && ++j < ARRAY_SIZE(nouveau_i2c_extdev_sclass)) {
323 port->adapter.dev.parent = &device->pdev->dev; 355 parent = nv_object(i2c->find(i2c, outp.i2c_index));
324 port->i2c = i2c; 356 oclass = nouveau_i2c_extdev_sclass[j];
325 port->index = i; 357 do {
326 port->dcb = info.data; 358 if (oclass->handle != info.type)
327 i2c_set_adapdata(&port->adapter, i2c); 359 continue;
328 360 ret = nouveau_object_ctor(parent, *pobject,
329 if (port->adapter.algo != &nouveau_i2c_aux_algo) { 361 oclass, NULL,
330 nouveau_i2c_drive_scl(port, 0); 362 index++, &object);
331 nouveau_i2c_drive_sda(port, 1); 363 } while (ret && (++oclass)->handle);
332 nouveau_i2c_drive_scl(port, 1);
333
334#ifdef CONFIG_NOUVEAU_I2C_INTERNAL_DEFAULT
335 if (nouveau_boolopt(device->cfgopt, "NvI2C", true)) {
336#else
337 if (nouveau_boolopt(device->cfgopt, "NvI2C", false)) {
338#endif
339 port->adapter.algo = &nouveau_i2c_bit_algo;
340 ret = i2c_add_adapter(&port->adapter);
341 } else {
342 port->adapter.algo_data = &port->bit;
343 port->bit.udelay = 10;
344 port->bit.timeout = usecs_to_jiffies(2200);
345 port->bit.data = port;
346 port->bit.setsda = nouveau_i2c_drive_sda;
347 port->bit.setscl = nouveau_i2c_drive_scl;
348 port->bit.getsda = nouveau_i2c_sense_sda;
349 port->bit.getscl = nouveau_i2c_sense_scl;
350 ret = i2c_bit_add_bus(&port->adapter);
351 }
352 } else {
353 port->adapter.algo = &nouveau_i2c_aux_algo;
354 ret = i2c_add_adapter(&port->adapter);
355 }
356
357 if (ret) {
358 nv_error(i2c, "I2C%d: failed register: %d\n", i, ret);
359 kfree(port);
360 continue;
361 } 364 }
362
363 list_add_tail(&port->head, &i2c->ports);
364 } 365 }
365 366
366 return 0; 367 return 0;
367} 368}
368
369static void
370nouveau_i2c_dtor(struct nouveau_object *object)
371{
372 struct nouveau_i2c *i2c = (void *)object;
373 struct nouveau_i2c_port *port, *temp;
374
375 list_for_each_entry_safe(port, temp, &i2c->ports, head) {
376 i2c_del_adapter(&port->adapter);
377 list_del(&port->head);
378 kfree(port);
379 }
380
381 nouveau_subdev_destroy(&i2c->base);
382}
383
384static int
385nouveau_i2c_init(struct nouveau_object *object)
386{
387 struct nouveau_i2c *i2c = (void *)object;
388 return nouveau_subdev_init(&i2c->base);
389}
390
391static int
392nouveau_i2c_fini(struct nouveau_object *object, bool suspend)
393{
394 struct nouveau_i2c *i2c = (void *)object;
395 return nouveau_subdev_fini(&i2c->base, suspend);
396}
397
398struct nouveau_oclass
399nouveau_i2c_oclass = {
400 .handle = NV_SUBDEV(I2C, 0x00),
401 .ofuncs = &(struct nouveau_ofuncs) {
402 .ctor = nouveau_i2c_ctor,
403 .dtor = nouveau_i2c_dtor,
404 .init = nouveau_i2c_init,
405 .fini = nouveau_i2c_fini,
406 },
407};
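
base.c now treats ports as first-class nouveau objects: the constructor walks the DCB i2c table and matches each entry's type against a chipset-provided sclass list, then scans DCB outputs for external transmitters and instantiates their DDC/AUX ports from nouveau_i2c_extdev_sclass. Those external ports are looked up by type rather than bus index; a usage sketch, assuming the handle macros from subdev/i2c.h and an anx9805 at encoder id 0x0d:

static struct nouveau_i2c_port *
example_find_ext_ddc(struct nouveau_i2c *i2c)
{
	/* NULL if no matching port was created at construction time */
	return i2c->find_type(i2c, NV_I2C_TYPE_EXTDDC(0x0d));
}
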
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c
index 1c4c9a5c8e2e..a6e72d3b06b5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/bit.c
@@ -32,25 +32,25 @@
32static inline void 32static inline void
33i2c_drive_scl(struct nouveau_i2c_port *port, int state) 33i2c_drive_scl(struct nouveau_i2c_port *port, int state)
34{ 34{
35 nouveau_i2c_drive_scl(port, state); 35 port->func->drive_scl(port, state);
36} 36}
37 37
38static inline void 38static inline void
39i2c_drive_sda(struct nouveau_i2c_port *port, int state) 39i2c_drive_sda(struct nouveau_i2c_port *port, int state)
40{ 40{
41 nouveau_i2c_drive_sda(port, state); 41 port->func->drive_sda(port, state);
42} 42}
43 43
44static inline int 44static inline int
45i2c_sense_scl(struct nouveau_i2c_port *port) 45i2c_sense_scl(struct nouveau_i2c_port *port)
46{ 46{
47 return nouveau_i2c_sense_scl(port); 47 return port->func->sense_scl(port);
48} 48}
49 49
50static inline int 50static inline int
51i2c_sense_sda(struct nouveau_i2c_port *port) 51i2c_sense_sda(struct nouveau_i2c_port *port)
52{ 52{
53 return nouveau_i2c_sense_sda(port); 53 return port->func->sense_sda(port);
54} 54}
55 55
56static void 56static void
@@ -77,9 +77,8 @@ i2c_start(struct nouveau_i2c_port *port)
77{ 77{
78 int ret = 0; 78 int ret = 0;
79 79
80 port->state = i2c_sense_scl(port); 80 if (!i2c_sense_scl(port) ||
81 port->state |= i2c_sense_sda(port) << 1; 81 !i2c_sense_sda(port)) {
82 if (port->state != 3) {
83 i2c_drive_scl(port, 0); 82 i2c_drive_scl(port, 0);
84 i2c_drive_sda(port, 1); 83 i2c_drive_sda(port, 1);
85 if (!i2c_raise_scl(port)) 84 if (!i2c_raise_scl(port))
@@ -184,10 +183,13 @@ i2c_addr(struct nouveau_i2c_port *port, struct i2c_msg *msg)
184static int 183static int
185i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) 184i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
186{ 185{
187 struct nouveau_i2c_port *port = (struct nouveau_i2c_port *)adap; 186 struct nouveau_i2c_port *port = adap->algo_data;
188 struct i2c_msg *msg = msgs; 187 struct i2c_msg *msg = msgs;
189 int ret = 0, mcnt = num; 188 int ret = 0, mcnt = num;
190 189
190 if (port->func->acquire)
191 port->func->acquire(port);
192
191 while (!ret && mcnt--) { 193 while (!ret && mcnt--) {
192 u8 remaining = msg->len; 194 u8 remaining = msg->len;
193 u8 *ptr = msg->buf; 195 u8 *ptr = msg->buf;
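
bit.c loses all knowledge of chipset register layouts: the wrappers call through port->func, the port is reached via adap->algo_data rather than a container cast, transfers acquire the port first, and i2c_start() probes the live line state instead of the removed port->state cache. The hook set a bit-banged port must now supply, sketched with stub bodies (real implementations follow in the new nv04/nv4e/nv50 files below):

/* hypothetical no-op hooks, just to show the required shape */
static void example_drive_scl(struct nouveau_i2c_port *port, int state) { }
static void example_drive_sda(struct nouveau_i2c_port *port, int state) { }
static int  example_sense_scl(struct nouveau_i2c_port *port) { return 1; }
static int  example_sense_sda(struct nouveau_i2c_port *port) { return 1; }

static const struct nouveau_i2c_func
example_bit_func = {
	.drive_scl = example_drive_scl,
	.drive_sda = example_drive_sda,
	.sense_scl = example_sense_scl,
	.sense_sda = example_sense_sda,
};
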
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c
new file mode 100644
index 000000000000..2ad18840fe63
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c
@@ -0,0 +1,143 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/i2c.h>
26#include <subdev/vga.h>
27
28struct nv04_i2c_priv {
29 struct nouveau_i2c base;
30};
31
32struct nv04_i2c_port {
33 struct nouveau_i2c_port base;
34 u8 drive;
35 u8 sense;
36};
37
38static void
39nv04_i2c_drive_scl(struct nouveau_i2c_port *base, int state)
40{
41 struct nv04_i2c_priv *priv = (void *)nv_object(base)->engine;
42 struct nv04_i2c_port *port = (void *)base;
43 u8 val = nv_rdvgac(priv, 0, port->drive);
44 if (state) val |= 0x20;
45 else val &= 0xdf;
46 nv_wrvgac(priv, 0, port->drive, val | 0x01);
47}
48
49static void
50nv04_i2c_drive_sda(struct nouveau_i2c_port *base, int state)
51{
52 struct nv04_i2c_priv *priv = (void *)nv_object(base)->engine;
53 struct nv04_i2c_port *port = (void *)base;
54 u8 val = nv_rdvgac(priv, 0, port->drive);
55 if (state) val |= 0x10;
56 else val &= 0xef;
57 nv_wrvgac(priv, 0, port->drive, val | 0x01);
58}
59
60static int
61nv04_i2c_sense_scl(struct nouveau_i2c_port *base)
62{
63 struct nv04_i2c_priv *priv = (void *)nv_object(base)->engine;
64 struct nv04_i2c_port *port = (void *)base;
65 return !!(nv_rdvgac(priv, 0, port->sense) & 0x04);
66}
67
68static int
69nv04_i2c_sense_sda(struct nouveau_i2c_port *base)
70{
71 struct nv04_i2c_priv *priv = (void *)nv_object(base)->engine;
72 struct nv04_i2c_port *port = (void *)base;
73 return !!(nv_rdvgac(priv, 0, port->sense) & 0x08);
74}
75
76static const struct nouveau_i2c_func
77nv04_i2c_func = {
78 .drive_scl = nv04_i2c_drive_scl,
79 .drive_sda = nv04_i2c_drive_sda,
80 .sense_scl = nv04_i2c_sense_scl,
81 .sense_sda = nv04_i2c_sense_sda,
82};
83
84static int
85nv04_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
86 struct nouveau_oclass *oclass, void *data, u32 index,
87 struct nouveau_object **pobject)
88{
89 struct dcb_i2c_entry *info = data;
90 struct nv04_i2c_port *port;
91 int ret;
92
93 ret = nouveau_i2c_port_create(parent, engine, oclass, index,
94 &nouveau_i2c_bit_algo, &port);
95 *pobject = nv_object(port);
96 if (ret)
97 return ret;
98
99 port->base.func = &nv04_i2c_func;
100 port->drive = info->drive;
101 port->sense = info->sense;
102 return 0;
103}
104
105static struct nouveau_oclass
106nv04_i2c_sclass[] = {
107 { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NV04_BIT),
108 .ofuncs = &(struct nouveau_ofuncs) {
109 .ctor = nv04_i2c_port_ctor,
110 .dtor = _nouveau_i2c_port_dtor,
111 .init = _nouveau_i2c_port_init,
112 .fini = _nouveau_i2c_port_fini,
113 },
114 },
115 {}
116};
117
118static int
119nv04_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
120 struct nouveau_oclass *oclass, void *data, u32 size,
121 struct nouveau_object **pobject)
122{
123 struct nv04_i2c_priv *priv;
124 int ret;
125
126 ret = nouveau_i2c_create(parent, engine, oclass, nv04_i2c_sclass, &priv);
127 *pobject = nv_object(priv);
128 if (ret)
129 return ret;
130
131 return 0;
132}
133
134struct nouveau_oclass
135nv04_i2c_oclass = {
136 .handle = NV_SUBDEV(I2C, 0x04),
137 .ofuncs = &(struct nouveau_ofuncs) {
138 .ctor = nv04_i2c_ctor,
139 .dtor = _nouveau_i2c_dtor,
140 .init = _nouveau_i2c_init,
141 .fini = _nouveau_i2c_fini,
142 },
143};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c
new file mode 100644
index 000000000000..f501ae25dbb3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c
@@ -0,0 +1,135 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/i2c.h>
26#include <subdev/vga.h>
27
28struct nv4e_i2c_priv {
29 struct nouveau_i2c base;
30};
31
32struct nv4e_i2c_port {
33 struct nouveau_i2c_port base;
34 u32 addr;
35};
36
37static void
38nv4e_i2c_drive_scl(struct nouveau_i2c_port *base, int state)
39{
40 struct nv4e_i2c_priv *priv = (void *)nv_object(base)->engine;
41 struct nv4e_i2c_port *port = (void *)base;
42 nv_mask(priv, port->addr, 0x2f, state ? 0x21 : 0x01);
43}
44
45static void
46nv4e_i2c_drive_sda(struct nouveau_i2c_port *base, int state)
47{
48 struct nv4e_i2c_priv *priv = (void *)nv_object(base)->engine;
49 struct nv4e_i2c_port *port = (void *)base;
50 nv_mask(priv, port->addr, 0x1f, state ? 0x11 : 0x01);
51}
52
53static int
54nv4e_i2c_sense_scl(struct nouveau_i2c_port *base)
55{
56 struct nv4e_i2c_priv *priv = (void *)nv_object(base)->engine;
57 struct nv4e_i2c_port *port = (void *)base;
58 return !!(nv_rd32(priv, port->addr) & 0x00040000);
59}
60
61static int
62nv4e_i2c_sense_sda(struct nouveau_i2c_port *base)
63{
64 struct nv4e_i2c_priv *priv = (void *)nv_object(base)->engine;
65 struct nv4e_i2c_port *port = (void *)base;
66 return !!(nv_rd32(priv, port->addr) & 0x00080000);
67}
68
69static const struct nouveau_i2c_func
70nv4e_i2c_func = {
71 .drive_scl = nv4e_i2c_drive_scl,
72 .drive_sda = nv4e_i2c_drive_sda,
73 .sense_scl = nv4e_i2c_sense_scl,
74 .sense_sda = nv4e_i2c_sense_sda,
75};
76
77static int
78nv4e_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
79 struct nouveau_oclass *oclass, void *data, u32 index,
80 struct nouveau_object **pobject)
81{
82 struct dcb_i2c_entry *info = data;
83 struct nv4e_i2c_port *port;
84 int ret;
85
86 ret = nouveau_i2c_port_create(parent, engine, oclass, index,
87 &nouveau_i2c_bit_algo, &port);
88 *pobject = nv_object(port);
89 if (ret)
90 return ret;
91
92 port->base.func = &nv4e_i2c_func;
93 port->addr = 0x600800 + info->drive;
94 return 0;
95}
96
97static struct nouveau_oclass
98nv4e_i2c_sclass[] = {
99 { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NV4E_BIT),
100 .ofuncs = &(struct nouveau_ofuncs) {
101 .ctor = nv4e_i2c_port_ctor,
102 .dtor = _nouveau_i2c_port_dtor,
103 .init = _nouveau_i2c_port_init,
104 .fini = _nouveau_i2c_port_fini,
105 },
106 },
107 {}
108};
109
110static int
111nv4e_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
112 struct nouveau_oclass *oclass, void *data, u32 size,
113 struct nouveau_object **pobject)
114{
115 struct nv4e_i2c_priv *priv;
116 int ret;
117
118 ret = nouveau_i2c_create(parent, engine, oclass, nv4e_i2c_sclass, &priv);
119 *pobject = nv_object(priv);
120 if (ret)
121 return ret;
122
123 return 0;
124}
125
126struct nouveau_oclass
127nv4e_i2c_oclass = {
128 .handle = NV_SUBDEV(I2C, 0x4e),
129 .ofuncs = &(struct nouveau_ofuncs) {
130 .ctor = nv4e_i2c_ctor,
131 .dtor = _nouveau_i2c_dtor,
132 .init = _nouveau_i2c_init,
133 .fini = _nouveau_i2c_fini,
134 },
135};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c
new file mode 100644
index 000000000000..378dfa324e5f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c
@@ -0,0 +1,149 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "nv50.h"

void
nv50_i2c_drive_scl(struct nouveau_i2c_port *base, int state)
{
	struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
	struct nv50_i2c_port *port = (void *)base;
	if (state) port->state |= 0x01;
	else	   port->state &= 0xfe;
	nv_wr32(priv, port->addr, port->state);
}

void
nv50_i2c_drive_sda(struct nouveau_i2c_port *base, int state)
{
	struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
	struct nv50_i2c_port *port = (void *)base;
	if (state) port->state |= 0x02;
	else	   port->state &= 0xfd;
	nv_wr32(priv, port->addr, port->state);
}

int
nv50_i2c_sense_scl(struct nouveau_i2c_port *base)
{
	struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
	struct nv50_i2c_port *port = (void *)base;
	return !!(nv_rd32(priv, port->addr) & 0x00000001);
}

int
nv50_i2c_sense_sda(struct nouveau_i2c_port *base)
{
	struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
	struct nv50_i2c_port *port = (void *)base;
	return !!(nv_rd32(priv, port->addr) & 0x00000002);
}

static const struct nouveau_i2c_func
nv50_i2c_func = {
	.drive_scl = nv50_i2c_drive_scl,
	.drive_sda = nv50_i2c_drive_sda,
	.sense_scl = nv50_i2c_sense_scl,
	.sense_sda = nv50_i2c_sense_sda,
};

const u32 nv50_i2c_addr[] = {
	0x00e138, 0x00e150, 0x00e168, 0x00e180,
	0x00e254, 0x00e274, 0x00e764, 0x00e780,
	0x00e79c, 0x00e7b8
};
const int nv50_i2c_addr_nr = ARRAY_SIZE(nv50_i2c_addr);

static int
nv50_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		   struct nouveau_oclass *oclass, void *data, u32 index,
		   struct nouveau_object **pobject)
{
	struct dcb_i2c_entry *info = data;
	struct nv50_i2c_port *port;
	int ret;

	ret = nouveau_i2c_port_create(parent, engine, oclass, index,
				      &nouveau_i2c_bit_algo, &port);
	*pobject = nv_object(port);
	if (ret)
		return ret;

	if (info->drive >= nv50_i2c_addr_nr)
		return -EINVAL;

	port->base.func = &nv50_i2c_func;
	port->state = 0x00000007;
	port->addr = nv50_i2c_addr[info->drive];
	return 0;
}

int
nv50_i2c_port_init(struct nouveau_object *object)
{
	struct nv50_i2c_priv *priv = (void *)object->engine;
	struct nv50_i2c_port *port = (void *)object;
	nv_wr32(priv, port->addr, port->state);
	return nouveau_i2c_port_init(&port->base);
}

static struct nouveau_oclass
nv50_i2c_sclass[] = {
	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
	  .ofuncs = &(struct nouveau_ofuncs) {
		  .ctor = nv50_i2c_port_ctor,
		  .dtor = _nouveau_i2c_port_dtor,
		  .init = nv50_i2c_port_init,
		  .fini = _nouveau_i2c_port_fini,
	  },
	},
	{}
};

static int
nv50_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	      struct nouveau_oclass *oclass, void *data, u32 size,
	      struct nouveau_object **pobject)
{
	struct nv50_i2c_priv *priv;
	int ret;

	ret = nouveau_i2c_create(parent, engine, oclass, nv50_i2c_sclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	return 0;
}

struct nouveau_oclass
nv50_i2c_oclass = {
	.handle = NV_SUBDEV(I2C, 0x50),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv50_i2c_ctor,
		.dtor = _nouveau_i2c_dtor,
		.init = _nouveau_i2c_init,
		.fini = _nouveau_i2c_fini,
	},
};
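The four callbacks above never speak the I2C protocol themselves: drive_scl/drive_sda maintain a shadow `state` word whose low two bits are written back to the port register, sense_scl/sense_sda read the pins, and `nouveau_i2c_bit_algo` bit-bangs the wire protocol on top. As a rough, self-contained sketch of what that layering enables (the ops struct and helper below are illustrative stand-ins, not nouveau's actual `nouveau_i2c_func` plumbing), a START condition is just a fixed drive sequence:

struct i2c_bitbang_ops {
	void (*drive_scl)(void *bus, int state);
	void (*drive_sda)(void *bus, int state);
	int  (*sense_scl)(void *bus);
	int  (*sense_sda)(void *bus);
};

/* START: SDA falls while SCL is held high; both lines released first */
static void i2c_bitbang_start(const struct i2c_bitbang_ops *ops, void *bus)
{
	ops->drive_sda(bus, 1);
	ops->drive_scl(bus, 1);
	ops->drive_sda(bus, 0);
	ops->drive_scl(bus, 0);
}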
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h
new file mode 100644
index 000000000000..4e5ba48ebf5a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h
@@ -0,0 +1,32 @@
#ifndef __NV50_I2C_H__
#define __NV50_I2C_H__

#include <subdev/i2c.h>

struct nv50_i2c_priv {
	struct nouveau_i2c base;
};

struct nv50_i2c_port {
	struct nouveau_i2c_port base;
	u32 addr;
	u32 ctrl;
	u32 data;
	u32 state;
};

extern const u32 nv50_i2c_addr[];
extern const int nv50_i2c_addr_nr;
int nv50_i2c_port_init(struct nouveau_object *);
int nv50_i2c_sense_scl(struct nouveau_i2c_port *);
int nv50_i2c_sense_sda(struct nouveau_i2c_port *);
void nv50_i2c_drive_scl(struct nouveau_i2c_port *, int state);
void nv50_i2c_drive_sda(struct nouveau_i2c_port *, int state);

int nv94_aux_port_ctor(struct nouveau_object *, struct nouveau_object *,
		       struct nouveau_oclass *, void *, u32,
		       struct nouveau_object **);
void nv94_i2c_acquire(struct nouveau_i2c_port *);
void nv94_i2c_release(struct nouveau_i2c_port *);

#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
new file mode 100644
index 000000000000..61b771670bfe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
@@ -0,0 +1,285 @@
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "nv50.h"

#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)

static void
auxch_fini(struct nouveau_i2c *aux, int ch)
{
	nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00310000, 0x00000000);
}

static int
auxch_init(struct nouveau_i2c *aux, int ch)
{
	const u32 unksel = 1; /* nfi which to use, or if it matters.. */
	const u32 ureq = unksel ? 0x00100000 : 0x00200000;
	const u32 urep = unksel ? 0x01000000 : 0x02000000;
	u32 ctrl, timeout;

	/* wait up to 1ms for any previous transaction to be done... */
	timeout = 1000;
	do {
		ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
		udelay(1);
		if (!timeout--) {
			AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
			return -EBUSY;
		}
	} while (ctrl & 0x03010000);

	/* set some magic, and wait up to 1ms for it to appear */
	nv_mask(aux, 0x00e4e4 + (ch * 0x50), 0x00300000, ureq);
	timeout = 1000;
	do {
		ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
		udelay(1);
		if (!timeout--) {
			AUX_ERR("magic wait 0x%08x\n", ctrl);
			auxch_fini(aux, ch);
			return -EBUSY;
		}
	} while ((ctrl & 0x03000000) != urep);

	return 0;
}

int
nv94_aux(struct nouveau_i2c_port *base, u8 type, u32 addr, u8 *data, u8 size)
{
	struct nouveau_i2c *aux = nouveau_i2c(base);
	struct nv50_i2c_port *port = (void *)base;
	u32 ctrl, stat, timeout, retries;
	u32 xbuf[4] = {};
	int ch = port->addr;
	int ret, i;

	AUX_DBG("%d: 0x%08x %d\n", type, addr, size);

	ret = auxch_init(aux, ch);
	if (ret)
		goto out;

	stat = nv_rd32(aux, 0x00e4e8 + (ch * 0x50));
	if (!(stat & 0x10000000)) {
		AUX_DBG("sink not detected\n");
		ret = -ENXIO;
		goto out;
	}

	if (!(type & 1)) {
		memcpy(xbuf, data, size);
		for (i = 0; i < 16; i += 4) {
			AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
			nv_wr32(aux, 0x00e4c0 + (ch * 0x50) + i, xbuf[i / 4]);
		}
	}

	ctrl  = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
	ctrl &= ~0x0001f0ff;
	ctrl |= type << 12;
	ctrl |= size - 1;
	nv_wr32(aux, 0x00e4e0 + (ch * 0x50), addr);

	/* retry transaction a number of times on failure... */
	ret = -EREMOTEIO;
	for (retries = 0; retries < 32; retries++) {
		/* reset, and delay a while if this is a retry */
		nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x80000000 | ctrl);
		nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00000000 | ctrl);
		if (retries)
			udelay(400);

		/* transaction request, wait up to 1ms for it to complete */
		nv_wr32(aux, 0x00e4e4 + (ch * 0x50), 0x00010000 | ctrl);

		timeout = 1000;
		do {
			ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
			udelay(1);
			if (!timeout--) {
				AUX_ERR("tx req timeout 0x%08x\n", ctrl);
				goto out;
			}
		} while (ctrl & 0x00010000);

		/* read status, and check if transaction completed ok */
		stat = nv_mask(aux, 0x00e4e8 + (ch * 0x50), 0, 0);
		if (!(stat & 0x000f0f00)) {
			ret = 0;
			break;
		}

		AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
	}

	if (type & 1) {
		for (i = 0; i < 16; i += 4) {
			xbuf[i / 4] = nv_rd32(aux, 0x00e4d0 + (ch * 0x50) + i);
			AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
		}
		memcpy(data, xbuf, size);
	}

out:
	auxch_fini(aux, ch);
	return ret;
}

void
nv94_i2c_acquire(struct nouveau_i2c_port *base)
{
	struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
	struct nv50_i2c_port *port = (void *)base;
	if (port->ctrl) {
		nv_mask(priv, port->ctrl + 0x0c, 0x00000001, 0x00000000);
		nv_mask(priv, port->ctrl + 0x00, 0x0000f003, port->data);
	}
}

void
nv94_i2c_release(struct nouveau_i2c_port *base)
{
}

static const struct nouveau_i2c_func
nv94_i2c_func = {
	.acquire   = nv94_i2c_acquire,
	.release   = nv94_i2c_release,
	.drive_scl = nv50_i2c_drive_scl,
	.drive_sda = nv50_i2c_drive_sda,
	.sense_scl = nv50_i2c_sense_scl,
	.sense_sda = nv50_i2c_sense_sda,
};

static int
nv94_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		   struct nouveau_oclass *oclass, void *data, u32 index,
		   struct nouveau_object **pobject)
{
	struct dcb_i2c_entry *info = data;
	struct nv50_i2c_port *port;
	int ret;

	ret = nouveau_i2c_port_create(parent, engine, oclass, index,
				      &nouveau_i2c_bit_algo, &port);
	*pobject = nv_object(port);
	if (ret)
		return ret;

	if (info->drive >= nv50_i2c_addr_nr)
		return -EINVAL;

	port->base.func = &nv94_i2c_func;
	port->state = 7;
	port->addr = nv50_i2c_addr[info->drive];
	if (info->share != DCB_I2C_UNUSED) {
		port->ctrl = 0x00e500 + (info->share * 0x50);
		port->data = 0x0000e001;
	}
	return 0;
}

static const struct nouveau_i2c_func
nv94_aux_func = {
	.acquire   = nv94_i2c_acquire,
	.release   = nv94_i2c_release,
	.aux       = nv94_aux,
};

int
nv94_aux_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		   struct nouveau_oclass *oclass, void *data, u32 index,
		   struct nouveau_object **pobject)
{
	struct dcb_i2c_entry *info = data;
	struct nv50_i2c_port *port;
	int ret;

	ret = nouveau_i2c_port_create(parent, engine, oclass, index,
				      &nouveau_i2c_aux_algo, &port);
	*pobject = nv_object(port);
	if (ret)
		return ret;

	port->base.func = &nv94_aux_func;
	port->addr = info->drive;
	if (info->share != DCB_I2C_UNUSED) {
		port->ctrl = 0x00e500 + (info->drive * 0x50);
		port->data = 0x00002002;
	}

	return 0;
}

static struct nouveau_oclass
nv94_i2c_sclass[] = {
	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
	  .ofuncs = &(struct nouveau_ofuncs) {
		  .ctor = nv94_i2c_port_ctor,
		  .dtor = _nouveau_i2c_port_dtor,
		  .init = nv50_i2c_port_init,
		  .fini = _nouveau_i2c_port_fini,
	  },
	},
	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
	  .ofuncs = &(struct nouveau_ofuncs) {
		  .ctor = nv94_aux_port_ctor,
		  .dtor = _nouveau_i2c_port_dtor,
		  .init = _nouveau_i2c_port_init,
		  .fini = _nouveau_i2c_port_fini,
	  },
	},
	{}
};

static int
nv94_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	      struct nouveau_oclass *oclass, void *data, u32 size,
	      struct nouveau_object **pobject)
{
	struct nv50_i2c_priv *priv;
	int ret;

	ret = nouveau_i2c_create(parent, engine, oclass, nv94_i2c_sclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	return 0;
}

struct nouveau_oclass
nv94_i2c_oclass = {
	.handle = NV_SUBDEV(I2C, 0x94),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nv94_i2c_ctor,
		.dtor = _nouveau_i2c_dtor,
		.init = _nouveau_i2c_init,
		.fini = _nouveau_i2c_fini,
	},
};
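nv94_aux() follows a fixed shape: initialise the channel, copy up to 16 bytes into the data window for writes, program the control word with the request type and size, kick the transaction, then poll the busy bit with a 1ms budget, retrying the whole exchange up to 32 times before giving up with -EREMOTEIO. A hedged sketch of the poll-until-idle step, with hypothetical read/delay callbacks standing in for nv_rd32()/udelay():

#include <stdbool.h>
#include <stdint.h>

static bool poll_until_clear(uint32_t (*rd)(void *ctx, uint32_t addr),
			     void (*delay_us)(unsigned int us),
			     void *ctx, uint32_t addr, uint32_t busy)
{
	unsigned int timeout = 1000;		/* ~1ms in 1us steps */

	while (rd(ctx, addr) & busy) {
		delay_us(1);
		if (!timeout--)
			return false;		/* caller logs ctrl and aborts */
	}
	return true;
}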
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
new file mode 100644
index 000000000000..f761b8a610f1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
@@ -0,0 +1,124 @@
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "nv50.h"

static int
nvd0_i2c_sense_scl(struct nouveau_i2c_port *base)
{
	struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
	struct nv50_i2c_port *port = (void *)base;
	return !!(nv_rd32(priv, port->addr) & 0x00000010);
}

static int
nvd0_i2c_sense_sda(struct nouveau_i2c_port *base)
{
	struct nv50_i2c_priv *priv = (void *)nv_object(base)->engine;
	struct nv50_i2c_port *port = (void *)base;
	return !!(nv_rd32(priv, port->addr) & 0x00000020);
}

static const struct nouveau_i2c_func
nvd0_i2c_func = {
	.acquire   = nv94_i2c_acquire,
	.release   = nv94_i2c_release,
	.drive_scl = nv50_i2c_drive_scl,
	.drive_sda = nv50_i2c_drive_sda,
	.sense_scl = nvd0_i2c_sense_scl,
	.sense_sda = nvd0_i2c_sense_sda,
};

static int
nvd0_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		   struct nouveau_oclass *oclass, void *data, u32 index,
		   struct nouveau_object **pobject)
{
	struct dcb_i2c_entry *info = data;
	struct nv50_i2c_port *port;
	int ret;

	ret = nouveau_i2c_port_create(parent, engine, oclass, index,
				      &nouveau_i2c_bit_algo, &port);
	*pobject = nv_object(port);
	if (ret)
		return ret;

	port->base.func = &nvd0_i2c_func;
	port->state = 0x00000007;
	port->addr = 0x00d014 + (info->drive * 0x20);
	if (info->share != DCB_I2C_UNUSED) {
		port->ctrl = 0x00e500 + (info->share * 0x50);
		port->data = 0x0000e001;
	}
	return 0;
}

static struct nouveau_oclass
nvd0_i2c_sclass[] = {
	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
	  .ofuncs = &(struct nouveau_ofuncs) {
		  .ctor = nvd0_i2c_port_ctor,
		  .dtor = _nouveau_i2c_port_dtor,
		  .init = nv50_i2c_port_init,
		  .fini = _nouveau_i2c_port_fini,
	  },
	},
	{ .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
	  .ofuncs = &(struct nouveau_ofuncs) {
		  .ctor = nv94_aux_port_ctor,
		  .dtor = _nouveau_i2c_port_dtor,
		  .init = _nouveau_i2c_port_init,
		  .fini = _nouveau_i2c_port_fini,
	  },
	},
	{}
};

static int
nvd0_i2c_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
	      struct nouveau_oclass *oclass, void *data, u32 size,
	      struct nouveau_object **pobject)
{
	struct nv50_i2c_priv *priv;
	int ret;

	ret = nouveau_i2c_create(parent, engine, oclass, nvd0_i2c_sclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	return 0;
}

struct nouveau_oclass
nvd0_i2c_oclass = {
	.handle = NV_SUBDEV(I2C, 0xd0),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nvd0_i2c_ctor,
		.dtor = _nouveau_i2c_dtor,
		.init = _nouveau_i2c_init,
		.fini = _nouveau_i2c_fini,
	},
};
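Unlike nv50, which needs the irregular `nv50_i2c_addr[]` lookup table, nvd0 lays its ports out at a fixed register stride, so the constructor computes the address directly. A one-line restatement of that addressing (illustrative helper, not driver code):

static unsigned int nvd0_i2c_port_addr(unsigned int drive)
{
	/* port 0 -> 0xd014, port 1 -> 0xd034, port 2 -> 0xd054, ... */
	return 0x00d014 + drive * 0x20;
}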
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
index 23ebe477a6f0..89da8fa7ea0f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
@@ -37,7 +37,7 @@ nv04_mc_intr[] = {
 	{ 0x00100000, NVDEV_SUBDEV_TIMER },
 	{ 0x01000000, NVDEV_ENGINE_DISP },	/* NV04- PCRTC0 */
 	{ 0x02000000, NVDEV_ENGINE_DISP },	/* NV11- PCRTC1 */
-	{ 0x10000000, NVDEV_SUBDEV_GPIO },	/* PBUS */
+	{ 0x10000000, NVDEV_SUBDEV_BUS },
 	{ 0x80000000, NVDEV_ENGINE_SW },
 	{}
 };
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index 8d759f830323..5965add6daee 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -38,6 +38,7 @@ nv50_mc_intr[] = {
 	{ 0x00100000, NVDEV_SUBDEV_TIMER },
 	{ 0x00200000, NVDEV_SUBDEV_GPIO },
 	{ 0x04000000, NVDEV_ENGINE_DISP },
+	{ 0x10000000, NVDEV_SUBDEV_BUS },
 	{ 0x80000000, NVDEV_ENGINE_SW },
 	{ 0x0000d101, NVDEV_SUBDEV_FB },
 	{},
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index ceb5c83f9459..3a80b29dce0f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -35,10 +35,12 @@ nv98_mc_intr[] = {
 	{ 0x00001000, NVDEV_ENGINE_GR },
 	{ 0x00004000, NVDEV_ENGINE_CRYPT },	/* NV84:NVA3 */
 	{ 0x00008000, NVDEV_ENGINE_BSP },
+	{ 0x00080000, NVDEV_SUBDEV_THERM },	/* NVA3:NVC0 */
 	{ 0x00100000, NVDEV_SUBDEV_TIMER },
 	{ 0x00200000, NVDEV_SUBDEV_GPIO },
 	{ 0x00400000, NVDEV_ENGINE_COPY0 },	/* NVA3- */
 	{ 0x04000000, NVDEV_ENGINE_DISP },
+	{ 0x10000000, NVDEV_SUBDEV_BUS },
 	{ 0x80000000, NVDEV_ENGINE_SW },
 	{ 0x0040d101, NVDEV_SUBDEV_FB },
 	{},
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index 92796682722d..42bbf72023a8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -36,11 +36,13 @@ nvc0_mc_intr[] = {
 	{ 0x00000100, NVDEV_ENGINE_FIFO },
 	{ 0x00001000, NVDEV_ENGINE_GR },
 	{ 0x00008000, NVDEV_ENGINE_BSP },
+	{ 0x00040000, NVDEV_SUBDEV_THERM },
 	{ 0x00020000, NVDEV_ENGINE_VP },
 	{ 0x00100000, NVDEV_SUBDEV_TIMER },
 	{ 0x00200000, NVDEV_SUBDEV_GPIO },
 	{ 0x02000000, NVDEV_SUBDEV_LTCG },
 	{ 0x04000000, NVDEV_ENGINE_DISP },
+	{ 0x10000000, NVDEV_SUBDEV_BUS },
 	{ 0x40000000, NVDEV_SUBDEV_IBUS },
 	{ 0x80000000, NVDEV_ENGINE_SW },
 	{},
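These four hunks route the new PBUS (0x10000000) and PTHERM bits through the per-chipset interrupt tables. Each table is a zero-terminated {mask, unit} list the MC interrupt handler walks, dispatching every pending status bit to its subdev or engine. A hedged sketch of that consumption pattern (names are illustrative, not nouveau's):

struct intr_map {
	unsigned int stat;
	int unit;
};

static void mc_dispatch(const struct intr_map *map, unsigned int stat,
			void (*handle)(int unit))
{
	const struct intr_map *m;

	for (m = map; m->stat; m++) {
		if (stat & m->stat) {
			handle(m->unit);
			stat &= ~m->stat;
		}
	}
	/* any bit still set in stat has no registered handler */
}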
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c
index 839ca1edc132..4bde7f7f7b81 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/mxms.c
@@ -156,15 +156,15 @@ mxms_foreach(struct nouveau_mxm *mxm, u8 types,
 
 			nv_debug(mxm, "%4s: ", mxms_desc_name[type]);
 			for (j = headerlen - 1; j >= 0; j--)
-				printk("%02x", dump[j]);
-			printk("\n");
+				pr_cont("%02x", dump[j]);
+			pr_cont("\n");
 			dump += headerlen;
 
 			for (i = 0; i < entries; i++, dump += recordlen) {
 				nv_debug(mxm, "      ");
 				for (j = recordlen - 1; j >= 0; j--)
-					printk("%02x", dump[j]);
-				printk("\n");
+					pr_cont("%02x", dump[j]);
+				pr_cont("\n");
 			}
 		}
 
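The switch from bare printk() to pr_cont() matters because a printk() without a KERN_* level is no longer guaranteed to continue the previous line; pr_cont() is the explicit continuation primitive, which keeps each hex dump on the line its nv_debug() call opened. A minimal kernel-style example of the same pattern (illustrative, not the mxms code itself):

#include <linux/printk.h>
#include <linux/types.h>

static void dump_bytes_le(const u8 *buf, int len)
{
	int j;

	pr_info("dump:");
	for (j = len - 1; j >= 0; j--)
		pr_cont(" %02x", buf[j]);	/* appends to the open line */
	pr_cont("\n");
}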
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
index 1674c74a76c8..f794dc89a3b2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
@@ -29,6 +29,134 @@
 
 #include "priv.h"
 
+static int
+nouveau_therm_update_trip(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+	struct nouveau_therm_trip_point *trip = priv->fan->bios.trip,
+					*cur_trip = NULL,
+					*last_trip = priv->last_trip;
+	u8  temp = therm->temp_get(therm);
+	u16 duty, i;
+
+	/* look for the trip point corresponding to the current temperature */
+	cur_trip = NULL;
+	for (i = 0; i < priv->fan->bios.nr_fan_trip; i++) {
+		if (temp >= trip[i].temp)
+			cur_trip = &trip[i];
+	}
+
+	/* account for the hysteresis cycle */
+	if (last_trip && temp <= (last_trip->temp) &&
+	    temp > (last_trip->temp - last_trip->hysteresis))
+		cur_trip = last_trip;
+
+	if (cur_trip) {
+		duty = cur_trip->fan_duty;
+		priv->last_trip = cur_trip;
+	} else {
+		duty = 0;
+		priv->last_trip = NULL;
+	}
+
+	return duty;
+}
+
+static int
+nouveau_therm_update_linear(struct nouveau_therm *therm)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+	u8  linear_min_temp = priv->fan->bios.linear_min_temp;
+	u8  linear_max_temp = priv->fan->bios.linear_max_temp;
+	u8  temp = therm->temp_get(therm);
+	u16 duty;
+
+	/* handle the non-linear part first */
+	if (temp < linear_min_temp)
+		return priv->fan->bios.min_duty;
+	else if (temp > linear_max_temp)
+		return priv->fan->bios.max_duty;
+
+	/* we are in the linear zone */
+	duty  = (temp - linear_min_temp);
+	duty *= (priv->fan->bios.max_duty - priv->fan->bios.min_duty);
+	duty /= (linear_max_temp - linear_min_temp);
+	duty += priv->fan->bios.min_duty;
+
+	return duty;
+}
+
+static void
+nouveau_therm_update(struct nouveau_therm *therm, int mode)
+{
+	struct nouveau_timer *ptimer = nouveau_timer(therm);
+	struct nouveau_therm_priv *priv = (void *)therm;
+	unsigned long flags;
+	int duty;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	if (mode < 0)
+		mode = priv->mode;
+	priv->mode = mode;
+
+	switch (mode) {
+	case NOUVEAU_THERM_CTRL_MANUAL:
+		duty = nouveau_therm_fan_get(therm);
+		if (duty < 0)
+			duty = 100;
+		break;
+	case NOUVEAU_THERM_CTRL_AUTO:
+		if (priv->fan->bios.nr_fan_trip)
+			duty = nouveau_therm_update_trip(therm);
+		else
+			duty = nouveau_therm_update_linear(therm);
+		break;
+	case NOUVEAU_THERM_CTRL_NONE:
+	default:
+		goto done;
+	}
+
+	nv_debug(therm, "FAN target request: %d%%\n", duty);
+	nouveau_therm_fan_set(therm, (mode != NOUVEAU_THERM_CTRL_AUTO), duty);
+
+done:
+	if (list_empty(&priv->alarm.head) && (mode == NOUVEAU_THERM_CTRL_AUTO))
+		ptimer->alarm(ptimer, 1000000000ULL, &priv->alarm);
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void
+nouveau_therm_alarm(struct nouveau_alarm *alarm)
+{
+	struct nouveau_therm_priv *priv =
+	       container_of(alarm, struct nouveau_therm_priv, alarm);
+	nouveau_therm_update(&priv->base, -1);
+}
+
+int
+nouveau_therm_mode(struct nouveau_therm *therm, int mode)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+	struct nouveau_device *device = nv_device(therm);
+	static const char *name[] = {
+		"disabled",
+		"manual",
+		"automatic"
+	};
+
+	/* The default PDAEMON ucode interferes with fan management */
+	if ((mode >= ARRAY_SIZE(name)) ||
+	    (mode != NOUVEAU_THERM_CTRL_NONE && device->card_type >= NV_C0))
+		return -EINVAL;
+
+	if (priv->mode == mode)
+		return 0;
+
+	nv_info(therm, "Thermal management: %s\n", name[mode]);
+	nouveau_therm_update(therm, mode);
+	return 0;
+}
+
 int
 nouveau_therm_attr_get(struct nouveau_therm *therm,
 		       enum nouveau_therm_attr_type type)
@@ -37,11 +165,11 @@ nouveau_therm_attr_get(struct nouveau_therm *therm,
 
 	switch (type) {
 	case NOUVEAU_THERM_ATTR_FAN_MIN_DUTY:
-		return priv->bios_fan.min_duty;
+		return priv->fan->bios.min_duty;
 	case NOUVEAU_THERM_ATTR_FAN_MAX_DUTY:
-		return priv->bios_fan.max_duty;
+		return priv->fan->bios.max_duty;
 	case NOUVEAU_THERM_ATTR_FAN_MODE:
-		return priv->fan.mode;
+		return priv->mode;
 	case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST:
 		return priv->bios_sensor.thrs_fan_boost.temp;
 	case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST:
@@ -73,42 +201,50 @@ nouveau_therm_attr_set(struct nouveau_therm *therm,
 	case NOUVEAU_THERM_ATTR_FAN_MIN_DUTY:
 		if (value < 0)
 			value = 0;
-		if (value > priv->bios_fan.max_duty)
-			value = priv->bios_fan.max_duty;
-		priv->bios_fan.min_duty = value;
+		if (value > priv->fan->bios.max_duty)
+			value = priv->fan->bios.max_duty;
+		priv->fan->bios.min_duty = value;
 		return 0;
 	case NOUVEAU_THERM_ATTR_FAN_MAX_DUTY:
 		if (value < 0)
 			value = 0;
-		if (value < priv->bios_fan.min_duty)
-			value = priv->bios_fan.min_duty;
-		priv->bios_fan.max_duty = value;
+		if (value < priv->fan->bios.min_duty)
+			value = priv->fan->bios.min_duty;
+		priv->fan->bios.max_duty = value;
 		return 0;
 	case NOUVEAU_THERM_ATTR_FAN_MODE:
-		return nouveau_therm_fan_set_mode(therm, value);
+		return nouveau_therm_mode(therm, value);
 	case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST:
 		priv->bios_sensor.thrs_fan_boost.temp = value;
+		priv->sensor.program_alarms(therm);
 		return 0;
 	case NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST:
 		priv->bios_sensor.thrs_fan_boost.hysteresis = value;
+		priv->sensor.program_alarms(therm);
 		return 0;
 	case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK:
 		priv->bios_sensor.thrs_down_clock.temp = value;
+		priv->sensor.program_alarms(therm);
 		return 0;
 	case NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST:
 		priv->bios_sensor.thrs_down_clock.hysteresis = value;
+		priv->sensor.program_alarms(therm);
 		return 0;
 	case NOUVEAU_THERM_ATTR_THRS_CRITICAL:
 		priv->bios_sensor.thrs_critical.temp = value;
+		priv->sensor.program_alarms(therm);
 		return 0;
 	case NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST:
 		priv->bios_sensor.thrs_critical.hysteresis = value;
+		priv->sensor.program_alarms(therm);
 		return 0;
 	case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN:
 		priv->bios_sensor.thrs_shutdown.temp = value;
+		priv->sensor.program_alarms(therm);
 		return 0;
 	case NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST:
 		priv->bios_sensor.thrs_shutdown.hysteresis = value;
+		priv->sensor.program_alarms(therm);
 		return 0;
 	}
 
@@ -116,7 +252,7 @@ nouveau_therm_attr_set(struct nouveau_therm *therm,
 }
 
 int
-nouveau_therm_init(struct nouveau_object *object)
+_nouveau_therm_init(struct nouveau_object *object)
 {
 	struct nouveau_therm *therm = (void *)object;
 	struct nouveau_therm_priv *priv = (void *)therm;
@@ -126,19 +262,69 @@ nouveau_therm_init(struct nouveau_object *object)
 	if (ret)
 		return ret;
 
-	if (priv->fan.percent >= 0)
-		therm->fan_set(therm, priv->fan.percent);
-
+	if (priv->suspend >= 0)
+		nouveau_therm_mode(therm, priv->mode);
+	priv->sensor.program_alarms(therm);
 	return 0;
 }
 
 int
-nouveau_therm_fini(struct nouveau_object *object, bool suspend)
+_nouveau_therm_fini(struct nouveau_object *object, bool suspend)
 {
 	struct nouveau_therm *therm = (void *)object;
 	struct nouveau_therm_priv *priv = (void *)therm;
 
-	priv->fan.percent = therm->fan_get(therm);
+	if (suspend) {
+		priv->suspend = priv->mode;
+		priv->mode = NOUVEAU_THERM_CTRL_NONE;
+	}
 
 	return nouveau_subdev_fini(&therm->base, suspend);
 }
+
+int
+nouveau_therm_create_(struct nouveau_object *parent,
+		      struct nouveau_object *engine,
+		      struct nouveau_oclass *oclass,
+		      int length, void **pobject)
+{
+	struct nouveau_therm_priv *priv;
+	int ret;
+
+	ret = nouveau_subdev_create_(parent, engine, oclass, 0, "PTHERM",
+				     "therm", length, pobject);
+	priv = *pobject;
+	if (ret)
+		return ret;
+
+	nouveau_alarm_init(&priv->alarm, nouveau_therm_alarm);
+	spin_lock_init(&priv->lock);
+	spin_lock_init(&priv->sensor.alarm_program_lock);
+
+	priv->base.fan_get = nouveau_therm_fan_user_get;
+	priv->base.fan_set = nouveau_therm_fan_user_set;
+	priv->base.fan_sense = nouveau_therm_fan_sense;
+	priv->base.attr_get = nouveau_therm_attr_get;
+	priv->base.attr_set = nouveau_therm_attr_set;
+	priv->mode = priv->suspend = -1; /* undefined */
+	return 0;
+}
+
+int
+nouveau_therm_preinit(struct nouveau_therm *therm)
+{
+	nouveau_therm_ic_ctor(therm);
+	nouveau_therm_sensor_ctor(therm);
+	nouveau_therm_fan_ctor(therm);
+
+	nouveau_therm_mode(therm, NOUVEAU_THERM_CTRL_NONE);
+	return 0;
+}
+
+void
+_nouveau_therm_dtor(struct nouveau_object *object)
+{
+	struct nouveau_therm_priv *priv = (void *)object;
+	kfree(priv->fan);
+	nouveau_subdev_destroy(&priv->base.base);
+}
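Automatic mode now picks a duty cycle one of two ways: from the VBIOS trip-point table (sticky across the hysteresis band so the fan does not oscillate around a threshold), or by linear interpolation between (linear_min_temp, min_duty) and (linear_max_temp, max_duty). The interpolation is easy to check by hand: with the 40-85 degC defaults set later in this series and a hypothetical 30-100% duty range, 58 degC gives 30 + (58-40)*(100-30)/(85-40) = 58%. A standalone restatement of the same arithmetic:

#include <stdio.h>

static int linear_duty(int temp, int tmin, int tmax, int dmin, int dmax)
{
	if (temp < tmin)
		return dmin;
	if (temp > tmax)
		return dmax;
	return dmin + (temp - tmin) * (dmax - dmin) / (tmax - tmin);
}

int main(void)
{
	printf("%d%%\n", linear_duty(58, 40, 85, 30, 100)); /* prints 58% */
	return 0;
}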
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
index 523178685180..c728380d3d62 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
@@ -27,90 +27,107 @@
 
 #include <core/object.h>
 #include <core/device.h>
+
 #include <subdev/gpio.h>
 #include <subdev/timer.h>
 
-int
-nouveau_therm_fan_get(struct nouveau_therm *therm)
+static int
+nouveau_fan_update(struct nouveau_fan *fan, bool immediate, int target)
 {
+	struct nouveau_therm *therm = fan->parent;
 	struct nouveau_therm_priv *priv = (void *)therm;
-	struct nouveau_gpio *gpio = nouveau_gpio(therm);
-	struct dcb_gpio_func func;
-	int card_type = nv_device(therm)->card_type;
-	u32 divs, duty;
-	int ret;
+	struct nouveau_timer *ptimer = nouveau_timer(priv);
+	unsigned long flags;
+	int ret = 0;
+	int duty;
 
-	if (!priv->fan.pwm_get)
-		return -ENODEV;
+	/* update target fan speed, restricting to allowed range */
+	spin_lock_irqsave(&fan->lock, flags);
+	if (target < 0)
+		target = fan->percent;
+	target = max_t(u8, target, fan->bios.min_duty);
+	target = min_t(u8, target, fan->bios.max_duty);
+	if (fan->percent != target) {
+		nv_debug(therm, "FAN target: %d\n", target);
+		fan->percent = target;
+	}
 
-	ret = gpio->find(gpio, 0, DCB_GPIO_PWM_FAN, 0xff, &func);
-	if (ret == 0) {
-		ret = priv->fan.pwm_get(therm, func.line, &divs, &duty);
-		if (ret == 0 && divs) {
-			divs = max(divs, duty);
-			if (card_type <= NV_40 || (func.log[0] & 1))
-				duty = divs - duty;
-			return (duty * 100) / divs;
-		}
+	/* check that we're not already at the target duty cycle */
+	duty = fan->get(therm);
+	if (duty == target)
+		goto done;
+
+	/* smooth out the fanspeed increase/decrease */
+	if (!immediate && duty >= 0) {
+		/* the constant "3" is a rough approximation taken from
+		 * nvidia's behaviour.
+		 * it is meant to bump the fan speed more incrementally
+		 */
+		if (duty < target)
+			duty = min(duty + 3, target);
+		else if (duty > target)
+			duty = max(duty - 3, target);
+	} else {
+		duty = target;
+	}
 
-		return gpio->get(gpio, 0, func.func, func.line) * 100;
-	}
+	nv_debug(therm, "FAN update: %d\n", duty);
+	ret = fan->set(therm, duty);
+	if (ret)
+		goto done;
+
+	/* schedule next fan update, if not at target speed already */
+	if (list_empty(&fan->alarm.head) && target != duty) {
+		u16 bump_period = fan->bios.bump_period;
+		u16 slow_down_period = fan->bios.slow_down_period;
+		u64 delay;
+
+		if (duty > target)
+			delay = slow_down_period;
+		else if (duty == target)
+			delay = min(bump_period, slow_down_period);
+		else
+			delay = bump_period;
+
+		ptimer->alarm(ptimer, delay * 1000 * 1000, &fan->alarm);
+	}
 
-	return -ENODEV;
+done:
+	spin_unlock_irqrestore(&fan->lock, flags);
+	return ret;
+}
+
+static void
+nouveau_fan_alarm(struct nouveau_alarm *alarm)
+{
+	struct nouveau_fan *fan = container_of(alarm, struct nouveau_fan, alarm);
+	nouveau_fan_update(fan, false, -1);
 }
 
 int
-nouveau_therm_fan_set(struct nouveau_therm *therm, int percent)
+nouveau_therm_fan_get(struct nouveau_therm *therm)
 {
 	struct nouveau_therm_priv *priv = (void *)therm;
-	struct nouveau_gpio *gpio = nouveau_gpio(therm);
-	struct dcb_gpio_func func;
-	int card_type = nv_device(therm)->card_type;
-	u32 divs, duty;
-	int ret;
-
-	if (priv->fan.mode == FAN_CONTROL_NONE)
-		return -EINVAL;
-
-	if (!priv->fan.pwm_set)
-		return -ENODEV;
-
-	if (percent < priv->bios_fan.min_duty)
-		percent = priv->bios_fan.min_duty;
-	if (percent > priv->bios_fan.max_duty)
-		percent = priv->bios_fan.max_duty;
-
-	ret = gpio->find(gpio, 0, DCB_GPIO_PWM_FAN, 0xff, &func);
-	if (ret == 0) {
-		divs = priv->bios_perf_fan.pwm_divisor;
-		if (priv->bios_fan.pwm_freq) {
-			divs = 1;
-			if (priv->fan.pwm_clock)
-				divs = priv->fan.pwm_clock(therm);
-			divs /= priv->bios_fan.pwm_freq;
-		}
-
-		duty = ((divs * percent) + 99) / 100;
-		if (card_type <= NV_40 || (func.log[0] & 1))
-			duty = divs - duty;
-
-		ret = priv->fan.pwm_set(therm, func.line, divs, duty);
-		return ret;
-	}
+	return priv->fan->get(therm);
+}
 
-	return -ENODEV;
+int
+nouveau_therm_fan_set(struct nouveau_therm *therm, bool immediate, int percent)
+{
+	struct nouveau_therm_priv *priv = (void *)therm;
+	return nouveau_fan_update(priv->fan, immediate, percent);
 }
 
 int
 nouveau_therm_fan_sense(struct nouveau_therm *therm)
 {
+	struct nouveau_therm_priv *priv = (void *)therm;
 	struct nouveau_timer *ptimer = nouveau_timer(therm);
 	struct nouveau_gpio *gpio = nouveau_gpio(therm);
-	struct dcb_gpio_func func;
 	u32 cycles, cur, prev;
 	u64 start, end, tach;
 
-	if (gpio->find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff, &func))
+	if (priv->fan->tach.func == DCB_GPIO_UNUSED)
 		return -ENODEV;
 
 	/* Time a complete rotation and extrapolate to RPM:
@@ -118,12 +135,12 @@ nouveau_therm_fan_sense(struct nouveau_therm *therm)
 	 * We get 4 changes (0 -> 1 -> 0 -> 1) per complete rotation.
 	 */
 	start = ptimer->read(ptimer);
-	prev = gpio->get(gpio, 0, func.func, func.line);
+	prev = gpio->get(gpio, 0, priv->fan->tach.func, priv->fan->tach.line);
 	cycles = 0;
 	do {
 		usleep_range(500, 1000); /* supports 0 < rpm < 7500 */
 
-		cur = gpio->get(gpio, 0, func.func, func.line);
+		cur = gpio->get(gpio, 0, priv->fan->tach.func, priv->fan->tach.line);
 		if (prev != cur) {
 			if (!start)
 				start = ptimer->read(ptimer);
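The hunk above swaps the ad-hoc gpio->find() for the tachometer function cached at constructor time; the measurement itself is unchanged: count signal edges for a while, and since the tach emits two pulses (four edges) per rotation, scale to RPM. Worked through with illustrative numbers, 120 edges in 250ms is 30 rotations, i.e. 7200 RPM. In sketch form (hypothetical helper, not driver code):

static unsigned int rpm_from_edges(unsigned int edges,
				   unsigned long long elapsed_ns)
{
	/* rotations = edges / 4; RPM = rotations * 60s / elapsed time */
	return (unsigned int)(edges / 4ULL * 60000000000ULL / elapsed_ns);
}
/* rpm_from_edges(120, 250000000ULL) == 7200 */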
@@ -142,34 +159,6 @@ nouveau_therm_fan_sense(struct nouveau_therm *therm)
 }
 
 int
-nouveau_therm_fan_set_mode(struct nouveau_therm *therm,
-			   enum nouveau_therm_fan_mode mode)
-{
-	struct nouveau_therm_priv *priv = (void *)therm;
-
-	if (priv->fan.mode == mode)
-		return 0;
-
-	if (mode < FAN_CONTROL_NONE || mode >= FAN_CONTROL_NR)
-		return -EINVAL;
-
-	switch (mode)
-	{
-	case FAN_CONTROL_NONE:
-		nv_info(therm, "switch fan to no-control mode\n");
-		break;
-	case FAN_CONTROL_MANUAL:
-		nv_info(therm, "switch fan to manual mode\n");
-		break;
-	case FAN_CONTROL_NR:
-		break;
-	}
-
-	priv->fan.mode = mode;
-	return 0;
-}
-
-int
 nouveau_therm_fan_user_get(struct nouveau_therm *therm)
 {
 	return nouveau_therm_fan_get(therm);
@@ -180,55 +169,86 @@ nouveau_therm_fan_user_set(struct nouveau_therm *therm, int percent)
 {
 	struct nouveau_therm_priv *priv = (void *)therm;
 
-	if (priv->fan.mode != FAN_CONTROL_MANUAL)
+	if (priv->mode != NOUVEAU_THERM_CTRL_MANUAL)
 		return -EINVAL;
 
-	return nouveau_therm_fan_set(therm, percent);
+	return nouveau_therm_fan_set(therm, true, percent);
 }
 
-void
+static void
 nouveau_therm_fan_set_defaults(struct nouveau_therm *therm)
 {
 	struct nouveau_therm_priv *priv = (void *)therm;
 
-	priv->bios_fan.pwm_freq = 0;
-	priv->bios_fan.min_duty = 0;
-	priv->bios_fan.max_duty = 100;
+	priv->fan->bios.pwm_freq = 0;
+	priv->fan->bios.min_duty = 0;
+	priv->fan->bios.max_duty = 100;
+	priv->fan->bios.bump_period = 500;
+	priv->fan->bios.slow_down_period = 2000;
+	priv->fan->bios.linear_min_temp = 40;
+	priv->fan->bios.linear_max_temp = 85;
 }
 
-
 static void
 nouveau_therm_fan_safety_checks(struct nouveau_therm *therm)
 {
 	struct nouveau_therm_priv *priv = (void *)therm;
 
-	if (priv->bios_fan.min_duty > 100)
-		priv->bios_fan.min_duty = 100;
-	if (priv->bios_fan.max_duty > 100)
-		priv->bios_fan.max_duty = 100;
+	if (priv->fan->bios.min_duty > 100)
+		priv->fan->bios.min_duty = 100;
+	if (priv->fan->bios.max_duty > 100)
+		priv->fan->bios.max_duty = 100;
 
-	if (priv->bios_fan.min_duty > priv->bios_fan.max_duty)
-		priv->bios_fan.min_duty = priv->bios_fan.max_duty;
-}
-
-int nouveau_fan_pwm_clock_dummy(struct nouveau_therm *therm)
-{
-	return 1;
+	if (priv->fan->bios.min_duty > priv->fan->bios.max_duty)
+		priv->fan->bios.min_duty = priv->fan->bios.max_duty;
 }
 
 int
 nouveau_therm_fan_ctor(struct nouveau_therm *therm)
 {
 	struct nouveau_therm_priv *priv = (void *)therm;
+	struct nouveau_gpio *gpio = nouveau_gpio(therm);
 	struct nouveau_bios *bios = nouveau_bios(therm);
+	struct dcb_gpio_func func;
+	int ret;
 
+	/* attempt to locate a drivable fan, and determine control method */
+	ret = gpio->find(gpio, 0, DCB_GPIO_FAN, 0xff, &func);
+	if (ret == 0) {
+		if (func.log[0] & DCB_GPIO_LOG_DIR_IN) {
+			nv_debug(therm, "GPIO_FAN is in input mode\n");
+			ret = -EINVAL;
+		} else {
+			ret = nouveau_fanpwm_create(therm, &func);
+			if (ret != 0)
+				ret = nouveau_fantog_create(therm, &func);
+		}
+	}
+
+	/* no controllable fan found, create a dummy fan module */
+	if (ret != 0) {
+		ret = nouveau_fannil_create(therm);
+		if (ret)
+			return ret;
+	}
+
+	nv_info(therm, "FAN control: %s\n", priv->fan->type);
+
+	/* attempt to detect a tachometer connection */
+	ret = gpio->find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff, &priv->fan->tach);
+	if (ret)
+		priv->fan->tach.func = DCB_GPIO_UNUSED;
+
+	/* initialise fan bump/slow update handling */
+	priv->fan->parent = therm;
+	nouveau_alarm_init(&priv->fan->alarm, nouveau_fan_alarm);
+	spin_lock_init(&priv->fan->lock);
+
+	/* other random init... */
 	nouveau_therm_fan_set_defaults(therm);
-	nvbios_perf_fan_parse(bios, &priv->bios_perf_fan);
-	if (nvbios_therm_fan_parse(bios, &priv->bios_fan))
+	nvbios_perf_fan_parse(bios, &priv->fan->perf);
+	if (nvbios_therm_fan_parse(bios, &priv->fan->bios))
 		nv_error(therm, "parsing the thermal table failed\n");
 	nouveau_therm_fan_safety_checks(therm);
-
-	nouveau_therm_fan_set_mode(therm, FAN_CONTROL_NONE);
-
 	return 0;
 }
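nouveau_fan_update() no longer jumps straight to the requested speed in automatic mode: it moves the duty at most three points per step and re-arms a timer alarm (bump_period/slow_down_period, 500ms/2000ms by default) until the target is reached. The stepping rule in isolation (illustrative helper, not driver code):

static int fan_step_toward(int duty, int target)
{
	if (duty < target)
		return (duty + 3 > target) ? target : duty + 3;
	if (duty > target)
		return (duty - 3 < target) ? target : duty - 3;
	return duty;
}
/* e.g. ramping 40% -> 58% takes six ticks at the 500ms bump_period */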
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fannil.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fannil.c
new file mode 100644
index 000000000000..b78c182e1d51
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fannil.c
@@ -0,0 +1,54 @@
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "priv.h"

static int
nouveau_fannil_get(struct nouveau_therm *therm)
{
	return -ENODEV;
}

static int
nouveau_fannil_set(struct nouveau_therm *therm, int percent)
{
	return -ENODEV;
}

int
nouveau_fannil_create(struct nouveau_therm *therm)
{
	struct nouveau_therm_priv *tpriv = (void *)therm;
	struct nouveau_fan *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	tpriv->fan = priv;
	if (!priv)
		return -ENOMEM;

	priv->type = "none / external";
	priv->get = nouveau_fannil_get;
	priv->set = nouveau_fannil_set;
	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c
new file mode 100644
index 000000000000..5f71db8e8992
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fanpwm.c
@@ -0,0 +1,107 @@
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 *          Martin Peres
 */

#include <core/option.h>
#include <subdev/gpio.h>

#include "priv.h"

struct nouveau_fanpwm_priv {
	struct nouveau_fan base;
	struct dcb_gpio_func func;
};

static int
nouveau_fanpwm_get(struct nouveau_therm *therm)
{
	struct nouveau_therm_priv *tpriv = (void *)therm;
	struct nouveau_fanpwm_priv *priv = (void *)tpriv->fan;
	struct nouveau_gpio *gpio = nouveau_gpio(therm);
	int card_type = nv_device(therm)->card_type;
	u32 divs, duty;
	int ret;

	ret = therm->pwm_get(therm, priv->func.line, &divs, &duty);
	if (ret == 0 && divs) {
		divs = max(divs, duty);
		if (card_type <= NV_40 || (priv->func.log[0] & 1))
			duty = divs - duty;
		return (duty * 100) / divs;
	}

	return gpio->get(gpio, 0, priv->func.func, priv->func.line) * 100;
}

static int
nouveau_fanpwm_set(struct nouveau_therm *therm, int percent)
{
	struct nouveau_therm_priv *tpriv = (void *)therm;
	struct nouveau_fanpwm_priv *priv = (void *)tpriv->fan;
	int card_type = nv_device(therm)->card_type;
	u32 divs, duty;
	int ret;

	divs = priv->base.perf.pwm_divisor;
	if (priv->base.bios.pwm_freq) {
		divs = 1;
		if (therm->pwm_clock)
			divs = therm->pwm_clock(therm);
		divs /= priv->base.bios.pwm_freq;
	}

	duty = ((divs * percent) + 99) / 100;
	if (card_type <= NV_40 || (priv->func.log[0] & 1))
		duty = divs - duty;

	ret = therm->pwm_set(therm, priv->func.line, divs, duty);
	if (ret == 0)
		ret = therm->pwm_ctrl(therm, priv->func.line, true);
	return ret;
}

int
nouveau_fanpwm_create(struct nouveau_therm *therm, struct dcb_gpio_func *func)
{
	struct nouveau_device *device = nv_device(therm);
	struct nouveau_therm_priv *tpriv = (void *)therm;
	struct nouveau_fanpwm_priv *priv;
	u32 divs, duty;

	if (!nouveau_boolopt(device->cfgopt, "NvFanPWM", func->param) ||
	    !therm->pwm_ctrl ||
	     therm->pwm_get(therm, func->line, &divs, &duty) == -ENODEV)
		return -ENODEV;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	tpriv->fan = &priv->base;
	if (!priv)
		return -ENOMEM;

	priv->base.type = "PWM";
	priv->base.get = nouveau_fanpwm_get;
	priv->base.set = nouveau_fanpwm_set;
	priv->func = *func;
	return 0;
}
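nouveau_fanpwm_set() converts a percentage to a divider-relative duty with a ceiling division, (divs * percent + 99) / 100, and inverts the value on NV40-and-earlier cards or active-low lines. With an illustrative divider of 210 at 58%: (210*58+99)/100 = 122, or 210-122 = 88 inverted. Restated as a hypothetical helper:

static unsigned int pwm_duty(unsigned int divs, unsigned int percent,
			     int invert)
{
	unsigned int duty = (divs * percent + 99) / 100;	/* round up */
	return invert ? divs - duty : duty;
}
/* pwm_duty(210, 58, 0) == 122; pwm_duty(210, 58, 1) == 88 */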
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c
new file mode 100644
index 000000000000..e601773ee475
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fantog.c
@@ -0,0 +1,115 @@
/*
 * Copyright 2012 The Nouveau community
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Martin Peres
 */

#include "priv.h"

#include <core/object.h>
#include <core/device.h>

#include <subdev/gpio.h>
#include <subdev/timer.h>

struct nouveau_fantog_priv {
	struct nouveau_fan base;
	struct nouveau_alarm alarm;
	spinlock_t lock;
	u32 period_us;
	u32 percent;
	struct dcb_gpio_func func;
};

static void
nouveau_fantog_update(struct nouveau_fantog_priv *priv, int percent)
{
	struct nouveau_therm_priv *tpriv = (void *)priv->base.parent;
	struct nouveau_timer *ptimer = nouveau_timer(tpriv);
	struct nouveau_gpio *gpio = nouveau_gpio(tpriv);
	unsigned long flags;
	int duty;

	spin_lock_irqsave(&priv->lock, flags);
	if (percent < 0)
		percent = priv->percent;
	priv->percent = percent;

	duty = !gpio->get(gpio, 0, DCB_GPIO_FAN, 0xff);
	gpio->set(gpio, 0, DCB_GPIO_FAN, 0xff, duty);

	if (list_empty(&priv->alarm.head) && percent != (duty * 100)) {
		u64 next_change = (percent * priv->period_us) / 100;
		if (!duty)
			next_change = priv->period_us - next_change;
		ptimer->alarm(ptimer, next_change * 1000, &priv->alarm);
	}
	spin_unlock_irqrestore(&priv->lock, flags);
}

static void
nouveau_fantog_alarm(struct nouveau_alarm *alarm)
{
	struct nouveau_fantog_priv *priv =
	       container_of(alarm, struct nouveau_fantog_priv, alarm);
	nouveau_fantog_update(priv, -1);
}

static int
nouveau_fantog_get(struct nouveau_therm *therm)
{
	struct nouveau_therm_priv *tpriv = (void *)therm;
	struct nouveau_fantog_priv *priv = (void *)tpriv->fan;
	return priv->percent;
}

static int
nouveau_fantog_set(struct nouveau_therm *therm, int percent)
{
	struct nouveau_therm_priv *tpriv = (void *)therm;
	struct nouveau_fantog_priv *priv = (void *)tpriv->fan;
	if (therm->pwm_ctrl)
		therm->pwm_ctrl(therm, priv->func.line, false);
	nouveau_fantog_update(priv, percent);
	return 0;
}

int
nouveau_fantog_create(struct nouveau_therm *therm, struct dcb_gpio_func *func)
{
	struct nouveau_therm_priv *tpriv = (void *)therm;
	struct nouveau_fantog_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	tpriv->fan = &priv->base;
	if (!priv)
		return -ENOMEM;

	priv->base.type = "toggle";
	priv->base.get = nouveau_fantog_get;
	priv->base.set = nouveau_fantog_set;
	nouveau_alarm_init(&priv->alarm, nouveau_fantog_alarm);
	priv->period_us = 100000; /* 10Hz */
	priv->percent = 100;
	priv->func = *func;
	spin_lock_init(&priv->lock);
	return 0;
}
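With no PWM controller available, fantog approximates one in software: every alarm flips the fan GPIO and re-arms after the on- or off-portion of a fixed 100000us period (10Hz), sized so the mean line level matches the requested duty. For 30%, that is 30ms high and 70ms low per period. The phase computation, restated with an illustrative helper:

static unsigned int fantog_phase_us(unsigned int percent,
				    unsigned int period_us, int line_high)
{
	unsigned int on_us = percent * period_us / 100;
	return line_high ? on_us : period_us - on_us;
}
/* fantog_phase_us(30, 100000, 1) == 30000; fantog_phase_us(30, 100000, 0) == 70000 */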
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
index e512ff0aae60..e24090bac195 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/ic.c
@@ -31,7 +31,7 @@ static bool
 probe_monitoring_device(struct nouveau_i2c_port *i2c,
 			struct i2c_board_info *info)
 {
-	struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c->i2c);
+	struct nouveau_therm_priv *priv = (void *)nouveau_therm(i2c);
 	struct i2c_client *client;
 
 	request_module("%s%s", I2C_MODULE_PREFIX, info->type);
@@ -53,6 +53,31 @@ probe_monitoring_device(struct nouveau_i2c_port *i2c,
 	return true;
 }
 
+static struct i2c_board_info
+nv_board_infos[] = {
+	{ I2C_BOARD_INFO("w83l785ts", 0x2d) },
+	{ I2C_BOARD_INFO("w83781d", 0x2d) },
+	{ I2C_BOARD_INFO("adt7473", 0x2e) },
+	{ I2C_BOARD_INFO("adt7473", 0x2d) },
+	{ I2C_BOARD_INFO("adt7473", 0x2c) },
+	{ I2C_BOARD_INFO("f75375", 0x2e) },
+	{ I2C_BOARD_INFO("lm99", 0x4c) },
+	{ I2C_BOARD_INFO("lm90", 0x4c) },
+	{ I2C_BOARD_INFO("lm90", 0x4d) },
+	{ I2C_BOARD_INFO("adm1021", 0x18) },
+	{ I2C_BOARD_INFO("adm1021", 0x19) },
+	{ I2C_BOARD_INFO("adm1021", 0x1a) },
+	{ I2C_BOARD_INFO("adm1021", 0x29) },
+	{ I2C_BOARD_INFO("adm1021", 0x2a) },
+	{ I2C_BOARD_INFO("adm1021", 0x2b) },
+	{ I2C_BOARD_INFO("adm1021", 0x4c) },
+	{ I2C_BOARD_INFO("adm1021", 0x4d) },
+	{ I2C_BOARD_INFO("adm1021", 0x4e) },
+	{ I2C_BOARD_INFO("lm63", 0x18) },
+	{ I2C_BOARD_INFO("lm63", 0x4e) },
+	{ }
+};
+
 void
 nouveau_therm_ic_ctor(struct nouveau_therm *therm)
 {
@@ -60,29 +85,6 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
 	struct nouveau_bios *bios = nouveau_bios(therm);
 	struct nouveau_i2c *i2c = nouveau_i2c(therm);
 	struct nvbios_extdev_func extdev_entry;
-	struct i2c_board_info info[] = {
-		{ I2C_BOARD_INFO("w83l785ts", 0x2d) },
-		{ I2C_BOARD_INFO("w83781d", 0x2d) },
-		{ I2C_BOARD_INFO("adt7473", 0x2e) },
-		{ I2C_BOARD_INFO("adt7473", 0x2d) },
-		{ I2C_BOARD_INFO("adt7473", 0x2c) },
-		{ I2C_BOARD_INFO("f75375", 0x2e) },
-		{ I2C_BOARD_INFO("lm99", 0x4c) },
-		{ I2C_BOARD_INFO("lm90", 0x4c) },
-		{ I2C_BOARD_INFO("lm90", 0x4d) },
-		{ I2C_BOARD_INFO("adm1021", 0x18) },
-		{ I2C_BOARD_INFO("adm1021", 0x19) },
-		{ I2C_BOARD_INFO("adm1021", 0x1a) },
-		{ I2C_BOARD_INFO("adm1021", 0x29) },
-		{ I2C_BOARD_INFO("adm1021", 0x2a) },
-		{ I2C_BOARD_INFO("adm1021", 0x2b) },
-		{ I2C_BOARD_INFO("adm1021", 0x4c) },
-		{ I2C_BOARD_INFO("adm1021", 0x4d) },
-		{ I2C_BOARD_INFO("adm1021", 0x4e) },
-		{ I2C_BOARD_INFO("lm63", 0x18) },
-		{ I2C_BOARD_INFO("lm63", 0x4e) },
-		{ }
-	};
 
 	if (!nvbios_extdev_find(bios, NVBIOS_EXTDEV_LM89, &extdev_entry)) {
 		struct i2c_board_info board[] = {
@@ -111,6 +113,6 @@ nouveau_therm_ic_ctor(struct nouveau_therm *therm)
 	/* The vbios doesn't provide the address of an exisiting monitoring
 	   device. Let's try our static list.
 	 */
-	i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device", info,
-		      probe_monitoring_device);
+	i2c->identify(i2c, NV_I2C_DEFAULT(0), "monitoring device",
+		      nv_board_infos, probe_monitoring_device);
 }
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
index fcf2cfe731d6..0f5363edb964 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv40.c
@@ -25,6 +25,10 @@
 
 #include "priv.h"
 
+struct nv40_therm_priv {
+	struct nouveau_therm_priv base;
+};
+
 static int
 nv40_sensor_setup(struct nouveau_therm *therm)
 {
@@ -34,6 +38,7 @@ nv40_sensor_setup(struct nouveau_therm *therm)
 	if (device->chipset >= 0x46) {
 		nv_mask(therm, 0x15b8, 0x80000000, 0);
 		nv_wr32(therm, 0x15b0, 0x80003fff);
+		mdelay(10); /* wait for the temperature to stabilize */
 		return nv_rd32(therm, 0x15b4) & 0x3fff;
 	} else {
 		nv_wr32(therm, 0x15b0, 0xff);
@@ -75,7 +80,20 @@ nv40_temp_get(struct nouveau_therm *therm)
 	return core_temp;
 }
 
-int
+static int
+nv40_fan_pwm_ctrl(struct nouveau_therm *therm, int line, bool enable)
+{
+	u32 mask = enable ? 0x80000000 : 0x00000000;
+	if (line == 2) nv_mask(therm, 0x0010f0, 0x80000000, mask);
+	else if (line == 9) nv_mask(therm, 0x0015f4, 0x80000000, mask);
+	else {
+		nv_error(therm, "unknown pwm ctrl for gpio %d\n", line);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static int
 nv40_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
 {
 	if (line == 2) {
@@ -101,15 +119,15 @@ nv40_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
 	return -EINVAL;
 }
 
-int
+static int
 nv40_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
 {
 	if (line == 2) {
-		nv_wr32(therm, 0x0010f0, 0x80000000 | (duty << 16) | divs);
+		nv_mask(therm, 0x0010f0, 0x7fff7fff, (duty << 16) | divs);
 	} else
 	if (line == 9) {
 		nv_wr32(therm, 0x0015f8, divs);
-		nv_wr32(therm, 0x0015f4, duty | 0x80000000);
+		nv_mask(therm, 0x0015f4, 0x7fffffff, duty);
 	} else {
 		nv_error(therm, "unknown pwm ctrl for gpio %d\n", line);
 		return -ENODEV;
@@ -118,37 +136,51 @@ nv40_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
118 return 0; 136 return 0;
119} 137}
120 138
139static void
140nv40_therm_intr(struct nouveau_subdev *subdev)
141{
142 struct nouveau_therm *therm = nouveau_therm(subdev);
143 uint32_t stat = nv_rd32(therm, 0x1100);
144
145	/* processing */
146
147 /* ack all IRQs */
148 nv_wr32(therm, 0x1100, 0x70000);
149
150 nv_error(therm, "THERM received an IRQ: stat = %x\n", stat);
151}
152
121static int 153static int
122nv40_therm_ctor(struct nouveau_object *parent, 154nv40_therm_ctor(struct nouveau_object *parent,
123 struct nouveau_object *engine, 155 struct nouveau_object *engine,
124 struct nouveau_oclass *oclass, void *data, u32 size, 156 struct nouveau_oclass *oclass, void *data, u32 size,
125 struct nouveau_object **pobject) 157 struct nouveau_object **pobject)
126{ 158{
127 struct nouveau_therm_priv *priv; 159 struct nv40_therm_priv *priv;
128 struct nouveau_therm *therm;
129 int ret; 160 int ret;
130 161
131 ret = nouveau_therm_create(parent, engine, oclass, &priv); 162 ret = nouveau_therm_create(parent, engine, oclass, &priv);
132 *pobject = nv_object(priv); 163 *pobject = nv_object(priv);
133 therm = (void *) priv;
134 if (ret) 164 if (ret)
135 return ret; 165 return ret;
136 166
137 nouveau_therm_ic_ctor(therm); 167 priv->base.base.pwm_ctrl = nv40_fan_pwm_ctrl;
138 nouveau_therm_sensor_ctor(therm); 168 priv->base.base.pwm_get = nv40_fan_pwm_get;
139 nouveau_therm_fan_ctor(therm); 169 priv->base.base.pwm_set = nv40_fan_pwm_set;
170 priv->base.base.temp_get = nv40_temp_get;
171 priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
172 nv_subdev(priv)->intr = nv40_therm_intr;
173 return nouveau_therm_preinit(&priv->base.base);
174}
140 175
141 priv->fan.pwm_get = nv40_fan_pwm_get; 176static int
142 priv->fan.pwm_set = nv40_fan_pwm_set; 177nv40_therm_init(struct nouveau_object *object)
178{
179 struct nouveau_therm *therm = (void *)object;
143 180
144 therm->temp_get = nv40_temp_get; 181 nv40_sensor_setup(therm);
145 therm->fan_get = nouveau_therm_fan_user_get;
146 therm->fan_set = nouveau_therm_fan_user_set;
147 therm->fan_sense = nouveau_therm_fan_sense;
148 therm->attr_get = nouveau_therm_attr_get;
149 therm->attr_set = nouveau_therm_attr_set;
150 182
151 return 0; 183 return _nouveau_therm_init(object);
152} 184}
153 185
154struct nouveau_oclass 186struct nouveau_oclass
@@ -157,7 +189,7 @@ nv40_therm_oclass = {
157 .ofuncs = &(struct nouveau_ofuncs) { 189 .ofuncs = &(struct nouveau_ofuncs) {
158 .ctor = nv40_therm_ctor, 190 .ctor = nv40_therm_ctor,
159 .dtor = _nouveau_therm_dtor, 191 .dtor = _nouveau_therm_dtor,
160 .init = nouveau_therm_init, 192 .init = nv40_therm_init,
161 .fini = nouveau_therm_fini, 193 .fini = _nouveau_therm_fini,
162 }, 194 },
163}; \ No newline at end of file 195};
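An aside on the nv40 PWM registers touched above: for fan line 2 the divider occupies the low 16 bits of 0x0010f0 and the duty cycle bits 16-30, while bit 31 is the enable flag that the new nv40_fan_pwm_ctrl() toggles on its own — which is why nv40_fan_pwm_set() switched from nv_wr32() to nv_mask() with a 0x7fff7fff mask, leaving the enable bit untouched. A minimal sketch of deriving a divs/duty pair from a fan percentage; the helper name and the fixed divider are illustrative assumptions, not part of this patch:

	/* Illustrative only: map a 0..100 fan percentage onto the divs/duty
	 * pair consumed by nv40_fan_pwm_set().  The full-scale divider here
	 * is an assumed constant; the driver derives it from the PWM clock. */
	static int
	example_percent_to_pwm(int percent, u32 *divs, u32 *duty)
	{
		if (percent < 0 || percent > 100)
			return -EINVAL;
		*divs = 0x7fff;                  /* assumed full-scale divider */
		*duty = (*divs * percent) / 100; /* duty scales against divs */
		return 0;
	}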
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
index 9360ddd469e7..86632cbd65ce 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nv50.c
@@ -25,6 +25,10 @@
25 25
26#include "priv.h" 26#include "priv.h"
27 27
28struct nv50_therm_priv {
29 struct nouveau_therm_priv base;
30};
31
28static int 32static int
29pwm_info(struct nouveau_therm *therm, int *line, int *ctrl, int *indx) 33pwm_info(struct nouveau_therm *therm, int *line, int *ctrl, int *indx)
30{ 34{
@@ -51,6 +55,16 @@ pwm_info(struct nouveau_therm *therm, int *line, int *ctrl, int *indx)
51} 55}
52 56
53int 57int
58nv50_fan_pwm_ctrl(struct nouveau_therm *therm, int line, bool enable)
59{
60 u32 data = enable ? 0x00000001 : 0x00000000;
61 int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
62 if (ret == 0)
63 nv_mask(therm, ctrl, 0x00010001 << line, data << line);
64 return ret;
65}
66
67int
54nv50_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty) 68nv50_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
55{ 69{
56 int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id); 70 int ctrl, id, ret = pwm_info(therm, &line, &ctrl, &id);
@@ -73,7 +87,6 @@ nv50_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
73 if (ret) 87 if (ret)
74 return ret; 88 return ret;
75 89
76 nv_mask(therm, ctrl, 0x00010001 << line, 0x00000001 << line);
77 nv_wr32(therm, 0x00e114 + (id * 8), divs); 90 nv_wr32(therm, 0x00e114 + (id * 8), divs);
78 nv_wr32(therm, 0x00e118 + (id * 8), duty | 0x80000000); 91 nv_wr32(therm, 0x00e118 + (id * 8), duty | 0x80000000);
79 return 0; 92 return 0;
@@ -111,38 +124,178 @@ nv50_temp_get(struct nouveau_therm *therm)
111 return nv_rd32(therm, 0x20400); 124 return nv_rd32(therm, 0x20400);
112} 125}
113 126
127static void
128nv50_therm_program_alarms(struct nouveau_therm *therm)
129{
130 struct nouveau_therm_priv *priv = (void *)therm;
131 struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
132 unsigned long flags;
133
134 spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
135
136 /* enable RISING and FALLING IRQs for shutdown, THRS 0, 1, 2 and 4 */
137 nv_wr32(therm, 0x20000, 0x000003ff);
138
139	/* shutdown: the computer should be shut down when this threshold is reached */
140 nv_wr32(therm, 0x20484, sensor->thrs_shutdown.hysteresis);
141 nv_wr32(therm, 0x20480, sensor->thrs_shutdown.temp);
142
143	/* THRS_1 : fan boost */
144 nv_wr32(therm, 0x204c4, sensor->thrs_fan_boost.temp);
145
146 /* THRS_2 : critical */
147 nv_wr32(therm, 0x204c0, sensor->thrs_critical.temp);
148
149 /* THRS_4 : down clock */
150 nv_wr32(therm, 0x20414, sensor->thrs_down_clock.temp);
151 spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
152
153 nv_info(therm,
154 "Programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
155 sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
156 sensor->thrs_down_clock.temp,
157 sensor->thrs_down_clock.hysteresis,
158 sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
159 sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
160
161}
162
163/* must be called with alarm_program_lock taken! */
164static void
165nv50_therm_threshold_hyst_emulation(struct nouveau_therm *therm,
166 uint32_t thrs_reg, u8 status_bit,
167 const struct nvbios_therm_threshold *thrs,
168 enum nouveau_therm_thrs thrs_name)
169{
170 enum nouveau_therm_thrs_direction direction;
171 enum nouveau_therm_thrs_state prev_state, new_state;
172 int temp, cur;
173
174 prev_state = nouveau_therm_sensor_get_threshold_state(therm, thrs_name);
175 temp = nv_rd32(therm, thrs_reg);
176
177 /* program the next threshold */
178 if (temp == thrs->temp) {
179 nv_wr32(therm, thrs_reg, thrs->temp - thrs->hysteresis);
180 new_state = NOUVEAU_THERM_THRS_HIGHER;
181 } else {
182 nv_wr32(therm, thrs_reg, thrs->temp);
183 new_state = NOUVEAU_THERM_THRS_LOWER;
184 }
185
186 /* fix the state (in case someone reprogrammed the alarms) */
187 cur = therm->temp_get(therm);
188 if (new_state == NOUVEAU_THERM_THRS_LOWER && cur > thrs->temp)
189 new_state = NOUVEAU_THERM_THRS_HIGHER;
190 else if (new_state == NOUVEAU_THERM_THRS_HIGHER &&
191 cur < thrs->temp - thrs->hysteresis)
192 new_state = NOUVEAU_THERM_THRS_LOWER;
193 nouveau_therm_sensor_set_threshold_state(therm, thrs_name, new_state);
194
195 /* find the direction */
196 if (prev_state < new_state)
197 direction = NOUVEAU_THERM_THRS_RISING;
198 else if (prev_state > new_state)
199 direction = NOUVEAU_THERM_THRS_FALLING;
200 else
201 return;
202
203 /* advertise a change in direction */
204 nouveau_therm_sensor_event(therm, thrs_name, direction);
205}
206
207static void
208nv50_therm_intr(struct nouveau_subdev *subdev)
209{
210 struct nouveau_therm *therm = nouveau_therm(subdev);
211 struct nouveau_therm_priv *priv = (void *)therm;
212 struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
213 unsigned long flags;
214 uint32_t intr;
215
216 spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
217
218 intr = nv_rd32(therm, 0x20100);
219
220 /* THRS_4: downclock */
221 if (intr & 0x002) {
222 nv50_therm_threshold_hyst_emulation(therm, 0x20414, 24,
223 &sensor->thrs_down_clock,
224 NOUVEAU_THERM_THRS_DOWNCLOCK);
225 intr &= ~0x002;
226 }
227
228 /* shutdown */
229 if (intr & 0x004) {
230 nv50_therm_threshold_hyst_emulation(therm, 0x20480, 20,
231 &sensor->thrs_shutdown,
232 NOUVEAU_THERM_THRS_SHUTDOWN);
233 intr &= ~0x004;
234 }
235
236 /* THRS_1 : fan boost */
237 if (intr & 0x008) {
238 nv50_therm_threshold_hyst_emulation(therm, 0x204c4, 21,
239 &sensor->thrs_fan_boost,
240 NOUVEAU_THERM_THRS_FANBOOST);
241 intr &= ~0x008;
242 }
243
244 /* THRS_2 : critical */
245 if (intr & 0x010) {
246 nv50_therm_threshold_hyst_emulation(therm, 0x204c0, 22,
247 &sensor->thrs_critical,
248 NOUVEAU_THERM_THRS_CRITICAL);
249 intr &= ~0x010;
250 }
251
252 if (intr)
253 nv_error(therm, "unhandled intr 0x%08x\n", intr);
254
255 /* ACK everything */
256 nv_wr32(therm, 0x20100, 0xffffffff);
257 nv_wr32(therm, 0x1100, 0x10000); /* PBUS */
258
259 spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
260}
261
114static int 262static int
115nv50_therm_ctor(struct nouveau_object *parent, 263nv50_therm_ctor(struct nouveau_object *parent,
116 struct nouveau_object *engine, 264 struct nouveau_object *engine,
117 struct nouveau_oclass *oclass, void *data, u32 size, 265 struct nouveau_oclass *oclass, void *data, u32 size,
118 struct nouveau_object **pobject) 266 struct nouveau_object **pobject)
119{ 267{
120 struct nouveau_therm_priv *priv; 268 struct nv50_therm_priv *priv;
121 struct nouveau_therm *therm;
122 int ret; 269 int ret;
123 270
124 ret = nouveau_therm_create(parent, engine, oclass, &priv); 271 ret = nouveau_therm_create(parent, engine, oclass, &priv);
125 *pobject = nv_object(priv); 272 *pobject = nv_object(priv);
126 therm = (void *) priv;
127 if (ret) 273 if (ret)
128 return ret; 274 return ret;
129 275
130 nouveau_therm_ic_ctor(therm); 276 priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl;
131 nouveau_therm_sensor_ctor(therm); 277 priv->base.base.pwm_get = nv50_fan_pwm_get;
132 nouveau_therm_fan_ctor(therm); 278 priv->base.base.pwm_set = nv50_fan_pwm_set;
279 priv->base.base.pwm_clock = nv50_fan_pwm_clock;
280 priv->base.base.temp_get = nv50_temp_get;
281 priv->base.sensor.program_alarms = nv50_therm_program_alarms;
282 nv_subdev(priv)->intr = nv50_therm_intr;
133 283
134 priv->fan.pwm_get = nv50_fan_pwm_get; 284 /* init the thresholds */
135 priv->fan.pwm_set = nv50_fan_pwm_set; 285 nouveau_therm_sensor_set_threshold_state(&priv->base.base,
136 priv->fan.pwm_clock = nv50_fan_pwm_clock; 286 NOUVEAU_THERM_THRS_SHUTDOWN,
287 NOUVEAU_THERM_THRS_LOWER);
288 nouveau_therm_sensor_set_threshold_state(&priv->base.base,
289 NOUVEAU_THERM_THRS_FANBOOST,
290 NOUVEAU_THERM_THRS_LOWER);
291 nouveau_therm_sensor_set_threshold_state(&priv->base.base,
292 NOUVEAU_THERM_THRS_CRITICAL,
293 NOUVEAU_THERM_THRS_LOWER);
294 nouveau_therm_sensor_set_threshold_state(&priv->base.base,
295 NOUVEAU_THERM_THRS_DOWNCLOCK,
296 NOUVEAU_THERM_THRS_LOWER);
137 297
138 therm->temp_get = nv50_temp_get; 298 return nouveau_therm_preinit(&priv->base.base);
139 therm->fan_get = nouveau_therm_fan_user_get;
140 therm->fan_set = nouveau_therm_fan_user_set;
141 therm->fan_sense = nouveau_therm_fan_sense;
142 therm->attr_get = nouveau_therm_attr_get;
143 therm->attr_set = nouveau_therm_attr_set;
144
145 return 0;
146} 299}
147 300
148struct nouveau_oclass 301struct nouveau_oclass
@@ -151,7 +304,7 @@ nv50_therm_oclass = {
151 .ofuncs = &(struct nouveau_ofuncs) { 304 .ofuncs = &(struct nouveau_ofuncs) {
152 .ctor = nv50_therm_ctor, 305 .ctor = nv50_therm_ctor,
153 .dtor = _nouveau_therm_dtor, 306 .dtor = _nouveau_therm_dtor,
154 .init = nouveau_therm_init, 307 .init = _nouveau_therm_init,
155 .fini = nouveau_therm_fini, 308 .fini = _nouveau_therm_fini,
156 }, 309 },
157}; 310};
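The interrupt handler added to nv50.c above leans on nv50_therm_threshold_hyst_emulation() because the hardware exposes only a single programmable trip point per event, with no native hysteresis: each time an alarm fires, the driver re-arms the register at the opposite edge (thrs->temp - hysteresis after a rising crossing, thrs->temp after a falling one) and derives the crossing direction from the state change. A stripped-down model of that alternation, with hypothetical names and the register I/O removed:

	/* Hypothetical model of the software hysteresis: 'armed' mirrors the
	 * value currently programmed into the threshold register. */
	struct hyst { int temp, hysteresis, armed; bool higher; };

	static void
	hyst_alarm_fired(struct hyst *h)
	{
		if (h->armed == h->temp) {              /* crossed on the way up */
			h->armed = h->temp - h->hysteresis; /* re-arm falling edge */
			h->higher = true;
		} else {                                /* crossed on the way down */
			h->armed = h->temp;                 /* re-arm rising edge */
			h->higher = false;
		}
	}

The extra therm->temp_get() read in the real code guards against this state going stale if something else reprogrammed the alarms in the meantime.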
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
new file mode 100644
index 000000000000..2dcc5437116a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nva3.c
@@ -0,0 +1,99 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/gpio.h>
26
27#include "priv.h"
28
29struct nva3_therm_priv {
30 struct nouveau_therm_priv base;
31};
32
33int
34nva3_therm_fan_sense(struct nouveau_therm *therm)
35{
36 u32 tach = nv_rd32(therm, 0x00e728) & 0x0000ffff;
37 u32 ctrl = nv_rd32(therm, 0x00e720);
38 if (ctrl & 0x00000001)
39 return tach * 60;
40 return -ENODEV;
41}
42
43static int
44nva3_therm_init(struct nouveau_object *object)
45{
46 struct nva3_therm_priv *priv = (void *)object;
47 struct dcb_gpio_func *tach = &priv->base.fan->tach;
48 int ret;
49
50 ret = nouveau_therm_init(&priv->base.base);
51 if (ret)
52 return ret;
53
54	/* enable fan tach, count revolutions per second */
55 nv_mask(priv, 0x00e720, 0x00000003, 0x00000002);
56 if (tach->func != DCB_GPIO_UNUSED) {
57 nv_wr32(priv, 0x00e724, nv_device(priv)->crystal * 1000);
58 nv_mask(priv, 0x00e720, 0x001f0000, tach->line << 16);
59 nv_mask(priv, 0x00e720, 0x00000001, 0x00000001);
60 }
61 nv_mask(priv, 0x00e720, 0x00000002, 0x00000000);
62
63 return 0;
64}
65
66static int
67nva3_therm_ctor(struct nouveau_object *parent,
68 struct nouveau_object *engine,
69 struct nouveau_oclass *oclass, void *data, u32 size,
70 struct nouveau_object **pobject)
71{
72 struct nva3_therm_priv *priv;
73 int ret;
74
75 ret = nouveau_therm_create(parent, engine, oclass, &priv);
76 *pobject = nv_object(priv);
77 if (ret)
78 return ret;
79
80 priv->base.base.pwm_ctrl = nv50_fan_pwm_ctrl;
81 priv->base.base.pwm_get = nv50_fan_pwm_get;
82 priv->base.base.pwm_set = nv50_fan_pwm_set;
83 priv->base.base.pwm_clock = nv50_fan_pwm_clock;
84 priv->base.base.temp_get = nv50_temp_get;
85 priv->base.base.fan_sense = nva3_therm_fan_sense;
86 priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
87 return nouveau_therm_preinit(&priv->base.base);
88}
89
90struct nouveau_oclass
91nva3_therm_oclass = {
92 .handle = NV_SUBDEV(THERM, 0xa3),
93 .ofuncs = &(struct nouveau_ofuncs) {
94 .ctor = nva3_therm_ctor,
95 .dtor = _nouveau_therm_dtor,
96 .init = nva3_therm_init,
97 .fini = _nouveau_therm_fini,
98 },
99};
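nva3_therm_fan_sense() above reads as it does because nva3_therm_init() programs the tach window register (0x00e724) with crystal * 1000 ticks — one second's worth, assuming the crystal frequency is stored in kHz, which the RPM math also assumes. The counter at 0x00e728 then accumulates revolutions per second, and scaling to RPM is a plain multiply. A worked example under that assumption:

	/* Example: with a one-second window, a tach count of 20 corresponds
	 * to 20 * 60 = 1200 RPM, matching nva3_therm_fan_sense(). */
	static inline int
	example_tach_to_rpm(u32 revs_per_second)
	{
		return revs_per_second * 60;
	}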
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
new file mode 100644
index 000000000000..d7d30ee8332e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c
@@ -0,0 +1,153 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "priv.h"
26
27struct nvd0_therm_priv {
28 struct nouveau_therm_priv base;
29};
30
31static int
32pwm_info(struct nouveau_therm *therm, int line)
33{
34 u32 gpio = nv_rd32(therm, 0x00d610 + (line * 0x04));
35 switch (gpio & 0x000000c0) {
36 case 0x00000000: /* normal mode, possibly pwm forced off by us */
37 case 0x00000040: /* nvio special */
38 switch (gpio & 0x0000001f) {
39 case 0x19: return 1;
40 case 0x1c: return 0;
41 default:
42 break;
43 }
44 default:
45 break;
46 }
47
48 nv_error(therm, "GPIO %d unknown PWM: 0x%08x\n", line, gpio);
49 return -ENODEV;
50}
51
52static int
53nvd0_fan_pwm_ctrl(struct nouveau_therm *therm, int line, bool enable)
54{
55 u32 data = enable ? 0x00000040 : 0x00000000;
56 int indx = pwm_info(therm, line);
57 if (indx < 0)
58 return indx;
59
60 nv_mask(therm, 0x00d610 + (line * 0x04), 0x000000c0, data);
61 return 0;
62}
63
64static int
65nvd0_fan_pwm_get(struct nouveau_therm *therm, int line, u32 *divs, u32 *duty)
66{
67 int indx = pwm_info(therm, line);
68 if (indx < 0)
69 return indx;
70
71 if (nv_rd32(therm, 0x00d610 + (line * 0x04)) & 0x00000040) {
72 *divs = nv_rd32(therm, 0x00e114 + (indx * 8));
73 *duty = nv_rd32(therm, 0x00e118 + (indx * 8));
74 return 0;
75 }
76
77 return -EINVAL;
78}
79
80static int
81nvd0_fan_pwm_set(struct nouveau_therm *therm, int line, u32 divs, u32 duty)
82{
83 int indx = pwm_info(therm, line);
84 if (indx < 0)
85 return indx;
86
87 nv_wr32(therm, 0x00e114 + (indx * 8), divs);
88 nv_wr32(therm, 0x00e118 + (indx * 8), duty | 0x80000000);
89 return 0;
90}
91
92static int
93nvd0_fan_pwm_clock(struct nouveau_therm *therm)
94{
95 return (nv_device(therm)->crystal * 1000) / 20;
96}
97
98static int
99nvd0_therm_init(struct nouveau_object *object)
100{
101 struct nvd0_therm_priv *priv = (void *)object;
102 int ret;
103
104 ret = nouveau_therm_init(&priv->base.base);
105 if (ret)
106 return ret;
107
108	/* enable fan tach, count revolutions per second */
109 nv_mask(priv, 0x00e720, 0x00000003, 0x00000002);
110 if (priv->base.fan->tach.func != DCB_GPIO_UNUSED) {
111 nv_mask(priv, 0x00d79c, 0x000000ff, priv->base.fan->tach.line);
112 nv_wr32(priv, 0x00e724, nv_device(priv)->crystal * 1000);
113 nv_mask(priv, 0x00e720, 0x00000001, 0x00000001);
114 }
115 nv_mask(priv, 0x00e720, 0x00000002, 0x00000000);
116
117 return 0;
118}
119
120static int
121nvd0_therm_ctor(struct nouveau_object *parent,
122 struct nouveau_object *engine,
123 struct nouveau_oclass *oclass, void *data, u32 size,
124 struct nouveau_object **pobject)
125{
126 struct nvd0_therm_priv *priv;
127 int ret;
128
129 ret = nouveau_therm_create(parent, engine, oclass, &priv);
130 *pobject = nv_object(priv);
131 if (ret)
132 return ret;
133
134 priv->base.base.pwm_ctrl = nvd0_fan_pwm_ctrl;
135 priv->base.base.pwm_get = nvd0_fan_pwm_get;
136 priv->base.base.pwm_set = nvd0_fan_pwm_set;
137 priv->base.base.pwm_clock = nvd0_fan_pwm_clock;
138 priv->base.base.temp_get = nv50_temp_get;
139 priv->base.base.fan_sense = nva3_therm_fan_sense;
140 priv->base.sensor.program_alarms = nouveau_therm_program_alarms_polling;
141 return nouveau_therm_preinit(&priv->base.base);
142}
143
144struct nouveau_oclass
145nvd0_therm_oclass = {
146 .handle = NV_SUBDEV(THERM, 0xd0),
147 .ofuncs = &(struct nouveau_ofuncs) {
148 .ctor = nvd0_therm_ctor,
149 .dtor = _nouveau_therm_dtor,
150 .init = nvd0_therm_init,
151 .fini = _nouveau_therm_fini,
152 },
153};
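Two small points about the new nvd0 implementation: pwm_info() maps a fan GPIO line onto a PWM controller index by decoding the line's mux (bits 7:6 select the mode, bits 4:0 the function, with functions 0x1c and 0x19 mapping to controllers 0 and 1 respectively), and nvd0_fan_pwm_clock() fixes the PWM base clock at one twentieth of the crystal frequency. A worked example of the latter — the 27 MHz crystal value is an assumption for illustration:

	/* Example: crystal is stored in kHz, so a 27 MHz crystal gives
	 * (27000 * 1000) / 20 = 1,350,000 Hz as the nvd0 PWM base clock. */
	static int
	example_nvd0_pwm_clock_hz(int crystal_khz)
	{
		return (crystal_khz * 1000) / 20;
	}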
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
index 1c3cd6abc36e..06b98706b3fc 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
@@ -1,3 +1,6 @@
1#ifndef __NVTHERM_PRIV_H__
2#define __NVTHERM_PRIV_H__
3
1/* 4/*
2 * Copyright 2012 The Nouveau community 5 * Copyright 2012 The Nouveau community
3 * 6 *
@@ -25,33 +28,81 @@
25#include <subdev/therm.h> 28#include <subdev/therm.h>
26 29
27#include <subdev/bios/extdev.h> 30#include <subdev/bios/extdev.h>
31#include <subdev/bios/gpio.h>
28#include <subdev/bios/perf.h> 32#include <subdev/bios/perf.h>
29#include <subdev/bios/therm.h> 33#include <subdev/bios/therm.h>
34#include <subdev/timer.h>
35
36struct nouveau_fan {
37 struct nouveau_therm *parent;
38 const char *type;
39
40 struct nvbios_therm_fan bios;
41 struct nvbios_perf_fan perf;
42
43 struct nouveau_alarm alarm;
44 spinlock_t lock;
45 int percent;
46
47 int (*get)(struct nouveau_therm *therm);
48 int (*set)(struct nouveau_therm *therm, int percent);
49
50 struct dcb_gpio_func tach;
51};
52
53enum nouveau_therm_thrs_direction {
54 NOUVEAU_THERM_THRS_FALLING = 0,
55 NOUVEAU_THERM_THRS_RISING = 1
56};
57
58enum nouveau_therm_thrs_state {
59 NOUVEAU_THERM_THRS_LOWER = 0,
60 NOUVEAU_THERM_THRS_HIGHER = 1
61};
62
63enum nouveau_therm_thrs {
64 NOUVEAU_THERM_THRS_FANBOOST = 0,
65 NOUVEAU_THERM_THRS_DOWNCLOCK = 1,
66 NOUVEAU_THERM_THRS_CRITICAL = 2,
67 NOUVEAU_THERM_THRS_SHUTDOWN = 3,
68 NOUVEAU_THERM_THRS_NR
69};
30 70
31struct nouveau_therm_priv { 71struct nouveau_therm_priv {
32 struct nouveau_therm base; 72 struct nouveau_therm base;
33 73
74 /* automatic thermal management */
75 struct nouveau_alarm alarm;
76 spinlock_t lock;
77 struct nouveau_therm_trip_point *last_trip;
78 int mode;
79 int suspend;
80
34 /* bios */ 81 /* bios */
35 struct nvbios_therm_sensor bios_sensor; 82 struct nvbios_therm_sensor bios_sensor;
36 struct nvbios_therm_fan bios_fan;
37 struct nvbios_perf_fan bios_perf_fan;
38 83
39 /* fan priv */ 84 /* fan priv */
85 struct nouveau_fan *fan;
86
87 /* alarms priv */
40 struct { 88 struct {
41 enum nouveau_therm_fan_mode mode; 89 spinlock_t alarm_program_lock;
42 int percent; 90 struct nouveau_alarm therm_poll_alarm;
91 enum nouveau_therm_thrs_state alarm_state[NOUVEAU_THERM_THRS_NR];
92 void (*program_alarms)(struct nouveau_therm *);
93 } sensor;
43 94
44 int (*pwm_get)(struct nouveau_therm *, int line, u32*, u32*); 95 /* what should be done if the card overheats */
45 int (*pwm_set)(struct nouveau_therm *, int line, u32, u32); 96 struct {
46 int (*pwm_clock)(struct nouveau_therm *); 97 void (*downclock)(struct nouveau_therm *, bool active);
47 } fan; 98 void (*pause)(struct nouveau_therm *, bool active);
99 } emergency;
48 100
49 /* ic */ 101 /* ic */
50 struct i2c_client *ic; 102 struct i2c_client *ic;
51}; 103};
52 104
53int nouveau_therm_init(struct nouveau_object *object); 105int nouveau_therm_mode(struct nouveau_therm *therm, int mode);
54int nouveau_therm_fini(struct nouveau_object *object, bool suspend);
55int nouveau_therm_attr_get(struct nouveau_therm *therm, 106int nouveau_therm_attr_get(struct nouveau_therm *therm,
56 enum nouveau_therm_attr_type type); 107 enum nouveau_therm_attr_type type);
57int nouveau_therm_attr_set(struct nouveau_therm *therm, 108int nouveau_therm_attr_set(struct nouveau_therm *therm,
@@ -63,11 +114,35 @@ int nouveau_therm_sensor_ctor(struct nouveau_therm *therm);
63 114
64int nouveau_therm_fan_ctor(struct nouveau_therm *therm); 115int nouveau_therm_fan_ctor(struct nouveau_therm *therm);
65int nouveau_therm_fan_get(struct nouveau_therm *therm); 116int nouveau_therm_fan_get(struct nouveau_therm *therm);
66int nouveau_therm_fan_set(struct nouveau_therm *therm, int percent); 117int nouveau_therm_fan_set(struct nouveau_therm *therm, bool now, int percent);
67int nouveau_therm_fan_user_get(struct nouveau_therm *therm); 118int nouveau_therm_fan_user_get(struct nouveau_therm *therm);
68int nouveau_therm_fan_user_set(struct nouveau_therm *therm, int percent); 119int nouveau_therm_fan_user_set(struct nouveau_therm *therm, int percent);
69int nouveau_therm_fan_set_mode(struct nouveau_therm *therm,
70 enum nouveau_therm_fan_mode mode);
71
72 120
73int nouveau_therm_fan_sense(struct nouveau_therm *therm); 121int nouveau_therm_fan_sense(struct nouveau_therm *therm);
122
123int nouveau_therm_preinit(struct nouveau_therm *);
124
125void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm,
126 enum nouveau_therm_thrs thrs,
127 enum nouveau_therm_thrs_state st);
128enum nouveau_therm_thrs_state
129nouveau_therm_sensor_get_threshold_state(struct nouveau_therm *therm,
130 enum nouveau_therm_thrs thrs);
131void nouveau_therm_sensor_event(struct nouveau_therm *therm,
132 enum nouveau_therm_thrs thrs,
133 enum nouveau_therm_thrs_direction dir);
134void nouveau_therm_program_alarms_polling(struct nouveau_therm *therm);
135
136int nv50_fan_pwm_ctrl(struct nouveau_therm *, int, bool);
137int nv50_fan_pwm_get(struct nouveau_therm *, int, u32 *, u32 *);
138int nv50_fan_pwm_set(struct nouveau_therm *, int, u32, u32);
139int nv50_fan_pwm_clock(struct nouveau_therm *);
140int nv50_temp_get(struct nouveau_therm *therm);
141
142int nva3_therm_fan_sense(struct nouveau_therm *);
143
144int nouveau_fanpwm_create(struct nouveau_therm *, struct dcb_gpio_func *);
145int nouveau_fantog_create(struct nouveau_therm *, struct dcb_gpio_func *);
146int nouveau_fannil_create(struct nouveau_therm *);
147
148#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
index 204282301fb1..b37624af8297 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
@@ -58,11 +58,171 @@ static void
58nouveau_therm_temp_safety_checks(struct nouveau_therm *therm) 58nouveau_therm_temp_safety_checks(struct nouveau_therm *therm)
59{ 59{
60 struct nouveau_therm_priv *priv = (void *)therm; 60 struct nouveau_therm_priv *priv = (void *)therm;
61 struct nvbios_therm_sensor *s = &priv->bios_sensor;
61 62
62 if (!priv->bios_sensor.slope_div) 63 if (!priv->bios_sensor.slope_div)
63 priv->bios_sensor.slope_div = 1; 64 priv->bios_sensor.slope_div = 1;
64 if (!priv->bios_sensor.offset_den) 65 if (!priv->bios_sensor.offset_den)
65 priv->bios_sensor.offset_den = 1; 66 priv->bios_sensor.offset_den = 1;
67
68 /* enforce a minimum hysteresis on thresholds */
69 s->thrs_fan_boost.hysteresis = max_t(u8, s->thrs_fan_boost.hysteresis, 2);
70 s->thrs_down_clock.hysteresis = max_t(u8, s->thrs_down_clock.hysteresis, 2);
71 s->thrs_critical.hysteresis = max_t(u8, s->thrs_critical.hysteresis, 2);
72 s->thrs_shutdown.hysteresis = max_t(u8, s->thrs_shutdown.hysteresis, 2);
73}
74
75/* must be called with alarm_program_lock taken! */
76void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm,
77 enum nouveau_therm_thrs thrs,
78 enum nouveau_therm_thrs_state st)
79{
80 struct nouveau_therm_priv *priv = (void *)therm;
81 priv->sensor.alarm_state[thrs] = st;
82}
83
84/* must be called with alarm_program_lock taken! */
85enum nouveau_therm_thrs_state
86nouveau_therm_sensor_get_threshold_state(struct nouveau_therm *therm,
87 enum nouveau_therm_thrs thrs)
88{
89 struct nouveau_therm_priv *priv = (void *)therm;
90 return priv->sensor.alarm_state[thrs];
91}
92
93static void
94nv_poweroff_work(struct work_struct *work)
95{
96 orderly_poweroff(true);
97 kfree(work);
98}
99
100void nouveau_therm_sensor_event(struct nouveau_therm *therm,
101 enum nouveau_therm_thrs thrs,
102 enum nouveau_therm_thrs_direction dir)
103{
104 struct nouveau_therm_priv *priv = (void *)therm;
105 bool active;
106	const char *thresholds[] = {
107 "fanboost", "downclock", "critical", "shutdown"
108 };
109 uint8_t temperature = therm->temp_get(therm);
110
111 if (thrs < 0 || thrs > 3)
112 return;
113
114 if (dir == NOUVEAU_THERM_THRS_FALLING)
115 nv_info(therm, "temperature (%u C) went below the '%s' threshold\n",
116 temperature, thresolds[thrs]);
117 else
118 nv_info(therm, "temperature (%u C) hit the '%s' threshold\n",
119 temperature, thresolds[thrs]);
120
121 active = (dir == NOUVEAU_THERM_THRS_RISING);
122 switch (thrs) {
123 case NOUVEAU_THERM_THRS_FANBOOST:
124 if (active) {
125 nouveau_therm_fan_set(therm, true, 100);
126 nouveau_therm_mode(therm, NOUVEAU_THERM_CTRL_AUTO);
127 }
128 break;
129 case NOUVEAU_THERM_THRS_DOWNCLOCK:
130 if (priv->emergency.downclock)
131 priv->emergency.downclock(therm, active);
132 break;
133 case NOUVEAU_THERM_THRS_CRITICAL:
134 if (priv->emergency.pause)
135 priv->emergency.pause(therm, active);
136 break;
137 case NOUVEAU_THERM_THRS_SHUTDOWN:
138 if (active) {
139 struct work_struct *work;
140
141 work = kmalloc(sizeof(*work), GFP_ATOMIC);
142 if (work) {
143 INIT_WORK(work, nv_poweroff_work);
144 schedule_work(work);
145 }
146 }
147 break;
148 case NOUVEAU_THERM_THRS_NR:
149 break;
150 }
151
152}
153
154/* must be called with alarm_program_lock taken ! */
155static void
156nouveau_therm_threshold_hyst_polling(struct nouveau_therm *therm,
157 const struct nvbios_therm_threshold *thrs,
158 enum nouveau_therm_thrs thrs_name)
159{
160 enum nouveau_therm_thrs_direction direction;
161 enum nouveau_therm_thrs_state prev_state, new_state;
162 int temp = therm->temp_get(therm);
163
164 prev_state = nouveau_therm_sensor_get_threshold_state(therm, thrs_name);
165
166 if (temp >= thrs->temp && prev_state == NOUVEAU_THERM_THRS_LOWER) {
167 direction = NOUVEAU_THERM_THRS_RISING;
168 new_state = NOUVEAU_THERM_THRS_HIGHER;
169 } else if (temp <= thrs->temp - thrs->hysteresis &&
170 prev_state == NOUVEAU_THERM_THRS_HIGHER) {
171 direction = NOUVEAU_THERM_THRS_FALLING;
172 new_state = NOUVEAU_THERM_THRS_LOWER;
173 } else
174 return; /* nothing to do */
175
176 nouveau_therm_sensor_set_threshold_state(therm, thrs_name, new_state);
177 nouveau_therm_sensor_event(therm, thrs_name, direction);
178}
179
180static void
181alarm_timer_callback(struct nouveau_alarm *alarm)
182{
183 struct nouveau_therm_priv *priv =
184 container_of(alarm, struct nouveau_therm_priv, sensor.therm_poll_alarm);
185 struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
186 struct nouveau_timer *ptimer = nouveau_timer(priv);
187 struct nouveau_therm *therm = &priv->base;
188 unsigned long flags;
189
190 spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
191
192 nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_fan_boost,
193 NOUVEAU_THERM_THRS_FANBOOST);
194
195 nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_down_clock,
196 NOUVEAU_THERM_THRS_DOWNCLOCK);
197
198 nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_critical,
199 NOUVEAU_THERM_THRS_CRITICAL);
200
201 nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown,
202 NOUVEAU_THERM_THRS_SHUTDOWN);
203
204 /* schedule the next poll in one second */
205 if (list_empty(&alarm->head))
206 ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm);
207
208 spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
209}
210
211void
212nouveau_therm_program_alarms_polling(struct nouveau_therm *therm)
213{
214 struct nouveau_therm_priv *priv = (void *)therm;
215 struct nvbios_therm_sensor *sensor = &priv->bios_sensor;
216
217 nv_info(therm,
218 "programmed thresholds [ %d(%d), %d(%d), %d(%d), %d(%d) ]\n",
219 sensor->thrs_fan_boost.temp, sensor->thrs_fan_boost.hysteresis,
220 sensor->thrs_down_clock.temp,
221 sensor->thrs_down_clock.hysteresis,
222 sensor->thrs_critical.temp, sensor->thrs_critical.hysteresis,
223 sensor->thrs_shutdown.temp, sensor->thrs_shutdown.hysteresis);
224
225 alarm_timer_callback(&priv->sensor.therm_poll_alarm);
66} 226}
67 227
68int 228int
@@ -71,6 +231,8 @@ nouveau_therm_sensor_ctor(struct nouveau_therm *therm)
71 struct nouveau_therm_priv *priv = (void *)therm; 231 struct nouveau_therm_priv *priv = (void *)therm;
72 struct nouveau_bios *bios = nouveau_bios(therm); 232 struct nouveau_bios *bios = nouveau_bios(therm);
73 233
234 nouveau_alarm_init(&priv->sensor.therm_poll_alarm, alarm_timer_callback);
235
74 nouveau_therm_temp_set_defaults(therm); 236 nouveau_therm_temp_set_defaults(therm);
75 if (nvbios_therm_sensor_parse(bios, NVBIOS_THERM_DOMAIN_CORE, 237 if (nvbios_therm_sensor_parse(bios, NVBIOS_THERM_DOMAIN_CORE,
76 &priv->bios_sensor)) 238 &priv->bios_sensor))
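The polling fallback added to temp.c keeps itself alive by re-arming: alarm_timer_callback() checks every threshold, then reschedules itself through ptimer->alarm() with a 1000 * 1000 * 1000 ns (one second) delay, but only when list_empty(&alarm->head) shows the alarm is not already queued. A minimal sketch of that self-rescheduling shape, with a hypothetical name:

	/* Hypothetical self-rescheduling poll, mirroring alarm_timer_callback().
	 * The delay argument is in nanoseconds, so the literal is one second. */
	static void
	example_poll(struct nouveau_timer *ptimer, struct nouveau_alarm *alarm)
	{
		/* ... inspect sensors, raise threshold events ... */
		if (list_empty(&alarm->head))   /* not already pending */
			ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm);
	}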
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
index c26ca9bef671..8e1bae4f12e8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
@@ -79,7 +79,7 @@ nv04_timer_alarm_trigger(struct nouveau_timer *ptimer)
79 79
80 /* execute any pending alarm handlers */ 80 /* execute any pending alarm handlers */
81 list_for_each_entry_safe(alarm, atemp, &exec, head) { 81 list_for_each_entry_safe(alarm, atemp, &exec, head) {
82 list_del(&alarm->head); 82 list_del_init(&alarm->head);
83 alarm->func(alarm); 83 alarm->func(alarm);
84 } 84 }
85} 85}
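The one-line timer change above is what makes that list_empty() test safe: plain list_del() sets the node's pointers to poison values, so a later list_empty(&alarm->head) would give a meaningless answer, whereas list_del_init() leaves the node pointing at itself. In short:

	/* Valid only because the trigger path now uses list_del_init();
	 * after plain list_del() the pointers are poisoned and this test
	 * would be unreliable. */
	static bool
	example_alarm_pending(struct nouveau_alarm *alarm)
	{
		return !list_empty(&alarm->head);
	}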
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.h b/drivers/gpu/drm/nouveau/nouveau_acpi.h
index d0da230d7706..74acf0f87785 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.h
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.h
@@ -3,7 +3,7 @@
3 3
4#define ROM_BIOS_PAGE 4096 4#define ROM_BIOS_PAGE 4096
5 5
6#if defined(CONFIG_ACPI) 6#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
7bool nouveau_is_optimus(void); 7bool nouveau_is_optimus(void);
8bool nouveau_is_v1_dsm(void); 8bool nouveau_is_v1_dsm(void);
9void nouveau_register_dsm_handler(void); 9void nouveau_register_dsm_handler(void);
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index f65b20a375f6..5d940302d2aa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -84,6 +84,8 @@ nv40_backlight_init(struct drm_connector *connector)
84 props.max_brightness = 31; 84 props.max_brightness = 31;
85 bd = backlight_device_register("nv_backlight", &connector->kdev, drm, 85 bd = backlight_device_register("nv_backlight", &connector->kdev, drm,
86 &nv40_bl_ops, &props); 86 &nv40_bl_ops, &props);
87 if (IS_ERR(bd))
88 return PTR_ERR(bd);
87 drm->backlight = bd; 89 drm->backlight = bd;
88 bd->props.brightness = nv40_get_intensity(bd); 90 bd->props.brightness = nv40_get_intensity(bd);
89 backlight_update_status(bd); 91 backlight_update_status(bd);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 865eddfa30a7..50a6dd02f7c5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -678,23 +678,6 @@ int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head,
678 return 0; 678 return 0;
679} 679}
680 680
681static void parse_bios_version(struct drm_device *dev, struct nvbios *bios, uint16_t offset)
682{
683 /*
684 * offset + 0 (8 bits): Micro version
685 * offset + 1 (8 bits): Minor version
686 * offset + 2 (8 bits): Chip version
687 * offset + 3 (8 bits): Major version
688 */
689 struct nouveau_drm *drm = nouveau_drm(dev);
690
691 bios->major_version = bios->data[offset + 3];
692 bios->chip_version = bios->data[offset + 2];
693 NV_INFO(drm, "Bios version %02x.%02x.%02x.%02x\n",
694 bios->data[offset + 3], bios->data[offset + 2],
695 bios->data[offset + 1], bios->data[offset]);
696}
697
698static void parse_script_table_pointers(struct nvbios *bios, uint16_t offset) 681static void parse_script_table_pointers(struct nvbios *bios, uint16_t offset)
699{ 682{
700 /* 683 /*
@@ -710,12 +693,6 @@ static void parse_script_table_pointers(struct nvbios *bios, uint16_t offset)
710 */ 693 */
711 694
712 bios->init_script_tbls_ptr = ROM16(bios->data[offset]); 695 bios->init_script_tbls_ptr = ROM16(bios->data[offset]);
713 bios->macro_index_tbl_ptr = ROM16(bios->data[offset + 2]);
714 bios->macro_tbl_ptr = ROM16(bios->data[offset + 4]);
715 bios->condition_tbl_ptr = ROM16(bios->data[offset + 6]);
716 bios->io_condition_tbl_ptr = ROM16(bios->data[offset + 8]);
717 bios->io_flag_condition_tbl_ptr = ROM16(bios->data[offset + 10]);
718 bios->init_function_tbl_ptr = ROM16(bios->data[offset + 12]);
719} 696}
720 697
721static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry) 698static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
@@ -765,25 +742,6 @@ static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
765 return 0; 742 return 0;
766} 743}
767 744
768static int parse_bit_C_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
769{
770 /*
771 * offset + 8 (16 bits): PLL limits table pointer
772 *
773 * There's more in here, but that's unknown.
774 */
775 struct nouveau_drm *drm = nouveau_drm(dev);
776
777 if (bitentry->length < 10) {
778 NV_ERROR(drm, "Do not understand BIT C table\n");
779 return -EINVAL;
780 }
781
782 bios->pll_limit_tbl_ptr = ROM16(bios->data[bitentry->offset + 8]);
783
784 return 0;
785}
786
787static int parse_bit_display_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry) 745static int parse_bit_display_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
788{ 746{
789 /* 747 /*
@@ -821,12 +779,6 @@ static int parse_bit_init_tbl_entry(struct drm_device *dev, struct nvbios *bios,
821 } 779 }
822 780
823 parse_script_table_pointers(bios, bitentry->offset); 781 parse_script_table_pointers(bios, bitentry->offset);
824
825 if (bitentry->length >= 16)
826 bios->some_script_ptr = ROM16(bios->data[bitentry->offset + 14]);
827 if (bitentry->length >= 18)
828 bios->init96_tbl_ptr = ROM16(bios->data[bitentry->offset + 16]);
829
830 return 0; 782 return 0;
831} 783}
832 784
@@ -852,8 +804,6 @@ static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
852 return -EINVAL; 804 return -EINVAL;
853 } 805 }
854 806
855 parse_bios_version(dev, bios, bitentry->offset);
856
857 /* 807 /*
858 * bit 4 seems to indicate a mobile bios (doesn't suffer from BMP's 808 * bit 4 seems to indicate a mobile bios (doesn't suffer from BMP's
859 * Quadro identity crisis), other bits possibly as for BMP feature byte 809 * Quadro identity crisis), other bits possibly as for BMP feature byte
@@ -1078,9 +1028,6 @@ parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset)
1078 return ret; 1028 return ret;
1079 if (bios->major_version >= 0x60) /* g80+ */ 1029 if (bios->major_version >= 0x60) /* g80+ */
1080 parse_bit_table(bios, bitoffset, &BIT_TABLE('A', A)); 1030 parse_bit_table(bios, bitoffset, &BIT_TABLE('A', A));
1081 ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('C', C));
1082 if (ret)
1083 return ret;
1084 parse_bit_table(bios, bitoffset, &BIT_TABLE('D', display)); 1031 parse_bit_table(bios, bitoffset, &BIT_TABLE('D', display));
1085 ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('I', init)); 1032 ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('I', init));
1086 if (ret) 1033 if (ret)
@@ -1228,8 +1175,6 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
1228 */ 1175 */
1229 bios->feature_byte = bmp[9]; 1176 bios->feature_byte = bmp[9];
1230 1177
1231 parse_bios_version(dev, bios, offset + 10);
1232
1233 if (bmp_version_major < 5 || bmp_version_minor < 0x10) 1178 if (bmp_version_major < 5 || bmp_version_minor < 0x10)
1234 bios->old_style_init = true; 1179 bios->old_style_init = true;
1235 legacy_scripts_offset = 18; 1180 legacy_scripts_offset = 18;
@@ -1276,8 +1221,10 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
1276 bios->fp.lvdsmanufacturerpointer = ROM16(bmp[117]); 1221 bios->fp.lvdsmanufacturerpointer = ROM16(bmp[117]);
1277 bios->fp.fpxlatemanufacturertableptr = ROM16(bmp[119]); 1222 bios->fp.fpxlatemanufacturertableptr = ROM16(bmp[119]);
1278 } 1223 }
1224#if 0
1279 if (bmplength > 143) 1225 if (bmplength > 143)
1280 bios->pll_limit_tbl_ptr = ROM16(bmp[142]); 1226 bios->pll_limit_tbl_ptr = ROM16(bmp[142]);
1227#endif
1281 1228
1282 if (bmplength > 157) 1229 if (bmplength > 157)
1283 bios->fp.duallink_transition_clk = ROM16(bmp[156]) * 10; 1230 bios->fp.duallink_transition_clk = ROM16(bmp[156]) * 10;
@@ -1522,6 +1469,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
1522 } 1469 }
1523 case DCB_OUTPUT_DP: 1470 case DCB_OUTPUT_DP:
1524 entry->dpconf.sor.link = (conf & 0x00000030) >> 4; 1471 entry->dpconf.sor.link = (conf & 0x00000030) >> 4;
1472 entry->extdev = (conf & 0x0000ff00) >> 8;
1525 switch ((conf & 0x00e00000) >> 21) { 1473 switch ((conf & 0x00e00000) >> 21) {
1526 case 0: 1474 case 0:
1527 entry->dpconf.link_bw = 162000; 1475 entry->dpconf.link_bw = 162000;
@@ -1543,8 +1491,10 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
1543 } 1491 }
1544 break; 1492 break;
1545 case DCB_OUTPUT_TMDS: 1493 case DCB_OUTPUT_TMDS:
1546 if (dcb->version >= 0x40) 1494 if (dcb->version >= 0x40) {
1547 entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4; 1495 entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4;
1496 entry->extdev = (conf & 0x0000ff00) >> 8;
1497 }
1548 else if (dcb->version >= 0x30) 1498 else if (dcb->version >= 0x30)
1549 entry->tmdsconf.slave_addr = (conf & 0x00000700) >> 8; 1499 entry->tmdsconf.slave_addr = (conf & 0x00000700) >> 8;
1550 else if (dcb->version >= 0x22) 1500 else if (dcb->version >= 0x22)
@@ -1937,9 +1887,9 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
1937 if (conn[0] != 0xff) { 1887 if (conn[0] != 0xff) {
1938 NV_INFO(drm, "DCB conn %02d: ", idx); 1888 NV_INFO(drm, "DCB conn %02d: ", idx);
1939 if (olddcb_conntab(dev)[3] < 4) 1889 if (olddcb_conntab(dev)[3] < 4)
1940 printk("%04x\n", ROM16(conn[0])); 1890 pr_cont("%04x\n", ROM16(conn[0]));
1941 else 1891 else
1942 printk("%08x\n", ROM32(conn[0])); 1892 pr_cont("%08x\n", ROM32(conn[0]));
1943 } 1893 }
1944 } 1894 }
1945 dcb_fake_connectors(bios); 1895 dcb_fake_connectors(bios);
@@ -2052,45 +2002,29 @@ uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
2052static bool NVInitVBIOS(struct drm_device *dev) 2002static bool NVInitVBIOS(struct drm_device *dev)
2053{ 2003{
2054 struct nouveau_drm *drm = nouveau_drm(dev); 2004 struct nouveau_drm *drm = nouveau_drm(dev);
2055 struct nvbios *bios = &drm->vbios; 2005 struct nouveau_bios *bios = nouveau_bios(drm->device);
2056 2006 struct nvbios *legacy = &drm->vbios;
2057 memset(bios, 0, sizeof(struct nvbios)); 2007
2058 spin_lock_init(&bios->lock); 2008 memset(legacy, 0, sizeof(struct nvbios));
2059 bios->dev = dev; 2009 spin_lock_init(&legacy->lock);
2060 2010 legacy->dev = dev;
2061 bios->data = nouveau_bios(drm->device)->data; 2011
2062 bios->length = nouveau_bios(drm->device)->size; 2012 legacy->data = bios->data;
2063 return true; 2013 legacy->length = bios->size;
2064} 2014 legacy->major_version = bios->version.major;
2015 legacy->chip_version = bios->version.chip;
2016 if (bios->bit_offset) {
2017 legacy->type = NVBIOS_BIT;
2018 legacy->offset = bios->bit_offset;
2019 return !parse_bit_structure(legacy, legacy->offset + 6);
2020 } else
2021 if (bios->bmp_offset) {
2022 legacy->type = NVBIOS_BMP;
2023 legacy->offset = bios->bmp_offset;
2024 return !parse_bmp_structure(dev, legacy, legacy->offset);
2025 }
2065 2026
2066static int nouveau_parse_vbios_struct(struct drm_device *dev) 2027 return false;
2067{
2068 struct nouveau_drm *drm = nouveau_drm(dev);
2069 struct nvbios *bios = &drm->vbios;
2070 const uint8_t bit_signature[] = { 0xff, 0xb8, 'B', 'I', 'T' };
2071 const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 };
2072 int offset;
2073
2074 offset = findstr(bios->data, bios->length,
2075 bit_signature, sizeof(bit_signature));
2076 if (offset) {
2077 NV_INFO(drm, "BIT BIOS found\n");
2078 bios->type = NVBIOS_BIT;
2079 bios->offset = offset;
2080 return parse_bit_structure(bios, offset + 6);
2081 }
2082
2083 offset = findstr(bios->data, bios->length,
2084 bmp_signature, sizeof(bmp_signature));
2085 if (offset) {
2086 NV_INFO(drm, "BMP BIOS found\n");
2087 bios->type = NVBIOS_BMP;
2088 bios->offset = offset;
2089 return parse_bmp_structure(dev, bios, offset);
2090 }
2091
2092 NV_ERROR(drm, "No known BIOS signature found\n");
2093 return -ENODEV;
2094} 2028}
2095 2029
2096int 2030int
@@ -2146,10 +2080,6 @@ nouveau_bios_init(struct drm_device *dev)
2146 if (!NVInitVBIOS(dev)) 2080 if (!NVInitVBIOS(dev))
2147 return -ENODEV; 2081 return -ENODEV;
2148 2082
2149 ret = nouveau_parse_vbios_struct(dev);
2150 if (ret)
2151 return ret;
2152
2153 ret = parse_dcb_table(dev, bios); 2083 ret = parse_dcb_table(dev, bios);
2154 if (ret) 2084 if (ret)
2155 return ret; 2085 return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index f68c54ca422f..7ccd28f11adf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -107,20 +107,10 @@ struct nvbios {
107 bool old_style_init; 107 bool old_style_init;
108 uint16_t init_script_tbls_ptr; 108 uint16_t init_script_tbls_ptr;
109 uint16_t extra_init_script_tbl_ptr; 109 uint16_t extra_init_script_tbl_ptr;
110 uint16_t macro_index_tbl_ptr; 110
111 uint16_t macro_tbl_ptr;
112 uint16_t condition_tbl_ptr;
113 uint16_t io_condition_tbl_ptr;
114 uint16_t io_flag_condition_tbl_ptr;
115 uint16_t init_function_tbl_ptr;
116
117 uint16_t pll_limit_tbl_ptr;
118 uint16_t ram_restrict_tbl_ptr; 111 uint16_t ram_restrict_tbl_ptr;
119 uint8_t ram_restrict_group_count; 112 uint8_t ram_restrict_group_count;
120 113
121 uint16_t some_script_ptr; /* BIT I + 14 */
122 uint16_t init96_tbl_ptr; /* BIT I + 16 */
123
124 struct dcb_table dcb; 114 struct dcb_table dcb;
125 115
126 struct { 116 struct {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 1699a9083a2f..11ca82148edc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -301,17 +301,18 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
301 struct ttm_buffer_object *bo = &nvbo->bo; 301 struct ttm_buffer_object *bo = &nvbo->bo;
302 int ret; 302 int ret;
303 303
304 ret = ttm_bo_reserve(bo, false, false, false, 0);
305 if (ret)
306 goto out;
307
304 if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) { 308 if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
305 NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo, 309 NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
306 1 << bo->mem.mem_type, memtype); 310 1 << bo->mem.mem_type, memtype);
307 return -EINVAL; 311 ret = -EINVAL;
312 goto out;
308 } 313 }
309 314
310 if (nvbo->pin_refcnt++) 315 if (nvbo->pin_refcnt++)
311 return 0;
312
313 ret = ttm_bo_reserve(bo, false, false, false, 0);
314 if (ret)
315 goto out; 316 goto out;
316 317
317 nouveau_bo_placement_set(nvbo, memtype, 0); 318 nouveau_bo_placement_set(nvbo, memtype, 0);
@@ -329,10 +330,8 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
329 break; 330 break;
330 } 331 }
331 } 332 }
332 ttm_bo_unreserve(bo);
333out: 333out:
334 if (unlikely(ret)) 334 ttm_bo_unreserve(bo);
335 nvbo->pin_refcnt--;
336 return ret; 335 return ret;
337} 336}
338 337
@@ -343,13 +342,13 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
343 struct ttm_buffer_object *bo = &nvbo->bo; 342 struct ttm_buffer_object *bo = &nvbo->bo;
344 int ret; 343 int ret;
345 344
346 if (--nvbo->pin_refcnt)
347 return 0;
348
349 ret = ttm_bo_reserve(bo, false, false, false, 0); 345 ret = ttm_bo_reserve(bo, false, false, false, 0);
350 if (ret) 346 if (ret)
351 return ret; 347 return ret;
352 348
349 if (--nvbo->pin_refcnt)
350 goto out;
351
353 nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); 352 nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
354 353
355 ret = nouveau_bo_validate(nvbo, false, false); 354 ret = nouveau_bo_validate(nvbo, false, false);
@@ -366,6 +365,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
366 } 365 }
367 } 366 }
368 367
368out:
369 ttm_bo_unreserve(bo); 369 ttm_bo_unreserve(bo);
370 return ret; 370 return ret;
371} 371}
@@ -562,7 +562,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
562 struct nouveau_fence *fence = NULL; 562 struct nouveau_fence *fence = NULL;
563 int ret; 563 int ret;
564 564
565 ret = nouveau_fence_new(chan, &fence); 565 ret = nouveau_fence_new(chan, false, &fence);
566 if (ret) 566 if (ret)
567 return ret; 567 return ret;
568 568
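The reshuffle in nouveau_bo_pin()/nouveau_bo_unpin() pairs with the new comment in nouveau_bo.h below: pin_refcnt is now documented as protected by the ttm reservation lock, so both paths take ttm_bo_reserve() before reading or modifying the count and funnel every exit through a single unreserve. The resulting pattern, sketched with a hypothetical wrapper:

	/* Sketch of the locking discipline the rework enforces: touch
	 * pin_refcnt only while the buffer object is reserved. */
	static int
	example_pin_locked(struct nouveau_bo *nvbo)
	{
		int ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
		if (ret)
			return ret;
		nvbo->pin_refcnt++;          /* safe: reservation held */
		ttm_bo_unreserve(&nvbo->bo);
		return 0;
	}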
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 25ca37989d2c..653dbbbd4fa1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -28,10 +28,11 @@ struct nouveau_bo {
28 struct nouveau_drm_tile *tile; 28 struct nouveau_drm_tile *tile;
29 29
30 struct drm_gem_object *gem; 30 struct drm_gem_object *gem;
31
32 /* protect by the ttm reservation lock */
31 int pin_refcnt; 33 int pin_refcnt;
32 34
33 struct ttm_bo_kmap_obj dma_buf_vmap; 35 struct ttm_bo_kmap_obj dma_buf_vmap;
34 int vmapping_count;
35}; 36};
36 37
37static inline struct nouveau_bo * 38static inline struct nouveau_bo *
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 174300b6a02e..eaa80a2b81ee 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -51,14 +51,15 @@ nouveau_channel_idle(struct nouveau_channel *chan)
51 struct nouveau_fence *fence = NULL; 51 struct nouveau_fence *fence = NULL;
52 int ret; 52 int ret;
53 53
54 ret = nouveau_fence_new(chan, &fence); 54 ret = nouveau_fence_new(chan, false, &fence);
55 if (!ret) { 55 if (!ret) {
56 ret = nouveau_fence_wait(fence, false, false); 56 ret = nouveau_fence_wait(fence, false, false);
57 nouveau_fence_unref(&fence); 57 nouveau_fence_unref(&fence);
58 } 58 }
59 59
60 if (ret) 60 if (ret)
61 NV_ERROR(cli, "failed to idle channel 0x%08x\n", chan->handle); 61 NV_ERROR(cli, "failed to idle channel 0x%08x [%s]\n",
62 chan->handle, cli->base.name);
62 return ret; 63 return ret;
63} 64}
64 65
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index e620ba8271b4..4dd7ae2ac6c6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -55,8 +55,6 @@ MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (default: enabled)");
55static int nouveau_duallink = 1; 55static int nouveau_duallink = 1;
56module_param_named(duallink, nouveau_duallink, int, 0400); 56module_param_named(duallink, nouveau_duallink, int, 0400);
57 57
58static void nouveau_connector_hotplug(void *, int);
59
60struct nouveau_encoder * 58struct nouveau_encoder *
61find_encoder(struct drm_connector *connector, int type) 59find_encoder(struct drm_connector *connector, int type)
62{ 60{
@@ -100,22 +98,6 @@ static void
100nouveau_connector_destroy(struct drm_connector *connector) 98nouveau_connector_destroy(struct drm_connector *connector)
101{ 99{
102 struct nouveau_connector *nv_connector = nouveau_connector(connector); 100 struct nouveau_connector *nv_connector = nouveau_connector(connector);
103 struct nouveau_gpio *gpio;
104 struct nouveau_drm *drm;
105 struct drm_device *dev;
106
107 if (!nv_connector)
108 return;
109
110 dev = nv_connector->base.dev;
111 drm = nouveau_drm(dev);
112 gpio = nouveau_gpio(drm->device);
113
114 if (gpio && nv_connector->hpd != DCB_GPIO_UNUSED) {
115 gpio->isr_del(gpio, 0, nv_connector->hpd, 0xff,
116 nouveau_connector_hotplug, connector);
117 }
118
119 kfree(nv_connector->edid); 101 kfree(nv_connector->edid);
120 drm_sysfs_connector_remove(connector); 102 drm_sysfs_connector_remove(connector);
121 drm_connector_cleanup(connector); 103 drm_connector_cleanup(connector);
@@ -130,7 +112,6 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
130 struct nouveau_connector *nv_connector = nouveau_connector(connector); 112 struct nouveau_connector *nv_connector = nouveau_connector(connector);
131 struct nouveau_drm *drm = nouveau_drm(dev); 113 struct nouveau_drm *drm = nouveau_drm(dev);
132 struct nouveau_gpio *gpio = nouveau_gpio(drm->device); 114 struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
133 struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
134 struct nouveau_i2c_port *port = NULL; 115 struct nouveau_i2c_port *port = NULL;
135 int i, panel = -ENODEV; 116 int i, panel = -ENODEV;
136 117
@@ -160,8 +141,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
160 continue; 141 continue;
161 nv_encoder = nouveau_encoder(obj_to_encoder(obj)); 142 nv_encoder = nouveau_encoder(obj_to_encoder(obj));
162 143
163 if (nv_encoder->dcb->i2c_index < 0xf) 144 port = nv_encoder->i2c;
164 port = i2c->find(i2c, nv_encoder->dcb->i2c_index);
165 if (port && nv_probe_i2c(port, 0x50)) { 145 if (port && nv_probe_i2c(port, 0x50)) {
166 *pnv_encoder = nv_encoder; 146 *pnv_encoder = nv_encoder;
167 break; 147 break;
@@ -399,9 +379,10 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
399 struct edid *edid = 379 struct edid *edid =
400 (struct edid *)nouveau_bios_embedded_edid(dev); 380 (struct edid *)nouveau_bios_embedded_edid(dev);
401 if (edid) { 381 if (edid) {
402 nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL); 382 nv_connector->edid =
403 *(nv_connector->edid) = *edid; 383 kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
404 status = connector_status_connected; 384 if (nv_connector->edid)
385 status = connector_status_connected;
405 } 386 }
406 } 387 }
407 388
@@ -911,6 +892,37 @@ nouveau_connector_funcs_lvds = {
911 .force = nouveau_connector_force 892 .force = nouveau_connector_force
912}; 893};
913 894
895static void
896nouveau_connector_hotplug_work(struct work_struct *work)
897{
898 struct nouveau_connector *nv_connector =
899 container_of(work, struct nouveau_connector, hpd_work);
900 struct drm_connector *connector = &nv_connector->base;
901 struct drm_device *dev = connector->dev;
902 struct nouveau_drm *drm = nouveau_drm(dev);
903 struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
904 bool plugged = gpio->get(gpio, 0, nv_connector->hpd.func, 0xff);
905
906 NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un",
907 drm_get_connector_name(connector));
908
909 if (plugged)
910 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
911 else
912 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
913
914 drm_helper_hpd_irq_event(dev);
915}
916
917static int
918nouveau_connector_hotplug(struct nouveau_eventh *event, int index)
919{
920 struct nouveau_connector *nv_connector =
921 container_of(event, struct nouveau_connector, hpd_func);
922 schedule_work(&nv_connector->hpd_work);
923 return NVKM_EVENT_KEEP;
924}
925
914static int 926static int
915drm_conntype_from_dcb(enum dcb_connector_type dcb) 927drm_conntype_from_dcb(enum dcb_connector_type dcb)
916{ 928{
@@ -961,6 +973,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
961 return ERR_PTR(-ENOMEM); 973 return ERR_PTR(-ENOMEM);
962 974
963 connector = &nv_connector->base; 975 connector = &nv_connector->base;
976 INIT_WORK(&nv_connector->hpd_work, nouveau_connector_hotplug_work);
964 nv_connector->index = index; 977 nv_connector->index = index;
965 978
966 /* attempt to parse vbios connector type and hotplug gpio */ 979 /* attempt to parse vbios connector type and hotplug gpio */
@@ -975,8 +988,11 @@ nouveau_connector_create(struct drm_device *dev, int index)
975 if (olddcb_conntab(dev)[3] >= 4) 988 if (olddcb_conntab(dev)[3] >= 4)
976 entry |= (u32)ROM16(nv_connector->dcb[2]) << 16; 989 entry |= (u32)ROM16(nv_connector->dcb[2]) << 16;
977 990
978 nv_connector->hpd = ffs((entry & 0x07033000) >> 12); 991 ret = gpio->find(gpio, 0, hpd[ffs((entry & 0x07033000) >> 12)],
979 nv_connector->hpd = hpd[nv_connector->hpd]; 992 DCB_GPIO_UNUSED, &nv_connector->hpd);
993 nv_connector->hpd_func.func = nouveau_connector_hotplug;
994 if (ret)
995 nv_connector->hpd.func = DCB_GPIO_UNUSED;
980 996
981 nv_connector->type = nv_connector->dcb[0]; 997 nv_connector->type = nv_connector->dcb[0];
982 if (drm_conntype_from_dcb(nv_connector->type) == 998 if (drm_conntype_from_dcb(nv_connector->type) ==
@@ -999,7 +1015,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
999 } 1015 }
1000 } else { 1016 } else {
1001 nv_connector->type = DCB_CONNECTOR_NONE; 1017 nv_connector->type = DCB_CONNECTOR_NONE;
1002 nv_connector->hpd = DCB_GPIO_UNUSED; 1018 nv_connector->hpd.func = DCB_GPIO_UNUSED;
1003 } 1019 }
1004 1020
1005 /* no vbios data, or an unknown dcb connector type - attempt to 1021 /* no vbios data, or an unknown dcb connector type - attempt to
@@ -1126,31 +1142,9 @@ nouveau_connector_create(struct drm_device *dev, int index)
1126 } 1142 }
1127 1143
1128 connector->polled = DRM_CONNECTOR_POLL_CONNECT; 1144 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
1129 if (gpio && nv_connector->hpd != DCB_GPIO_UNUSED) { 1145 if (nv_connector->hpd.func != DCB_GPIO_UNUSED)
1130 ret = gpio->isr_add(gpio, 0, nv_connector->hpd, 0xff, 1146 connector->polled = DRM_CONNECTOR_POLL_HPD;
1131 nouveau_connector_hotplug, connector);
1132 if (ret == 0)
1133 connector->polled = DRM_CONNECTOR_POLL_HPD;
1134 }
1135 1147
1136 drm_sysfs_connector_add(connector); 1148 drm_sysfs_connector_add(connector);
1137 return connector; 1149 return connector;
1138} 1150}
1139
1140static void
1141nouveau_connector_hotplug(void *data, int plugged)
1142{
1143 struct drm_connector *connector = data;
1144 struct drm_device *dev = connector->dev;
1145 struct nouveau_drm *drm = nouveau_drm(dev);
1146
1147 NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un",
1148 drm_get_connector_name(connector));
1149
1150 if (plugged)
1151 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
1152 else
1153 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1154
1155 drm_helper_hpd_irq_event(dev);
1156}
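
The hotplug rework above is the standard interrupt/process-context split: the event handler fires from the GPIO interrupt path and must not sleep, so it only schedules a work item, and the calls that can sleep (DPMS, drm_helper_hpd_irq_event()) move into the work function. The pattern in isolation, with hypothetical names:

#include <linux/workqueue.h>

struct hpd_state {
        struct work_struct work;
        /* ... whatever state the worker needs ... */
};

/* process context: free to sleep */
static void hpd_work_fn(struct work_struct *work)
{
        struct hpd_state *st = container_of(work, struct hpd_state, work);

        (void)st;       /* drm_helper_connector_dpms(), drm_helper_hpd_irq_event(), ... */
}

/* event/interrupt context: must not sleep */
static void hpd_event_fn(struct hpd_state *st)
{
        schedule_work(&st->work);       /* safe from atomic context */
}

/* setup, once: INIT_WORK(&st->work, hpd_work_fn); */
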
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 20eb84cce9e6..6e399aad491a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -30,6 +30,11 @@
30#include <drm/drm_edid.h> 30#include <drm/drm_edid.h>
31#include "nouveau_crtc.h" 31#include "nouveau_crtc.h"
32 32
33#include <core/event.h>
34
35#include <subdev/bios.h>
36#include <subdev/bios/gpio.h>
37
33struct nouveau_i2c_port; 38struct nouveau_i2c_port;
34 39
35enum nouveau_underscan_type { 40enum nouveau_underscan_type {
@@ -61,7 +66,10 @@ struct nouveau_connector {
61 enum dcb_connector_type type; 66 enum dcb_connector_type type;
62 u8 index; 67 u8 index;
63 u8 *dcb; 68 u8 *dcb;
64 u8 hpd; 69
70 struct dcb_gpio_func hpd;
71 struct work_struct hpd_work;
72 struct nouveau_eventh hpd_func;
65 73
66 int dithering_mode; 74 int dithering_mode;
67 int dithering_depth; 75 int dithering_depth;
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
new file mode 100644
index 000000000000..5392e07edfc6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -0,0 +1,64 @@
1/*
2 * Copyright (C) 2009 Red Hat <bskeggs@redhat.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sublicense, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial
14 * portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
19 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
20 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
22 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 */
25
26/*
27 * Authors:
28 * Ben Skeggs <bskeggs@redhat.com>
29 */
30
31#include "nouveau_debugfs.h"
32#include "nouveau_drm.h"
33
34static int
35nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
36{
37 struct drm_info_node *node = (struct drm_info_node *) m->private;
38 struct nouveau_drm *drm = nouveau_drm(node->minor->dev);
39 int i;
40
41 for (i = 0; i < drm->vbios.length; i++)
42 seq_printf(m, "%c", drm->vbios.data[i]);
43 return 0;
44}
45
46static struct drm_info_list nouveau_debugfs_list[] = {
47 { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
48};
49#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
50
51int
52nouveau_debugfs_init(struct drm_minor *minor)
53{
54 drm_debugfs_create_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
55 minor->debugfs_root, minor);
56 return 0;
57}
58
59void
60nouveau_debugfs_takedown(struct drm_minor *minor)
61{
62 drm_debugfs_remove_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
63 minor);
64}
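
nouveau_debugfs_vbios_image() streams the ROM out one byte at a time through seq_printf(). seq_write() pushes the whole buffer in a single call — an equivalent body, offered as an alternative sketch rather than what the commit does:

static int
nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
{
        struct drm_info_node *node = m->private;
        struct nouveau_drm *drm = nouveau_drm(node->minor->dev);

        return seq_write(m, drm->vbios.data, drm->vbios.length);
}
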
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.h b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
new file mode 100644
index 000000000000..a62af6fb5f99
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
@@ -0,0 +1,22 @@
1#ifndef __NOUVEAU_DEBUGFS_H__
2#define __NOUVEAU_DEBUGFS_H__
3
4#include <drm/drmP.h>
5
6#if defined(CONFIG_DEBUG_FS)
7extern int nouveau_debugfs_init(struct drm_minor *);
8extern void nouveau_debugfs_takedown(struct drm_minor *);
9#else
10static inline int
11nouveau_debugfs_init(struct drm_minor *minor)
12{
13 return 0;
14}
15
16static inline void nouveau_debugfs_takedown(struct drm_minor *minor)
17{
18}
19
20#endif
21
22#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 508b00a2ce0d..4610c3a29bbe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -41,6 +41,8 @@
41#include <subdev/gpio.h> 41#include <subdev/gpio.h>
42#include <engine/disp.h> 42#include <engine/disp.h>
43 43
44#include <core/class.h>
45
44static void 46static void
45nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb) 47nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
46{ 48{
@@ -78,11 +80,6 @@ nouveau_framebuffer_init(struct drm_device *dev,
78 struct drm_framebuffer *fb = &nv_fb->base; 80 struct drm_framebuffer *fb = &nv_fb->base;
79 int ret; 81 int ret;
80 82
81 ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
82 if (ret) {
83 return ret;
84 }
85
86 drm_helper_mode_fill_fb_struct(fb, mode_cmd); 83 drm_helper_mode_fill_fb_struct(fb, mode_cmd);
87 nv_fb->nvbo = nvbo; 84 nv_fb->nvbo = nvbo;
88 85
@@ -125,6 +122,11 @@ nouveau_framebuffer_init(struct drm_device *dev,
125 } 122 }
126 } 123 }
127 124
125 ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
126 if (ret) {
127 return ret;
128 }
129
128 return 0; 130 return 0;
129} 131}
130 132
@@ -231,8 +233,10 @@ nouveau_display_init(struct drm_device *dev)
231 /* enable hotplug interrupts */ 233 /* enable hotplug interrupts */
232 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 234 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
233 struct nouveau_connector *conn = nouveau_connector(connector); 235 struct nouveau_connector *conn = nouveau_connector(connector);
234 if (gpio) 236 if (gpio && conn->hpd.func != DCB_GPIO_UNUSED) {
235 gpio->irq(gpio, 0, conn->hpd, 0xff, true); 237 nouveau_event_get(gpio->events, conn->hpd.line,
238 &conn->hpd_func);
239 }
236 } 240 }
237 241
238 return ret; 242 return ret;
@@ -249,37 +253,20 @@ nouveau_display_fini(struct drm_device *dev)
249 /* disable hotplug interrupts */ 253 /* disable hotplug interrupts */
250 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 254 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
251 struct nouveau_connector *conn = nouveau_connector(connector); 255 struct nouveau_connector *conn = nouveau_connector(connector);
252 if (gpio) 256 if (gpio && conn->hpd.func != DCB_GPIO_UNUSED) {
253 gpio->irq(gpio, 0, conn->hpd, 0xff, false); 257 nouveau_event_put(gpio->events, conn->hpd.line,
258 &conn->hpd_func);
259 }
254 } 260 }
255 261
256 drm_kms_helper_poll_disable(dev); 262 drm_kms_helper_poll_disable(dev);
257 disp->fini(dev); 263 disp->fini(dev);
258} 264}
259 265
260static void
261nouveau_display_vblank_notify(void *data, int crtc)
262{
263 drm_handle_vblank(data, crtc);
264}
265
266static void
267nouveau_display_vblank_get(void *data, int crtc)
268{
269 drm_vblank_get(data, crtc);
270}
271
272static void
273nouveau_display_vblank_put(void *data, int crtc)
274{
275 drm_vblank_put(data, crtc);
276}
277
278int 266int
279nouveau_display_create(struct drm_device *dev) 267nouveau_display_create(struct drm_device *dev)
280{ 268{
281 struct nouveau_drm *drm = nouveau_drm(dev); 269 struct nouveau_drm *drm = nouveau_drm(dev);
282 struct nouveau_disp *pdisp = nouveau_disp(drm->device);
283 struct nouveau_display *disp; 270 struct nouveau_display *disp;
284 u32 pclass = dev->pdev->class >> 8; 271 u32 pclass = dev->pdev->class >> 8;
285 int ret, gen; 272 int ret, gen;
@@ -288,11 +275,6 @@ nouveau_display_create(struct drm_device *dev)
288 if (!disp) 275 if (!disp)
289 return -ENOMEM; 276 return -ENOMEM;
290 277
291 pdisp->vblank.data = dev;
292 pdisp->vblank.notify = nouveau_display_vblank_notify;
293 pdisp->vblank.get = nouveau_display_vblank_get;
294 pdisp->vblank.put = nouveau_display_vblank_put;
295
296 drm_mode_config_init(dev); 278 drm_mode_config_init(dev);
297 drm_mode_create_scaling_mode_property(dev); 279 drm_mode_create_scaling_mode_property(dev);
298 drm_mode_create_dvi_i_properties(dev); 280 drm_mode_create_dvi_i_properties(dev);
@@ -316,17 +298,13 @@ nouveau_display_create(struct drm_device *dev)
316 drm_property_create_range(dev, 0, "underscan vborder", 0, 128); 298 drm_property_create_range(dev, 0, "underscan vborder", 0, 128);
317 299
318 if (gen >= 1) { 300 if (gen >= 1) {
301 /* -90..+90 */
319 disp->vibrant_hue_property = 302 disp->vibrant_hue_property =
320 drm_property_create(dev, DRM_MODE_PROP_RANGE, 303 drm_property_create_range(dev, 0, "vibrant hue", 0, 180);
321 "vibrant hue", 2);
322 disp->vibrant_hue_property->values[0] = 0;
323 disp->vibrant_hue_property->values[1] = 180; /* -90..+90 */
324 304
305 /* -100..+100 */
325 disp->color_vibrance_property = 306 disp->color_vibrance_property =
326 drm_property_create(dev, DRM_MODE_PROP_RANGE, 307 drm_property_create_range(dev, 0, "color vibrance", 0, 200);
327 "color vibrance", 2);
328 disp->color_vibrance_property->values[0] = 0;
329 disp->color_vibrance_property->values[1] = 200; /* -100..+100 */
330 } 308 }
331 309
332 dev->mode_config.funcs = &nouveau_mode_config_funcs; 310 dev->mode_config.funcs = &nouveau_mode_config_funcs;
@@ -478,39 +456,6 @@ nouveau_display_resume(struct drm_device *dev)
478 } 456 }
479} 457}
480 458
481int
482nouveau_vblank_enable(struct drm_device *dev, int crtc)
483{
484 struct nouveau_device *device = nouveau_dev(dev);
485
486 if (device->card_type >= NV_D0)
487 nv_mask(device, 0x6100c0 + (crtc * 0x800), 1, 1);
488 else
489 if (device->card_type >= NV_50)
490 nv_mask(device, NV50_PDISPLAY_INTR_EN_1, 0,
491 NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc));
492 else
493 NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0,
494 NV_PCRTC_INTR_0_VBLANK);
495
496 return 0;
497}
498
499void
500nouveau_vblank_disable(struct drm_device *dev, int crtc)
501{
502 struct nouveau_device *device = nouveau_dev(dev);
503
504 if (device->card_type >= NV_D0)
505 nv_mask(device, 0x6100c0 + (crtc * 0x800), 1, 0);
506 else
507 if (device->card_type >= NV_50)
508 nv_mask(device, NV50_PDISPLAY_INTR_EN_1,
509 NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc), 0);
510 else
511 NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, 0);
512}
513
514static int 459static int
515nouveau_page_flip_reserve(struct nouveau_bo *old_bo, 460nouveau_page_flip_reserve(struct nouveau_bo *old_bo,
516 struct nouveau_bo *new_bo) 461 struct nouveau_bo *new_bo)
@@ -595,7 +540,7 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
595 } 540 }
596 FIRE_RING (chan); 541 FIRE_RING (chan);
597 542
598 ret = nouveau_fence_new(chan, pfence); 543 ret = nouveau_fence_new(chan, false, pfence);
599 if (ret) 544 if (ret)
600 goto fail; 545 goto fail;
601 546
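
The property hunk earlier in this file is a pure simplification: drm_property_create_range() allocates a DRM_MODE_PROP_RANGE property and fills in values[0]/values[1] itself, replacing four lines of manual setup per property. As a fragment:

/* before: drm_property_create(dev, DRM_MODE_PROP_RANGE, "vibrant hue", 2)
 *         plus hand-written values[0] = 0, values[1] = 180;
 * after:  one call with the same result */
disp->vibrant_hue_property =
        drm_property_create_range(dev, 0, "vibrant hue", 0, 180);
if (!disp->vibrant_hue_property)
        return -ENOMEM;         /* error check assumed; the hunk has none */
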
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 722548bb3bd3..1ea3e4734b62 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -59,9 +59,6 @@ void nouveau_display_fini(struct drm_device *dev);
59int nouveau_display_suspend(struct drm_device *dev); 59int nouveau_display_suspend(struct drm_device *dev);
60void nouveau_display_resume(struct drm_device *dev); 60void nouveau_display_resume(struct drm_device *dev);
61 61
62int nouveau_vblank_enable(struct drm_device *dev, int crtc);
63void nouveau_vblank_disable(struct drm_device *dev, int crtc);
64
65int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, 62int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
66 struct drm_pending_vblank_event *event); 63 struct drm_pending_vblank_event *event);
67int nouveau_finish_page_flip(struct nouveau_channel *, 64int nouveau_finish_page_flip(struct nouveau_channel *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 5c2e22932d1c..690d5930ce32 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -191,7 +191,7 @@ WIND_RING(struct nouveau_channel *chan)
191#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG 0x00000002 191#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG 0x00000002
192#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL 0x00000004 192#define NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL 0x00000004
193#define NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD 0x00001000 193#define NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD 0x00001000
194#define NV84_SUBCHAN_NOTIFY_INTR 0x00000020 194#define NV84_SUBCHAN_UEVENT 0x00000020
195#define NV84_SUBCHAN_WRCACHE_FLUSH 0x00000024 195#define NV84_SUBCHAN_WRCACHE_FLUSH 0x00000024
196#define NV10_SUBCHAN_REF_CNT 0x00000050 196#define NV10_SUBCHAN_REF_CNT 0x00000050
197#define NVSW_SUBCHAN_PAGE_FLIP 0x00000054 197#define NVSW_SUBCHAN_PAGE_FLIP 0x00000054
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 59838651ee8f..36fd22500569 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -35,300 +35,6 @@
35#include <subdev/gpio.h> 35#include <subdev/gpio.h>
36#include <subdev/i2c.h> 36#include <subdev/i2c.h>
37 37
38/******************************************************************************
39 * link training
40 *****************************************************************************/
41struct dp_state {
42 struct nouveau_i2c_port *auxch;
43 struct nouveau_object *core;
44 struct dcb_output *dcb;
45 int crtc;
46 u8 *dpcd;
47 int link_nr;
48 u32 link_bw;
49 u8 stat[6];
50 u8 conf[4];
51};
52
53static void
54dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
55{
56 struct nouveau_drm *drm = nouveau_drm(dev);
57 struct dcb_output *dcb = dp->dcb;
58 const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
59 const u32 moff = (dp->crtc << 3) | (link << 2) | or;
60 u8 sink[2];
61 u32 data;
62
63 NV_DEBUG(drm, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
64
65 /* set desired link configuration on the source */
66 data = ((dp->link_bw / 27000) << 8) | dp->link_nr;
67 if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)
68 data |= NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH;
69
70 nv_call(dp->core, NV94_DISP_SOR_DP_LNKCTL + moff, data);
71
72 /* inform the sink of the new configuration */
73 sink[0] = dp->link_bw / 27000;
74 sink[1] = dp->link_nr;
75 if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)
76 sink[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
77
78 nv_wraux(dp->auxch, DP_LINK_BW_SET, sink, 2);
79}
80
81static void
82dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 pattern)
83{
84 struct nouveau_drm *drm = nouveau_drm(dev);
85 struct dcb_output *dcb = dp->dcb;
86 const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
87 const u32 moff = (dp->crtc << 3) | (link << 2) | or;
88 u8 sink_tp;
89
90 NV_DEBUG(drm, "training pattern %d\n", pattern);
91
92 nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, pattern);
93
94 nv_rdaux(dp->auxch, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
95 sink_tp &= ~DP_TRAINING_PATTERN_MASK;
96 sink_tp |= pattern;
97 nv_wraux(dp->auxch, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
98}
99
100static int
101dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
102{
103 struct nouveau_drm *drm = nouveau_drm(dev);
104 struct dcb_output *dcb = dp->dcb;
105 const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
106 const u32 moff = (dp->crtc << 3) | (link << 2) | or;
107 int i;
108
109 for (i = 0; i < dp->link_nr; i++) {
110 u8 lane = (dp->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf;
111 u8 lpre = (lane & 0x0c) >> 2;
112 u8 lvsw = (lane & 0x03) >> 0;
113
114 dp->conf[i] = (lpre << 3) | lvsw;
115 if (lvsw == DP_TRAIN_VOLTAGE_SWING_1200)
116 dp->conf[i] |= DP_TRAIN_MAX_SWING_REACHED;
117 if ((lpre << 3) == DP_TRAIN_PRE_EMPHASIS_9_5)
118 dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
119
120 NV_DEBUG(drm, "config lane %d %02x\n", i, dp->conf[i]);
121
122 nv_call(dp->core, NV94_DISP_SOR_DP_DRVCTL(i) + moff, (lvsw << 8) | lpre);
123 }
124
125 return nv_wraux(dp->auxch, DP_TRAINING_LANE0_SET, dp->conf, 4);
126}
127
128static int
129dp_link_train_update(struct drm_device *dev, struct dp_state *dp, u32 delay)
130{
131 struct nouveau_drm *drm = nouveau_drm(dev);
132 int ret;
133
134 udelay(delay);
135
136 ret = nv_rdaux(dp->auxch, DP_LANE0_1_STATUS, dp->stat, 6);
137 if (ret)
138 return ret;
139
140 NV_DEBUG(drm, "status %*ph\n", 6, dp->stat);
141 return 0;
142}
143
144static int
145dp_link_train_cr(struct drm_device *dev, struct dp_state *dp)
146{
147 bool cr_done = false, abort = false;
148 int voltage = dp->conf[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
149 int tries = 0, i;
150
151 dp_set_training_pattern(dev, dp, DP_TRAINING_PATTERN_1);
152
153 do {
154 if (dp_link_train_commit(dev, dp) ||
155 dp_link_train_update(dev, dp, 100))
156 break;
157
158 cr_done = true;
159 for (i = 0; i < dp->link_nr; i++) {
160 u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
161 if (!(lane & DP_LANE_CR_DONE)) {
162 cr_done = false;
163 if (dp->conf[i] & DP_TRAIN_MAX_SWING_REACHED)
164 abort = true;
165 break;
166 }
167 }
168
169 if ((dp->conf[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) {
170 voltage = dp->conf[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
171 tries = 0;
172 }
173 } while (!cr_done && !abort && ++tries < 5);
174
175 return cr_done ? 0 : -1;
176}
177
178static int
179dp_link_train_eq(struct drm_device *dev, struct dp_state *dp)
180{
181 bool eq_done, cr_done = true;
182 int tries = 0, i;
183
184 dp_set_training_pattern(dev, dp, DP_TRAINING_PATTERN_2);
185
186 do {
187 if (dp_link_train_update(dev, dp, 400))
188 break;
189
190 eq_done = !!(dp->stat[2] & DP_INTERLANE_ALIGN_DONE);
191 for (i = 0; i < dp->link_nr && eq_done; i++) {
192 u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
193 if (!(lane & DP_LANE_CR_DONE))
194 cr_done = false;
195 if (!(lane & DP_LANE_CHANNEL_EQ_DONE) ||
196 !(lane & DP_LANE_SYMBOL_LOCKED))
197 eq_done = false;
198 }
199
200 if (dp_link_train_commit(dev, dp))
201 break;
202 } while (!eq_done && cr_done && ++tries <= 5);
203
204 return eq_done ? 0 : -1;
205}
206
207static void
208dp_link_train_init(struct drm_device *dev, struct dp_state *dp, bool spread)
209{
210 struct dcb_output *dcb = dp->dcb;
211 const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
212 const u32 moff = (dp->crtc << 3) | (link << 2) | or;
213
214 nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, (spread ?
215 NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON :
216 NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF) |
217 NV94_DISP_SOR_DP_TRAIN_OP_INIT);
218}
219
220static void
221dp_link_train_fini(struct drm_device *dev, struct dp_state *dp)
222{
223 struct dcb_output *dcb = dp->dcb;
224 const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
225 const u32 moff = (dp->crtc << 3) | (link << 2) | or;
226
227 nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff,
228 NV94_DISP_SOR_DP_TRAIN_OP_FINI);
229}
230
231static bool
232nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
233 struct nouveau_object *core)
234{
235 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
236 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
237 struct nouveau_connector *nv_connector =
238 nouveau_encoder_connector_get(nv_encoder);
239 struct drm_device *dev = encoder->dev;
240 struct nouveau_drm *drm = nouveau_drm(dev);
241 struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
242 struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
243 const u32 bw_list[] = { 270000, 162000, 0 };
244 const u32 *link_bw = bw_list;
245 struct dp_state dp;
246
247 dp.auxch = i2c->find(i2c, nv_encoder->dcb->i2c_index);
248 if (!dp.auxch)
249 return false;
250
251 dp.core = core;
252 dp.dcb = nv_encoder->dcb;
253 dp.crtc = nv_crtc->index;
254 dp.dpcd = nv_encoder->dp.dpcd;
255
256 /* adjust required bandwidth for 8B/10B coding overhead */
257 datarate = (datarate / 8) * 10;
258
259 /* some sinks toggle hotplug in response to some of the actions
260 * we take during link training (DP_SET_POWER is one), we need
261 * to ignore them for the moment to avoid races.
262 */
263 gpio->irq(gpio, 0, nv_connector->hpd, 0xff, false);
264
265 /* enable down-spreading and execute pre-train script from vbios */
266 dp_link_train_init(dev, &dp, nv_encoder->dp.dpcd[3] & 1);
267
268 /* start off at highest link rate supported by encoder and display */
269 while (*link_bw > nv_encoder->dp.link_bw)
270 link_bw++;
271
272 while (link_bw[0]) {
273 /* find minimum required lane count at this link rate */
274 dp.link_nr = nv_encoder->dp.link_nr;
275 while ((dp.link_nr >> 1) * link_bw[0] > datarate)
276 dp.link_nr >>= 1;
277
278 /* drop link rate to minimum with this lane count */
279 while ((link_bw[1] * dp.link_nr) > datarate)
280 link_bw++;
281 dp.link_bw = link_bw[0];
282
283 /* program selected link configuration */
284 dp_set_link_config(dev, &dp);
285
286 /* attempt to train the link at this configuration */
287 memset(dp.stat, 0x00, sizeof(dp.stat));
288 if (!dp_link_train_cr(dev, &dp) &&
289 !dp_link_train_eq(dev, &dp))
290 break;
291
292 /* retry at lower rate */
293 link_bw++;
294 }
295
296 /* finish link training */
297 dp_set_training_pattern(dev, &dp, DP_TRAINING_PATTERN_DISABLE);
298
299 /* execute post-train script from vbios */
300 dp_link_train_fini(dev, &dp);
301
302 /* re-enable hotplug detect */
303 gpio->irq(gpio, 0, nv_connector->hpd, 0xff, true);
304 return true;
305}
306
307void
308nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
309 struct nouveau_object *core)
310{
311 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
312 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
313 struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
314 struct nouveau_i2c_port *auxch;
315 u8 status;
316
317 auxch = i2c->find(i2c, nv_encoder->dcb->i2c_index);
318 if (!auxch)
319 return;
320
321 if (mode == DRM_MODE_DPMS_ON)
322 status = DP_SET_POWER_D0;
323 else
324 status = DP_SET_POWER_D3;
325
326 nv_wraux(auxch, DP_SET_POWER, &status, 1);
327
328 if (mode == DRM_MODE_DPMS_ON)
329 nouveau_dp_link_train(encoder, datarate, core);
330}
331
332static void 38static void
333nouveau_dp_probe_oui(struct drm_device *dev, struct nouveau_i2c_port *auxch, 39nouveau_dp_probe_oui(struct drm_device *dev, struct nouveau_i2c_port *auxch,
334 u8 *dpcd) 40 u8 *dpcd)
@@ -355,12 +61,11 @@ nouveau_dp_detect(struct drm_encoder *encoder)
355 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 61 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
356 struct drm_device *dev = encoder->dev; 62 struct drm_device *dev = encoder->dev;
357 struct nouveau_drm *drm = nouveau_drm(dev); 63 struct nouveau_drm *drm = nouveau_drm(dev);
358 struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
359 struct nouveau_i2c_port *auxch; 64 struct nouveau_i2c_port *auxch;
360 u8 *dpcd = nv_encoder->dp.dpcd; 65 u8 *dpcd = nv_encoder->dp.dpcd;
361 int ret; 66 int ret;
362 67
363 auxch = i2c->find(i2c, nv_encoder->dcb->i2c_index); 68 auxch = nv_encoder->i2c;
364 if (!auxch) 69 if (!auxch)
365 return false; 70 return false;
366 71
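
Everything deleted above — the dp_state tracking, pattern programming and the CR/EQ loops — moves into the shared display engine code in this series; only detection stays in the driver. The configuration search the deleted code performed is still worth keeping in view. Reduced to a standalone sketch (names hypothetical, logic lifted from the deleted body): scale the payload rate for 8b/10b coding, clamp to what source and sink both support, then pick the fewest lanes and lowest rate that still fit; on training failure the caller stepped down to the next rate and tried again.

static void
pick_link_config(u32 datarate, u32 max_bw, int max_nr,
                 u32 *bw_out, int *nr_out)
{
        static const u32 bw_list[] = { 270000, 162000, 0 };
        const u32 *bw = bw_list;
        int nr = max_nr;

        datarate = (datarate / 8) * 10;         /* 8b/10b coding overhead */

        while (*bw > max_bw)                    /* highest rate both ends do */
                bw++;

        while ((nr >> 1) * bw[0] > datarate)    /* fewest lanes that fit */
                nr >>= 1;
        while (bw[1] * nr > datarate)           /* lowest rate that fits */
                bw++;

        *bw_out = bw[0];
        *nr_out = nr;
}
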
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 5e7aef23825a..d1099365bfc1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -34,6 +34,8 @@
34#include <subdev/device.h> 34#include <subdev/device.h>
35#include <subdev/vm.h> 35#include <subdev/vm.h>
36 36
37#include <engine/disp.h>
38
37#include "nouveau_drm.h" 39#include "nouveau_drm.h"
38#include "nouveau_irq.h" 40#include "nouveau_irq.h"
39#include "nouveau_dma.h" 41#include "nouveau_dma.h"
@@ -48,6 +50,7 @@
48#include "nouveau_abi16.h" 50#include "nouveau_abi16.h"
49#include "nouveau_fbcon.h" 51#include "nouveau_fbcon.h"
50#include "nouveau_fence.h" 52#include "nouveau_fence.h"
53#include "nouveau_debugfs.h"
51 54
52MODULE_PARM_DESC(config, "option string to pass to driver core"); 55MODULE_PARM_DESC(config, "option string to pass to driver core");
53static char *nouveau_config; 56static char *nouveau_config;
@@ -68,6 +71,32 @@ module_param_named(modeset, nouveau_modeset, int, 0400);
68 71
69static struct drm_driver driver; 72static struct drm_driver driver;
70 73
74static int
75nouveau_drm_vblank_enable(struct drm_device *dev, int head)
76{
77 struct nouveau_drm *drm = nouveau_drm(dev);
78 struct nouveau_disp *pdisp = nouveau_disp(drm->device);
79 nouveau_event_get(pdisp->vblank, head, &drm->vblank);
80 return 0;
81}
82
83static void
84nouveau_drm_vblank_disable(struct drm_device *dev, int head)
85{
86 struct nouveau_drm *drm = nouveau_drm(dev);
87 struct nouveau_disp *pdisp = nouveau_disp(drm->device);
88 nouveau_event_put(pdisp->vblank, head, &drm->vblank);
89}
90
91static int
92nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head)
93{
94 struct nouveau_drm *drm =
95 container_of(event, struct nouveau_drm, vblank);
96 drm_handle_vblank(drm->dev, head);
97 return NVKM_EVENT_KEEP;
98}
99
71static u64 100static u64
72nouveau_name(struct pci_dev *pdev) 101nouveau_name(struct pci_dev *pdev)
73{ 102{
@@ -132,7 +161,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
132 161
133 /* initialise synchronisation routines */ 162 /* initialise synchronisation routines */
134 if (device->card_type < NV_10) ret = nv04_fence_create(drm); 163 if (device->card_type < NV_10) ret = nv04_fence_create(drm);
135 else if (device->card_type < NV_50) ret = nv10_fence_create(drm); 164 else if (device->chipset < 0x17) ret = nv10_fence_create(drm);
165 else if (device->card_type < NV_50) ret = nv17_fence_create(drm);
136 else if (device->chipset < 0x84) ret = nv50_fence_create(drm); 166 else if (device->chipset < 0x84) ret = nv50_fence_create(drm);
137 else if (device->card_type < NV_C0) ret = nv84_fence_create(drm); 167 else if (device->card_type < NV_C0) ret = nv84_fence_create(drm);
138 else ret = nvc0_fence_create(drm); 168 else ret = nvc0_fence_create(drm);
@@ -262,6 +292,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
262 292
263 dev->dev_private = drm; 293 dev->dev_private = drm;
264 drm->dev = dev; 294 drm->dev = dev;
295 drm->vblank.func = nouveau_drm_vblank_handler;
265 296
266 INIT_LIST_HEAD(&drm->clients); 297 INIT_LIST_HEAD(&drm->clients);
267 spin_lock_init(&drm->tile.lock); 298 spin_lock_init(&drm->tile.lock);
@@ -401,7 +432,7 @@ nouveau_drm_remove(struct pci_dev *pdev)
401 nouveau_object_debug(); 432 nouveau_object_debug();
402} 433}
403 434
404int 435static int
405nouveau_do_suspend(struct drm_device *dev) 436nouveau_do_suspend(struct drm_device *dev)
406{ 437{
407 struct nouveau_drm *drm = nouveau_drm(dev); 438 struct nouveau_drm *drm = nouveau_drm(dev);
@@ -472,7 +503,7 @@ int nouveau_pmops_suspend(struct device *dev)
472 return 0; 503 return 0;
473} 504}
474 505
475int 506static int
476nouveau_do_resume(struct drm_device *dev) 507nouveau_do_resume(struct drm_device *dev)
477{ 508{
478 struct nouveau_drm *drm = nouveau_drm(dev); 509 struct nouveau_drm *drm = nouveau_drm(dev);
@@ -546,10 +577,11 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
546 struct pci_dev *pdev = dev->pdev; 577 struct pci_dev *pdev = dev->pdev;
547 struct nouveau_drm *drm = nouveau_drm(dev); 578 struct nouveau_drm *drm = nouveau_drm(dev);
548 struct nouveau_cli *cli; 579 struct nouveau_cli *cli;
549 char name[16]; 580 char name[32], tmpname[TASK_COMM_LEN];
550 int ret; 581 int ret;
551 582
552 snprintf(name, sizeof(name), "%d", pid_nr(fpriv->pid)); 583 get_task_comm(tmpname, current);
584 snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
553 585
554 ret = nouveau_cli_create(pdev, name, sizeof(*cli), (void **)&cli); 586 ret = nouveau_cli_create(pdev, name, sizeof(*cli), (void **)&cli);
555 if (ret) 587 if (ret)
@@ -639,22 +671,32 @@ driver = {
639 .postclose = nouveau_drm_postclose, 671 .postclose = nouveau_drm_postclose,
640 .lastclose = nouveau_vga_lastclose, 672 .lastclose = nouveau_vga_lastclose,
641 673
674#if defined(CONFIG_DEBUG_FS)
675 .debugfs_init = nouveau_debugfs_init,
676 .debugfs_cleanup = nouveau_debugfs_takedown,
677#endif
678
642 .irq_preinstall = nouveau_irq_preinstall, 679 .irq_preinstall = nouveau_irq_preinstall,
643 .irq_postinstall = nouveau_irq_postinstall, 680 .irq_postinstall = nouveau_irq_postinstall,
644 .irq_uninstall = nouveau_irq_uninstall, 681 .irq_uninstall = nouveau_irq_uninstall,
645 .irq_handler = nouveau_irq_handler, 682 .irq_handler = nouveau_irq_handler,
646 683
647 .get_vblank_counter = drm_vblank_count, 684 .get_vblank_counter = drm_vblank_count,
648 .enable_vblank = nouveau_vblank_enable, 685 .enable_vblank = nouveau_drm_vblank_enable,
649 .disable_vblank = nouveau_vblank_disable, 686 .disable_vblank = nouveau_drm_vblank_disable,
650 687
651 .ioctls = nouveau_ioctls, 688 .ioctls = nouveau_ioctls,
652 .fops = &nouveau_driver_fops, 689 .fops = &nouveau_driver_fops,
653 690
654 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 691 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
655 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 692 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
656 .gem_prime_export = nouveau_gem_prime_export, 693 .gem_prime_export = drm_gem_prime_export,
657 .gem_prime_import = nouveau_gem_prime_import, 694 .gem_prime_import = drm_gem_prime_import,
695 .gem_prime_pin = nouveau_gem_prime_pin,
696 .gem_prime_get_sg_table = nouveau_gem_prime_get_sg_table,
697 .gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
698 .gem_prime_vmap = nouveau_gem_prime_vmap,
699 .gem_prime_vunmap = nouveau_gem_prime_vunmap,
658 700
659 .gem_init_object = nouveau_gem_object_new, 701 .gem_init_object = nouveau_gem_object_new,
660 .gem_free_object = nouveau_gem_object_del, 702 .gem_free_object = nouveau_gem_object_del,
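
Two small quality-of-life changes in this file are easy to miss among the vblank and prime rework: the suspend/resume helpers become static, and the per-client debug name gains the process name, so messages read "Xorg[1234]" instead of a bare "1234". The name buffer doubles to 32 bytes because TASK_COMM_LEN alone is 16. The naming idiom in isolation:

char name[32], comm[TASK_COMM_LEN];

get_task_comm(comm, current);                   /* e.g. "Xorg" */
snprintf(name, sizeof(name), "%s[%d]", comm, pid_nr(fpriv->pid));
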
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index aa89eb938b47..b25df374c901 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -13,6 +13,7 @@
13#define DRIVER_PATCHLEVEL 0 13#define DRIVER_PATCHLEVEL 0
14 14
15#include <core/client.h> 15#include <core/client.h>
16#include <core/event.h>
16 17
17#include <subdev/vm.h> 18#include <subdev/vm.h>
18 19
@@ -112,6 +113,7 @@ struct nouveau_drm {
112 struct nvbios vbios; 113 struct nvbios vbios;
113 struct nouveau_display *display; 114 struct nouveau_display *display;
114 struct backlight_device *backlight; 115 struct backlight_device *backlight;
116 struct nouveau_eventh vblank;
115 117
116 /* power management */ 118 /* power management */
117 struct nouveau_pm *pm; 119 struct nouveau_pm *pm;
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index d0d95bd511ab..e24341229d5e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -36,19 +36,12 @@
36 36
37struct nouveau_i2c_port; 37struct nouveau_i2c_port;
38 38
39struct dp_train_func {
40 void (*link_set)(struct drm_device *, struct dcb_output *, int crtc,
41 int nr, u32 bw, bool enhframe);
42 void (*train_set)(struct drm_device *, struct dcb_output *, u8 pattern);
43 void (*train_adj)(struct drm_device *, struct dcb_output *,
44 u8 lane, u8 swing, u8 preem);
45};
46
47struct nouveau_encoder { 39struct nouveau_encoder {
48 struct drm_encoder_slave base; 40 struct drm_encoder_slave base;
49 41
50 struct dcb_output *dcb; 42 struct dcb_output *dcb;
51 int or; 43 int or;
44 struct nouveau_i2c_port *i2c;
52 45
53 /* different to drm_encoder.crtc, this reflects what's 46 /* different to drm_encoder.crtc, this reflects what's
54 * actually programmed on the hw, not the proposed crtc */ 47 * actually programmed on the hw, not the proposed crtc */
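
With struct dp_train_func gone to the core, the one addition here is the cached i2c/aux port that nouveau_dp_detect() (previous file) now reads instead of calling i2c->find() on every probe. The cache would be filled once when the encoder is created — a sketch of the assumed fill site, which is not part of this excerpt:

/* at encoder init, after the DCB entry is parsed: */
nv_encoder->i2c = i2c->find(i2c, nv_encoder->dcb->i2c_index);
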
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 67a1a069de28..b03531781580 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -251,9 +251,10 @@ nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *fbcon)
251} 251}
252 252
253static int 253static int
254nouveau_fbcon_create(struct nouveau_fbdev *fbcon, 254nouveau_fbcon_create(struct drm_fb_helper *helper,
255 struct drm_fb_helper_surface_size *sizes) 255 struct drm_fb_helper_surface_size *sizes)
256{ 256{
257 struct nouveau_fbdev *fbcon = (struct nouveau_fbdev *)helper;
257 struct drm_device *dev = fbcon->dev; 258 struct drm_device *dev = fbcon->dev;
258 struct nouveau_drm *drm = nouveau_drm(dev); 259 struct nouveau_drm *drm = nouveau_drm(dev);
259 struct nouveau_device *device = nv_device(drm->device); 260 struct nouveau_device *device = nv_device(drm->device);
@@ -388,23 +389,6 @@ out:
388 return ret; 389 return ret;
389} 390}
390 391
391static int
392nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper,
393 struct drm_fb_helper_surface_size *sizes)
394{
395 struct nouveau_fbdev *fbcon = (struct nouveau_fbdev *)helper;
396 int new_fb = 0;
397 int ret;
398
399 if (!helper->fb) {
400 ret = nouveau_fbcon_create(fbcon, sizes);
401 if (ret)
402 return ret;
403 new_fb = 1;
404 }
405 return new_fb;
406}
407
408void 392void
409nouveau_fbcon_output_poll_changed(struct drm_device *dev) 393nouveau_fbcon_output_poll_changed(struct drm_device *dev)
410{ 394{
@@ -433,6 +417,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
433 nouveau_fb->nvbo = NULL; 417 nouveau_fb->nvbo = NULL;
434 } 418 }
435 drm_fb_helper_fini(&fbcon->helper); 419 drm_fb_helper_fini(&fbcon->helper);
420 drm_framebuffer_unregister_private(&nouveau_fb->base);
436 drm_framebuffer_cleanup(&nouveau_fb->base); 421 drm_framebuffer_cleanup(&nouveau_fb->base);
437 return 0; 422 return 0;
438} 423}
@@ -449,7 +434,7 @@ void nouveau_fbcon_gpu_lockup(struct fb_info *info)
449static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = { 434static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
450 .gamma_set = nouveau_fbcon_gamma_set, 435 .gamma_set = nouveau_fbcon_gamma_set,
451 .gamma_get = nouveau_fbcon_gamma_get, 436 .gamma_get = nouveau_fbcon_gamma_get,
452 .fb_probe = nouveau_fbcon_find_or_create_single, 437 .fb_probe = nouveau_fbcon_create,
453}; 438};
454 439
455 440
@@ -490,6 +475,9 @@ nouveau_fbcon_init(struct drm_device *dev)
490 else 475 else
491 preferred_bpp = 32; 476 preferred_bpp = 32;
492 477
478 /* disable all the possible outputs/crtcs before entering KMS mode */
479 drm_helper_disable_unused_functions(dev);
480
493 drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp); 481 drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp);
494 return 0; 482 return 0;
495} 483}
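
The fb_probe rework hands the driver its drm_fb_helper directly and lets the helper core do the "create only if none exists" bookkeeping the deleted find_or_create wrapper used to do. The downcast in nouveau_fbcon_create() is a plain cast, which is only correct while 'helper' remains the first member of nouveau_fbdev; the position-independent spelling of the same downcast:

struct nouveau_fbdev *fbcon =
        container_of(helper, struct nouveau_fbdev, helper);
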
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 1d049be79f74..6c946837a0aa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -33,14 +33,14 @@
33#include "nouveau_dma.h" 33#include "nouveau_dma.h"
34#include "nouveau_fence.h" 34#include "nouveau_fence.h"
35 35
36#include <engine/fifo.h>
37
36void 38void
37nouveau_fence_context_del(struct nouveau_fence_chan *fctx) 39nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
38{ 40{
39 struct nouveau_fence *fence, *fnext; 41 struct nouveau_fence *fence, *fnext;
40 spin_lock(&fctx->lock); 42 spin_lock(&fctx->lock);
41 list_for_each_entry_safe(fence, fnext, &fctx->pending, head) { 43 list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
42 if (fence->work)
43 fence->work(fence->priv, false);
44 fence->channel = NULL; 44 fence->channel = NULL;
45 list_del(&fence->head); 45 list_del(&fence->head);
46 nouveau_fence_unref(&fence); 46 nouveau_fence_unref(&fence);
@@ -59,17 +59,14 @@ nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
59static void 59static void
60nouveau_fence_update(struct nouveau_channel *chan) 60nouveau_fence_update(struct nouveau_channel *chan)
61{ 61{
62 struct nouveau_fence_priv *priv = chan->drm->fence;
63 struct nouveau_fence_chan *fctx = chan->fence; 62 struct nouveau_fence_chan *fctx = chan->fence;
64 struct nouveau_fence *fence, *fnext; 63 struct nouveau_fence *fence, *fnext;
65 64
66 spin_lock(&fctx->lock); 65 spin_lock(&fctx->lock);
67 list_for_each_entry_safe(fence, fnext, &fctx->pending, head) { 66 list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
68 if (priv->read(chan) < fence->sequence) 67 if (fctx->read(chan) < fence->sequence)
69 break; 68 break;
70 69
71 if (fence->work)
72 fence->work(fence->priv, true);
73 fence->channel = NULL; 70 fence->channel = NULL;
74 list_del(&fence->head); 71 list_del(&fence->head);
75 nouveau_fence_unref(&fence); 72 nouveau_fence_unref(&fence);
@@ -80,7 +77,6 @@ nouveau_fence_update(struct nouveau_channel *chan)
80int 77int
81nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan) 78nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
82{ 79{
83 struct nouveau_fence_priv *priv = chan->drm->fence;
84 struct nouveau_fence_chan *fctx = chan->fence; 80 struct nouveau_fence_chan *fctx = chan->fence;
85 int ret; 81 int ret;
86 82
@@ -88,7 +84,7 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
88 fence->timeout = jiffies + (3 * DRM_HZ); 84 fence->timeout = jiffies + (3 * DRM_HZ);
89 fence->sequence = ++fctx->sequence; 85 fence->sequence = ++fctx->sequence;
90 86
91 ret = priv->emit(fence); 87 ret = fctx->emit(fence);
92 if (!ret) { 88 if (!ret) {
93 kref_get(&fence->kref); 89 kref_get(&fence->kref);
94 spin_lock(&fctx->lock); 90 spin_lock(&fctx->lock);
@@ -107,13 +103,87 @@ nouveau_fence_done(struct nouveau_fence *fence)
107 return !fence->channel; 103 return !fence->channel;
108} 104}
109 105
106struct nouveau_fence_uevent {
107 struct nouveau_eventh handler;
108 struct nouveau_fence_priv *priv;
109};
110
111static int
112nouveau_fence_wait_uevent_handler(struct nouveau_eventh *event, int index)
113{
114 struct nouveau_fence_uevent *uevent =
115 container_of(event, struct nouveau_fence_uevent, handler);
116 wake_up_all(&uevent->priv->waiting);
117 return NVKM_EVENT_KEEP;
118}
119
120static int
121nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr)
122
123{
124 struct nouveau_channel *chan = fence->channel;
125 struct nouveau_fifo *pfifo = nouveau_fifo(chan->drm->device);
126 struct nouveau_fence_priv *priv = chan->drm->fence;
127 struct nouveau_fence_uevent uevent = {
128 .handler.func = nouveau_fence_wait_uevent_handler,
129 .priv = priv,
130 };
131 int ret = 0;
132
133 nouveau_event_get(pfifo->uevent, 0, &uevent.handler);
134
135 if (fence->timeout) {
136 unsigned long timeout = fence->timeout - jiffies;
137
138 if (time_before(jiffies, fence->timeout)) {
139 if (intr) {
140 ret = wait_event_interruptible_timeout(
141 priv->waiting,
142 nouveau_fence_done(fence),
143 timeout);
144 } else {
145 ret = wait_event_timeout(priv->waiting,
146 nouveau_fence_done(fence),
147 timeout);
148 }
149 }
150
151 if (ret >= 0) {
152 fence->timeout = jiffies + ret;
153 if (time_after_eq(jiffies, fence->timeout))
154 ret = -EBUSY;
155 }
156 } else {
157 if (intr) {
158 ret = wait_event_interruptible(priv->waiting,
159 nouveau_fence_done(fence));
160 } else {
161 wait_event(priv->waiting, nouveau_fence_done(fence));
162 }
163 }
164
165 nouveau_event_put(pfifo->uevent, 0, &uevent.handler);
166 if (unlikely(ret < 0))
167 return ret;
168
169 return 0;
170}
171
110int 172int
111nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr) 173nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
112{ 174{
175 struct nouveau_channel *chan = fence->channel;
176 struct nouveau_fence_priv *priv = chan ? chan->drm->fence : NULL;
113 unsigned long sleep_time = NSEC_PER_MSEC / 1000; 177 unsigned long sleep_time = NSEC_PER_MSEC / 1000;
114 ktime_t t; 178 ktime_t t;
115 int ret = 0; 179 int ret = 0;
116 180
181 while (priv && priv->uevent && lazy && !nouveau_fence_done(fence)) {
182 ret = nouveau_fence_wait_uevent(fence, intr);
183 if (ret < 0)
184 return ret;
185 }
186
117 while (!nouveau_fence_done(fence)) { 187 while (!nouveau_fence_done(fence)) {
118 if (fence->timeout && time_after_eq(jiffies, fence->timeout)) { 188 if (fence->timeout && time_after_eq(jiffies, fence->timeout)) {
119 ret = -EBUSY; 189 ret = -EBUSY;
@@ -143,14 +213,14 @@ nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
143int 213int
144nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan) 214nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
145{ 215{
146 struct nouveau_fence_priv *priv = chan->drm->fence; 216 struct nouveau_fence_chan *fctx = chan->fence;
147 struct nouveau_channel *prev; 217 struct nouveau_channel *prev;
148 int ret = 0; 218 int ret = 0;
149 219
150 prev = fence ? fence->channel : NULL; 220 prev = fence ? fence->channel : NULL;
151 if (prev) { 221 if (prev) {
152 if (unlikely(prev != chan && !nouveau_fence_done(fence))) { 222 if (unlikely(prev != chan && !nouveau_fence_done(fence))) {
153 ret = priv->sync(fence, prev, chan); 223 ret = fctx->sync(fence, prev, chan);
154 if (unlikely(ret)) 224 if (unlikely(ret))
155 ret = nouveau_fence_wait(fence, true, false); 225 ret = nouveau_fence_wait(fence, true, false);
156 } 226 }
@@ -182,7 +252,8 @@ nouveau_fence_ref(struct nouveau_fence *fence)
182} 252}
183 253
184int 254int
185nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence) 255nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
256 struct nouveau_fence **pfence)
186{ 257{
187 struct nouveau_fence *fence; 258 struct nouveau_fence *fence;
188 int ret = 0; 259 int ret = 0;
@@ -193,13 +264,13 @@ nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence)
193 fence = kzalloc(sizeof(*fence), GFP_KERNEL); 264 fence = kzalloc(sizeof(*fence), GFP_KERNEL);
194 if (!fence) 265 if (!fence)
195 return -ENOMEM; 266 return -ENOMEM;
267
268 fence->sysmem = sysmem;
196 kref_init(&fence->kref); 269 kref_init(&fence->kref);
197 270
198 if (chan) { 271 ret = nouveau_fence_emit(fence, chan);
199 ret = nouveau_fence_emit(fence, chan); 272 if (ret)
200 if (ret) 273 nouveau_fence_unref(&fence);
201 nouveau_fence_unref(&fence);
202 }
203 274
204 *pfence = fence; 275 *pfence = fence;
205 return ret; 276 return ret;
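
The big change in this file is that fence waits stop being pure polling: when the backend sets priv->uevent, the waiter subscribes to the FIFO's user event and sleeps on priv->waiting until the fence signals or its deadline passes. The timeout bookkeeping in isolation — a hypothetical reduction, with done() standing in for nouveau_fence_done(fence):

#include <linux/wait.h>
#include <linux/jiffies.h>

static int
wait_deadline(wait_queue_head_t *wq, unsigned long deadline)
{
        long rem = deadline - jiffies;

        if (!time_before(jiffies, deadline))
                return -EBUSY;                  /* already expired */

        rem = wait_event_interruptible_timeout(*wq, done(), rem);
        if (rem < 0)
                return rem;                     /* -ERESTARTSYS */
        if (rem == 0)
                return -EBUSY;                  /* slept the whole timeout */
        return 0;                               /* signalled in time */
}

Fences created without a timeout fall back to a plain wait_event()/wait_event_interruptible(), as in the hunk above.
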
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index cdb83acdffe2..c89943407b52 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -7,15 +7,15 @@ struct nouveau_fence {
7 struct list_head head; 7 struct list_head head;
8 struct kref kref; 8 struct kref kref;
9 9
10 bool sysmem;
11
10 struct nouveau_channel *channel; 12 struct nouveau_channel *channel;
11 unsigned long timeout; 13 unsigned long timeout;
12 u32 sequence; 14 u32 sequence;
13
14 void (*work)(void *priv, bool signalled);
15 void *priv;
16}; 15};
17 16
18int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **); 17int nouveau_fence_new(struct nouveau_channel *, bool sysmem,
18 struct nouveau_fence **);
19struct nouveau_fence * 19struct nouveau_fence *
20nouveau_fence_ref(struct nouveau_fence *); 20nouveau_fence_ref(struct nouveau_fence *);
21void nouveau_fence_unref(struct nouveau_fence **); 21void nouveau_fence_unref(struct nouveau_fence **);
@@ -29,6 +29,13 @@ struct nouveau_fence_chan {
29 struct list_head pending; 29 struct list_head pending;
30 struct list_head flip; 30 struct list_head flip;
31 31
32 int (*emit)(struct nouveau_fence *);
33 int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
34 struct nouveau_channel *);
35 u32 (*read)(struct nouveau_channel *);
36 int (*emit32)(struct nouveau_channel *, u64, u32);
37 int (*sync32)(struct nouveau_channel *, u64, u32);
38
32 spinlock_t lock; 39 spinlock_t lock;
33 u32 sequence; 40 u32 sequence;
34}; 41};
@@ -39,10 +46,9 @@ struct nouveau_fence_priv {
39 void (*resume)(struct nouveau_drm *); 46 void (*resume)(struct nouveau_drm *);
40 int (*context_new)(struct nouveau_channel *); 47 int (*context_new)(struct nouveau_channel *);
41 void (*context_del)(struct nouveau_channel *); 48 void (*context_del)(struct nouveau_channel *);
42 int (*emit)(struct nouveau_fence *); 49
43 int (*sync)(struct nouveau_fence *, struct nouveau_channel *, 50 wait_queue_head_t waiting;
44 struct nouveau_channel *); 51 bool uevent;
45 u32 (*read)(struct nouveau_channel *);
46}; 52};
47 53
48#define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence) 54#define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence)
@@ -60,13 +66,31 @@ u32 nv10_fence_read(struct nouveau_channel *);
60void nv10_fence_context_del(struct nouveau_channel *); 66void nv10_fence_context_del(struct nouveau_channel *);
61void nv10_fence_destroy(struct nouveau_drm *); 67void nv10_fence_destroy(struct nouveau_drm *);
62int nv10_fence_create(struct nouveau_drm *); 68int nv10_fence_create(struct nouveau_drm *);
69
70int nv17_fence_create(struct nouveau_drm *);
63void nv17_fence_resume(struct nouveau_drm *drm); 71void nv17_fence_resume(struct nouveau_drm *drm);
64 72
65int nv50_fence_create(struct nouveau_drm *); 73int nv50_fence_create(struct nouveau_drm *);
66int nv84_fence_create(struct nouveau_drm *); 74int nv84_fence_create(struct nouveau_drm *);
67int nvc0_fence_create(struct nouveau_drm *); 75int nvc0_fence_create(struct nouveau_drm *);
68u64 nvc0_fence_crtc(struct nouveau_channel *, int crtc);
69 76
70int nouveau_flip_complete(void *chan); 77int nouveau_flip_complete(void *chan);
71 78
79struct nv84_fence_chan {
80 struct nouveau_fence_chan base;
81 struct nouveau_vma vma;
82 struct nouveau_vma vma_gart;
83 struct nouveau_vma dispc_vma[4];
84};
85
86struct nv84_fence_priv {
87 struct nouveau_fence_priv base;
88 struct nouveau_bo *bo;
89 struct nouveau_bo *bo_gart;
90 u32 *suspend;
91};
92
93u64 nv84_fence_crtc(struct nouveau_channel *, int);
94int nv84_fence_context_new(struct nouveau_channel *);
95
72#endif 96#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 8bf695c52f95..b4b4d0c1f4af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -24,8 +24,6 @@
24 * 24 *
25 */ 25 */
26 26
27#include <linux/dma-buf.h>
28
29#include <subdev/fb.h> 27#include <subdev/fb.h>
30 28
31#include "nouveau_drm.h" 29#include "nouveau_drm.h"
@@ -205,6 +203,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
205 struct drm_file *file_priv) 203 struct drm_file *file_priv)
206{ 204{
207 struct nouveau_drm *drm = nouveau_drm(dev); 205 struct nouveau_drm *drm = nouveau_drm(dev);
206 struct nouveau_cli *cli = nouveau_cli(file_priv);
208 struct nouveau_fb *pfb = nouveau_fb(drm->device); 207 struct nouveau_fb *pfb = nouveau_fb(drm->device);
209 struct drm_nouveau_gem_new *req = data; 208 struct drm_nouveau_gem_new *req = data;
210 struct nouveau_bo *nvbo = NULL; 209 struct nouveau_bo *nvbo = NULL;
@@ -213,7 +212,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
213 drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping; 212 drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;
214 213
215 if (!pfb->memtype_valid(pfb, req->info.tile_flags)) { 214 if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
216 NV_ERROR(drm, "bad page flags: 0x%08x\n", req->info.tile_flags); 215 NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
217 return -EINVAL; 216 return -EINVAL;
218 } 217 }
219 218
@@ -315,16 +314,18 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
315 struct drm_nouveau_gem_pushbuf_bo *pbbo, 314 struct drm_nouveau_gem_pushbuf_bo *pbbo,
316 int nr_buffers, struct validate_op *op) 315 int nr_buffers, struct validate_op *op)
317{ 316{
317 struct nouveau_cli *cli = nouveau_cli(file_priv);
318 struct drm_device *dev = chan->drm->dev; 318 struct drm_device *dev = chan->drm->dev;
319 struct nouveau_drm *drm = nouveau_drm(dev); 319 struct nouveau_drm *drm = nouveau_drm(dev);
320 uint32_t sequence; 320 uint32_t sequence;
321 int trycnt = 0; 321 int trycnt = 0;
322 int ret, i; 322 int ret, i;
323 struct nouveau_bo *res_bo = NULL;
323 324
324 sequence = atomic_add_return(1, &drm->ttm.validate_sequence); 325 sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
325retry: 326retry:
326 if (++trycnt > 100000) { 327 if (++trycnt > 100000) {
327 NV_ERROR(drm, "%s failed and gave up.\n", __func__); 328 NV_ERROR(cli, "%s failed and gave up.\n", __func__);
328 return -EINVAL; 329 return -EINVAL;
329 } 330 }
330 331
@@ -335,14 +336,19 @@ retry:
335 336
336 gem = drm_gem_object_lookup(dev, file_priv, b->handle); 337 gem = drm_gem_object_lookup(dev, file_priv, b->handle);
337 if (!gem) { 338 if (!gem) {
338 NV_ERROR(drm, "Unknown handle 0x%08x\n", b->handle); 339 NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle);
339 validate_fini(op, NULL); 340 validate_fini(op, NULL);
340 return -ENOENT; 341 return -ENOENT;
341 } 342 }
342 nvbo = gem->driver_private; 343 nvbo = gem->driver_private;
344 if (nvbo == res_bo) {
345 res_bo = NULL;
346 drm_gem_object_unreference_unlocked(gem);
347 continue;
348 }
343 349
344 if (nvbo->reserved_by && nvbo->reserved_by == file_priv) { 350 if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
345 NV_ERROR(drm, "multiple instances of buffer %d on " 351 NV_ERROR(cli, "multiple instances of buffer %d on "
346 "validation list\n", b->handle); 352 "validation list\n", b->handle);
347 drm_gem_object_unreference_unlocked(gem); 353 drm_gem_object_unreference_unlocked(gem);
348 validate_fini(op, NULL); 354 validate_fini(op, NULL);
@@ -352,15 +358,19 @@ retry:
352 ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence); 358 ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
353 if (ret) { 359 if (ret) {
354 validate_fini(op, NULL); 360 validate_fini(op, NULL);
355 if (unlikely(ret == -EAGAIN)) 361 if (unlikely(ret == -EAGAIN)) {
356 ret = ttm_bo_wait_unreserved(&nvbo->bo, true); 362 sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
357 drm_gem_object_unreference_unlocked(gem); 363 ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
364 sequence);
365 if (!ret)
366 res_bo = nvbo;
367 }
358 if (unlikely(ret)) { 368 if (unlikely(ret)) {
369 drm_gem_object_unreference_unlocked(gem);
359 if (ret != -ERESTARTSYS) 370 if (ret != -ERESTARTSYS)
360 NV_ERROR(drm, "fail reserve\n"); 371 NV_ERROR(cli, "fail reserve\n");
361 return ret; 372 return ret;
362 } 373 }
363 goto retry;
364 } 374 }
365 375
366 b->user_priv = (uint64_t)(unsigned long)nvbo; 376 b->user_priv = (uint64_t)(unsigned long)nvbo;
@@ -376,12 +386,14 @@ retry:
376 if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART) 386 if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
377 list_add_tail(&nvbo->entry, &op->gart_list); 387 list_add_tail(&nvbo->entry, &op->gart_list);
378 else { 388 else {
379 NV_ERROR(drm, "invalid valid domains: 0x%08x\n", 389 NV_ERROR(cli, "invalid valid domains: 0x%08x\n",
380 b->valid_domains); 390 b->valid_domains);
381 list_add_tail(&nvbo->entry, &op->both_list); 391 list_add_tail(&nvbo->entry, &op->both_list);
382 validate_fini(op, NULL); 392 validate_fini(op, NULL);
383 return -EINVAL; 393 return -EINVAL;
384 } 394 }
395 if (nvbo == res_bo)
396 goto retry;
385 } 397 }
386 398
387 return 0; 399 return 0;
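
validate_init() changes how reservation deadlocks are broken: instead of unreserving and spinning on ttm_bo_wait_unreserved(), an -EAGAIN now takes a fresh validation sequence number and claims the contended buffer through ttm_bo_reserve_slowpath(), which blocks until that reservation can succeed; res_bo remembers it so the restarted list walk skips re-reserving it. The contention path, reduced:

ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
if (ret == -EAGAIN) {
        /* would deadlock against a concurrent validation: take a new
         * ticket, then block until this bo can be reserved safely */
        sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
        ret = ttm_bo_reserve_slowpath(&nvbo->bo, true, sequence);
        if (!ret)
                res_bo = nvbo;  /* already reserved: skip on the retry */
}
/* on success the caller restarts the walk (goto retry) and the
 * res_bo check at the top of the loop steps over this buffer */
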
@@ -407,8 +419,9 @@ validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
407} 419}
408 420
409static int 421static int
410validate_list(struct nouveau_channel *chan, struct list_head *list, 422validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
411 struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr) 423 struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
424 uint64_t user_pbbo_ptr)
412{ 425{
413 struct nouveau_drm *drm = chan->drm; 426 struct nouveau_drm *drm = chan->drm;
414 struct drm_nouveau_gem_pushbuf_bo __user *upbbo = 427 struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
@@ -421,7 +434,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
421 434
422 ret = validate_sync(chan, nvbo); 435 ret = validate_sync(chan, nvbo);
423 if (unlikely(ret)) { 436 if (unlikely(ret)) {
424 NV_ERROR(drm, "fail pre-validate sync\n"); 437 NV_ERROR(cli, "fail pre-validate sync\n");
425 return ret; 438 return ret;
426 } 439 }
427 440
@@ -429,20 +442,20 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
429 b->write_domains, 442 b->write_domains,
430 b->valid_domains); 443 b->valid_domains);
431 if (unlikely(ret)) { 444 if (unlikely(ret)) {
432 NV_ERROR(drm, "fail set_domain\n"); 445 NV_ERROR(cli, "fail set_domain\n");
433 return ret; 446 return ret;
434 } 447 }
435 448
436 ret = nouveau_bo_validate(nvbo, true, false); 449 ret = nouveau_bo_validate(nvbo, true, false);
437 if (unlikely(ret)) { 450 if (unlikely(ret)) {
438 if (ret != -ERESTARTSYS) 451 if (ret != -ERESTARTSYS)
439 NV_ERROR(drm, "fail ttm_validate\n"); 452 NV_ERROR(cli, "fail ttm_validate\n");
440 return ret; 453 return ret;
441 } 454 }
442 455
443 ret = validate_sync(chan, nvbo); 456 ret = validate_sync(chan, nvbo);
444 if (unlikely(ret)) { 457 if (unlikely(ret)) {
445 NV_ERROR(drm, "fail post-validate sync\n"); 458 NV_ERROR(cli, "fail post-validate sync\n");
446 return ret; 459 return ret;
447 } 460 }
448 461
@@ -478,7 +491,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
478 uint64_t user_buffers, int nr_buffers, 491 uint64_t user_buffers, int nr_buffers,
479 struct validate_op *op, int *apply_relocs) 492 struct validate_op *op, int *apply_relocs)
480{ 493{
481 struct nouveau_drm *drm = chan->drm; 494 struct nouveau_cli *cli = nouveau_cli(file_priv);
482 int ret, relocs = 0; 495 int ret, relocs = 0;
483 496
484 INIT_LIST_HEAD(&op->vram_list); 497 INIT_LIST_HEAD(&op->vram_list);
@@ -491,32 +504,32 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
491 ret = validate_init(chan, file_priv, pbbo, nr_buffers, op); 504 ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
492 if (unlikely(ret)) { 505 if (unlikely(ret)) {
493 if (ret != -ERESTARTSYS) 506 if (ret != -ERESTARTSYS)
494 NV_ERROR(drm, "validate_init\n"); 507 NV_ERROR(cli, "validate_init\n");
495 return ret; 508 return ret;
496 } 509 }
497 510
498 ret = validate_list(chan, &op->vram_list, pbbo, user_buffers); 511 ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers);
499 if (unlikely(ret < 0)) { 512 if (unlikely(ret < 0)) {
500 if (ret != -ERESTARTSYS) 513 if (ret != -ERESTARTSYS)
501 NV_ERROR(drm, "validate vram_list\n"); 514 NV_ERROR(cli, "validate vram_list\n");
502 validate_fini(op, NULL); 515 validate_fini(op, NULL);
503 return ret; 516 return ret;
504 } 517 }
505 relocs += ret; 518 relocs += ret;
506 519
507 ret = validate_list(chan, &op->gart_list, pbbo, user_buffers); 520 ret = validate_list(chan, cli, &op->gart_list, pbbo, user_buffers);
508 if (unlikely(ret < 0)) { 521 if (unlikely(ret < 0)) {
509 if (ret != -ERESTARTSYS) 522 if (ret != -ERESTARTSYS)
510 NV_ERROR(drm, "validate gart_list\n"); 523 NV_ERROR(cli, "validate gart_list\n");
511 validate_fini(op, NULL); 524 validate_fini(op, NULL);
512 return ret; 525 return ret;
513 } 526 }
514 relocs += ret; 527 relocs += ret;
515 528
516 ret = validate_list(chan, &op->both_list, pbbo, user_buffers); 529 ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers);
517 if (unlikely(ret < 0)) { 530 if (unlikely(ret < 0)) {
518 if (ret != -ERESTARTSYS) 531 if (ret != -ERESTARTSYS)
519 NV_ERROR(drm, "validate both_list\n"); 532 NV_ERROR(cli, "validate both_list\n");
520 validate_fini(op, NULL); 533 validate_fini(op, NULL);
521 return ret; 534 return ret;
522 } 535 }
@@ -545,11 +558,10 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
545} 558}
546 559
547static int 560static int
548nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev, 561nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
549 struct drm_nouveau_gem_pushbuf *req, 562 struct drm_nouveau_gem_pushbuf *req,
550 struct drm_nouveau_gem_pushbuf_bo *bo) 563 struct drm_nouveau_gem_pushbuf_bo *bo)
551{ 564{
552 struct nouveau_drm *drm = nouveau_drm(dev);
553 struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL; 565 struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
554 int ret = 0; 566 int ret = 0;
555 unsigned i; 567 unsigned i;
@@ -565,7 +577,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
565 uint32_t data; 577 uint32_t data;
566 578
567 if (unlikely(r->bo_index > req->nr_buffers)) { 579 if (unlikely(r->bo_index > req->nr_buffers)) {
568 NV_ERROR(drm, "reloc bo index invalid\n"); 580 NV_ERROR(cli, "reloc bo index invalid\n");
569 ret = -EINVAL; 581 ret = -EINVAL;
570 break; 582 break;
571 } 583 }
@@ -575,7 +587,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
575 continue; 587 continue;
576 588
577 if (unlikely(r->reloc_bo_index > req->nr_buffers)) { 589 if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
578 NV_ERROR(drm, "reloc container bo index invalid\n"); 590 NV_ERROR(cli, "reloc container bo index invalid\n");
579 ret = -EINVAL; 591 ret = -EINVAL;
580 break; 592 break;
581 } 593 }
@@ -583,7 +595,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
583 595
584 if (unlikely(r->reloc_bo_offset + 4 > 596 if (unlikely(r->reloc_bo_offset + 4 >
585 nvbo->bo.mem.num_pages << PAGE_SHIFT)) { 597 nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
586 NV_ERROR(drm, "reloc outside of bo\n"); 598 NV_ERROR(cli, "reloc outside of bo\n");
587 ret = -EINVAL; 599 ret = -EINVAL;
588 break; 600 break;
589 } 601 }
@@ -592,7 +604,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
592 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, 604 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
593 &nvbo->kmap); 605 &nvbo->kmap);
594 if (ret) { 606 if (ret) {
595 NV_ERROR(drm, "failed kmap for reloc\n"); 607 NV_ERROR(cli, "failed kmap for reloc\n");
596 break; 608 break;
597 } 609 }
598 nvbo->validate_mapped = true; 610 nvbo->validate_mapped = true;
@@ -617,7 +629,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
617 ret = ttm_bo_wait(&nvbo->bo, false, false, false); 629 ret = ttm_bo_wait(&nvbo->bo, false, false, false);
618 spin_unlock(&nvbo->bo.bdev->fence_lock); 630 spin_unlock(&nvbo->bo.bdev->fence_lock);
619 if (ret) { 631 if (ret) {
620 NV_ERROR(drm, "reloc wait_idle failed: %d\n", ret); 632 NV_ERROR(cli, "reloc wait_idle failed: %d\n", ret);
621 break; 633 break;
622 } 634 }
623 635
@@ -633,6 +645,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
633 struct drm_file *file_priv) 645 struct drm_file *file_priv)
634{ 646{
635 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev); 647 struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
648 struct nouveau_cli *cli = nouveau_cli(file_priv);
636 struct nouveau_abi16_chan *temp; 649 struct nouveau_abi16_chan *temp;
637 struct nouveau_drm *drm = nouveau_drm(dev); 650 struct nouveau_drm *drm = nouveau_drm(dev);
638 struct drm_nouveau_gem_pushbuf *req = data; 651 struct drm_nouveau_gem_pushbuf *req = data;
@@ -662,19 +675,19 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
662 goto out_next; 675 goto out_next;
663 676
664 if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) { 677 if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
665 NV_ERROR(drm, "pushbuf push count exceeds limit: %d max %d\n", 678 NV_ERROR(cli, "pushbuf push count exceeds limit: %d max %d\n",
666 req->nr_push, NOUVEAU_GEM_MAX_PUSH); 679 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
667 return nouveau_abi16_put(abi16, -EINVAL); 680 return nouveau_abi16_put(abi16, -EINVAL);
668 } 681 }
669 682
670 if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) { 683 if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
671 NV_ERROR(drm, "pushbuf bo count exceeds limit: %d max %d\n", 684 NV_ERROR(cli, "pushbuf bo count exceeds limit: %d max %d\n",
672 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS); 685 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
673 return nouveau_abi16_put(abi16, -EINVAL); 686 return nouveau_abi16_put(abi16, -EINVAL);
674 } 687 }
675 688
676 if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) { 689 if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
677 NV_ERROR(drm, "pushbuf reloc count exceeds limit: %d max %d\n", 690 NV_ERROR(cli, "pushbuf reloc count exceeds limit: %d max %d\n",
678 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS); 691 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
679 return nouveau_abi16_put(abi16, -EINVAL); 692 return nouveau_abi16_put(abi16, -EINVAL);
680 } 693 }
@@ -692,7 +705,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
692 /* Ensure all push buffers are on validate list */ 705 /* Ensure all push buffers are on validate list */
693 for (i = 0; i < req->nr_push; i++) { 706 for (i = 0; i < req->nr_push; i++) {
694 if (push[i].bo_index >= req->nr_buffers) { 707 if (push[i].bo_index >= req->nr_buffers) {
695 NV_ERROR(drm, "push %d buffer not in list\n", i); 708 NV_ERROR(cli, "push %d buffer not in list\n", i);
696 ret = -EINVAL; 709 ret = -EINVAL;
697 goto out_prevalid; 710 goto out_prevalid;
698 } 711 }
@@ -703,15 +716,15 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
703 req->nr_buffers, &op, &do_reloc); 716 req->nr_buffers, &op, &do_reloc);
704 if (ret) { 717 if (ret) {
705 if (ret != -ERESTARTSYS) 718 if (ret != -ERESTARTSYS)
706 NV_ERROR(drm, "validate: %d\n", ret); 719 NV_ERROR(cli, "validate: %d\n", ret);
707 goto out_prevalid; 720 goto out_prevalid;
708 } 721 }
709 722
710 /* Apply any relocations that are required */ 723 /* Apply any relocations that are required */
711 if (do_reloc) { 724 if (do_reloc) {
712 ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo); 725 ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
713 if (ret) { 726 if (ret) {
714 NV_ERROR(drm, "reloc apply: %d\n", ret); 727 NV_ERROR(cli, "reloc apply: %d\n", ret);
715 goto out; 728 goto out;
716 } 729 }
717 } 730 }
@@ -719,7 +732,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
719 if (chan->dma.ib_max) { 732 if (chan->dma.ib_max) {
720 ret = nouveau_dma_wait(chan, req->nr_push + 1, 16); 733 ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
721 if (ret) { 734 if (ret) {
722 NV_ERROR(drm, "nv50cal_space: %d\n", ret); 735 NV_ERROR(cli, "nv50cal_space: %d\n", ret);
723 goto out; 736 goto out;
724 } 737 }
725 738
@@ -734,7 +747,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
734 if (nv_device(drm->device)->chipset >= 0x25) { 747 if (nv_device(drm->device)->chipset >= 0x25) {
735 ret = RING_SPACE(chan, req->nr_push * 2); 748 ret = RING_SPACE(chan, req->nr_push * 2);
736 if (ret) { 749 if (ret) {
737 NV_ERROR(drm, "cal_space: %d\n", ret); 750 NV_ERROR(cli, "cal_space: %d\n", ret);
738 goto out; 751 goto out;
739 } 752 }
740 753
@@ -748,7 +761,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
748 } else { 761 } else {
749 ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS)); 762 ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
750 if (ret) { 763 if (ret) {
751 NV_ERROR(drm, "jmp_space: %d\n", ret); 764 NV_ERROR(cli, "jmp_space: %d\n", ret);
752 goto out; 765 goto out;
753 } 766 }
754 767
@@ -784,9 +797,9 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
784 } 797 }
785 } 798 }
786 799
787 ret = nouveau_fence_new(chan, &fence); 800 ret = nouveau_fence_new(chan, false, &fence);
788 if (ret) { 801 if (ret) {
789 NV_ERROR(drm, "error fencing pushbuf: %d\n", ret); 802 NV_ERROR(cli, "error fencing pushbuf: %d\n", ret);
790 WIND_RING(chan); 803 WIND_RING(chan);
791 goto out; 804 goto out;
792 } 805 }
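
The nouveau_gem.c changes above are mostly mechanical: every NV_ERROR() in the validate/pushbuf paths now takes the per-open-file nouveau_cli instead of the device-wide nouveau_drm, so errors triggered by a misbehaving client can be attributed to that client in the kernel log. A minimal sketch of what such a per-client variant could expand to, assuming the client carries a name string (the real macro lives in nouveau_drm.h, outside this diff):

	/* Hypothetical expansion for illustration only; the actual
	 * NV_ERROR() definition is not part of this excerpt. */
	#define NV_ERROR(cli, fmt, args...) \
		printk(KERN_ERR "nouveau [%s]: " fmt, (cli)->name, ##args)
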
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index 5c1049236d22..8d7a3f0aeb86 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -35,9 +35,11 @@ extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
35extern int nouveau_gem_ioctl_info(struct drm_device *, void *, 35extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
36 struct drm_file *); 36 struct drm_file *);
37 37
38extern struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev, 38extern int nouveau_gem_prime_pin(struct drm_gem_object *);
39 struct drm_gem_object *obj, int flags); 39extern struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *);
40extern struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev, 40extern struct drm_gem_object *nouveau_gem_prime_import_sg_table(
41 struct dma_buf *dma_buf); 41 struct drm_device *, size_t size, struct sg_table *);
42extern void *nouveau_gem_prime_vmap(struct drm_gem_object *);
43extern void nouveau_gem_prime_vunmap(struct drm_gem_object *, void *);
42 44
43#endif 45#endif
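
With the export/import entry points gone, nouveau_gem.h now advertises the fine-grained callbacks consumed by the shared PRIME helpers: pin the backing pages, hand out an sg_table, rebuild an object from an imported sg_table, and vmap/vunmap it. A sketch of how these hooks would be wired into the driver structure, assuming the drm_driver field names introduced by the PRIME helper rework in this pull (the actual hookup happens in nouveau_drm.c, not shown here):

	static struct drm_driver nouveau_driver_sketch = {
		/* ... */
		.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
		.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
		.gem_prime_export	= drm_gem_prime_export,
		.gem_prime_import	= drm_gem_prime_import,
		.gem_prime_pin		= nouveau_gem_prime_pin,
		.gem_prime_get_sg_table	= nouveau_gem_prime_get_sg_table,
		.gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
		.gem_prime_vmap		= nouveau_gem_prime_vmap,
		.gem_prime_vunmap	= nouveau_gem_prime_vunmap,
	};
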
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index a701ff5ffa5b..bb54098c6d97 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -409,6 +409,81 @@ static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp,
409 NULL, 0); 409 NULL, 0);
410 410
411static ssize_t 411static ssize_t
412nouveau_hwmon_show_temp1_auto_point1_pwm(struct device *d,
413 struct device_attribute *a, char *buf)
414{
415 return snprintf(buf, PAGE_SIZE, "%d\n", 100);
416}
417static SENSOR_DEVICE_ATTR(temp1_auto_point1_pwm, S_IRUGO,
418 nouveau_hwmon_show_temp1_auto_point1_pwm, NULL, 0);
419
420static ssize_t
421nouveau_hwmon_temp1_auto_point1_temp(struct device *d,
422 struct device_attribute *a, char *buf)
423{
424 struct drm_device *dev = dev_get_drvdata(d);
425 struct nouveau_drm *drm = nouveau_drm(dev);
426 struct nouveau_therm *therm = nouveau_therm(drm->device);
427
428 return snprintf(buf, PAGE_SIZE, "%d\n",
429 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST) * 1000);
430}
431static ssize_t
432nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d,
433 struct device_attribute *a,
434 const char *buf, size_t count)
435{
436 struct drm_device *dev = dev_get_drvdata(d);
437 struct nouveau_drm *drm = nouveau_drm(dev);
438 struct nouveau_therm *therm = nouveau_therm(drm->device);
439 long value;
440
441 if (kstrtol(buf, 10, &value) == -EINVAL)
442 return count;
443
444 therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST,
445 value / 1000);
446
447 return count;
448}
449static SENSOR_DEVICE_ATTR(temp1_auto_point1_temp, S_IRUGO | S_IWUSR,
450 nouveau_hwmon_temp1_auto_point1_temp,
451 nouveau_hwmon_set_temp1_auto_point1_temp, 0);
452
453static ssize_t
454nouveau_hwmon_temp1_auto_point1_temp_hyst(struct device *d,
455 struct device_attribute *a, char *buf)
456{
457 struct drm_device *dev = dev_get_drvdata(d);
458 struct nouveau_drm *drm = nouveau_drm(dev);
459 struct nouveau_therm *therm = nouveau_therm(drm->device);
460
461 return snprintf(buf, PAGE_SIZE, "%d\n",
462 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000);
463}
464static ssize_t
465nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d,
466 struct device_attribute *a,
467 const char *buf, size_t count)
468{
469 struct drm_device *dev = dev_get_drvdata(d);
470 struct nouveau_drm *drm = nouveau_drm(dev);
471 struct nouveau_therm *therm = nouveau_therm(drm->device);
472 long value;
473
474 if (kstrtol(buf, 10, &value) == -EINVAL)
475 return count;
476
477 therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_FAN_BOOST_HYST,
478 value / 1000);
479
480 return count;
481}
482static SENSOR_DEVICE_ATTR(temp1_auto_point1_temp_hyst, S_IRUGO | S_IWUSR,
483 nouveau_hwmon_temp1_auto_point1_temp_hyst,
484 nouveau_hwmon_set_temp1_auto_point1_temp_hyst, 0);
485
486static ssize_t
412nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf) 487nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf)
413{ 488{
414 struct drm_device *dev = dev_get_drvdata(d); 489 struct drm_device *dev = dev_get_drvdata(d);
@@ -439,6 +514,38 @@ static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, nouveau_hwmon_max_temp,
439 0); 514 0);
440 515
441static ssize_t 516static ssize_t
517nouveau_hwmon_max_temp_hyst(struct device *d, struct device_attribute *a,
518 char *buf)
519{
520 struct drm_device *dev = dev_get_drvdata(d);
521 struct nouveau_drm *drm = nouveau_drm(dev);
522 struct nouveau_therm *therm = nouveau_therm(drm->device);
523
524 return snprintf(buf, PAGE_SIZE, "%d\n",
525 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST) * 1000);
526}
527static ssize_t
528nouveau_hwmon_set_max_temp_hyst(struct device *d, struct device_attribute *a,
529 const char *buf, size_t count)
530{
531 struct drm_device *dev = dev_get_drvdata(d);
532 struct nouveau_drm *drm = nouveau_drm(dev);
533 struct nouveau_therm *therm = nouveau_therm(drm->device);
534 long value;
535
536 if (kstrtol(buf, 10, &value) == -EINVAL)
537 return count;
538
539 therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_DOWN_CLK_HYST,
540 value / 1000);
541
542 return count;
543}
544static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR,
545 nouveau_hwmon_max_temp_hyst,
546 nouveau_hwmon_set_max_temp_hyst, 0);
547
548static ssize_t
442nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a, 549nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a,
443 char *buf) 550 char *buf)
444{ 551{
@@ -471,6 +578,107 @@ static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR,
471 nouveau_hwmon_set_critical_temp, 578 nouveau_hwmon_set_critical_temp,
472 0); 579 0);
473 580
581static ssize_t
582nouveau_hwmon_critical_temp_hyst(struct device *d, struct device_attribute *a,
583 char *buf)
584{
585 struct drm_device *dev = dev_get_drvdata(d);
586 struct nouveau_drm *drm = nouveau_drm(dev);
587 struct nouveau_therm *therm = nouveau_therm(drm->device);
588
589 return snprintf(buf, PAGE_SIZE, "%d\n",
590 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST) * 1000);
591}
592static ssize_t
593nouveau_hwmon_set_critical_temp_hyst(struct device *d,
594 struct device_attribute *a,
595 const char *buf,
596 size_t count)
597{
598 struct drm_device *dev = dev_get_drvdata(d);
599 struct nouveau_drm *drm = nouveau_drm(dev);
600 struct nouveau_therm *therm = nouveau_therm(drm->device);
601 long value;
602
603 if (kstrtol(buf, 10, &value) == -EINVAL)
604 return count;
605
606 therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_CRITICAL_HYST,
607 value / 1000);
608
609 return count;
610}
611static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO | S_IWUSR,
612 nouveau_hwmon_critical_temp_hyst,
613 nouveau_hwmon_set_critical_temp_hyst, 0);
614static ssize_t
615nouveau_hwmon_emergency_temp(struct device *d, struct device_attribute *a,
616 char *buf)
617{
618 struct drm_device *dev = dev_get_drvdata(d);
619 struct nouveau_drm *drm = nouveau_drm(dev);
620 struct nouveau_therm *therm = nouveau_therm(drm->device);
621
622 return snprintf(buf, PAGE_SIZE, "%d\n",
623 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN) * 1000);
624}
625static ssize_t
626nouveau_hwmon_set_emergency_temp(struct device *d, struct device_attribute *a,
627 const char *buf,
628 size_t count)
629{
630 struct drm_device *dev = dev_get_drvdata(d);
631 struct nouveau_drm *drm = nouveau_drm(dev);
632 struct nouveau_therm *therm = nouveau_therm(drm->device);
633 long value;
634
635 if (kstrtol(buf, 10, &value) == -EINVAL)
636 return count;
637
638 therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN, value / 1000);
639
640 return count;
641}
642static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO | S_IWUSR,
643 nouveau_hwmon_emergency_temp,
644 nouveau_hwmon_set_emergency_temp,
645 0);
646
647static ssize_t
648nouveau_hwmon_emergency_temp_hyst(struct device *d, struct device_attribute *a,
649 char *buf)
650{
651 struct drm_device *dev = dev_get_drvdata(d);
652 struct nouveau_drm *drm = nouveau_drm(dev);
653 struct nouveau_therm *therm = nouveau_therm(drm->device);
654
655 return snprintf(buf, PAGE_SIZE, "%d\n",
656 therm->attr_get(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST) * 1000);
657}
658static ssize_t
659nouveau_hwmon_set_emergency_temp_hyst(struct device *d,
660 struct device_attribute *a,
661 const char *buf,
662 size_t count)
663{
664 struct drm_device *dev = dev_get_drvdata(d);
665 struct nouveau_drm *drm = nouveau_drm(dev);
666 struct nouveau_therm *therm = nouveau_therm(drm->device);
667 long value;
668
669 if (kstrtol(buf, 10, &value) == -EINVAL)
670 return count;
671
672 therm->attr_set(therm, NOUVEAU_THERM_ATTR_THRS_SHUTDOWN_HYST,
673 value / 1000);
674
675 return count;
676}
677static SENSOR_DEVICE_ATTR(temp1_emergency_hyst, S_IRUGO | S_IWUSR,
678 nouveau_hwmon_emergency_temp_hyst,
679 nouveau_hwmon_set_emergency_temp_hyst,
680 0);
681
474static ssize_t nouveau_hwmon_show_name(struct device *dev, 682static ssize_t nouveau_hwmon_show_name(struct device *dev,
475 struct device_attribute *attr, 683 struct device_attribute *attr,
476 char *buf) 684 char *buf)
@@ -490,7 +698,7 @@ static SENSOR_DEVICE_ATTR(update_rate, S_IRUGO,
490 NULL, 0); 698 NULL, 0);
491 699
492static ssize_t 700static ssize_t
493nouveau_hwmon_show_fan0_input(struct device *d, struct device_attribute *attr, 701nouveau_hwmon_show_fan1_input(struct device *d, struct device_attribute *attr,
494 char *buf) 702 char *buf)
495{ 703{
496 struct drm_device *dev = dev_get_drvdata(d); 704 struct drm_device *dev = dev_get_drvdata(d);
@@ -499,7 +707,7 @@ nouveau_hwmon_show_fan0_input(struct device *d, struct device_attribute *attr,
499 707
500 return snprintf(buf, PAGE_SIZE, "%d\n", therm->fan_sense(therm)); 708 return snprintf(buf, PAGE_SIZE, "%d\n", therm->fan_sense(therm));
501} 709}
502static SENSOR_DEVICE_ATTR(fan0_input, S_IRUGO, nouveau_hwmon_show_fan0_input, 710static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, nouveau_hwmon_show_fan1_input,
503 NULL, 0); 711 NULL, 0);
504 712
505static ssize_t 713static ssize_t
@@ -665,14 +873,21 @@ static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO | S_IWUSR,
665 873
666static struct attribute *hwmon_attributes[] = { 874static struct attribute *hwmon_attributes[] = {
667 &sensor_dev_attr_temp1_input.dev_attr.attr, 875 &sensor_dev_attr_temp1_input.dev_attr.attr,
876 &sensor_dev_attr_temp1_auto_point1_pwm.dev_attr.attr,
877 &sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr,
878 &sensor_dev_attr_temp1_auto_point1_temp_hyst.dev_attr.attr,
668 &sensor_dev_attr_temp1_max.dev_attr.attr, 879 &sensor_dev_attr_temp1_max.dev_attr.attr,
880 &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
669 &sensor_dev_attr_temp1_crit.dev_attr.attr, 881 &sensor_dev_attr_temp1_crit.dev_attr.attr,
882 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
883 &sensor_dev_attr_temp1_emergency.dev_attr.attr,
884 &sensor_dev_attr_temp1_emergency_hyst.dev_attr.attr,
670 &sensor_dev_attr_name.dev_attr.attr, 885 &sensor_dev_attr_name.dev_attr.attr,
671 &sensor_dev_attr_update_rate.dev_attr.attr, 886 &sensor_dev_attr_update_rate.dev_attr.attr,
672 NULL 887 NULL
673}; 888};
674static struct attribute *hwmon_fan_rpm_attributes[] = { 889static struct attribute *hwmon_fan_rpm_attributes[] = {
675 &sensor_dev_attr_fan0_input.dev_attr.attr, 890 &sensor_dev_attr_fan1_input.dev_attr.attr,
676 NULL 891 NULL
677}; 892};
678static struct attribute *hwmon_pwm_fan_attributes[] = { 893static struct attribute *hwmon_pwm_fan_attributes[] = {
@@ -717,7 +932,7 @@ nouveau_hwmon_init(struct drm_device *dev)
717 dev_set_drvdata(hwmon_dev, dev); 932 dev_set_drvdata(hwmon_dev, dev);
718 933
719 /* default sysfs entries */ 934 /* default sysfs entries */
720 ret = sysfs_create_group(&dev->pdev->dev.kobj, &hwmon_attrgroup); 935 ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_attrgroup);
721 if (ret) { 936 if (ret) {
722 if (ret) 937 if (ret)
723 goto error; 938 goto error;
@@ -728,7 +943,7 @@ nouveau_hwmon_init(struct drm_device *dev)
728 * the gpio entries for pwm fan control even when there's no 943 * the gpio entries for pwm fan control even when there's no
729 * actual fan connected to it... therm table? */ 944 * actual fan connected to it... therm table? */
730 if (therm->fan_get && therm->fan_get(therm) >= 0) { 945 if (therm->fan_get && therm->fan_get(therm) >= 0) {
731 ret = sysfs_create_group(&dev->pdev->dev.kobj, 946 ret = sysfs_create_group(&hwmon_dev->kobj,
732 &hwmon_pwm_fan_attrgroup); 947 &hwmon_pwm_fan_attrgroup);
733 if (ret) 948 if (ret)
734 goto error; 949 goto error;
@@ -736,7 +951,7 @@ nouveau_hwmon_init(struct drm_device *dev)
736 951
737 /* if the card can read the fan rpm */ 952 /* if the card can read the fan rpm */
738 if (therm->fan_sense(therm) >= 0) { 953 if (therm->fan_sense(therm) >= 0) {
739 ret = sysfs_create_group(&dev->pdev->dev.kobj, 954 ret = sysfs_create_group(&hwmon_dev->kobj,
740 &hwmon_fan_rpm_attrgroup); 955 &hwmon_fan_rpm_attrgroup);
741 if (ret) 956 if (ret)
742 goto error; 957 goto error;
@@ -764,10 +979,10 @@ nouveau_hwmon_fini(struct drm_device *dev)
764 struct nouveau_pm *pm = nouveau_pm(dev); 979 struct nouveau_pm *pm = nouveau_pm(dev);
765 980
766 if (pm->hwmon) { 981 if (pm->hwmon) {
767 sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup); 982 sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup);
768 sysfs_remove_group(&dev->pdev->dev.kobj, 983 sysfs_remove_group(&pm->hwmon->kobj,
769 &hwmon_pwm_fan_attrgroup); 984 &hwmon_pwm_fan_attrgroup);
770 sysfs_remove_group(&dev->pdev->dev.kobj, 985 sysfs_remove_group(&pm->hwmon->kobj,
771 &hwmon_fan_rpm_attrgroup); 986 &hwmon_fan_rpm_attrgroup);
772 987
773 hwmon_device_unregister(pm->hwmon); 988 hwmon_device_unregister(pm->hwmon);
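
Two fixes are folded into the hwmon changes above: the attribute groups are created on (and removed from) the hwmon class device's kobject rather than the PCI device's, so the files land under /sys/class/hwmon/hwmonN/ where libsensors looks for them, and fan0_input becomes fan1_input because hwmon channel numbering is 1-based. The corrected registration pattern, reduced to its essentials (error unwinding trimmed):

	struct device *hwmon_dev;
	int ret;

	hwmon_dev = hwmon_device_register(&dev->pdev->dev);
	if (IS_ERR(hwmon_dev))
		return PTR_ERR(hwmon_dev);
	dev_set_drvdata(hwmon_dev, dev);
	/* attach attributes to the class device, not the PCI device */
	ret = sysfs_create_group(&hwmon_dev->kobj, &hwmon_attrgroup);
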
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index b8e05ae38212..f53e10874cae 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -22,126 +22,42 @@
22 * Authors: Dave Airlie 22 * Authors: Dave Airlie
23 */ 23 */
24 24
25#include <linux/dma-buf.h>
26
27#include <drm/drmP.h> 25#include <drm/drmP.h>
28 26
29#include "nouveau_drm.h" 27#include "nouveau_drm.h"
30#include "nouveau_gem.h" 28#include "nouveau_gem.h"
31 29
32static struct sg_table *nouveau_gem_map_dma_buf(struct dma_buf_attachment *attachment, 30struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj)
33 enum dma_data_direction dir)
34{ 31{
35 struct nouveau_bo *nvbo = attachment->dmabuf->priv; 32 struct nouveau_bo *nvbo = nouveau_gem_object(obj);
36 struct drm_device *dev = nvbo->gem->dev;
37 int npages = nvbo->bo.num_pages; 33 int npages = nvbo->bo.num_pages;
38 struct sg_table *sg;
39 int nents;
40
41 mutex_lock(&dev->struct_mutex);
42 sg = drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
43 nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
44 mutex_unlock(&dev->struct_mutex);
45 return sg;
46}
47
48static void nouveau_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
49 struct sg_table *sg, enum dma_data_direction dir)
50{
51 dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
52 sg_free_table(sg);
53 kfree(sg);
54}
55
56static void nouveau_gem_dmabuf_release(struct dma_buf *dma_buf)
57{
58 struct nouveau_bo *nvbo = dma_buf->priv;
59
60 if (nvbo->gem->export_dma_buf == dma_buf) {
61 nvbo->gem->export_dma_buf = NULL;
62 drm_gem_object_unreference_unlocked(nvbo->gem);
63 }
64}
65
66static void *nouveau_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
67{
68 return NULL;
69}
70
71static void nouveau_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
72{
73 34
74} 35 return drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
75static void *nouveau_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
76{
77 return NULL;
78} 36}
79 37
80static void nouveau_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr) 38void *nouveau_gem_prime_vmap(struct drm_gem_object *obj)
81{ 39{
82 40 struct nouveau_bo *nvbo = nouveau_gem_object(obj);
83}
84
85static int nouveau_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
86{
87 return -EINVAL;
88}
89
90static void *nouveau_gem_prime_vmap(struct dma_buf *dma_buf)
91{
92 struct nouveau_bo *nvbo = dma_buf->priv;
93 struct drm_device *dev = nvbo->gem->dev;
94 int ret; 41 int ret;
95 42
96 mutex_lock(&dev->struct_mutex);
97 if (nvbo->vmapping_count) {
98 nvbo->vmapping_count++;
99 goto out_unlock;
100 }
101
102 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages, 43 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
103 &nvbo->dma_buf_vmap); 44 &nvbo->dma_buf_vmap);
104 if (ret) { 45 if (ret)
105 mutex_unlock(&dev->struct_mutex);
106 return ERR_PTR(ret); 46 return ERR_PTR(ret);
107 } 47
108 nvbo->vmapping_count = 1;
109out_unlock:
110 mutex_unlock(&dev->struct_mutex);
111 return nvbo->dma_buf_vmap.virtual; 48 return nvbo->dma_buf_vmap.virtual;
112} 49}
113 50
114static void nouveau_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr) 51void nouveau_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
115{ 52{
116 struct nouveau_bo *nvbo = dma_buf->priv; 53 struct nouveau_bo *nvbo = nouveau_gem_object(obj);
117 struct drm_device *dev = nvbo->gem->dev;
118 54
119 mutex_lock(&dev->struct_mutex); 55 ttm_bo_kunmap(&nvbo->dma_buf_vmap);
120 nvbo->vmapping_count--;
121 if (nvbo->vmapping_count == 0) {
122 ttm_bo_kunmap(&nvbo->dma_buf_vmap);
123 }
124 mutex_unlock(&dev->struct_mutex);
125} 56}
126 57
127static const struct dma_buf_ops nouveau_dmabuf_ops = { 58struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
128 .map_dma_buf = nouveau_gem_map_dma_buf, 59 size_t size,
129 .unmap_dma_buf = nouveau_gem_unmap_dma_buf, 60 struct sg_table *sg)
130 .release = nouveau_gem_dmabuf_release,
131 .kmap = nouveau_gem_kmap,
132 .kmap_atomic = nouveau_gem_kmap_atomic,
133 .kunmap = nouveau_gem_kunmap,
134 .kunmap_atomic = nouveau_gem_kunmap_atomic,
135 .mmap = nouveau_gem_prime_mmap,
136 .vmap = nouveau_gem_prime_vmap,
137 .vunmap = nouveau_gem_prime_vunmap,
138};
139
140static int
141nouveau_prime_new(struct drm_device *dev,
142 size_t size,
143 struct sg_table *sg,
144 struct nouveau_bo **pnvbo)
145{ 61{
146 struct nouveau_bo *nvbo; 62 struct nouveau_bo *nvbo;
147 u32 flags = 0; 63 u32 flags = 0;
@@ -150,24 +66,22 @@ nouveau_prime_new(struct drm_device *dev,
150 flags = TTM_PL_FLAG_TT; 66 flags = TTM_PL_FLAG_TT;
151 67
152 ret = nouveau_bo_new(dev, size, 0, flags, 0, 0, 68 ret = nouveau_bo_new(dev, size, 0, flags, 0, 0,
153 sg, pnvbo); 69 sg, &nvbo);
154 if (ret) 70 if (ret)
155 return ret; 71 return ERR_PTR(ret);
156 nvbo = *pnvbo;
157 72
158 nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART; 73 nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
159 nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size); 74 nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
160 if (!nvbo->gem) { 75 if (!nvbo->gem) {
161 nouveau_bo_ref(NULL, pnvbo); 76 nouveau_bo_ref(NULL, &nvbo);
162 return -ENOMEM; 77 return ERR_PTR(-ENOMEM);
163 } 78 }
164 79
165 nvbo->gem->driver_private = nvbo; 80 nvbo->gem->driver_private = nvbo;
166 return 0; 81 return nvbo->gem;
167} 82}
168 83
169struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev, 84int nouveau_gem_prime_pin(struct drm_gem_object *obj)
170 struct drm_gem_object *obj, int flags)
171{ 85{
172 struct nouveau_bo *nvbo = nouveau_gem_object(obj); 86 struct nouveau_bo *nvbo = nouveau_gem_object(obj);
173 int ret = 0; 87 int ret = 0;
@@ -175,52 +89,7 @@ struct dma_buf *nouveau_gem_prime_export(struct drm_device *dev,
175 /* pin buffer into GTT */ 89 /* pin buffer into GTT */
176 ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT); 90 ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
177 if (ret) 91 if (ret)
178 return ERR_PTR(-EINVAL); 92 return -EINVAL;
179
180 return dma_buf_export(nvbo, &nouveau_dmabuf_ops, obj->size, flags);
181}
182
183struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
184 struct dma_buf *dma_buf)
185{
186 struct dma_buf_attachment *attach;
187 struct sg_table *sg;
188 struct nouveau_bo *nvbo;
189 int ret;
190
191 if (dma_buf->ops == &nouveau_dmabuf_ops) {
192 nvbo = dma_buf->priv;
193 if (nvbo->gem) {
194 if (nvbo->gem->dev == dev) {
195 drm_gem_object_reference(nvbo->gem);
196 dma_buf_put(dma_buf);
197 return nvbo->gem;
198 }
199 }
200 }
201 /* need to attach */
202 attach = dma_buf_attach(dma_buf, dev->dev);
203 if (IS_ERR(attach))
204 return ERR_PTR(PTR_ERR(attach));
205
206 sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
207 if (IS_ERR(sg)) {
208 ret = PTR_ERR(sg);
209 goto fail_detach;
210 }
211
212 ret = nouveau_prime_new(dev, dma_buf->size, sg, &nvbo);
213 if (ret)
214 goto fail_unmap;
215
216 nvbo->gem->import_attach = attach;
217
218 return nvbo->gem;
219 93
220fail_unmap: 94 return 0;
221 dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
222fail_detach:
223 dma_buf_detach(dma_buf, attach);
224 return ERR_PTR(ret);
225} 95}
226
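
The diff above deletes the driver-local dma_buf_ops, including the hand-rolled vmap refcount (vmapping_count) and the struct_mutex juggling around it; the common PRIME layer now owns attachment, sg mapping, and vmap serialization, and nouveau only converts between GEM objects and sg_tables or kernel mappings. Roughly what the core does with the remaining hooks, as a sketch rather than the verbatim drm_prime.c code:

	/* Sketch: the shared helper pins the object, asks the driver for
	 * an sg_table, then DMA-maps it for the attaching device. */
	struct sg_table *map_sketch(struct dma_buf_attachment *attach,
				    enum dma_data_direction dir)
	{
		struct drm_gem_object *obj = attach->dmabuf->priv;
		struct sg_table *sgt;

		if (obj->dev->driver->gem_prime_pin(obj))
			return NULL;
		sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
		if (sgt)
			dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
		return sgt;
	}
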
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 39ffc07f906b..7e24cdf1cb39 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -490,8 +490,8 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
490 /* BIOS scripts usually take care of the backlight, thanks 490 /* BIOS scripts usually take care of the backlight, thanks
491 * Apple for your consistency. 491 * Apple for your consistency.
492 */ 492 */
493 if (dev->pci_device == 0x0179 || dev->pci_device == 0x0189 || 493 if (dev->pci_device == 0x0174 || dev->pci_device == 0x0179 ||
494 dev->pci_device == 0x0329) { 494 dev->pci_device == 0x0189 || dev->pci_device == 0x0329) {
495 if (mode == DRM_MODE_DPMS_ON) { 495 if (mode == DRM_MODE_DPMS_ON) {
496 nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31); 496 nv_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 0, 1 << 31);
497 nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1); 497 nv_mask(device, NV_PCRTC_GPIO_EXT, 3, 1);
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 2cd6fb8c548e..ad48444c385c 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -22,6 +22,9 @@
22 * Author: Ben Skeggs 22 * Author: Ben Skeggs
23 */ 23 */
24 24
25#include <core/object.h>
26#include <core/class.h>
27
25#include <drm/drmP.h> 28#include <drm/drmP.h>
26#include <drm/drm_crtc_helper.h> 29#include <drm/drm_crtc_helper.h>
27 30
@@ -31,6 +34,8 @@
31#include "nouveau_encoder.h" 34#include "nouveau_encoder.h"
32#include "nouveau_connector.h" 35#include "nouveau_connector.h"
33 36
37#include <subdev/i2c.h>
38
34int 39int
35nv04_display_early_init(struct drm_device *dev) 40nv04_display_early_init(struct drm_device *dev)
36{ 41{
@@ -53,6 +58,7 @@ int
53nv04_display_create(struct drm_device *dev) 58nv04_display_create(struct drm_device *dev)
54{ 59{
55 struct nouveau_drm *drm = nouveau_drm(dev); 60 struct nouveau_drm *drm = nouveau_drm(dev);
61 struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
56 struct dcb_table *dcb = &drm->vbios.dcb; 62 struct dcb_table *dcb = &drm->vbios.dcb;
57 struct drm_connector *connector, *ct; 63 struct drm_connector *connector, *ct;
58 struct drm_encoder *encoder; 64 struct drm_encoder *encoder;
@@ -71,6 +77,11 @@ nv04_display_create(struct drm_device *dev)
71 77
72 nouveau_hw_save_vga_fonts(dev, 1); 78 nouveau_hw_save_vga_fonts(dev, 1);
73 79
80 ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE, 0xd1500000,
81 NV04_DISP_CLASS, NULL, 0, &disp->core);
82 if (ret)
83 return ret;
84
74 nv04_crtc_create(dev, 0); 85 nv04_crtc_create(dev, 0);
75 if (nv_two_heads(dev)) 86 if (nv_two_heads(dev))
76 nv04_crtc_create(dev, 1); 87 nv04_crtc_create(dev, 1);
@@ -114,6 +125,11 @@ nv04_display_create(struct drm_device *dev)
114 } 125 }
115 } 126 }
116 127
128 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
129 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
130 nv_encoder->i2c = i2c->find(i2c, nv_encoder->dcb->i2c_index);
131 }
132
117 /* Save previous state */ 133 /* Save previous state */
118 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 134 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
119 crtc->funcs->save(crtc); 135 crtc->funcs->save(crtc);
@@ -140,7 +156,7 @@ nv04_display_destroy(struct drm_device *dev)
140 .crtc = crtc, 156 .crtc = crtc,
141 }; 157 };
142 158
143 crtc->funcs->set_config(&modeset); 159 drm_mode_set_config_internal(&modeset);
144 } 160 }
145 161
146 /* Restore state */ 162 /* Restore state */
diff --git a/drivers/gpu/drm/nouveau/nv04_display.h b/drivers/gpu/drm/nouveau/nv04_display.h
index 45322802e37d..a0a031dad13f 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.h
+++ b/drivers/gpu/drm/nouveau/nv04_display.h
@@ -80,6 +80,7 @@ struct nv04_display {
80 struct nv04_mode_state saved_reg; 80 struct nv04_mode_state saved_reg;
81 uint32_t saved_vga_font[4][16384]; 81 uint32_t saved_vga_font[4][16384];
82 uint32_t dac_users[4]; 82 uint32_t dac_users[4];
83 struct nouveau_object *core;
83}; 84};
84 85
85static inline struct nv04_display * 86static inline struct nv04_display *
diff --git a/drivers/gpu/drm/nouveau/nv04_fence.c b/drivers/gpu/drm/nouveau/nv04_fence.c
index a220b94ba9f2..94eadd1dd10a 100644
--- a/drivers/gpu/drm/nouveau/nv04_fence.c
+++ b/drivers/gpu/drm/nouveau/nv04_fence.c
@@ -78,6 +78,9 @@ nv04_fence_context_new(struct nouveau_channel *chan)
78 struct nv04_fence_chan *fctx = kzalloc(sizeof(*fctx), GFP_KERNEL); 78 struct nv04_fence_chan *fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
79 if (fctx) { 79 if (fctx) {
80 nouveau_fence_context_new(&fctx->base); 80 nouveau_fence_context_new(&fctx->base);
81 fctx->base.emit = nv04_fence_emit;
82 fctx->base.sync = nv04_fence_sync;
83 fctx->base.read = nv04_fence_read;
81 chan->fence = fctx; 84 chan->fence = fctx;
82 return 0; 85 return 0;
83 } 86 }
@@ -104,8 +107,5 @@ nv04_fence_create(struct nouveau_drm *drm)
104 priv->base.dtor = nv04_fence_destroy; 107 priv->base.dtor = nv04_fence_destroy;
105 priv->base.context_new = nv04_fence_context_new; 108 priv->base.context_new = nv04_fence_context_new;
106 priv->base.context_del = nv04_fence_context_del; 109 priv->base.context_del = nv04_fence_context_del;
107 priv->base.emit = nv04_fence_emit;
108 priv->base.sync = nv04_fence_sync;
109 priv->base.read = nv04_fence_read;
110 return 0; 110 return 0;
111} 111}
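
nv04_fence.c shows the pattern the fence rework applies across the board: emit/sync/read move out of the device-wide nouveau_fence_priv and into each channel's nouveau_fence_chan, so the synchronisation method can differ per channel instead of being fixed per device (the nv10/nv17 split below relies on this). Dispatch then goes through the channel context; a sketch, assuming the fence keeps a channel back-pointer as in nouveau_fence.h:

	static int nouveau_fence_emit_sketch(struct nouveau_fence *fence)
	{
		struct nouveau_fence_chan *fctx = fence->channel->fence;

		return fctx->emit(fence);	/* e.g. nv04_fence_emit */
	}
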
diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c
index 62e826a139b3..4a69ccdef9b4 100644
--- a/drivers/gpu/drm/nouveau/nv04_tv.c
+++ b/drivers/gpu/drm/nouveau/nv04_tv.c
@@ -184,14 +184,23 @@ static const struct drm_encoder_funcs nv04_tv_funcs = {
184 .destroy = nv04_tv_destroy, 184 .destroy = nv04_tv_destroy,
185}; 185};
186 186
187static const struct drm_encoder_helper_funcs nv04_tv_helper_funcs = {
188 .dpms = nv04_tv_dpms,
189 .save = drm_i2c_encoder_save,
190 .restore = drm_i2c_encoder_restore,
191 .mode_fixup = drm_i2c_encoder_mode_fixup,
192 .prepare = nv04_tv_prepare,
193 .commit = nv04_tv_commit,
194 .mode_set = nv04_tv_mode_set,
195 .detect = drm_i2c_encoder_detect,
196};
197
187int 198int
188nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry) 199nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
189{ 200{
190 struct nouveau_encoder *nv_encoder; 201 struct nouveau_encoder *nv_encoder;
191 struct drm_encoder *encoder; 202 struct drm_encoder *encoder;
192 struct drm_device *dev = connector->dev; 203 struct drm_device *dev = connector->dev;
193 struct drm_encoder_helper_funcs *hfuncs;
194 struct drm_encoder_slave_funcs *sfuncs;
195 struct nouveau_drm *drm = nouveau_drm(dev); 204 struct nouveau_drm *drm = nouveau_drm(dev);
196 struct nouveau_i2c *i2c = nouveau_i2c(drm->device); 205 struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
197 struct nouveau_i2c_port *port = i2c->find(i2c, entry->i2c_index); 206 struct nouveau_i2c_port *port = i2c->find(i2c, entry->i2c_index);
@@ -207,17 +216,11 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
207 if (!nv_encoder) 216 if (!nv_encoder)
208 return -ENOMEM; 217 return -ENOMEM;
209 218
210 hfuncs = kzalloc(sizeof(*hfuncs), GFP_KERNEL);
211 if (!hfuncs) {
212 ret = -ENOMEM;
213 goto fail_free;
214 }
215
216 /* Initialize the common members */ 219 /* Initialize the common members */
217 encoder = to_drm_encoder(nv_encoder); 220 encoder = to_drm_encoder(nv_encoder);
218 221
219 drm_encoder_init(dev, encoder, &nv04_tv_funcs, DRM_MODE_ENCODER_TVDAC); 222 drm_encoder_init(dev, encoder, &nv04_tv_funcs, DRM_MODE_ENCODER_TVDAC);
220 drm_encoder_helper_add(encoder, hfuncs); 223 drm_encoder_helper_add(encoder, &nv04_tv_helper_funcs);
221 224
222 encoder->possible_crtcs = entry->heads; 225 encoder->possible_crtcs = entry->heads;
223 encoder->possible_clones = 0; 226 encoder->possible_clones = 0;
@@ -230,30 +233,14 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
230 if (ret < 0) 233 if (ret < 0)
231 goto fail_cleanup; 234 goto fail_cleanup;
232 235
233 /* Fill the function pointers */
234 sfuncs = get_slave_funcs(encoder);
235
236 *hfuncs = (struct drm_encoder_helper_funcs) {
237 .dpms = nv04_tv_dpms,
238 .save = sfuncs->save,
239 .restore = sfuncs->restore,
240 .mode_fixup = sfuncs->mode_fixup,
241 .prepare = nv04_tv_prepare,
242 .commit = nv04_tv_commit,
243 .mode_set = nv04_tv_mode_set,
244 .detect = sfuncs->detect,
245 };
246
247 /* Attach it to the specified connector. */ 236 /* Attach it to the specified connector. */
248 sfuncs->create_resources(encoder, connector); 237 get_slave_funcs(encoder)->create_resources(encoder, connector);
249 drm_mode_connector_attach_encoder(connector, encoder); 238 drm_mode_connector_attach_encoder(connector, encoder);
250 239
251 return 0; 240 return 0;
252 241
253fail_cleanup: 242fail_cleanup:
254 drm_encoder_cleanup(encoder); 243 drm_encoder_cleanup(encoder);
255 kfree(hfuncs);
256fail_free:
257 kfree(nv_encoder); 244 kfree(nv_encoder);
258 return ret; 245 return ret;
259} 246}
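
nv04_tv previously kzalloc'd a drm_encoder_helper_funcs per encoder just to copy the slave functions into it; with the new drm_i2c_encoder_* trampolines exported from drm_encoder_slave.c (extended elsewhere in this pull), one static const table serves every encoder and the allocation plus its failure path disappear. Such a trampoline is presumably a thin forwarder along these lines (a sketch, not the verbatim helper):

	void drm_i2c_encoder_save_sketch(struct drm_encoder *encoder)
	{
		get_slave_funcs(encoder)->save(encoder);
	}
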
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index 03017f24d593..06f434f03fba 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -27,18 +27,7 @@
27 27
28#include "nouveau_drm.h" 28#include "nouveau_drm.h"
29#include "nouveau_dma.h" 29#include "nouveau_dma.h"
30#include "nouveau_fence.h" 30#include "nv10_fence.h"
31
32struct nv10_fence_chan {
33 struct nouveau_fence_chan base;
34};
35
36struct nv10_fence_priv {
37 struct nouveau_fence_priv base;
38 struct nouveau_bo *bo;
39 spinlock_t lock;
40 u32 sequence;
41};
42 31
43int 32int
44nv10_fence_emit(struct nouveau_fence *fence) 33nv10_fence_emit(struct nouveau_fence *fence)
@@ -61,45 +50,6 @@ nv10_fence_sync(struct nouveau_fence *fence,
61 return -ENODEV; 50 return -ENODEV;
62} 51}
63 52
64int
65nv17_fence_sync(struct nouveau_fence *fence,
66 struct nouveau_channel *prev, struct nouveau_channel *chan)
67{
68 struct nv10_fence_priv *priv = chan->drm->fence;
69 u32 value;
70 int ret;
71
72 if (!mutex_trylock(&prev->cli->mutex))
73 return -EBUSY;
74
75 spin_lock(&priv->lock);
76 value = priv->sequence;
77 priv->sequence += 2;
78 spin_unlock(&priv->lock);
79
80 ret = RING_SPACE(prev, 5);
81 if (!ret) {
82 BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
83 OUT_RING (prev, NvSema);
84 OUT_RING (prev, 0);
85 OUT_RING (prev, value + 0);
86 OUT_RING (prev, value + 1);
87 FIRE_RING (prev);
88 }
89
90 if (!ret && !(ret = RING_SPACE(chan, 5))) {
91 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
92 OUT_RING (chan, NvSema);
93 OUT_RING (chan, 0);
94 OUT_RING (chan, value + 1);
95 OUT_RING (chan, value + 2);
96 FIRE_RING (chan);
97 }
98
99 mutex_unlock(&prev->cli->mutex);
100 return 0;
101}
102
103u32 53u32
104nv10_fence_read(struct nouveau_channel *chan) 54nv10_fence_read(struct nouveau_channel *chan)
105{ 55{
@@ -115,39 +65,20 @@ nv10_fence_context_del(struct nouveau_channel *chan)
115 kfree(fctx); 65 kfree(fctx);
116} 66}
117 67
118static int 68int
119nv10_fence_context_new(struct nouveau_channel *chan) 69nv10_fence_context_new(struct nouveau_channel *chan)
120{ 70{
121 struct nv10_fence_priv *priv = chan->drm->fence;
122 struct nv10_fence_chan *fctx; 71 struct nv10_fence_chan *fctx;
123 int ret = 0;
124 72
125 fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); 73 fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
126 if (!fctx) 74 if (!fctx)
127 return -ENOMEM; 75 return -ENOMEM;
128 76
129 nouveau_fence_context_new(&fctx->base); 77 nouveau_fence_context_new(&fctx->base);
130 78 fctx->base.emit = nv10_fence_emit;
131 if (priv->bo) { 79 fctx->base.read = nv10_fence_read;
132 struct ttm_mem_reg *mem = &priv->bo->bo.mem; 80 fctx->base.sync = nv10_fence_sync;
133 struct nouveau_object *object; 81 return 0;
134 u32 start = mem->start * PAGE_SIZE;
135 u32 limit = mem->start + mem->size - 1;
136
137 ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
138 NvSema, 0x0002,
139 &(struct nv_dma_class) {
140 .flags = NV_DMA_TARGET_VRAM |
141 NV_DMA_ACCESS_RDWR,
142 .start = start,
143 .limit = limit,
144 }, sizeof(struct nv_dma_class),
145 &object);
146 }
147
148 if (ret)
149 nv10_fence_context_del(chan);
150 return ret;
151} 82}
152 83
153void 84void
@@ -162,18 +93,10 @@ nv10_fence_destroy(struct nouveau_drm *drm)
162 kfree(priv); 93 kfree(priv);
163} 94}
164 95
165void nv17_fence_resume(struct nouveau_drm *drm)
166{
167 struct nv10_fence_priv *priv = drm->fence;
168
169 nouveau_bo_wr32(priv->bo, 0, priv->sequence);
170}
171
172int 96int
173nv10_fence_create(struct nouveau_drm *drm) 97nv10_fence_create(struct nouveau_drm *drm)
174{ 98{
175 struct nv10_fence_priv *priv; 99 struct nv10_fence_priv *priv;
176 int ret = 0;
177 100
178 priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL); 101 priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
179 if (!priv) 102 if (!priv)
@@ -182,33 +105,6 @@ nv10_fence_create(struct nouveau_drm *drm)
182 priv->base.dtor = nv10_fence_destroy; 105 priv->base.dtor = nv10_fence_destroy;
183 priv->base.context_new = nv10_fence_context_new; 106 priv->base.context_new = nv10_fence_context_new;
184 priv->base.context_del = nv10_fence_context_del; 107 priv->base.context_del = nv10_fence_context_del;
185 priv->base.emit = nv10_fence_emit;
186 priv->base.read = nv10_fence_read;
187 priv->base.sync = nv10_fence_sync;
188 spin_lock_init(&priv->lock); 108 spin_lock_init(&priv->lock);
189 109 return 0;
190 if (nv_device(drm->device)->chipset >= 0x17) {
191 ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
192 0, 0x0000, NULL, &priv->bo);
193 if (!ret) {
194 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
195 if (!ret) {
196 ret = nouveau_bo_map(priv->bo);
197 if (ret)
198 nouveau_bo_unpin(priv->bo);
199 }
200 if (ret)
201 nouveau_bo_ref(NULL, &priv->bo);
202 }
203
204 if (ret == 0) {
205 nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
206 priv->base.sync = nv17_fence_sync;
207 priv->base.resume = nv17_fence_resume;
208 }
209 }
210
211 if (ret)
212 nv10_fence_destroy(drm);
213 return ret;
214} 110}
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.h b/drivers/gpu/drm/nouveau/nv10_fence.h
new file mode 100644
index 000000000000..e5d9204826c2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_fence.h
@@ -0,0 +1,19 @@
1#ifndef __NV10_FENCE_H_
2#define __NV10_FENCE_H_
3
4#include <core/os.h>
5#include "nouveau_fence.h"
6#include "nouveau_bo.h"
7
8struct nv10_fence_chan {
9 struct nouveau_fence_chan base;
10};
11
12struct nv10_fence_priv {
13 struct nouveau_fence_priv base;
14 struct nouveau_bo *bo;
15 spinlock_t lock;
16 u32 sequence;
17};
18
19#endif
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
new file mode 100644
index 000000000000..8e47a9bae8c3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -0,0 +1,149 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include <core/object.h>
26#include <core/class.h>
27
28#include "nouveau_drm.h"
29#include "nouveau_dma.h"
30#include "nv10_fence.h"
31
32int
33nv17_fence_sync(struct nouveau_fence *fence,
34 struct nouveau_channel *prev, struct nouveau_channel *chan)
35{
36 struct nv10_fence_priv *priv = chan->drm->fence;
37 u32 value;
38 int ret;
39
40 if (!mutex_trylock(&prev->cli->mutex))
41 return -EBUSY;
42
43 spin_lock(&priv->lock);
44 value = priv->sequence;
45 priv->sequence += 2;
46 spin_unlock(&priv->lock);
47
48 ret = RING_SPACE(prev, 5);
49 if (!ret) {
50 BEGIN_NV04(prev, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
51 OUT_RING (prev, NvSema);
52 OUT_RING (prev, 0);
53 OUT_RING (prev, value + 0);
54 OUT_RING (prev, value + 1);
55 FIRE_RING (prev);
56 }
57
58 if (!ret && !(ret = RING_SPACE(chan, 5))) {
59 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 4);
60 OUT_RING (chan, NvSema);
61 OUT_RING (chan, 0);
62 OUT_RING (chan, value + 1);
63 OUT_RING (chan, value + 2);
64 FIRE_RING (chan);
65 }
66
67 mutex_unlock(&prev->cli->mutex);
68 return 0;
69}
70
71static int
72nv17_fence_context_new(struct nouveau_channel *chan)
73{
74 struct nv10_fence_priv *priv = chan->drm->fence;
75 struct nv10_fence_chan *fctx;
76 struct ttm_mem_reg *mem = &priv->bo->bo.mem;
77 struct nouveau_object *object;
78 u32 start = mem->start * PAGE_SIZE;
79 u32 limit = mem->start + mem->size - 1;
80 int ret = 0;
81
82 fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
83 if (!fctx)
84 return -ENOMEM;
85
86 nouveau_fence_context_new(&fctx->base);
87 fctx->base.emit = nv10_fence_emit;
88 fctx->base.read = nv10_fence_read;
89 fctx->base.sync = nv17_fence_sync;
90
91 ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
92 NvSema, 0x0002,
93 &(struct nv_dma_class) {
94 .flags = NV_DMA_TARGET_VRAM |
95 NV_DMA_ACCESS_RDWR,
96 .start = start,
97 .limit = limit,
98 }, sizeof(struct nv_dma_class),
99 &object);
100 if (ret)
101 nv10_fence_context_del(chan);
102 return ret;
103}
104
105void
106nv17_fence_resume(struct nouveau_drm *drm)
107{
108 struct nv10_fence_priv *priv = drm->fence;
109
110 nouveau_bo_wr32(priv->bo, 0, priv->sequence);
111}
112
113int
114nv17_fence_create(struct nouveau_drm *drm)
115{
116 struct nv10_fence_priv *priv;
117 int ret = 0;
118
119 priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
120 if (!priv)
121 return -ENOMEM;
122
123 priv->base.dtor = nv10_fence_destroy;
124 priv->base.resume = nv17_fence_resume;
125 priv->base.context_new = nv17_fence_context_new;
126 priv->base.context_del = nv10_fence_context_del;
127 spin_lock_init(&priv->lock);
128
129 ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
130 0, 0x0000, NULL, &priv->bo);
131 if (!ret) {
132 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
133 if (!ret) {
134 ret = nouveau_bo_map(priv->bo);
135 if (ret)
136 nouveau_bo_unpin(priv->bo);
137 }
138 if (ret)
139 nouveau_bo_ref(NULL, &priv->bo);
140 }
141
142 if (ret) {
143 nv10_fence_destroy(drm);
144 return ret;
145 }
146
147 nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
148 return ret;
149}
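
nv17_fence.c collects the NV17+ semaphore path that used to live in nv10_fence.c; the NvSema DMA object is now created per channel in nv17_fence_context_new() instead of conditionally at device setup. The sync itself is a two-channel handshake over one semaphore word. A worked trace, starting from priv->sequence == 100 (so value == 100 and the next caller sees 102):

	/* prev:  acquire 100, release 101  -- "my work is done"
	 * chan:  acquire 101, release 102  -- blocks until prev's release
	 *
	 * The four OUT_RINGs per channel read as: semaphore object handle
	 * (NvSema), offset, acquire value, release value -- an
	 * interpretation of the NV11_SUBCHAN_DMA_SEMAPHORE method group,
	 * not spelled out in this file. */
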
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 2ca276ada507..977e42be2050 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -768,7 +768,7 @@ static int nv17_tv_set_property(struct drm_encoder *encoder,
768 .crtc = crtc, 768 .crtc = crtc,
769 }; 769 };
770 770
771 crtc->funcs->set_config(&modeset); 771 drm_mode_set_config_internal(&modeset);
772 } 772 }
773 } 773 }
774 774
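
Both this hunk and the nv04_display_destroy() one above stop calling crtc->funcs->set_config() directly and route driver-initiated modesets through drm_mode_set_config_internal(), part of this pull's KMS locking and refcounting rework: the internal helper keeps the core's framebuffer reference bookkeeping consistent even when the modeset does not come in via the ioctl path. The usage pattern is just:

	struct drm_mode_set modeset = {
		.crtc = crtc,	/* fb and mode left NULL: disable the crtc */
	};

	drm_mode_set_config_internal(&modeset);
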
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 35874085a61e..a6237c9cbbc3 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -43,6 +43,7 @@
43#include <subdev/timer.h> 43#include <subdev/timer.h>
44#include <subdev/bar.h> 44#include <subdev/bar.h>
45#include <subdev/fb.h> 45#include <subdev/fb.h>
46#include <subdev/i2c.h>
46 47
47#define EVO_DMA_NR 9 48#define EVO_DMA_NR 9
48 49
@@ -128,6 +129,11 @@ struct nv50_dmac {
128 struct nv50_chan base; 129 struct nv50_chan base;
129 dma_addr_t handle; 130 dma_addr_t handle;
130 u32 *ptr; 131 u32 *ptr;
132
133 /* Protects against concurrent pushbuf access to this channel, lock is
134 * grabbed by evo_wait (if the pushbuf reservation is successful) and
135 * dropped again by evo_kick. */
136 struct mutex lock;
131}; 137};
132 138
133static void 139static void
@@ -271,6 +277,8 @@ nv50_dmac_create(struct nouveau_object *core, u32 bclass, u8 head,
271 u32 pushbuf = *(u32 *)data; 277 u32 pushbuf = *(u32 *)data;
272 int ret; 278 int ret;
273 279
280 mutex_init(&dmac->lock);
281
274 dmac->ptr = pci_alloc_consistent(nv_device(core)->pdev, PAGE_SIZE, 282 dmac->ptr = pci_alloc_consistent(nv_device(core)->pdev, PAGE_SIZE,
275 &dmac->handle); 283 &dmac->handle);
276 if (!dmac->ptr) 284 if (!dmac->ptr)
@@ -395,11 +403,13 @@ evo_wait(void *evoc, int nr)
395 struct nv50_dmac *dmac = evoc; 403 struct nv50_dmac *dmac = evoc;
396 u32 put = nv_ro32(dmac->base.user, 0x0000) / 4; 404 u32 put = nv_ro32(dmac->base.user, 0x0000) / 4;
397 405
406 mutex_lock(&dmac->lock);
398 if (put + nr >= (PAGE_SIZE / 4) - 8) { 407 if (put + nr >= (PAGE_SIZE / 4) - 8) {
399 dmac->ptr[put] = 0x20000000; 408 dmac->ptr[put] = 0x20000000;
400 409
401 nv_wo32(dmac->base.user, 0x0000, 0x00000000); 410 nv_wo32(dmac->base.user, 0x0000, 0x00000000);
402 if (!nv_wait(dmac->base.user, 0x0004, ~0, 0x00000000)) { 411 if (!nv_wait(dmac->base.user, 0x0004, ~0, 0x00000000)) {
412 mutex_unlock(&dmac->lock);
403 NV_ERROR(dmac->base.user, "channel stalled\n"); 413 NV_ERROR(dmac->base.user, "channel stalled\n");
404 return NULL; 414 return NULL;
405 } 415 }
@@ -415,6 +425,7 @@ evo_kick(u32 *push, void *evoc)
415{ 425{
416 struct nv50_dmac *dmac = evoc; 426 struct nv50_dmac *dmac = evoc;
417 nv_wo32(dmac->base.user, 0x0000, (push - dmac->ptr) << 2); 427 nv_wo32(dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
428 mutex_unlock(&dmac->lock);
418} 429}
419 430
420#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m)) 431#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
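
The new dmac->lock turns evo_wait()/evo_kick() into an acquire/release pair: evo_wait() takes the mutex before reserving pushbuf space (and drops it itself on the stall-timeout path), and evo_kick() submits the methods and releases it. Every caller therefore follows this shape:

	push = evo_wait(mast, 8);	/* locks dmac->lock on success */
	if (push) {
		evo_mthd(push, 0x0080, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, mast);	/* submits, drops dmac->lock */
	}
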
@@ -423,7 +434,10 @@ evo_kick(u32 *push, void *evoc)
423static bool 434static bool
424evo_sync_wait(void *data) 435evo_sync_wait(void *data)
425{ 436{
426 return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000; 437 if (nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000)
438 return true;
439 usleep_range(1, 2);
440 return false;
427} 441}
428 442
429static int 443static int
@@ -502,7 +516,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
502 if (ret) 516 if (ret)
503 return ret; 517 return ret;
504 518
505 if (nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) { 519 if (nv_mclass(chan->object) < NV84_CHANNEL_IND_CLASS) {
506 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2); 520 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
507 OUT_RING (chan, NvEvoSema0 + nv_crtc->index); 521 OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
508 OUT_RING (chan, sync->sem.offset); 522 OUT_RING (chan, sync->sem.offset);
@@ -512,24 +526,36 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
512 OUT_RING (chan, sync->sem.offset ^ 0x10); 526 OUT_RING (chan, sync->sem.offset ^ 0x10);
513 OUT_RING (chan, 0x74b1e000); 527 OUT_RING (chan, 0x74b1e000);
514 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1); 528 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
515 if (nv_mclass(chan->object) < NV84_CHANNEL_DMA_CLASS) 529 OUT_RING (chan, NvSema);
516 OUT_RING (chan, NvSema); 530 } else
517 else 531 if (nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
518 OUT_RING (chan, chan->vram); 532 u64 offset = nv84_fence_crtc(chan, nv_crtc->index);
533 offset += sync->sem.offset;
534
535 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
536 OUT_RING (chan, upper_32_bits(offset));
537 OUT_RING (chan, lower_32_bits(offset));
538 OUT_RING (chan, 0xf00d0000 | sync->sem.value);
539 OUT_RING (chan, 0x00000002);
540 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
541 OUT_RING (chan, upper_32_bits(offset));
542 OUT_RING (chan, lower_32_bits(offset ^ 0x10));
543 OUT_RING (chan, 0x74b1e000);
544 OUT_RING (chan, 0x00000001);
519 } else { 545 } else {
520 u64 offset = nvc0_fence_crtc(chan, nv_crtc->index); 546 u64 offset = nv84_fence_crtc(chan, nv_crtc->index);
521 offset += sync->sem.offset; 547 offset += sync->sem.offset;
522 548
523 BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); 549 BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
524 OUT_RING (chan, upper_32_bits(offset)); 550 OUT_RING (chan, upper_32_bits(offset));
525 OUT_RING (chan, lower_32_bits(offset)); 551 OUT_RING (chan, lower_32_bits(offset));
526 OUT_RING (chan, 0xf00d0000 | sync->sem.value); 552 OUT_RING (chan, 0xf00d0000 | sync->sem.value);
527 OUT_RING (chan, 0x1002); 553 OUT_RING (chan, 0x00001002);
528 BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); 554 BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
529 OUT_RING (chan, upper_32_bits(offset)); 555 OUT_RING (chan, upper_32_bits(offset));
530 OUT_RING (chan, lower_32_bits(offset ^ 0x10)); 556 OUT_RING (chan, lower_32_bits(offset ^ 0x10));
531 OUT_RING (chan, 0x74b1e000); 557 OUT_RING (chan, 0x74b1e000);
532 OUT_RING (chan, 0x1001); 558 OUT_RING (chan, 0x00001001);
533 } 559 }
534 560
535 FIRE_RING (chan); 561 FIRE_RING (chan);
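
The flip hunk above now distinguishes three channel generations instead of two, keyed off the object class; in both newer paths the semaphore's virtual address comes from nv84_fence_crtc() (replacing nvc0_fence_crtc), and 0x1002/0x1001 are merely rewritten as full 32-bit constants. As a comment sketch:

	/* < NV84_CHANNEL_IND_CLASS : NV11-style DMA semaphore via NvSema
	 * < NVC0_CHANNEL_IND_CLASS : NV84 64-bit semaphore, NV04 methods
	 * otherwise                : same layout, BEGIN_NVC0 methods
	 *
	 * Trigger words: ..2 writes 0xf00d0000|value, ..1 then waits for
	 * 0x74b1e000 at the paired offset (offset ^ 0x10) -- a reading of
	 * the NV84 semaphore trigger encoding (WRITE_LONG/ACQUIRE_EQUAL),
	 * not confirmed by this hunk alone. */
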
@@ -1493,9 +1519,6 @@ nv50_dac_disconnect(struct drm_encoder *encoder)
1493 evo_mthd(push, 0x0180 + (or * 0x020), 1); 1519 evo_mthd(push, 0x0180 + (or * 0x020), 1);
1494 evo_data(push, 0x00000000); 1520 evo_data(push, 0x00000000);
1495 } 1521 }
1496
1497 evo_mthd(push, 0x0080, 1);
1498 evo_data(push, 0x00000000);
1499 evo_kick(push, mast); 1522 evo_kick(push, mast);
1500 } 1523 }
1501 } 1524 }
@@ -1542,20 +1565,23 @@ static const struct drm_encoder_funcs nv50_dac_func = {
1542static int 1565static int
1543nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe) 1566nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
1544{ 1567{
1545 struct drm_device *dev = connector->dev; 1568 struct nouveau_drm *drm = nouveau_drm(connector->dev);
1569 struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
1546 struct nouveau_encoder *nv_encoder; 1570 struct nouveau_encoder *nv_encoder;
1547 struct drm_encoder *encoder; 1571 struct drm_encoder *encoder;
1572 int type = DRM_MODE_ENCODER_DAC;
1548 1573
1549 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); 1574 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
1550 if (!nv_encoder) 1575 if (!nv_encoder)
1551 return -ENOMEM; 1576 return -ENOMEM;
1552 nv_encoder->dcb = dcbe; 1577 nv_encoder->dcb = dcbe;
1553 nv_encoder->or = ffs(dcbe->or) - 1; 1578 nv_encoder->or = ffs(dcbe->or) - 1;
1579 nv_encoder->i2c = i2c->find(i2c, dcbe->i2c_index);
1554 1580
1555 encoder = to_drm_encoder(nv_encoder); 1581 encoder = to_drm_encoder(nv_encoder);
1556 encoder->possible_crtcs = dcbe->heads; 1582 encoder->possible_crtcs = dcbe->heads;
1557 encoder->possible_clones = 0; 1583 encoder->possible_clones = 0;
1558 drm_encoder_init(dev, encoder, &nv50_dac_func, DRM_MODE_ENCODER_DAC); 1584 drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type);
1559 drm_encoder_helper_add(encoder, &nv50_dac_hfunc); 1585 drm_encoder_helper_add(encoder, &nv50_dac_hfunc);
1560 1586
1561 drm_mode_connector_attach_encoder(connector, encoder); 1587 drm_mode_connector_attach_encoder(connector, encoder);
@@ -1664,9 +1690,6 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
1664 } 1690 }
1665 1691
1666 nv_call(disp->core, NV50_DISP_SOR_PWR + or, (mode == DRM_MODE_DPMS_ON)); 1692 nv_call(disp->core, NV50_DISP_SOR_PWR + or, (mode == DRM_MODE_DPMS_ON));
1667
1668 if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
1669 nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, disp->core);
1670} 1693}
1671 1694
1672static bool 1695static bool
@@ -1709,9 +1732,6 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
1709 evo_mthd(push, 0x0200 + (or * 0x20), 1); 1732 evo_mthd(push, 0x0200 + (or * 0x20), 1);
1710 evo_data(push, 0x00000000); 1733 evo_data(push, 0x00000000);
1711 } 1734 }
1712
1713 evo_mthd(push, 0x0080, 1);
1714 evo_data(push, 0x00000000);
1715 evo_kick(push, mast); 1735 evo_kick(push, mast);
1716 } 1736 }
1717 1737
@@ -1723,14 +1743,6 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
1723} 1743}
1724 1744
1725static void 1745static void
1726nv50_sor_prepare(struct drm_encoder *encoder)
1727{
1728 nv50_sor_disconnect(encoder);
1729 if (nouveau_encoder(encoder)->dcb->type == DCB_OUTPUT_DP)
1730 evo_sync(encoder->dev);
1731}
1732
1733static void
1734nv50_sor_commit(struct drm_encoder *encoder) 1746nv50_sor_commit(struct drm_encoder *encoder)
1735{ 1747{
1736} 1748}
@@ -1825,8 +1837,13 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
1825 push = evo_wait(nv50_mast(dev), 8); 1837 push = evo_wait(nv50_mast(dev), 8);
1826 if (push) { 1838 if (push) {
1827 if (nv50_vers(mast) < NVD0_DISP_CLASS) { 1839 if (nv50_vers(mast) < NVD0_DISP_CLASS) {
1840 u32 ctrl = (depth << 16) | (proto << 8) | owner;
1841 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
1842 ctrl |= 0x00001000;
1843 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
1844 ctrl |= 0x00002000;
1828 evo_mthd(push, 0x0600 + (nv_encoder->or * 0x040), 1); 1845 evo_mthd(push, 0x0600 + (nv_encoder->or * 0x040), 1);
1829 evo_data(push, (depth << 16) | (proto << 8) | owner); 1846 evo_data(push, ctrl);
1830 } else { 1847 } else {
1831 u32 magic = 0x31ec6000 | (nv_crtc->index << 25); 1848 u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
1832 u32 syncs = 0x00000001; 1849 u32 syncs = 0x00000001;
@@ -1862,7 +1879,7 @@ nv50_sor_destroy(struct drm_encoder *encoder)
1862static const struct drm_encoder_helper_funcs nv50_sor_hfunc = { 1879static const struct drm_encoder_helper_funcs nv50_sor_hfunc = {
1863 .dpms = nv50_sor_dpms, 1880 .dpms = nv50_sor_dpms,
1864 .mode_fixup = nv50_sor_mode_fixup, 1881 .mode_fixup = nv50_sor_mode_fixup,
1865 .prepare = nv50_sor_prepare, 1882 .prepare = nv50_sor_disconnect,
1866 .commit = nv50_sor_commit, 1883 .commit = nv50_sor_commit,
1867 .mode_set = nv50_sor_mode_set, 1884 .mode_set = nv50_sor_mode_set,
1868 .disable = nv50_sor_disconnect, 1885 .disable = nv50_sor_disconnect,
@@ -1876,21 +1893,33 @@ static const struct drm_encoder_funcs nv50_sor_func = {
1876static int 1893static int
1877nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe) 1894nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
1878{ 1895{
1879 struct drm_device *dev = connector->dev; 1896 struct nouveau_drm *drm = nouveau_drm(connector->dev);
1897 struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
1880 struct nouveau_encoder *nv_encoder; 1898 struct nouveau_encoder *nv_encoder;
1881 struct drm_encoder *encoder; 1899 struct drm_encoder *encoder;
1900 int type;
1901
1902 switch (dcbe->type) {
1903 case DCB_OUTPUT_LVDS: type = DRM_MODE_ENCODER_LVDS; break;
1904 case DCB_OUTPUT_TMDS:
1905 case DCB_OUTPUT_DP:
1906 default:
1907 type = DRM_MODE_ENCODER_TMDS;
1908 break;
1909 }
1882 1910
1883 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); 1911 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
1884 if (!nv_encoder) 1912 if (!nv_encoder)
1885 return -ENOMEM; 1913 return -ENOMEM;
1886 nv_encoder->dcb = dcbe; 1914 nv_encoder->dcb = dcbe;
1887 nv_encoder->or = ffs(dcbe->or) - 1; 1915 nv_encoder->or = ffs(dcbe->or) - 1;
1916 nv_encoder->i2c = i2c->find(i2c, dcbe->i2c_index);
1888 nv_encoder->last_dpms = DRM_MODE_DPMS_OFF; 1917 nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
1889 1918
1890 encoder = to_drm_encoder(nv_encoder); 1919 encoder = to_drm_encoder(nv_encoder);
1891 encoder->possible_crtcs = dcbe->heads; 1920 encoder->possible_crtcs = dcbe->heads;
1892 encoder->possible_clones = 0; 1921 encoder->possible_clones = 0;
1893 drm_encoder_init(dev, encoder, &nv50_sor_func, DRM_MODE_ENCODER_TMDS); 1922 drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type);
1894 drm_encoder_helper_add(encoder, &nv50_sor_hfunc); 1923 drm_encoder_helper_add(encoder, &nv50_sor_hfunc);
1895 1924
1896 drm_mode_connector_attach_encoder(connector, encoder); 1925 drm_mode_connector_attach_encoder(connector, encoder);
@@ -1898,6 +1927,181 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
1898} 1927}
1899 1928
1900/****************************************************************************** 1929/******************************************************************************
1930 * PIOR
1931 *****************************************************************************/
1932
1933static void
1934nv50_pior_dpms(struct drm_encoder *encoder, int mode)
1935{
1936 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1937 struct nv50_disp *disp = nv50_disp(encoder->dev);
1938 u32 mthd = (nv_encoder->dcb->type << 12) | nv_encoder->or;
1939 u32 ctrl = (mode == DRM_MODE_DPMS_ON);
1940 nv_call(disp->core, NV50_DISP_PIOR_PWR + mthd, ctrl);
1941}
1942
1943static bool
1944nv50_pior_mode_fixup(struct drm_encoder *encoder,
1945 const struct drm_display_mode *mode,
1946 struct drm_display_mode *adjusted_mode)
1947{
1948 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1949 struct nouveau_connector *nv_connector;
1950
1951 nv_connector = nouveau_encoder_connector_get(nv_encoder);
1952 if (nv_connector && nv_connector->native_mode) {
1953 if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
1954 int id = adjusted_mode->base.id;
1955 *adjusted_mode = *nv_connector->native_mode;
1956 adjusted_mode->base.id = id;
1957 }
1958 }
1959
1960 adjusted_mode->clock *= 2;
1961 return true;
1962}
1963
1964static void
1965nv50_pior_commit(struct drm_encoder *encoder)
1966{
1967}
1968
1969static void
1970nv50_pior_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1971 struct drm_display_mode *adjusted_mode)
1972{
1973 struct nv50_mast *mast = nv50_mast(encoder->dev);
1974 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1975 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
1976 struct nouveau_connector *nv_connector;
1977 u8 owner = 1 << nv_crtc->index;
1978 u8 proto, depth;
1979 u32 *push;
1980
1981 nv_connector = nouveau_encoder_connector_get(nv_encoder);
1982 switch (nv_connector->base.display_info.bpc) {
1983 case 10: depth = 0x6; break;
1984 case 8: depth = 0x5; break;
1985 case 6: depth = 0x2; break;
1986 default: depth = 0x0; break;
1987 }
1988
1989 switch (nv_encoder->dcb->type) {
1990 case DCB_OUTPUT_TMDS:
1991 case DCB_OUTPUT_DP:
1992 proto = 0x0;
1993 break;
1994 default:
1995 BUG_ON(1);
1996 break;
1997 }
1998
1999 nv50_pior_dpms(encoder, DRM_MODE_DPMS_ON);
2000
2001 push = evo_wait(mast, 8);
2002 if (push) {
2003 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
2004 u32 ctrl = (depth << 16) | (proto << 8) | owner;
2005 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
2006 ctrl |= 0x00001000;
2007 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
2008 ctrl |= 0x00002000;
2009 evo_mthd(push, 0x0700 + (nv_encoder->or * 0x040), 1);
2010 evo_data(push, ctrl);
2011 }
2012
2013 evo_kick(push, mast);
2014 }
2015
2016 nv_encoder->crtc = encoder->crtc;
2017}
2018
2019static void
2020nv50_pior_disconnect(struct drm_encoder *encoder)
2021{
2022 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
2023 struct nv50_mast *mast = nv50_mast(encoder->dev);
2024 const int or = nv_encoder->or;
2025 u32 *push;
2026
2027 if (nv_encoder->crtc) {
2028 nv50_crtc_prepare(nv_encoder->crtc);
2029
2030 push = evo_wait(mast, 4);
2031 if (push) {
2032 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
2033 evo_mthd(push, 0x0700 + (or * 0x040), 1);
2034 evo_data(push, 0x00000000);
2035 }
2036 evo_kick(push, mast);
2037 }
2038 }
2039
2040 nv_encoder->crtc = NULL;
2041}
2042
2043static void
2044nv50_pior_destroy(struct drm_encoder *encoder)
2045{
2046 drm_encoder_cleanup(encoder);
2047 kfree(encoder);
2048}
2049
2050static const struct drm_encoder_helper_funcs nv50_pior_hfunc = {
2051 .dpms = nv50_pior_dpms,
2052 .mode_fixup = nv50_pior_mode_fixup,
2053 .prepare = nv50_pior_disconnect,
2054 .commit = nv50_pior_commit,
2055 .mode_set = nv50_pior_mode_set,
2056 .disable = nv50_pior_disconnect,
2057 .get_crtc = nv50_display_crtc_get,
2058};
2059
2060static const struct drm_encoder_funcs nv50_pior_func = {
2061 .destroy = nv50_pior_destroy,
2062};
2063
2064static int
2065nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
2066{
2067 struct nouveau_drm *drm = nouveau_drm(connector->dev);
2068 struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
2069 struct nouveau_i2c_port *ddc = NULL;
2070 struct nouveau_encoder *nv_encoder;
2071 struct drm_encoder *encoder;
2072 int type;
2073
2074 switch (dcbe->type) {
2075 case DCB_OUTPUT_TMDS:
2076 ddc = i2c->find_type(i2c, NV_I2C_TYPE_EXTDDC(dcbe->extdev));
2077 type = DRM_MODE_ENCODER_TMDS;
2078 break;
2079 case DCB_OUTPUT_DP:
2080 ddc = i2c->find_type(i2c, NV_I2C_TYPE_EXTAUX(dcbe->extdev));
2081 type = DRM_MODE_ENCODER_TMDS;
2082 break;
2083 default:
2084 return -ENODEV;
2085 }
2086
2087 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
2088 if (!nv_encoder)
2089 return -ENOMEM;
2090 nv_encoder->dcb = dcbe;
2091 nv_encoder->or = ffs(dcbe->or) - 1;
2092 nv_encoder->i2c = ddc;
2093
2094 encoder = to_drm_encoder(nv_encoder);
2095 encoder->possible_crtcs = dcbe->heads;
2096 encoder->possible_clones = 0;
2097 drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type);
2098 drm_encoder_helper_add(encoder, &nv50_pior_hfunc);
2099
2100 drm_mode_connector_attach_encoder(connector, encoder);
2101 return 0;
2102}
2103
2104/******************************************************************************
1901 * Init 2105 * Init
1902 *****************************************************************************/ 2106 *****************************************************************************/
1903void 2107void
@@ -1913,7 +2117,7 @@ nv50_display_init(struct drm_device *dev)
1913 evo_mthd(push, 0x0088, 1); 2117 evo_mthd(push, 0x0088, 1);
1914 evo_data(push, NvEvoSync); 2118 evo_data(push, NvEvoSync);
1915 evo_kick(push, nv50_mast(dev)); 2119 evo_kick(push, nv50_mast(dev));
1916 return evo_sync(dev); 2120 return 0;
1917 } 2121 }
1918 2122
1919 return -EBUSY; 2123 return -EBUSY;
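nv50_display_init() now returns immediately after kicking the push buffer instead of ending with evo_sync(): the submit is fire-and-forget, and callers no longer wait for the display engine to drain it. The tail of the function, condensed from the hunk:

    evo_mthd(push, 0x0088, 1);
    evo_data(push, NvEvoSync);
    evo_kick(push, nv50_mast(dev));
    return 0;                 /* was: return evo_sync(dev); */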
@@ -2019,25 +2223,28 @@ nv50_display_create(struct drm_device *dev)
2019 if (IS_ERR(connector)) 2223 if (IS_ERR(connector))
2020 continue; 2224 continue;
2021 2225
2022 if (dcbe->location != DCB_LOC_ON_CHIP) { 2226 if (dcbe->location == DCB_LOC_ON_CHIP) {
2023 NV_WARN(drm, "skipping off-chip encoder %d/%d\n", 2227 switch (dcbe->type) {
2024 dcbe->type, ffs(dcbe->or) - 1); 2228 case DCB_OUTPUT_TMDS:
2025 continue; 2229 case DCB_OUTPUT_LVDS:
2230 case DCB_OUTPUT_DP:
2231 ret = nv50_sor_create(connector, dcbe);
2232 break;
2233 case DCB_OUTPUT_ANALOG:
2234 ret = nv50_dac_create(connector, dcbe);
2235 break;
2236 default:
2237 ret = -ENODEV;
2238 break;
2239 }
2240 } else {
2241 ret = nv50_pior_create(connector, dcbe);
2026 } 2242 }
2027 2243
2028 switch (dcbe->type) { 2244 if (ret) {
2029 case DCB_OUTPUT_TMDS: 2245 NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n",
2030 case DCB_OUTPUT_LVDS: 2246 dcbe->location, dcbe->type,
2031 case DCB_OUTPUT_DP: 2247 ffs(dcbe->or) - 1, ret);
2032 nv50_sor_create(connector, dcbe);
2033 break;
2034 case DCB_OUTPUT_ANALOG:
2035 nv50_dac_create(connector, dcbe);
2036 break;
2037 default:
2038 NV_WARN(drm, "skipping unsupported encoder %d/%d\n",
2039 dcbe->type, ffs(dcbe->or) - 1);
2040 continue;
2041 } 2248 }
2042 } 2249 }
2043 2250
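Several hunks in this file share nouveau's evo (display) push-buffer protocol: reserve space, emit a method header plus its data words, then kick the buffer to the hardware. A minimal sketch built only from identifiers in the diff; 0x0080 is the method the removed disconnect paths were sending:

    u32 *push = evo_wait(mast, 2);      /* reserve 2 dwords */
    if (push) {
            evo_mthd(push, 0x0080, 1);  /* method 0x0080, 1 data word */
            evo_data(push, 0x00000000);
            evo_kick(push, mast);       /* submit to hardware */
    }

The removed 0x0080 writes in nv50_dac_disconnect() and nv50_sor_disconnect() were exactly this shape; dropping them presumably defers that update rather than flushing it once per encoder.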
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index d889f3ac0d41..f9701e567db8 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -27,27 +27,16 @@
27 27
28#include "nouveau_drm.h" 28#include "nouveau_drm.h"
29#include "nouveau_dma.h" 29#include "nouveau_dma.h"
30#include "nouveau_fence.h" 30#include "nv10_fence.h"
31 31
32#include "nv50_display.h" 32#include "nv50_display.h"
33 33
34struct nv50_fence_chan {
35 struct nouveau_fence_chan base;
36};
37
38struct nv50_fence_priv {
39 struct nouveau_fence_priv base;
40 struct nouveau_bo *bo;
41 spinlock_t lock;
42 u32 sequence;
43};
44
45static int 34static int
46nv50_fence_context_new(struct nouveau_channel *chan) 35nv50_fence_context_new(struct nouveau_channel *chan)
47{ 36{
48 struct drm_device *dev = chan->drm->dev; 37 struct drm_device *dev = chan->drm->dev;
49 struct nv50_fence_priv *priv = chan->drm->fence; 38 struct nv10_fence_priv *priv = chan->drm->fence;
50 struct nv50_fence_chan *fctx; 39 struct nv10_fence_chan *fctx;
51 struct ttm_mem_reg *mem = &priv->bo->bo.mem; 40 struct ttm_mem_reg *mem = &priv->bo->bo.mem;
52 struct nouveau_object *object; 41 struct nouveau_object *object;
53 int ret, i; 42 int ret, i;
@@ -57,6 +46,9 @@ nv50_fence_context_new(struct nouveau_channel *chan)
57 return -ENOMEM; 46 return -ENOMEM;
58 47
59 nouveau_fence_context_new(&fctx->base); 48 nouveau_fence_context_new(&fctx->base);
49 fctx->base.emit = nv10_fence_emit;
50 fctx->base.read = nv10_fence_read;
51 fctx->base.sync = nv17_fence_sync;
60 52
61 ret = nouveau_object_new(nv_object(chan->cli), chan->handle, 53 ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
62 NvSema, 0x0002, 54 NvSema, 0x0002,
@@ -91,7 +83,7 @@ nv50_fence_context_new(struct nouveau_channel *chan)
91int 83int
92nv50_fence_create(struct nouveau_drm *drm) 84nv50_fence_create(struct nouveau_drm *drm)
93{ 85{
94 struct nv50_fence_priv *priv; 86 struct nv10_fence_priv *priv;
95 int ret = 0; 87 int ret = 0;
96 88
97 priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL); 89 priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -99,11 +91,9 @@ nv50_fence_create(struct nouveau_drm *drm)
99 return -ENOMEM; 91 return -ENOMEM;
100 92
101 priv->base.dtor = nv10_fence_destroy; 93 priv->base.dtor = nv10_fence_destroy;
94 priv->base.resume = nv17_fence_resume;
102 priv->base.context_new = nv50_fence_context_new; 95 priv->base.context_new = nv50_fence_context_new;
103 priv->base.context_del = nv10_fence_context_del; 96 priv->base.context_del = nv10_fence_context_del;
104 priv->base.emit = nv10_fence_emit;
105 priv->base.read = nv10_fence_read;
106 priv->base.sync = nv17_fence_sync;
107 spin_lock_init(&priv->lock); 97 spin_lock_init(&priv->lock);
108 98
109 ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM, 99 ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
@@ -119,13 +109,11 @@ nv50_fence_create(struct nouveau_drm *drm)
119 nouveau_bo_ref(NULL, &priv->bo); 109 nouveau_bo_ref(NULL, &priv->bo);
120 } 110 }
121 111
122 if (ret == 0) { 112 if (ret) {
123 nouveau_bo_wr32(priv->bo, 0x000, 0x00000000); 113 nv10_fence_destroy(drm);
124 priv->base.sync = nv17_fence_sync; 114 return ret;
125 priv->base.resume = nv17_fence_resume;
126 } 115 }
127 116
128 if (ret) 117 nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
129 nv10_fence_destroy(drm);
130 return ret; 118 return ret;
131} 119}
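The nv50_fence rework drops the private nv50_fence_chan/nv50_fence_priv structures in favour of the nv10 ones, and moves the emit/read/sync hooks from the driver-wide priv into each channel context. The per-context wiring, condensed from the hunks:

    nouveau_fence_context_new(&fctx->base);
    fctx->base.emit = nv10_fence_emit;   /* per-channel hooks ...   */
    fctx->base.read = nv10_fence_read;   /* ... replace the old     */
    fctx->base.sync = nv17_fence_sync;   /* driver-wide assignments */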
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index c686650584b6..9fd475c89820 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include <core/object.h> 25#include <core/object.h>
26#include <core/client.h>
26#include <core/class.h> 27#include <core/class.h>
27 28
28#include <engine/fifo.h> 29#include <engine/fifo.h>
@@ -33,79 +34,115 @@
33 34
34#include "nv50_display.h" 35#include "nv50_display.h"
35 36
36struct nv84_fence_chan { 37u64
37 struct nouveau_fence_chan base; 38nv84_fence_crtc(struct nouveau_channel *chan, int crtc)
38}; 39{
39 40 struct nv84_fence_chan *fctx = chan->fence;
40struct nv84_fence_priv { 41 return fctx->dispc_vma[crtc].offset;
41 struct nouveau_fence_priv base; 42}
42 struct nouveau_gpuobj *mem;
43};
44 43
45static int 44static int
46nv84_fence_emit(struct nouveau_fence *fence) 45nv84_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
47{ 46{
48 struct nouveau_channel *chan = fence->channel; 47 int ret = RING_SPACE(chan, 8);
49 struct nouveau_fifo_chan *fifo = (void *)chan->object;
50 int ret = RING_SPACE(chan, 7);
51 if (ret == 0) { 48 if (ret == 0) {
52 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1); 49 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
53 OUT_RING (chan, NvSema); 50 OUT_RING (chan, chan->vram);
54 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); 51 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 5);
55 OUT_RING (chan, upper_32_bits(fifo->chid * 16)); 52 OUT_RING (chan, upper_32_bits(virtual));
56 OUT_RING (chan, lower_32_bits(fifo->chid * 16)); 53 OUT_RING (chan, lower_32_bits(virtual));
57 OUT_RING (chan, fence->sequence); 54 OUT_RING (chan, sequence);
58 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG); 55 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
56 OUT_RING (chan, 0x00000000);
59 FIRE_RING (chan); 57 FIRE_RING (chan);
60 } 58 }
61 return ret; 59 return ret;
62} 60}
63 61
64
65static int 62static int
66nv84_fence_sync(struct nouveau_fence *fence, 63nv84_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
67 struct nouveau_channel *prev, struct nouveau_channel *chan)
68{ 64{
69 struct nouveau_fifo_chan *fifo = (void *)prev->object;
70 int ret = RING_SPACE(chan, 7); 65 int ret = RING_SPACE(chan, 7);
71 if (ret == 0) { 66 if (ret == 0) {
72 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1); 67 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
73 OUT_RING (chan, NvSema); 68 OUT_RING (chan, chan->vram);
74 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); 69 BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
75 OUT_RING (chan, upper_32_bits(fifo->chid * 16)); 70 OUT_RING (chan, upper_32_bits(virtual));
76 OUT_RING (chan, lower_32_bits(fifo->chid * 16)); 71 OUT_RING (chan, lower_32_bits(virtual));
77 OUT_RING (chan, fence->sequence); 72 OUT_RING (chan, sequence);
78 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL); 73 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL);
79 FIRE_RING (chan); 74 FIRE_RING (chan);
80 } 75 }
81 return ret; 76 return ret;
82} 77}
83 78
79static int
80nv84_fence_emit(struct nouveau_fence *fence)
81{
82 struct nouveau_channel *chan = fence->channel;
83 struct nv84_fence_chan *fctx = chan->fence;
84 struct nouveau_fifo_chan *fifo = (void *)chan->object;
85 u64 addr = fifo->chid * 16;
86
87 if (fence->sysmem)
88 addr += fctx->vma_gart.offset;
89 else
90 addr += fctx->vma.offset;
91
92 return fctx->base.emit32(chan, addr, fence->sequence);
93}
94
95static int
96nv84_fence_sync(struct nouveau_fence *fence,
97 struct nouveau_channel *prev, struct nouveau_channel *chan)
98{
99 struct nv84_fence_chan *fctx = chan->fence;
100 struct nouveau_fifo_chan *fifo = (void *)prev->object;
101 u64 addr = fifo->chid * 16;
102
103 if (fence->sysmem)
104 addr += fctx->vma_gart.offset;
105 else
106 addr += fctx->vma.offset;
107
108 return fctx->base.sync32(chan, addr, fence->sequence);
109}
110
84static u32 111static u32
85nv84_fence_read(struct nouveau_channel *chan) 112nv84_fence_read(struct nouveau_channel *chan)
86{ 113{
87 struct nouveau_fifo_chan *fifo = (void *)chan->object; 114 struct nouveau_fifo_chan *fifo = (void *)chan->object;
88 struct nv84_fence_priv *priv = chan->drm->fence; 115 struct nv84_fence_priv *priv = chan->drm->fence;
89 return nv_ro32(priv->mem, fifo->chid * 16); 116 return nouveau_bo_rd32(priv->bo, fifo->chid * 16/4);
90} 117}
91 118
92static void 119static void
93nv84_fence_context_del(struct nouveau_channel *chan) 120nv84_fence_context_del(struct nouveau_channel *chan)
94{ 121{
122 struct drm_device *dev = chan->drm->dev;
123 struct nv84_fence_priv *priv = chan->drm->fence;
95 struct nv84_fence_chan *fctx = chan->fence; 124 struct nv84_fence_chan *fctx = chan->fence;
125 int i;
126
127 for (i = 0; i < dev->mode_config.num_crtc; i++) {
128 struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
129 nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
130 }
131
132 nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
133 nouveau_bo_vma_del(priv->bo, &fctx->vma);
96 nouveau_fence_context_del(&fctx->base); 134 nouveau_fence_context_del(&fctx->base);
97 chan->fence = NULL; 135 chan->fence = NULL;
98 kfree(fctx); 136 kfree(fctx);
99} 137}
100 138
101static int 139int
102nv84_fence_context_new(struct nouveau_channel *chan) 140nv84_fence_context_new(struct nouveau_channel *chan)
103{ 141{
104 struct drm_device *dev = chan->drm->dev;
105 struct nouveau_fifo_chan *fifo = (void *)chan->object; 142 struct nouveau_fifo_chan *fifo = (void *)chan->object;
143 struct nouveau_client *client = nouveau_client(fifo);
106 struct nv84_fence_priv *priv = chan->drm->fence; 144 struct nv84_fence_priv *priv = chan->drm->fence;
107 struct nv84_fence_chan *fctx; 145 struct nv84_fence_chan *fctx;
108 struct nouveau_object *object;
109 int ret, i; 146 int ret, i;
110 147
111 fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); 148 fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
@@ -113,44 +150,74 @@ nv84_fence_context_new(struct nouveau_channel *chan)
113 return -ENOMEM; 150 return -ENOMEM;
114 151
115 nouveau_fence_context_new(&fctx->base); 152 nouveau_fence_context_new(&fctx->base);
153 fctx->base.emit = nv84_fence_emit;
154 fctx->base.sync = nv84_fence_sync;
155 fctx->base.read = nv84_fence_read;
156 fctx->base.emit32 = nv84_fence_emit32;
157 fctx->base.sync32 = nv84_fence_sync32;
116 158
117 ret = nouveau_object_new(nv_object(chan->cli), chan->handle, 159 ret = nouveau_bo_vma_add(priv->bo, client->vm, &fctx->vma);
118 NvSema, 0x0002, 160 if (ret == 0) {
119 &(struct nv_dma_class) { 161 ret = nouveau_bo_vma_add(priv->bo_gart, client->vm,
120 .flags = NV_DMA_TARGET_VRAM | 162 &fctx->vma_gart);
121 NV_DMA_ACCESS_RDWR, 163 }
122 .start = priv->mem->addr,
123 .limit = priv->mem->addr +
124 priv->mem->size - 1,
125 }, sizeof(struct nv_dma_class),
126 &object);
127
128 /* dma objects for display sync channel semaphore blocks */
129 for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
130 struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
131 164
132 ret = nouveau_object_new(nv_object(chan->cli), chan->handle, 165 /* map display semaphore buffers into channel's vm */
133 NvEvoSema0 + i, 0x003d, 166 for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
134 &(struct nv_dma_class) { 167 struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
135 .flags = NV_DMA_TARGET_VRAM | 168 ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
136 NV_DMA_ACCESS_RDWR,
137 .start = bo->bo.offset,
138 .limit = bo->bo.offset + 0xfff,
139 }, sizeof(struct nv_dma_class),
140 &object);
141 } 169 }
142 170
171 nouveau_bo_wr32(priv->bo, fifo->chid * 16/4, 0x00000000);
172
143 if (ret) 173 if (ret)
144 nv84_fence_context_del(chan); 174 nv84_fence_context_del(chan);
145 nv_wo32(priv->mem, fifo->chid * 16, 0x00000000);
146 return ret; 175 return ret;
147} 176}
148 177
178static bool
179nv84_fence_suspend(struct nouveau_drm *drm)
180{
181 struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
182 struct nv84_fence_priv *priv = drm->fence;
183 int i;
184
185 priv->suspend = vmalloc((pfifo->max + 1) * sizeof(u32));
186 if (priv->suspend) {
187 for (i = 0; i <= pfifo->max; i++)
188 priv->suspend[i] = nouveau_bo_rd32(priv->bo, i*4);
189 }
190
191 return priv->suspend != NULL;
192}
193
194static void
195nv84_fence_resume(struct nouveau_drm *drm)
196{
197 struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
198 struct nv84_fence_priv *priv = drm->fence;
199 int i;
200
201 if (priv->suspend) {
202 for (i = 0; i <= pfifo->max; i++)
203 nouveau_bo_wr32(priv->bo, i*4, priv->suspend[i]);
204 vfree(priv->suspend);
205 priv->suspend = NULL;
206 }
207}
208
149static void 209static void
150nv84_fence_destroy(struct nouveau_drm *drm) 210nv84_fence_destroy(struct nouveau_drm *drm)
151{ 211{
152 struct nv84_fence_priv *priv = drm->fence; 212 struct nv84_fence_priv *priv = drm->fence;
153 nouveau_gpuobj_ref(NULL, &priv->mem); 213 nouveau_bo_unmap(priv->bo_gart);
214 if (priv->bo_gart)
215 nouveau_bo_unpin(priv->bo_gart);
216 nouveau_bo_ref(NULL, &priv->bo_gart);
217 nouveau_bo_unmap(priv->bo);
218 if (priv->bo)
219 nouveau_bo_unpin(priv->bo);
220 nouveau_bo_ref(NULL, &priv->bo);
154 drm->fence = NULL; 221 drm->fence = NULL;
155 kfree(priv); 222 kfree(priv);
156} 223}
@@ -160,7 +227,6 @@ nv84_fence_create(struct nouveau_drm *drm)
160{ 227{
161 struct nouveau_fifo *pfifo = nouveau_fifo(drm->device); 228 struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
162 struct nv84_fence_priv *priv; 229 struct nv84_fence_priv *priv;
163 u32 chan = pfifo->max + 1;
164 int ret; 230 int ret;
165 231
166 priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL); 232 priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -168,14 +234,42 @@ nv84_fence_create(struct nouveau_drm *drm)
168 return -ENOMEM; 234 return -ENOMEM;
169 235
170 priv->base.dtor = nv84_fence_destroy; 236 priv->base.dtor = nv84_fence_destroy;
237 priv->base.suspend = nv84_fence_suspend;
238 priv->base.resume = nv84_fence_resume;
171 priv->base.context_new = nv84_fence_context_new; 239 priv->base.context_new = nv84_fence_context_new;
172 priv->base.context_del = nv84_fence_context_del; 240 priv->base.context_del = nv84_fence_context_del;
173 priv->base.emit = nv84_fence_emit;
174 priv->base.sync = nv84_fence_sync;
175 priv->base.read = nv84_fence_read;
176 241
177 ret = nouveau_gpuobj_new(drm->device, NULL, chan * 16, 0x1000, 0, 242 init_waitqueue_head(&priv->base.waiting);
178 &priv->mem); 243 priv->base.uevent = true;
244
245 ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
246 TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
247 if (ret == 0) {
248 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
249 if (ret == 0) {
250 ret = nouveau_bo_map(priv->bo);
251 if (ret)
252 nouveau_bo_unpin(priv->bo);
253 }
254 if (ret)
255 nouveau_bo_ref(NULL, &priv->bo);
256 }
257
258 if (ret == 0)
259 ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
260 TTM_PL_FLAG_TT, 0, 0, NULL,
261 &priv->bo_gart);
262 if (ret == 0) {
263 ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT);
264 if (ret == 0) {
265 ret = nouveau_bo_map(priv->bo_gart);
266 if (ret)
267 nouveau_bo_unpin(priv->bo_gart);
268 }
269 if (ret)
270 nouveau_bo_ref(NULL, &priv->bo_gart);
271 }
272
179 if (ret) 273 if (ret)
180 nv84_fence_destroy(drm); 274 nv84_fence_destroy(drm);
181 return ret; 275 return ret;
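nv84_fence_suspend()/resume() above snapshot one 32-bit sequence word per FIFO channel into a vmalloc'd array and write the words back on resume, presumably because the VRAM-backed fence buffer cannot be trusted to survive suspend. The generic shape of the trick as a self-contained sketch (plain C for illustration; the driver uses vmalloc() and nouveau_bo_rd32()/nouveau_bo_wr32() against the fence buffer object):

    #include <stdint.h>
    #include <stdlib.h>

    /* snapshot n per-channel sequence words; NULL on allocation failure */
    uint32_t *snapshot_sequences(const uint32_t *seq, int n)
    {
            uint32_t *save = malloc(n * sizeof(*save));
            if (save)
                    for (int i = 0; i < n; i++)
                            save[i] = seq[i];
            return save;
    }

    int main(void)
    {
            uint32_t seq[4] = { 1, 2, 3, 4 };
            uint32_t *save = snapshot_sequences(seq, 4);
            free(save);
            return 0;
    }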
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
index 2a56b1b551cb..9566267fbc42 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fence.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -34,203 +34,57 @@
34 34
35#include "nv50_display.h" 35#include "nv50_display.h"
36 36
37struct nvc0_fence_priv {
38 struct nouveau_fence_priv base;
39 struct nouveau_bo *bo;
40 u32 *suspend;
41};
42
43struct nvc0_fence_chan {
44 struct nouveau_fence_chan base;
45 struct nouveau_vma vma;
46 struct nouveau_vma dispc_vma[4];
47};
48
49u64
50nvc0_fence_crtc(struct nouveau_channel *chan, int crtc)
51{
52 struct nvc0_fence_chan *fctx = chan->fence;
53 return fctx->dispc_vma[crtc].offset;
54}
55
56static int 37static int
57nvc0_fence_emit(struct nouveau_fence *fence) 38nvc0_fence_emit32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
58{ 39{
59 struct nouveau_channel *chan = fence->channel; 40 int ret = RING_SPACE(chan, 6);
60 struct nvc0_fence_chan *fctx = chan->fence;
61 struct nouveau_fifo_chan *fifo = (void *)chan->object;
62 u64 addr = fctx->vma.offset + fifo->chid * 16;
63 int ret;
64
65 ret = RING_SPACE(chan, 5);
66 if (ret == 0) { 41 if (ret == 0) {
67 BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); 42 BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 5);
68 OUT_RING (chan, upper_32_bits(addr)); 43 OUT_RING (chan, upper_32_bits(virtual));
69 OUT_RING (chan, lower_32_bits(addr)); 44 OUT_RING (chan, lower_32_bits(virtual));
70 OUT_RING (chan, fence->sequence); 45 OUT_RING (chan, sequence);
71 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG); 46 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG);
47 OUT_RING (chan, 0x00000000);
72 FIRE_RING (chan); 48 FIRE_RING (chan);
73 } 49 }
74
75 return ret; 50 return ret;
76} 51}
77 52
78static int 53static int
79nvc0_fence_sync(struct nouveau_fence *fence, 54nvc0_fence_sync32(struct nouveau_channel *chan, u64 virtual, u32 sequence)
80 struct nouveau_channel *prev, struct nouveau_channel *chan)
81{ 55{
82 struct nvc0_fence_chan *fctx = chan->fence; 56 int ret = RING_SPACE(chan, 5);
83 struct nouveau_fifo_chan *fifo = (void *)prev->object;
84 u64 addr = fctx->vma.offset + fifo->chid * 16;
85 int ret;
86
87 ret = RING_SPACE(chan, 5);
88 if (ret == 0) { 57 if (ret == 0) {
89 BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4); 58 BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
90 OUT_RING (chan, upper_32_bits(addr)); 59 OUT_RING (chan, upper_32_bits(virtual));
91 OUT_RING (chan, lower_32_bits(addr)); 60 OUT_RING (chan, lower_32_bits(virtual));
92 OUT_RING (chan, fence->sequence); 61 OUT_RING (chan, sequence);
93 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL | 62 OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL |
94 NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD); 63 NVC0_SUBCHAN_SEMAPHORE_TRIGGER_YIELD);
95 FIRE_RING (chan); 64 FIRE_RING (chan);
96 } 65 }
97
98 return ret; 66 return ret;
99} 67}
100 68
101static u32
102nvc0_fence_read(struct nouveau_channel *chan)
103{
104 struct nouveau_fifo_chan *fifo = (void *)chan->object;
105 struct nvc0_fence_priv *priv = chan->drm->fence;
106 return nouveau_bo_rd32(priv->bo, fifo->chid * 16/4);
107}
108
109static void
110nvc0_fence_context_del(struct nouveau_channel *chan)
111{
112 struct drm_device *dev = chan->drm->dev;
113 struct nvc0_fence_priv *priv = chan->drm->fence;
114 struct nvc0_fence_chan *fctx = chan->fence;
115 int i;
116
117 for (i = 0; i < dev->mode_config.num_crtc; i++) {
118 struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
119 nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
120 }
121
122 nouveau_bo_vma_del(priv->bo, &fctx->vma);
123 nouveau_fence_context_del(&fctx->base);
124 chan->fence = NULL;
125 kfree(fctx);
126}
127
128static int 69static int
129nvc0_fence_context_new(struct nouveau_channel *chan) 70nvc0_fence_context_new(struct nouveau_channel *chan)
130{ 71{
131 struct nouveau_fifo_chan *fifo = (void *)chan->object; 72 int ret = nv84_fence_context_new(chan);
132 struct nouveau_client *client = nouveau_client(fifo); 73 if (ret == 0) {
133 struct nvc0_fence_priv *priv = chan->drm->fence; 74 struct nv84_fence_chan *fctx = chan->fence;
134 struct nvc0_fence_chan *fctx; 75 fctx->base.emit32 = nvc0_fence_emit32;
135 int ret, i; 76 fctx->base.sync32 = nvc0_fence_sync32;
136
137 fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
138 if (!fctx)
139 return -ENOMEM;
140
141 nouveau_fence_context_new(&fctx->base);
142
143 ret = nouveau_bo_vma_add(priv->bo, client->vm, &fctx->vma);
144 if (ret)
145 nvc0_fence_context_del(chan);
146
147 /* map display semaphore buffers into channel's vm */
148 for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
149 struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
150 ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
151 } 77 }
152
153 nouveau_bo_wr32(priv->bo, fifo->chid * 16/4, 0x00000000);
154 return ret; 78 return ret;
155} 79}
156 80
157static bool
158nvc0_fence_suspend(struct nouveau_drm *drm)
159{
160 struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
161 struct nvc0_fence_priv *priv = drm->fence;
162 int i;
163
164 priv->suspend = vmalloc((pfifo->max + 1) * sizeof(u32));
165 if (priv->suspend) {
166 for (i = 0; i <= pfifo->max; i++)
167 priv->suspend[i] = nouveau_bo_rd32(priv->bo, i);
168 }
169
170 return priv->suspend != NULL;
171}
172
173static void
174nvc0_fence_resume(struct nouveau_drm *drm)
175{
176 struct nouveau_fifo *pfifo = nouveau_fifo(drm->device);
177 struct nvc0_fence_priv *priv = drm->fence;
178 int i;
179
180 if (priv->suspend) {
181 for (i = 0; i <= pfifo->max; i++)
182 nouveau_bo_wr32(priv->bo, i, priv->suspend[i]);
183 vfree(priv->suspend);
184 priv->suspend = NULL;
185 }
186}
187
188static void
189nvc0_fence_destroy(struct nouveau_drm *drm)
190{
191 struct nvc0_fence_priv *priv = drm->fence;
192 nouveau_bo_unmap(priv->bo);
193 if (priv->bo)
194 nouveau_bo_unpin(priv->bo);
195 nouveau_bo_ref(NULL, &priv->bo);
196 drm->fence = NULL;
197 kfree(priv);
198}
199
200int 81int
201nvc0_fence_create(struct nouveau_drm *drm) 82nvc0_fence_create(struct nouveau_drm *drm)
202{ 83{
203 struct nouveau_fifo *pfifo = nouveau_fifo(drm->device); 84 int ret = nv84_fence_create(drm);
204 struct nvc0_fence_priv *priv;
205 int ret;
206
207 priv = drm->fence = kzalloc(sizeof(*priv), GFP_KERNEL);
208 if (!priv)
209 return -ENOMEM;
210
211 priv->base.dtor = nvc0_fence_destroy;
212 priv->base.suspend = nvc0_fence_suspend;
213 priv->base.resume = nvc0_fence_resume;
214 priv->base.context_new = nvc0_fence_context_new;
215 priv->base.context_del = nvc0_fence_context_del;
216 priv->base.emit = nvc0_fence_emit;
217 priv->base.sync = nvc0_fence_sync;
218 priv->base.read = nvc0_fence_read;
219
220 ret = nouveau_bo_new(drm->dev, 16 * (pfifo->max + 1), 0,
221 TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
222 if (ret == 0) { 85 if (ret == 0) {
223 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM); 86 struct nv84_fence_priv *priv = drm->fence;
224 if (ret == 0) { 87 priv->base.context_new = nvc0_fence_context_new;
225 ret = nouveau_bo_map(priv->bo);
226 if (ret)
227 nouveau_bo_unpin(priv->bo);
228 }
229 if (ret)
230 nouveau_bo_ref(NULL, &priv->bo);
231 } 88 }
232
233 if (ret)
234 nvc0_fence_destroy(drm);
235 return ret; 89 return ret;
236} 90}
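After this rework nvc0_fence is a thin subclass of nv84_fence: the create and context_new paths call the nv84 versions first, then override only the hooks that differ. Condensed from the hunks above:

    /* create: reuse nv84 priv/bo setup, swap the context constructor */
    ret = nv84_fence_create(drm);
    priv->base.context_new = nvc0_fence_context_new;

    /* context_new: reuse nv84 vma setup, swap the ring emitters */
    ret = nv84_fence_context_new(chan);
    fctx->base.emit32 = nvc0_fence_emit32;
    fctx->base.sync32 = nvc0_fence_sync32;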
diff --git a/drivers/staging/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index 09f65dc3d2c8..09f65dc3d2c8 100644
--- a/drivers/staging/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
diff --git a/drivers/staging/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile
index d85e058f2845..d85e058f2845 100644
--- a/drivers/staging/omapdrm/Makefile
+++ b/drivers/gpu/drm/omapdrm/Makefile
diff --git a/drivers/gpu/drm/omapdrm/TODO b/drivers/gpu/drm/omapdrm/TODO
new file mode 100644
index 000000000000..4d8c18aa5dd7
--- /dev/null
+++ b/drivers/gpu/drm/omapdrm/TODO
@@ -0,0 +1,23 @@
1TODO
2. Where should we do eviction (detatch_pages())? We aren't necessarily
3 accessing the pages via a GART, so maybe we need some other threshold
4 to put a cap on the # of pages that can be pin'd.
5 . Use mm_shrinker to trigger unpinning pages.
6 . This is mainly theoretical since most of these devices don't actually
7 have swap or harddrive.
8. GEM/shmem backed pages can have existing mappings (kernel linear map,
9 etc..), which isn't really ideal.
10. Revisit GEM sync object infrastructure.. TTM has some framework for this
11 already. Possibly this could be refactored out and made more common?
12 There should be some way to do this with less wheel-reinvention.
13 . This can be handled by the dma-buf fence/reservation stuff when it
14 lands
15
16Userspace:
17. git://anongit.freedesktop.org/xorg/driver/xf86-video-omap
18
19Currently tested on
20. OMAP3530 beagleboard
21. OMAP4430 pandaboard
22. OMAP4460 pandaboard
23. OMAP5432 uEVM
diff --git a/drivers/staging/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 8979c80adb5f..c451c41a7a7d 100644
--- a/drivers/staging/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/staging/omapdrm/omap_connector.c 2 * drivers/gpu/drm/omapdrm/omap_connector.c
3 * 3 *
4 * Copyright (C) 2011 Texas Instruments 4 * Copyright (C) 2011 Texas Instruments
5 * Author: Rob Clark <rob@ti.com> 5 * Author: Rob Clark <rob@ti.com>
diff --git a/drivers/staging/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 32109c09357c..bec66a490b8f 100644
--- a/drivers/staging/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/staging/omapdrm/omap_crtc.c 2 * drivers/gpu/drm/omapdrm/omap_crtc.c
3 * 3 *
4 * Copyright (C) 2011 Texas Instruments 4 * Copyright (C) 2011 Texas Instruments
5 * Author: Rob Clark <rob@ti.com> 5 * Author: Rob Clark <rob@ti.com>
@@ -274,17 +274,16 @@ static void page_flip_worker(struct work_struct *work)
274 struct omap_crtc *omap_crtc = 274 struct omap_crtc *omap_crtc =
275 container_of(work, struct omap_crtc, page_flip_work); 275 container_of(work, struct omap_crtc, page_flip_work);
276 struct drm_crtc *crtc = &omap_crtc->base; 276 struct drm_crtc *crtc = &omap_crtc->base;
277 struct drm_device *dev = crtc->dev;
278 struct drm_display_mode *mode = &crtc->mode; 277 struct drm_display_mode *mode = &crtc->mode;
279 struct drm_gem_object *bo; 278 struct drm_gem_object *bo;
280 279
281 mutex_lock(&dev->mode_config.mutex); 280 mutex_lock(&crtc->mutex);
282 omap_plane_mode_set(omap_crtc->plane, crtc, crtc->fb, 281 omap_plane_mode_set(omap_crtc->plane, crtc, crtc->fb,
283 0, 0, mode->hdisplay, mode->vdisplay, 282 0, 0, mode->hdisplay, mode->vdisplay,
284 crtc->x << 16, crtc->y << 16, 283 crtc->x << 16, crtc->y << 16,
285 mode->hdisplay << 16, mode->vdisplay << 16, 284 mode->hdisplay << 16, mode->vdisplay << 16,
286 vblank_cb, crtc); 285 vblank_cb, crtc);
287 mutex_unlock(&dev->mode_config.mutex); 286 mutex_unlock(&crtc->mutex);
288 287
289 bo = omap_framebuffer_bo(crtc->fb, 0); 288 bo = omap_framebuffer_bo(crtc->fb, 0);
290 drm_gem_object_unreference_unlocked(bo); 289 drm_gem_object_unreference_unlocked(bo);
@@ -417,7 +416,7 @@ static void apply_worker(struct work_struct *work)
417 * the callbacks and list modification all serialized 416 * the callbacks and list modification all serialized
418 * with respect to modesetting ioctls from userspace. 417 * with respect to modesetting ioctls from userspace.
419 */ 418 */
420 mutex_lock(&dev->mode_config.mutex); 419 mutex_lock(&crtc->mutex);
421 dispc_runtime_get(); 420 dispc_runtime_get();
422 421
423 /* 422 /*
@@ -462,16 +461,15 @@ static void apply_worker(struct work_struct *work)
462 461
463out: 462out:
464 dispc_runtime_put(); 463 dispc_runtime_put();
465 mutex_unlock(&dev->mode_config.mutex); 464 mutex_unlock(&crtc->mutex);
466} 465}
467 466
468int omap_crtc_apply(struct drm_crtc *crtc, 467int omap_crtc_apply(struct drm_crtc *crtc,
469 struct omap_drm_apply *apply) 468 struct omap_drm_apply *apply)
470{ 469{
471 struct omap_crtc *omap_crtc = to_omap_crtc(crtc); 470 struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
472 struct drm_device *dev = crtc->dev;
473 471
474 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); 472 WARN_ON(!mutex_is_locked(&crtc->mutex));
475 473
476 /* no need to queue it again if it is already queued: */ 474 /* no need to queue it again if it is already queued: */
477 if (apply->queued) 475 if (apply->queued)
diff --git a/drivers/staging/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c
index 2f122e00b51d..c27f59da7f29 100644
--- a/drivers/staging/omapdrm/omap_debugfs.c
+++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/staging/omapdrm/omap_debugfs.c 2 * drivers/gpu/drm/omapdrm/omap_debugfs.c
3 * 3 *
4 * Copyright (C) 2011 Texas Instruments 4 * Copyright (C) 2011 Texas Instruments
5 * Author: Rob Clark <rob.clark@linaro.org> 5 * Author: Rob Clark <rob.clark@linaro.org>
@@ -57,21 +57,11 @@ static int fb_show(struct seq_file *m, void *arg)
57 struct drm_device *dev = node->minor->dev; 57 struct drm_device *dev = node->minor->dev;
58 struct omap_drm_private *priv = dev->dev_private; 58 struct omap_drm_private *priv = dev->dev_private;
59 struct drm_framebuffer *fb; 59 struct drm_framebuffer *fb;
60 int ret;
61
62 ret = mutex_lock_interruptible(&dev->mode_config.mutex);
63 if (ret)
64 return ret;
65
66 ret = mutex_lock_interruptible(&dev->struct_mutex);
67 if (ret) {
68 mutex_unlock(&dev->mode_config.mutex);
69 return ret;
70 }
71 60
72 seq_printf(m, "fbcon "); 61 seq_printf(m, "fbcon ");
73 omap_framebuffer_describe(priv->fbdev->fb, m); 62 omap_framebuffer_describe(priv->fbdev->fb, m);
74 63
64 mutex_lock(&dev->mode_config.fb_lock);
75 list_for_each_entry(fb, &dev->mode_config.fb_list, head) { 65 list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
76 if (fb == priv->fbdev->fb) 66 if (fb == priv->fbdev->fb)
77 continue; 67 continue;
@@ -79,9 +69,7 @@ static int fb_show(struct seq_file *m, void *arg)
79 seq_printf(m, "user "); 69 seq_printf(m, "user ");
80 omap_framebuffer_describe(fb, m); 70 omap_framebuffer_describe(fb, m);
81 } 71 }
82 72 mutex_unlock(&dev->mode_config.fb_lock);
83 mutex_unlock(&dev->struct_mutex);
84 mutex_unlock(&dev->mode_config.mutex);
85 73
86 return 0; 74 return 0;
87} 75}
diff --git a/drivers/staging/omapdrm/omap_dmm_priv.h b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
index 58bcd6ae0255..58bcd6ae0255 100644
--- a/drivers/staging/omapdrm/omap_dmm_priv.h
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 9b794c933c81..9b794c933c81 100644
--- a/drivers/staging/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.h b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
index 4fdd61e54bd2..4fdd61e54bd2 100644
--- a/drivers/staging/omapdrm/omap_dmm_tiler.h
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
diff --git a/drivers/staging/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 480dc343446c..079c54c6f94c 100644
--- a/drivers/staging/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/staging/omapdrm/omap_drv.c 2 * drivers/gpu/drm/omapdrm/omap_drv.c
3 * 3 *
4 * Copyright (C) 2011 Texas Instruments 4 * Copyright (C) 2011 Texas Instruments
5 * Author: Rob Clark <rob@ti.com> 5 * Author: Rob Clark <rob@ti.com>
@@ -452,9 +452,9 @@ static void dev_lastclose(struct drm_device *dev)
452 } 452 }
453 } 453 }
454 454
455 mutex_lock(&dev->mode_config.mutex); 455 drm_modeset_lock_all(dev);
456 ret = drm_fb_helper_restore_fbdev_mode(priv->fbdev); 456 ret = drm_fb_helper_restore_fbdev_mode(priv->fbdev);
457 mutex_unlock(&dev->mode_config.mutex); 457 drm_modeset_unlock_all(dev);
458 if (ret) 458 if (ret)
459 DBG("failed to restore crtc mode"); 459 DBG("failed to restore crtc mode");
460} 460}
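dev_lastclose() switches from taking mode_config.mutex directly to the new drm_modeset_lock_all()/drm_modeset_unlock_all() helpers, which acquire the mode_config mutex plus every per-CRTC lock. Code that restores or mutates global modeset state now uses the same bracket:

    drm_modeset_lock_all(dev);
    ret = drm_fb_helper_restore_fbdev_mode(priv->fbdev);
    drm_modeset_unlock_all(dev);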
diff --git a/drivers/staging/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index f921027e7500..d4f997bb4ac0 100644
--- a/drivers/staging/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/staging/omapdrm/omap_drv.h 2 * drivers/gpu/drm/omapdrm/omap_drv.h
3 * 3 *
4 * Copyright (C) 2011 Texas Instruments 4 * Copyright (C) 2011 Texas Instruments
5 * Author: Rob Clark <rob@ti.com> 5 * Author: Rob Clark <rob@ti.com>
@@ -25,8 +25,8 @@
25#include <linux/types.h> 25#include <linux/types.h>
26#include <drm/drmP.h> 26#include <drm/drmP.h>
27#include <drm/drm_crtc_helper.h> 27#include <drm/drm_crtc_helper.h>
28#include <drm/omap_drm.h>
28#include <linux/platform_data/omap_drm.h> 29#include <linux/platform_data/omap_drm.h>
29#include "omap_drm.h"
30 30
31 31
32#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__) 32#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
diff --git a/drivers/staging/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 25fc0c7b4f6d..21d126d0317e 100644
--- a/drivers/staging/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/staging/omapdrm/omap_encoder.c 2 * drivers/gpu/drm/omapdrm/omap_encoder.c
3 * 3 *
4 * Copyright (C) 2011 Texas Instruments 4 * Copyright (C) 2011 Texas Instruments
5 * Author: Rob Clark <rob@ti.com> 5 * Author: Rob Clark <rob@ti.com>
diff --git a/drivers/staging/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index bb4969942148..8031402e7951 100644
--- a/drivers/staging/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/staging/omapdrm/omap_fb.c 2 * drivers/gpu/drm/omapdrm/omap_fb.c
3 * 3 *
4 * Copyright (C) 2011 Texas Instruments 4 * Copyright (C) 2011 Texas Instruments
5 * Author: Rob Clark <rob@ti.com> 5 * Author: Rob Clark <rob@ti.com>
@@ -423,14 +423,6 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
423 } 423 }
424 424
425 fb = &omap_fb->base; 425 fb = &omap_fb->base;
426 ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
427 if (ret) {
428 dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
429 goto fail;
430 }
431
432 DBG("create: FB ID: %d (%p)", fb->base.id, fb);
433
434 omap_fb->format = format; 426 omap_fb->format = format;
435 427
436 for (i = 0; i < n; i++) { 428 for (i = 0; i < n; i++) {
@@ -461,6 +453,14 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
461 453
462 drm_helper_mode_fill_fb_struct(fb, mode_cmd); 454 drm_helper_mode_fill_fb_struct(fb, mode_cmd);
463 455
456 ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
457 if (ret) {
458 dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
459 goto fail;
460 }
461
462 DBG("create: FB ID: %d (%p)", fb->base.id, fb);
463
464 return fb; 464 return fb;
465 465
466fail: 466fail:
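The omap_fb.c hunks move drm_framebuffer_init() from the top of omap_framebuffer_init() to the very end. With framebuffers now tracked under mode_config.fb_lock (see the omap_debugfs.c hunk above), the init call is effectively the publication point, so every field must be populated before it runs:

    /* fill in format, planes, geometry first ... */
    drm_helper_mode_fill_fb_struct(fb, mode_cmd);

    /* ... then publish; after this the fb is visible to lookups */
    ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
    if (ret)
            goto fail;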
diff --git a/drivers/staging/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 70f2d6ed2ed3..b11ce609fcc2 100644
--- a/drivers/staging/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/staging/omapdrm/omap_fbdev.c 2 * drivers/gpu/drm/omapdrm/omap_fbdev.c
3 * 3 *
4 * Copyright (C) 2011 Texas Instruments 4 * Copyright (C) 2011 Texas Instruments
5 * Author: Rob Clark <rob@ti.com> 5 * Author: Rob Clark <rob@ti.com>
@@ -131,9 +131,6 @@ static struct fb_ops omap_fb_ops = {
131 .fb_pan_display = omap_fbdev_pan_display, 131 .fb_pan_display = omap_fbdev_pan_display,
132 .fb_blank = drm_fb_helper_blank, 132 .fb_blank = drm_fb_helper_blank,
133 .fb_setcmap = drm_fb_helper_setcmap, 133 .fb_setcmap = drm_fb_helper_setcmap,
134
135 .fb_debug_enter = drm_fb_helper_debug_enter,
136 .fb_debug_leave = drm_fb_helper_debug_leave,
137}; 134};
138 135
139static int omap_fbdev_create(struct drm_fb_helper *helper, 136static int omap_fbdev_create(struct drm_fb_helper *helper,
@@ -275,8 +272,10 @@ fail:
275 if (ret) { 272 if (ret) {
276 if (fbi) 273 if (fbi)
277 framebuffer_release(fbi); 274 framebuffer_release(fbi);
278 if (fb) 275 if (fb) {
276 drm_framebuffer_unregister_private(fb);
279 drm_framebuffer_remove(fb); 277 drm_framebuffer_remove(fb);
278 }
280 } 279 }
281 280
282 return ret; 281 return ret;
@@ -294,25 +293,10 @@ static void omap_crtc_fb_gamma_get(struct drm_crtc *crtc,
294 DBG("fbdev: get gamma"); 293 DBG("fbdev: get gamma");
295} 294}
296 295
297static int omap_fbdev_probe(struct drm_fb_helper *helper,
298 struct drm_fb_helper_surface_size *sizes)
299{
300 int new_fb = 0;
301 int ret;
302
303 if (!helper->fb) {
304 ret = omap_fbdev_create(helper, sizes);
305 if (ret)
306 return ret;
307 new_fb = 1;
308 }
309 return new_fb;
310}
311
312static struct drm_fb_helper_funcs omap_fb_helper_funcs = { 296static struct drm_fb_helper_funcs omap_fb_helper_funcs = {
313 .gamma_set = omap_crtc_fb_gamma_set, 297 .gamma_set = omap_crtc_fb_gamma_set,
314 .gamma_get = omap_crtc_fb_gamma_get, 298 .gamma_get = omap_crtc_fb_gamma_get,
315 .fb_probe = omap_fbdev_probe, 299 .fb_probe = omap_fbdev_create,
316}; 300};
317 301
318static struct drm_fb_helper *get_fb(struct fb_info *fbi) 302static struct drm_fb_helper *get_fb(struct fb_info *fbi)
@@ -365,6 +349,10 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
365 } 349 }
366 350
367 drm_fb_helper_single_add_all_connectors(helper); 351 drm_fb_helper_single_add_all_connectors(helper);
352
353 /* disable all the possible outputs/crtcs before entering KMS mode */
354 drm_helper_disable_unused_functions(dev);
355
368 drm_fb_helper_initial_config(helper, 32); 356 drm_fb_helper_initial_config(helper, 32);
369 357
370 priv->fbdev = helper; 358 priv->fbdev = helper;
@@ -398,8 +386,10 @@ void omap_fbdev_free(struct drm_device *dev)
398 fbdev = to_omap_fbdev(priv->fbdev); 386 fbdev = to_omap_fbdev(priv->fbdev);
399 387
400 /* this will free the backing object */ 388 /* this will free the backing object */
401 if (fbdev->fb) 389 if (fbdev->fb) {
390 drm_framebuffer_unregister_private(fbdev->fb);
402 drm_framebuffer_remove(fbdev->fb); 391 drm_framebuffer_remove(fbdev->fb);
392 }
403 393
404 kfree(fbdev); 394 kfree(fbdev);
405 395
diff --git a/drivers/staging/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 518d03d4d4f3..ebbdf4132e9c 100644
--- a/drivers/staging/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/staging/omapdrm/omap_gem.c 2 * drivers/gpu/drm/omapdrm/omap_gem.c
3 * 3 *
4 * Copyright (C) 2011 Texas Instruments 4 * Copyright (C) 2011 Texas Instruments
5 * Author: Rob Clark <rob.clark@linaro.org> 5 * Author: Rob Clark <rob.clark@linaro.org>
diff --git a/drivers/staging/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index a3236abfca3d..ac74d1bc67bf 100644
--- a/drivers/staging/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/staging/omapdrm/omap_gem_dmabuf.c 2 * drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
3 * 3 *
4 * Copyright (C) 2011 Texas Instruments 4 * Copyright (C) 2011 Texas Instruments
5 * Author: Rob Clark <rob.clark@linaro.org> 5 * Author: Rob Clark <rob.clark@linaro.org>
diff --git a/drivers/staging/omapdrm/omap_gem_helpers.c b/drivers/gpu/drm/omapdrm/omap_gem_helpers.c
index ffb8cceaeb46..e4a66a35fc6a 100644
--- a/drivers/staging/omapdrm/omap_gem_helpers.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_helpers.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/staging/omapdrm/omap_gem_helpers.c 2 * drivers/gpu/drm/omapdrm/omap_gem_helpers.c
3 * 3 *
4 * Copyright (C) 2011 Texas Instruments 4 * Copyright (C) 2011 Texas Instruments
5 * Author: Rob Clark <rob.clark@linaro.org> 5 * Author: Rob Clark <rob.clark@linaro.org>
diff --git a/drivers/staging/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index 2629ba7be6c8..e01303ee00c3 100644
--- a/drivers/staging/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/staging/omapdrm/omap_irq.c 2 * drivers/gpu/drm/omapdrm/omap_irq.c
3 * 3 *
4 * Copyright (C) 2012 Texas Instruments 4 * Copyright (C) 2012 Texas Instruments
5 * Author: Rob Clark <rob.clark@linaro.org> 5 * Author: Rob Clark <rob.clark@linaro.org>
diff --git a/drivers/staging/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index c063476db3bb..2882cda6ea19 100644
--- a/drivers/staging/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/staging/omapdrm/omap_plane.c 2 * drivers/gpu/drm/omapdrm/omap_plane.c
3 * 3 *
4 * Copyright (C) 2011 Texas Instruments 4 * Copyright (C) 2011 Texas Instruments
5 * Author: Rob Clark <rob.clark@linaro.org> 5 * Author: Rob Clark <rob.clark@linaro.org>
diff --git a/drivers/staging/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c
index efb609510540..efb609510540 100644
--- a/drivers/staging/omapdrm/tcm-sita.c
+++ b/drivers/gpu/drm/omapdrm/tcm-sita.c
diff --git a/drivers/staging/omapdrm/tcm-sita.h b/drivers/gpu/drm/omapdrm/tcm-sita.h
index 0444f868671c..0444f868671c 100644
--- a/drivers/staging/omapdrm/tcm-sita.h
+++ b/drivers/gpu/drm/omapdrm/tcm-sita.h
diff --git a/drivers/staging/omapdrm/tcm.h b/drivers/gpu/drm/omapdrm/tcm.h
index a8d5ce47686f..a8d5ce47686f 100644
--- a/drivers/staging/omapdrm/tcm.h
+++ b/drivers/gpu/drm/omapdrm/tcm.h
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index ea92bbe3ed37..970f8e92dbb7 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -1,31 +1,8 @@
1config DRM_RADEON_KMS 1config DRM_RADEON_UMS
2 bool "Enable modesetting on radeon by default - NEW DRIVER" 2 bool "Enable userspace modesetting on radeon (DEPRECATED)"
3 depends on DRM_RADEON 3 depends on DRM_RADEON
4 select BACKLIGHT_CLASS_DEVICE
5 help 4 help
6 Choose this option if you want kernel modesetting enabled by default. 5 Choose this option if you still need userspace modesetting.
7 6
8	  This is a completely new driver. It's only part of the existing drm	 7	  Userspace modesetting has been deprecated for quite some time now, so
9 for compatibility reasons. It requires an entirely different graphics 8 enable this only if you have ancient versions of the DDX drivers.
10 stack above it and works very differently from the old drm stack.
11 i.e. don't enable this unless you know what you are doing it may
12 cause issues or bugs compared to the previous userspace driver stack.
13
14 When kernel modesetting is enabled the IOCTL of radeon/drm
15 driver are considered as invalid and an error message is printed
16 in the log and they return failure.
17
18 KMS enabled userspace will use new API to talk with the radeon/drm
19 driver. The new API provide functions to create/destroy/share/mmap
20 buffer object which are then managed by the kernel memory manager
21 (here TTM). In order to submit command to the GPU the userspace
22 provide a buffer holding the command stream, along this buffer
23 userspace have to provide a list of buffer object used by the
24 command stream. The kernel radeon driver will then place buffer
25 in GPU accessible memory and will update command stream to reflect
26 the position of the different buffers.
27
28 The kernel will also perform security check on command stream
29 provided by the user, we want to catch and forbid any illegal use
30 of the GPU such as DMA into random system memory or into memory
31 not owned by the process supplying the command stream.
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index a6598fd66423..bf172522ea68 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -56,8 +56,12 @@ $(obj)/r600_cs.o: $(obj)/r600_reg_safe.h
56 56
57$(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h $(obj)/cayman_reg_safe.h 57$(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h $(obj)/cayman_reg_safe.h
58 58
59radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \ 59radeon-y := radeon_drv.o
60 radeon_irq.o r300_cmdbuf.o r600_cp.o 60
61# add UMS driver
62radeon-$(CONFIG_DRM_RADEON_UMS)+= radeon_cp.o radeon_state.o radeon_mem.o \
63 radeon_irq.o r300_cmdbuf.o r600_cp.o r600_blit.o
64
61# add KMS driver 65# add KMS driver
62radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ 66radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
63 radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \ 67 radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \
@@ -67,7 +71,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
67 radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \ 71 radeon_clocks.o radeon_fb.o radeon_gem.o radeon_ring.o radeon_irq_kms.o \
68 radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ 72 radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
69 rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ 73 rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
70 r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \ 74 r200.o radeon_legacy_tv.o r600_cs.o r600_blit_shaders.o \
71 r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ 75 r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
72 evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \ 76 evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
73 evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \ 77 evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 5ce9bf51a8de..46a9c3772850 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1238,6 +1238,8 @@ static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
 static void atom_index_iio(struct atom_context *ctx, int base)
 {
     ctx->iio = kzalloc(2 * 256, GFP_KERNEL);
+    if (!ctx->iio)
+        return;
     while (CU8(base) == ATOM_IIO_START) {
         ctx->iio[CU8(base + 1)] = base + 2;
         base += 2;
@@ -1287,6 +1289,10 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
     ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR);
     ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR);
     atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4);
+    if (!ctx->iio) {
+        atom_destroy(ctx);
+        return NULL;
+    }
 
     str = CSTR(CU16(base + ATOM_ROM_MSG_PTR));
     while (*str && ((*str == '\n') || (*str == '\r')))
@@ -1335,8 +1341,7 @@ int atom_asic_init(struct atom_context *ctx)
 
 void atom_destroy(struct atom_context *ctx)
 {
-    if (ctx->iio)
-        kfree(ctx->iio);
+    kfree(ctx->iio);
     kfree(ctx);
 }
 
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 9175615bbd8a..21a892c6ab9c 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -252,8 +252,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
         radeon_crtc->enabled = true;
         /* adjust pm to dpms changes BEFORE enabling crtcs */
         radeon_pm_compute_clocks(rdev);
-        if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
-            atombios_powergate_crtc(crtc, ATOM_DISABLE);
         atombios_enable_crtc(crtc, ATOM_ENABLE);
         if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
             atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
@@ -271,8 +269,6 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
             atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
         atombios_enable_crtc(crtc, ATOM_DISABLE);
         radeon_crtc->enabled = false;
-        if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
-            atombios_powergate_crtc(crtc, ATOM_ENABLE);
         /* adjust pm to dpms changes AFTER disabling crtcs */
         radeon_pm_compute_clocks(rdev);
         break;
@@ -1844,6 +1840,8 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
     int i;
 
     atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+    if (ASIC_IS_DCE6(rdev))
+        atombios_powergate_crtc(crtc, ATOM_ENABLE);
 
     for (i = 0; i < rdev->num_crtc; i++) {
         if (rdev->mode_info.crtcs[i] &&
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index a2d478e8692a..3c38ea46531c 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -403,6 +403,19 @@ void evergreen_pm_misc(struct radeon_device *rdev)
             rdev->pm.current_vddc = voltage->voltage;
             DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
         }
+
+        /* starting with BTC, there is one state that is used for both
+         * MH and SH.  Difference is that we always use the high clock index for
+         * mclk and vddci.
+         */
+        if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
+            (rdev->family >= CHIP_BARTS) &&
+            rdev->pm.active_crtc_count &&
+            ((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
+             (rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
+            voltage = &rdev->pm.power_state[req_ps_idx].
+                clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].voltage;
+
         /* 0xff01 is a flag rather then an actual voltage */
         if (voltage->vddci == 0xff01)
             return;
@@ -2308,32 +2321,8 @@ int evergreen_mc_init(struct radeon_device *rdev)
     return 0;
 }
 
-bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+void evergreen_print_gpu_status_regs(struct radeon_device *rdev)
 {
-    u32 srbm_status;
-    u32 grbm_status;
-    u32 grbm_status_se0, grbm_status_se1;
-
-    srbm_status = RREG32(SRBM_STATUS);
-    grbm_status = RREG32(GRBM_STATUS);
-    grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
-    grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
-    if (!(grbm_status & GUI_ACTIVE)) {
-        radeon_ring_lockup_update(ring);
-        return false;
-    }
-    /* force CP activities */
-    radeon_ring_force_activity(rdev, ring);
-    return radeon_ring_test_lockup(rdev, ring);
-}
-
-static void evergreen_gpu_soft_reset_gfx(struct radeon_device *rdev)
-{
-    u32 grbm_reset = 0;
-
-    if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
-        return;
-
     dev_info(rdev->dev, "  GRBM_STATUS               = 0x%08X\n",
         RREG32(GRBM_STATUS));
     dev_info(rdev->dev, "  GRBM_STATUS_SE0           = 0x%08X\n",
@@ -2342,6 +2331,8 @@ static void evergreen_gpu_soft_reset_gfx(struct radeon_device *rdev)
         RREG32(GRBM_STATUS_SE1));
     dev_info(rdev->dev, "  SRBM_STATUS               = 0x%08X\n",
         RREG32(SRBM_STATUS));
+    dev_info(rdev->dev, "  SRBM_STATUS2              = 0x%08X\n",
+        RREG32(SRBM_STATUS2));
     dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
         RREG32(CP_STALLED_STAT1));
     dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
@@ -2350,112 +2341,283 @@ static void evergreen_gpu_soft_reset_gfx(struct radeon_device *rdev)
         RREG32(CP_BUSY_STAT));
     dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
         RREG32(CP_STAT));
+    dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
+        RREG32(DMA_STATUS_REG));
+    if (rdev->family >= CHIP_CAYMAN) {
+        dev_info(rdev->dev, "  R_00D834_DMA_STATUS_REG   = 0x%08X\n",
+            RREG32(DMA_STATUS_REG + 0x800));
+    }
+}
 
-    /* Disable CP parsing/prefetching */
-    WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+bool evergreen_is_display_hung(struct radeon_device *rdev)
+{
+    u32 crtc_hung = 0;
+    u32 crtc_status[6];
+    u32 i, j, tmp;
 
-    /* reset all the gfx blocks */
-    grbm_reset = (SOFT_RESET_CP |
-              SOFT_RESET_CB |
-              SOFT_RESET_DB |
-              SOFT_RESET_PA |
-              SOFT_RESET_SC |
-              SOFT_RESET_SPI |
-              SOFT_RESET_SH |
-              SOFT_RESET_SX |
-              SOFT_RESET_TC |
-              SOFT_RESET_TA |
-              SOFT_RESET_VC |
-              SOFT_RESET_VGT);
-
-    dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
-    WREG32(GRBM_SOFT_RESET, grbm_reset);
-    (void)RREG32(GRBM_SOFT_RESET);
-    udelay(50);
-    WREG32(GRBM_SOFT_RESET, 0);
-    (void)RREG32(GRBM_SOFT_RESET);
+    for (i = 0; i < rdev->num_crtc; i++) {
+        if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN) {
+            crtc_status[i] = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+            crtc_hung |= (1 << i);
+        }
+    }
 
-    dev_info(rdev->dev, "  GRBM_STATUS               = 0x%08X\n",
-        RREG32(GRBM_STATUS));
-    dev_info(rdev->dev, "  GRBM_STATUS_SE0           = 0x%08X\n",
-        RREG32(GRBM_STATUS_SE0));
-    dev_info(rdev->dev, "  GRBM_STATUS_SE1           = 0x%08X\n",
-        RREG32(GRBM_STATUS_SE1));
-    dev_info(rdev->dev, "  SRBM_STATUS               = 0x%08X\n",
-        RREG32(SRBM_STATUS));
-    dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
-        RREG32(CP_STALLED_STAT1));
-    dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
-        RREG32(CP_STALLED_STAT2));
-    dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
-        RREG32(CP_BUSY_STAT));
-    dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
-        RREG32(CP_STAT));
+    for (j = 0; j < 10; j++) {
+        for (i = 0; i < rdev->num_crtc; i++) {
+            if (crtc_hung & (1 << i)) {
+                tmp = RREG32(EVERGREEN_CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+                if (tmp != crtc_status[i])
+                    crtc_hung &= ~(1 << i);
+            }
+        }
+        if (crtc_hung == 0)
+            return false;
+        udelay(100);
+    }
+
+    return true;
 }
 
-static void evergreen_gpu_soft_reset_dma(struct radeon_device *rdev)
+static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
 {
+    u32 reset_mask = 0;
     u32 tmp;
 
-    if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
-        return;
+    /* GRBM_STATUS */
+    tmp = RREG32(GRBM_STATUS);
+    if (tmp & (PA_BUSY | SC_BUSY |
+           SH_BUSY | SX_BUSY |
+           TA_BUSY | VGT_BUSY |
+           DB_BUSY | CB_BUSY |
+           SPI_BUSY | VGT_BUSY_NO_DMA))
+        reset_mask |= RADEON_RESET_GFX;
 
-    dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
-        RREG32(DMA_STATUS_REG));
+    if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
+           CP_BUSY | CP_COHERENCY_BUSY))
+        reset_mask |= RADEON_RESET_CP;
 
-    /* Disable DMA */
-    tmp = RREG32(DMA_RB_CNTL);
-    tmp &= ~DMA_RB_ENABLE;
-    WREG32(DMA_RB_CNTL, tmp);
+    if (tmp & GRBM_EE_BUSY)
+        reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
 
-    /* Reset dma */
-    WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
-    RREG32(SRBM_SOFT_RESET);
-    udelay(50);
-    WREG32(SRBM_SOFT_RESET, 0);
+    /* DMA_STATUS_REG */
+    tmp = RREG32(DMA_STATUS_REG);
+    if (!(tmp & DMA_IDLE))
+        reset_mask |= RADEON_RESET_DMA;
 
-    dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
-        RREG32(DMA_STATUS_REG));
+    /* SRBM_STATUS2 */
+    tmp = RREG32(SRBM_STATUS2);
+    if (tmp & DMA_BUSY)
+        reset_mask |= RADEON_RESET_DMA;
+
+    /* SRBM_STATUS */
+    tmp = RREG32(SRBM_STATUS);
+    if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
+        reset_mask |= RADEON_RESET_RLC;
+
+    if (tmp & IH_BUSY)
+        reset_mask |= RADEON_RESET_IH;
+
+    if (tmp & SEM_BUSY)
+        reset_mask |= RADEON_RESET_SEM;
+
+    if (tmp & GRBM_RQ_PENDING)
+        reset_mask |= RADEON_RESET_GRBM;
+
+    if (tmp & VMC_BUSY)
+        reset_mask |= RADEON_RESET_VMC;
+
+    if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
+           MCC_BUSY | MCD_BUSY))
+        reset_mask |= RADEON_RESET_MC;
+
+    if (evergreen_is_display_hung(rdev))
+        reset_mask |= RADEON_RESET_DISPLAY;
+
+    /* VM_L2_STATUS */
+    tmp = RREG32(VM_L2_STATUS);
+    if (tmp & L2_BUSY)
+        reset_mask |= RADEON_RESET_VMC;
+
+    return reset_mask;
 }
 
-static int evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+static void evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
 {
     struct evergreen_mc_save save;
-
-    if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
-        reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
-
-    if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
-        reset_mask &= ~RADEON_RESET_DMA;
+    u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+    u32 tmp;
 
     if (reset_mask == 0)
-        return 0;
+        return;
 
     dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
 
+    evergreen_print_gpu_status_regs(rdev);
+
+    /* Disable CP parsing/prefetching */
+    WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+
+    if (reset_mask & RADEON_RESET_DMA) {
+        /* Disable DMA */
+        tmp = RREG32(DMA_RB_CNTL);
+        tmp &= ~DMA_RB_ENABLE;
+        WREG32(DMA_RB_CNTL, tmp);
+    }
+
+    udelay(50);
+
     evergreen_mc_stop(rdev, &save);
     if (evergreen_mc_wait_for_idle(rdev)) {
         dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
     }
 
-    if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
-        evergreen_gpu_soft_reset_gfx(rdev);
+    if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
+        grbm_soft_reset |= SOFT_RESET_DB |
+            SOFT_RESET_CB |
+            SOFT_RESET_PA |
+            SOFT_RESET_SC |
+            SOFT_RESET_SPI |
+            SOFT_RESET_SX |
+            SOFT_RESET_SH |
+            SOFT_RESET_TC |
+            SOFT_RESET_TA |
+            SOFT_RESET_VC |
+            SOFT_RESET_VGT;
+    }
+
+    if (reset_mask & RADEON_RESET_CP) {
+        grbm_soft_reset |= SOFT_RESET_CP |
+            SOFT_RESET_VGT;
+
+        srbm_soft_reset |= SOFT_RESET_GRBM;
+    }
 
     if (reset_mask & RADEON_RESET_DMA)
-        evergreen_gpu_soft_reset_dma(rdev);
+        srbm_soft_reset |= SOFT_RESET_DMA;
+
+    if (reset_mask & RADEON_RESET_DISPLAY)
+        srbm_soft_reset |= SOFT_RESET_DC;
+
+    if (reset_mask & RADEON_RESET_RLC)
+        srbm_soft_reset |= SOFT_RESET_RLC;
+
+    if (reset_mask & RADEON_RESET_SEM)
+        srbm_soft_reset |= SOFT_RESET_SEM;
+
+    if (reset_mask & RADEON_RESET_IH)
+        srbm_soft_reset |= SOFT_RESET_IH;
+
+    if (reset_mask & RADEON_RESET_GRBM)
+        srbm_soft_reset |= SOFT_RESET_GRBM;
+
+    if (reset_mask & RADEON_RESET_VMC)
+        srbm_soft_reset |= SOFT_RESET_VMC;
+
+    if (!(rdev->flags & RADEON_IS_IGP)) {
+        if (reset_mask & RADEON_RESET_MC)
+            srbm_soft_reset |= SOFT_RESET_MC;
+    }
+
+    if (grbm_soft_reset) {
+        tmp = RREG32(GRBM_SOFT_RESET);
+        tmp |= grbm_soft_reset;
+        dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+        WREG32(GRBM_SOFT_RESET, tmp);
+        tmp = RREG32(GRBM_SOFT_RESET);
+
+        udelay(50);
+
+        tmp &= ~grbm_soft_reset;
+        WREG32(GRBM_SOFT_RESET, tmp);
+        tmp = RREG32(GRBM_SOFT_RESET);
+    }
+
+    if (srbm_soft_reset) {
+        tmp = RREG32(SRBM_SOFT_RESET);
+        tmp |= srbm_soft_reset;
+        dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+        WREG32(SRBM_SOFT_RESET, tmp);
+        tmp = RREG32(SRBM_SOFT_RESET);
+
+        udelay(50);
+
+        tmp &= ~srbm_soft_reset;
+        WREG32(SRBM_SOFT_RESET, tmp);
+        tmp = RREG32(SRBM_SOFT_RESET);
+    }
 
     /* Wait a little for things to settle down */
     udelay(50);
 
     evergreen_mc_resume(rdev, &save);
-    return 0;
+    udelay(50);
+
+    evergreen_print_gpu_status_regs(rdev);
 }
 
 int evergreen_asic_reset(struct radeon_device *rdev)
 {
-    return evergreen_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
-                           RADEON_RESET_COMPUTE |
-                           RADEON_RESET_DMA));
+    u32 reset_mask;
+
+    reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+    if (reset_mask)
+        r600_set_bios_scratch_engine_hung(rdev, true);
+
+    evergreen_gpu_soft_reset(rdev, reset_mask);
+
+    reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+    if (!reset_mask)
+        r600_set_bios_scratch_engine_hung(rdev, false);
+
+    return 0;
+}
+
+/**
+ * evergreen_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+    u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+    if (!(reset_mask & (RADEON_RESET_GFX |
+                RADEON_RESET_COMPUTE |
+                RADEON_RESET_CP))) {
+        radeon_ring_lockup_update(ring);
+        return false;
+    }
+    /* force CP activities */
+    radeon_ring_force_activity(rdev, ring);
+    return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * evergreen_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+    u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+    if (!(reset_mask & RADEON_RESET_DMA)) {
+        radeon_ring_lockup_update(ring);
+        return false;
+    }
+    /* force ring activities */
+    radeon_ring_force_activity(rdev, ring);
+    return radeon_ring_test_lockup(rdev, ring);
 }
 
 /* Interrupts */
@@ -3280,14 +3442,14 @@ void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
     struct radeon_ring *ring = &rdev->ring[fence->ring];
     u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
     /* write the fence */
-    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
     radeon_ring_write(ring, addr & 0xfffffffc);
     radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
     radeon_ring_write(ring, fence->seq);
     /* generate an interrupt */
-    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
     /* flush HDP */
-    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
     radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
     radeon_ring_write(ring, 1);
 }
@@ -3310,7 +3472,7 @@ void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
         while ((next_rptr & 7) != 5)
             next_rptr++;
         next_rptr += 3;
-        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
         radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
         radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
         radeon_ring_write(ring, next_rptr);
@@ -3320,8 +3482,8 @@ void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
      * Pad as necessary with NOPs.
      */
     while ((ring->wptr & 7) != 5)
-        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
-    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
+    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
     radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
     radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
 
@@ -3380,7 +3542,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,
         if (cur_size_in_dw > 0xFFFFF)
             cur_size_in_dw = 0xFFFFF;
         size_in_dw -= cur_size_in_dw;
-        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
         radeon_ring_write(ring, dst_offset & 0xfffffffc);
         radeon_ring_write(ring, src_offset & 0xfffffffc);
         radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
@@ -3488,7 +3650,7 @@ static int evergreen_startup(struct radeon_device *rdev)
     ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
     r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
                  DMA_RB_RPTR, DMA_RB_WPTR,
-                 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+                 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
     if (r)
         return r;
 
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index ee4cff534f10..99fb13286fd0 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -36,9 +36,6 @@
 
 int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
                struct radeon_cs_reloc **cs_reloc);
-static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
-                      struct radeon_cs_reloc **cs_reloc);
-
 struct evergreen_cs_track {
     u32 group_size;
     u32 nbanks;
@@ -1009,223 +1006,35 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
 }
 
 /**
- * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet
- * @parser: parser structure holding parsing context.
- * @pkt: where to store packet informations
- *
- * Assume that chunk_ib_index is properly set. Will return -EINVAL
- * if packet is bigger than remaining ib size. or if packets is unknown.
- **/
-static int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
-                     struct radeon_cs_packet *pkt,
-                     unsigned idx)
-{
-    struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
-    uint32_t header;
-
-    if (idx >= ib_chunk->length_dw) {
-        DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
-              idx, ib_chunk->length_dw);
-        return -EINVAL;
-    }
-    header = radeon_get_ib_value(p, idx);
-    pkt->idx = idx;
-    pkt->type = CP_PACKET_GET_TYPE(header);
-    pkt->count = CP_PACKET_GET_COUNT(header);
-    pkt->one_reg_wr = 0;
-    switch (pkt->type) {
-    case PACKET_TYPE0:
-        pkt->reg = CP_PACKET0_GET_REG(header);
-        break;
-    case PACKET_TYPE3:
-        pkt->opcode = CP_PACKET3_GET_OPCODE(header);
-        break;
-    case PACKET_TYPE2:
-        pkt->count = -1;
-        break;
-    default:
-        DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
-        return -EINVAL;
-    }
-    if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
-        DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
-              pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
-        return -EINVAL;
-    }
-    return 0;
-}
-
-/**
- * evergreen_cs_packet_next_reloc() - parse next packet which should be reloc packet3
+ * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet
  * @parser: parser structure holding parsing context.
- * @data: pointer to relocation data
- * @offset_start: starting offset
- * @offset_mask: offset mask (to align start offset on)
- * @reloc: reloc informations
  *
- * Check next packet is relocation packet3, do bo validation and compute
- * GPU offset using the provided start.
- **/
-static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
-                      struct radeon_cs_reloc **cs_reloc)
-{
-    struct radeon_cs_chunk *relocs_chunk;
-    struct radeon_cs_packet p3reloc;
-    unsigned idx;
-    int r;
-
-    if (p->chunk_relocs_idx == -1) {
-        DRM_ERROR("No relocation chunk !\n");
-        return -EINVAL;
-    }
-    *cs_reloc = NULL;
-    relocs_chunk = &p->chunks[p->chunk_relocs_idx];
-    r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
-    if (r) {
-        return r;
-    }
-    p->idx += p3reloc.count + 2;
-    if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
-        DRM_ERROR("No packet3 for relocation for packet at %d.\n",
-              p3reloc.idx);
-        return -EINVAL;
-    }
-    idx = radeon_get_ib_value(p, p3reloc.idx + 1);
-    if (idx >= relocs_chunk->length_dw) {
-        DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
-              idx, relocs_chunk->length_dw);
-        return -EINVAL;
-    }
-    /* FIXME: we assume reloc size is 4 dwords */
-    *cs_reloc = p->relocs_ptr[(idx / 4)];
-    return 0;
-}
-
-/**
- * evergreen_cs_packet_next_is_pkt3_nop() - test if the next packet is NOP
- * @p: structure holding the parser context.
- *
- * Check if the next packet is a relocation packet3.
- **/
-static bool evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
-{
-    struct radeon_cs_packet p3reloc;
-    int r;
-
-    r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
-    if (r) {
-        return false;
-    }
-    if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
-        return false;
-    }
-    return true;
-}
-
-/**
- * evergreen_cs_packet_next_vline() - parse userspace VLINE packet
- * @parser: parser structure holding parsing context.
- *
- * Userspace sends a special sequence for VLINE waits.
- * PACKET0 - VLINE_START_END + value
- * PACKET3 - WAIT_REG_MEM poll vline status reg
- * RELOC (P3) - crtc_id in reloc.
- *
- * This function parses this and relocates the VLINE START END
- * and WAIT_REG_MEM packets to the correct crtc.
- * It also detects a switched off crtc and nulls out the
- * wait in that case.
+ * This is an Evergreen(+)-specific function for parsing VLINE packets.
+ * Real work is done by r600_cs_common_vline_parse function.
+ * Here we just set up ASIC-specific register table and call
+ * the common implementation function.
  */
 static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
 {
-    struct drm_mode_object *obj;
-    struct drm_crtc *crtc;
-    struct radeon_crtc *radeon_crtc;
-    struct radeon_cs_packet p3reloc, wait_reg_mem;
-    int crtc_id;
-    int r;
-    uint32_t header, h_idx, reg, wait_reg_mem_info;
-    volatile uint32_t *ib;
-
-    ib = p->ib.ptr;
-
-    /* parse the WAIT_REG_MEM */
-    r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
-    if (r)
-        return r;
-
-    /* check its a WAIT_REG_MEM */
-    if (wait_reg_mem.type != PACKET_TYPE3 ||
-        wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
-        DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
-        return -EINVAL;
-    }
-
-    wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
-    /* bit 4 is reg (0) or mem (1) */
-    if (wait_reg_mem_info & 0x10) {
-        DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
-        return -EINVAL;
-    }
-    /* waiting for value to be equal */
-    if ((wait_reg_mem_info & 0x7) != 0x3) {
-        DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
-        return -EINVAL;
-    }
-    if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) {
-        DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
-        return -EINVAL;
-    }
-
-    if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) {
-        DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
-        return -EINVAL;
-    }
-
-    /* jump over the NOP */
-    r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
-    if (r)
-        return r;
-
-    h_idx = p->idx - 2;
-    p->idx += wait_reg_mem.count + 2;
-    p->idx += p3reloc.count + 2;
 
-    header = radeon_get_ib_value(p, h_idx);
-    crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
-    reg = CP_PACKET0_GET_REG(header);
-    obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
-    if (!obj) {
-        DRM_ERROR("cannot find crtc %d\n", crtc_id);
-        return -EINVAL;
-    }
-    crtc = obj_to_crtc(obj);
-    radeon_crtc = to_radeon_crtc(crtc);
-    crtc_id = radeon_crtc->crtc_id;
-
-    if (!crtc->enabled) {
-        /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
-        ib[h_idx + 2] = PACKET2(0);
-        ib[h_idx + 3] = PACKET2(0);
-        ib[h_idx + 4] = PACKET2(0);
-        ib[h_idx + 5] = PACKET2(0);
-        ib[h_idx + 6] = PACKET2(0);
-        ib[h_idx + 7] = PACKET2(0);
-        ib[h_idx + 8] = PACKET2(0);
-    } else {
-        switch (reg) {
-        case EVERGREEN_VLINE_START_END:
-            header &= ~R600_CP_PACKET0_REG_MASK;
-            header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2;
-            ib[h_idx] = header;
-            ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2;
-            break;
-        default:
-            DRM_ERROR("unknown crtc reloc\n");
-            return -EINVAL;
-        }
-    }
-    return 0;
+    static uint32_t vline_start_end[6] = {
+        EVERGREEN_VLINE_START_END + EVERGREEN_CRTC0_REGISTER_OFFSET,
+        EVERGREEN_VLINE_START_END + EVERGREEN_CRTC1_REGISTER_OFFSET,
+        EVERGREEN_VLINE_START_END + EVERGREEN_CRTC2_REGISTER_OFFSET,
+        EVERGREEN_VLINE_START_END + EVERGREEN_CRTC3_REGISTER_OFFSET,
+        EVERGREEN_VLINE_START_END + EVERGREEN_CRTC4_REGISTER_OFFSET,
+        EVERGREEN_VLINE_START_END + EVERGREEN_CRTC5_REGISTER_OFFSET
+    };
+    static uint32_t vline_status[6] = {
+        EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET,
+        EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET,
+        EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+        EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+        EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+        EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET
+    };
+
+    return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
 }
 
 static int evergreen_packet0_check(struct radeon_cs_parser *p,
@@ -1347,7 +1156,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 	case SQ_LSTMP_RING_BASE:
 	case SQ_PSTMP_RING_BASE:
 	case SQ_VSTMP_RING_BASE:
-		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
 					"0x%04X\n", reg);
@@ -1376,7 +1185,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 	case DB_Z_INFO:
 		track->db_z_info = radeon_get_ib_value(p, idx);
 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
-			r = evergreen_cs_packet_next_reloc(p, &reloc);
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 			if (r) {
 				dev_warn(p->dev, "bad SET_CONTEXT_REG "
 						"0x%04X\n", reg);
@@ -1418,7 +1227,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 		track->db_dirty = true;
 		break;
 	case DB_Z_READ_BASE:
-		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
 					"0x%04X\n", reg);
@@ -1430,7 +1239,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 		track->db_dirty = true;
 		break;
 	case DB_Z_WRITE_BASE:
-		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
 					"0x%04X\n", reg);
@@ -1442,7 +1251,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 		track->db_dirty = true;
 		break;
 	case DB_STENCIL_READ_BASE:
-		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
 					"0x%04X\n", reg);
@@ -1454,7 +1263,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 		track->db_dirty = true;
 		break;
 	case DB_STENCIL_WRITE_BASE:
-		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
 					"0x%04X\n", reg);
@@ -1477,7 +1286,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 	case VGT_STRMOUT_BUFFER_BASE_1:
 	case VGT_STRMOUT_BUFFER_BASE_2:
 	case VGT_STRMOUT_BUFFER_BASE_3:
-		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
 					"0x%04X\n", reg);
@@ -1499,7 +1308,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 		track->streamout_dirty = true;
 		break;
 	case CP_COHER_BASE:
-		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
 					"0x%04X\n", reg);
@@ -1563,7 +1372,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 		tmp = (reg - CB_COLOR0_INFO) / 0x3c;
 		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
-			r = evergreen_cs_packet_next_reloc(p, &reloc);
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 			if (r) {
 				dev_warn(p->dev, "bad SET_CONTEXT_REG "
 						"0x%04X\n", reg);
@@ -1581,7 +1390,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 		tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
 		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
-			r = evergreen_cs_packet_next_reloc(p, &reloc);
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 			if (r) {
 				dev_warn(p->dev, "bad SET_CONTEXT_REG "
 						"0x%04X\n", reg);
@@ -1642,7 +1451,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 	case CB_COLOR5_ATTRIB:
 	case CB_COLOR6_ATTRIB:
 	case CB_COLOR7_ATTRIB:
-		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
 					"0x%04X\n", reg);
@@ -1670,7 +1479,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 	case CB_COLOR9_ATTRIB:
 	case CB_COLOR10_ATTRIB:
 	case CB_COLOR11_ATTRIB:
-		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
 					"0x%04X\n", reg);
@@ -1703,7 +1512,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 	case CB_COLOR6_FMASK:
 	case CB_COLOR7_FMASK:
 		tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
-		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
 			return -EINVAL;
@@ -1720,7 +1529,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 	case CB_COLOR6_CMASK:
 	case CB_COLOR7_CMASK:
 		tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
-		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
 			return -EINVAL;
@@ -1758,7 +1567,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 	case CB_COLOR5_BASE:
 	case CB_COLOR6_BASE:
 	case CB_COLOR7_BASE:
-		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
 					"0x%04X\n", reg);
@@ -1774,7 +1583,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 	case CB_COLOR9_BASE:
 	case CB_COLOR10_BASE:
 	case CB_COLOR11_BASE:
-		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
 					"0x%04X\n", reg);
@@ -1787,7 +1596,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 		track->cb_dirty = true;
 		break;
 	case DB_HTILE_DATA_BASE:
-		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
 					"0x%04X\n", reg);
@@ -1905,7 +1714,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 	case SQ_ALU_CONST_CACHE_LS_13:
 	case SQ_ALU_CONST_CACHE_LS_14:
 	case SQ_ALU_CONST_CACHE_LS_15:
-		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
 					"0x%04X\n", reg);
@@ -1919,7 +1728,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 					"0x%04X\n", reg);
 			return -EINVAL;
 		}
-		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			dev_warn(p->dev, "bad SET_CONFIG_REG "
 					"0x%04X\n", reg);
@@ -1933,7 +1742,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 					"0x%04X\n", reg);
 			return -EINVAL;
 		}
-		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			dev_warn(p->dev, "bad SET_CONTEXT_REG "
 					"0x%04X\n", reg);
@@ -2018,7 +1827,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 			return -EINVAL;
 		}
 
-		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("bad SET PREDICATION\n");
 			return -EINVAL;
@@ -2064,7 +1873,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 			DRM_ERROR("bad INDEX_BASE\n");
 			return -EINVAL;
 		}
-		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("bad INDEX_BASE\n");
 			return -EINVAL;
@@ -2091,7 +1900,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 			DRM_ERROR("bad DRAW_INDEX\n");
 			return -EINVAL;
 		}
-		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("bad DRAW_INDEX\n");
 			return -EINVAL;
@@ -2119,7 +1928,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 			DRM_ERROR("bad DRAW_INDEX_2\n");
 			return -EINVAL;
 		}
-		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("bad DRAW_INDEX_2\n");
 			return -EINVAL;
@@ -2210,7 +2019,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 			DRM_ERROR("bad DISPATCH_INDIRECT\n");
 			return -EINVAL;
 		}
-		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("bad DISPATCH_INDIRECT\n");
 			return -EINVAL;
@@ -2231,7 +2040,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 		if (idx_value & 0x10) {
 			uint64_t offset;
 
-			r = evergreen_cs_packet_next_reloc(p, &reloc);
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 			if (r) {
 				DRM_ERROR("bad WAIT_REG_MEM\n");
 				return -EINVAL;
@@ -2243,6 +2052,9 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 
 			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
 			ib[idx+2] = upper_32_bits(offset) & 0xff;
+		} else if (idx_value & 0x100) {
+			DRM_ERROR("cannot use PFP on REG wait\n");
+			return -EINVAL;
 		}
 		break;
 	case PACKET3_CP_DMA:
@@ -2282,7 +2094,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 		}
 		/* src address space is memory */
 		if (((info & 0x60000000) >> 29) == 0) {
-			r = evergreen_cs_packet_next_reloc(p, &reloc);
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 			if (r) {
 				DRM_ERROR("bad CP DMA SRC\n");
 				return -EINVAL;
@@ -2320,7 +2132,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 			return -EINVAL;
 		}
 		if (((info & 0x00300000) >> 20) == 0) {
-			r = evergreen_cs_packet_next_reloc(p, &reloc);
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 			if (r) {
 				DRM_ERROR("bad CP DMA DST\n");
 				return -EINVAL;
@@ -2354,7 +2166,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 		/* 0xffffffff/0x0 is flush all cache flag */
 		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
 		    radeon_get_ib_value(p, idx + 2) != 0) {
-			r = evergreen_cs_packet_next_reloc(p, &reloc);
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 			if (r) {
 				DRM_ERROR("bad SURFACE_SYNC\n");
 				return -EINVAL;
@@ -2370,7 +2182,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 		if (pkt->count) {
 			uint64_t offset;
 
-			r = evergreen_cs_packet_next_reloc(p, &reloc);
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 			if (r) {
 				DRM_ERROR("bad EVENT_WRITE\n");
 				return -EINVAL;
@@ -2391,7 +2203,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 			DRM_ERROR("bad EVENT_WRITE_EOP\n");
 			return -EINVAL;
 		}
-		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("bad EVENT_WRITE_EOP\n");
 			return -EINVAL;
@@ -2413,7 +2225,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 			DRM_ERROR("bad EVENT_WRITE_EOS\n");
 			return -EINVAL;
 		}
-		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("bad EVENT_WRITE_EOS\n");
 			return -EINVAL;
@@ -2480,7 +2292,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 			switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
 			case SQ_TEX_VTX_VALID_TEXTURE:
 				/* tex base */
-				r = evergreen_cs_packet_next_reloc(p, &reloc);
+				r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 				if (r) {
 					DRM_ERROR("bad SET_RESOURCE (tex)\n");
 					return -EINVAL;
@@ -2511,13 +2323,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 
 				if ((tex_dim == SQ_TEX_DIM_2D_MSAA || tex_dim == SQ_TEX_DIM_2D_ARRAY_MSAA) &&
 				    !mip_address &&
-				    !evergreen_cs_packet_next_is_pkt3_nop(p)) {
+				    !radeon_cs_packet_next_is_pkt3_nop(p)) {
 					/* MIP_ADDRESS should point to FMASK for an MSAA texture.
 					 * It should be 0 if FMASK is disabled. */
 					moffset = 0;
 					mipmap = NULL;
 				} else {
-					r = evergreen_cs_packet_next_reloc(p, &reloc);
+					r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 					if (r) {
 						DRM_ERROR("bad SET_RESOURCE (tex)\n");
 						return -EINVAL;
@@ -2536,7 +2348,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 			{
 				uint64_t offset64;
 				/* vtx base */
-				r = evergreen_cs_packet_next_reloc(p, &reloc);
+				r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 				if (r) {
 					DRM_ERROR("bad SET_RESOURCE (vtx)\n");
 					return -EINVAL;
@@ -2618,7 +2430,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 		/* Updating memory at DST_ADDRESS. */
 		if (idx_value & 0x1) {
 			u64 offset;
-			r = evergreen_cs_packet_next_reloc(p, &reloc);
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 			if (r) {
 				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
 				return -EINVAL;
@@ -2637,7 +2449,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 		/* Reading data from SRC_ADDRESS. */
 		if (((idx_value >> 1) & 0x3) == 2) {
 			u64 offset;
-			r = evergreen_cs_packet_next_reloc(p, &reloc);
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 			if (r) {
 				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
 				return -EINVAL;
@@ -2662,7 +2474,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 			DRM_ERROR("bad MEM_WRITE (invalid count)\n");
 			return -EINVAL;
 		}
-		r = evergreen_cs_packet_next_reloc(p, &reloc);
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
 			return -EINVAL;
@@ -2691,7 +2503,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 		if (idx_value & 0x1) {
 			u64 offset;
 			/* SRC is memory. */
-			r = evergreen_cs_packet_next_reloc(p, &reloc);
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 			if (r) {
 				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
 				return -EINVAL;
@@ -2715,7 +2527,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
 		if (idx_value & 0x2) {
 			u64 offset;
 			/* DST is memory. */
-			r = evergreen_cs_packet_next_reloc(p, &reloc);
+			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 			if (r) {
 				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
 				return -EINVAL;
@@ -2819,7 +2631,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
 		p->track = track;
 	}
 	do {
-		r = evergreen_cs_packet_parse(p, &pkt, p->idx);
+		r = radeon_cs_packet_parse(p, &pkt, p->idx);
 		if (r) {
 			kfree(p->track);
 			p->track = NULL;
@@ -2827,12 +2639,12 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
 		}
 		p->idx += pkt.count + 2;
 		switch (pkt.type) {
-		case PACKET_TYPE0:
+		case RADEON_PACKET_TYPE0:
 			r = evergreen_cs_parse_packet0(p, &pkt);
 			break;
-		case PACKET_TYPE2:
+		case RADEON_PACKET_TYPE2:
 			break;
-		case PACKET_TYPE3:
+		case RADEON_PACKET_TYPE3:
 			r = evergreen_packet3_check(p, &pkt);
 			break;
 		default:
@@ -2858,16 +2670,6 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
 	return 0;
 }
 
-/*
- * DMA
- */
-
-#define GET_DMA_CMD(h)		(((h) & 0xf0000000) >> 28)
-#define GET_DMA_COUNT(h)	((h) & 0x000fffff)
-#define GET_DMA_T(h)		(((h) & 0x00800000) >> 23)
-#define GET_DMA_NEW(h)		(((h) & 0x04000000) >> 26)
-#define GET_DMA_MISC(h)		(((h) & 0x0700000) >> 20)
-
 /**
  * evergreen_dma_cs_parse() - parse the DMA IB
  * @p: parser structure holding parsing context.
@@ -2881,9 +2683,9 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 {
 	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
 	struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc;
-	u32 header, cmd, count, tiled, new_cmd, misc;
+	u32 header, cmd, count, sub_cmd;
 	volatile u32 *ib = p->ib.ptr;
-	u32 idx, idx_value;
+	u32 idx;
 	u64 src_offset, dst_offset, dst2_offset;
 	int r;
 
@@ -2897,9 +2699,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 		header = radeon_get_ib_value(p, idx);
 		cmd = GET_DMA_CMD(header);
 		count = GET_DMA_COUNT(header);
-		tiled = GET_DMA_T(header);
-		new_cmd = GET_DMA_NEW(header);
-		misc = GET_DMA_MISC(header);
+		sub_cmd = GET_DMA_SUB_CMD(header);
 
 		switch (cmd) {
 		case DMA_PACKET_WRITE:
@@ -2908,19 +2708,27 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 				DRM_ERROR("bad DMA_PACKET_WRITE\n");
 				return -EINVAL;
 			}
-			if (tiled) {
+			switch (sub_cmd) {
+			/* tiled */
+			case 8:
 				dst_offset = radeon_get_ib_value(p, idx+1);
 				dst_offset <<= 8;
 
 				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
 				p->idx += count + 7;
-			} else {
+				break;
+			/* linear */
+			case 0:
 				dst_offset = radeon_get_ib_value(p, idx+1);
 				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
 
 				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
 				ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
 				p->idx += count + 3;
+				break;
+			default:
+				DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, header);
+				return -EINVAL;
 			}
 			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
 				dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
@@ -2939,338 +2747,330 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 				DRM_ERROR("bad DMA_PACKET_COPY\n");
 				return -EINVAL;
 			}
-			if (tiled) {
-				idx_value = radeon_get_ib_value(p, idx + 2);
-				if (new_cmd) {
-					switch (misc) {
-					case 0:
-						/* L2T, frame to fields */
-						if (idx_value & (1 << 31)) {
-							DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
-							return -EINVAL;
-						}
-						r = r600_dma_cs_next_reloc(p, &dst2_reloc);
-						if (r) {
-							DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
-							return -EINVAL;
-						}
-						dst_offset = radeon_get_ib_value(p, idx+1);
-						dst_offset <<= 8;
-						dst2_offset = radeon_get_ib_value(p, idx+2);
-						dst2_offset <<= 8;
-						src_offset = radeon_get_ib_value(p, idx+8);
-						src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
-						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
-							dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
-								 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
-							return -EINVAL;
-						}
-						if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
-							dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
-								 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
-							return -EINVAL;
-						}
-						if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
-							dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
-								 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
-							return -EINVAL;
-						}
-						ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
-						ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
-						ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-						ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-						p->idx += 10;
-						break;
-					case 1:
-						/* L2T, T2L partial */
-						if (p->family < CHIP_CAYMAN) {
-							DRM_ERROR("L2T, T2L Partial is cayman only !\n");
-							return -EINVAL;
-						}
-						/* detile bit */
-						if (idx_value & (1 << 31)) {
-							/* tiled src, linear dst */
-							ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
-
-							ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
-							ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
-						} else {
-							/* linear src, tiled dst */
-							ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-							ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-
-							ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
-						}
-						p->idx += 12;
-						break;
-					case 3:
-						/* L2T, broadcast */
-						if (idx_value & (1 << 31)) {
-							DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
-							return -EINVAL;
-						}
-						r = r600_dma_cs_next_reloc(p, &dst2_reloc);
-						if (r) {
-							DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
-							return -EINVAL;
-						}
-						dst_offset = radeon_get_ib_value(p, idx+1);
-						dst_offset <<= 8;
-						dst2_offset = radeon_get_ib_value(p, idx+2);
-						dst2_offset <<= 8;
-						src_offset = radeon_get_ib_value(p, idx+8);
-						src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
-						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
-							dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
-								 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
-							return -EINVAL;
-						}
-						if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
-							dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
-								 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
-							return -EINVAL;
-						}
-						if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
-							dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
-								 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
-							return -EINVAL;
-						}
-						ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
-						ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
-						ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-						ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-						p->idx += 10;
-						break;
-					case 4:
-						/* L2T, T2L */
-						/* detile bit */
-						if (idx_value & (1 << 31)) {
-							/* tiled src, linear dst */
-							src_offset = radeon_get_ib_value(p, idx+1);
-							src_offset <<= 8;
-							ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
-
-							dst_offset = radeon_get_ib_value(p, idx+7);
-							dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
-							ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
-							ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
-						} else {
-							/* linear src, tiled dst */
-							src_offset = radeon_get_ib_value(p, idx+7);
-							src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
-							ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-							ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-
-							dst_offset = radeon_get_ib_value(p, idx+1);
-							dst_offset <<= 8;
-							ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
-						}
-						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
-							dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
-								 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
-							return -EINVAL;
-						}
-						if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
-							dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
-								 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
-							return -EINVAL;
-						}
-						p->idx += 9;
-						break;
-					case 5:
-						/* T2T partial */
-						if (p->family < CHIP_CAYMAN) {
-							DRM_ERROR("L2T, T2L Partial is cayman only !\n");
-							return -EINVAL;
-						}
-						ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
-						ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
-						p->idx += 13;
-						break;
-					case 7:
-						/* L2T, broadcast */
-						if (idx_value & (1 << 31)) {
-							DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
-							return -EINVAL;
-						}
-						r = r600_dma_cs_next_reloc(p, &dst2_reloc);
-						if (r) {
-							DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
-							return -EINVAL;
-						}
-						dst_offset = radeon_get_ib_value(p, idx+1);
-						dst_offset <<= 8;
-						dst2_offset = radeon_get_ib_value(p, idx+2);
-						dst2_offset <<= 8;
-						src_offset = radeon_get_ib_value(p, idx+8);
-						src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
-						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
-							dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
-								 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
-							return -EINVAL;
-						}
-						if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
-							dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
-								 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
-							return -EINVAL;
-						}
-						if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
-							dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
-								 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
-							return -EINVAL;
-						}
-						ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
-						ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
-						ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-						ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-						p->idx += 10;
-						break;
-					default:
-						DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
-						return -EINVAL;
-					}
-				} else {
-					switch (misc) {
-					case 0:
-						/* detile bit */
-						if (idx_value & (1 << 31)) {
-							/* tiled src, linear dst */
-							src_offset = radeon_get_ib_value(p, idx+1);
-							src_offset <<= 8;
-							ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
-
-							dst_offset = radeon_get_ib_value(p, idx+7);
-							dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
-							ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
-							ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
-						} else {
-							/* linear src, tiled dst */
-							src_offset = radeon_get_ib_value(p, idx+7);
-							src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
-							ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-							ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-
-							dst_offset = radeon_get_ib_value(p, idx+1);
-							dst_offset <<= 8;
-							ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
-						}
-						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
-							dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
-								 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
-							return -EINVAL;
-						}
-						if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
-							dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
-								 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
-							return -EINVAL;
-						}
-						p->idx += 9;
-						break;
-					default:
-						DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
-						return -EINVAL;
-					}
-				}
-			} else {
-				if (new_cmd) {
-					switch (misc) {
-					case 0:
-						/* L2L, byte */
-						src_offset = radeon_get_ib_value(p, idx+2);
-						src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
-						dst_offset = radeon_get_ib_value(p, idx+1);
-						dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
-						if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
-							dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
-								 src_offset + count, radeon_bo_size(src_reloc->robj));
-							return -EINVAL;
-						}
-						if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
-							dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
-								 dst_offset + count, radeon_bo_size(dst_reloc->robj));
-							return -EINVAL;
-						}
-						ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
-						ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
-						ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
-						ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-						p->idx += 5;
-						break;
-					case 1:
-						/* L2L, partial */
-						if (p->family < CHIP_CAYMAN) {
-							DRM_ERROR("L2L Partial is cayman only !\n");
-							return -EINVAL;
-						}
-						ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
-						ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-						ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
-						ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
-
-						p->idx += 9;
-						break;
-					case 4:
-						/* L2L, dw, broadcast */
-						r = r600_dma_cs_next_reloc(p, &dst2_reloc);
-						if (r) {
-							DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
-							return -EINVAL;
-						}
-						dst_offset = radeon_get_ib_value(p, idx+1);
-						dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
-						dst2_offset = radeon_get_ib_value(p, idx+2);
-						dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
-						src_offset = radeon_get_ib_value(p, idx+3);
-						src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
-						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
-							dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
-								 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
-							return -EINVAL;
-						}
-						if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
-							dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
-								 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
-							return -EINVAL;
-						}
-						if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
-							dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
-								 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
-							return -EINVAL;
-						}
-						ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
-						ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
-						ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-						ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
-						ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
-						ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-						p->idx += 7;
-						break;
-					default:
-						DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
-						return -EINVAL;
-					}
-				} else {
-					/* L2L, dw */
-					src_offset = radeon_get_ib_value(p, idx+2);
-					src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
-					dst_offset = radeon_get_ib_value(p, idx+1);
-					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
-					if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
-						dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
-							 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
-						return -EINVAL;
-					}
-					if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
-						dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
-							 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
-						return -EINVAL;
-					}
-					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
-					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
-					ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
-					ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
-					p->idx += 5;
-				}
+			switch (sub_cmd) {
+			/* Copy L2L, DW aligned */
+			case 0x00:
+				/* L2L, dw */
+				src_offset = radeon_get_ib_value(p, idx+2);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
+						 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
+						 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+					return -EINVAL;
+				}
+				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+				ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+				ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+				ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+				p->idx += 5;
+				break;
+			/* Copy L2T/T2L */
+			case 0x08:
+				/* detile bit */
+				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+					/* tiled src, linear dst */
+					src_offset = radeon_get_ib_value(p, idx+1);
+					src_offset <<= 8;
+					ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+					dst_offset = radeon_get_ib_value(p, idx + 7);
+					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+					ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+				} else {
+					/* linear src, tiled dst */
+					src_offset = radeon_get_ib_value(p, idx+7);
+					src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+					ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+					dst_offset = radeon_get_ib_value(p, idx+1);
+					dst_offset <<= 8;
+					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+				}
+				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n",
+						 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, dst buffer too small (%llu %lu)\n",
+						 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+					return -EINVAL;
+				}
+				p->idx += 9;
+				break;
+			/* Copy L2L, byte aligned */
+			case 0x40:
+				/* L2L, byte */
+				src_offset = radeon_get_ib_value(p, idx+2);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32;
+				if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
+						 src_offset + count, radeon_bo_size(src_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
+						 dst_offset + count, radeon_bo_size(dst_reloc->robj));
+					return -EINVAL;
+				}
+				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+				ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+				ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+				ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+				p->idx += 5;
+				break;
+			/* Copy L2L, partial */
+			case 0x41:
+				/* L2L, partial */
+				if (p->family < CHIP_CAYMAN) {
+					DRM_ERROR("L2L Partial is cayman only !\n");
+					return -EINVAL;
+				}
+				ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
+				ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+				ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
+				ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+
+				p->idx += 9;
+				break;
+			/* Copy L2L, DW aligned, broadcast */
+			case 0x44:
+				/* L2L, dw, broadcast */
+				r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+				if (r) {
+					DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
+					return -EINVAL;
+				}
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset |= ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
+				dst2_offset = radeon_get_ib_value(p, idx+2);
+				dst2_offset |= ((u64)(radeon_get_ib_value(p, idx+5) & 0xff)) << 32;
+				src_offset = radeon_get_ib_value(p, idx+3);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32;
+				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
+						 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
+						 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
+						 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+					return -EINVAL;
+				}
+				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+				ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
+				ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+				ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+				ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
+				ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+				p->idx += 7;
+				break;
+			/* Copy L2T Frame to Field */
+			case 0x48:
+				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+					DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+					return -EINVAL;
+				}
+				r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+				if (r) {
+					DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
+					return -EINVAL;
+				}
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset <<= 8;
+				dst2_offset = radeon_get_ib_value(p, idx+2);
+				dst2_offset <<= 8;
+				src_offset = radeon_get_ib_value(p, idx+8);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
+						 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+						 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
+						 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+					return -EINVAL;
+				}
+				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+				ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+				ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+				ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+				p->idx += 10;
+				break;
+			/* Copy L2T/T2L, partial */
+			case 0x49:
+				/* L2T, T2L partial */
+				if (p->family < CHIP_CAYMAN) {
+					DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+					return -EINVAL;
+				}
+				/* detile bit */
+				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+					/* tiled src, linear dst */
+					ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+					ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+				} else {
+					/* linear src, tiled dst */
+					ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+				}
+				p->idx += 12;
+				break;
+			/* Copy L2T broadcast */
+			case 0x4b:
+				/* L2T, broadcast */
+				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+					DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+					return -EINVAL;
+				}
+				r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+				if (r) {
+					DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+					return -EINVAL;
+				}
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset <<= 8;
+				dst2_offset = radeon_get_ib_value(p, idx+2);
+				dst2_offset <<= 8;
+				src_offset = radeon_get_ib_value(p, idx+8);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+						 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+						 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+						 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+					return -EINVAL;
+				}
+				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+				ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+				ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+				ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+				p->idx += 10;
+				break;
+			/* Copy L2T/T2L (tile units) */
+			case 0x4c:
+				/* L2T, T2L */
+				/* detile bit */
+				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+					/* tiled src, linear dst */
+					src_offset = radeon_get_ib_value(p, idx+1);
+					src_offset <<= 8;
+					ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+
+					dst_offset = radeon_get_ib_value(p, idx+7);
+					dst_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+					ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
+				} else {
+					/* linear src, tiled dst */
+					src_offset = radeon_get_ib_value(p, idx+7);
+					src_offset |= ((u64)(radeon_get_ib_value(p, idx+8) & 0xff)) << 32;
+					ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+					ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+
+					dst_offset = radeon_get_ib_value(p, idx+1);
+					dst_offset <<= 8;
+					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+				}
+				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
+						 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
+						 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+					return -EINVAL;
+				}
+				p->idx += 9;
+				break;
+			/* Copy T2T, partial (tile units) */
+			case 0x4d:
+				/* T2T partial */
+				if (p->family < CHIP_CAYMAN) {
+					DRM_ERROR("L2T, T2L Partial is cayman only !\n");
+					return -EINVAL;
+				}
+				ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
+				ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+				p->idx += 13;
+				break;
+			/* Copy L2T broadcast (tile units) */
+			case 0x4f:
+				/* L2T, broadcast */
+				if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) {
+					DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+					return -EINVAL;
+				}
+				r = r600_dma_cs_next_reloc(p, &dst2_reloc);
+				if (r) {
+					DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
+					return -EINVAL;
+				}
+				dst_offset = radeon_get_ib_value(p, idx+1);
+				dst_offset <<= 8;
+				dst2_offset = radeon_get_ib_value(p, idx+2);
+				dst2_offset <<= 8;
+				src_offset = radeon_get_ib_value(p, idx+8);
+				src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32;
+				if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
+						 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
+						 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
+					return -EINVAL;
+				}
+				if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
+					dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
+						 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
+					return -EINVAL;
+				}
+				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
+				ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
+				ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
+				ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
+				p->idx += 10;
+				break;
+			default:
+				DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, header);
+				return -EINVAL;
 			}
 			break;
 		case DMA_PACKET_CONSTANT_FILL:
@@ -3583,19 +3383,19 @@ int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
 
 	do {
 		pkt.idx = idx;
-		pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
-		pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
+		pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
+		pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
 		pkt.one_reg_wr = 0;
 		switch (pkt.type) {
-		case PACKET_TYPE0:
+		case RADEON_PACKET_TYPE0:
 			dev_err(rdev->dev, "Packet0 not allowed!\n");
 			ret = -EINVAL;
 			break;
-		case PACKET_TYPE2:
+		case RADEON_PACKET_TYPE2:
 			idx += 1;
 			break;
-		case PACKET_TYPE3:
-			pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
+		case RADEON_PACKET_TYPE3:
+			pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
 			ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
 			idx += pkt.count + 2;
 			break;
@@ -3623,88 +3423,79 @@ int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
 int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
 {
 	u32 idx = 0;
-	u32 header, cmd, count, tiled, new_cmd, misc;
+	u32 header, cmd, count, sub_cmd;
 
 	do {
 		header = ib->ptr[idx];
 		cmd = GET_DMA_CMD(header);
 		count = GET_DMA_COUNT(header);
-		tiled = GET_DMA_T(header);
-		new_cmd = GET_DMA_NEW(header);
-		misc = GET_DMA_MISC(header);
+		sub_cmd = GET_DMA_SUB_CMD(header);
 
 		switch (cmd) {
 		case DMA_PACKET_WRITE:
-			if (tiled)
+			switch (sub_cmd) {
+			/* tiled */
+			case 8:
 				idx += count + 7;
-			else
+				break;
+			/* linear */
+			case 0:
 				idx += count + 3;
+				break;
+			default:
+				DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, ib->ptr[idx]);
+				return -EINVAL;
+			}
 			break;
 		case DMA_PACKET_COPY:
-			if (tiled) {
-				if (new_cmd) {
-					switch (misc) {
-					case 0:
-						/* L2T, frame to fields */
-						idx += 10;
-						break;
-					case 1:
-						/* L2T, T2L partial */
-						idx += 12;
-						break;
-					case 3:
-						/* L2T, broadcast */
-						idx += 10;
-						break;
-					case 4:
-						/* L2T, T2L */
-						idx += 9;
-						break;
-					case 5:
-						/* T2T partial */
-						idx += 13;
-						break;
-					case 7:
-						/* L2T, broadcast */
-						idx += 10;
-						break;
-					default:
-						DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
-						return -EINVAL;
-					}
-				} else {
-					switch (misc) {
-					case 0:
-						idx += 9;
-						break;
-					default:
-						DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
-						return -EINVAL;
-					}
-				}
-			} else {
-				if (new_cmd) {
-					switch (misc) {
-					case 0:
-						/* L2L, byte */
-						idx += 5;
-						break;
-					case 1:
-						/* L2L, partial */
-						idx += 9;
-						break;
-					case 4:
-						/* L2L, dw, broadcast */
-						idx += 7;
-						break;
-					default:
-						DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
-						return -EINVAL;
-					}
-				} else {
-					/* L2L, dw */
-					idx += 5;
-				}
+			switch (sub_cmd) {
+			/* Copy L2L, DW aligned */
+			case 0x00:
+				idx += 5;
+				break;
+			/* Copy L2T/T2L */
+			case 0x08:
+				idx += 9;
+				break;
+			/* Copy L2L, byte aligned */
+			case 0x40:
+				idx += 5;
+				break;
+			/* Copy L2L, partial */
+			case 0x41:
+				idx += 9;
+				break;
+			/* Copy L2L, DW aligned, broadcast */
+			case 0x44:
+				idx += 7;
+				break;
+			/* Copy L2T Frame to Field */
+			case 0x48:
+				idx += 10;
+				break;
+			/* Copy L2T/T2L, partial */
+			case 0x49:
+				idx += 12;
+				break;
+			/* Copy L2T broadcast */
+			case 0x4b:
+				idx += 10;
+				break;
+			/* Copy L2T/T2L (tile units) */
+			case 0x4c:
+				idx += 9;
+				break;
+			/* Copy T2T, partial (tile units) */
+			case 0x4d:
+				idx += 13;
+				break;
+			/* Copy L2T broadcast (tile units) */
+			case 0x4f:
+				idx += 10;
+				break;
+			default:
+				DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, ib->ptr[idx]);
+				return -EINVAL;
 			}
 			break;
 		case DMA_PACKET_CONSTANT_FILL:
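The rework above collapses the old per-bit header fields (tiled at bit 23, new_cmd at bit 26, misc at bits 22:20) into a single 8-bit sub-command at bits 27:20, so sub_cmd = new_cmd << 6 | tiled << 3 | misc. A minimal sketch of the decode, assuming the GET_DMA_* macros from evergreend.h shown further down in this diff; the header value is a made-up example, not taken from a real indirect buffer:

static void sketch_decode_dma_header(void)
{
	u32 header  = 0x20800005;              /* hypothetical: WRITE, tiled, 5 dwords */
	u32 cmd     = GET_DMA_CMD(header);     /* bits 31:28 -> 0x2 = DMA_PACKET_WRITE */
	u32 sub_cmd = GET_DMA_SUB_CMD(header); /* bits 27:20 -> 8, the old "tiled" bit */
	u32 count   = GET_DMA_COUNT(header);   /* bits 19:0  -> 5 payload dwords       */

	/* the old parser read tiled, new_cmd and misc separately;
	 * all three now arrive inside the one sub_cmd value */
	(void)cmd; (void)sub_cmd; (void)count;
}

This is also why the old "tiled + new_cmd, misc 0" case reappears above as sub-command 0x48, and plain "L2L, dw" as 0x00.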
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 327c08b54180..4fdecc2b4040 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -24,6 +24,7 @@
  * Authors: Christian König
  *          Rafał Miłecki
  */
+#include <linux/hdmi.h>
 #include <drm/drmP.h>
 #include <drm/radeon_drm.h>
 #include "radeon.h"
@@ -54,79 +55,18 @@ static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t cloc
 }
 
 /*
- * calculate the crc for a given info frame
- */
-static void evergreen_hdmi_infoframe_checksum(uint8_t packetType,
-					      uint8_t versionNumber,
-					      uint8_t length,
-					      uint8_t *frame)
-{
-	int i;
-	frame[0] = packetType + versionNumber + length;
-	for (i = 1; i <= length; i++)
-		frame[0] += frame[i];
-	frame[0] = 0x100 - frame[0];
-}
-
-/*
  * build a HDMI Video Info Frame
  */
-static void evergreen_hdmi_videoinfoframe(
-	struct drm_encoder *encoder,
-	uint8_t color_format,
-	int active_information_present,
-	uint8_t active_format_aspect_ratio,
-	uint8_t scan_information,
-	uint8_t colorimetry,
-	uint8_t ex_colorimetry,
-	uint8_t quantization,
-	int ITC,
-	uint8_t picture_aspect_ratio,
-	uint8_t video_format_identification,
-	uint8_t pixel_repetition,
-	uint8_t non_uniform_picture_scaling,
-	uint8_t bar_info_data_valid,
-	uint16_t top_bar,
-	uint16_t bottom_bar,
-	uint16_t left_bar,
-	uint16_t right_bar
-)
+static void evergreen_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
+						void *buffer, size_t size)
 {
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 	uint32_t offset = dig->afmt->offset;
+	uint8_t *frame = buffer + 3;
 
-	uint8_t frame[14];
-
-	frame[0x0] = 0;
-	frame[0x1] =
-		(scan_information & 0x3) |
-		((bar_info_data_valid & 0x3) << 2) |
-		((active_information_present & 0x1) << 4) |
-		((color_format & 0x3) << 5);
-	frame[0x2] =
-		(active_format_aspect_ratio & 0xF) |
-		((picture_aspect_ratio & 0x3) << 4) |
-		((colorimetry & 0x3) << 6);
-	frame[0x3] =
-		(non_uniform_picture_scaling & 0x3) |
-		((quantization & 0x3) << 2) |
-		((ex_colorimetry & 0x7) << 4) |
-		((ITC & 0x1) << 7);
-	frame[0x4] = (video_format_identification & 0x7F);
-	frame[0x5] = (pixel_repetition & 0xF);
-	frame[0x6] = (top_bar & 0xFF);
-	frame[0x7] = (top_bar >> 8);
-	frame[0x8] = (bottom_bar & 0xFF);
-	frame[0x9] = (bottom_bar >> 8);
-	frame[0xA] = (left_bar & 0xFF);
-	frame[0xB] = (left_bar >> 8);
-	frame[0xC] = (right_bar & 0xFF);
-	frame[0xD] = (right_bar >> 8);
-
-	evergreen_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
 	/* Our header values (type, version, length) should be alright, Intel
 	 * is using the same. Checksum function also seems to be OK, it works
 	 * fine for audio infoframe. However calculated value is always lower
@@ -154,7 +94,10 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+	struct hdmi_avi_infoframe frame;
 	uint32_t offset;
+	ssize_t err;
 
 	/* Silent, r600_hdmi_enable will raise WARN for us */
 	if (!dig->afmt->enabled)
@@ -200,9 +143,19 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
 
 	WREG32(HDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
 
-	evergreen_hdmi_videoinfoframe(encoder, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-				      0, 0, 0, 0, 0, 0);
+	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+	if (err < 0) {
+		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
+		return;
+	}
+
+	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+	if (err < 0) {
+		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
+		return;
+	}
 
+	evergreen_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
 	evergreen_hdmi_update_ACR(encoder, mode->clock);
 
 	/* it's unknown what these bits do excatly, but it's indeed quite useful for debugging */
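With the generic helpers, the driver no longer hand-packs the thirteen AVI payload bytes or computes the checksum itself. The deleted local helper's rule (frame[0] = 0x100 minus the sum of type, version, length and payload) makes every byte of a packed infoframe sum to zero mod 256; hdmi_avi_infoframe_pack() applies the same rule. A condensed sketch of the new call sequence, assuming encoder and mode as in evergreen_hdmi_setmode (error logging trimmed to plain returns):

static void sketch_update_avi(struct drm_encoder *encoder,
			      struct drm_display_mode *mode)
{
	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	struct hdmi_avi_infoframe frame;

	if (drm_hdmi_avi_infoframe_from_display_mode(&frame, mode) < 0)
		return;	/* mode could not be described as an AVI frame */
	if (hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer)) < 0)
		return;	/* buffer too small or frame invalid */
	/* buffer[0..2] hold type/version/length and buffer[3] the checksum,
	 * which is why the register writer above starts at buffer + 3 */
	evergreen_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
}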
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 034f4c22e5db..f585be16e2d5 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -223,6 +223,7 @@
 #define EVERGREEN_CRTC_STATUS                           0x6e8c
 #       define EVERGREEN_CRTC_V_BLANK                   (1 << 0)
 #define EVERGREEN_CRTC_STATUS_POSITION                  0x6e90
+#define EVERGREEN_CRTC_STATUS_HV_COUNT                  0x6ea0
 #define EVERGREEN_MASTER_UPDATE_MODE                    0x6ef8
 #define EVERGREEN_CRTC_UPDATE_LOCK                      0x6ed4
 
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 0bfd0e9e469b..982d25ad9af3 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -729,6 +729,18 @@
 #define WAIT_UNTIL                                      0x8040
 
 #define SRBM_STATUS                                     0x0E50
+#define         RLC_RQ_PENDING                          (1 << 3)
+#define         GRBM_RQ_PENDING                         (1 << 5)
+#define         VMC_BUSY                                (1 << 8)
+#define         MCB_BUSY                                (1 << 9)
+#define         MCB_NON_DISPLAY_BUSY                    (1 << 10)
+#define         MCC_BUSY                                (1 << 11)
+#define         MCD_BUSY                                (1 << 12)
+#define         SEM_BUSY                                (1 << 14)
+#define         RLC_BUSY                                (1 << 15)
+#define         IH_BUSY                                 (1 << 17)
+#define SRBM_STATUS2                                    0x0EC4
+#define         DMA_BUSY                                (1 << 5)
 #define SRBM_SOFT_RESET                                 0x0E60
 #define         SRBM_SOFT_RESET_ALL_MASK                0x00FEEFA6
 #define         SOFT_RESET_BIF                          (1 << 1)
@@ -924,20 +936,23 @@
 #define CAYMAN_DMA1_CNTL                                0xd82c
 
 /* async DMA packets */
-#define DMA_PACKET(cmd, t, s, n)	((((cmd) & 0xF) << 28) |	\
-					 (((t) & 0x1) << 23) |		\
-					 (((s) & 0x1) << 22) |		\
-					 (((n) & 0xFFFFF) << 0))
+#define DMA_PACKET(cmd, sub_cmd, n)	((((cmd) & 0xF) << 28) |	\
+					 (((sub_cmd) & 0xFF) << 20) |	\
+					 (((n) & 0xFFFFF) << 0))
+#define GET_DMA_CMD(h)			(((h) & 0xf0000000) >> 28)
+#define GET_DMA_COUNT(h)		((h) & 0x000fffff)
+#define GET_DMA_SUB_CMD(h)		(((h) & 0x0ff00000) >> 20)
+
 /* async DMA Packet types */
 #define DMA_PACKET_WRITE                                0x2
 #define DMA_PACKET_COPY                                 0x3
 #define DMA_PACKET_INDIRECT_BUFFER                      0x4
 #define DMA_PACKET_SEMAPHORE                            0x5
 #define DMA_PACKET_FENCE                                0x6
 #define DMA_PACKET_TRAP                                 0x7
 #define DMA_PACKET_SRBM_WRITE                           0x9
 #define DMA_PACKET_CONSTANT_FILL                        0xd
 #define DMA_PACKET_NOP                                  0xf
 
 /* PCIE link stuff */
 #define PCIE_LC_TRAINING_CNTL                           0xa1 /* PCIE_P */
@@ -980,16 +995,7 @@
 /*
  * PM4
  */
-#define PACKET_TYPE0	0
-#define PACKET_TYPE1	1
-#define PACKET_TYPE2	2
-#define PACKET_TYPE3	3
-
-#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
-#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
-#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
-#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
-#define PACKET0(reg, n)	((PACKET_TYPE0 << 30) |			\
+#define PACKET0(reg, n)	((RADEON_PACKET_TYPE0 << 30) |		\
 			 (((reg) >> 2) & 0xFFFF) |		\
 			 ((n) & 0x3FFF) << 16)
 #define CP_PACKET2	0x80000000
@@ -998,7 +1004,7 @@
 
 #define PACKET2(v)	(CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
 
-#define PACKET3(op, n)	((PACKET_TYPE3 << 30) |			\
+#define PACKET3(op, n)	((RADEON_PACKET_TYPE3 << 30) |		\
 			 (((op) & 0xFF) << 8) |			\
 			 ((n) & 0x3FFF) << 16)
 
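The reworked DMA_PACKET() builder and the GET_DMA_* accessors above are exact inverses over their bit fields, which is what lets both DMA parsers switch on one sub_cmd value. A small round-trip sketch (the operand values are illustrative only):

static void sketch_dma_packet_roundtrip(void)
{
	u32 header = DMA_PACKET(DMA_PACKET_COPY, 0x41, 16);

	WARN_ON(GET_DMA_CMD(header)     != DMA_PACKET_COPY); /* 0x3              */
	WARN_ON(GET_DMA_SUB_CMD(header) != 0x41);            /* L2L, partial     */
	WARN_ON(GET_DMA_COUNT(header)   != 16);              /* payload dwords   */
}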
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 835992d8d067..7cead763be9e 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -34,6 +34,8 @@
 #include "ni_reg.h"
 #include "cayman_blit_shaders.h"
 
+extern bool evergreen_is_display_hung(struct radeon_device *rdev);
+extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
 extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
 extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
 extern int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
@@ -1310,120 +1312,90 @@ void cayman_dma_fini(struct radeon_device *rdev)
 	radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
 }
 
-static void cayman_gpu_soft_reset_gfx(struct radeon_device *rdev)
+static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
 {
-	u32 grbm_reset = 0;
+	u32 reset_mask = 0;
+	u32 tmp;
 
-	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
-		return;
+	/* GRBM_STATUS */
+	tmp = RREG32(GRBM_STATUS);
+	if (tmp & (PA_BUSY | SC_BUSY |
+		   SH_BUSY | SX_BUSY |
+		   TA_BUSY | VGT_BUSY |
+		   DB_BUSY | CB_BUSY |
+		   GDS_BUSY | SPI_BUSY |
+		   IA_BUSY | IA_BUSY_NO_DMA))
+		reset_mask |= RADEON_RESET_GFX;
 
-	dev_info(rdev->dev, "  GRBM_STATUS               = 0x%08X\n",
-		RREG32(GRBM_STATUS));
-	dev_info(rdev->dev, "  GRBM_STATUS_SE0           = 0x%08X\n",
-		RREG32(GRBM_STATUS_SE0));
-	dev_info(rdev->dev, "  GRBM_STATUS_SE1           = 0x%08X\n",
-		RREG32(GRBM_STATUS_SE1));
-	dev_info(rdev->dev, "  SRBM_STATUS               = 0x%08X\n",
-		RREG32(SRBM_STATUS));
-	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
-		RREG32(CP_STALLED_STAT1));
-	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
-		RREG32(CP_STALLED_STAT2));
-	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
-		RREG32(CP_BUSY_STAT));
-	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
-		RREG32(CP_STAT));
+	if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
+		   CP_BUSY | CP_COHERENCY_BUSY))
+		reset_mask |= RADEON_RESET_CP;
 
-	/* Disable CP parsing/prefetching */
-	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+	if (tmp & GRBM_EE_BUSY)
+		reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
 
-	/* reset all the gfx blocks */
-	grbm_reset = (SOFT_RESET_CP |
-		      SOFT_RESET_CB |
-		      SOFT_RESET_DB |
-		      SOFT_RESET_GDS |
-		      SOFT_RESET_PA |
-		      SOFT_RESET_SC |
-		      SOFT_RESET_SPI |
-		      SOFT_RESET_SH |
-		      SOFT_RESET_SX |
-		      SOFT_RESET_TC |
-		      SOFT_RESET_TA |
-		      SOFT_RESET_VGT |
-		      SOFT_RESET_IA);
-
-	dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
-	WREG32(GRBM_SOFT_RESET, grbm_reset);
-	(void)RREG32(GRBM_SOFT_RESET);
-	udelay(50);
-	WREG32(GRBM_SOFT_RESET, 0);
-	(void)RREG32(GRBM_SOFT_RESET);
-
-	dev_info(rdev->dev, "  GRBM_STATUS               = 0x%08X\n",
-		RREG32(GRBM_STATUS));
-	dev_info(rdev->dev, "  GRBM_STATUS_SE0           = 0x%08X\n",
-		RREG32(GRBM_STATUS_SE0));
-	dev_info(rdev->dev, "  GRBM_STATUS_SE1           = 0x%08X\n",
-		RREG32(GRBM_STATUS_SE1));
-	dev_info(rdev->dev, "  SRBM_STATUS               = 0x%08X\n",
-		RREG32(SRBM_STATUS));
-	dev_info(rdev->dev, "  R_008674_CP_STALLED_STAT1 = 0x%08X\n",
-		RREG32(CP_STALLED_STAT1));
-	dev_info(rdev->dev, "  R_008678_CP_STALLED_STAT2 = 0x%08X\n",
-		RREG32(CP_STALLED_STAT2));
-	dev_info(rdev->dev, "  R_00867C_CP_BUSY_STAT     = 0x%08X\n",
-		RREG32(CP_BUSY_STAT));
-	dev_info(rdev->dev, "  R_008680_CP_STAT          = 0x%08X\n",
-		RREG32(CP_STAT));
+	/* DMA_STATUS_REG 0 */
+	tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+	if (!(tmp & DMA_IDLE))
+		reset_mask |= RADEON_RESET_DMA;
 
-}
+	/* DMA_STATUS_REG 1 */
+	tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
+	if (!(tmp & DMA_IDLE))
+		reset_mask |= RADEON_RESET_DMA1;
 
-static void cayman_gpu_soft_reset_dma(struct radeon_device *rdev)
-{
-	u32 tmp;
+	/* SRBM_STATUS2 */
+	tmp = RREG32(SRBM_STATUS2);
+	if (tmp & DMA_BUSY)
+		reset_mask |= RADEON_RESET_DMA;
 
-	if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
-		return;
+	if (tmp & DMA1_BUSY)
+		reset_mask |= RADEON_RESET_DMA1;
 
-	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
-		RREG32(DMA_STATUS_REG));
+	/* SRBM_STATUS */
+	tmp = RREG32(SRBM_STATUS);
+	if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
+		reset_mask |= RADEON_RESET_RLC;
 
-	/* dma0 */
-	tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
-	tmp &= ~DMA_RB_ENABLE;
-	WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+	if (tmp & IH_BUSY)
+		reset_mask |= RADEON_RESET_IH;
 
-	/* dma1 */
-	tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
-	tmp &= ~DMA_RB_ENABLE;
-	WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+	if (tmp & SEM_BUSY)
+		reset_mask |= RADEON_RESET_SEM;
 
-	/* Reset dma */
-	WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
-	RREG32(SRBM_SOFT_RESET);
-	udelay(50);
-	WREG32(SRBM_SOFT_RESET, 0);
+	if (tmp & GRBM_RQ_PENDING)
+		reset_mask |= RADEON_RESET_GRBM;
+
+	if (tmp & VMC_BUSY)
+		reset_mask |= RADEON_RESET_VMC;
+
+	if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
+		   MCC_BUSY | MCD_BUSY))
+		reset_mask |= RADEON_RESET_MC;
 
-	dev_info(rdev->dev, "  R_00D034_DMA_STATUS_REG   = 0x%08X\n",
-		RREG32(DMA_STATUS_REG));
+	if (evergreen_is_display_hung(rdev))
+		reset_mask |= RADEON_RESET_DISPLAY;
 
+	/* VM_L2_STATUS */
+	tmp = RREG32(VM_L2_STATUS);
+	if (tmp & L2_BUSY)
+		reset_mask |= RADEON_RESET_VMC;
+
+	return reset_mask;
 }
 
-static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
+static void cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
 {
 	struct evergreen_mc_save save;
-
-	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
-		reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
-
-	if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
-		reset_mask &= ~RADEON_RESET_DMA;
+	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+	u32 tmp;
 
 	if (reset_mask == 0)
-		return 0;
+		return;
 
 	dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
 
+	evergreen_print_gpu_status_regs(rdev);
 	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_ADDR   0x%08X\n",
 		 RREG32(0x14F8));
 	dev_info(rdev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
@@ -1433,29 +1405,158 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
 	dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
 		 RREG32(0x14DC));
 
+	/* Disable CP parsing/prefetching */
+	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
+
+	if (reset_mask & RADEON_RESET_DMA) {
+		/* dma0 */
+		tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+		tmp &= ~DMA_RB_ENABLE;
+		WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+	}
+
+	if (reset_mask & RADEON_RESET_DMA1) {
+		/* dma1 */
+		tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+		tmp &= ~DMA_RB_ENABLE;
+		WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+	}
+
+	udelay(50);
+
 	evergreen_mc_stop(rdev, &save);
 	if (evergreen_mc_wait_for_idle(rdev)) {
 		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
 	}
 
-	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
-		cayman_gpu_soft_reset_gfx(rdev);
+	if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
+		grbm_soft_reset = SOFT_RESET_CB |
+				  SOFT_RESET_DB |
+				  SOFT_RESET_GDS |
+				  SOFT_RESET_PA |
+				  SOFT_RESET_SC |
+				  SOFT_RESET_SPI |
+				  SOFT_RESET_SH |
+				  SOFT_RESET_SX |
+				  SOFT_RESET_TC |
+				  SOFT_RESET_TA |
+				  SOFT_RESET_VGT |
+				  SOFT_RESET_IA;
+	}
+
+	if (reset_mask & RADEON_RESET_CP) {
+		grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
+
+		srbm_soft_reset |= SOFT_RESET_GRBM;
+	}
 
 	if (reset_mask & RADEON_RESET_DMA)
-		cayman_gpu_soft_reset_dma(rdev);
+		srbm_soft_reset |= SOFT_RESET_DMA;
+
+	if (reset_mask & RADEON_RESET_DMA1)
+		srbm_soft_reset |= SOFT_RESET_DMA1;
+
+	if (reset_mask & RADEON_RESET_DISPLAY)
+		srbm_soft_reset |= SOFT_RESET_DC;
+
+	if (reset_mask & RADEON_RESET_RLC)
+		srbm_soft_reset |= SOFT_RESET_RLC;
+
+	if (reset_mask & RADEON_RESET_SEM)
+		srbm_soft_reset |= SOFT_RESET_SEM;
+
+	if (reset_mask & RADEON_RESET_IH)
+		srbm_soft_reset |= SOFT_RESET_IH;
+
+	if (reset_mask & RADEON_RESET_GRBM)
+		srbm_soft_reset |= SOFT_RESET_GRBM;
+
+	if (reset_mask & RADEON_RESET_VMC)
+		srbm_soft_reset |= SOFT_RESET_VMC;
+
+	if (!(rdev->flags & RADEON_IS_IGP)) {
+		if (reset_mask & RADEON_RESET_MC)
+			srbm_soft_reset |= SOFT_RESET_MC;
+	}
+
+	if (grbm_soft_reset) {
+		tmp = RREG32(GRBM_SOFT_RESET);
+		tmp |= grbm_soft_reset;
+		dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(GRBM_SOFT_RESET, tmp);
+		tmp = RREG32(GRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~grbm_soft_reset;
+		WREG32(GRBM_SOFT_RESET, tmp);
+		tmp = RREG32(GRBM_SOFT_RESET);
+	}
+
+	if (srbm_soft_reset) {
+		tmp = RREG32(SRBM_SOFT_RESET);
+		tmp |= srbm_soft_reset;
+		dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+
+		udelay(50);
+
+		tmp &= ~srbm_soft_reset;
+		WREG32(SRBM_SOFT_RESET, tmp);
+		tmp = RREG32(SRBM_SOFT_RESET);
+	}
 
 	/* Wait a little for things to settle down */
 	udelay(50);
 
 	evergreen_mc_resume(rdev, &save);
-	return 0;
+	udelay(50);
+
+	evergreen_print_gpu_status_regs(rdev);
 }
 
 int cayman_asic_reset(struct radeon_device *rdev)
 {
-	return cayman_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
-					    RADEON_RESET_COMPUTE |
-					    RADEON_RESET_DMA));
+	u32 reset_mask;
+
+	reset_mask = cayman_gpu_check_soft_reset(rdev);
+
+	if (reset_mask)
+		r600_set_bios_scratch_engine_hung(rdev, true);
+
+	cayman_gpu_soft_reset(rdev, reset_mask);
+
+	reset_mask = cayman_gpu_check_soft_reset(rdev);
+
+	if (!reset_mask)
+		r600_set_bios_scratch_engine_hung(rdev, false);
+
+	return 0;
+}
+
+/**
+ * cayman_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
+
+	if (!(reset_mask & (RADEON_RESET_GFX |
+			    RADEON_RESET_COMPUTE |
+			    RADEON_RESET_CP))) {
+		radeon_ring_lockup_update(ring);
+		return false;
+	}
+	/* force CP activities */
+	radeon_ring_force_activity(rdev, ring);
+	return radeon_ring_test_lockup(rdev, ring);
 }
 
 /**
@@ -1464,18 +1565,20 @@ int cayman_asic_reset(struct radeon_device *rdev)
  * @rdev: radeon_device pointer
  * @ring: radeon_ring structure holding ring information
  *
- * Check if the async DMA engine is locked up (cayman-SI).
+ * Check if the async DMA engine is locked up.
  * Returns true if the engine appears to be locked up, false if not.
  */
 bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
 {
-	u32 dma_status_reg;
+	u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
+	u32 mask;
 
 	if (ring->idx == R600_RING_TYPE_DMA_INDEX)
-		dma_status_reg = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+		mask = RADEON_RESET_DMA;
 	else
-		dma_status_reg = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
-	if (dma_status_reg & DMA_IDLE) {
+		mask = RADEON_RESET_DMA1;
+
+	if (!(reset_mask & mask)) {
 		radeon_ring_lockup_update(ring);
 		return false;
 	}
@@ -1843,19 +1946,21 @@ uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
1843 * cayman_vm_set_page - update the page tables using the CP 1946 * cayman_vm_set_page - update the page tables using the CP
1844 * 1947 *
1845 * @rdev: radeon_device pointer 1948 * @rdev: radeon_device pointer
1949 * @ib: indirect buffer to fill with commands
1846 * @pe: addr of the page entry 1950 * @pe: addr of the page entry
1847 * @addr: dst addr to write into pe 1951 * @addr: dst addr to write into pe
1848 * @count: number of page entries to update 1952 * @count: number of page entries to update
1849 * @incr: increase next addr by incr bytes 1953 * @incr: increase next addr by incr bytes
1850 * @flags: access flags 1954 * @flags: access flags
1851 * 1955 *
1852 * Update the page tables using the CP (cayman-si). 1956 * Update the page tables using the CP (cayman/TN).
1853 */ 1957 */
1854void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe, 1958void cayman_vm_set_page(struct radeon_device *rdev,
1959 struct radeon_ib *ib,
1960 uint64_t pe,
1855 uint64_t addr, unsigned count, 1961 uint64_t addr, unsigned count,
1856 uint32_t incr, uint32_t flags) 1962 uint32_t incr, uint32_t flags)
1857{ 1963{
1858 struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
1859 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); 1964 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
1860 uint64_t value; 1965 uint64_t value;
1861 unsigned ndw; 1966 unsigned ndw;
@@ -1866,9 +1971,9 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
1866 if (ndw > 0x3FFF) 1971 if (ndw > 0x3FFF)
1867 ndw = 0x3FFF; 1972 ndw = 0x3FFF;
1868 1973
1869 radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw)); 1974 ib->ptr[ib->length_dw++] = PACKET3(PACKET3_ME_WRITE, ndw);
1870 radeon_ring_write(ring, pe); 1975 ib->ptr[ib->length_dw++] = pe;
1871 radeon_ring_write(ring, upper_32_bits(pe) & 0xff); 1976 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
1872 for (; ndw > 1; ndw -= 2, --count, pe += 8) { 1977 for (; ndw > 1; ndw -= 2, --count, pe += 8) {
1873 if (flags & RADEON_VM_PAGE_SYSTEM) { 1978 if (flags & RADEON_VM_PAGE_SYSTEM) {
1874 value = radeon_vm_map_gart(rdev, addr); 1979 value = radeon_vm_map_gart(rdev, addr);
@@ -1880,8 +1985,8 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
1880 } 1985 }
1881 addr += incr; 1986 addr += incr;
1882 value |= r600_flags; 1987 value |= r600_flags;
1883 radeon_ring_write(ring, value); 1988 ib->ptr[ib->length_dw++] = value;
1884 radeon_ring_write(ring, upper_32_bits(value)); 1989 ib->ptr[ib->length_dw++] = upper_32_bits(value);
1885 } 1990 }
1886 } 1991 }
1887 } else { 1992 } else {
@@ -1891,9 +1996,9 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
1891 ndw = 0xFFFFE; 1996 ndw = 0xFFFFE;
1892 1997
1893 /* for non-physically contiguous pages (system) */ 1998 /* for non-physically contiguous pages (system) */
1894 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw)); 1999 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
1895 radeon_ring_write(ring, pe); 2000 ib->ptr[ib->length_dw++] = pe;
1896 radeon_ring_write(ring, upper_32_bits(pe) & 0xff); 2001 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
1897 for (; ndw > 0; ndw -= 2, --count, pe += 8) { 2002 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
1898 if (flags & RADEON_VM_PAGE_SYSTEM) { 2003 if (flags & RADEON_VM_PAGE_SYSTEM) {
1899 value = radeon_vm_map_gart(rdev, addr); 2004 value = radeon_vm_map_gart(rdev, addr);
@@ -1905,10 +2010,12 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
1905 } 2010 }
1906 addr += incr; 2011 addr += incr;
1907 value |= r600_flags; 2012 value |= r600_flags;
1908 radeon_ring_write(ring, value); 2013 ib->ptr[ib->length_dw++] = value;
1909 radeon_ring_write(ring, upper_32_bits(value)); 2014 ib->ptr[ib->length_dw++] = upper_32_bits(value);
1910 } 2015 }
1911 } 2016 }
2017 while (ib->length_dw & 0x7)
2018 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
1912 } 2019 }
1913} 2020}
1914 2021
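The DMA tail added above pads the indirect buffer out to an 8-dword boundary with NOP packets, so the async DMA engine only ever fetches complete command groups. A minimal user-space sketch of that step, assuming a nid.h-style DMA_PACKET layout and a NOP opcode of 0xf (both reproduced here for illustration, not taken from this diff):

#include <stdint.h>
#include <stdio.h>

#define DMA_PACKET_NOP 0xf                        /* assumed opcode value */
#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
                                  (((t) & 0x1) << 23) |   \
                                  (((s) & 0x1) << 22) |   \
                                  (((n) & 0xFFFFF) << 0))

int main(void)
{
	uint32_t ib[32];
	uint32_t length_dw = 11;  /* pretend the write packets used 11 dwords */

	/* pad with NOPs so the engine only ever sees whole 8-dword groups */
	while (length_dw & 0x7)
		ib[length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);

	printf("padded length: %u dwords\n", length_dw);  /* prints 16 */
	return 0;
}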
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 48e5022ee921..079dee202a9e 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -49,6 +49,16 @@
49#define RINGID(x) (((x) & 0x3) << 0) 49#define RINGID(x) (((x) & 0x3) << 0)
50#define VMID(x) (((x) & 0x7) << 0) 50#define VMID(x) (((x) & 0x7) << 0)
51#define SRBM_STATUS 0x0E50 51#define SRBM_STATUS 0x0E50
52#define RLC_RQ_PENDING (1 << 3)
53#define GRBM_RQ_PENDING (1 << 5)
54#define VMC_BUSY (1 << 8)
55#define MCB_BUSY (1 << 9)
56#define MCB_NON_DISPLAY_BUSY (1 << 10)
57#define MCC_BUSY (1 << 11)
58#define MCD_BUSY (1 << 12)
59#define SEM_BUSY (1 << 14)
60#define RLC_BUSY (1 << 15)
61#define IH_BUSY (1 << 17)
52 62
53#define SRBM_SOFT_RESET 0x0E60 63#define SRBM_SOFT_RESET 0x0E60
54#define SOFT_RESET_BIF (1 << 1) 64#define SOFT_RESET_BIF (1 << 1)
@@ -68,6 +78,10 @@
68#define SOFT_RESET_REGBB (1 << 22) 78#define SOFT_RESET_REGBB (1 << 22)
69#define SOFT_RESET_ORB (1 << 23) 79#define SOFT_RESET_ORB (1 << 23)
70 80
81#define SRBM_STATUS2 0x0EC4
82#define DMA_BUSY (1 << 5)
83#define DMA1_BUSY (1 << 6)
84
71#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470 85#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
72#define REQUEST_TYPE(x) (((x) & 0xf) << 0) 86#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
73#define RESPONSE_TYPE_MASK 0x000000F0 87#define RESPONSE_TYPE_MASK 0x000000F0
@@ -474,16 +488,7 @@
474/* 488/*
475 * PM4 489 * PM4
476 */ 490 */
477#define PACKET_TYPE0 0 491#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
478#define PACKET_TYPE1 1
479#define PACKET_TYPE2 2
480#define PACKET_TYPE3 3
481
482#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
483#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
484#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
485#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
486#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
487 (((reg) >> 2) & 0xFFFF) | \ 492 (((reg) >> 2) & 0xFFFF) | \
488 ((n) & 0x3FFF) << 16) 493 ((n) & 0x3FFF) << 16)
489#define CP_PACKET2 0x80000000 494#define CP_PACKET2 0x80000000
@@ -492,7 +497,7 @@
492 497
493#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v))) 498#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
494 499
495#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \ 500#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
496 (((op) & 0xFF) << 8) | \ 501 (((op) & 0xFF) << 8) | \
497 ((n) & 0x3FFF) << 16) 502 ((n) & 0x3FFF) << 16)
498 503
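The hunk above drops the per-file PACKET_TYPE* and CP_PACKET_GET_* defines in favour of shared RADEON_PACKET_* names. A stand-alone sketch of the PM4 header layout the PACKET0/PACKET3 macros build, assuming RADEON_PACKET_TYPE0/3 keep the values 0 and 3 of the removed local defines:

#include <stdint.h>
#include <stdio.h>

#define RADEON_PACKET_TYPE0 0  /* assumed to match the removed PACKET_TYPE0 */
#define RADEON_PACKET_TYPE3 3  /* assumed to match the removed PACKET_TYPE3 */

#define PACKET0(reg, n) (((uint32_t)RADEON_PACKET_TYPE0 << 30) | \
                         (((reg) >> 2) & 0xFFFF) |               \
                         (((n) & 0x3FFF) << 16))
#define PACKET3(op, n)  (((uint32_t)RADEON_PACKET_TYPE3 << 30) | \
                         (((op) & 0xFF) << 8) |                  \
                         (((n) & 0x3FFF) << 16))

int main(void)
{
	/* type-0 header: write n+1 dwords starting at register 0x0E50 */
	printf("PACKET0(0x0E50, 1) = 0x%08X\n", PACKET0(0x0E50, 1)); /* 0x00010394 */
	/* type-3 header: hypothetical opcode 0x10 with one payload dword */
	printf("PACKET3(0x10, 1)   = 0x%08X\n", PACKET3(0x10, 1));   /* 0xC0011000 */
	return 0;
}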
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 8ff7cac222dc..9db58530be37 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1215,11 +1215,11 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
1215 struct radeon_cs_reloc *reloc; 1215 struct radeon_cs_reloc *reloc;
1216 u32 value; 1216 u32 value;
1217 1217
1218 r = r100_cs_packet_next_reloc(p, &reloc); 1218 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1219 if (r) { 1219 if (r) {
1220 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1220 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1221 idx, reg); 1221 idx, reg);
1222 r100_cs_dump_packet(p, pkt); 1222 radeon_cs_dump_packet(p, pkt);
1223 return r; 1223 return r;
1224 } 1224 }
1225 1225
@@ -1233,7 +1233,7 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
1233 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { 1233 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
1234 if (reg == RADEON_SRC_PITCH_OFFSET) { 1234 if (reg == RADEON_SRC_PITCH_OFFSET) {
1235 DRM_ERROR("Cannot src blit from microtiled surface\n"); 1235 DRM_ERROR("Cannot src blit from microtiled surface\n");
1236 r100_cs_dump_packet(p, pkt); 1236 radeon_cs_dump_packet(p, pkt);
1237 return -EINVAL; 1237 return -EINVAL;
1238 } 1238 }
1239 tile_flags |= RADEON_DST_TILE_MICRO; 1239 tile_flags |= RADEON_DST_TILE_MICRO;
@@ -1263,16 +1263,16 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
1263 if (c > 16) { 1263 if (c > 16) {
1264 DRM_ERROR("Only 16 vertex buffers are allowed %d\n", 1264 DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
1265 pkt->opcode); 1265 pkt->opcode);
1266 r100_cs_dump_packet(p, pkt); 1266 radeon_cs_dump_packet(p, pkt);
1267 return -EINVAL; 1267 return -EINVAL;
1268 } 1268 }
1269 track->num_arrays = c; 1269 track->num_arrays = c;
1270 for (i = 0; i < (c - 1); i+=2, idx+=3) { 1270 for (i = 0; i < (c - 1); i+=2, idx+=3) {
1271 r = r100_cs_packet_next_reloc(p, &reloc); 1271 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1272 if (r) { 1272 if (r) {
1273 DRM_ERROR("No reloc for packet3 %d\n", 1273 DRM_ERROR("No reloc for packet3 %d\n",
1274 pkt->opcode); 1274 pkt->opcode);
1275 r100_cs_dump_packet(p, pkt); 1275 radeon_cs_dump_packet(p, pkt);
1276 return r; 1276 return r;
1277 } 1277 }
1278 idx_value = radeon_get_ib_value(p, idx); 1278 idx_value = radeon_get_ib_value(p, idx);
@@ -1281,11 +1281,11 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
1281 track->arrays[i + 0].esize = idx_value >> 8; 1281 track->arrays[i + 0].esize = idx_value >> 8;
1282 track->arrays[i + 0].robj = reloc->robj; 1282 track->arrays[i + 0].robj = reloc->robj;
1283 track->arrays[i + 0].esize &= 0x7F; 1283 track->arrays[i + 0].esize &= 0x7F;
1284 r = r100_cs_packet_next_reloc(p, &reloc); 1284 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1285 if (r) { 1285 if (r) {
1286 DRM_ERROR("No reloc for packet3 %d\n", 1286 DRM_ERROR("No reloc for packet3 %d\n",
1287 pkt->opcode); 1287 pkt->opcode);
1288 r100_cs_dump_packet(p, pkt); 1288 radeon_cs_dump_packet(p, pkt);
1289 return r; 1289 return r;
1290 } 1290 }
1291 ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset); 1291 ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
@@ -1294,11 +1294,11 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
1294 track->arrays[i + 1].esize &= 0x7F; 1294 track->arrays[i + 1].esize &= 0x7F;
1295 } 1295 }
1296 if (c & 1) { 1296 if (c & 1) {
1297 r = r100_cs_packet_next_reloc(p, &reloc); 1297 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1298 if (r) { 1298 if (r) {
1299 DRM_ERROR("No reloc for packet3 %d\n", 1299 DRM_ERROR("No reloc for packet3 %d\n",
1300 pkt->opcode); 1300 pkt->opcode);
1301 r100_cs_dump_packet(p, pkt); 1301 radeon_cs_dump_packet(p, pkt);
1302 return r; 1302 return r;
1303 } 1303 }
1304 idx_value = radeon_get_ib_value(p, idx); 1304 idx_value = radeon_get_ib_value(p, idx);
@@ -1355,67 +1355,6 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p,
1355 return 0; 1355 return 0;
1356} 1356}
1357 1357
1358void r100_cs_dump_packet(struct radeon_cs_parser *p,
1359 struct radeon_cs_packet *pkt)
1360{
1361 volatile uint32_t *ib;
1362 unsigned i;
1363 unsigned idx;
1364
1365 ib = p->ib.ptr;
1366 idx = pkt->idx;
1367 for (i = 0; i <= (pkt->count + 1); i++, idx++) {
1368 DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
1369 }
1370}
1371
1372/**
1373 * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
1374 * @parser: parser structure holding parsing context.
1375 * @pkt: where to store packet information
1376 *
1377 * Assume that chunk_ib_index is properly set. Will return -EINVAL
1378 * if the packet is bigger than the remaining ib size, or if the packet is unknown.
1379 **/
1380int r100_cs_packet_parse(struct radeon_cs_parser *p,
1381 struct radeon_cs_packet *pkt,
1382 unsigned idx)
1383{
1384 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
1385 uint32_t header;
1386
1387 if (idx >= ib_chunk->length_dw) {
1388 DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
1389 idx, ib_chunk->length_dw);
1390 return -EINVAL;
1391 }
1392 header = radeon_get_ib_value(p, idx);
1393 pkt->idx = idx;
1394 pkt->type = CP_PACKET_GET_TYPE(header);
1395 pkt->count = CP_PACKET_GET_COUNT(header);
1396 switch (pkt->type) {
1397 case PACKET_TYPE0:
1398 pkt->reg = CP_PACKET0_GET_REG(header);
1399 pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
1400 break;
1401 case PACKET_TYPE3:
1402 pkt->opcode = CP_PACKET3_GET_OPCODE(header);
1403 break;
1404 case PACKET_TYPE2:
1405 pkt->count = -1;
1406 break;
1407 default:
1408 DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
1409 return -EINVAL;
1410 }
1411 if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
1412 DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
1413 pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
1414 return -EINVAL;
1415 }
1416 return 0;
1417}
1418
1419/** 1358/**
1420 * r100_cs_packet_next_vline() - parse userspace VLINE packet 1359 * r100_cs_packet_next_vline() - parse userspace VLINE packet
1421 * @parser: parser structure holding parsing context. 1360 * @parser: parser structure holding parsing context.
@@ -1444,7 +1383,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
1444 ib = p->ib.ptr; 1383 ib = p->ib.ptr;
1445 1384
1446 /* parse the wait until */ 1385 /* parse the wait until */
1447 r = r100_cs_packet_parse(p, &waitreloc, p->idx); 1386 r = radeon_cs_packet_parse(p, &waitreloc, p->idx);
1448 if (r) 1387 if (r)
1449 return r; 1388 return r;
1450 1389
@@ -1461,7 +1400,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
1461 } 1400 }
1462 1401
1463 /* jump over the NOP */ 1402 /* jump over the NOP */
1464 r = r100_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2); 1403 r = radeon_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
1465 if (r) 1404 if (r)
1466 return r; 1405 return r;
1467 1406
@@ -1471,7 +1410,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
1471 1410
1472 header = radeon_get_ib_value(p, h_idx); 1411 header = radeon_get_ib_value(p, h_idx);
1473 crtc_id = radeon_get_ib_value(p, h_idx + 5); 1412 crtc_id = radeon_get_ib_value(p, h_idx + 5);
1474 reg = CP_PACKET0_GET_REG(header); 1413 reg = R100_CP_PACKET0_GET_REG(header);
1475 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); 1414 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
1476 if (!obj) { 1415 if (!obj) {
1477 DRM_ERROR("cannot find crtc %d\n", crtc_id); 1416 DRM_ERROR("cannot find crtc %d\n", crtc_id);
@@ -1506,54 +1445,6 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
1506 return 0; 1445 return 0;
1507} 1446}
1508 1447
1509/**
1510 * r100_cs_packet_next_reloc() - parse the next packet, which should be a reloc packet3
1511 * @parser: parser structure holding parsing context.
1512 * @data: pointer to relocation data
1513 * @offset_start: starting offset
1514 * @offset_mask: offset mask (to align start offset on)
1515 * @reloc: reloc information
1516 *
1517 * Check that the next packet is a relocation packet3, do bo validation and compute
1518 * GPU offset using the provided start.
1519 **/
1520int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
1521 struct radeon_cs_reloc **cs_reloc)
1522{
1523 struct radeon_cs_chunk *relocs_chunk;
1524 struct radeon_cs_packet p3reloc;
1525 unsigned idx;
1526 int r;
1527
1528 if (p->chunk_relocs_idx == -1) {
1529 DRM_ERROR("No relocation chunk !\n");
1530 return -EINVAL;
1531 }
1532 *cs_reloc = NULL;
1533 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
1534 r = r100_cs_packet_parse(p, &p3reloc, p->idx);
1535 if (r) {
1536 return r;
1537 }
1538 p->idx += p3reloc.count + 2;
1539 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
1540 DRM_ERROR("No packet3 for relocation for packet at %d.\n",
1541 p3reloc.idx);
1542 r100_cs_dump_packet(p, &p3reloc);
1543 return -EINVAL;
1544 }
1545 idx = radeon_get_ib_value(p, p3reloc.idx + 1);
1546 if (idx >= relocs_chunk->length_dw) {
1547 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
1548 idx, relocs_chunk->length_dw);
1549 r100_cs_dump_packet(p, &p3reloc);
1550 return -EINVAL;
1551 }
1552 /* FIXME: we assume reloc size is 4 dwords */
1553 *cs_reloc = p->relocs_ptr[(idx / 4)];
1554 return 0;
1555}
1556
1557static int r100_get_vtx_size(uint32_t vtx_fmt) 1448static int r100_get_vtx_size(uint32_t vtx_fmt)
1558{ 1449{
1559 int vtx_size; 1450 int vtx_size;
@@ -1631,7 +1522,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1631 if (r) { 1522 if (r) {
1632 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1523 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1633 idx, reg); 1524 idx, reg);
1634 r100_cs_dump_packet(p, pkt); 1525 radeon_cs_dump_packet(p, pkt);
1635 return r; 1526 return r;
1636 } 1527 }
1637 break; 1528 break;
@@ -1644,11 +1535,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1644 return r; 1535 return r;
1645 break; 1536 break;
1646 case RADEON_RB3D_DEPTHOFFSET: 1537 case RADEON_RB3D_DEPTHOFFSET:
1647 r = r100_cs_packet_next_reloc(p, &reloc); 1538 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1648 if (r) { 1539 if (r) {
1649 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1540 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1650 idx, reg); 1541 idx, reg);
1651 r100_cs_dump_packet(p, pkt); 1542 radeon_cs_dump_packet(p, pkt);
1652 return r; 1543 return r;
1653 } 1544 }
1654 track->zb.robj = reloc->robj; 1545 track->zb.robj = reloc->robj;
@@ -1657,11 +1548,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1657 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1548 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1658 break; 1549 break;
1659 case RADEON_RB3D_COLOROFFSET: 1550 case RADEON_RB3D_COLOROFFSET:
1660 r = r100_cs_packet_next_reloc(p, &reloc); 1551 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1661 if (r) { 1552 if (r) {
1662 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1553 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1663 idx, reg); 1554 idx, reg);
1664 r100_cs_dump_packet(p, pkt); 1555 radeon_cs_dump_packet(p, pkt);
1665 return r; 1556 return r;
1666 } 1557 }
1667 track->cb[0].robj = reloc->robj; 1558 track->cb[0].robj = reloc->robj;
@@ -1673,11 +1564,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1673 case RADEON_PP_TXOFFSET_1: 1564 case RADEON_PP_TXOFFSET_1:
1674 case RADEON_PP_TXOFFSET_2: 1565 case RADEON_PP_TXOFFSET_2:
1675 i = (reg - RADEON_PP_TXOFFSET_0) / 24; 1566 i = (reg - RADEON_PP_TXOFFSET_0) / 24;
1676 r = r100_cs_packet_next_reloc(p, &reloc); 1567 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1677 if (r) { 1568 if (r) {
1678 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1569 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1679 idx, reg); 1570 idx, reg);
1680 r100_cs_dump_packet(p, pkt); 1571 radeon_cs_dump_packet(p, pkt);
1681 return r; 1572 return r;
1682 } 1573 }
1683 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { 1574 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
@@ -1700,11 +1591,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1700 case RADEON_PP_CUBIC_OFFSET_T0_3: 1591 case RADEON_PP_CUBIC_OFFSET_T0_3:
1701 case RADEON_PP_CUBIC_OFFSET_T0_4: 1592 case RADEON_PP_CUBIC_OFFSET_T0_4:
1702 i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4; 1593 i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
1703 r = r100_cs_packet_next_reloc(p, &reloc); 1594 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1704 if (r) { 1595 if (r) {
1705 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1596 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1706 idx, reg); 1597 idx, reg);
1707 r100_cs_dump_packet(p, pkt); 1598 radeon_cs_dump_packet(p, pkt);
1708 return r; 1599 return r;
1709 } 1600 }
1710 track->textures[0].cube_info[i].offset = idx_value; 1601 track->textures[0].cube_info[i].offset = idx_value;
@@ -1718,11 +1609,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1718 case RADEON_PP_CUBIC_OFFSET_T1_3: 1609 case RADEON_PP_CUBIC_OFFSET_T1_3:
1719 case RADEON_PP_CUBIC_OFFSET_T1_4: 1610 case RADEON_PP_CUBIC_OFFSET_T1_4:
1720 i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4; 1611 i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
1721 r = r100_cs_packet_next_reloc(p, &reloc); 1612 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1722 if (r) { 1613 if (r) {
1723 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1614 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1724 idx, reg); 1615 idx, reg);
1725 r100_cs_dump_packet(p, pkt); 1616 radeon_cs_dump_packet(p, pkt);
1726 return r; 1617 return r;
1727 } 1618 }
1728 track->textures[1].cube_info[i].offset = idx_value; 1619 track->textures[1].cube_info[i].offset = idx_value;
@@ -1736,11 +1627,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1736 case RADEON_PP_CUBIC_OFFSET_T2_3: 1627 case RADEON_PP_CUBIC_OFFSET_T2_3:
1737 case RADEON_PP_CUBIC_OFFSET_T2_4: 1628 case RADEON_PP_CUBIC_OFFSET_T2_4:
1738 i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4; 1629 i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
1739 r = r100_cs_packet_next_reloc(p, &reloc); 1630 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1740 if (r) { 1631 if (r) {
1741 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1632 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1742 idx, reg); 1633 idx, reg);
1743 r100_cs_dump_packet(p, pkt); 1634 radeon_cs_dump_packet(p, pkt);
1744 return r; 1635 return r;
1745 } 1636 }
1746 track->textures[2].cube_info[i].offset = idx_value; 1637 track->textures[2].cube_info[i].offset = idx_value;
@@ -1754,11 +1645,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1754 track->zb_dirty = true; 1645 track->zb_dirty = true;
1755 break; 1646 break;
1756 case RADEON_RB3D_COLORPITCH: 1647 case RADEON_RB3D_COLORPITCH:
1757 r = r100_cs_packet_next_reloc(p, &reloc); 1648 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1758 if (r) { 1649 if (r) {
1759 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1650 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1760 idx, reg); 1651 idx, reg);
1761 r100_cs_dump_packet(p, pkt); 1652 radeon_cs_dump_packet(p, pkt);
1762 return r; 1653 return r;
1763 } 1654 }
1764 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { 1655 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
@@ -1825,11 +1716,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1825 track->zb_dirty = true; 1716 track->zb_dirty = true;
1826 break; 1717 break;
1827 case RADEON_RB3D_ZPASS_ADDR: 1718 case RADEON_RB3D_ZPASS_ADDR:
1828 r = r100_cs_packet_next_reloc(p, &reloc); 1719 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1829 if (r) { 1720 if (r) {
1830 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1721 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1831 idx, reg); 1722 idx, reg);
1832 r100_cs_dump_packet(p, pkt); 1723 radeon_cs_dump_packet(p, pkt);
1833 return r; 1724 return r;
1834 } 1725 }
1835 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1726 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
@@ -1986,10 +1877,10 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
1986 return r; 1877 return r;
1987 break; 1878 break;
1988 case PACKET3_INDX_BUFFER: 1879 case PACKET3_INDX_BUFFER:
1989 r = r100_cs_packet_next_reloc(p, &reloc); 1880 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1990 if (r) { 1881 if (r) {
1991 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode); 1882 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
1992 r100_cs_dump_packet(p, pkt); 1883 radeon_cs_dump_packet(p, pkt);
1993 return r; 1884 return r;
1994 } 1885 }
1995 ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset); 1886 ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
@@ -2000,10 +1891,10 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
2000 break; 1891 break;
2001 case 0x23: 1892 case 0x23:
2002 /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */ 1893 /* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
2003 r = r100_cs_packet_next_reloc(p, &reloc); 1894 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
2004 if (r) { 1895 if (r) {
2005 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode); 1896 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
2006 r100_cs_dump_packet(p, pkt); 1897 radeon_cs_dump_packet(p, pkt);
2007 return r; 1898 return r;
2008 } 1899 }
2009 ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset); 1900 ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
@@ -2100,37 +1991,36 @@ int r100_cs_parse(struct radeon_cs_parser *p)
2100 r100_cs_track_clear(p->rdev, track); 1991 r100_cs_track_clear(p->rdev, track);
2101 p->track = track; 1992 p->track = track;
2102 do { 1993 do {
2103 r = r100_cs_packet_parse(p, &pkt, p->idx); 1994 r = radeon_cs_packet_parse(p, &pkt, p->idx);
2104 if (r) { 1995 if (r) {
2105 return r; 1996 return r;
2106 } 1997 }
2107 p->idx += pkt.count + 2; 1998 p->idx += pkt.count + 2;
2108 switch (pkt.type) { 1999 switch (pkt.type) {
2109 case PACKET_TYPE0: 2000 case RADEON_PACKET_TYPE0:
2110 if (p->rdev->family >= CHIP_R200) 2001 if (p->rdev->family >= CHIP_R200)
2111 r = r100_cs_parse_packet0(p, &pkt, 2002 r = r100_cs_parse_packet0(p, &pkt,
2112 p->rdev->config.r100.reg_safe_bm, 2003 p->rdev->config.r100.reg_safe_bm,
2113 p->rdev->config.r100.reg_safe_bm_size, 2004 p->rdev->config.r100.reg_safe_bm_size,
2114 &r200_packet0_check); 2005 &r200_packet0_check);
2115 else 2006 else
2116 r = r100_cs_parse_packet0(p, &pkt, 2007 r = r100_cs_parse_packet0(p, &pkt,
2117 p->rdev->config.r100.reg_safe_bm, 2008 p->rdev->config.r100.reg_safe_bm,
2118 p->rdev->config.r100.reg_safe_bm_size, 2009 p->rdev->config.r100.reg_safe_bm_size,
2119 &r100_packet0_check); 2010 &r100_packet0_check);
2120 break; 2011 break;
2121 case PACKET_TYPE2: 2012 case RADEON_PACKET_TYPE2:
2122 break; 2013 break;
2123 case PACKET_TYPE3: 2014 case RADEON_PACKET_TYPE3:
2124 r = r100_packet3_check(p, &pkt); 2015 r = r100_packet3_check(p, &pkt);
2125 break; 2016 break;
2126 default: 2017 default:
2127 DRM_ERROR("Unknown packet type %d !\n", 2018 DRM_ERROR("Unknown packet type %d !\n",
2128 pkt.type); 2019 pkt.type);
2129 return -EINVAL; 2020 return -EINVAL;
2130 } 2021 }
2131 if (r) { 2022 if (r)
2132 return r; 2023 return r;
2133 }
2134 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 2024 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
2135 return 0; 2025 return 0;
2136} 2026}
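The parse loop above now leans on the shared radeon_cs_packet_parse(), which classifies a packet from its header dword the same way the removed CP_PACKET_GET_* macros did. A decode sketch under that assumption (the register mask is 0xFFFF here; the removed r100d.h variant used 0x1FFF, so the field width is family-dependent):

#include <stdint.h>
#include <stdio.h>

static void decode_header(uint32_t h)
{
	uint32_t type  = (h >> 30) & 3;       /* was CP_PACKET_GET_TYPE(h)  */
	uint32_t count = (h >> 16) & 0x3FFF;  /* was CP_PACKET_GET_COUNT(h) */

	switch (type) {
	case 0: /* RADEON_PACKET_TYPE0: register write(s) */
		printf("type0: reg 0x%04X, %u+1 dwords\n", (h & 0xFFFF) << 2, count);
		break;
	case 2: /* RADEON_PACKET_TYPE2: filler, no payload */
		printf("type2: nop\n");
		break;
	case 3: /* RADEON_PACKET_TYPE3: opcode + payload */
		printf("type3: opcode 0x%02X, %u+1 dwords\n", (h >> 8) & 0xFF, count);
		break;
	default:
		printf("type1: unused\n");
		break;
	}
}

int main(void)
{
	decode_header(0x00010394u);  /* the PACKET0 header built earlier */
	decode_header(0x80000000u);  /* CP_PACKET2 */
	decode_header(0xC0011000u);  /* the PACKET3 header built earlier */
	return 0;
}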
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index 6a603b378adb..eb40888bdfcc 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -81,10 +81,6 @@ struct r100_cs_track {
81 81
82int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track); 82int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track);
83void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track); 83void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track);
84int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
85 struct radeon_cs_reloc **cs_reloc);
86void r100_cs_dump_packet(struct radeon_cs_parser *p,
87 struct radeon_cs_packet *pkt);
88 84
89int r100_cs_packet_parse_vline(struct radeon_cs_parser *p); 85int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
90 86
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h
index eab91760fae0..f0f8ee69f480 100644
--- a/drivers/gpu/drm/radeon/r100d.h
+++ b/drivers/gpu/drm/radeon/r100d.h
@@ -64,17 +64,6 @@
64 REG_SET(PACKET3_IT_OPCODE, (op)) | \ 64 REG_SET(PACKET3_IT_OPCODE, (op)) | \
65 REG_SET(PACKET3_COUNT, (n))) 65 REG_SET(PACKET3_COUNT, (n)))
66 66
67#define PACKET_TYPE0 0
68#define PACKET_TYPE1 1
69#define PACKET_TYPE2 2
70#define PACKET_TYPE3 3
71
72#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
73#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
74#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
75#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
76#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
77
78/* Registers */ 67/* Registers */
79#define R_0000F0_RBBM_SOFT_RESET 0x0000F0 68#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
80#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0) 69#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 98143a5c5b73..b3807edb1936 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -162,7 +162,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
162 if (r) { 162 if (r) {
163 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 163 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
164 idx, reg); 164 idx, reg);
165 r100_cs_dump_packet(p, pkt); 165 radeon_cs_dump_packet(p, pkt);
166 return r; 166 return r;
167 } 167 }
168 break; 168 break;
@@ -175,11 +175,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
175 return r; 175 return r;
176 break; 176 break;
177 case RADEON_RB3D_DEPTHOFFSET: 177 case RADEON_RB3D_DEPTHOFFSET:
178 r = r100_cs_packet_next_reloc(p, &reloc); 178 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
179 if (r) { 179 if (r) {
180 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 180 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
181 idx, reg); 181 idx, reg);
182 r100_cs_dump_packet(p, pkt); 182 radeon_cs_dump_packet(p, pkt);
183 return r; 183 return r;
184 } 184 }
185 track->zb.robj = reloc->robj; 185 track->zb.robj = reloc->robj;
@@ -188,11 +188,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
188 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 188 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
189 break; 189 break;
190 case RADEON_RB3D_COLOROFFSET: 190 case RADEON_RB3D_COLOROFFSET:
191 r = r100_cs_packet_next_reloc(p, &reloc); 191 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
192 if (r) { 192 if (r) {
193 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 193 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
194 idx, reg); 194 idx, reg);
195 r100_cs_dump_packet(p, pkt); 195 radeon_cs_dump_packet(p, pkt);
196 return r; 196 return r;
197 } 197 }
198 track->cb[0].robj = reloc->robj; 198 track->cb[0].robj = reloc->robj;
@@ -207,11 +207,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
207 case R200_PP_TXOFFSET_4: 207 case R200_PP_TXOFFSET_4:
208 case R200_PP_TXOFFSET_5: 208 case R200_PP_TXOFFSET_5:
209 i = (reg - R200_PP_TXOFFSET_0) / 24; 209 i = (reg - R200_PP_TXOFFSET_0) / 24;
210 r = r100_cs_packet_next_reloc(p, &reloc); 210 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
211 if (r) { 211 if (r) {
212 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 212 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
213 idx, reg); 213 idx, reg);
214 r100_cs_dump_packet(p, pkt); 214 radeon_cs_dump_packet(p, pkt);
215 return r; 215 return r;
216 } 216 }
217 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { 217 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
@@ -260,11 +260,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
260 case R200_PP_CUBIC_OFFSET_F5_5: 260 case R200_PP_CUBIC_OFFSET_F5_5:
261 i = (reg - R200_PP_TXOFFSET_0) / 24; 261 i = (reg - R200_PP_TXOFFSET_0) / 24;
262 face = (reg - ((i * 24) + R200_PP_TXOFFSET_0)) / 4; 262 face = (reg - ((i * 24) + R200_PP_TXOFFSET_0)) / 4;
263 r = r100_cs_packet_next_reloc(p, &reloc); 263 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
264 if (r) { 264 if (r) {
265 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 265 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
266 idx, reg); 266 idx, reg);
267 r100_cs_dump_packet(p, pkt); 267 radeon_cs_dump_packet(p, pkt);
268 return r; 268 return r;
269 } 269 }
270 track->textures[i].cube_info[face - 1].offset = idx_value; 270 track->textures[i].cube_info[face - 1].offset = idx_value;
@@ -278,11 +278,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
278 track->zb_dirty = true; 278 track->zb_dirty = true;
279 break; 279 break;
280 case RADEON_RB3D_COLORPITCH: 280 case RADEON_RB3D_COLORPITCH:
281 r = r100_cs_packet_next_reloc(p, &reloc); 281 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
282 if (r) { 282 if (r) {
283 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 283 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
284 idx, reg); 284 idx, reg);
285 r100_cs_dump_packet(p, pkt); 285 radeon_cs_dump_packet(p, pkt);
286 return r; 286 return r;
287 } 287 }
288 288
@@ -355,11 +355,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
355 track->zb_dirty = true; 355 track->zb_dirty = true;
356 break; 356 break;
357 case RADEON_RB3D_ZPASS_ADDR: 357 case RADEON_RB3D_ZPASS_ADDR:
358 r = r100_cs_packet_next_reloc(p, &reloc); 358 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
359 if (r) { 359 if (r) {
360 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 360 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
361 idx, reg); 361 idx, reg);
362 r100_cs_dump_packet(p, pkt); 362 radeon_cs_dump_packet(p, pkt);
363 return r; 363 return r;
364 } 364 }
365 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 365 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index d0ba6023a1f8..c60350e6872d 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -615,7 +615,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
615 if (r) { 615 if (r) {
616 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 616 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
617 idx, reg); 617 idx, reg);
618 r100_cs_dump_packet(p, pkt); 618 radeon_cs_dump_packet(p, pkt);
619 return r; 619 return r;
620 } 620 }
621 break; 621 break;
@@ -630,11 +630,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
630 case R300_RB3D_COLOROFFSET2: 630 case R300_RB3D_COLOROFFSET2:
631 case R300_RB3D_COLOROFFSET3: 631 case R300_RB3D_COLOROFFSET3:
632 i = (reg - R300_RB3D_COLOROFFSET0) >> 2; 632 i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
633 r = r100_cs_packet_next_reloc(p, &reloc); 633 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
634 if (r) { 634 if (r) {
635 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 635 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
636 idx, reg); 636 idx, reg);
637 r100_cs_dump_packet(p, pkt); 637 radeon_cs_dump_packet(p, pkt);
638 return r; 638 return r;
639 } 639 }
640 track->cb[i].robj = reloc->robj; 640 track->cb[i].robj = reloc->robj;
@@ -643,11 +643,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
643 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 643 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
644 break; 644 break;
645 case R300_ZB_DEPTHOFFSET: 645 case R300_ZB_DEPTHOFFSET:
646 r = r100_cs_packet_next_reloc(p, &reloc); 646 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
647 if (r) { 647 if (r) {
648 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 648 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
649 idx, reg); 649 idx, reg);
650 r100_cs_dump_packet(p, pkt); 650 radeon_cs_dump_packet(p, pkt);
651 return r; 651 return r;
652 } 652 }
653 track->zb.robj = reloc->robj; 653 track->zb.robj = reloc->robj;
@@ -672,11 +672,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
672 case R300_TX_OFFSET_0+56: 672 case R300_TX_OFFSET_0+56:
673 case R300_TX_OFFSET_0+60: 673 case R300_TX_OFFSET_0+60:
674 i = (reg - R300_TX_OFFSET_0) >> 2; 674 i = (reg - R300_TX_OFFSET_0) >> 2;
675 r = r100_cs_packet_next_reloc(p, &reloc); 675 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
676 if (r) { 676 if (r) {
677 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 677 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
678 idx, reg); 678 idx, reg);
679 r100_cs_dump_packet(p, pkt); 679 radeon_cs_dump_packet(p, pkt);
680 return r; 680 return r;
681 } 681 }
682 682
@@ -745,11 +745,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
745 /* RB3D_COLORPITCH2 */ 745 /* RB3D_COLORPITCH2 */
746 /* RB3D_COLORPITCH3 */ 746 /* RB3D_COLORPITCH3 */
747 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { 747 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
748 r = r100_cs_packet_next_reloc(p, &reloc); 748 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
749 if (r) { 749 if (r) {
750 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 750 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
751 idx, reg); 751 idx, reg);
752 r100_cs_dump_packet(p, pkt); 752 radeon_cs_dump_packet(p, pkt);
753 return r; 753 return r;
754 } 754 }
755 755
@@ -830,11 +830,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
830 case 0x4F24: 830 case 0x4F24:
831 /* ZB_DEPTHPITCH */ 831 /* ZB_DEPTHPITCH */
832 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { 832 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
833 r = r100_cs_packet_next_reloc(p, &reloc); 833 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
834 if (r) { 834 if (r) {
835 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 835 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
836 idx, reg); 836 idx, reg);
837 r100_cs_dump_packet(p, pkt); 837 radeon_cs_dump_packet(p, pkt);
838 return r; 838 return r;
839 } 839 }
840 840
@@ -1045,11 +1045,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1045 track->tex_dirty = true; 1045 track->tex_dirty = true;
1046 break; 1046 break;
1047 case R300_ZB_ZPASS_ADDR: 1047 case R300_ZB_ZPASS_ADDR:
1048 r = r100_cs_packet_next_reloc(p, &reloc); 1048 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1049 if (r) { 1049 if (r) {
1050 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1050 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1051 idx, reg); 1051 idx, reg);
1052 r100_cs_dump_packet(p, pkt); 1052 radeon_cs_dump_packet(p, pkt);
1053 return r; 1053 return r;
1054 } 1054 }
1055 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1055 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
@@ -1087,11 +1087,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1087 track->cb_dirty = true; 1087 track->cb_dirty = true;
1088 break; 1088 break;
1089 case R300_RB3D_AARESOLVE_OFFSET: 1089 case R300_RB3D_AARESOLVE_OFFSET:
1090 r = r100_cs_packet_next_reloc(p, &reloc); 1090 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1091 if (r) { 1091 if (r) {
1092 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 1092 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1093 idx, reg); 1093 idx, reg);
1094 r100_cs_dump_packet(p, pkt); 1094 radeon_cs_dump_packet(p, pkt);
1095 return r; 1095 return r;
1096 } 1096 }
1097 track->aa.robj = reloc->robj; 1097 track->aa.robj = reloc->robj;
@@ -1156,10 +1156,10 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
1156 return r; 1156 return r;
1157 break; 1157 break;
1158 case PACKET3_INDX_BUFFER: 1158 case PACKET3_INDX_BUFFER:
1159 r = r100_cs_packet_next_reloc(p, &reloc); 1159 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
1160 if (r) { 1160 if (r) {
1161 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode); 1161 DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
1162 r100_cs_dump_packet(p, pkt); 1162 radeon_cs_dump_packet(p, pkt);
1163 return r; 1163 return r;
1164 } 1164 }
1165 ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset); 1165 ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
@@ -1257,21 +1257,21 @@ int r300_cs_parse(struct radeon_cs_parser *p)
1257 r100_cs_track_clear(p->rdev, track); 1257 r100_cs_track_clear(p->rdev, track);
1258 p->track = track; 1258 p->track = track;
1259 do { 1259 do {
1260 r = r100_cs_packet_parse(p, &pkt, p->idx); 1260 r = radeon_cs_packet_parse(p, &pkt, p->idx);
1261 if (r) { 1261 if (r) {
1262 return r; 1262 return r;
1263 } 1263 }
1264 p->idx += pkt.count + 2; 1264 p->idx += pkt.count + 2;
1265 switch (pkt.type) { 1265 switch (pkt.type) {
1266 case PACKET_TYPE0: 1266 case RADEON_PACKET_TYPE0:
1267 r = r100_cs_parse_packet0(p, &pkt, 1267 r = r100_cs_parse_packet0(p, &pkt,
1268 p->rdev->config.r300.reg_safe_bm, 1268 p->rdev->config.r300.reg_safe_bm,
1269 p->rdev->config.r300.reg_safe_bm_size, 1269 p->rdev->config.r300.reg_safe_bm_size,
1270 &r300_packet0_check); 1270 &r300_packet0_check);
1271 break; 1271 break;
1272 case PACKET_TYPE2: 1272 case RADEON_PACKET_TYPE2:
1273 break; 1273 break;
1274 case PACKET_TYPE3: 1274 case RADEON_PACKET_TYPE3:
1275 r = r300_packet3_check(p, &pkt); 1275 r = r300_packet3_check(p, &pkt);
1276 break; 1276 break;
1277 default: 1277 default:
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 002ab038d2ab..865e2c9980db 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -29,6 +29,8 @@
29 * 29 *
30 * Authors: 30 * Authors:
31 * Nicolai Haehnle <prefect_@gmx.net> 31 * Nicolai Haehnle <prefect_@gmx.net>
32 *
33 * ------------------------ This file is DEPRECATED! -------------------------
32 */ 34 */
33 35
34#include <drm/drmP.h> 36#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h
index 1f519a5ffb8c..ff229a00d273 100644
--- a/drivers/gpu/drm/radeon/r300d.h
+++ b/drivers/gpu/drm/radeon/r300d.h
@@ -65,17 +65,6 @@
65 REG_SET(PACKET3_IT_OPCODE, (op)) | \ 65 REG_SET(PACKET3_IT_OPCODE, (op)) | \
66 REG_SET(PACKET3_COUNT, (n))) 66 REG_SET(PACKET3_COUNT, (n)))
67 67
68#define PACKET_TYPE0 0
69#define PACKET_TYPE1 1
70#define PACKET_TYPE2 2
71#define PACKET_TYPE3 3
72
73#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
74#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
75#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
76#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
77#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
78
79/* Registers */ 68/* Registers */
80#define R_000148_MC_FB_LOCATION 0x000148 69#define R_000148_MC_FB_LOCATION 0x000148
81#define S_000148_MC_FB_START(x) (((x) & 0xFFFF) << 0) 70#define S_000148_MC_FB_START(x) (((x) & 0xFFFF) << 0)
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index ec576aaafb73..c0dc8d3ba0bb 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -355,6 +355,7 @@
355# define AVIVO_D1CRTC_V_BLANK (1 << 0) 355# define AVIVO_D1CRTC_V_BLANK (1 << 0)
356#define AVIVO_D1CRTC_STATUS_POSITION 0x60a0 356#define AVIVO_D1CRTC_STATUS_POSITION 0x60a0
357#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4 357#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
358#define AVIVO_D1CRTC_STATUS_HV_COUNT 0x60ac
358#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4 359#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
359 360
360#define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4 361#define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index becb03e8b32f..6d4b5611daf4 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -94,6 +94,12 @@ MODULE_FIRMWARE("radeon/SUMO_me.bin");
94MODULE_FIRMWARE("radeon/SUMO2_pfp.bin"); 94MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
95MODULE_FIRMWARE("radeon/SUMO2_me.bin"); 95MODULE_FIRMWARE("radeon/SUMO2_me.bin");
96 96
97static const u32 crtc_offsets[2] =
98{
99 0,
100 AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
101};
102
97int r600_debugfs_mc_info_init(struct radeon_device *rdev); 103int r600_debugfs_mc_info_init(struct radeon_device *rdev);
98 104
99/* r600,rv610,rv630,rv620,rv635,rv670 */ 105/* r600,rv610,rv630,rv620,rv635,rv670 */
@@ -103,6 +109,19 @@ void r600_fini(struct radeon_device *rdev);
103void r600_irq_disable(struct radeon_device *rdev); 109void r600_irq_disable(struct radeon_device *rdev);
104static void r600_pcie_gen2_enable(struct radeon_device *rdev); 110static void r600_pcie_gen2_enable(struct radeon_device *rdev);
105 111
112/**
113 * r600_get_xclk - get the xclk
114 *
115 * @rdev: radeon_device pointer
116 *
117 * Returns the reference clock used by the gfx engine
118 * (r6xx, IGPs, APUs).
119 */
120u32 r600_get_xclk(struct radeon_device *rdev)
121{
122 return rdev->clock.spll.reference_freq;
123}
124
106/* get temperature in millidegrees */ 125/* get temperature in millidegrees */
107int rv6xx_get_temp(struct radeon_device *rdev) 126int rv6xx_get_temp(struct radeon_device *rdev)
108{ 127{
@@ -1254,169 +1273,301 @@ void r600_vram_scratch_fini(struct radeon_device *rdev)
1254 radeon_bo_unref(&rdev->vram_scratch.robj); 1273 radeon_bo_unref(&rdev->vram_scratch.robj);
1255} 1274}
1256 1275
1257/* We don't check that the GPU really needs a reset; we simply do the 1276void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung)
1258 * reset. It's up to the caller to determine if the GPU needs one. We
1259 * might add a helper function to check that.
1260 */
1261static void r600_gpu_soft_reset_gfx(struct radeon_device *rdev)
1262{ 1277{
1263 u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) | 1278 u32 tmp = RREG32(R600_BIOS_3_SCRATCH);
1264 S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
1265 S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
1266 S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
1267 S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
1268 S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
1269 S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
1270 S_008010_GUI_ACTIVE(1);
1271 u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
1272 S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
1273 S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
1274 S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
1275 S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
1276 S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
1277 S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
1278 S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
1279 u32 tmp;
1280 1279
1281 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) 1280 if (hung)
1282 return; 1281 tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
1282 else
1283 tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
1284
1285 WREG32(R600_BIOS_3_SCRATCH, tmp);
1286}
1283 1287
1288static void r600_print_gpu_status_regs(struct radeon_device *rdev)
1289{
1284 dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n", 1290 dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
1285 RREG32(R_008010_GRBM_STATUS)); 1291 RREG32(R_008010_GRBM_STATUS));
1286 dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n", 1292 dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
1287 RREG32(R_008014_GRBM_STATUS2)); 1293 RREG32(R_008014_GRBM_STATUS2));
1288 dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n", 1294 dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
1289 RREG32(R_000E50_SRBM_STATUS)); 1295 RREG32(R_000E50_SRBM_STATUS));
1290 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", 1296 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
1291 RREG32(CP_STALLED_STAT1)); 1297 RREG32(CP_STALLED_STAT1));
1292 dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n", 1298 dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
1293 RREG32(CP_STALLED_STAT2)); 1299 RREG32(CP_STALLED_STAT2));
1294 dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n", 1300 dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
1295 RREG32(CP_BUSY_STAT)); 1301 RREG32(CP_BUSY_STAT));
1296 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", 1302 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
1297 RREG32(CP_STAT)); 1303 RREG32(CP_STAT));
1298 1304 dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
1299 /* Disable CP parsing/prefetching */ 1305 RREG32(DMA_STATUS_REG));
1300 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); 1306}
1301 1307
1302 /* Check if any of the rendering block is busy and reset it */ 1308static bool r600_is_display_hung(struct radeon_device *rdev)
1303 if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) || 1309{
1304 (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) { 1310 u32 crtc_hung = 0;
1305 tmp = S_008020_SOFT_RESET_CR(1) | 1311 u32 crtc_status[2];
1306 S_008020_SOFT_RESET_DB(1) | 1312 u32 i, j, tmp;
1307 S_008020_SOFT_RESET_CB(1) | 1313
1308 S_008020_SOFT_RESET_PA(1) | 1314 for (i = 0; i < rdev->num_crtc; i++) {
1309 S_008020_SOFT_RESET_SC(1) | 1315 if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN) {
1310 S_008020_SOFT_RESET_SMX(1) | 1316 crtc_status[i] = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
1311 S_008020_SOFT_RESET_SPI(1) | 1317 crtc_hung |= (1 << i);
1312 S_008020_SOFT_RESET_SX(1) | 1318 }
1313 S_008020_SOFT_RESET_SH(1) |
1314 S_008020_SOFT_RESET_TC(1) |
1315 S_008020_SOFT_RESET_TA(1) |
1316 S_008020_SOFT_RESET_VC(1) |
1317 S_008020_SOFT_RESET_VGT(1);
1318 dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
1319 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
1320 RREG32(R_008020_GRBM_SOFT_RESET);
1321 mdelay(15);
1322 WREG32(R_008020_GRBM_SOFT_RESET, 0);
1323 } 1319 }
1324 /* Reset CP (we always reset CP) */
1325 tmp = S_008020_SOFT_RESET_CP(1);
1326 dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
1327 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
1328 RREG32(R_008020_GRBM_SOFT_RESET);
1329 mdelay(15);
1330 WREG32(R_008020_GRBM_SOFT_RESET, 0);
1331 1320
1332 dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n", 1321 for (j = 0; j < 10; j++) {
1333 RREG32(R_008010_GRBM_STATUS)); 1322 for (i = 0; i < rdev->num_crtc; i++) {
1334 dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n", 1323 if (crtc_hung & (1 << i)) {
1335 RREG32(R_008014_GRBM_STATUS2)); 1324 tmp = RREG32(AVIVO_D1CRTC_STATUS_HV_COUNT + crtc_offsets[i]);
1336 dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n", 1325 if (tmp != crtc_status[i])
1337 RREG32(R_000E50_SRBM_STATUS)); 1326 crtc_hung &= ~(1 << i);
1338 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", 1327 }
1339 RREG32(CP_STALLED_STAT1)); 1328 }
1340 dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n", 1329 if (crtc_hung == 0)
1341 RREG32(CP_STALLED_STAT2)); 1330 return false;
1342 dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n", 1331 udelay(100);
1343 RREG32(CP_BUSY_STAT)); 1332 }
1344 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
1345 RREG32(CP_STAT));
1346 1333
1334 return true;
1347} 1335}
1348 1336
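r600_is_display_hung() above decides a CRTC is alive as soon as its HV counter (read through the new AVIVO_D1CRTC_STATUS_HV_COUNT register) changes between samples, and reports a hang only if some enabled CRTC never moves across all ten samples. A user-space model of that sampling loop, with the MMIO read mocked:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_CRTC 2

static uint32_t fake_hv[NUM_CRTC];  /* stands in for the HV_COUNT register */

static uint32_t read_hv(int i)
{
	if (i == 0)
		fake_hv[0] += 7;    /* CRTC 0 scans out; CRTC 1 is stuck */
	return fake_hv[i];
}

static bool display_is_hung(void)
{
	uint32_t snap[NUM_CRTC];
	uint32_t hung = 0;
	int i, j;

	for (i = 0; i < NUM_CRTC; i++) {  /* the driver also checks AVIVO_CRTC_EN here */
		snap[i] = read_hv(i);
		hung |= 1u << i;
	}
	for (j = 0; j < 10; j++) {
		for (i = 0; i < NUM_CRTC; i++)
			if ((hung & (1u << i)) && read_hv(i) != snap[i])
				hung &= ~(1u << i);
		if (!hung)
			return false;
		/* the driver udelay(100)s between samples */
	}
	return true;  /* some enabled CRTC's counter never moved */
}

int main(void)
{
	printf("display hung: %s\n", display_is_hung() ? "yes" : "no"); /* yes */
	return 0;
}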
1349static void r600_gpu_soft_reset_dma(struct radeon_device *rdev) 1337static u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
1350{ 1338{
1339 u32 reset_mask = 0;
1351 u32 tmp; 1340 u32 tmp;
1352 1341
1353 if (RREG32(DMA_STATUS_REG) & DMA_IDLE) 1342 /* GRBM_STATUS */
1354 return; 1343 tmp = RREG32(R_008010_GRBM_STATUS);
1344 if (rdev->family >= CHIP_RV770) {
1345 if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
1346 G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
1347 G_008010_TA_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
1348 G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
1349 G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
1350 reset_mask |= RADEON_RESET_GFX;
1351 } else {
1352 if (G_008010_PA_BUSY(tmp) | G_008010_SC_BUSY(tmp) |
1353 G_008010_SH_BUSY(tmp) | G_008010_SX_BUSY(tmp) |
1354 G_008010_TA03_BUSY(tmp) | G_008010_VGT_BUSY(tmp) |
1355 G_008010_DB03_BUSY(tmp) | G_008010_CB03_BUSY(tmp) |
1356 G_008010_SPI03_BUSY(tmp) | G_008010_VGT_BUSY_NO_DMA(tmp))
1357 reset_mask |= RADEON_RESET_GFX;
1358 }
1355 1359
1356 dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n", 1360 if (G_008010_CF_RQ_PENDING(tmp) | G_008010_PF_RQ_PENDING(tmp) |
1357 RREG32(DMA_STATUS_REG)); 1361 G_008010_CP_BUSY(tmp) | G_008010_CP_COHERENCY_BUSY(tmp))
1362 reset_mask |= RADEON_RESET_CP;
1358 1363
1359 /* Disable DMA */ 1364 if (G_008010_GRBM_EE_BUSY(tmp))
1360 tmp = RREG32(DMA_RB_CNTL); 1365 reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
1361 tmp &= ~DMA_RB_ENABLE;
1362 WREG32(DMA_RB_CNTL, tmp);
1363 1366
1364 /* Reset dma */ 1367 /* DMA_STATUS_REG */
1365 if (rdev->family >= CHIP_RV770) 1368 tmp = RREG32(DMA_STATUS_REG);
1366 WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA); 1369 if (!(tmp & DMA_IDLE))
1367 else 1370 reset_mask |= RADEON_RESET_DMA;
1368 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
1369 RREG32(SRBM_SOFT_RESET);
1370 udelay(50);
1371 WREG32(SRBM_SOFT_RESET, 0);
1372 1371
1373 dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n", 1372 /* SRBM_STATUS */
1374 RREG32(DMA_STATUS_REG)); 1373 tmp = RREG32(R_000E50_SRBM_STATUS);
1374 if (G_000E50_RLC_RQ_PENDING(tmp) | G_000E50_RLC_BUSY(tmp))
1375 reset_mask |= RADEON_RESET_RLC;
1376
1377 if (G_000E50_IH_BUSY(tmp))
1378 reset_mask |= RADEON_RESET_IH;
1379
1380 if (G_000E50_SEM_BUSY(tmp))
1381 reset_mask |= RADEON_RESET_SEM;
1382
1383 if (G_000E50_GRBM_RQ_PENDING(tmp))
1384 reset_mask |= RADEON_RESET_GRBM;
1385
1386 if (G_000E50_VMC_BUSY(tmp))
1387 reset_mask |= RADEON_RESET_VMC;
1388
1389 if (G_000E50_MCB_BUSY(tmp) | G_000E50_MCDZ_BUSY(tmp) |
1390 G_000E50_MCDY_BUSY(tmp) | G_000E50_MCDX_BUSY(tmp) |
1391 G_000E50_MCDW_BUSY(tmp))
1392 reset_mask |= RADEON_RESET_MC;
1393
1394 if (r600_is_display_hung(rdev))
1395 reset_mask |= RADEON_RESET_DISPLAY;
1396
1397 return reset_mask;
1375} 1398}
1376 1399
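r600_gpu_check_soft_reset() above folds the various GRBM/SRBM/DMA busy bits into a single mask of blocks worth resetting, which the callers below both act on and re-test. A toy version of that mapping for just two blocks; the bit positions and flag values are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

#define GUI_ACTIVE (1u << 31)  /* pretend GRBM_STATUS "render busy" bit */
#define DMA_IDLE   (1u << 0)   /* pretend DMA_STATUS_REG idle bit */

#define RESET_GFX  (1u << 0)   /* illustrative flag values */
#define RESET_DMA  (1u << 2)

static uint32_t gpu_check_soft_reset(uint32_t grbm_status, uint32_t dma_status)
{
	uint32_t reset_mask = 0;

	if (grbm_status & GUI_ACTIVE)  /* render blocks still busy */
		reset_mask |= RESET_GFX;
	if (!(dma_status & DMA_IDLE))  /* DMA engine not idle */
		reset_mask |= RESET_DMA;
	return reset_mask;
}

int main(void)
{
	/* GFX busy, DMA idle: only the GFX block is flagged */
	printf("reset mask: 0x%08X\n", gpu_check_soft_reset(GUI_ACTIVE, DMA_IDLE));
	return 0;
}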
1377static int r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) 1400static void r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
1378{ 1401{
1379 struct rv515_mc_save save; 1402 struct rv515_mc_save save;
1380 1403 u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
1381 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) 1404 u32 tmp;
1382 reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
1383
1384 if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
1385 reset_mask &= ~RADEON_RESET_DMA;
1386 1405
1387 if (reset_mask == 0) 1406 if (reset_mask == 0)
1388 return 0; 1407 return;
1389 1408
1390 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask); 1409 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
1391 1410
1411 r600_print_gpu_status_regs(rdev);
1412
1413 /* Disable CP parsing/prefetching */
1414 if (rdev->family >= CHIP_RV770)
1415 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1) | S_0086D8_CP_PFP_HALT(1));
1416 else
1417 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1418
1419 /* disable the RLC */
1420 WREG32(RLC_CNTL, 0);
1421
1422 if (reset_mask & RADEON_RESET_DMA) {
1423 /* Disable DMA */
1424 tmp = RREG32(DMA_RB_CNTL);
1425 tmp &= ~DMA_RB_ENABLE;
1426 WREG32(DMA_RB_CNTL, tmp);
1427 }
1428
1429 mdelay(50);
1430
1392 rv515_mc_stop(rdev, &save); 1431 rv515_mc_stop(rdev, &save);
1393 if (r600_mc_wait_for_idle(rdev)) { 1432 if (r600_mc_wait_for_idle(rdev)) {
1394 dev_warn(rdev->dev, "Wait for MC idle timed out!\n"); 1433 dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
1395 } 1434 }
1396 1435
1397 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) 1436 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) {
1398 r600_gpu_soft_reset_gfx(rdev); 1437 if (rdev->family >= CHIP_RV770)
1438 grbm_soft_reset |= S_008020_SOFT_RESET_DB(1) |
1439 S_008020_SOFT_RESET_CB(1) |
1440 S_008020_SOFT_RESET_PA(1) |
1441 S_008020_SOFT_RESET_SC(1) |
1442 S_008020_SOFT_RESET_SPI(1) |
1443 S_008020_SOFT_RESET_SX(1) |
1444 S_008020_SOFT_RESET_SH(1) |
1445 S_008020_SOFT_RESET_TC(1) |
1446 S_008020_SOFT_RESET_TA(1) |
1447 S_008020_SOFT_RESET_VC(1) |
1448 S_008020_SOFT_RESET_VGT(1);
1449 else
1450 grbm_soft_reset |= S_008020_SOFT_RESET_CR(1) |
1451 S_008020_SOFT_RESET_DB(1) |
1452 S_008020_SOFT_RESET_CB(1) |
1453 S_008020_SOFT_RESET_PA(1) |
1454 S_008020_SOFT_RESET_SC(1) |
1455 S_008020_SOFT_RESET_SMX(1) |
1456 S_008020_SOFT_RESET_SPI(1) |
1457 S_008020_SOFT_RESET_SX(1) |
1458 S_008020_SOFT_RESET_SH(1) |
1459 S_008020_SOFT_RESET_TC(1) |
1460 S_008020_SOFT_RESET_TA(1) |
1461 S_008020_SOFT_RESET_VC(1) |
1462 S_008020_SOFT_RESET_VGT(1);
1463 }
1464
1465 if (reset_mask & RADEON_RESET_CP) {
1466 grbm_soft_reset |= S_008020_SOFT_RESET_CP(1) |
1467 S_008020_SOFT_RESET_VGT(1);
1468
1469 srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
1470 }
1471
1472 if (reset_mask & RADEON_RESET_DMA) {
1473 if (rdev->family >= CHIP_RV770)
1474 srbm_soft_reset |= RV770_SOFT_RESET_DMA;
1475 else
1476 srbm_soft_reset |= SOFT_RESET_DMA;
1477 }
1478
1479 if (reset_mask & RADEON_RESET_RLC)
1480 srbm_soft_reset |= S_000E60_SOFT_RESET_RLC(1);
1481
1482 if (reset_mask & RADEON_RESET_SEM)
1483 srbm_soft_reset |= S_000E60_SOFT_RESET_SEM(1);
1484
1485 if (reset_mask & RADEON_RESET_IH)
1486 srbm_soft_reset |= S_000E60_SOFT_RESET_IH(1);
1487
1488 if (reset_mask & RADEON_RESET_GRBM)
1489 srbm_soft_reset |= S_000E60_SOFT_RESET_GRBM(1);
1490
1491 if (!(rdev->flags & RADEON_IS_IGP)) {
1492 if (reset_mask & RADEON_RESET_MC)
1493 srbm_soft_reset |= S_000E60_SOFT_RESET_MC(1);
1494 }
1495
1496 if (reset_mask & RADEON_RESET_VMC)
1497 srbm_soft_reset |= S_000E60_SOFT_RESET_VMC(1);
1498
1499 if (grbm_soft_reset) {
1500 tmp = RREG32(R_008020_GRBM_SOFT_RESET);
1501 tmp |= grbm_soft_reset;
1502 dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
1503 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
1504 tmp = RREG32(R_008020_GRBM_SOFT_RESET);
1505
1506 udelay(50);
1507
1508 tmp &= ~grbm_soft_reset;
1509 WREG32(R_008020_GRBM_SOFT_RESET, tmp);
1510 tmp = RREG32(R_008020_GRBM_SOFT_RESET);
1511 }
1512
1513 if (srbm_soft_reset) {
1514 tmp = RREG32(SRBM_SOFT_RESET);
1515 tmp |= srbm_soft_reset;
1516 dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
1517 WREG32(SRBM_SOFT_RESET, tmp);
1518 tmp = RREG32(SRBM_SOFT_RESET);
1519
1520 udelay(50);
1399 1521
1400 if (reset_mask & RADEON_RESET_DMA) 1522 tmp &= ~srbm_soft_reset;
1401 r600_gpu_soft_reset_dma(rdev); 1523 WREG32(SRBM_SOFT_RESET, tmp);
1524 tmp = RREG32(SRBM_SOFT_RESET);
1525 }
1402 1526
1403 /* Wait a little for things to settle down */ 1527 /* Wait a little for things to settle down */
1404 mdelay(1); 1528 mdelay(1);
1405 1529
1406 rv515_mc_resume(rdev, &save); 1530 rv515_mc_resume(rdev, &save);
1531 udelay(50);
1532
1533 r600_print_gpu_status_regs(rdev);
1534}
1535
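Both register blocks above follow the same assert/hold/deassert sequence: OR the chosen S_*_SOFT_RESET_* bits into the register, read it back so the write posts, hold the blocks in reset for ~50us, then clear the bits and read back again. A minimal sketch of that shared pattern (illustration only; soft_reset_toggle, reg and bits are placeholders, not names from this patch):

    static void soft_reset_toggle(struct radeon_device *rdev, u32 reg, u32 bits)
    {
        u32 tmp = RREG32(reg);     /* current reset register contents */

        tmp |= bits;
        WREG32(reg, tmp);
        tmp = RREG32(reg);         /* read back to post the write */

        udelay(50);                /* hold the blocks in reset */

        tmp &= ~bits;
        WREG32(reg, tmp);
        tmp = RREG32(reg);         /* post the deassert as well */
    }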
1536int r600_asic_reset(struct radeon_device *rdev)
1537{
1538 u32 reset_mask;
1539
1540 reset_mask = r600_gpu_check_soft_reset(rdev);
1541
1542 if (reset_mask)
1543 r600_set_bios_scratch_engine_hung(rdev, true);
1544
1545 r600_gpu_soft_reset(rdev, reset_mask);
1546
1547 reset_mask = r600_gpu_check_soft_reset(rdev);
1548
1549 if (!reset_mask)
1550 r600_set_bios_scratch_engine_hung(rdev, false);
1551
1407 return 0; 1552 return 0;
1408} 1553}
1409 1554
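The reset path now brackets the soft reset with two status checks: the BIOS scratch "engine hung" flag is raised before the hardware is touched and cleared only if the second check comes back clean, so a failed reset stays visible to the vbios. For context, a hypothetical, condensed call site (radeon_asic_reset() stands for the ASIC-table dispatch; the real caller in radeon_device.c also saves and restores ring state, so treat this fragment as a sketch only):

    /* Hypothetical call-site sketch, not the actual radeon_gpu_reset() body. */
    if (radeon_ring_test_lockup(rdev, ring)) {
        dev_info(rdev->dev, "GPU lockup detected, resetting\n");
        if (radeon_asic_reset(rdev))    /* dispatches to r600_asic_reset() */
            dev_err(rdev->dev, "GPU reset failed\n");
    }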
1410bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) 1555/**
1556 * r600_gfx_is_lockup - Check if the GFX engine is locked up
1557 *
1558 * @rdev: radeon_device pointer
1559 * @ring: radeon_ring structure holding ring information
1560 *
1561 * Check if the GFX engine is locked up.
1562 * Returns true if the engine appears to be locked up, false if not.
1563 */
1564bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1411{ 1565{
1412 u32 srbm_status; 1566 u32 reset_mask = r600_gpu_check_soft_reset(rdev);
1413 u32 grbm_status; 1567
1414 u32 grbm_status2; 1568 if (!(reset_mask & (RADEON_RESET_GFX |
1415 1569 RADEON_RESET_COMPUTE |
1416 srbm_status = RREG32(R_000E50_SRBM_STATUS); 1570 RADEON_RESET_CP))) {
1417 grbm_status = RREG32(R_008010_GRBM_STATUS);
1418 grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
1419 if (!G_008010_GUI_ACTIVE(grbm_status)) {
1420 radeon_ring_lockup_update(ring); 1571 radeon_ring_lockup_update(ring);
1421 return false; 1572 return false;
1422 } 1573 }
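Both lockup checks now share one shape: derive a reset mask from the status registers instead of polling engine-specific idle bits, refresh the ring's lockup tracker while the engine looks healthy, and only fall through to radeon_ring_test_lockup() when the mask flags the engine. A condensed sketch of that shape (my own summary, not a function added by this patch):

    bool engine_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring,
                          u32 engine_mask)
    {
        u32 reset_mask = r600_gpu_check_soft_reset(rdev);

        if (!(reset_mask & engine_mask)) {
            /* engine looks healthy: refresh the lockup tracker */
            radeon_ring_lockup_update(ring);
            return false;
        }
        /* status registers flag the engine: let the ring decide */
        return radeon_ring_test_lockup(rdev, ring);
    }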
@@ -1431,15 +1582,14 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1431 * @rdev: radeon_device pointer 1582 * @rdev: radeon_device pointer
1432 * @ring: radeon_ring structure holding ring information 1583 * @ring: radeon_ring structure holding ring information
1433 * 1584 *
1434 * Check if the async DMA engine is locked up (r6xx-evergreen). 1585 * Check if the async DMA engine is locked up.
1435 * Returns true if the engine appears to be locked up, false if not. 1586 * Returns true if the engine appears to be locked up, false if not.
1436 */ 1587 */
1437bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) 1588bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1438{ 1589{
1439 u32 dma_status_reg; 1590 u32 reset_mask = r600_gpu_check_soft_reset(rdev);
1440 1591
1441 dma_status_reg = RREG32(DMA_STATUS_REG); 1592 if (!(reset_mask & RADEON_RESET_DMA)) {
1442 if (dma_status_reg & DMA_IDLE) {
1443 radeon_ring_lockup_update(ring); 1593 radeon_ring_lockup_update(ring);
1444 return false; 1594 return false;
1445 } 1595 }
@@ -1448,13 +1598,6 @@ bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1448 return radeon_ring_test_lockup(rdev, ring); 1598 return radeon_ring_test_lockup(rdev, ring);
1449} 1599}
1450 1600
1451int r600_asic_reset(struct radeon_device *rdev)
1452{
1453 return r600_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
1454 RADEON_RESET_COMPUTE |
1455 RADEON_RESET_DMA));
1456}
1457
1458u32 r6xx_remap_render_backend(struct radeon_device *rdev, 1601u32 r6xx_remap_render_backend(struct radeon_device *rdev,
1459 u32 tiling_pipe_num, 1602 u32 tiling_pipe_num,
1460 u32 max_rb_num, 1603 u32 max_rb_num,
@@ -4318,14 +4461,14 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev)
4318} 4461}
4319 4462
4320/** 4463/**
4321 * r600_get_gpu_clock - return GPU clock counter snapshot 4464 * r600_get_gpu_clock_counter - return GPU clock counter snapshot
4322 * 4465 *
4323 * @rdev: radeon_device pointer 4466 * @rdev: radeon_device pointer
4324 * 4467 *
4325 * Fetches a GPU clock counter snapshot (R6xx-cayman). 4468 * Fetches a GPU clock counter snapshot (R6xx-cayman).
4326 * Returns the 64 bit clock counter snapshot. 4469 * Returns the 64 bit clock counter snapshot.
4327 */ 4470 */
4328uint64_t r600_get_gpu_clock(struct radeon_device *rdev) 4471uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev)
4329{ 4472{
4330 uint64_t clock; 4473 uint64_t clock;
4331 4474
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index 77da1f9c0b8e..f651881eb0ae 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -22,6 +22,8 @@
22 * 22 *
23 * Authors: 23 * Authors:
24 * Alex Deucher <alexander.deucher@amd.com> 24 * Alex Deucher <alexander.deucher@amd.com>
25 *
26 * ------------------------ This file is DEPRECATED! -------------------------
25 */ 27 */
26#include <drm/drmP.h> 28#include <drm/drmP.h>
27#include <drm/radeon_drm.h> 29#include <drm/radeon_drm.h>
@@ -488,37 +490,6 @@ set_default_state(drm_radeon_private_t *dev_priv)
488 ADVANCE_RING(); 490 ADVANCE_RING();
489} 491}
490 492
491/* 23 bits of float fractional data */
492#define I2F_FRAC_BITS 23
493#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
494
495/*
496 * Converts unsigned integer into 32-bit IEEE floating point representation.
497 * Will be exact from 0 to 2^24. Above that, we round towards zero
498 * as the fractional bits will not fit in a float. (It would be better to
499 * round towards even as the fpu does, but that is slower.)
500 */
501__pure uint32_t int2float(uint32_t x)
502{
503 uint32_t msb, exponent, fraction;
504
505 /* Zero is special */
506 if (!x) return 0;
507
508 /* Get location of the most significant bit */
509 msb = __fls(x);
510
511 /*
512 * Use a rotate instead of a shift because that works both leftwards
513 * and rightwards due to the mod(32) behaviour. This means we don't
514 * need to check to see if we are above 2^24 or not.
515 */
516 fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
517 exponent = (127 + msb) << I2F_FRAC_BITS;
518
519 return fraction + exponent;
520}
521
522static int r600_nomm_get_vb(struct drm_device *dev) 493static int r600_nomm_get_vb(struct drm_device *dev)
523{ 494{
524 drm_radeon_private_t *dev_priv = dev->dev_private; 495 drm_radeon_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index e082dca6feee..9fb5780a552f 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -31,6 +31,37 @@
31#include "r600_blit_shaders.h" 31#include "r600_blit_shaders.h"
32#include "radeon_blit_common.h" 32#include "radeon_blit_common.h"
33 33
34/* 23 bits of float fractional data */
35#define I2F_FRAC_BITS 23
36#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
37
38/*
39 * Converts unsigned integer into 32-bit IEEE floating point representation.
40 * Will be exact from 0 to 2^24. Above that, we round towards zero
41 * as the fractional bits will not fit in a float. (It would be better to
42 * round towards even as the fpu does, but that is slower.)
43 */
44__pure uint32_t int2float(uint32_t x)
45{
46 uint32_t msb, exponent, fraction;
47
48 /* Zero is special */
49 if (!x) return 0;
50
51 /* Get location of the most significant bit */
52 msb = __fls(x);
53
54 /*
55 * Use a rotate instead of a shift because that works both leftwards
56 * and rightwards due to the mod(32) behaviour. This means we don't
57 * need to check to see if we are above 2^24 or not.
58 */
59 fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
60 exponent = (127 + msb) << I2F_FRAC_BITS;
61
62 return fraction + exponent;
63}
64
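The rotate trick works because ror32() takes its shift count mod 32: for msb < 23 the value is effectively shifted left into the fraction field, and for msb > 23 the excess low bits are rotated away. A standalone worked example (my_fls()/my_ror32() are userspace stand-ins for the kernel's __fls()/ror32(), and the checked constant is worked out by hand, not taken from the patch; __builtin_clz assumes GCC/Clang):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t my_fls(uint32_t x)  { return 31 - __builtin_clz(x); }
    static uint32_t my_ror32(uint32_t w, uint32_t s)
    {
        s &= 31;
        return s ? (w >> s) | (w << (32 - s)) : w;
    }

    int main(void)
    {
        uint32_t x = 5, msb = my_fls(x);                       /* msb = 2 */
        uint32_t fraction = my_ror32(x, (msb - 23) & 0x1f) & 0x7FFFFF;
        uint32_t exponent = (127 + msb) << 23;
        assert(fraction + exponent == 0x40A00000);             /* 5.0f */
        return 0;
    }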
34/* emits 21 on rv770+, 23 on r600 */ 65/* emits 21 on rv770+, 23 on r600 */
35static void 66static void
36set_render_target(struct radeon_device *rdev, int format, 67set_render_target(struct radeon_device *rdev, int format,
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index be85f75aedda..1c51c08b1fde 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -24,6 +24,8 @@
24 * Authors: 24 * Authors:
25 * Dave Airlie <airlied@redhat.com> 25 * Dave Airlie <airlied@redhat.com>
26 * Alex Deucher <alexander.deucher@amd.com> 26 * Alex Deucher <alexander.deucher@amd.com>
27 *
28 * ------------------------ This file is DEPRECATED! -------------------------
27 */ 29 */
28 30
29#include <linux/module.h> 31#include <linux/module.h>
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 9b2512bf1a46..01a3ec83f284 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -31,12 +31,7 @@
31#include "r600d.h" 31#include "r600d.h"
32#include "r600_reg_safe.h" 32#include "r600_reg_safe.h"
33 33
34static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, 34static int r600_nomm;
35 struct radeon_cs_reloc **cs_reloc);
36static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
37 struct radeon_cs_reloc **cs_reloc);
38typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
39static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
40extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size); 35extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
41 36
42 37
@@ -784,170 +779,29 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
784} 779}
785 780
786/** 781/**
787 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet 782 * r600_cs_packet_parse_vline() - parse userspace VLINE packet
788 * @parser: parser structure holding parsing context.
789 * @pkt: where to store packet information
790 *
791 * Assumes that chunk_ib_index is properly set. Returns -EINVAL
792 * if the packet is bigger than the remaining ib size or if the packet type is unknown.
793 **/
794static int r600_cs_packet_parse(struct radeon_cs_parser *p,
795 struct radeon_cs_packet *pkt,
796 unsigned idx)
797{
798 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
799 uint32_t header;
800
801 if (idx >= ib_chunk->length_dw) {
802 DRM_ERROR("Cannot parse packet at %d after CS end %d!\n",
803 idx, ib_chunk->length_dw);
804 return -EINVAL;
805 }
806 header = radeon_get_ib_value(p, idx);
807 pkt->idx = idx;
808 pkt->type = CP_PACKET_GET_TYPE(header);
809 pkt->count = CP_PACKET_GET_COUNT(header);
810 pkt->one_reg_wr = 0;
811 switch (pkt->type) {
812 case PACKET_TYPE0:
813 pkt->reg = CP_PACKET0_GET_REG(header);
814 break;
815 case PACKET_TYPE3:
816 pkt->opcode = CP_PACKET3_GET_OPCODE(header);
817 break;
818 case PACKET_TYPE2:
819 pkt->count = -1;
820 break;
821 default:
822 DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
823 return -EINVAL;
824 }
825 if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
826 DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
827 pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
828 return -EINVAL;
829 }
830 return 0;
831}
832
833/**
834 * r600_cs_packet_next_reloc_mm() - parse next packet which should be reloc packet3
835 * @parser: parser structure holding parsing context.
836 * @data: pointer to relocation data
837 * @offset_start: starting offset
838 * @offset_mask: offset mask (to align start offset on)
839 * @reloc: reloc information
840 *
841 * Check that the next packet is a relocation packet3; do BO validation and
842 * compute the GPU offset using the provided start.
843 **/
844static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
845 struct radeon_cs_reloc **cs_reloc)
846{
847 struct radeon_cs_chunk *relocs_chunk;
848 struct radeon_cs_packet p3reloc;
849 unsigned idx;
850 int r;
851
852 if (p->chunk_relocs_idx == -1) {
853 DRM_ERROR("No relocation chunk !\n");
854 return -EINVAL;
855 }
856 *cs_reloc = NULL;
857 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
858 r = r600_cs_packet_parse(p, &p3reloc, p->idx);
859 if (r) {
860 return r;
861 }
862 p->idx += p3reloc.count + 2;
863 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
864 DRM_ERROR("No packet3 for relocation for packet at %d.\n",
865 p3reloc.idx);
866 return -EINVAL;
867 }
868 idx = radeon_get_ib_value(p, p3reloc.idx + 1);
869 if (idx >= relocs_chunk->length_dw) {
870 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
871 idx, relocs_chunk->length_dw);
872 return -EINVAL;
873 }
874 /* FIXME: we assume reloc size is 4 dwords */
875 *cs_reloc = p->relocs_ptr[(idx / 4)];
876 return 0;
877}
878
879/**
880 * r600_cs_packet_next_reloc_nomm() - parse next packet which should be reloc packet3
881 * @parser: parser structure holding parsing context. 783 * @parser: parser structure holding parsing context.
882 * @data: pointer to relocation data
883 * @offset_start: starting offset
884 * @offset_mask: offset mask (to align start offset on)
885 * @reloc: reloc information
886 * 784 *
887 * Check that the next packet is a relocation packet3; do BO validation and 785 * This is an R600-specific function for parsing VLINE packets.
888 * compute the GPU offset using the provided start. 786 * Real work is done by the r600_cs_common_vline_parse function.
889 **/ 787 * Here we just set up ASIC-specific register table and call
890static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, 788 * the common implementation function.
891 struct radeon_cs_reloc **cs_reloc) 789 */
892{ 790static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
893 struct radeon_cs_chunk *relocs_chunk;
894 struct radeon_cs_packet p3reloc;
895 unsigned idx;
896 int r;
897
898 if (p->chunk_relocs_idx == -1) {
899 DRM_ERROR("No relocation chunk !\n");
900 return -EINVAL;
901 }
902 *cs_reloc = NULL;
903 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
904 r = r600_cs_packet_parse(p, &p3reloc, p->idx);
905 if (r) {
906 return r;
907 }
908 p->idx += p3reloc.count + 2;
909 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
910 DRM_ERROR("No packet3 for relocation for packet at %d.\n",
911 p3reloc.idx);
912 return -EINVAL;
913 }
914 idx = radeon_get_ib_value(p, p3reloc.idx + 1);
915 if (idx >= relocs_chunk->length_dw) {
916 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
917 idx, relocs_chunk->length_dw);
918 return -EINVAL;
919 }
920 *cs_reloc = p->relocs;
921 (*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
922 (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
923 return 0;
924}
925
926/**
927 * r600_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
928 * @parser: parser structure holding parsing context.
929 *
930 * Check whether the next packet is a relocation packet3 NOP; returns 1
931 * if it is, 0 otherwise. No BO validation or offset computation happens here.
932 **/
933static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
934{ 791{
935 struct radeon_cs_packet p3reloc; 792 static uint32_t vline_start_end[2] = {AVIVO_D1MODE_VLINE_START_END,
936 int r; 793 AVIVO_D2MODE_VLINE_START_END};
794 static uint32_t vline_status[2] = {AVIVO_D1MODE_VLINE_STATUS,
795 AVIVO_D2MODE_VLINE_STATUS};
937 796
938 r = r600_cs_packet_parse(p, &p3reloc, p->idx); 797 return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
939 if (r) {
940 return 0;
941 }
942 if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
943 return 0;
944 }
945 return 1;
946} 798}
947 799
948/** 800/**
949 * r600_cs_packet_next_vline() - parse userspace VLINE packet 801 * r600_cs_common_vline_parse() - common vline parser
950 * @parser: parser structure holding parsing context. 802 * @parser: parser structure holding parsing context.
803 * @vline_start_end: table of vline_start_end registers
804 * @vline_status: table of vline_status registers
951 * 805 *
952 * Userspace sends a special sequence for VLINE waits. 806 * Userspace sends a special sequence for VLINE waits.
953 * PACKET0 - VLINE_START_END + value 807 * PACKET0 - VLINE_START_END + value
@@ -957,9 +811,16 @@ static int r600_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
957 * This function parses this and relocates the VLINE START END 811 * This function parses this and relocates the VLINE START END
958 * and WAIT_REG_MEM packets to the correct crtc. 812 * and WAIT_REG_MEM packets to the correct crtc.
959 * It also detects a switched off crtc and nulls out the 813 * It also detects a switched off crtc and nulls out the
960 * wait in that case. 814 * wait in that case. This function is common for all ASICs that
815 * are R600 and newer. The parsing algorithm is the same, and only
816 * differs in which registers are used.
817 *
818 * The caller is the ASIC-specific function, which passes in the parser
819 * context and the ASIC-specific register tables.
961 */ 820 */
962static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p) 821int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
822 uint32_t *vline_start_end,
823 uint32_t *vline_status)
963{ 824{
964 struct drm_mode_object *obj; 825 struct drm_mode_object *obj;
965 struct drm_crtc *crtc; 826 struct drm_crtc *crtc;
@@ -973,12 +834,12 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
973 ib = p->ib.ptr; 834 ib = p->ib.ptr;
974 835
975 /* parse the WAIT_REG_MEM */ 836 /* parse the WAIT_REG_MEM */
976 r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx); 837 r = radeon_cs_packet_parse(p, &wait_reg_mem, p->idx);
977 if (r) 838 if (r)
978 return r; 839 return r;
979 840
980 /* check it's a WAIT_REG_MEM */ 841 /* check it's a WAIT_REG_MEM */
981 if (wait_reg_mem.type != PACKET_TYPE3 || 842 if (wait_reg_mem.type != RADEON_PACKET_TYPE3 ||
982 wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) { 843 wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
983 DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n"); 844 DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
984 return -EINVAL; 845 return -EINVAL;
@@ -987,7 +848,12 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
987 wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1); 848 wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
988 /* bit 4 is reg (0) or mem (1) */ 849 /* bit 4 is reg (0) or mem (1) */
989 if (wait_reg_mem_info & 0x10) { 850 if (wait_reg_mem_info & 0x10) {
990 DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n"); 851 DRM_ERROR("vline WAIT_REG_MEM waiting on MEM instead of REG\n");
852 return -EINVAL;
853 }
854 /* bit 8 is me (0) or pfp (1) */
855 if (wait_reg_mem_info & 0x100) {
856 DRM_ERROR("vline WAIT_REG_MEM waiting on PFP instead of ME\n");
991 return -EINVAL; 857 return -EINVAL;
992 } 858 }
993 /* waiting for value to be equal */ 859 /* waiting for value to be equal */
@@ -995,18 +861,18 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
995 DRM_ERROR("vline WAIT_REG_MEM function not equal\n"); 861 DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
996 return -EINVAL; 862 return -EINVAL;
997 } 863 }
998 if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) { 864 if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != vline_status[0]) {
999 DRM_ERROR("vline WAIT_REG_MEM bad reg\n"); 865 DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
1000 return -EINVAL; 866 return -EINVAL;
1001 } 867 }
1002 868
1003 if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) { 869 if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != RADEON_VLINE_STAT) {
1004 DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n"); 870 DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
1005 return -EINVAL; 871 return -EINVAL;
1006 } 872 }
1007 873
1008 /* jump over the NOP */ 874 /* jump over the NOP */
1009 r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2); 875 r = radeon_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
1010 if (r) 876 if (r)
1011 return r; 877 return r;
1012 878
@@ -1016,7 +882,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
1016 882
1017 header = radeon_get_ib_value(p, h_idx); 883 header = radeon_get_ib_value(p, h_idx);
1018 crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); 884 crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
1019 reg = CP_PACKET0_GET_REG(header); 885 reg = R600_CP_PACKET0_GET_REG(header);
1020 886
1021 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); 887 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
1022 if (!obj) { 888 if (!obj) {
@@ -1028,7 +894,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
1028 crtc_id = radeon_crtc->crtc_id; 894 crtc_id = radeon_crtc->crtc_id;
1029 895
1030 if (!crtc->enabled) { 896 if (!crtc->enabled) {
1031 /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */ 897 /* CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
1032 ib[h_idx + 2] = PACKET2(0); 898 ib[h_idx + 2] = PACKET2(0);
1033 ib[h_idx + 3] = PACKET2(0); 899 ib[h_idx + 3] = PACKET2(0);
1034 ib[h_idx + 4] = PACKET2(0); 900 ib[h_idx + 4] = PACKET2(0);
@@ -1036,20 +902,15 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
1036 ib[h_idx + 6] = PACKET2(0); 902 ib[h_idx + 6] = PACKET2(0);
1037 ib[h_idx + 7] = PACKET2(0); 903 ib[h_idx + 7] = PACKET2(0);
1038 ib[h_idx + 8] = PACKET2(0); 904 ib[h_idx + 8] = PACKET2(0);
1039 } else if (crtc_id == 1) { 905 } else if (reg == vline_start_end[0]) {
1040 switch (reg) { 906 header &= ~R600_CP_PACKET0_REG_MASK;
1041 case AVIVO_D1MODE_VLINE_START_END: 907 header |= vline_start_end[crtc_id] >> 2;
1042 header &= ~R600_CP_PACKET0_REG_MASK;
1043 header |= AVIVO_D2MODE_VLINE_START_END >> 2;
1044 break;
1045 default:
1046 DRM_ERROR("unknown crtc reloc\n");
1047 return -EINVAL;
1048 }
1049 ib[h_idx] = header; 908 ib[h_idx] = header;
1050 ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2; 909 ib[h_idx + 4] = vline_status[crtc_id] >> 2;
910 } else {
911 DRM_ERROR("unknown crtc reloc\n");
912 return -EINVAL;
1051 } 913 }
1052
1053 return 0; 914 return 0;
1054} 915}
1055 916
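For reference, the dword sequence the parser expects from userspace, reconstructed from the checks above (field encodings follow the PM4 format and are not restated verbatim here):

    /*
     * PACKET0(vline_start_end[0], ...) - programs the start/end scanlines
     * PACKET3(WAIT_REG_MEM, 5)         - polls vline_status[0]:
     *                                    REG space (bit 4 clear), ME engine
     *                                    (bit 8 clear), function "equal",
     *                                    bit mask RADEON_VLINE_STAT
     * PACKET3(NOP, ...)                - carries the crtc_id to retarget to
     *
     * The parser rewrites both registers to the crtc's own instance, or
     * NOPs the whole wait out when that crtc is disabled.
     */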
@@ -1155,8 +1016,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1155 break; 1016 break;
1156 case R_028010_DB_DEPTH_INFO: 1017 case R_028010_DB_DEPTH_INFO:
1157 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) && 1018 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
1158 r600_cs_packet_next_is_pkt3_nop(p)) { 1019 radeon_cs_packet_next_is_pkt3_nop(p)) {
1159 r = r600_cs_packet_next_reloc(p, &reloc); 1020 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1160 if (r) { 1021 if (r) {
1161 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1022 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1162 "0x%04X\n", reg); 1023 "0x%04X\n", reg);
@@ -1198,7 +1059,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1198 case VGT_STRMOUT_BUFFER_BASE_1: 1059 case VGT_STRMOUT_BUFFER_BASE_1:
1199 case VGT_STRMOUT_BUFFER_BASE_2: 1060 case VGT_STRMOUT_BUFFER_BASE_2:
1200 case VGT_STRMOUT_BUFFER_BASE_3: 1061 case VGT_STRMOUT_BUFFER_BASE_3:
1201 r = r600_cs_packet_next_reloc(p, &reloc); 1062 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1202 if (r) { 1063 if (r) {
1203 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1064 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1204 "0x%04X\n", reg); 1065 "0x%04X\n", reg);
@@ -1221,7 +1082,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1221 track->streamout_dirty = true; 1082 track->streamout_dirty = true;
1222 break; 1083 break;
1223 case CP_COHER_BASE: 1084 case CP_COHER_BASE:
1224 r = r600_cs_packet_next_reloc(p, &reloc); 1085 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1225 if (r) { 1086 if (r) {
1226 dev_warn(p->dev, "missing reloc for CP_COHER_BASE " 1087 dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
1227 "0x%04X\n", reg); 1088 "0x%04X\n", reg);
@@ -1256,8 +1117,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1256 case R_0280B8_CB_COLOR6_INFO: 1117 case R_0280B8_CB_COLOR6_INFO:
1257 case R_0280BC_CB_COLOR7_INFO: 1118 case R_0280BC_CB_COLOR7_INFO:
1258 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) && 1119 if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
1259 r600_cs_packet_next_is_pkt3_nop(p)) { 1120 radeon_cs_packet_next_is_pkt3_nop(p)) {
1260 r = r600_cs_packet_next_reloc(p, &reloc); 1121 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1261 if (r) { 1122 if (r) {
1262 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 1123 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1263 return -EINVAL; 1124 return -EINVAL;
@@ -1320,7 +1181,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1320 case R_0280F8_CB_COLOR6_FRAG: 1181 case R_0280F8_CB_COLOR6_FRAG:
1321 case R_0280FC_CB_COLOR7_FRAG: 1182 case R_0280FC_CB_COLOR7_FRAG:
1322 tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4; 1183 tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
1323 if (!r600_cs_packet_next_is_pkt3_nop(p)) { 1184 if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
1324 if (!track->cb_color_base_last[tmp]) { 1185 if (!track->cb_color_base_last[tmp]) {
1325 dev_err(p->dev, "Broken old userspace? No cb_color0_base supplied before trying to write 0x%08X\n", reg); 1186 dev_err(p->dev, "Broken old userspace? No cb_color0_base supplied before trying to write 0x%08X\n", reg);
1326 return -EINVAL; 1187 return -EINVAL;
@@ -1329,7 +1190,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1329 track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp]; 1190 track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp];
1330 ib[idx] = track->cb_color_base_last[tmp]; 1191 ib[idx] = track->cb_color_base_last[tmp];
1331 } else { 1192 } else {
1332 r = r600_cs_packet_next_reloc(p, &reloc); 1193 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1333 if (r) { 1194 if (r) {
1334 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 1195 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1335 return -EINVAL; 1196 return -EINVAL;
@@ -1351,7 +1212,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1351 case R_0280D8_CB_COLOR6_TILE: 1212 case R_0280D8_CB_COLOR6_TILE:
1352 case R_0280DC_CB_COLOR7_TILE: 1213 case R_0280DC_CB_COLOR7_TILE:
1353 tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4; 1214 tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
1354 if (!r600_cs_packet_next_is_pkt3_nop(p)) { 1215 if (!radeon_cs_packet_next_is_pkt3_nop(p)) {
1355 if (!track->cb_color_base_last[tmp]) { 1216 if (!track->cb_color_base_last[tmp]) {
1356 dev_err(p->dev, "Broken old userspace? No cb_color0_base supplied before trying to write 0x%08X\n", reg); 1217 dev_err(p->dev, "Broken old userspace? No cb_color0_base supplied before trying to write 0x%08X\n", reg);
1357 return -EINVAL; 1218 return -EINVAL;
@@ -1360,7 +1221,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1360 track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp]; 1221 track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp];
1361 ib[idx] = track->cb_color_base_last[tmp]; 1222 ib[idx] = track->cb_color_base_last[tmp];
1362 } else { 1223 } else {
1363 r = r600_cs_packet_next_reloc(p, &reloc); 1224 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1364 if (r) { 1225 if (r) {
1365 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 1226 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
1366 return -EINVAL; 1227 return -EINVAL;
@@ -1395,7 +1256,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1395 case CB_COLOR5_BASE: 1256 case CB_COLOR5_BASE:
1396 case CB_COLOR6_BASE: 1257 case CB_COLOR6_BASE:
1397 case CB_COLOR7_BASE: 1258 case CB_COLOR7_BASE:
1398 r = r600_cs_packet_next_reloc(p, &reloc); 1259 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1399 if (r) { 1260 if (r) {
1400 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1261 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1401 "0x%04X\n", reg); 1262 "0x%04X\n", reg);
@@ -1410,7 +1271,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1410 track->cb_dirty = true; 1271 track->cb_dirty = true;
1411 break; 1272 break;
1412 case DB_DEPTH_BASE: 1273 case DB_DEPTH_BASE:
1413 r = r600_cs_packet_next_reloc(p, &reloc); 1274 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1414 if (r) { 1275 if (r) {
1415 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1276 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1416 "0x%04X\n", reg); 1277 "0x%04X\n", reg);
@@ -1423,7 +1284,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1423 track->db_dirty = true; 1284 track->db_dirty = true;
1424 break; 1285 break;
1425 case DB_HTILE_DATA_BASE: 1286 case DB_HTILE_DATA_BASE:
1426 r = r600_cs_packet_next_reloc(p, &reloc); 1287 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1427 if (r) { 1288 if (r) {
1428 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1289 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1429 "0x%04X\n", reg); 1290 "0x%04X\n", reg);
@@ -1493,7 +1354,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1493 case SQ_ALU_CONST_CACHE_VS_13: 1354 case SQ_ALU_CONST_CACHE_VS_13:
1494 case SQ_ALU_CONST_CACHE_VS_14: 1355 case SQ_ALU_CONST_CACHE_VS_14:
1495 case SQ_ALU_CONST_CACHE_VS_15: 1356 case SQ_ALU_CONST_CACHE_VS_15:
1496 r = r600_cs_packet_next_reloc(p, &reloc); 1357 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1497 if (r) { 1358 if (r) {
1498 dev_warn(p->dev, "bad SET_CONTEXT_REG " 1359 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1499 "0x%04X\n", reg); 1360 "0x%04X\n", reg);
@@ -1502,7 +1363,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1502 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1363 ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1503 break; 1364 break;
1504 case SX_MEMORY_EXPORT_BASE: 1365 case SX_MEMORY_EXPORT_BASE:
1505 r = r600_cs_packet_next_reloc(p, &reloc); 1366 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1506 if (r) { 1367 if (r) {
1507 dev_warn(p->dev, "bad SET_CONFIG_REG " 1368 dev_warn(p->dev, "bad SET_CONFIG_REG "
1508 "0x%04X\n", reg); 1369 "0x%04X\n", reg);
@@ -1788,7 +1649,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
1788 return -EINVAL; 1649 return -EINVAL;
1789 } 1650 }
1790 1651
1791 r = r600_cs_packet_next_reloc(p, &reloc); 1652 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1792 if (r) { 1653 if (r) {
1793 DRM_ERROR("bad SET PREDICATION\n"); 1654 DRM_ERROR("bad SET PREDICATION\n");
1794 return -EINVAL; 1655 return -EINVAL;
@@ -1829,7 +1690,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
1829 DRM_ERROR("bad DRAW_INDEX\n"); 1690 DRM_ERROR("bad DRAW_INDEX\n");
1830 return -EINVAL; 1691 return -EINVAL;
1831 } 1692 }
1832 r = r600_cs_packet_next_reloc(p, &reloc); 1693 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1833 if (r) { 1694 if (r) {
1834 DRM_ERROR("bad DRAW_INDEX\n"); 1695 DRM_ERROR("bad DRAW_INDEX\n");
1835 return -EINVAL; 1696 return -EINVAL;
@@ -1881,7 +1742,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
1881 if (idx_value & 0x10) { 1742 if (idx_value & 0x10) {
1882 uint64_t offset; 1743 uint64_t offset;
1883 1744
1884 r = r600_cs_packet_next_reloc(p, &reloc); 1745 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1885 if (r) { 1746 if (r) {
1886 DRM_ERROR("bad WAIT_REG_MEM\n"); 1747 DRM_ERROR("bad WAIT_REG_MEM\n");
1887 return -EINVAL; 1748 return -EINVAL;
@@ -1893,6 +1754,9 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
1893 1754
1894 ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0); 1755 ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0);
1895 ib[idx+2] = upper_32_bits(offset) & 0xff; 1756 ib[idx+2] = upper_32_bits(offset) & 0xff;
1757 } else if (idx_value & 0x100) {
1758 DRM_ERROR("cannot use PFP on REG wait\n");
1759 return -EINVAL;
1896 } 1760 }
1897 break; 1761 break;
1898 case PACKET3_CP_DMA: 1762 case PACKET3_CP_DMA:
@@ -1915,7 +1779,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
1915 return -EINVAL; 1779 return -EINVAL;
1916 } 1780 }
1917 /* src address space is memory */ 1781 /* src address space is memory */
1918 r = r600_cs_packet_next_reloc(p, &reloc); 1782 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1919 if (r) { 1783 if (r) {
1920 DRM_ERROR("bad CP DMA SRC\n"); 1784 DRM_ERROR("bad CP DMA SRC\n");
1921 return -EINVAL; 1785 return -EINVAL;
@@ -1945,7 +1809,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
1945 DRM_ERROR("CP DMA DAIC only supported for registers\n"); 1809 DRM_ERROR("CP DMA DAIC only supported for registers\n");
1946 return -EINVAL; 1810 return -EINVAL;
1947 } 1811 }
1948 r = r600_cs_packet_next_reloc(p, &reloc); 1812 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1949 if (r) { 1813 if (r) {
1950 DRM_ERROR("bad CP DMA DST\n"); 1814 DRM_ERROR("bad CP DMA DST\n");
1951 return -EINVAL; 1815 return -EINVAL;
@@ -1975,7 +1839,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
1975 /* 0xffffffff/0x0 is flush all cache flag */ 1839 /* 0xffffffff/0x0 is flush all cache flag */
1976 if (radeon_get_ib_value(p, idx + 1) != 0xffffffff || 1840 if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
1977 radeon_get_ib_value(p, idx + 2) != 0) { 1841 radeon_get_ib_value(p, idx + 2) != 0) {
1978 r = r600_cs_packet_next_reloc(p, &reloc); 1842 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1979 if (r) { 1843 if (r) {
1980 DRM_ERROR("bad SURFACE_SYNC\n"); 1844 DRM_ERROR("bad SURFACE_SYNC\n");
1981 return -EINVAL; 1845 return -EINVAL;
@@ -1991,7 +1855,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
1991 if (pkt->count) { 1855 if (pkt->count) {
1992 uint64_t offset; 1856 uint64_t offset;
1993 1857
1994 r = r600_cs_packet_next_reloc(p, &reloc); 1858 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
1995 if (r) { 1859 if (r) {
1996 DRM_ERROR("bad EVENT_WRITE\n"); 1860 DRM_ERROR("bad EVENT_WRITE\n");
1997 return -EINVAL; 1861 return -EINVAL;
@@ -2012,7 +1876,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
2012 DRM_ERROR("bad EVENT_WRITE_EOP\n"); 1876 DRM_ERROR("bad EVENT_WRITE_EOP\n");
2013 return -EINVAL; 1877 return -EINVAL;
2014 } 1878 }
2015 r = r600_cs_packet_next_reloc(p, &reloc); 1879 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2016 if (r) { 1880 if (r) {
2017 DRM_ERROR("bad EVENT_WRITE\n"); 1881 DRM_ERROR("bad EVENT_WRITE\n");
2018 return -EINVAL; 1882 return -EINVAL;
@@ -2078,7 +1942,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
2078 switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) { 1942 switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
2079 case SQ_TEX_VTX_VALID_TEXTURE: 1943 case SQ_TEX_VTX_VALID_TEXTURE:
2080 /* tex base */ 1944 /* tex base */
2081 r = r600_cs_packet_next_reloc(p, &reloc); 1945 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2082 if (r) { 1946 if (r) {
2083 DRM_ERROR("bad SET_RESOURCE\n"); 1947 DRM_ERROR("bad SET_RESOURCE\n");
2084 return -EINVAL; 1948 return -EINVAL;
@@ -2092,7 +1956,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
2092 } 1956 }
2093 texture = reloc->robj; 1957 texture = reloc->robj;
2094 /* tex mip base */ 1958 /* tex mip base */
2095 r = r600_cs_packet_next_reloc(p, &reloc); 1959 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2096 if (r) { 1960 if (r) {
2097 DRM_ERROR("bad SET_RESOURCE\n"); 1961 DRM_ERROR("bad SET_RESOURCE\n");
2098 return -EINVAL; 1962 return -EINVAL;
@@ -2113,7 +1977,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
2113 { 1977 {
2114 uint64_t offset64; 1978 uint64_t offset64;
2115 /* vtx base */ 1979 /* vtx base */
2116 r = r600_cs_packet_next_reloc(p, &reloc); 1980 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2117 if (r) { 1981 if (r) {
2118 DRM_ERROR("bad SET_RESOURCE\n"); 1982 DRM_ERROR("bad SET_RESOURCE\n");
2119 return -EINVAL; 1983 return -EINVAL;
@@ -2214,7 +2078,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
2214 { 2078 {
2215 u64 offset; 2079 u64 offset;
2216 2080
2217 r = r600_cs_packet_next_reloc(p, &reloc); 2081 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2218 if (r) { 2082 if (r) {
2219 DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n"); 2083 DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
2220 return -EINVAL; 2084 return -EINVAL;
@@ -2258,7 +2122,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
2258 /* Updating memory at DST_ADDRESS. */ 2122 /* Updating memory at DST_ADDRESS. */
2259 if (idx_value & 0x1) { 2123 if (idx_value & 0x1) {
2260 u64 offset; 2124 u64 offset;
2261 r = r600_cs_packet_next_reloc(p, &reloc); 2125 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2262 if (r) { 2126 if (r) {
2263 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n"); 2127 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
2264 return -EINVAL; 2128 return -EINVAL;
@@ -2277,7 +2141,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
2277 /* Reading data from SRC_ADDRESS. */ 2141 /* Reading data from SRC_ADDRESS. */
2278 if (((idx_value >> 1) & 0x3) == 2) { 2142 if (((idx_value >> 1) & 0x3) == 2) {
2279 u64 offset; 2143 u64 offset;
2280 r = r600_cs_packet_next_reloc(p, &reloc); 2144 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2281 if (r) { 2145 if (r) {
2282 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n"); 2146 DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
2283 return -EINVAL; 2147 return -EINVAL;
@@ -2302,7 +2166,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
2302 DRM_ERROR("bad MEM_WRITE (invalid count)\n"); 2166 DRM_ERROR("bad MEM_WRITE (invalid count)\n");
2303 return -EINVAL; 2167 return -EINVAL;
2304 } 2168 }
2305 r = r600_cs_packet_next_reloc(p, &reloc); 2169 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2306 if (r) { 2170 if (r) {
2307 DRM_ERROR("bad MEM_WRITE (missing reloc)\n"); 2171 DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
2308 return -EINVAL; 2172 return -EINVAL;
@@ -2331,7 +2195,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
2331 if (idx_value & 0x1) { 2195 if (idx_value & 0x1) {
2332 u64 offset; 2196 u64 offset;
2333 /* SRC is memory. */ 2197 /* SRC is memory. */
2334 r = r600_cs_packet_next_reloc(p, &reloc); 2198 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2335 if (r) { 2199 if (r) {
2336 DRM_ERROR("bad COPY_DW (missing src reloc)\n"); 2200 DRM_ERROR("bad COPY_DW (missing src reloc)\n");
2337 return -EINVAL; 2201 return -EINVAL;
@@ -2355,7 +2219,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
2355 if (idx_value & 0x2) { 2219 if (idx_value & 0x2) {
2356 u64 offset; 2220 u64 offset;
2357 /* DST is memory. */ 2221 /* DST is memory. */
2358 r = r600_cs_packet_next_reloc(p, &reloc); 2222 r = radeon_cs_packet_next_reloc(p, &reloc, r600_nomm);
2359 if (r) { 2223 if (r) {
2360 DRM_ERROR("bad COPY_DW (missing dst reloc)\n"); 2224 DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
2361 return -EINVAL; 2225 return -EINVAL;
@@ -2410,7 +2274,7 @@ int r600_cs_parse(struct radeon_cs_parser *p)
2410 p->track = track; 2274 p->track = track;
2411 } 2275 }
2412 do { 2276 do {
2413 r = r600_cs_packet_parse(p, &pkt, p->idx); 2277 r = radeon_cs_packet_parse(p, &pkt, p->idx);
2414 if (r) { 2278 if (r) {
2415 kfree(p->track); 2279 kfree(p->track);
2416 p->track = NULL; 2280 p->track = NULL;
@@ -2418,12 +2282,12 @@ int r600_cs_parse(struct radeon_cs_parser *p)
2418 } 2282 }
2419 p->idx += pkt.count + 2; 2283 p->idx += pkt.count + 2;
2420 switch (pkt.type) { 2284 switch (pkt.type) {
2421 case PACKET_TYPE0: 2285 case RADEON_PACKET_TYPE0:
2422 r = r600_cs_parse_packet0(p, &pkt); 2286 r = r600_cs_parse_packet0(p, &pkt);
2423 break; 2287 break;
2424 case PACKET_TYPE2: 2288 case RADEON_PACKET_TYPE2:
2425 break; 2289 break;
2426 case PACKET_TYPE3: 2290 case RADEON_PACKET_TYPE3:
2427 r = r600_packet3_check(p, &pkt); 2291 r = r600_packet3_check(p, &pkt);
2428 break; 2292 break;
2429 default: 2293 default:
@@ -2449,17 +2313,7 @@ int r600_cs_parse(struct radeon_cs_parser *p)
2449 return 0; 2313 return 0;
2450} 2314}
2451 2315
2452static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p) 2316#ifdef CONFIG_DRM_RADEON_UMS
2453{
2454 if (p->chunk_relocs_idx == -1) {
2455 return 0;
2456 }
2457 p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
2458 if (p->relocs == NULL) {
2459 return -ENOMEM;
2460 }
2461 return 0;
2462}
2463 2317
2464/** 2318/**
2465 * cs_parser_fini() - clean parser states 2319 * cs_parser_fini() - clean parser states
@@ -2485,6 +2339,18 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
2485 kfree(parser->chunks_array); 2339 kfree(parser->chunks_array);
2486} 2340}
2487 2341
2342static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
2343{
2344 if (p->chunk_relocs_idx == -1) {
2345 return 0;
2346 }
2347 p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
2348 if (p->relocs == NULL) {
2349 return -ENOMEM;
2350 }
2351 return 0;
2352}
2353
2488int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp, 2354int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
2489 unsigned family, u32 *ib, int *l) 2355 unsigned family, u32 *ib, int *l)
2490{ 2356{
@@ -2543,9 +2409,11 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
2543 2409
2544void r600_cs_legacy_init(void) 2410void r600_cs_legacy_init(void)
2545{ 2411{
2546 r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm; 2412 r600_nomm = 1;
2547} 2413}
2548 2414
2415#endif
2416
2549/* 2417/*
2550 * DMA 2418 * DMA
2551 */ 2419 */
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index ff80efe9cb7d..21ecc0e12dc4 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -23,6 +23,7 @@
23 * 23 *
24 * Authors: Christian König 24 * Authors: Christian König
25 */ 25 */
26#include <linux/hdmi.h>
26#include <drm/drmP.h> 27#include <drm/drmP.h>
27#include <drm/radeon_drm.h> 28#include <drm/radeon_drm.h>
28#include "radeon.h" 29#include "radeon.h"
@@ -121,79 +122,18 @@ static void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
121} 122}
122 123
123/* 124/*
124 * calculate the crc for a given info frame
125 */
126static void r600_hdmi_infoframe_checksum(uint8_t packetType,
127 uint8_t versionNumber,
128 uint8_t length,
129 uint8_t *frame)
130{
131 int i;
132 frame[0] = packetType + versionNumber + length;
133 for (i = 1; i <= length; i++)
134 frame[0] += frame[i];
135 frame[0] = 0x100 - frame[0];
136}
137
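The helper removed here implements the standard HDMI infoframe checksum: the byte is chosen so that the packet type, version, length, payload, and checksum sum to zero mod 256 (the same rule the new generic packing helpers from <linux/hdmi.h> apply). A standalone sketch of that rule (the function name and shape are mine, for illustration):

    #include <stddef.h>
    #include <stdint.h>

    static uint8_t infoframe_checksum(uint8_t type, uint8_t version,
                                      uint8_t length, const uint8_t *payload)
    {
        uint8_t sum = type + version + length;
        size_t i;

        for (i = 0; i < length; i++)
            sum += payload[i];         /* wraps mod 256, as in the original */
        return (uint8_t)(0x100 - sum); /* so sum + checksum == 0 (mod 256) */
    }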
138/*
139 * build a HDMI Video Info Frame 125 * build a HDMI Video Info Frame
140 */ 126 */
141static void r600_hdmi_videoinfoframe( 127static void r600_hdmi_update_avi_infoframe(struct drm_encoder *encoder,
142 struct drm_encoder *encoder, 128 void *buffer, size_t size)
143 enum r600_hdmi_color_format color_format,
144 int active_information_present,
145 uint8_t active_format_aspect_ratio,
146 uint8_t scan_information,
147 uint8_t colorimetry,
148 uint8_t ex_colorimetry,
149 uint8_t quantization,
150 int ITC,
151 uint8_t picture_aspect_ratio,
152 uint8_t video_format_identification,
153 uint8_t pixel_repetition,
154 uint8_t non_uniform_picture_scaling,
155 uint8_t bar_info_data_valid,
156 uint16_t top_bar,
157 uint16_t bottom_bar,
158 uint16_t left_bar,
159 uint16_t right_bar
160)
161{ 129{
162 struct drm_device *dev = encoder->dev; 130 struct drm_device *dev = encoder->dev;
163 struct radeon_device *rdev = dev->dev_private; 131 struct radeon_device *rdev = dev->dev_private;
164 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 132 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
165 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 133 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
166 uint32_t offset = dig->afmt->offset; 134 uint32_t offset = dig->afmt->offset;
135 uint8_t *frame = buffer + 3;
167 136
168 uint8_t frame[14];
169
170 frame[0x0] = 0;
171 frame[0x1] =
172 (scan_information & 0x3) |
173 ((bar_info_data_valid & 0x3) << 2) |
174 ((active_information_present & 0x1) << 4) |
175 ((color_format & 0x3) << 5);
176 frame[0x2] =
177 (active_format_aspect_ratio & 0xF) |
178 ((picture_aspect_ratio & 0x3) << 4) |
179 ((colorimetry & 0x3) << 6);
180 frame[0x3] =
181 (non_uniform_picture_scaling & 0x3) |
182 ((quantization & 0x3) << 2) |
183 ((ex_colorimetry & 0x7) << 4) |
184 ((ITC & 0x1) << 7);
185 frame[0x4] = (video_format_identification & 0x7F);
186 frame[0x5] = (pixel_repetition & 0xF);
187 frame[0x6] = (top_bar & 0xFF);
188 frame[0x7] = (top_bar >> 8);
189 frame[0x8] = (bottom_bar & 0xFF);
190 frame[0x9] = (bottom_bar >> 8);
191 frame[0xA] = (left_bar & 0xFF);
192 frame[0xB] = (left_bar >> 8);
193 frame[0xC] = (right_bar & 0xFF);
194 frame[0xD] = (right_bar >> 8);
195
196 r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
197 /* Our header values (type, version, length) should be alright, Intel 137 /* Our header values (type, version, length) should be alright, Intel
198 * is using the same. Checksum function also seems to be OK, it works 138 * is using the same. Checksum function also seems to be OK, it works
199 * fine for audio infoframe. However the calculated value is always lower 139
@@ -215,39 +155,15 @@ static void r600_hdmi_videoinfoframe(
215/* 155/*
216 * build a Audio Info Frame 156 * build a Audio Info Frame
217 */ 157 */
218static void r600_hdmi_audioinfoframe( 158static void r600_hdmi_update_audio_infoframe(struct drm_encoder *encoder,
219 struct drm_encoder *encoder, 159 const void *buffer, size_t size)
220 uint8_t channel_count,
221 uint8_t coding_type,
222 uint8_t sample_size,
223 uint8_t sample_frequency,
224 uint8_t format,
225 uint8_t channel_allocation,
226 uint8_t level_shift,
227 int downmix_inhibit
228)
229{ 160{
230 struct drm_device *dev = encoder->dev; 161 struct drm_device *dev = encoder->dev;
231 struct radeon_device *rdev = dev->dev_private; 162 struct radeon_device *rdev = dev->dev_private;
232 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 163 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
233 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 164 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
234 uint32_t offset = dig->afmt->offset; 165 uint32_t offset = dig->afmt->offset;
235 166 const u8 *frame = buffer + 3;
236 uint8_t frame[11];
237
238 frame[0x0] = 0;
239 frame[0x1] = (channel_count & 0x7) | ((coding_type & 0xF) << 4);
240 frame[0x2] = (sample_size & 0x3) | ((sample_frequency & 0x7) << 2);
241 frame[0x3] = format;
242 frame[0x4] = channel_allocation;
243 frame[0x5] = ((level_shift & 0xF) << 3) | ((downmix_inhibit & 0x1) << 7);
244 frame[0x6] = 0;
245 frame[0x7] = 0;
246 frame[0x8] = 0;
247 frame[0x9] = 0;
248 frame[0xA] = 0;
249
250 r600_hdmi_infoframe_checksum(0x84, 0x01, 0x0A, frame);
251 167
252 WREG32(HDMI0_AUDIO_INFO0 + offset, 168 WREG32(HDMI0_AUDIO_INFO0 + offset,
253 frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24)); 169 frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
@@ -320,7 +236,10 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
320 struct radeon_device *rdev = dev->dev_private; 236 struct radeon_device *rdev = dev->dev_private;
321 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 237 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
322 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 238 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
239 u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
240 struct hdmi_avi_infoframe frame;
323 uint32_t offset; 241 uint32_t offset;
242 ssize_t err;
324 243
325 /* Silent, r600_hdmi_enable will raise WARN for us */ 244 /* Silent, r600_hdmi_enable will raise WARN for us */
326 if (!dig->afmt->enabled) 245 if (!dig->afmt->enabled)
@@ -371,9 +290,19 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
371 290
372 WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */ 291 WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */
373 292
374 r600_hdmi_videoinfoframe(encoder, RGB, 0, 0, 0, 0, 293 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
375 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); 294 if (err < 0) {
295 DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
296 return;
297 }
376 298
299 err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
300 if (err < 0) {
301 DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
302 return;
303 }
304
305 r600_hdmi_update_avi_infoframe(encoder, buffer, sizeof(buffer));
377 r600_hdmi_update_ACR(encoder, mode->clock); 306 r600_hdmi_update_ACR(encoder, mode->clock);
378 307
379 /* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */ 308 /* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */
@@ -395,8 +324,11 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
395 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 324 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
396 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 325 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
397 struct r600_audio audio = r600_audio_status(rdev); 326 struct r600_audio audio = r600_audio_status(rdev);
327 uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
328 struct hdmi_audio_infoframe frame;
398 uint32_t offset; 329 uint32_t offset;
399 uint32_t iec; 330 uint32_t iec;
331 ssize_t err;
400 332
401 if (!dig->afmt || !dig->afmt->enabled) 333 if (!dig->afmt || !dig->afmt->enabled)
402 return; 334 return;
@@ -462,9 +394,21 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
462 iec |= 0x5 << 16; 394 iec |= 0x5 << 16;
463 WREG32_P(HDMI0_60958_1 + offset, iec, ~0x5000f); 395 WREG32_P(HDMI0_60958_1 + offset, iec, ~0x5000f);
464 396
465 r600_hdmi_audioinfoframe(encoder, audio.channels - 1, 0, 0, 0, 0, 0, 0, 397 err = hdmi_audio_infoframe_init(&frame);
466 0); 398 if (err < 0) {
399 DRM_ERROR("failed to setup audio infoframe\n");
400 return;
401 }
402
403 frame.channels = audio.channels;
404
405 err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
406 if (err < 0) {
407 DRM_ERROR("failed to pack audio infoframe\n");
408 return;
409 }
467 410
411 r600_hdmi_update_audio_infoframe(encoder, buffer, sizeof(buffer));
468 r600_hdmi_audio_workaround(encoder); 412 r600_hdmi_audio_workaround(encoder);
469} 413}
470 414
@@ -544,7 +488,6 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
544 488
545 /* Called for ATOM_ENCODER_MODE_HDMI only */ 489 /* Called for ATOM_ENCODER_MODE_HDMI only */
546 if (!dig || !dig->afmt) { 490 if (!dig || !dig->afmt) {
547 WARN_ON(1);
548 return; 491 return;
549 } 492 }
550 if (!dig->afmt->enabled) 493 if (!dig->afmt->enabled)
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 4a53402b1852..a42ba11a3bed 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -182,6 +182,8 @@
182#define CP_COHER_BASE 0x85F8 182#define CP_COHER_BASE 0x85F8
183#define CP_DEBUG 0xC1FC 183#define CP_DEBUG 0xC1FC
184#define R_0086D8_CP_ME_CNTL 0x86D8 184#define R_0086D8_CP_ME_CNTL 0x86D8
185#define S_0086D8_CP_PFP_HALT(x) (((x) & 1)<<26)
186#define C_0086D8_CP_PFP_HALT(x) ((x) & 0xFBFFFFFF)
185#define S_0086D8_CP_ME_HALT(x) (((x) & 1)<<28) 187#define S_0086D8_CP_ME_HALT(x) (((x) & 1)<<28)
186#define C_0086D8_CP_ME_HALT(x) ((x) & 0xEFFFFFFF) 188#define C_0086D8_CP_ME_HALT(x) ((x) & 0xEFFFFFFF)
187#define CP_ME_RAM_DATA 0xC160 189#define CP_ME_RAM_DATA 0xC160
@@ -1143,19 +1145,10 @@
1143/* 1145/*
1144 * PM4 1146 * PM4
1145 */ 1147 */
1146#define PACKET_TYPE0 0 1148#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
1147#define PACKET_TYPE1 1
1148#define PACKET_TYPE2 2
1149#define PACKET_TYPE3 3
1150
1151#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
1152#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
1153#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
1154#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
1155#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
1156 (((reg) >> 2) & 0xFFFF) | \ 1149 (((reg) >> 2) & 0xFFFF) | \
1157 ((n) & 0x3FFF) << 16) 1150 ((n) & 0x3FFF) << 16)
1158#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \ 1151#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
1159 (((op) & 0xFF) << 8) | \ 1152 (((op) & 0xFF) << 8) | \
1160 ((n) & 0x3FFF) << 16) 1153 ((n) & 0x3FFF) << 16)
1161 1154
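The bit layout behind these macros, taken from the CP_PACKET_GET_* helpers deleted above: bits 31:30 hold the packet type, bits 29:16 the dword count, and for type-0 packets bits 15:0 hold the register dword address (reg >> 2); type-3 packets carry the opcode in bits 15:8. A tiny self-check (MY_PACKET0 is a userspace mirror of PACKET0(); the expected constant is worked out by hand, not taken from the patch):

    #include <assert.h>
    #include <stdint.h>

    #define MY_PACKET0(reg, n) \
        ((0u << 30) | (((reg) >> 2) & 0xFFFF) | (((uint32_t)(n) & 0x3FFF) << 16))

    int main(void)
    {
        /* CP_COHER_BASE is 0x85F8 per r600d.h above: 0x85F8 >> 2 == 0x217E */
        assert(MY_PACKET0(0x85F8, 0) == 0x0000217E);
        return 0;
    }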
@@ -1328,6 +1321,7 @@
1328#define G_008010_VC_BUSY(x) (((x) >> 11) & 1) 1321#define G_008010_VC_BUSY(x) (((x) >> 11) & 1)
1329#define G_008010_DB03_CLEAN(x) (((x) >> 12) & 1) 1322#define G_008010_DB03_CLEAN(x) (((x) >> 12) & 1)
1330#define G_008010_CB03_CLEAN(x) (((x) >> 13) & 1) 1323#define G_008010_CB03_CLEAN(x) (((x) >> 13) & 1)
1324#define G_008010_TA_BUSY(x) (((x) >> 14) & 1)
1331#define G_008010_VGT_BUSY_NO_DMA(x) (((x) >> 16) & 1) 1325#define G_008010_VGT_BUSY_NO_DMA(x) (((x) >> 16) & 1)
1332#define G_008010_VGT_BUSY(x) (((x) >> 17) & 1) 1326#define G_008010_VGT_BUSY(x) (((x) >> 17) & 1)
1333#define G_008010_TA03_BUSY(x) (((x) >> 18) & 1) 1327#define G_008010_TA03_BUSY(x) (((x) >> 18) & 1)
@@ -1395,6 +1389,7 @@
1395#define G_000E50_MCDW_BUSY(x) (((x) >> 13) & 1) 1389#define G_000E50_MCDW_BUSY(x) (((x) >> 13) & 1)
1396#define G_000E50_SEM_BUSY(x) (((x) >> 14) & 1) 1390#define G_000E50_SEM_BUSY(x) (((x) >> 14) & 1)
1397#define G_000E50_RLC_BUSY(x) (((x) >> 15) & 1) 1391#define G_000E50_RLC_BUSY(x) (((x) >> 15) & 1)
1392#define G_000E50_IH_BUSY(x) (((x) >> 17) & 1)
1398#define G_000E50_BIF_BUSY(x) (((x) >> 29) & 1) 1393#define G_000E50_BIF_BUSY(x) (((x) >> 29) & 1)
1399#define R_000E60_SRBM_SOFT_RESET 0x0E60 1394#define R_000E60_SRBM_SOFT_RESET 0x0E60
1400#define S_000E60_SOFT_RESET_BIF(x) (((x) & 1) << 1) 1395#define S_000E60_SOFT_RESET_BIF(x) (((x) & 1) << 1)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index a08f657329a0..8263af3fd832 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -136,6 +136,15 @@ extern int radeon_lockup_timeout;
136#define RADEON_RESET_GFX (1 << 0) 136#define RADEON_RESET_GFX (1 << 0)
137#define RADEON_RESET_COMPUTE (1 << 1) 137#define RADEON_RESET_COMPUTE (1 << 1)
138#define RADEON_RESET_DMA (1 << 2) 138#define RADEON_RESET_DMA (1 << 2)
139#define RADEON_RESET_CP (1 << 3)
140#define RADEON_RESET_GRBM (1 << 4)
141#define RADEON_RESET_DMA1 (1 << 5)
142#define RADEON_RESET_RLC (1 << 6)
143#define RADEON_RESET_SEM (1 << 7)
144#define RADEON_RESET_IH (1 << 8)
145#define RADEON_RESET_VMC (1 << 9)
146#define RADEON_RESET_MC (1 << 10)
147#define RADEON_RESET_DISPLAY (1 << 11)
139 148
140/* 149/*
141 * Errata workarounds. 150 * Errata workarounds.
@@ -341,7 +350,6 @@ struct radeon_bo {
341 struct drm_gem_object gem_base; 350 struct drm_gem_object gem_base;
342 351
343 struct ttm_bo_kmap_obj dma_buf_vmap; 352 struct ttm_bo_kmap_obj dma_buf_vmap;
344 int vmapping_count;
345}; 353};
346#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base) 354#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
347 355
@@ -771,6 +779,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
771 struct radeon_ib *ib, struct radeon_vm *vm, 779 struct radeon_ib *ib, struct radeon_vm *vm,
772 unsigned size); 780 unsigned size);
773void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib); 781void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
782void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence);
774int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, 783int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
775 struct radeon_ib *const_ib); 784 struct radeon_ib *const_ib);
776int radeon_ib_pool_init(struct radeon_device *rdev); 785int radeon_ib_pool_init(struct radeon_device *rdev);
@@ -1169,6 +1178,10 @@ struct radeon_asic {
1169 bool (*gui_idle)(struct radeon_device *rdev); 1178 bool (*gui_idle)(struct radeon_device *rdev);
1170 /* wait for mc_idle */ 1179 /* wait for mc_idle */
1171 int (*mc_wait_for_idle)(struct radeon_device *rdev); 1180 int (*mc_wait_for_idle)(struct radeon_device *rdev);
1181 /* get the reference clock */
1182 u32 (*get_xclk)(struct radeon_device *rdev);
1183 /* get the gpu clock counter */
1184 uint64_t (*get_gpu_clock_counter)(struct radeon_device *rdev);
1172 /* gart */ 1185 /* gart */
1173 struct { 1186 struct {
1174 void (*tlb_flush)(struct radeon_device *rdev); 1187 void (*tlb_flush)(struct radeon_device *rdev);
@@ -1179,7 +1192,9 @@ struct radeon_asic {
1179 void (*fini)(struct radeon_device *rdev); 1192 void (*fini)(struct radeon_device *rdev);
1180 1193
1181 u32 pt_ring_index; 1194 u32 pt_ring_index;
1182 void (*set_page)(struct radeon_device *rdev, uint64_t pe, 1195 void (*set_page)(struct radeon_device *rdev,
1196 struct radeon_ib *ib,
1197 uint64_t pe,
1183 uint64_t addr, unsigned count, 1198 uint64_t addr, unsigned count,
1184 uint32_t incr, uint32_t flags); 1199 uint32_t incr, uint32_t flags);
1185 } vm; 1200 } vm;
@@ -1757,6 +1772,7 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
1757#define ASIC_IS_DCE6(rdev) ((rdev->family >= CHIP_ARUBA)) 1772#define ASIC_IS_DCE6(rdev) ((rdev->family >= CHIP_ARUBA))
1758#define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \ 1773#define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
1759 (rdev->flags & RADEON_IS_IGP)) 1774 (rdev->flags & RADEON_IS_IGP))
1775#define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND))
1760 1776
1761/* 1777/*
1762 * BIOS helpers. 1778 * BIOS helpers.
@@ -1801,7 +1817,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
1801#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p)) 1817#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
1802#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev)) 1818#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
1803#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev)) 1819#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
1804#define radeon_asic_vm_set_page(rdev, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (pe), (addr), (count), (incr), (flags))) 1820#define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
1805#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp)) 1821#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
1806#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp)) 1822#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
1807#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp)) 1823#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
@@ -1847,10 +1863,13 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
1847#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc)) 1863#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc))
1848#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc)) 1864#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
1849#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev)) 1865#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
1866#define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev))
1867#define radeon_get_gpu_clock_counter(rdev) (rdev)->asic->get_gpu_clock_counter((rdev))
1850 1868
1851/* Common functions */ 1869/* Common functions */
1852/* AGP */ 1870/* AGP */
1853extern int radeon_gpu_reset(struct radeon_device *rdev); 1871extern int radeon_gpu_reset(struct radeon_device *rdev);
1872extern void r600_set_bios_scratch_engine_hung(struct radeon_device *rdev, bool hung);
1854extern void radeon_agp_disable(struct radeon_device *rdev); 1873extern void radeon_agp_disable(struct radeon_device *rdev);
1855extern int radeon_modeset_init(struct radeon_device *rdev); 1874extern int radeon_modeset_init(struct radeon_device *rdev);
1856extern void radeon_modeset_fini(struct radeon_device *rdev); 1875extern void radeon_modeset_fini(struct radeon_device *rdev);
@@ -1972,6 +1991,19 @@ static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
1972static inline void radeon_acpi_fini(struct radeon_device *rdev) { } 1991static inline void radeon_acpi_fini(struct radeon_device *rdev) { }
1973#endif 1992#endif
1974 1993
1994int radeon_cs_packet_parse(struct radeon_cs_parser *p,
1995 struct radeon_cs_packet *pkt,
1996 unsigned idx);
1997bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p);
1998void radeon_cs_dump_packet(struct radeon_cs_parser *p,
1999 struct radeon_cs_packet *pkt);
2000int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
2001 struct radeon_cs_reloc **cs_reloc,
2002 int nomm);
2003int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
2004 uint32_t *vline_start_end,
2005 uint32_t *vline_status);
2006
1975#include "radeon_object.h" 2007#include "radeon_object.h"
1976 2008
1977#endif 2009#endif
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 0b202c07fe50..aba0a893ea98 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -934,6 +934,8 @@ static struct radeon_asic r600_asic = {
934 .ioctl_wait_idle = r600_ioctl_wait_idle, 934 .ioctl_wait_idle = r600_ioctl_wait_idle,
935 .gui_idle = &r600_gui_idle, 935 .gui_idle = &r600_gui_idle,
936 .mc_wait_for_idle = &r600_mc_wait_for_idle, 936 .mc_wait_for_idle = &r600_mc_wait_for_idle,
937 .get_xclk = &r600_get_xclk,
938 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
937 .gart = { 939 .gart = {
938 .tlb_flush = &r600_pcie_gart_tlb_flush, 940 .tlb_flush = &r600_pcie_gart_tlb_flush,
939 .set_page = &rs600_gart_set_page, 941 .set_page = &rs600_gart_set_page,
@@ -946,7 +948,7 @@ static struct radeon_asic r600_asic = {
946 .cs_parse = &r600_cs_parse, 948 .cs_parse = &r600_cs_parse,
947 .ring_test = &r600_ring_test, 949 .ring_test = &r600_ring_test,
948 .ib_test = &r600_ib_test, 950 .ib_test = &r600_ib_test,
949 .is_lockup = &r600_gpu_is_lockup, 951 .is_lockup = &r600_gfx_is_lockup,
950 }, 952 },
951 [R600_RING_TYPE_DMA_INDEX] = { 953 [R600_RING_TYPE_DMA_INDEX] = {
952 .ib_execute = &r600_dma_ring_ib_execute, 954 .ib_execute = &r600_dma_ring_ib_execute,
@@ -1018,6 +1020,8 @@ static struct radeon_asic rs780_asic = {
1018 .ioctl_wait_idle = r600_ioctl_wait_idle, 1020 .ioctl_wait_idle = r600_ioctl_wait_idle,
1019 .gui_idle = &r600_gui_idle, 1021 .gui_idle = &r600_gui_idle,
1020 .mc_wait_for_idle = &r600_mc_wait_for_idle, 1022 .mc_wait_for_idle = &r600_mc_wait_for_idle,
1023 .get_xclk = &r600_get_xclk,
1024 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1021 .gart = { 1025 .gart = {
1022 .tlb_flush = &r600_pcie_gart_tlb_flush, 1026 .tlb_flush = &r600_pcie_gart_tlb_flush,
1023 .set_page = &rs600_gart_set_page, 1027 .set_page = &rs600_gart_set_page,
@@ -1030,7 +1034,7 @@ static struct radeon_asic rs780_asic = {
1030 .cs_parse = &r600_cs_parse, 1034 .cs_parse = &r600_cs_parse,
1031 .ring_test = &r600_ring_test, 1035 .ring_test = &r600_ring_test,
1032 .ib_test = &r600_ib_test, 1036 .ib_test = &r600_ib_test,
1033 .is_lockup = &r600_gpu_is_lockup, 1037 .is_lockup = &r600_gfx_is_lockup,
1034 }, 1038 },
1035 [R600_RING_TYPE_DMA_INDEX] = { 1039 [R600_RING_TYPE_DMA_INDEX] = {
1036 .ib_execute = &r600_dma_ring_ib_execute, 1040 .ib_execute = &r600_dma_ring_ib_execute,
@@ -1102,6 +1106,8 @@ static struct radeon_asic rv770_asic = {
1102 .ioctl_wait_idle = r600_ioctl_wait_idle, 1106 .ioctl_wait_idle = r600_ioctl_wait_idle,
1103 .gui_idle = &r600_gui_idle, 1107 .gui_idle = &r600_gui_idle,
1104 .mc_wait_for_idle = &r600_mc_wait_for_idle, 1108 .mc_wait_for_idle = &r600_mc_wait_for_idle,
1109 .get_xclk = &rv770_get_xclk,
1110 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1105 .gart = { 1111 .gart = {
1106 .tlb_flush = &r600_pcie_gart_tlb_flush, 1112 .tlb_flush = &r600_pcie_gart_tlb_flush,
1107 .set_page = &rs600_gart_set_page, 1113 .set_page = &rs600_gart_set_page,
@@ -1114,7 +1120,7 @@ static struct radeon_asic rv770_asic = {
1114 .cs_parse = &r600_cs_parse, 1120 .cs_parse = &r600_cs_parse,
1115 .ring_test = &r600_ring_test, 1121 .ring_test = &r600_ring_test,
1116 .ib_test = &r600_ib_test, 1122 .ib_test = &r600_ib_test,
1117 .is_lockup = &r600_gpu_is_lockup, 1123 .is_lockup = &r600_gfx_is_lockup,
1118 }, 1124 },
1119 [R600_RING_TYPE_DMA_INDEX] = { 1125 [R600_RING_TYPE_DMA_INDEX] = {
1120 .ib_execute = &r600_dma_ring_ib_execute, 1126 .ib_execute = &r600_dma_ring_ib_execute,
@@ -1186,6 +1192,8 @@ static struct radeon_asic evergreen_asic = {
1186 .ioctl_wait_idle = r600_ioctl_wait_idle, 1192 .ioctl_wait_idle = r600_ioctl_wait_idle,
1187 .gui_idle = &r600_gui_idle, 1193 .gui_idle = &r600_gui_idle,
1188 .mc_wait_for_idle = &evergreen_mc_wait_for_idle, 1194 .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
1195 .get_xclk = &rv770_get_xclk,
1196 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1189 .gart = { 1197 .gart = {
1190 .tlb_flush = &evergreen_pcie_gart_tlb_flush, 1198 .tlb_flush = &evergreen_pcie_gart_tlb_flush,
1191 .set_page = &rs600_gart_set_page, 1199 .set_page = &rs600_gart_set_page,
@@ -1198,7 +1206,7 @@ static struct radeon_asic evergreen_asic = {
1198 .cs_parse = &evergreen_cs_parse, 1206 .cs_parse = &evergreen_cs_parse,
1199 .ring_test = &r600_ring_test, 1207 .ring_test = &r600_ring_test,
1200 .ib_test = &r600_ib_test, 1208 .ib_test = &r600_ib_test,
1201 .is_lockup = &evergreen_gpu_is_lockup, 1209 .is_lockup = &evergreen_gfx_is_lockup,
1202 }, 1210 },
1203 [R600_RING_TYPE_DMA_INDEX] = { 1211 [R600_RING_TYPE_DMA_INDEX] = {
1204 .ib_execute = &evergreen_dma_ring_ib_execute, 1212 .ib_execute = &evergreen_dma_ring_ib_execute,
@@ -1207,7 +1215,7 @@ static struct radeon_asic evergreen_asic = {
1207 .cs_parse = &evergreen_dma_cs_parse, 1215 .cs_parse = &evergreen_dma_cs_parse,
1208 .ring_test = &r600_dma_ring_test, 1216 .ring_test = &r600_dma_ring_test,
1209 .ib_test = &r600_dma_ib_test, 1217 .ib_test = &r600_dma_ib_test,
1210 .is_lockup = &r600_dma_is_lockup, 1218 .is_lockup = &evergreen_dma_is_lockup,
1211 } 1219 }
1212 }, 1220 },
1213 .irq = { 1221 .irq = {
@@ -1270,6 +1278,8 @@ static struct radeon_asic sumo_asic = {
1270 .ioctl_wait_idle = r600_ioctl_wait_idle, 1278 .ioctl_wait_idle = r600_ioctl_wait_idle,
1271 .gui_idle = &r600_gui_idle, 1279 .gui_idle = &r600_gui_idle,
1272 .mc_wait_for_idle = &evergreen_mc_wait_for_idle, 1280 .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
1281 .get_xclk = &r600_get_xclk,
1282 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1273 .gart = { 1283 .gart = {
1274 .tlb_flush = &evergreen_pcie_gart_tlb_flush, 1284 .tlb_flush = &evergreen_pcie_gart_tlb_flush,
1275 .set_page = &rs600_gart_set_page, 1285 .set_page = &rs600_gart_set_page,
@@ -1282,7 +1292,7 @@ static struct radeon_asic sumo_asic = {
1282 .cs_parse = &evergreen_cs_parse, 1292 .cs_parse = &evergreen_cs_parse,
1283 .ring_test = &r600_ring_test, 1293 .ring_test = &r600_ring_test,
1284 .ib_test = &r600_ib_test, 1294 .ib_test = &r600_ib_test,
1285 .is_lockup = &evergreen_gpu_is_lockup, 1295 .is_lockup = &evergreen_gfx_is_lockup,
1286 }, 1296 },
1287 [R600_RING_TYPE_DMA_INDEX] = { 1297 [R600_RING_TYPE_DMA_INDEX] = {
1288 .ib_execute = &evergreen_dma_ring_ib_execute, 1298 .ib_execute = &evergreen_dma_ring_ib_execute,
@@ -1291,7 +1301,7 @@ static struct radeon_asic sumo_asic = {
1291 .cs_parse = &evergreen_dma_cs_parse, 1301 .cs_parse = &evergreen_dma_cs_parse,
1292 .ring_test = &r600_dma_ring_test, 1302 .ring_test = &r600_dma_ring_test,
1293 .ib_test = &r600_dma_ib_test, 1303 .ib_test = &r600_dma_ib_test,
1294 .is_lockup = &r600_dma_is_lockup, 1304 .is_lockup = &evergreen_dma_is_lockup,
1295 } 1305 }
1296 }, 1306 },
1297 .irq = { 1307 .irq = {
@@ -1354,6 +1364,8 @@ static struct radeon_asic btc_asic = {
1354 .ioctl_wait_idle = r600_ioctl_wait_idle, 1364 .ioctl_wait_idle = r600_ioctl_wait_idle,
1355 .gui_idle = &r600_gui_idle, 1365 .gui_idle = &r600_gui_idle,
1356 .mc_wait_for_idle = &evergreen_mc_wait_for_idle, 1366 .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
1367 .get_xclk = &rv770_get_xclk,
1368 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1357 .gart = { 1369 .gart = {
1358 .tlb_flush = &evergreen_pcie_gart_tlb_flush, 1370 .tlb_flush = &evergreen_pcie_gart_tlb_flush,
1359 .set_page = &rs600_gart_set_page, 1371 .set_page = &rs600_gart_set_page,
@@ -1366,7 +1378,7 @@ static struct radeon_asic btc_asic = {
1366 .cs_parse = &evergreen_cs_parse, 1378 .cs_parse = &evergreen_cs_parse,
1367 .ring_test = &r600_ring_test, 1379 .ring_test = &r600_ring_test,
1368 .ib_test = &r600_ib_test, 1380 .ib_test = &r600_ib_test,
1369 .is_lockup = &evergreen_gpu_is_lockup, 1381 .is_lockup = &evergreen_gfx_is_lockup,
1370 }, 1382 },
1371 [R600_RING_TYPE_DMA_INDEX] = { 1383 [R600_RING_TYPE_DMA_INDEX] = {
1372 .ib_execute = &evergreen_dma_ring_ib_execute, 1384 .ib_execute = &evergreen_dma_ring_ib_execute,
@@ -1375,7 +1387,7 @@ static struct radeon_asic btc_asic = {
1375 .cs_parse = &evergreen_dma_cs_parse, 1387 .cs_parse = &evergreen_dma_cs_parse,
1376 .ring_test = &r600_dma_ring_test, 1388 .ring_test = &r600_dma_ring_test,
1377 .ib_test = &r600_dma_ib_test, 1389 .ib_test = &r600_dma_ib_test,
1378 .is_lockup = &r600_dma_is_lockup, 1390 .is_lockup = &evergreen_dma_is_lockup,
1379 } 1391 }
1380 }, 1392 },
1381 .irq = { 1393 .irq = {
@@ -1438,6 +1450,8 @@ static struct radeon_asic cayman_asic = {
1438 .ioctl_wait_idle = r600_ioctl_wait_idle, 1450 .ioctl_wait_idle = r600_ioctl_wait_idle,
1439 .gui_idle = &r600_gui_idle, 1451 .gui_idle = &r600_gui_idle,
1440 .mc_wait_for_idle = &evergreen_mc_wait_for_idle, 1452 .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
1453 .get_xclk = &rv770_get_xclk,
1454 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1441 .gart = { 1455 .gart = {
1442 .tlb_flush = &cayman_pcie_gart_tlb_flush, 1456 .tlb_flush = &cayman_pcie_gart_tlb_flush,
1443 .set_page = &rs600_gart_set_page, 1457 .set_page = &rs600_gart_set_page,
@@ -1445,7 +1459,7 @@ static struct radeon_asic cayman_asic = {
1445 .vm = { 1459 .vm = {
1446 .init = &cayman_vm_init, 1460 .init = &cayman_vm_init,
1447 .fini = &cayman_vm_fini, 1461 .fini = &cayman_vm_fini,
1448 .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1462 .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
1449 .set_page = &cayman_vm_set_page, 1463 .set_page = &cayman_vm_set_page,
1450 }, 1464 },
1451 .ring = { 1465 .ring = {
@@ -1457,7 +1471,7 @@ static struct radeon_asic cayman_asic = {
1457 .cs_parse = &evergreen_cs_parse, 1471 .cs_parse = &evergreen_cs_parse,
1458 .ring_test = &r600_ring_test, 1472 .ring_test = &r600_ring_test,
1459 .ib_test = &r600_ib_test, 1473 .ib_test = &r600_ib_test,
1460 .is_lockup = &evergreen_gpu_is_lockup, 1474 .is_lockup = &cayman_gfx_is_lockup,
1461 .vm_flush = &cayman_vm_flush, 1475 .vm_flush = &cayman_vm_flush,
1462 }, 1476 },
1463 [CAYMAN_RING_TYPE_CP1_INDEX] = { 1477 [CAYMAN_RING_TYPE_CP1_INDEX] = {
@@ -1468,7 +1482,7 @@ static struct radeon_asic cayman_asic = {
1468 .cs_parse = &evergreen_cs_parse, 1482 .cs_parse = &evergreen_cs_parse,
1469 .ring_test = &r600_ring_test, 1483 .ring_test = &r600_ring_test,
1470 .ib_test = &r600_ib_test, 1484 .ib_test = &r600_ib_test,
1471 .is_lockup = &evergreen_gpu_is_lockup, 1485 .is_lockup = &cayman_gfx_is_lockup,
1472 .vm_flush = &cayman_vm_flush, 1486 .vm_flush = &cayman_vm_flush,
1473 }, 1487 },
1474 [CAYMAN_RING_TYPE_CP2_INDEX] = { 1488 [CAYMAN_RING_TYPE_CP2_INDEX] = {
@@ -1479,7 +1493,7 @@ static struct radeon_asic cayman_asic = {
1479 .cs_parse = &evergreen_cs_parse, 1493 .cs_parse = &evergreen_cs_parse,
1480 .ring_test = &r600_ring_test, 1494 .ring_test = &r600_ring_test,
1481 .ib_test = &r600_ib_test, 1495 .ib_test = &r600_ib_test,
1482 .is_lockup = &evergreen_gpu_is_lockup, 1496 .is_lockup = &cayman_gfx_is_lockup,
1483 .vm_flush = &cayman_vm_flush, 1497 .vm_flush = &cayman_vm_flush,
1484 }, 1498 },
1485 [R600_RING_TYPE_DMA_INDEX] = { 1499 [R600_RING_TYPE_DMA_INDEX] = {
@@ -1565,6 +1579,8 @@ static struct radeon_asic trinity_asic = {
1565 .ioctl_wait_idle = r600_ioctl_wait_idle, 1579 .ioctl_wait_idle = r600_ioctl_wait_idle,
1566 .gui_idle = &r600_gui_idle, 1580 .gui_idle = &r600_gui_idle,
1567 .mc_wait_for_idle = &evergreen_mc_wait_for_idle, 1581 .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
1582 .get_xclk = &r600_get_xclk,
1583 .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
1568 .gart = { 1584 .gart = {
1569 .tlb_flush = &cayman_pcie_gart_tlb_flush, 1585 .tlb_flush = &cayman_pcie_gart_tlb_flush,
1570 .set_page = &rs600_gart_set_page, 1586 .set_page = &rs600_gart_set_page,
@@ -1572,7 +1588,7 @@ static struct radeon_asic trinity_asic = {
1572 .vm = { 1588 .vm = {
1573 .init = &cayman_vm_init, 1589 .init = &cayman_vm_init,
1574 .fini = &cayman_vm_fini, 1590 .fini = &cayman_vm_fini,
1575 .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1591 .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
1576 .set_page = &cayman_vm_set_page, 1592 .set_page = &cayman_vm_set_page,
1577 }, 1593 },
1578 .ring = { 1594 .ring = {
@@ -1584,7 +1600,7 @@ static struct radeon_asic trinity_asic = {
1584 .cs_parse = &evergreen_cs_parse, 1600 .cs_parse = &evergreen_cs_parse,
1585 .ring_test = &r600_ring_test, 1601 .ring_test = &r600_ring_test,
1586 .ib_test = &r600_ib_test, 1602 .ib_test = &r600_ib_test,
1587 .is_lockup = &evergreen_gpu_is_lockup, 1603 .is_lockup = &cayman_gfx_is_lockup,
1588 .vm_flush = &cayman_vm_flush, 1604 .vm_flush = &cayman_vm_flush,
1589 }, 1605 },
1590 [CAYMAN_RING_TYPE_CP1_INDEX] = { 1606 [CAYMAN_RING_TYPE_CP1_INDEX] = {
@@ -1595,7 +1611,7 @@ static struct radeon_asic trinity_asic = {
1595 .cs_parse = &evergreen_cs_parse, 1611 .cs_parse = &evergreen_cs_parse,
1596 .ring_test = &r600_ring_test, 1612 .ring_test = &r600_ring_test,
1597 .ib_test = &r600_ib_test, 1613 .ib_test = &r600_ib_test,
1598 .is_lockup = &evergreen_gpu_is_lockup, 1614 .is_lockup = &cayman_gfx_is_lockup,
1599 .vm_flush = &cayman_vm_flush, 1615 .vm_flush = &cayman_vm_flush,
1600 }, 1616 },
1601 [CAYMAN_RING_TYPE_CP2_INDEX] = { 1617 [CAYMAN_RING_TYPE_CP2_INDEX] = {
@@ -1606,7 +1622,7 @@ static struct radeon_asic trinity_asic = {
1606 .cs_parse = &evergreen_cs_parse, 1622 .cs_parse = &evergreen_cs_parse,
1607 .ring_test = &r600_ring_test, 1623 .ring_test = &r600_ring_test,
1608 .ib_test = &r600_ib_test, 1624 .ib_test = &r600_ib_test,
1609 .is_lockup = &evergreen_gpu_is_lockup, 1625 .is_lockup = &cayman_gfx_is_lockup,
1610 .vm_flush = &cayman_vm_flush, 1626 .vm_flush = &cayman_vm_flush,
1611 }, 1627 },
1612 [R600_RING_TYPE_DMA_INDEX] = { 1628 [R600_RING_TYPE_DMA_INDEX] = {
@@ -1692,6 +1708,8 @@ static struct radeon_asic si_asic = {
1692 .ioctl_wait_idle = r600_ioctl_wait_idle, 1708 .ioctl_wait_idle = r600_ioctl_wait_idle,
1693 .gui_idle = &r600_gui_idle, 1709 .gui_idle = &r600_gui_idle,
1694 .mc_wait_for_idle = &evergreen_mc_wait_for_idle, 1710 .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
1711 .get_xclk = &si_get_xclk,
1712 .get_gpu_clock_counter = &si_get_gpu_clock_counter,
1695 .gart = { 1713 .gart = {
1696 .tlb_flush = &si_pcie_gart_tlb_flush, 1714 .tlb_flush = &si_pcie_gart_tlb_flush,
1697 .set_page = &rs600_gart_set_page, 1715 .set_page = &rs600_gart_set_page,
@@ -1699,7 +1717,7 @@ static struct radeon_asic si_asic = {
1699 .vm = { 1717 .vm = {
1700 .init = &si_vm_init, 1718 .init = &si_vm_init,
1701 .fini = &si_vm_fini, 1719 .fini = &si_vm_fini,
1702 .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1720 .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
1703 .set_page = &si_vm_set_page, 1721 .set_page = &si_vm_set_page,
1704 }, 1722 },
1705 .ring = { 1723 .ring = {
@@ -1711,7 +1729,7 @@ static struct radeon_asic si_asic = {
1711 .cs_parse = NULL, 1729 .cs_parse = NULL,
1712 .ring_test = &r600_ring_test, 1730 .ring_test = &r600_ring_test,
1713 .ib_test = &r600_ib_test, 1731 .ib_test = &r600_ib_test,
1714 .is_lockup = &si_gpu_is_lockup, 1732 .is_lockup = &si_gfx_is_lockup,
1715 .vm_flush = &si_vm_flush, 1733 .vm_flush = &si_vm_flush,
1716 }, 1734 },
1717 [CAYMAN_RING_TYPE_CP1_INDEX] = { 1735 [CAYMAN_RING_TYPE_CP1_INDEX] = {
@@ -1722,7 +1740,7 @@ static struct radeon_asic si_asic = {
1722 .cs_parse = NULL, 1740 .cs_parse = NULL,
1723 .ring_test = &r600_ring_test, 1741 .ring_test = &r600_ring_test,
1724 .ib_test = &r600_ib_test, 1742 .ib_test = &r600_ib_test,
1725 .is_lockup = &si_gpu_is_lockup, 1743 .is_lockup = &si_gfx_is_lockup,
1726 .vm_flush = &si_vm_flush, 1744 .vm_flush = &si_vm_flush,
1727 }, 1745 },
1728 [CAYMAN_RING_TYPE_CP2_INDEX] = { 1746 [CAYMAN_RING_TYPE_CP2_INDEX] = {
@@ -1733,7 +1751,7 @@ static struct radeon_asic si_asic = {
1733 .cs_parse = NULL, 1751 .cs_parse = NULL,
1734 .ring_test = &r600_ring_test, 1752 .ring_test = &r600_ring_test,
1735 .ib_test = &r600_ib_test, 1753 .ib_test = &r600_ib_test,
1736 .is_lockup = &si_gpu_is_lockup, 1754 .is_lockup = &si_gfx_is_lockup,
1737 .vm_flush = &si_vm_flush, 1755 .vm_flush = &si_vm_flush,
1738 }, 1756 },
1739 [R600_RING_TYPE_DMA_INDEX] = { 1757 [R600_RING_TYPE_DMA_INDEX] = {
@@ -1744,7 +1762,7 @@ static struct radeon_asic si_asic = {
1744 .cs_parse = NULL, 1762 .cs_parse = NULL,
1745 .ring_test = &r600_dma_ring_test, 1763 .ring_test = &r600_dma_ring_test,
1746 .ib_test = &r600_dma_ib_test, 1764 .ib_test = &r600_dma_ib_test,
1747 .is_lockup = &cayman_dma_is_lockup, 1765 .is_lockup = &si_dma_is_lockup,
1748 .vm_flush = &si_dma_vm_flush, 1766 .vm_flush = &si_dma_vm_flush,
1749 }, 1767 },
1750 [CAYMAN_RING_TYPE_DMA1_INDEX] = { 1768 [CAYMAN_RING_TYPE_DMA1_INDEX] = {
@@ -1755,7 +1773,7 @@ static struct radeon_asic si_asic = {
1755 .cs_parse = NULL, 1773 .cs_parse = NULL,
1756 .ring_test = &r600_dma_ring_test, 1774 .ring_test = &r600_dma_ring_test,
1757 .ib_test = &r600_dma_ib_test, 1775 .ib_test = &r600_dma_ib_test,
1758 .is_lockup = &cayman_dma_is_lockup, 1776 .is_lockup = &si_dma_is_lockup,
1759 .vm_flush = &si_dma_vm_flush, 1777 .vm_flush = &si_dma_vm_flush,
1760 } 1778 }
1761 }, 1779 },
@@ -1944,9 +1962,13 @@ int radeon_asic_init(struct radeon_device *rdev)
1944 case CHIP_TAHITI: 1962 case CHIP_TAHITI:
1945 case CHIP_PITCAIRN: 1963 case CHIP_PITCAIRN:
1946 case CHIP_VERDE: 1964 case CHIP_VERDE:
1965 case CHIP_OLAND:
1947 rdev->asic = &si_asic; 1966 rdev->asic = &si_asic;
1948 /* set num crtcs */ 1967 /* set num crtcs */
1949 rdev->num_crtc = 6; 1968 if (rdev->family == CHIP_OLAND)
1969 rdev->num_crtc = 2;
1970 else
1971 rdev->num_crtc = 6;
1950 break; 1972 break;
1951 default: 1973 default:
1952 /* FIXME: not supported yet */ 1974 /* FIXME: not supported yet */
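[note] Every family table above now fills in the same two callbacks, so generic code can query clocks through the dispatch macros without branching on rdev->family. An illustrative caller (the helper name is made up for the example):

	/* Illustrative: read the reference clock and the free-running GPU
	 * counter via the new per-ASIC hooks. */
	static void example_dump_clocks(struct radeon_device *rdev)
	{
		u32 xclk = radeon_get_xclk(rdev);
		uint64_t count = radeon_get_gpu_clock_counter(rdev);

		DRM_INFO("xclk %u, gpu clock counter %llu\n",
			 xclk, (unsigned long long)count);
	}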
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 15d70e613076..3535f73ad3e2 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -319,7 +319,7 @@ void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
319 bool emit_wait); 319 bool emit_wait);
320void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); 320void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
321bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring); 321bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
322bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp); 322bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
323int r600_asic_reset(struct radeon_device *rdev); 323int r600_asic_reset(struct radeon_device *rdev);
324int r600_set_surface_reg(struct radeon_device *rdev, int reg, 324int r600_set_surface_reg(struct radeon_device *rdev, int reg,
325 uint32_t tiling_flags, uint32_t pitch, 325 uint32_t tiling_flags, uint32_t pitch,
@@ -389,7 +389,8 @@ void r600_kms_blit_copy(struct radeon_device *rdev,
389 unsigned num_gpu_pages, 389 unsigned num_gpu_pages,
390 struct radeon_sa_bo *vb); 390 struct radeon_sa_bo *vb);
391int r600_mc_wait_for_idle(struct radeon_device *rdev); 391int r600_mc_wait_for_idle(struct radeon_device *rdev);
392uint64_t r600_get_gpu_clock(struct radeon_device *rdev); 392u32 r600_get_xclk(struct radeon_device *rdev);
393uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
393 394
394/* 395/*
395 * rv770,rv730,rv710,rv740 396 * rv770,rv730,rv710,rv740
@@ -407,6 +408,7 @@ int rv770_copy_dma(struct radeon_device *rdev,
407 uint64_t src_offset, uint64_t dst_offset, 408 uint64_t src_offset, uint64_t dst_offset,
408 unsigned num_gpu_pages, 409 unsigned num_gpu_pages,
409 struct radeon_fence **fence); 410 struct radeon_fence **fence);
411u32 rv770_get_xclk(struct radeon_device *rdev);
410 412
411/* 413/*
412 * evergreen 414 * evergreen
@@ -422,7 +424,8 @@ int evergreen_init(struct radeon_device *rdev);
422void evergreen_fini(struct radeon_device *rdev); 424void evergreen_fini(struct radeon_device *rdev);
423int evergreen_suspend(struct radeon_device *rdev); 425int evergreen_suspend(struct radeon_device *rdev);
424int evergreen_resume(struct radeon_device *rdev); 426int evergreen_resume(struct radeon_device *rdev);
425bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp); 427bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
428bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
426int evergreen_asic_reset(struct radeon_device *rdev); 429int evergreen_asic_reset(struct radeon_device *rdev);
427void evergreen_bandwidth_update(struct radeon_device *rdev); 430void evergreen_bandwidth_update(struct radeon_device *rdev);
428void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); 431void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
@@ -473,13 +476,16 @@ int cayman_vm_init(struct radeon_device *rdev);
473void cayman_vm_fini(struct radeon_device *rdev); 476void cayman_vm_fini(struct radeon_device *rdev);
474void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); 477void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
475uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags); 478uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
476void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe, 479void cayman_vm_set_page(struct radeon_device *rdev,
480 struct radeon_ib *ib,
481 uint64_t pe,
477 uint64_t addr, unsigned count, 482 uint64_t addr, unsigned count,
478 uint32_t incr, uint32_t flags); 483 uint32_t incr, uint32_t flags);
479int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); 484int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
480int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); 485int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
481void cayman_dma_ring_ib_execute(struct radeon_device *rdev, 486void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
482 struct radeon_ib *ib); 487 struct radeon_ib *ib);
488bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
483bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring); 489bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
484void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); 490void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
485 491
@@ -496,23 +502,27 @@ int si_init(struct radeon_device *rdev);
496void si_fini(struct radeon_device *rdev); 502void si_fini(struct radeon_device *rdev);
497int si_suspend(struct radeon_device *rdev); 503int si_suspend(struct radeon_device *rdev);
498int si_resume(struct radeon_device *rdev); 504int si_resume(struct radeon_device *rdev);
499bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp); 505bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
506bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
500int si_asic_reset(struct radeon_device *rdev); 507int si_asic_reset(struct radeon_device *rdev);
501void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); 508void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
502int si_irq_set(struct radeon_device *rdev); 509int si_irq_set(struct radeon_device *rdev);
503int si_irq_process(struct radeon_device *rdev); 510int si_irq_process(struct radeon_device *rdev);
504int si_vm_init(struct radeon_device *rdev); 511int si_vm_init(struct radeon_device *rdev);
505void si_vm_fini(struct radeon_device *rdev); 512void si_vm_fini(struct radeon_device *rdev);
506void si_vm_set_page(struct radeon_device *rdev, uint64_t pe, 513void si_vm_set_page(struct radeon_device *rdev,
514 struct radeon_ib *ib,
515 uint64_t pe,
507 uint64_t addr, unsigned count, 516 uint64_t addr, unsigned count,
508 uint32_t incr, uint32_t flags); 517 uint32_t incr, uint32_t flags);
509void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); 518void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
510int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); 519int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
511uint64_t si_get_gpu_clock(struct radeon_device *rdev);
512int si_copy_dma(struct radeon_device *rdev, 520int si_copy_dma(struct radeon_device *rdev,
513 uint64_t src_offset, uint64_t dst_offset, 521 uint64_t src_offset, uint64_t dst_offset,
514 unsigned num_gpu_pages, 522 unsigned num_gpu_pages,
515 struct radeon_fence **fence); 523 struct radeon_fence **fence);
516void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); 524void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
525u32 si_get_xclk(struct radeon_device *rdev);
526uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
517 527
518#endif 528#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 15f5ded65e0c..d96070bf8388 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -43,6 +43,12 @@ struct atpx_verify_interface {
43 u32 function_bits; /* supported functions bit vector */ 43 u32 function_bits; /* supported functions bit vector */
44} __packed; 44} __packed;
45 45
46struct atpx_px_params {
47 u16 size; /* structure size in bytes (includes size field) */
48 u32 valid_flags; /* which flags are valid */
49 u32 flags; /* flags */
50} __packed;
51
46struct atpx_power_control { 52struct atpx_power_control {
47 u16 size; 53 u16 size;
48 u8 dgpu_state; 54 u8 dgpu_state;
@@ -123,9 +129,61 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
123} 129}
124 130
125/** 131/**
132 * radeon_atpx_validate - validate ATPX functions
133 *
134 * @atpx: radeon atpx struct
135 *
136 * Validate that required functions are enabled (all asics).
137 * Returns 0 on success, error on failure.
138 */
139static int radeon_atpx_validate(struct radeon_atpx *atpx)
140{
141 /* make sure required functions are enabled */
142 /* dGPU power control is required */
143 atpx->functions.power_cntl = true;
144
145 if (atpx->functions.px_params) {
146 union acpi_object *info;
147 struct atpx_px_params output;
148 size_t size;
149 u32 valid_bits;
150
151 info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL);
152 if (!info)
153 return -EIO;
154
155 memset(&output, 0, sizeof(output));
156
157 size = *(u16 *) info->buffer.pointer;
158 if (size < 10) {
159 printk("ATPX buffer is too small: %zu\n", size);
160 kfree(info);
161 return -EINVAL;
162 }
163 size = min(sizeof(output), size);
164
165 memcpy(&output, info->buffer.pointer, size);
166
167 valid_bits = output.flags & output.valid_flags;
168 /* if separate mux flag is set, mux controls are required */
169 if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
170 atpx->functions.i2c_mux_cntl = true;
171 atpx->functions.disp_mux_cntl = true;
172 }
173 /* if any outputs are muxed, mux controls are required */
174 if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
175 ATPX_TV_SIGNAL_MUXED |
176 ATPX_DFP_SIGNAL_MUXED))
177 atpx->functions.disp_mux_cntl = true;
178
179 kfree(info);
180 }
181 return 0;
182}
183
184/**
126 * radeon_atpx_verify_interface - verify ATPX 185 * radeon_atpx_verify_interface - verify ATPX
127 * 186 *
128 * @handle: acpi handle
129 * @atpx: radeon atpx struct 187 * @atpx: radeon atpx struct
130 * 188 *
131 * Execute the ATPX_FUNCTION_VERIFY_INTERFACE ATPX function 189 * Execute the ATPX_FUNCTION_VERIFY_INTERFACE ATPX function
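[note] radeon_atpx_validate() copies a variable-length ACPI buffer into a fixed struct, so it rejects undersized buffers and clamps the copy with min(). The same guard pattern in isolation, with illustrative names:

	/* Illustrative: never trust the firmware-reported size; reject
	 * anything below the minimum layout and never copy more than the
	 * destination holds. */
	static int example_copy_acpi_buf(void *dst, size_t dst_size,
					 const void *src, size_t src_size,
					 size_t min_valid)
	{
		if (src_size < min_valid)
			return -EINVAL;
		memset(dst, 0, dst_size);
		memcpy(dst, src, min(dst_size, src_size));
		return 0;
	}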
@@ -406,8 +464,19 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
406 */ 464 */
407static int radeon_atpx_init(void) 465static int radeon_atpx_init(void)
408{ 466{
467 int r;
468
409 /* set up the ATPX handle */ 469 /* set up the ATPX handle */
410 return radeon_atpx_verify_interface(&radeon_atpx_priv.atpx); 470 r = radeon_atpx_verify_interface(&radeon_atpx_priv.atpx);
471 if (r)
472 return r;
473
474 /* validate the atpx setup */
475 r = radeon_atpx_validate(&radeon_atpx_priv.atpx);
476 if (r)
477 return r;
478
479 return 0;
411} 480}
412 481
413/** 482/**
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 9143fc45e35b..efc4f6441ef4 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -27,6 +27,8 @@
27 * Authors: 27 * Authors:
28 * Kevin E. Martin <martin@valinux.com> 28 * Kevin E. Martin <martin@valinux.com>
29 * Gareth Hughes <gareth@valinux.com> 29 * Gareth Hughes <gareth@valinux.com>
30 *
31 * ------------------------ This file is DEPRECATED! -------------------------
30 */ 32 */
31 33
32#include <linux/module.h> 34#include <linux/module.h>
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 5407459e56d2..70d38241b083 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -29,9 +29,6 @@
29#include "radeon_reg.h" 29#include "radeon_reg.h"
30#include "radeon.h" 30#include "radeon.h"
31 31
32void r100_cs_dump_packet(struct radeon_cs_parser *p,
33 struct radeon_cs_packet *pkt);
34
35static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) 32static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
36{ 33{
37 struct drm_device *ddev = p->rdev->ddev; 34 struct drm_device *ddev = p->rdev->ddev;
@@ -128,18 +125,6 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
128 return 0; 125 return 0;
129} 126}
130 127
131static void radeon_cs_sync_to(struct radeon_cs_parser *p,
132 struct radeon_fence *fence)
133{
134 struct radeon_fence *other;
135
136 if (!fence)
137 return;
138
139 other = p->ib.sync_to[fence->ring];
140 p->ib.sync_to[fence->ring] = radeon_fence_later(fence, other);
141}
142
143static void radeon_cs_sync_rings(struct radeon_cs_parser *p) 128static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
144{ 129{
145 int i; 130 int i;
@@ -148,7 +133,7 @@ static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
148 if (!p->relocs[i].robj) 133 if (!p->relocs[i].robj)
149 continue; 134 continue;
150 135
151 radeon_cs_sync_to(p, p->relocs[i].robj->tbo.sync_obj); 136 radeon_ib_sync_to(&p->ib, p->relocs[i].robj->tbo.sync_obj);
152 } 137 }
153} 138}
154 139
@@ -203,7 +188,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
203 p->chunks[i].length_dw = user_chunk.length_dw; 188 p->chunks[i].length_dw = user_chunk.length_dw;
204 p->chunks[i].kdata = NULL; 189 p->chunks[i].kdata = NULL;
205 p->chunks[i].chunk_id = user_chunk.chunk_id; 190 p->chunks[i].chunk_id = user_chunk.chunk_id;
206 191 p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
207 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) { 192 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
208 p->chunk_relocs_idx = i; 193 p->chunk_relocs_idx = i;
209 } 194 }
@@ -226,9 +211,6 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
226 return -EINVAL; 211 return -EINVAL;
227 } 212 }
228 213
229 p->chunks[i].length_dw = user_chunk.length_dw;
230 p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
231
232 cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data; 214 cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
233 if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) || 215 if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
234 (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) { 216 (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
@@ -478,8 +460,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
478 goto out; 460 goto out;
479 } 461 }
480 radeon_cs_sync_rings(parser); 462 radeon_cs_sync_rings(parser);
481 radeon_cs_sync_to(parser, vm->fence); 463 radeon_ib_sync_to(&parser->ib, vm->fence);
482 radeon_cs_sync_to(parser, radeon_vm_grab_id(rdev, vm, parser->ring)); 464 radeon_ib_sync_to(&parser->ib, radeon_vm_grab_id(
465 rdev, vm, parser->ring));
483 466
484 if ((rdev->family >= CHIP_TAHITI) && 467 if ((rdev->family >= CHIP_TAHITI) &&
485 (parser->chunk_const_ib_idx != -1)) { 468 (parser->chunk_const_ib_idx != -1)) {
@@ -648,3 +631,152 @@ u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
648 idx_value = ibc->kpage[new_page][pg_offset/4]; 631 idx_value = ibc->kpage[new_page][pg_offset/4];
649 return idx_value; 632 return idx_value;
650} 633}
634
635/**
636 * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
637 * @p: parser structure holding parsing context.
638 * @pkt: where to store packet information
639 *
640 * Assume that chunk_ib_index is properly set. Will return -EINVAL
641 * if the packet is bigger than the remaining ib size, or if the packet type is unknown.
642 **/
643int radeon_cs_packet_parse(struct radeon_cs_parser *p,
644 struct radeon_cs_packet *pkt,
645 unsigned idx)
646{
647 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
648 struct radeon_device *rdev = p->rdev;
649 uint32_t header;
650
651 if (idx >= ib_chunk->length_dw) {
652 DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
653 idx, ib_chunk->length_dw);
654 return -EINVAL;
655 }
656 header = radeon_get_ib_value(p, idx);
657 pkt->idx = idx;
658 pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
659 pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
660 pkt->one_reg_wr = 0;
661 switch (pkt->type) {
662 case RADEON_PACKET_TYPE0:
663 if (rdev->family < CHIP_R600) {
664 pkt->reg = R100_CP_PACKET0_GET_REG(header);
665 pkt->one_reg_wr =
666 RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
667 } else
668 pkt->reg = R600_CP_PACKET0_GET_REG(header);
669 break;
670 case RADEON_PACKET_TYPE3:
671 pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
672 break;
673 case RADEON_PACKET_TYPE2:
674 pkt->count = -1;
675 break;
676 default:
677 DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
678 return -EINVAL;
679 }
680 if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
681 DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
682 pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
683 return -EINVAL;
684 }
685 return 0;
686}
687
688/**
689 * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
690 * @p: structure holding the parser context.
691 *
692 * Check if the next packet is NOP relocation packet3.
693 **/
694bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
695{
696 struct radeon_cs_packet p3reloc;
697 int r;
698
699 r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
700 if (r)
701 return false;
702 if (p3reloc.type != RADEON_PACKET_TYPE3)
703 return false;
704 if (p3reloc.opcode != RADEON_PACKET3_NOP)
705 return false;
706 return true;
707}
708
709/**
710 * radeon_cs_dump_packet() - dump raw packet context
711 * @p: structure holding the parser context.
712 * @pkt: structure holding the packet.
713 *
714 * Used mostly for debugging and error reporting.
715 **/
716void radeon_cs_dump_packet(struct radeon_cs_parser *p,
717 struct radeon_cs_packet *pkt)
718{
719 volatile uint32_t *ib;
720 unsigned i;
721 unsigned idx;
722
723 ib = p->ib.ptr;
724 idx = pkt->idx;
725 for (i = 0; i <= (pkt->count + 1); i++, idx++)
726 DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
727}
728
729/**
730 * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
731 * @p: parser structure holding parsing context.
732 * @cs_reloc: where the resulting reloc information is stored
733 * @nomm: non-zero when relocation data is read directly from the
734 * chunk kdata (legacy UMS path) instead of the pre-built
735 * relocs_ptr table
736 *
737 * Check if next packet is relocation packet3, do bo validation and compute
738 * GPU offset from the relocation data.
739 **/
740int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
741 struct radeon_cs_reloc **cs_reloc,
742 int nomm)
743{
744 struct radeon_cs_chunk *relocs_chunk;
745 struct radeon_cs_packet p3reloc;
746 unsigned idx;
747 int r;
748
749 if (p->chunk_relocs_idx == -1) {
750 DRM_ERROR("No relocation chunk !\n");
751 return -EINVAL;
752 }
753 *cs_reloc = NULL;
754 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
755 r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
756 if (r)
757 return r;
758 p->idx += p3reloc.count + 2;
759 if (p3reloc.type != RADEON_PACKET_TYPE3 ||
760 p3reloc.opcode != RADEON_PACKET3_NOP) {
761 DRM_ERROR("No packet3 for relocation for packet at %d.\n",
762 p3reloc.idx);
763 radeon_cs_dump_packet(p, &p3reloc);
764 return -EINVAL;
765 }
766 idx = radeon_get_ib_value(p, p3reloc.idx + 1);
767 if (idx >= relocs_chunk->length_dw) {
768 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
769 idx, relocs_chunk->length_dw);
770 radeon_cs_dump_packet(p, &p3reloc);
771 return -EINVAL;
772 }
773 /* FIXME: we assume reloc size is 4 dwords */
774 if (nomm) {
775 *cs_reloc = p->relocs;
776 (*cs_reloc)->lobj.gpu_offset =
777 (u64)relocs_chunk->kdata[idx + 3] << 32;
778 (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
779 } else
780 *cs_reloc = p->relocs_ptr[(idx / 4)];
781 return 0;
782}
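[note] These helpers replace the per-generation packet walkers in the r100/r600/evergreen CS parsers. A sketch of the canonical caller loop, assuming chunk_ib_idx is set up as the kernel-doc above requires (the function itself is illustrative):

	/* Illustrative: walk an IB packet by packet with the shared
	 * parser, skipping relocation NOPs, until the chunk is consumed. */
	static int example_walk_ib(struct radeon_cs_parser *p)
	{
		struct radeon_cs_packet pkt;
		int r;

		do {
			r = radeon_cs_packet_parse(p, &pkt, p->idx);
			if (r)
				return r;
			p->idx += pkt.count + 2;
			if (pkt.type == RADEON_PACKET_TYPE3 &&
			    pkt.opcode == RADEON_PACKET3_NOP)
				continue; /* relocation packet, no payload to check */
		} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
		return 0;
	}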
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 0d67674b64b1..b097d5b4ff39 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -246,8 +246,14 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
246 int i = 0; 246 int i = 0;
247 struct drm_crtc *crtc_p; 247 struct drm_crtc *crtc_p;
248 248
249 /* avivo cursor image can't end on 128 pixel boundary or 249 /*
250 * avivo cursor image can't end on 128 pixel boundary or
250 * go past the end of the frame if both crtcs are enabled 251 * go past the end of the frame if both crtcs are enabled
252 *
253 * NOTE: It is safe to access crtc->enabled of other crtcs
254 * without holding either the mode_config lock or the other
255 * crtc's lock as long as write access to this flag _always_
256 * grabs all locks.
251 */ 257 */
252 list_for_each_entry(crtc_p, &crtc->dev->mode_config.crtc_list, head) { 258 list_for_each_entry(crtc_p, &crtc->dev->mode_config.crtc_list, head) {
253 if (crtc_p->enabled) 259 if (crtc_p->enabled)
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0d6562bb0c93..44b8034a400d 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -93,6 +93,7 @@ static const char radeon_family_name[][16] = {
93 "TAHITI", 93 "TAHITI",
94 "PITCAIRN", 94 "PITCAIRN",
95 "VERDE", 95 "VERDE",
96 "OLAND",
96 "LAST", 97 "LAST",
97}; 98};
98 99
@@ -758,6 +759,11 @@ int radeon_atombios_init(struct radeon_device *rdev)
758 atom_card_info->pll_write = cail_pll_write; 759 atom_card_info->pll_write = cail_pll_write;
759 760
760 rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios); 761 rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
762 if (!rdev->mode_info.atom_context) {
763 radeon_atombios_fini(rdev);
764 return -ENOMEM;
765 }
766
761 mutex_init(&rdev->mode_info.atom_context->mutex); 767 mutex_init(&rdev->mode_info.atom_context->mutex);
762 radeon_atom_initialize_bios_scratch_regs(rdev->ddev); 768 radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
763 atom_allocate_fb_scratch(rdev->mode_info.atom_context); 769 atom_allocate_fb_scratch(rdev->mode_info.atom_context);
@@ -777,9 +783,11 @@ void radeon_atombios_fini(struct radeon_device *rdev)
777{ 783{
778 if (rdev->mode_info.atom_context) { 784 if (rdev->mode_info.atom_context) {
779 kfree(rdev->mode_info.atom_context->scratch); 785 kfree(rdev->mode_info.atom_context->scratch);
780 kfree(rdev->mode_info.atom_context);
781 } 786 }
787 kfree(rdev->mode_info.atom_context);
788 rdev->mode_info.atom_context = NULL;
782 kfree(rdev->mode_info.atom_card_info); 789 kfree(rdev->mode_info.atom_card_info);
790 rdev->mode_info.atom_card_info = NULL;
783} 791}
784 792
785/* COMBIOS */ 793/* COMBIOS */
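[note] The radeon_atombios_fini() change exists so the new init error path can call it on a half-built context: every pointer is freed unconditionally and then cleared, making a second call a no-op. The pattern in isolation, with illustrative names:

	/* Illustrative free-and-NULL teardown: kfree(NULL) is a no-op and
	 * clearing the pointers makes fini idempotent, so init can call it
	 * on partial failure. */
	struct example_ctx {
		void *scratch;
		void *info;
	};

	static void example_fini(struct example_ctx *ctx)
	{
		kfree(ctx->scratch);	/* NULL if init failed early */
		ctx->scratch = NULL;
		kfree(ctx->info);
		ctx->info = NULL;
	}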
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 05c96fa0b051..e38fd559f1ab 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1089,12 +1089,12 @@ radeon_framebuffer_init(struct drm_device *dev,
1089{ 1089{
1090 int ret; 1090 int ret;
1091 rfb->obj = obj; 1091 rfb->obj = obj;
1092 drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
1092 ret = drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs); 1093 ret = drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
1093 if (ret) { 1094 if (ret) {
1094 rfb->obj = NULL; 1095 rfb->obj = NULL;
1095 return ret; 1096 return ret;
1096 } 1097 }
1097 drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
1098 return 0; 1098 return 0;
1099} 1099}
1100 1100
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index d9bf96ee299a..167758488ed6 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -118,20 +118,32 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
118int radeon_mode_dumb_destroy(struct drm_file *file_priv, 118int radeon_mode_dumb_destroy(struct drm_file *file_priv,
119 struct drm_device *dev, 119 struct drm_device *dev,
120 uint32_t handle); 120 uint32_t handle);
121struct dma_buf *radeon_gem_prime_export(struct drm_device *dev, 121struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
122 struct drm_gem_object *obj, 122struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
123 int flags); 123 size_t size,
124struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev, 124 struct sg_table *sg);
125 struct dma_buf *dma_buf); 125int radeon_gem_prime_pin(struct drm_gem_object *obj);
126void *radeon_gem_prime_vmap(struct drm_gem_object *obj);
127void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
128extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd,
129 unsigned long arg);
126 130
127#if defined(CONFIG_DEBUG_FS) 131#if defined(CONFIG_DEBUG_FS)
128int radeon_debugfs_init(struct drm_minor *minor); 132int radeon_debugfs_init(struct drm_minor *minor);
129void radeon_debugfs_cleanup(struct drm_minor *minor); 133void radeon_debugfs_cleanup(struct drm_minor *minor);
130#endif 134#endif
131 135
136/* atpx handler */
137#if defined(CONFIG_VGA_SWITCHEROO)
138void radeon_register_atpx_handler(void);
139void radeon_unregister_atpx_handler(void);
140#else
141static inline void radeon_register_atpx_handler(void) {}
142static inline void radeon_unregister_atpx_handler(void) {}
143#endif
132 144
133int radeon_no_wb; 145int radeon_no_wb;
134int radeon_modeset = -1; 146int radeon_modeset = 1;
135int radeon_dynclks = -1; 147int radeon_dynclks = -1;
136int radeon_r4xx_atom = 0; 148int radeon_r4xx_atom = 0;
137int radeon_agpmode = 0; 149int radeon_agpmode = 0;
@@ -199,6 +211,14 @@ module_param_named(msi, radeon_msi, int, 0444);
199MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 10000 = 10 seconds, 0 = disable)"); 211MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 10000 = 10 seconds, 0 = disable)");
200module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444); 212module_param_named(lockup_timeout, radeon_lockup_timeout, int, 0444);
201 213
214static struct pci_device_id pciidlist[] = {
215 radeon_PCI_IDS
216};
217
218MODULE_DEVICE_TABLE(pci, pciidlist);
219
220#ifdef CONFIG_DRM_RADEON_UMS
221
202static int radeon_suspend(struct drm_device *dev, pm_message_t state) 222static int radeon_suspend(struct drm_device *dev, pm_message_t state)
203{ 223{
204 drm_radeon_private_t *dev_priv = dev->dev_private; 224 drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -227,14 +247,6 @@ static int radeon_resume(struct drm_device *dev)
227 return 0; 247 return 0;
228} 248}
229 249
230static struct pci_device_id pciidlist[] = {
231 radeon_PCI_IDS
232};
233
234#if defined(CONFIG_DRM_RADEON_KMS)
235MODULE_DEVICE_TABLE(pci, pciidlist);
236#endif
237
238static const struct file_operations radeon_driver_old_fops = { 250static const struct file_operations radeon_driver_old_fops = {
239 .owner = THIS_MODULE, 251 .owner = THIS_MODULE,
240 .open = drm_open, 252 .open = drm_open,
@@ -284,6 +296,8 @@ static struct drm_driver driver_old = {
284 .patchlevel = DRIVER_PATCHLEVEL, 296 .patchlevel = DRIVER_PATCHLEVEL,
285}; 297};
286 298
299#endif
300
287static struct drm_driver kms_driver; 301static struct drm_driver kms_driver;
288 302
289static int radeon_kick_out_firmware_fb(struct pci_dev *pdev) 303static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
@@ -397,8 +411,13 @@ static struct drm_driver kms_driver = {
397 411
398 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 412 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
399 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 413 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
400 .gem_prime_export = radeon_gem_prime_export, 414 .gem_prime_export = drm_gem_prime_export,
401 .gem_prime_import = radeon_gem_prime_import, 415 .gem_prime_import = drm_gem_prime_import,
416 .gem_prime_pin = radeon_gem_prime_pin,
417 .gem_prime_get_sg_table = radeon_gem_prime_get_sg_table,
418 .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
419 .gem_prime_vmap = radeon_gem_prime_vmap,
420 .gem_prime_vunmap = radeon_gem_prime_vunmap,
402 421
403 .name = DRIVER_NAME, 422 .name = DRIVER_NAME,
404 .desc = DRIVER_DESC, 423 .desc = DRIVER_DESC,
@@ -411,10 +430,12 @@ static struct drm_driver kms_driver = {
411static struct drm_driver *driver; 430static struct drm_driver *driver;
412static struct pci_driver *pdriver; 431static struct pci_driver *pdriver;
413 432
433#ifdef CONFIG_DRM_RADEON_UMS
414static struct pci_driver radeon_pci_driver = { 434static struct pci_driver radeon_pci_driver = {
415 .name = DRIVER_NAME, 435 .name = DRIVER_NAME,
416 .id_table = pciidlist, 436 .id_table = pciidlist,
417}; 437};
438#endif
418 439
419static struct pci_driver radeon_kms_pci_driver = { 440static struct pci_driver radeon_kms_pci_driver = {
420 .name = DRIVER_NAME, 441 .name = DRIVER_NAME,
@@ -427,28 +448,6 @@ static struct pci_driver radeon_kms_pci_driver = {
427 448
428static int __init radeon_init(void) 449static int __init radeon_init(void)
429{ 450{
430 driver = &driver_old;
431 pdriver = &radeon_pci_driver;
432 driver->num_ioctls = radeon_max_ioctl;
433#ifdef CONFIG_VGA_CONSOLE
434 if (vgacon_text_force() && radeon_modeset == -1) {
435 DRM_INFO("VGACON disable radeon kernel modesetting.\n");
436 driver = &driver_old;
437 pdriver = &radeon_pci_driver;
438 driver->driver_features &= ~DRIVER_MODESET;
439 radeon_modeset = 0;
440 }
441#endif
442 /* if enabled by default */
443 if (radeon_modeset == -1) {
444#ifdef CONFIG_DRM_RADEON_KMS
445 DRM_INFO("radeon defaulting to kernel modesetting.\n");
446 radeon_modeset = 1;
447#else
448 DRM_INFO("radeon defaulting to userspace modesetting.\n");
449 radeon_modeset = 0;
450#endif
451 }
452 if (radeon_modeset == 1) { 451 if (radeon_modeset == 1) {
453 DRM_INFO("radeon kernel modesetting enabled.\n"); 452 DRM_INFO("radeon kernel modesetting enabled.\n");
454 driver = &kms_driver; 453 driver = &kms_driver;
@@ -456,9 +455,21 @@ static int __init radeon_init(void)
456 driver->driver_features |= DRIVER_MODESET; 455 driver->driver_features |= DRIVER_MODESET;
457 driver->num_ioctls = radeon_max_kms_ioctl; 456 driver->num_ioctls = radeon_max_kms_ioctl;
458 radeon_register_atpx_handler(); 457 radeon_register_atpx_handler();
458
459 } else {
460#ifdef CONFIG_DRM_RADEON_UMS
461 DRM_INFO("radeon userspace modesetting enabled.\n");
462 driver = &driver_old;
463 pdriver = &radeon_pci_driver;
464 driver->driver_features &= ~DRIVER_MODESET;
465 driver->num_ioctls = radeon_max_ioctl;
466#else
467 DRM_ERROR("No UMS support in radeon module!\n");
468 return -EINVAL;
469#endif
459 } 470 }
460 /* if the vga console setting is enabled still 471
461 * let modprobe override it */ 472 /* let modprobe override vga console setting */
462 return drm_pci_init(driver, pdriver); 473 return drm_pci_init(driver, pdriver);
463} 474}
464 475
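[note] With the default flipped to 1, kernel modesetting is always used unless the user passes modeset=0, and that fallback only exists when CONFIG_DRM_RADEON_UMS is built. A sketch of the parameter plumbing this default feeds; the module_param lines live elsewhere in radeon_drv.c and are not part of this hunk, so treat them as an assumption:

	int radeon_modeset = 1;	/* was -1: auto-detect via vgacon */
	MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
	module_param_named(modeset, radeon_modeset, int, 0400);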
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index e7fdf163a8ca..b369d42f7de5 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -113,6 +113,9 @@
113#define DRIVER_MINOR 33 113#define DRIVER_MINOR 33
114#define DRIVER_PATCHLEVEL 0 114#define DRIVER_PATCHLEVEL 0
115 115
116/* The rest of the file is DEPRECATED! */
117#ifdef CONFIG_DRM_RADEON_UMS
118
116enum radeon_cp_microcode_version { 119enum radeon_cp_microcode_version {
117 UCODE_R100, 120 UCODE_R100,
118 UCODE_R200, 121 UCODE_R200,
@@ -418,8 +421,6 @@ extern int radeon_driver_open(struct drm_device *dev,
418 struct drm_file *file_priv); 421 struct drm_file *file_priv);
419extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, 422extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
420 unsigned long arg); 423 unsigned long arg);
421extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd,
422 unsigned long arg);
423 424
424extern int radeon_master_create(struct drm_device *dev, struct drm_master *master); 425extern int radeon_master_create(struct drm_device *dev, struct drm_master *master);
425extern void radeon_master_destroy(struct drm_device *dev, struct drm_master *master); 426extern void radeon_master_destroy(struct drm_device *dev, struct drm_master *master);
@@ -462,15 +463,6 @@ extern void r600_blit_swap(struct drm_device *dev,
462 int sx, int sy, int dx, int dy, 463 int sx, int sy, int dx, int dy,
463 int w, int h, int src_pitch, int dst_pitch, int cpp); 464 int w, int h, int src_pitch, int dst_pitch, int cpp);
464 465
465/* atpx handler */
466#if defined(CONFIG_VGA_SWITCHEROO)
467void radeon_register_atpx_handler(void);
468void radeon_unregister_atpx_handler(void);
469#else
470static inline void radeon_register_atpx_handler(void) {}
471static inline void radeon_unregister_atpx_handler(void) {}
472#endif
473
474/* Flags for stats.boxes 466/* Flags for stats.boxes
475 */ 467 */
476#define RADEON_BOX_DMA_IDLE 0x1 468#define RADEON_BOX_DMA_IDLE 0x1
@@ -2167,4 +2159,6 @@ extern void radeon_commit_ring(drm_radeon_private_t *dev_priv);
2167} while (0) 2159} while (0)
2168 2160
2169 2161
2162#endif /* CONFIG_DRM_RADEON_UMS */
2163
2170#endif /* __RADEON_DRV_H__ */ 2164#endif /* __RADEON_DRV_H__ */
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index d1fafeabea09..2d91123f2759 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -91,6 +91,7 @@ enum radeon_family {
91 CHIP_TAHITI, 91 CHIP_TAHITI,
92 CHIP_PITCAIRN, 92 CHIP_PITCAIRN,
93 CHIP_VERDE, 93 CHIP_VERDE,
94 CHIP_OLAND,
94 CHIP_LAST, 95 CHIP_LAST,
95}; 96};
96 97
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index cc8489d8c6d1..b1746741bc59 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -187,9 +187,10 @@ out_unref:
187 return ret; 187 return ret;
188} 188}
189 189
190static int radeonfb_create(struct radeon_fbdev *rfbdev, 190static int radeonfb_create(struct drm_fb_helper *helper,
191 struct drm_fb_helper_surface_size *sizes) 191 struct drm_fb_helper_surface_size *sizes)
192{ 192{
193 struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
193 struct radeon_device *rdev = rfbdev->rdev; 194 struct radeon_device *rdev = rfbdev->rdev;
194 struct fb_info *info; 195 struct fb_info *info;
195 struct drm_framebuffer *fb = NULL; 196 struct drm_framebuffer *fb = NULL;
@@ -293,28 +294,13 @@ out_unref:
293 } 294 }
294 if (fb && ret) { 295 if (fb && ret) {
295 drm_gem_object_unreference(gobj); 296 drm_gem_object_unreference(gobj);
297 drm_framebuffer_unregister_private(fb);
296 drm_framebuffer_cleanup(fb); 298 drm_framebuffer_cleanup(fb);
297 kfree(fb); 299 kfree(fb);
298 } 300 }
299 return ret; 301 return ret;
300} 302}
301 303
302static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper,
303 struct drm_fb_helper_surface_size *sizes)
304{
305 struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper;
306 int new_fb = 0;
307 int ret;
308
309 if (!helper->fb) {
310 ret = radeonfb_create(rfbdev, sizes);
311 if (ret)
312 return ret;
313 new_fb = 1;
314 }
315 return new_fb;
316}
317
318void radeon_fb_output_poll_changed(struct radeon_device *rdev) 304void radeon_fb_output_poll_changed(struct radeon_device *rdev)
319{ 305{
320 drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper); 306 drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
@@ -339,6 +325,7 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
339 rfb->obj = NULL; 325 rfb->obj = NULL;
340 } 326 }
341 drm_fb_helper_fini(&rfbdev->helper); 327 drm_fb_helper_fini(&rfbdev->helper);
328 drm_framebuffer_unregister_private(&rfb->base);
342 drm_framebuffer_cleanup(&rfb->base); 329 drm_framebuffer_cleanup(&rfb->base);
343 330
344 return 0; 331 return 0;
@@ -347,7 +334,7 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
347static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { 334static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
348 .gamma_set = radeon_crtc_fb_gamma_set, 335 .gamma_set = radeon_crtc_fb_gamma_set,
349 .gamma_get = radeon_crtc_fb_gamma_get, 336 .gamma_get = radeon_crtc_fb_gamma_get,
350 .fb_probe = radeon_fb_find_or_create_single, 337 .fb_probe = radeonfb_create,
351}; 338};
352 339
353int radeon_fbdev_init(struct radeon_device *rdev) 340int radeon_fbdev_init(struct radeon_device *rdev)
@@ -377,6 +364,10 @@ int radeon_fbdev_init(struct radeon_device *rdev)
377 } 364 }
378 365
379 drm_fb_helper_single_add_all_connectors(&rfbdev->helper); 366 drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
367
368 /* disable all the possible outputs/crtcs before entering KMS mode */
369 drm_helper_disable_unused_functions(rdev->ddev);
370
380 drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel); 371 drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
381 return 0; 372 return 0;
382} 373}
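Two things happen in radeon_fb.c: the radeon_fb_find_or_create_single() wrapper is dropped so .fb_probe points straight at radeonfb_create() (the "already have an fb?" bookkeeping appears to move into the fb helper core in this series), and teardown gains drm_framebuffer_unregister_private() as part of the framebuffer refcounting rework. The resulting teardown ordering, condensed from the hunks above:

    /* condensed from the error/destroy paths above:
     * unregister first, then cleanup, then free */
    drm_framebuffer_unregister_private(fb);
    drm_framebuffer_cleanup(fb);
    kfree(fb);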
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 6e24f84755b5..2c1341f63dc5 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -929,6 +929,7 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
929 */ 929 */
930static int radeon_vm_update_pdes(struct radeon_device *rdev, 930static int radeon_vm_update_pdes(struct radeon_device *rdev,
931 struct radeon_vm *vm, 931 struct radeon_vm *vm,
932 struct radeon_ib *ib,
932 uint64_t start, uint64_t end) 933 uint64_t start, uint64_t end)
933{ 934{
934 static const uint32_t incr = RADEON_VM_PTE_COUNT * 8; 935 static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
@@ -971,7 +972,7 @@ retry:
971 ((last_pt + incr * count) != pt)) { 972 ((last_pt + incr * count) != pt)) {
972 973
973 if (count) { 974 if (count) {
974 radeon_asic_vm_set_page(rdev, last_pde, 975 radeon_asic_vm_set_page(rdev, ib, last_pde,
975 last_pt, count, incr, 976 last_pt, count, incr,
976 RADEON_VM_PAGE_VALID); 977 RADEON_VM_PAGE_VALID);
977 } 978 }
@@ -985,7 +986,7 @@ retry:
985 } 986 }
986 987
987 if (count) { 988 if (count) {
988 radeon_asic_vm_set_page(rdev, last_pde, last_pt, count, 989 radeon_asic_vm_set_page(rdev, ib, last_pde, last_pt, count,
989 incr, RADEON_VM_PAGE_VALID); 990 incr, RADEON_VM_PAGE_VALID);
990 991
991 } 992 }
@@ -1009,6 +1010,7 @@ retry:
1009 */ 1010 */
1010static void radeon_vm_update_ptes(struct radeon_device *rdev, 1011static void radeon_vm_update_ptes(struct radeon_device *rdev,
1011 struct radeon_vm *vm, 1012 struct radeon_vm *vm,
1013 struct radeon_ib *ib,
1012 uint64_t start, uint64_t end, 1014 uint64_t start, uint64_t end,
1013 uint64_t dst, uint32_t flags) 1015 uint64_t dst, uint32_t flags)
1014{ 1016{
@@ -1038,7 +1040,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
1038 if ((last_pte + 8 * count) != pte) { 1040 if ((last_pte + 8 * count) != pte) {
1039 1041
1040 if (count) { 1042 if (count) {
1041 radeon_asic_vm_set_page(rdev, last_pte, 1043 radeon_asic_vm_set_page(rdev, ib, last_pte,
1042 last_dst, count, 1044 last_dst, count,
1043 RADEON_GPU_PAGE_SIZE, 1045 RADEON_GPU_PAGE_SIZE,
1044 flags); 1046 flags);
@@ -1056,7 +1058,8 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
1056 } 1058 }
1057 1059
1058 if (count) { 1060 if (count) {
1059 radeon_asic_vm_set_page(rdev, last_pte, last_dst, count, 1061 radeon_asic_vm_set_page(rdev, ib, last_pte,
1062 last_dst, count,
1060 RADEON_GPU_PAGE_SIZE, flags); 1063 RADEON_GPU_PAGE_SIZE, flags);
1061 } 1064 }
1062} 1065}
@@ -1080,8 +1083,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
1080 struct ttm_mem_reg *mem) 1083 struct ttm_mem_reg *mem)
1081{ 1084{
1082 unsigned ridx = rdev->asic->vm.pt_ring_index; 1085 unsigned ridx = rdev->asic->vm.pt_ring_index;
1083 struct radeon_ring *ring = &rdev->ring[ridx]; 1086 struct radeon_ib ib;
1084 struct radeon_semaphore *sem = NULL;
1085 struct radeon_bo_va *bo_va; 1087 struct radeon_bo_va *bo_va;
1086 unsigned nptes, npdes, ndw; 1088 unsigned nptes, npdes, ndw;
1087 uint64_t addr; 1089 uint64_t addr;
@@ -1124,25 +1126,13 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
1124 bo_va->valid = false; 1126 bo_va->valid = false;
1125 } 1127 }
1126 1128
1127 if (vm->fence && radeon_fence_signaled(vm->fence)) {
1128 radeon_fence_unref(&vm->fence);
1129 }
1130
1131 if (vm->fence && vm->fence->ring != ridx) {
1132 r = radeon_semaphore_create(rdev, &sem);
1133 if (r) {
1134 return r;
1135 }
1136 }
1137
1138 nptes = radeon_bo_ngpu_pages(bo); 1129 nptes = radeon_bo_ngpu_pages(bo);
1139 1130
1140 /* assume two extra pdes in case the mapping overlaps the borders */ 1131 /* assume two extra pdes in case the mapping overlaps the borders */
1141 npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2; 1132 npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;
1142 1133
1143 /* estimate number of dw needed */ 1134 /* padding, etc. */
1144 /* semaphore, fence and padding */ 1135 ndw = 64;
1145 ndw = 32;
1146 1136
1147 if (RADEON_VM_BLOCK_SIZE > 11) 1137 if (RADEON_VM_BLOCK_SIZE > 11)
1148 /* reserve space for one header for every 2k dwords */ 1138 /* reserve space for one header for every 2k dwords */
@@ -1161,33 +1151,31 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
1161 /* reserve space for pde addresses */ 1151 /* reserve space for pde addresses */
1162 ndw += npdes * 2; 1152 ndw += npdes * 2;
1163 1153
1164 r = radeon_ring_lock(rdev, ring, ndw); 1154 /* update too big for an IB */
1165 if (r) { 1155 if (ndw > 0xfffff)
1166 return r; 1156 return -ENOMEM;
1167 }
1168 1157
1169 if (sem && radeon_fence_need_sync(vm->fence, ridx)) { 1158 r = radeon_ib_get(rdev, ridx, &ib, NULL, ndw * 4);
1170 radeon_semaphore_sync_rings(rdev, sem, vm->fence->ring, ridx); 1159 ib.length_dw = 0;
1171 radeon_fence_note_sync(vm->fence, ridx);
1172 }
1173 1160
1174 r = radeon_vm_update_pdes(rdev, vm, bo_va->soffset, bo_va->eoffset); 1161 r = radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
1175 if (r) { 1162 if (r) {
1176 radeon_ring_unlock_undo(rdev, ring); 1163 radeon_ib_free(rdev, &ib);
1177 return r; 1164 return r;
1178 } 1165 }
1179 1166
1180 radeon_vm_update_ptes(rdev, vm, bo_va->soffset, bo_va->eoffset, 1167 radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
1181 addr, bo_va->flags); 1168 addr, bo_va->flags);
1182 1169
1183 radeon_fence_unref(&vm->fence); 1170 radeon_ib_sync_to(&ib, vm->fence);
1184 r = radeon_fence_emit(rdev, &vm->fence, ridx); 1171 r = radeon_ib_schedule(rdev, &ib, NULL);
1185 if (r) { 1172 if (r) {
1186 radeon_ring_unlock_undo(rdev, ring); 1173 radeon_ib_free(rdev, &ib);
1187 return r; 1174 return r;
1188 } 1175 }
1189 radeon_ring_unlock_commit(rdev, ring); 1176 radeon_fence_unref(&vm->fence);
1190 radeon_semaphore_free(rdev, &sem, vm->fence); 1177 vm->fence = radeon_fence_ref(ib.fence);
1178 radeon_ib_free(rdev, &ib);
1191 radeon_fence_unref(&vm->last_flush); 1179 radeon_fence_unref(&vm->last_flush);
1192 1180
1193 return 0; 1181 return 0;
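The page-table update path stops writing directly into a locked ring (with an optional semaphore to sync against vm->fence) and instead builds an IB. Condensed, with error handling trimmed, the new flow in radeon_vm_bo_update_pte() is:

    /* condensed sketch of the hunk above; error paths trimmed */
    r = radeon_ib_get(rdev, ridx, &ib, NULL, ndw * 4);
    ib.length_dw = 0;
    radeon_vm_update_pdes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset);
    radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
                          addr, bo_va->flags);
    radeon_ib_sync_to(&ib, vm->fence);     /* replaces the semaphore dance */
    r = radeon_ib_schedule(rdev, &ib, NULL);
    radeon_fence_unref(&vm->fence);
    vm->fence = radeon_fence_ref(ib.fence);
    radeon_ib_free(rdev, &ib);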
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index e7710339a6a7..8d68e972789a 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -28,6 +28,8 @@
28 * Authors: 28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com> 29 * Keith Whitwell <keith@tungstengraphics.com>
30 * Michel Dänzer <michel@daenzer.net> 30 * Michel Dänzer <michel@daenzer.net>
31 *
32 * ------------------------ This file is DEPRECATED! -------------------------
31 */ 33 */
32 34
33#include <drm/drmP.h> 35#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 9c312f9afb68..c75cb2c6ba71 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -185,11 +185,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
185 if (info->request == RADEON_INFO_TIMESTAMP) { 185 if (info->request == RADEON_INFO_TIMESTAMP) {
186 if (rdev->family >= CHIP_R600) { 186 if (rdev->family >= CHIP_R600) {
187 value_ptr64 = (uint64_t*)((unsigned long)info->value); 187 value_ptr64 = (uint64_t*)((unsigned long)info->value);
188 if (rdev->family >= CHIP_TAHITI) { 188 value64 = radeon_get_gpu_clock_counter(rdev);
189 value64 = si_get_gpu_clock(rdev);
190 } else {
191 value64 = r600_get_gpu_clock(rdev);
192 }
193 189
194 if (DRM_COPY_TO_USER(value_ptr64, &value64, sizeof(value64))) { 190 if (DRM_COPY_TO_USER(value_ptr64, &value64, sizeof(value64))) {
195 DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__); 191 DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
@@ -282,7 +278,10 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
282 break; 278 break;
283 case RADEON_INFO_CLOCK_CRYSTAL_FREQ: 279 case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
284 /* return clock value in KHz */ 280 /* return clock value in KHz */
285 value = rdev->clock.spll.reference_freq * 10; 281 if (rdev->asic->get_xclk)
282 value = radeon_get_xclk(rdev) * 10;
283 else
284 value = rdev->clock.spll.reference_freq * 10;
286 break; 285 break;
287 case RADEON_INFO_NUM_BACKENDS: 286 case RADEON_INFO_NUM_BACKENDS:
288 if (rdev->family >= CHIP_TAHITI) 287 if (rdev->family >= CHIP_TAHITI)
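Both radeon_kms.c hunks swap per-family branches for asic-table dispatch: the timestamp ioctl now calls a single radeon_get_gpu_clock_counter() instead of branching between si_/r600_get_gpu_clock, and the crystal-frequency query prefers the new .get_xclk callback when present. A sketch of the dispatch macro, assuming the driver's usual radeon_asic style:

    /* assumed shape, matching the driver's existing asic macros */
    #define radeon_get_xclk(rdev) ((rdev)->asic->get_xclk((rdev)))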
diff --git a/drivers/gpu/drm/radeon/radeon_mem.c b/drivers/gpu/drm/radeon/radeon_mem.c
index b9f067241633..d54d2d7c9031 100644
--- a/drivers/gpu/drm/radeon/radeon_mem.c
+++ b/drivers/gpu/drm/radeon/radeon_mem.c
@@ -27,6 +27,8 @@
27 * 27 *
28 * Authors: 28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com> 29 * Keith Whitwell <keith@tungstengraphics.com>
30 *
31 * ------------------------ This file is DEPRECATED! -------------------------
30 */ 32 */
31 33
32#include <drm/drmP.h> 34#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 0bfa656aa87d..338fd6a74e87 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -169,7 +169,7 @@ static void radeon_set_power_state(struct radeon_device *rdev)
169 169
170 /* starting with BTC, there is one state that is used for both 170 /* starting with BTC, there is one state that is used for both
171 * MH and SH. Difference is that we always use the high clock index for 171 * MH and SH. Difference is that we always use the high clock index for
172 * mclk. 172 * mclk and vddci.
173 */ 173 */
174 if ((rdev->pm.pm_method == PM_METHOD_PROFILE) && 174 if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
175 (rdev->family >= CHIP_BARTS) && 175 (rdev->family >= CHIP_BARTS) &&
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 26c23bb651c6..4940af7e75e6 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -28,199 +28,71 @@
28#include "radeon.h" 28#include "radeon.h"
29#include <drm/radeon_drm.h> 29#include <drm/radeon_drm.h>
30 30
31#include <linux/dma-buf.h> 31struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
32
33static struct sg_table *radeon_gem_map_dma_buf(struct dma_buf_attachment *attachment,
34 enum dma_data_direction dir)
35{ 32{
36 struct radeon_bo *bo = attachment->dmabuf->priv; 33 struct radeon_bo *bo = gem_to_radeon_bo(obj);
37 struct drm_device *dev = bo->rdev->ddev;
38 int npages = bo->tbo.num_pages; 34 int npages = bo->tbo.num_pages;
39 struct sg_table *sg;
40 int nents;
41
42 mutex_lock(&dev->struct_mutex);
43 sg = drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
44 nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
45 mutex_unlock(&dev->struct_mutex);
46 return sg;
47}
48
49static void radeon_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
50 struct sg_table *sg, enum dma_data_direction dir)
51{
52 dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
53 sg_free_table(sg);
54 kfree(sg);
55}
56
57static void radeon_gem_dmabuf_release(struct dma_buf *dma_buf)
58{
59 struct radeon_bo *bo = dma_buf->priv;
60
61 if (bo->gem_base.export_dma_buf == dma_buf) {
62 DRM_ERROR("unreference dmabuf %p\n", &bo->gem_base);
63 bo->gem_base.export_dma_buf = NULL;
64 drm_gem_object_unreference_unlocked(&bo->gem_base);
65 }
66}
67
68static void *radeon_gem_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
69{
70 return NULL;
71}
72
73static void radeon_gem_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
74{
75
76}
77static void *radeon_gem_kmap(struct dma_buf *dma_buf, unsigned long page_num)
78{
79 return NULL;
80}
81
82static void radeon_gem_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
83{
84 35
36 return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
85} 37}
86 38
87static int radeon_gem_prime_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma) 39void *radeon_gem_prime_vmap(struct drm_gem_object *obj)
88{ 40{
89 return -EINVAL; 41 struct radeon_bo *bo = gem_to_radeon_bo(obj);
90}
91
92static void *radeon_gem_prime_vmap(struct dma_buf *dma_buf)
93{
94 struct radeon_bo *bo = dma_buf->priv;
95 struct drm_device *dev = bo->rdev->ddev;
96 int ret; 42 int ret;
97 43
98 mutex_lock(&dev->struct_mutex);
99 if (bo->vmapping_count) {
100 bo->vmapping_count++;
101 goto out_unlock;
102 }
103
104 ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, 44 ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
105 &bo->dma_buf_vmap); 45 &bo->dma_buf_vmap);
106 if (ret) { 46 if (ret)
107 mutex_unlock(&dev->struct_mutex);
108 return ERR_PTR(ret); 47 return ERR_PTR(ret);
109 } 48
110 bo->vmapping_count = 1;
111out_unlock:
112 mutex_unlock(&dev->struct_mutex);
113 return bo->dma_buf_vmap.virtual; 49 return bo->dma_buf_vmap.virtual;
114} 50}
115 51
116static void radeon_gem_prime_vunmap(struct dma_buf *dma_buf, void *vaddr) 52void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
117{ 53{
118 struct radeon_bo *bo = dma_buf->priv; 54 struct radeon_bo *bo = gem_to_radeon_bo(obj);
119 struct drm_device *dev = bo->rdev->ddev;
120 55
121 mutex_lock(&dev->struct_mutex); 56 ttm_bo_kunmap(&bo->dma_buf_vmap);
122 bo->vmapping_count--;
123 if (bo->vmapping_count == 0) {
124 ttm_bo_kunmap(&bo->dma_buf_vmap);
125 }
126 mutex_unlock(&dev->struct_mutex);
127} 57}
128const static struct dma_buf_ops radeon_dmabuf_ops = { 58
129 .map_dma_buf = radeon_gem_map_dma_buf, 59struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
130 .unmap_dma_buf = radeon_gem_unmap_dma_buf, 60 size_t size,
131 .release = radeon_gem_dmabuf_release, 61 struct sg_table *sg)
132 .kmap = radeon_gem_kmap,
133 .kmap_atomic = radeon_gem_kmap_atomic,
134 .kunmap = radeon_gem_kunmap,
135 .kunmap_atomic = radeon_gem_kunmap_atomic,
136 .mmap = radeon_gem_prime_mmap,
137 .vmap = radeon_gem_prime_vmap,
138 .vunmap = radeon_gem_prime_vunmap,
139};
140
141static int radeon_prime_create(struct drm_device *dev,
142 size_t size,
143 struct sg_table *sg,
144 struct radeon_bo **pbo)
145{ 62{
146 struct radeon_device *rdev = dev->dev_private; 63 struct radeon_device *rdev = dev->dev_private;
147 struct radeon_bo *bo; 64 struct radeon_bo *bo;
148 int ret; 65 int ret;
149 66
150 ret = radeon_bo_create(rdev, size, PAGE_SIZE, false, 67 ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
151 RADEON_GEM_DOMAIN_GTT, sg, pbo); 68 RADEON_GEM_DOMAIN_GTT, sg, &bo);
152 if (ret) 69 if (ret)
153 return ret; 70 return ERR_PTR(ret);
154 bo = *pbo;
155 bo->gem_base.driver_private = bo; 71 bo->gem_base.driver_private = bo;
156 72
157 mutex_lock(&rdev->gem.mutex); 73 mutex_lock(&rdev->gem.mutex);
158 list_add_tail(&bo->list, &rdev->gem.objects); 74 list_add_tail(&bo->list, &rdev->gem.objects);
159 mutex_unlock(&rdev->gem.mutex); 75 mutex_unlock(&rdev->gem.mutex);
160 76
161 return 0; 77 return &bo->gem_base;
162} 78}
163 79
164struct dma_buf *radeon_gem_prime_export(struct drm_device *dev, 80int radeon_gem_prime_pin(struct drm_gem_object *obj)
165 struct drm_gem_object *obj,
166 int flags)
167{ 81{
168 struct radeon_bo *bo = gem_to_radeon_bo(obj); 82 struct radeon_bo *bo = gem_to_radeon_bo(obj);
169 int ret = 0; 83 int ret = 0;
170 84
171 ret = radeon_bo_reserve(bo, false); 85 ret = radeon_bo_reserve(bo, false);
172 if (unlikely(ret != 0)) 86 if (unlikely(ret != 0))
173 return ERR_PTR(ret); 87 return ret;
174 88
175 /* pin buffer into GTT */ 89 /* pin buffer into GTT */
176 ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL); 90 ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
177 if (ret) { 91 if (ret) {
178 radeon_bo_unreserve(bo); 92 radeon_bo_unreserve(bo);
179 return ERR_PTR(ret); 93 return ret;
180 } 94 }
181 radeon_bo_unreserve(bo); 95 radeon_bo_unreserve(bo);
182 return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);
183}
184 96
185struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev, 97 return 0;
186 struct dma_buf *dma_buf)
187{
188 struct dma_buf_attachment *attach;
189 struct sg_table *sg;
190 struct radeon_bo *bo;
191 int ret;
192
193 if (dma_buf->ops == &radeon_dmabuf_ops) {
194 bo = dma_buf->priv;
195 if (bo->gem_base.dev == dev) {
196 drm_gem_object_reference(&bo->gem_base);
197 dma_buf_put(dma_buf);
198 return &bo->gem_base;
199 }
200 }
201
202 /* need to attach */
203 attach = dma_buf_attach(dma_buf, dev->dev);
204 if (IS_ERR(attach))
205 return ERR_CAST(attach);
206
207 sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
208 if (IS_ERR(sg)) {
209 ret = PTR_ERR(sg);
210 goto fail_detach;
211 }
212
213 ret = radeon_prime_create(dev, dma_buf->size, sg, &bo);
214 if (ret)
215 goto fail_unmap;
216
217 bo->gem_base.import_attach = attach;
218
219 return &bo->gem_base;
220
221fail_unmap:
222 dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
223fail_detach:
224 dma_buf_detach(dma_buf, attach);
225 return ERR_PTR(ret);
226} 98}
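radeon_prime.c shrinks from a full hand-rolled dma_buf_ops implementation to the small per-driver callbacks the new generic PRIME helpers need; attach/detach, sg mapping, and refcounted vmap now live in the DRM core. A sketch of how these callbacks would be wired into struct drm_driver (hook names assumed from the prime-helper series, not visible in this hunk):

    /* sketch; field names assumed from the generic prime helpers */
    static struct drm_driver kms_driver = {
            /* ... */
            .gem_prime_pin             = radeon_gem_prime_pin,
            .gem_prime_get_sg_table    = radeon_gem_prime_get_sg_table,
            .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
            .gem_prime_vmap            = radeon_gem_prime_vmap,
            .gem_prime_vunmap          = radeon_gem_prime_vunmap,
    };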
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 5d8f735d6aaf..7e2c2b7cf188 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -3706,4 +3706,19 @@
3706 3706
3707#define RV530_GB_PIPE_SELECT2 0x4124 3707#define RV530_GB_PIPE_SELECT2 0x4124
3708 3708
3709#define RADEON_CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
3710#define RADEON_CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
3711#define RADEON_CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
3712#define RADEON_CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
3713#define R100_CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
3714#define R600_CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
3715#define RADEON_PACKET_TYPE0 0
3716#define RADEON_PACKET_TYPE1 1
3717#define RADEON_PACKET_TYPE2 2
3718#define RADEON_PACKET_TYPE3 3
3719
3720#define RADEON_PACKET3_NOP 0x10
3721
3722#define RADEON_VLINE_STAT (1 << 12)
3723
3709#endif 3724#endif
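These decode macros are hoisted into radeon_reg.h so per-chip headers (rv515d.h and sid.h, further down) can drop their duplicate PACKET_TYPE*/CP_PACKET_GET_* copies. Example of pulling apart a PM4 header with them:

    u32 header = ib->ptr[idx];
    u32 type   = RADEON_CP_PACKET_GET_TYPE(header);   /* bits 31:30 */
    u32 count  = RADEON_CP_PACKET_GET_COUNT(header);  /* bits 29:16 */
    if (type == RADEON_PACKET_TYPE3)
            opcode = RADEON_CP_PACKET3_GET_OPCODE(header); /* bits 15:8 */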
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index cd72062d5a91..8d58e268ff6d 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -109,6 +109,25 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
109} 109}
110 110
111/** 111/**
112 * radeon_ib_sync_to - sync to fence before executing the IB
113 *
114 * @ib: IB object to add fence to
115 * @fence: fence to sync to
116 *
117 * Sync to the fence before executing the IB
118 */
119void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence)
120{
121 struct radeon_fence *other;
122
123 if (!fence)
124 return;
125
126 other = ib->sync_to[fence->ring];
127 ib->sync_to[fence->ring] = radeon_fence_later(fence, other);
128}
129
130/**
112 * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring 131 * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
113 * 132 *
114 * @rdev: radeon_device pointer 133 * @rdev: radeon_device pointer
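radeon_ib_sync_to() records, per source ring, the newest fence the IB must wait for before it executes; radeon_fence_later() keeps whichever of the previous and new fence is more recent, so repeated calls only ever tighten the dependency. Typical usage (see radeon_gart.c above):

    radeon_ib_sync_to(&ib, vm->fence);   /* no-op when the fence is NULL */
    r = radeon_ib_schedule(rdev, &ib, NULL);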
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 8e9057b6a365..4d20910899d4 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -25,6 +25,8 @@
25 * Authors: 25 * Authors:
26 * Gareth Hughes <gareth@valinux.com> 26 * Gareth Hughes <gareth@valinux.com>
27 * Kevin E. Martin <martin@valinux.com> 27 * Kevin E. Martin <martin@valinux.com>
28 *
29 * ------------------------ This file is DEPRECATED! -------------------------
28 */ 30 */
29 31
30#include <drm/drmP.h> 32#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/radeon/rv515d.h b/drivers/gpu/drm/radeon/rv515d.h
index 590309a710b1..6927a200daf4 100644
--- a/drivers/gpu/drm/radeon/rv515d.h
+++ b/drivers/gpu/drm/radeon/rv515d.h
@@ -205,17 +205,6 @@
205 REG_SET(PACKET3_IT_OPCODE, (op)) | \ 205 REG_SET(PACKET3_IT_OPCODE, (op)) | \
206 REG_SET(PACKET3_COUNT, (n))) 206 REG_SET(PACKET3_COUNT, (n)))
207 207
208#define PACKET_TYPE0 0
209#define PACKET_TYPE1 1
210#define PACKET_TYPE2 2
211#define PACKET_TYPE3 3
212
213#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
214#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
215#define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
216#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
217#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
218
219/* Registers */ 208/* Registers */
220#define R_0000F0_RBBM_SOFT_RESET 0x0000F0 209#define R_0000F0_RBBM_SOFT_RESET 0x0000F0
221#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0) 210#define S_0000F0_SOFT_RESET_CP(x) (((x) & 0x1) << 0)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 1b2444f4d8f4..d63fe1d0f53f 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -43,6 +43,31 @@ static void rv770_gpu_init(struct radeon_device *rdev);
43void rv770_fini(struct radeon_device *rdev); 43void rv770_fini(struct radeon_device *rdev);
44static void rv770_pcie_gen2_enable(struct radeon_device *rdev); 44static void rv770_pcie_gen2_enable(struct radeon_device *rdev);
45 45
46#define PCIE_BUS_CLK 10000
47#define TCLK (PCIE_BUS_CLK / 10)
48
49/**
50 * rv770_get_xclk - get the xclk
51 *
52 * @rdev: radeon_device pointer
53 *
54 * Returns the reference clock used by the gfx engine
55 * (r7xx-cayman).
56 */
57u32 rv770_get_xclk(struct radeon_device *rdev)
58{
59 u32 reference_clock = rdev->clock.spll.reference_freq;
60 u32 tmp = RREG32(CG_CLKPIN_CNTL);
61
62 if (tmp & MUX_TCLK_TO_XCLK)
63 return TCLK;
64
65 if (tmp & XTALIN_DIVIDE)
66 return reference_clock / 4;
67
68 return reference_clock;
69}
70
46u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base) 71u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
47{ 72{
48 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 73 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 20e29d23d348..c55f950a4af7 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -128,6 +128,10 @@
128#define GUI_ACTIVE (1<<31) 128#define GUI_ACTIVE (1<<31)
129#define GRBM_STATUS2 0x8014 129#define GRBM_STATUS2 0x8014
130 130
131#define CG_CLKPIN_CNTL 0x660
132# define MUX_TCLK_TO_XCLK (1 << 8)
133# define XTALIN_DIVIDE (1 << 9)
134
131#define CG_MULT_THERMAL_STATUS 0x740 135#define CG_MULT_THERMAL_STATUS 0x740
132#define ASIC_T(x) ((x) << 16) 136#define ASIC_T(x) ((x) << 16)
133#define ASIC_T_MASK 0x3FF0000 137#define ASIC_T_MASK 0x3FF0000
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index ae8b48205a6c..80979ed951eb 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -38,6 +38,7 @@
38#define SI_CE_UCODE_SIZE 2144 38#define SI_CE_UCODE_SIZE 2144
39#define SI_RLC_UCODE_SIZE 2048 39#define SI_RLC_UCODE_SIZE 2048
40#define SI_MC_UCODE_SIZE 7769 40#define SI_MC_UCODE_SIZE 7769
41#define OLAND_MC_UCODE_SIZE 7863
41 42
42MODULE_FIRMWARE("radeon/TAHITI_pfp.bin"); 43MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
43MODULE_FIRMWARE("radeon/TAHITI_me.bin"); 44MODULE_FIRMWARE("radeon/TAHITI_me.bin");
@@ -54,6 +55,11 @@ MODULE_FIRMWARE("radeon/VERDE_me.bin");
54MODULE_FIRMWARE("radeon/VERDE_ce.bin"); 55MODULE_FIRMWARE("radeon/VERDE_ce.bin");
55MODULE_FIRMWARE("radeon/VERDE_mc.bin"); 56MODULE_FIRMWARE("radeon/VERDE_mc.bin");
56MODULE_FIRMWARE("radeon/VERDE_rlc.bin"); 57MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
58MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
59MODULE_FIRMWARE("radeon/OLAND_me.bin");
60MODULE_FIRMWARE("radeon/OLAND_ce.bin");
61MODULE_FIRMWARE("radeon/OLAND_mc.bin");
62MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
57 63
58extern int r600_ih_ring_alloc(struct radeon_device *rdev); 64extern int r600_ih_ring_alloc(struct radeon_device *rdev);
59extern void r600_ih_ring_fini(struct radeon_device *rdev); 65extern void r600_ih_ring_fini(struct radeon_device *rdev);
@@ -61,6 +67,35 @@ extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
61extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save); 67extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
62extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save); 68extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
63extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev); 69extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
70extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
71extern bool evergreen_is_display_hung(struct radeon_device *rdev);
72
73#define PCIE_BUS_CLK 10000
74#define TCLK (PCIE_BUS_CLK / 10)
75
76/**
77 * si_get_xclk - get the xclk
78 *
79 * @rdev: radeon_device pointer
80 *
81 * Returns the reference clock used by the gfx engine
82 * (SI).
83 */
84u32 si_get_xclk(struct radeon_device *rdev)
85{
86 u32 reference_clock = rdev->clock.spll.reference_freq;
87 u32 tmp;
88
89 tmp = RREG32(CG_CLKPIN_CNTL_2);
90 if (tmp & MUX_TCLK_TO_XCLK)
91 return TCLK;
92
93 tmp = RREG32(CG_CLKPIN_CNTL);
94 if (tmp & XTALIN_DIVIDE)
95 return reference_clock / 4;
96
97 return reference_clock;
98}
64 99
65/* get temperature in millidegrees */ 100/* get temperature in millidegrees */
66int si_get_temp(struct radeon_device *rdev) 101int si_get_temp(struct radeon_device *rdev)
@@ -200,6 +235,45 @@ static const u32 verde_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
200 {0x0000009f, 0x00a37400} 235 {0x0000009f, 0x00a37400}
201}; 236};
202 237
238static const u32 oland_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
239 {0x0000006f, 0x03044000},
240 {0x00000070, 0x0480c018},
241 {0x00000071, 0x00000040},
242 {0x00000072, 0x01000000},
243 {0x00000074, 0x000000ff},
244 {0x00000075, 0x00143400},
245 {0x00000076, 0x08ec0800},
246 {0x00000077, 0x040000cc},
247 {0x00000079, 0x00000000},
248 {0x0000007a, 0x21000409},
249 {0x0000007c, 0x00000000},
250 {0x0000007d, 0xe8000000},
251 {0x0000007e, 0x044408a8},
252 {0x0000007f, 0x00000003},
253 {0x00000080, 0x00000000},
254 {0x00000081, 0x01000000},
255 {0x00000082, 0x02000000},
256 {0x00000083, 0x00000000},
257 {0x00000084, 0xe3f3e4f4},
258 {0x00000085, 0x00052024},
259 {0x00000087, 0x00000000},
260 {0x00000088, 0x66036603},
261 {0x00000089, 0x01000000},
262 {0x0000008b, 0x1c0a0000},
263 {0x0000008c, 0xff010000},
264 {0x0000008e, 0xffffefff},
265 {0x0000008f, 0xfff3efff},
266 {0x00000090, 0xfff3efbf},
267 {0x00000094, 0x00101101},
268 {0x00000095, 0x00000fff},
269 {0x00000096, 0x00116fff},
270 {0x00000097, 0x60010000},
271 {0x00000098, 0x10010000},
272 {0x00000099, 0x00006000},
273 {0x0000009a, 0x00001000},
274 {0x0000009f, 0x00a17730}
275};
276
203/* ucode loading */ 277/* ucode loading */
204static int si_mc_load_microcode(struct radeon_device *rdev) 278static int si_mc_load_microcode(struct radeon_device *rdev)
205{ 279{
@@ -228,6 +302,11 @@ static int si_mc_load_microcode(struct radeon_device *rdev)
228 ucode_size = SI_MC_UCODE_SIZE; 302 ucode_size = SI_MC_UCODE_SIZE;
229 regs_size = TAHITI_IO_MC_REGS_SIZE; 303 regs_size = TAHITI_IO_MC_REGS_SIZE;
230 break; 304 break;
305 case CHIP_OLAND:
306 io_mc_regs = (u32 *)&oland_io_mc_regs;
307 ucode_size = OLAND_MC_UCODE_SIZE;
308 regs_size = TAHITI_IO_MC_REGS_SIZE;
309 break;
231 } 310 }
232 311
233 running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK; 312 running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
@@ -322,6 +401,15 @@ static int si_init_microcode(struct radeon_device *rdev)
322 rlc_req_size = SI_RLC_UCODE_SIZE * 4; 401 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
323 mc_req_size = SI_MC_UCODE_SIZE * 4; 402 mc_req_size = SI_MC_UCODE_SIZE * 4;
324 break; 403 break;
404 case CHIP_OLAND:
405 chip_name = "OLAND";
406 rlc_chip_name = "OLAND";
407 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
408 me_req_size = SI_PM4_UCODE_SIZE * 4;
409 ce_req_size = SI_CE_UCODE_SIZE * 4;
410 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
411 mc_req_size = OLAND_MC_UCODE_SIZE * 4;
412 break;
325 default: BUG(); 413 default: BUG();
326 } 414 }
327 415
@@ -1125,7 +1213,8 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
1125 } 1213 }
1126 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden); 1214 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
1127 } 1215 }
1128 } else if (rdev->family == CHIP_VERDE) { 1216 } else if ((rdev->family == CHIP_VERDE) ||
1217 (rdev->family == CHIP_OLAND)) {
1129 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { 1218 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
1130 switch (reg_offset) { 1219 switch (reg_offset) {
1131 case 0: /* non-AA compressed depth or any compressed stencil */ 1220 case 0: /* non-AA compressed depth or any compressed stencil */
@@ -1570,6 +1659,23 @@ static void si_gpu_init(struct radeon_device *rdev)
1570 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; 1659 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
1571 gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN; 1660 gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
1572 break; 1661 break;
1662 case CHIP_OLAND:
1663 rdev->config.si.max_shader_engines = 1;
1664 rdev->config.si.max_tile_pipes = 4;
1665 rdev->config.si.max_cu_per_sh = 6;
1666 rdev->config.si.max_sh_per_se = 1;
1667 rdev->config.si.max_backends_per_se = 2;
1668 rdev->config.si.max_texture_channel_caches = 4;
1669 rdev->config.si.max_gprs = 256;
1670 rdev->config.si.max_gs_threads = 16;
1671 rdev->config.si.max_hw_contexts = 8;
1672
1673 rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
1674 rdev->config.si.sc_prim_fifo_size_backend = 0x40;
1675 rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
1676 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
1677 gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
1678 break;
1573 } 1679 }
1574 1680
1575 /* Initialize HDP */ 1681 /* Initialize HDP */
@@ -2106,154 +2212,275 @@ static int si_cp_resume(struct radeon_device *rdev)
2106 return 0; 2212 return 0;
2107} 2213}
2108 2214
2109bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) 2215static u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
2110{ 2216{
2111 u32 srbm_status; 2217 u32 reset_mask = 0;
2112 u32 grbm_status, grbm_status2; 2218 u32 tmp;
2113 u32 grbm_status_se0, grbm_status_se1;
2114
2115 srbm_status = RREG32(SRBM_STATUS);
2116 grbm_status = RREG32(GRBM_STATUS);
2117 grbm_status2 = RREG32(GRBM_STATUS2);
2118 grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
2119 grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
2120 if (!(grbm_status & GUI_ACTIVE)) {
2121 radeon_ring_lockup_update(ring);
2122 return false;
2123 }
2124 /* force CP activities */
2125 radeon_ring_force_activity(rdev, ring);
2126 return radeon_ring_test_lockup(rdev, ring);
2127}
2128 2219
2129static void si_gpu_soft_reset_gfx(struct radeon_device *rdev) 2220 /* GRBM_STATUS */
2130{ 2221 tmp = RREG32(GRBM_STATUS);
2131 u32 grbm_reset = 0; 2222 if (tmp & (PA_BUSY | SC_BUSY |
2223 BCI_BUSY | SX_BUSY |
2224 TA_BUSY | VGT_BUSY |
2225 DB_BUSY | CB_BUSY |
2226 GDS_BUSY | SPI_BUSY |
2227 IA_BUSY | IA_BUSY_NO_DMA))
2228 reset_mask |= RADEON_RESET_GFX;
2132 2229
2133 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) 2230 if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
2134 return; 2231 CP_BUSY | CP_COHERENCY_BUSY))
2232 reset_mask |= RADEON_RESET_CP;
2135 2233
2136 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", 2234 if (tmp & GRBM_EE_BUSY)
2137 RREG32(GRBM_STATUS)); 2235 reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
2138 dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
2139 RREG32(GRBM_STATUS2));
2140 dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
2141 RREG32(GRBM_STATUS_SE0));
2142 dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
2143 RREG32(GRBM_STATUS_SE1));
2144 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
2145 RREG32(SRBM_STATUS));
2146 2236
2147 /* Disable CP parsing/prefetching */ 2237 /* GRBM_STATUS2 */
2148 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT); 2238 tmp = RREG32(GRBM_STATUS2);
2239 if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
2240 reset_mask |= RADEON_RESET_RLC;
2149 2241
2150 /* reset all the gfx blocks */ 2242 /* DMA_STATUS_REG 0 */
2151 grbm_reset = (SOFT_RESET_CP | 2243 tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
2152 SOFT_RESET_CB | 2244 if (!(tmp & DMA_IDLE))
2153 SOFT_RESET_DB | 2245 reset_mask |= RADEON_RESET_DMA;
2154 SOFT_RESET_GDS |
2155 SOFT_RESET_PA |
2156 SOFT_RESET_SC |
2157 SOFT_RESET_BCI |
2158 SOFT_RESET_SPI |
2159 SOFT_RESET_SX |
2160 SOFT_RESET_TC |
2161 SOFT_RESET_TA |
2162 SOFT_RESET_VGT |
2163 SOFT_RESET_IA);
2164
2165 dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
2166 WREG32(GRBM_SOFT_RESET, grbm_reset);
2167 (void)RREG32(GRBM_SOFT_RESET);
2168 udelay(50);
2169 WREG32(GRBM_SOFT_RESET, 0);
2170 (void)RREG32(GRBM_SOFT_RESET);
2171
2172 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
2173 RREG32(GRBM_STATUS));
2174 dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
2175 RREG32(GRBM_STATUS2));
2176 dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
2177 RREG32(GRBM_STATUS_SE0));
2178 dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
2179 RREG32(GRBM_STATUS_SE1));
2180 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
2181 RREG32(SRBM_STATUS));
2182}
2183 2246
2184static void si_gpu_soft_reset_dma(struct radeon_device *rdev) 2247 /* DMA_STATUS_REG 1 */
2185{ 2248 tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
2186 u32 tmp; 2249 if (!(tmp & DMA_IDLE))
2250 reset_mask |= RADEON_RESET_DMA1;
2187 2251
2188 if (RREG32(DMA_STATUS_REG) & DMA_IDLE) 2252 /* SRBM_STATUS2 */
2189 return; 2253 tmp = RREG32(SRBM_STATUS2);
2254 if (tmp & DMA_BUSY)
2255 reset_mask |= RADEON_RESET_DMA;
2190 2256
2191 dev_info(rdev->dev, " DMA_STATUS_REG = 0x%08X\n", 2257 if (tmp & DMA1_BUSY)
2192 RREG32(DMA_STATUS_REG)); 2258 reset_mask |= RADEON_RESET_DMA1;
2193 2259
2194 /* dma0 */ 2260 /* SRBM_STATUS */
2195 tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET); 2261 tmp = RREG32(SRBM_STATUS);
2196 tmp &= ~DMA_RB_ENABLE;
2197 WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
2198 2262
2199 /* dma1 */ 2263 if (tmp & IH_BUSY)
2200 tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET); 2264 reset_mask |= RADEON_RESET_IH;
2201 tmp &= ~DMA_RB_ENABLE;
2202 WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
2203 2265
2204 /* Reset dma */ 2266 if (tmp & SEM_BUSY)
2205 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1); 2267 reset_mask |= RADEON_RESET_SEM;
2206 RREG32(SRBM_SOFT_RESET); 2268
2207 udelay(50); 2269 if (tmp & GRBM_RQ_PENDING)
2208 WREG32(SRBM_SOFT_RESET, 0); 2270 reset_mask |= RADEON_RESET_GRBM;
2271
2272 if (tmp & VMC_BUSY)
2273 reset_mask |= RADEON_RESET_VMC;
2209 2274
2210 dev_info(rdev->dev, " DMA_STATUS_REG = 0x%08X\n", 2275 if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
2211 RREG32(DMA_STATUS_REG)); 2276 MCC_BUSY | MCD_BUSY))
2277 reset_mask |= RADEON_RESET_MC;
2278
2279 if (evergreen_is_display_hung(rdev))
2280 reset_mask |= RADEON_RESET_DISPLAY;
2281
2282 /* VM_L2_STATUS */
2283 tmp = RREG32(VM_L2_STATUS);
2284 if (tmp & L2_BUSY)
2285 reset_mask |= RADEON_RESET_VMC;
2286
2287 return reset_mask;
2212} 2288}
2213 2289
2214static int si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask) 2290static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
2215{ 2291{
2216 struct evergreen_mc_save save; 2292 struct evergreen_mc_save save;
2217 2293 u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
2218 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) 2294 u32 tmp;
2219 reset_mask &= ~(RADEON_RESET_GFX | RADEON_RESET_COMPUTE);
2220
2221 if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
2222 reset_mask &= ~RADEON_RESET_DMA;
2223 2295
2224 if (reset_mask == 0) 2296 if (reset_mask == 0)
2225 return 0; 2297 return;
2226 2298
2227 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask); 2299 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
2228 2300
2301 evergreen_print_gpu_status_regs(rdev);
2229 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", 2302 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
2230 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR)); 2303 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
2231 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 2304 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
2232 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS)); 2305 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
2233 2306
2307 /* Disable CP parsing/prefetching */
2308 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
2309
2310 if (reset_mask & RADEON_RESET_DMA) {
2311 /* dma0 */
2312 tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
2313 tmp &= ~DMA_RB_ENABLE;
2314 WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
2315 }
2316 if (reset_mask & RADEON_RESET_DMA1) {
2317 /* dma1 */
2318 tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
2319 tmp &= ~DMA_RB_ENABLE;
2320 WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
2321 }
2322
2323 udelay(50);
2324
2234 evergreen_mc_stop(rdev, &save); 2325 evergreen_mc_stop(rdev, &save);
2235 if (radeon_mc_wait_for_idle(rdev)) { 2326 if (evergreen_mc_wait_for_idle(rdev)) {
2236 dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); 2327 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
2237 } 2328 }
2238 2329
2239 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE)) 2330 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP)) {
2240 si_gpu_soft_reset_gfx(rdev); 2331 grbm_soft_reset = SOFT_RESET_CB |
2332 SOFT_RESET_DB |
2333 SOFT_RESET_GDS |
2334 SOFT_RESET_PA |
2335 SOFT_RESET_SC |
2336 SOFT_RESET_BCI |
2337 SOFT_RESET_SPI |
2338 SOFT_RESET_SX |
2339 SOFT_RESET_TC |
2340 SOFT_RESET_TA |
2341 SOFT_RESET_VGT |
2342 SOFT_RESET_IA;
2343 }
2344
2345 if (reset_mask & RADEON_RESET_CP) {
2346 grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
2347
2348 srbm_soft_reset |= SOFT_RESET_GRBM;
2349 }
2241 2350
2242 if (reset_mask & RADEON_RESET_DMA) 2351 if (reset_mask & RADEON_RESET_DMA)
2243 si_gpu_soft_reset_dma(rdev); 2352 srbm_soft_reset |= SOFT_RESET_DMA;
2353
2354 if (reset_mask & RADEON_RESET_DMA1)
2355 srbm_soft_reset |= SOFT_RESET_DMA1;
2356
2357 if (reset_mask & RADEON_RESET_DISPLAY)
2358 srbm_soft_reset |= SOFT_RESET_DC;
2359
2360 if (reset_mask & RADEON_RESET_RLC)
2361 grbm_soft_reset |= SOFT_RESET_RLC;
2362
2363 if (reset_mask & RADEON_RESET_SEM)
2364 srbm_soft_reset |= SOFT_RESET_SEM;
2365
2366 if (reset_mask & RADEON_RESET_IH)
2367 srbm_soft_reset |= SOFT_RESET_IH;
2368
2369 if (reset_mask & RADEON_RESET_GRBM)
2370 srbm_soft_reset |= SOFT_RESET_GRBM;
2371
2372 if (reset_mask & RADEON_RESET_VMC)
2373 srbm_soft_reset |= SOFT_RESET_VMC;
2374
2375 if (reset_mask & RADEON_RESET_MC)
2376 srbm_soft_reset |= SOFT_RESET_MC;
2377
2378 if (grbm_soft_reset) {
2379 tmp = RREG32(GRBM_SOFT_RESET);
2380 tmp |= grbm_soft_reset;
2381 dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
2382 WREG32(GRBM_SOFT_RESET, tmp);
2383 tmp = RREG32(GRBM_SOFT_RESET);
2384
2385 udelay(50);
2386
2387 tmp &= ~grbm_soft_reset;
2388 WREG32(GRBM_SOFT_RESET, tmp);
2389 tmp = RREG32(GRBM_SOFT_RESET);
2390 }
2391
2392 if (srbm_soft_reset) {
2393 tmp = RREG32(SRBM_SOFT_RESET);
2394 tmp |= srbm_soft_reset;
2395 dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
2396 WREG32(SRBM_SOFT_RESET, tmp);
2397 tmp = RREG32(SRBM_SOFT_RESET);
2398
2399 udelay(50);
2400
2401 tmp &= ~srbm_soft_reset;
2402 WREG32(SRBM_SOFT_RESET, tmp);
2403 tmp = RREG32(SRBM_SOFT_RESET);
2404 }
2244 2405
2245 /* Wait a little for things to settle down */ 2406 /* Wait a little for things to settle down */
2246 udelay(50); 2407 udelay(50);
2247 2408
2248 evergreen_mc_resume(rdev, &save); 2409 evergreen_mc_resume(rdev, &save);
2249 return 0; 2410 udelay(50);
2411
2412 evergreen_print_gpu_status_regs(rdev);
2250} 2413}
2251 2414
2252int si_asic_reset(struct radeon_device *rdev) 2415int si_asic_reset(struct radeon_device *rdev)
2253{ 2416{
2254 return si_gpu_soft_reset(rdev, (RADEON_RESET_GFX | 2417 u32 reset_mask;
2255 RADEON_RESET_COMPUTE | 2418
2256 RADEON_RESET_DMA)); 2419 reset_mask = si_gpu_check_soft_reset(rdev);
2420
2421 if (reset_mask)
2422 r600_set_bios_scratch_engine_hung(rdev, true);
2423
2424 si_gpu_soft_reset(rdev, reset_mask);
2425
2426 reset_mask = si_gpu_check_soft_reset(rdev);
2427
2428 if (!reset_mask)
2429 r600_set_bios_scratch_engine_hung(rdev, false);
2430
2431 return 0;
2432}
2433
2434/**
2435 * si_gfx_is_lockup - Check if the GFX engine is locked up
2436 *
2437 * @rdev: radeon_device pointer
2438 * @ring: radeon_ring structure holding ring information
2439 *
2440 * Check if the GFX engine is locked up.
2441 * Returns true if the engine appears to be locked up, false if not.
2442 */
2443bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2444{
2445 u32 reset_mask = si_gpu_check_soft_reset(rdev);
2446
2447 if (!(reset_mask & (RADEON_RESET_GFX |
2448 RADEON_RESET_COMPUTE |
2449 RADEON_RESET_CP))) {
2450 radeon_ring_lockup_update(ring);
2451 return false;
2452 }
2453 /* force CP activities */
2454 radeon_ring_force_activity(rdev, ring);
2455 return radeon_ring_test_lockup(rdev, ring);
2456}
2457
2458/**
2459 * si_dma_is_lockup - Check if the DMA engine is locked up
2460 *
2461 * @rdev: radeon_device pointer
2462 * @ring: radeon_ring structure holding ring information
2463 *
2464 * Check if the async DMA engine is locked up.
2465 * Returns true if the engine appears to be locked up, false if not.
2466 */
2467bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2468{
2469 u32 reset_mask = si_gpu_check_soft_reset(rdev);
2470 u32 mask;
2471
2472 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
2473 mask = RADEON_RESET_DMA;
2474 else
2475 mask = RADEON_RESET_DMA1;
2476
2477 if (!(reset_mask & mask)) {
2478 radeon_ring_lockup_update(ring);
2479 return false;
2480 }
2481 /* force ring activities */
2482 radeon_ring_force_activity(rdev, ring);
2483 return radeon_ring_test_lockup(rdev, ring);
2257} 2484}
2258 2485
2259/* MC */ 2486/* MC */
@@ -2855,19 +3082,19 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
2855 3082
2856 do { 3083 do {
2857 pkt.idx = idx; 3084 pkt.idx = idx;
2858 pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]); 3085 pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
2859 pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]); 3086 pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
2860 pkt.one_reg_wr = 0; 3087 pkt.one_reg_wr = 0;
2861 switch (pkt.type) { 3088 switch (pkt.type) {
2862 case PACKET_TYPE0: 3089 case RADEON_PACKET_TYPE0:
2863 dev_err(rdev->dev, "Packet0 not allowed!\n"); 3090 dev_err(rdev->dev, "Packet0 not allowed!\n");
2864 ret = -EINVAL; 3091 ret = -EINVAL;
2865 break; 3092 break;
2866 case PACKET_TYPE2: 3093 case RADEON_PACKET_TYPE2:
2867 idx += 1; 3094 idx += 1;
2868 break; 3095 break;
2869 case PACKET_TYPE3: 3096 case RADEON_PACKET_TYPE3:
2870 pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]); 3097 pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
2871 if (ib->is_const_ib) 3098 if (ib->is_const_ib)
2872 ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt); 3099 ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
2873 else { 3100 else {
@@ -2920,19 +3147,21 @@ void si_vm_fini(struct radeon_device *rdev)
2920 * si_vm_set_page - update the page tables using the CP 3147 * si_vm_set_page - update the page tables using the CP
2921 * 3148 *
2922 * @rdev: radeon_device pointer 3149 * @rdev: radeon_device pointer
3150 * @ib: indirect buffer to fill with commands
2923 * @pe: addr of the page entry 3151 * @pe: addr of the page entry
2924 * @addr: dst addr to write into pe 3152 * @addr: dst addr to write into pe
2925 * @count: number of page entries to update 3153 * @count: number of page entries to update
2926 * @incr: increase next addr by incr bytes 3154 * @incr: increase next addr by incr bytes
2927 * @flags: access flags 3155 * @flags: access flags
2928 * 3156 *
2929 * Update the page tables using the CP (cayman-si). 3157 * Update the page tables using the CP (SI).
2930 */ 3158 */
2931void si_vm_set_page(struct radeon_device *rdev, uint64_t pe, 3159void si_vm_set_page(struct radeon_device *rdev,
3160 struct radeon_ib *ib,
3161 uint64_t pe,
2932 uint64_t addr, unsigned count, 3162 uint64_t addr, unsigned count,
2933 uint32_t incr, uint32_t flags) 3163 uint32_t incr, uint32_t flags)
2934{ 3164{
2935 struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
2936 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); 3165 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
2937 uint64_t value; 3166 uint64_t value;
2938 unsigned ndw; 3167 unsigned ndw;
@@ -2943,11 +3172,11 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
2943 if (ndw > 0x3FFE) 3172 if (ndw > 0x3FFE)
2944 ndw = 0x3FFE; 3173 ndw = 0x3FFE;
2945 3174
2946 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw)); 3175 ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
2947 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 3176 ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
2948 WRITE_DATA_DST_SEL(1))); 3177 WRITE_DATA_DST_SEL(1));
2949 radeon_ring_write(ring, pe); 3178 ib->ptr[ib->length_dw++] = pe;
2950 radeon_ring_write(ring, upper_32_bits(pe)); 3179 ib->ptr[ib->length_dw++] = upper_32_bits(pe);
2951 for (; ndw > 2; ndw -= 2, --count, pe += 8) { 3180 for (; ndw > 2; ndw -= 2, --count, pe += 8) {
2952 if (flags & RADEON_VM_PAGE_SYSTEM) { 3181 if (flags & RADEON_VM_PAGE_SYSTEM) {
2953 value = radeon_vm_map_gart(rdev, addr); 3182 value = radeon_vm_map_gart(rdev, addr);
@@ -2959,8 +3188,8 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
2959 } 3188 }
2960 addr += incr; 3189 addr += incr;
2961 value |= r600_flags; 3190 value |= r600_flags;
2962 radeon_ring_write(ring, value); 3191 ib->ptr[ib->length_dw++] = value;
2963 radeon_ring_write(ring, upper_32_bits(value)); 3192 ib->ptr[ib->length_dw++] = upper_32_bits(value);
2964 } 3193 }
2965 } 3194 }
2966 } else { 3195 } else {
@@ -2972,9 +3201,9 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
2972 ndw = 0xFFFFE; 3201 ndw = 0xFFFFE;
2973 3202
2974 /* for non-physically contiguous pages (system) */ 3203 /* for non-physically contiguous pages (system) */
2975 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw)); 3204 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
2976 radeon_ring_write(ring, pe); 3205 ib->ptr[ib->length_dw++] = pe;
2977 radeon_ring_write(ring, upper_32_bits(pe) & 0xff); 3206 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
2978 for (; ndw > 0; ndw -= 2, --count, pe += 8) { 3207 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
2979 if (flags & RADEON_VM_PAGE_SYSTEM) { 3208 if (flags & RADEON_VM_PAGE_SYSTEM) {
2980 value = radeon_vm_map_gart(rdev, addr); 3209 value = radeon_vm_map_gart(rdev, addr);
@@ -2986,8 +3215,8 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
2986 } 3215 }
2987 addr += incr; 3216 addr += incr;
2988 value |= r600_flags; 3217 value |= r600_flags;
2989 radeon_ring_write(ring, value); 3218 ib->ptr[ib->length_dw++] = value;
2990 radeon_ring_write(ring, upper_32_bits(value)); 3219 ib->ptr[ib->length_dw++] = upper_32_bits(value);
2991 } 3220 }
2992 } 3221 }
2993 } else { 3222 } else {
@@ -3001,20 +3230,22 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
3001 else 3230 else
3002 value = 0; 3231 value = 0;
3003 /* for physically contiguous pages (vram) */ 3232 /* for physically contiguous pages (vram) */
3004 radeon_ring_write(ring, DMA_PTE_PDE_PACKET(ndw)); 3233 ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
3005 radeon_ring_write(ring, pe); /* dst addr */ 3234 ib->ptr[ib->length_dw++] = pe; /* dst addr */
3006 radeon_ring_write(ring, upper_32_bits(pe) & 0xff); 3235 ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
3007 radeon_ring_write(ring, r600_flags); /* mask */ 3236 ib->ptr[ib->length_dw++] = r600_flags; /* mask */
3008 radeon_ring_write(ring, 0); 3237 ib->ptr[ib->length_dw++] = 0;
3009 radeon_ring_write(ring, value); /* value */ 3238 ib->ptr[ib->length_dw++] = value; /* value */
3010 radeon_ring_write(ring, upper_32_bits(value)); 3239 ib->ptr[ib->length_dw++] = upper_32_bits(value);
3011 radeon_ring_write(ring, incr); /* increment size */ 3240 ib->ptr[ib->length_dw++] = incr; /* increment size */
3012 radeon_ring_write(ring, 0); 3241 ib->ptr[ib->length_dw++] = 0;
3013 pe += ndw * 4; 3242 pe += ndw * 4;
3014 addr += (ndw / 2) * incr; 3243 addr += (ndw / 2) * incr;
3015 count -= ndw / 2; 3244 count -= ndw / 2;
3016 } 3245 }
3017 } 3246 }
3247 while (ib->length_dw & 0x7)
3248 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
3018 } 3249 }
3019} 3250}
3020 3251
@@ -4378,14 +4609,14 @@ void si_fini(struct radeon_device *rdev)
4378} 4609}
4379 4610
4380/** 4611/**
4381 * si_get_gpu_clock - return GPU clock counter snapshot 4612 * si_get_gpu_clock_counter - return GPU clock counter snapshot
4382 * 4613 *
4383 * @rdev: radeon_device pointer 4614 * @rdev: radeon_device pointer
4384 * 4615 *
4385 * Fetches a GPU clock counter snapshot (SI). 4616 * Fetches a GPU clock counter snapshot (SI).
4386 * Returns the 64 bit clock counter snapshot. 4617 * Returns the 64 bit clock counter snapshot.
4387 */ 4618 */
4388uint64_t si_get_gpu_clock(struct radeon_device *rdev) 4619uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
4389{ 4620{
4390 uint64_t clock; 4621 uint64_t clock;
4391 4622
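The SI reset rework replaces the unconditional GFX/COMPUTE/DMA soft reset with a diagnose, reset, recheck loop: si_gpu_check_soft_reset() turns the status registers into a mask of hung blocks, si_gpu_soft_reset() resets only those blocks, and re-reading the mask decides whether the BIOS "engine hung" scratch flag can be cleared. The same mask also backs the new per-ring si_gfx_is_lockup()/si_dma_is_lockup() checks. Condensed from si_asic_reset() above:

    u32 reset_mask = si_gpu_check_soft_reset(rdev);
    if (reset_mask)
            r600_set_bios_scratch_engine_hung(rdev, true);
    si_gpu_soft_reset(rdev, reset_mask);
    if (!si_gpu_check_soft_reset(rdev))
            r600_set_bios_scratch_engine_hung(rdev, false);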
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index c056aae814f0..23fc08fc8e7f 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -58,9 +58,22 @@
58#define VGA_HDP_CONTROL 0x328 58#define VGA_HDP_CONTROL 0x328
59#define VGA_MEMORY_DISABLE (1 << 4) 59#define VGA_MEMORY_DISABLE (1 << 4)
60 60
61#define CG_CLKPIN_CNTL 0x660
62# define XTALIN_DIVIDE (1 << 1)
63#define CG_CLKPIN_CNTL_2 0x664
64# define MUX_TCLK_TO_XCLK (1 << 8)
65
61#define DMIF_ADDR_CONFIG 0xBD4 66#define DMIF_ADDR_CONFIG 0xBD4
62 67
63#define SRBM_STATUS 0xE50 68#define SRBM_STATUS 0xE50
69#define GRBM_RQ_PENDING (1 << 5)
70#define VMC_BUSY (1 << 8)
71#define MCB_BUSY (1 << 9)
72#define MCB_NON_DISPLAY_BUSY (1 << 10)
73#define MCC_BUSY (1 << 11)
74#define MCD_BUSY (1 << 12)
75#define SEM_BUSY (1 << 14)
76#define IH_BUSY (1 << 17)
64 77
65#define SRBM_SOFT_RESET 0x0E60 78#define SRBM_SOFT_RESET 0x0E60
66#define SOFT_RESET_BIF (1 << 1) 79#define SOFT_RESET_BIF (1 << 1)
@@ -81,6 +94,10 @@
81#define CC_SYS_RB_BACKEND_DISABLE 0xe80 94#define CC_SYS_RB_BACKEND_DISABLE 0xe80
82#define GC_USER_SYS_RB_BACKEND_DISABLE 0xe84 95#define GC_USER_SYS_RB_BACKEND_DISABLE 0xe84
83 96
97#define SRBM_STATUS2 0x0EC4
98#define DMA_BUSY (1 << 5)
99#define DMA1_BUSY (1 << 6)
100
84#define VM_L2_CNTL 0x1400 101#define VM_L2_CNTL 0x1400
85#define ENABLE_L2_CACHE (1 << 0) 102#define ENABLE_L2_CACHE (1 << 0)
86#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1) 103#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
@@ -783,16 +800,7 @@
783/* 800/*
784 * PM4 801 * PM4
785 */ 802 */
786#define PACKET_TYPE0 0 803#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
787#define PACKET_TYPE1 1
788#define PACKET_TYPE2 2
789#define PACKET_TYPE3 3
790
791#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
792#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
793#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
794#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
795#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
796 (((reg) >> 2) & 0xFFFF) | \ 804 (((reg) >> 2) & 0xFFFF) | \
797 ((n) & 0x3FFF) << 16) 805 ((n) & 0x3FFF) << 16)
798#define CP_PACKET2 0x80000000 806#define CP_PACKET2 0x80000000
@@ -801,7 +809,7 @@
801 809
802#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v))) 810#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
803 811
804#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \ 812#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
805 (((op) & 0xFF) << 8) | \ 813 (((op) & 0xFF) << 8) | \
806 ((n) & 0x3FFF) << 16) 814 ((n) & 0x3FFF) << 16)
807 815
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index d1d5306ebf24..f6e0b5395051 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -313,9 +313,9 @@ static int shmob_drm_pm_resume(struct device *dev)
313{ 313{
314 struct shmob_drm_device *sdev = dev_get_drvdata(dev); 314 struct shmob_drm_device *sdev = dev_get_drvdata(dev);
315 315
316 mutex_lock(&sdev->ddev->mode_config.mutex); 316 drm_modeset_lock_all(sdev->ddev);
317 shmob_drm_crtc_resume(&sdev->crtc); 317 shmob_drm_crtc_resume(&sdev->crtc);
318 mutex_unlock(&sdev->ddev->mode_config.mutex); 318 drm_modeset_unlock_all(sdev->ddev);
319 319
320 drm_kms_helper_poll_enable(sdev->ddev); 320 drm_kms_helper_poll_enable(sdev->ddev);
321 return 0; 321 return 0;
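Part of the KMS locking rework called out in the merge description: drivers stop taking mode_config.mutex by hand and use drm_modeset_lock_all()/drm_modeset_unlock_all(), which also grab the new per-CRTC locks. The conversion is mechanical:

    drm_modeset_lock_all(dev);    /* was: mutex_lock(&dev->mode_config.mutex) */
    /* ... suspend/resume or other modeset work ... */
    drm_modeset_unlock_all(dev);  /* was: mutex_unlock(&dev->mode_config.mutex) */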
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index be1daf7344d3..c92955df0658 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -4,6 +4,7 @@ config DRM_TEGRA
4 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
5 select DRM_GEM_CMA_HELPER 5 select DRM_GEM_CMA_HELPER
6 select DRM_KMS_CMA_HELPER 6 select DRM_KMS_CMA_HELPER
7 select DRM_HDMI
7 select FB_CFB_FILLRECT 8 select FB_CFB_FILLRECT
8 select FB_CFB_COPYAREA 9 select FB_CFB_COPYAREA
9 select FB_CFB_IMAGEBLIT 10 select FB_CFB_IMAGEBLIT
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index b6679b36700f..de94707b9dbe 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -17,26 +17,257 @@
17#include "drm.h" 17#include "drm.h"
18#include "dc.h" 18#include "dc.h"
19 19
20struct tegra_dc_window { 20struct tegra_plane {
21 fixed20_12 x; 21 struct drm_plane base;
22 fixed20_12 y; 22 unsigned int index;
23 fixed20_12 w;
24 fixed20_12 h;
25 unsigned int outx;
26 unsigned int outy;
27 unsigned int outw;
28 unsigned int outh;
29 unsigned int stride;
30 unsigned int fmt;
31}; 23};
32 24
25static inline struct tegra_plane *to_tegra_plane(struct drm_plane *plane)
26{
27 return container_of(plane, struct tegra_plane, base);
28}
29
30static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
31 struct drm_framebuffer *fb, int crtc_x,
32 int crtc_y, unsigned int crtc_w,
33 unsigned int crtc_h, uint32_t src_x,
34 uint32_t src_y, uint32_t src_w, uint32_t src_h)
35{
36 struct tegra_plane *p = to_tegra_plane(plane);
37 struct tegra_dc *dc = to_tegra_dc(crtc);
38 struct tegra_dc_window window;
39 unsigned int i;
40
41 memset(&window, 0, sizeof(window));
42 window.src.x = src_x >> 16;
43 window.src.y = src_y >> 16;
44 window.src.w = src_w >> 16;
45 window.src.h = src_h >> 16;
46 window.dst.x = crtc_x;
47 window.dst.y = crtc_y;
48 window.dst.w = crtc_w;
49 window.dst.h = crtc_h;
50 window.format = tegra_dc_format(fb->pixel_format);
51 window.bits_per_pixel = fb->bits_per_pixel;
52
53 for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) {
54 struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, i);
55
56 window.base[i] = gem->paddr + fb->offsets[i];
57
58 /*
59 * Tegra doesn't support different strides for U and V planes
60 * so we display a warning if the user tries to display a
61 * framebuffer with such a configuration.
62 */
63 if (i >= 2) {
64 if (fb->pitches[i] != window.stride[1])
65 DRM_ERROR("unsupported UV-plane configuration\n");
66 } else {
67 window.stride[i] = fb->pitches[i];
68 }
69 }
70
71 return tegra_dc_setup_window(dc, p->index, &window);
72}
73
74static int tegra_plane_disable(struct drm_plane *plane)
75{
76 struct tegra_dc *dc = to_tegra_dc(plane->crtc);
77 struct tegra_plane *p = to_tegra_plane(plane);
78 unsigned long value;
79
80 value = WINDOW_A_SELECT << p->index;
81 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
82
83 value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
84 value &= ~WIN_ENABLE;
85 tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
86
87 tegra_dc_writel(dc, WIN_A_UPDATE << p->index, DC_CMD_STATE_CONTROL);
88 tegra_dc_writel(dc, WIN_A_ACT_REQ << p->index, DC_CMD_STATE_CONTROL);
89
90 return 0;
91}
92
93static void tegra_plane_destroy(struct drm_plane *plane)
94{
95 tegra_plane_disable(plane);
96 drm_plane_cleanup(plane);
97}
98
99static const struct drm_plane_funcs tegra_plane_funcs = {
100 .update_plane = tegra_plane_update,
101 .disable_plane = tegra_plane_disable,
102 .destroy = tegra_plane_destroy,
103};
104
105static const uint32_t plane_formats[] = {
106 DRM_FORMAT_XRGB8888,
107 DRM_FORMAT_UYVY,
108 DRM_FORMAT_YUV420,
109 DRM_FORMAT_YUV422,
110};
111
112static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
113{
114 unsigned int i;
115 int err = 0;
116
117 for (i = 0; i < 2; i++) {
118 struct tegra_plane *plane;
119
120 plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL);
121 if (!plane)
122 return -ENOMEM;
123
124 plane->index = 1 + i;
125
126 err = drm_plane_init(drm, &plane->base, 1 << dc->pipe,
127 &tegra_plane_funcs, plane_formats,
128 ARRAY_SIZE(plane_formats), false);
129 if (err < 0)
130 return err;
131 }
132
133 return 0;
134}
135
136static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
137 struct drm_framebuffer *fb)
138{
139 struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, 0);
140 unsigned long value;
141
142 tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
143
144 value = fb->offsets[0] + y * fb->pitches[0] +
145 x * fb->bits_per_pixel / 8;
146
147 tegra_dc_writel(dc, gem->paddr + value, DC_WINBUF_START_ADDR);
148 tegra_dc_writel(dc, fb->pitches[0], DC_WIN_LINE_STRIDE);
149
150 value = GENERAL_UPDATE | WIN_A_UPDATE;
151 tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
152
153 value = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
154 tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
155
156 return 0;
157}
158
159void tegra_dc_enable_vblank(struct tegra_dc *dc)
160{
161 unsigned long value, flags;
162
163 spin_lock_irqsave(&dc->lock, flags);
164
165 value = tegra_dc_readl(dc, DC_CMD_INT_MASK);
166 value |= VBLANK_INT;
167 tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
168
169 spin_unlock_irqrestore(&dc->lock, flags);
170}
171
172void tegra_dc_disable_vblank(struct tegra_dc *dc)
173{
174 unsigned long value, flags;
175
176 spin_lock_irqsave(&dc->lock, flags);
177
178 value = tegra_dc_readl(dc, DC_CMD_INT_MASK);
179 value &= ~VBLANK_INT;
180 tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
181
182 spin_unlock_irqrestore(&dc->lock, flags);
183}
184
185static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
186{
187 struct drm_device *drm = dc->base.dev;
188 struct drm_crtc *crtc = &dc->base;
189 struct drm_gem_cma_object *gem;
190 unsigned long flags, base;
191
192 if (!dc->event)
193 return;
194
195 gem = drm_fb_cma_get_gem_obj(crtc->fb, 0);
196
197 /* check if new start address has been latched */
198 tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
199 base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR);
200 tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
201
202 if (base == gem->paddr + crtc->fb->offsets[0]) {
203 spin_lock_irqsave(&drm->event_lock, flags);
204 drm_send_vblank_event(drm, dc->pipe, dc->event);
205 drm_vblank_put(drm, dc->pipe);
206 dc->event = NULL;
207 spin_unlock_irqrestore(&drm->event_lock, flags);
208 }
209}
210
211void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
212{
213 struct tegra_dc *dc = to_tegra_dc(crtc);
214 struct drm_device *drm = crtc->dev;
215 unsigned long flags;
216
217 spin_lock_irqsave(&drm->event_lock, flags);
218
219 if (dc->event && dc->event->base.file_priv == file) {
220 dc->event->base.destroy(&dc->event->base);
221 drm_vblank_put(drm, dc->pipe);
222 dc->event = NULL;
223 }
224
225 spin_unlock_irqrestore(&drm->event_lock, flags);
226}
227
228static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
229 struct drm_pending_vblank_event *event)
230{
231 struct tegra_dc *dc = to_tegra_dc(crtc);
232 struct drm_device *drm = crtc->dev;
233
234 if (dc->event)
235 return -EBUSY;
236
237 if (event) {
238 event->pipe = dc->pipe;
239 dc->event = event;
240 drm_vblank_get(drm, dc->pipe);
241 }
242
243 tegra_dc_set_base(dc, 0, 0, fb);
244 crtc->fb = fb;
245
246 return 0;
247}
248
33static const struct drm_crtc_funcs tegra_crtc_funcs = { 249static const struct drm_crtc_funcs tegra_crtc_funcs = {
250 .page_flip = tegra_dc_page_flip,
34 .set_config = drm_crtc_helper_set_config, 251 .set_config = drm_crtc_helper_set_config,
35 .destroy = drm_crtc_cleanup, 252 .destroy = drm_crtc_cleanup,
36}; 253};
37 254
38static void tegra_crtc_dpms(struct drm_crtc *crtc, int mode) 255static void tegra_crtc_disable(struct drm_crtc *crtc)
39{ 256{
257 struct drm_device *drm = crtc->dev;
258 struct drm_plane *plane;
259
260 list_for_each_entry(plane, &drm->mode_config.plane_list, head) {
261 if (plane->crtc == crtc) {
262 tegra_plane_disable(plane);
263 plane->crtc = NULL;
264
265 if (plane->fb) {
266 drm_framebuffer_unreference(plane->fb);
267 plane->fb = NULL;
268 }
269 }
270 }
40} 271}
41 272
42static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc, 273static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
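
The page-flip path above completes asynchronously: tegra_dc_page_flip() latches the new base address and stashes the drm_pending_vblank_event, and tegra_dc_finish_page_flip() sends it from the vblank interrupt once the hardware has actually latched the new scanout address. From userspace the handshake looks roughly like the libdrm sketch below (fd, crtc_id and fb_id are placeholders; error handling trimmed):

#include <xf86drm.h>
#include <xf86drmMode.h>

static void page_flip_handler(int fd, unsigned int frame,
			      unsigned int sec, unsigned int usec,
			      void *data)
{
	/* the previously scanned-out buffer may be reused from here on */
}

static int flip_once(int fd, uint32_t crtc_id, uint32_t fb_id)
{
	drmEventContext evctx = {
		.version = DRM_EVENT_CONTEXT_VERSION,
		.page_flip_handler = page_flip_handler,
	};
	int err;

	err = drmModePageFlip(fd, crtc_id, fb_id,
			      DRM_MODE_PAGE_FLIP_EVENT, NULL);
	if (err)
		return err; /* -EBUSY while a flip is still pending */

	return drmHandleEvent(fd, &evctx); /* reads the completion event */
}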
@@ -46,10 +277,11 @@ static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
46 return true; 277 return true;
47} 278}
48 279
49static inline u32 compute_dda_inc(fixed20_12 inf, unsigned int out, bool v, 280static inline u32 compute_dda_inc(unsigned int in, unsigned int out, bool v,
50 unsigned int bpp) 281 unsigned int bpp)
51{ 282{
52 fixed20_12 outf = dfixed_init(out); 283 fixed20_12 outf = dfixed_init(out);
284 fixed20_12 inf = dfixed_init(in);
53 u32 dda_inc; 285 u32 dda_inc;
54 int max; 286 int max;
55 287
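
compute_dda_inc() now takes plain integers and converts internally; fixed20_12 is DRM's 20.12 fixed-point type from drm_fixed.h, so a value x is stored as x << 12 and dfixed_frac() extracts the low 12 bits. A standalone illustration of the representation:

#include <stdint.h>

#define FRAC_BITS 12

static uint32_t to_fixed(uint32_t x) { return x << FRAC_BITS; }             /* like dfixed_const() */
static uint32_t frac(uint32_t f)     { return f & ((1 << FRAC_BITS) - 1); } /* like dfixed_frac()  */

/* e.g. a 1.5x ratio: (3 << FRAC_BITS) / 2 == 0x1800, frac(0x1800) == 0x800 */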
@@ -79,9 +311,10 @@ static inline u32 compute_dda_inc(fixed20_12 inf, unsigned int out, bool v,
79 return dda_inc; 311 return dda_inc;
80} 312}
81 313
82static inline u32 compute_initial_dda(fixed20_12 in) 314static inline u32 compute_initial_dda(unsigned int in)
83{ 315{
84 return dfixed_frac(in); 316 fixed20_12 inf = dfixed_init(in);
317 return dfixed_frac(inf);
85} 318}
86 319
87static int tegra_dc_set_timings(struct tegra_dc *dc, 320static int tegra_dc_set_timings(struct tegra_dc *dc,
@@ -152,18 +385,198 @@ static int tegra_crtc_setup_clk(struct drm_crtc *crtc,
152 return 0; 385 return 0;
153} 386}
154 387
388static bool tegra_dc_format_is_yuv(unsigned int format, bool *planar)
389{
390 switch (format) {
391 case WIN_COLOR_DEPTH_YCbCr422:
392 case WIN_COLOR_DEPTH_YUV422:
393 if (planar)
394 *planar = false;
395
396 return true;
397
398 case WIN_COLOR_DEPTH_YCbCr420P:
399 case WIN_COLOR_DEPTH_YUV420P:
400 case WIN_COLOR_DEPTH_YCbCr422P:
401 case WIN_COLOR_DEPTH_YUV422P:
402 case WIN_COLOR_DEPTH_YCbCr422R:
403 case WIN_COLOR_DEPTH_YUV422R:
404 case WIN_COLOR_DEPTH_YCbCr422RA:
405 case WIN_COLOR_DEPTH_YUV422RA:
406 if (planar)
407 *planar = true;
408
409 return true;
410 }
411
412 return false;
413}
414
415int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
416 const struct tegra_dc_window *window)
417{
418 unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp;
419 unsigned long value;
420 bool yuv, planar;
421
422 /*
423 * For YUV planar modes, the number of bytes per pixel takes into
424 * account only the luma component and therefore is 1.
425 */
426 yuv = tegra_dc_format_is_yuv(window->format, &planar);
427 if (!yuv)
428 bpp = window->bits_per_pixel / 8;
429 else
430 bpp = planar ? 1 : 2;
431
432 value = WINDOW_A_SELECT << index;
433 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
434
435 tegra_dc_writel(dc, window->format, DC_WIN_COLOR_DEPTH);
436 tegra_dc_writel(dc, 0, DC_WIN_BYTE_SWAP);
437
438 value = V_POSITION(window->dst.y) | H_POSITION(window->dst.x);
439 tegra_dc_writel(dc, value, DC_WIN_POSITION);
440
441 value = V_SIZE(window->dst.h) | H_SIZE(window->dst.w);
442 tegra_dc_writel(dc, value, DC_WIN_SIZE);
443
444 h_offset = window->src.x * bpp;
445 v_offset = window->src.y;
446 h_size = window->src.w * bpp;
447 v_size = window->src.h;
448
449 value = V_PRESCALED_SIZE(v_size) | H_PRESCALED_SIZE(h_size);
450 tegra_dc_writel(dc, value, DC_WIN_PRESCALED_SIZE);
451
452 /*
453 * For DDA computations the number of bytes per pixel for YUV planar
454 * modes needs to take into account all Y, U and V components.
455 */
456 if (yuv && planar)
457 bpp = 2;
458
459 h_dda = compute_dda_inc(window->src.w, window->dst.w, false, bpp);
460 v_dda = compute_dda_inc(window->src.h, window->dst.h, true, bpp);
461
462 value = V_DDA_INC(v_dda) | H_DDA_INC(h_dda);
463 tegra_dc_writel(dc, value, DC_WIN_DDA_INC);
464
465 h_dda = compute_initial_dda(window->src.x);
466 v_dda = compute_initial_dda(window->src.y);
467
468 tegra_dc_writel(dc, h_dda, DC_WIN_H_INITIAL_DDA);
469 tegra_dc_writel(dc, v_dda, DC_WIN_V_INITIAL_DDA);
470
471 tegra_dc_writel(dc, 0, DC_WIN_UV_BUF_STRIDE);
472 tegra_dc_writel(dc, 0, DC_WIN_BUF_STRIDE);
473
474 tegra_dc_writel(dc, window->base[0], DC_WINBUF_START_ADDR);
475
476 if (yuv && planar) {
477 tegra_dc_writel(dc, window->base[1], DC_WINBUF_START_ADDR_U);
478 tegra_dc_writel(dc, window->base[2], DC_WINBUF_START_ADDR_V);
479 value = window->stride[1] << 16 | window->stride[0];
480 tegra_dc_writel(dc, value, DC_WIN_LINE_STRIDE);
481 } else {
482 tegra_dc_writel(dc, window->stride[0], DC_WIN_LINE_STRIDE);
483 }
484
485 tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
486 tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
487
488 value = WIN_ENABLE;
489
490 if (yuv) {
491 /* setup default colorspace conversion coefficients */
492 tegra_dc_writel(dc, 0x00f0, DC_WIN_CSC_YOF);
493 tegra_dc_writel(dc, 0x012a, DC_WIN_CSC_KYRGB);
494 tegra_dc_writel(dc, 0x0000, DC_WIN_CSC_KUR);
495 tegra_dc_writel(dc, 0x0198, DC_WIN_CSC_KVR);
496 tegra_dc_writel(dc, 0x039b, DC_WIN_CSC_KUG);
497 tegra_dc_writel(dc, 0x032f, DC_WIN_CSC_KVG);
498 tegra_dc_writel(dc, 0x0204, DC_WIN_CSC_KUB);
499 tegra_dc_writel(dc, 0x0000, DC_WIN_CSC_KVB);
500
501 value |= CSC_ENABLE;
502 } else if (window->bits_per_pixel < 24) {
503 value |= COLOR_EXPAND;
504 }
505
506 tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
507
508 /*
509 * Disable blending and assume Window A is the bottom-most window,
510 * Window C is the top-most window and Window B is in the middle.
511 */
512 tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_NOKEY);
513 tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_1WIN);
514
515 switch (index) {
516 case 0:
517 tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_X);
518 tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_Y);
519 tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_3WIN_XY);
520 break;
521
522 case 1:
523 tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_X);
524 tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_2WIN_Y);
525 tegra_dc_writel(dc, 0x000000, DC_WIN_BLEND_3WIN_XY);
526 break;
527
528 case 2:
529 tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_X);
530 tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_2WIN_Y);
531 tegra_dc_writel(dc, 0xffff00, DC_WIN_BLEND_3WIN_XY);
532 break;
533 }
534
535 tegra_dc_writel(dc, WIN_A_UPDATE << index, DC_CMD_STATE_CONTROL);
536 tegra_dc_writel(dc, WIN_A_ACT_REQ << index, DC_CMD_STATE_CONTROL);
537
538 return 0;
539}
540
541unsigned int tegra_dc_format(uint32_t format)
542{
543 switch (format) {
544 case DRM_FORMAT_XRGB8888:
545 return WIN_COLOR_DEPTH_B8G8R8A8;
546
547 case DRM_FORMAT_RGB565:
548 return WIN_COLOR_DEPTH_B5G6R5;
549
550 case DRM_FORMAT_UYVY:
551 return WIN_COLOR_DEPTH_YCbCr422;
552
553 case DRM_FORMAT_YUV420:
554 return WIN_COLOR_DEPTH_YCbCr420P;
555
556 case DRM_FORMAT_YUV422:
557 return WIN_COLOR_DEPTH_YCbCr422P;
558
559 default:
560 break;
561 }
562
563 WARN(1, "unsupported pixel format %u, using default\n", format);
564 return WIN_COLOR_DEPTH_B8G8R8A8;
565}
566
155static int tegra_crtc_mode_set(struct drm_crtc *crtc, 567static int tegra_crtc_mode_set(struct drm_crtc *crtc,
156 struct drm_display_mode *mode, 568 struct drm_display_mode *mode,
157 struct drm_display_mode *adjusted, 569 struct drm_display_mode *adjusted,
158 int x, int y, struct drm_framebuffer *old_fb) 570 int x, int y, struct drm_framebuffer *old_fb)
159{ 571{
160 struct tegra_framebuffer *fb = to_tegra_fb(crtc->fb); 572 struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(crtc->fb, 0);
161 struct tegra_dc *dc = to_tegra_dc(crtc); 573 struct tegra_dc *dc = to_tegra_dc(crtc);
162 unsigned int h_dda, v_dda, bpp; 574 struct tegra_dc_window window;
163 struct tegra_dc_window win;
164 unsigned long div, value; 575 unsigned long div, value;
165 int err; 576 int err;
166 577
578 drm_vblank_pre_modeset(crtc->dev, dc->pipe);
579
167 err = tegra_crtc_setup_clk(crtc, mode, &div); 580 err = tegra_crtc_setup_clk(crtc, mode, &div);
168 if (err) { 581 if (err) {
169 dev_err(dc->dev, "failed to setup clock for CRTC: %d\n", err); 582 dev_err(dc->dev, "failed to setup clock for CRTC: %d\n", err);
@@ -191,83 +604,33 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
191 tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL); 604 tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
192 605
193 /* setup window parameters */ 606 /* setup window parameters */
194 memset(&win, 0, sizeof(win)); 607 memset(&window, 0, sizeof(window));
195 win.x.full = dfixed_const(0); 608 window.src.x = 0;
196 win.y.full = dfixed_const(0); 609 window.src.y = 0;
197 win.w.full = dfixed_const(mode->hdisplay); 610 window.src.w = mode->hdisplay;
198 win.h.full = dfixed_const(mode->vdisplay); 611 window.src.h = mode->vdisplay;
199 win.outx = 0; 612 window.dst.x = 0;
200 win.outy = 0; 613 window.dst.y = 0;
201 win.outw = mode->hdisplay; 614 window.dst.w = mode->hdisplay;
202 win.outh = mode->vdisplay; 615 window.dst.h = mode->vdisplay;
203 616 window.format = tegra_dc_format(crtc->fb->pixel_format);
204 switch (crtc->fb->pixel_format) { 617 window.bits_per_pixel = crtc->fb->bits_per_pixel;
205 case DRM_FORMAT_XRGB8888: 618 window.stride[0] = crtc->fb->pitches[0];
206 win.fmt = WIN_COLOR_DEPTH_B8G8R8A8; 619 window.base[0] = gem->paddr;
207 break; 620
208 621 err = tegra_dc_setup_window(dc, 0, &window);
209 case DRM_FORMAT_RGB565: 622 if (err < 0)
210 win.fmt = WIN_COLOR_DEPTH_B5G6R5; 623 dev_err(dc->dev, "failed to enable root plane\n");
211 break;
212
213 default:
214 win.fmt = WIN_COLOR_DEPTH_B8G8R8A8;
215 WARN_ON(1);
216 break;
217 }
218
219 bpp = crtc->fb->bits_per_pixel / 8;
220 win.stride = crtc->fb->pitches[0];
221
222 /* program window registers */
223 value = WINDOW_A_SELECT;
224 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
225
226 tegra_dc_writel(dc, win.fmt, DC_WIN_COLOR_DEPTH);
227 tegra_dc_writel(dc, 0, DC_WIN_BYTE_SWAP);
228
229 value = V_POSITION(win.outy) | H_POSITION(win.outx);
230 tegra_dc_writel(dc, value, DC_WIN_POSITION);
231
232 value = V_SIZE(win.outh) | H_SIZE(win.outw);
233 tegra_dc_writel(dc, value, DC_WIN_SIZE);
234
235 value = V_PRESCALED_SIZE(dfixed_trunc(win.h)) |
236 H_PRESCALED_SIZE(dfixed_trunc(win.w) * bpp);
237 tegra_dc_writel(dc, value, DC_WIN_PRESCALED_SIZE);
238
239 h_dda = compute_dda_inc(win.w, win.outw, false, bpp);
240 v_dda = compute_dda_inc(win.h, win.outh, true, bpp);
241
242 value = V_DDA_INC(v_dda) | H_DDA_INC(h_dda);
243 tegra_dc_writel(dc, value, DC_WIN_DDA_INC);
244
245 h_dda = compute_initial_dda(win.x);
246 v_dda = compute_initial_dda(win.y);
247
248 tegra_dc_writel(dc, h_dda, DC_WIN_H_INITIAL_DDA);
249 tegra_dc_writel(dc, v_dda, DC_WIN_V_INITIAL_DDA);
250
251 tegra_dc_writel(dc, 0, DC_WIN_UV_BUF_STRIDE);
252 tegra_dc_writel(dc, 0, DC_WIN_BUF_STRIDE);
253
254 tegra_dc_writel(dc, fb->obj->paddr, DC_WINBUF_START_ADDR);
255 tegra_dc_writel(dc, win.stride, DC_WIN_LINE_STRIDE);
256 tegra_dc_writel(dc, dfixed_trunc(win.x) * bpp,
257 DC_WINBUF_ADDR_H_OFFSET);
258 tegra_dc_writel(dc, dfixed_trunc(win.y), DC_WINBUF_ADDR_V_OFFSET);
259
260 value = WIN_ENABLE;
261
262 if (bpp < 24)
263 value |= COLOR_EXPAND;
264 624
265 tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS); 625 return 0;
626}
266 627
267 tegra_dc_writel(dc, 0xff00, DC_WIN_BLEND_NOKEY); 628static int tegra_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
268 tegra_dc_writel(dc, 0xff00, DC_WIN_BLEND_1WIN); 629 struct drm_framebuffer *old_fb)
630{
631 struct tegra_dc *dc = to_tegra_dc(crtc);
269 632
270 return 0; 633 return tegra_dc_set_base(dc, x, y, crtc->fb);
271} 634}
272 635
273static void tegra_crtc_prepare(struct drm_crtc *crtc) 636static void tegra_crtc_prepare(struct drm_crtc *crtc)
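
Note the two bytes-per-pixel conventions in tegra_dc_setup_window() above: window offsets and prescaled sizes count only the luma plane (1 byte per pixel for planar YUV, 2 for packed YUV422), while the DDA step switches planar YUV back to 2 bytes per pixel. A compact restatement of the first rule, mirroring the branch in the code:

/* bytes per pixel as used for DC_WIN_PRESCALED_SIZE and the offsets */
static unsigned int window_bpp(bool yuv, bool planar,
			       unsigned int bits_per_pixel)
{
	if (!yuv)
		return bits_per_pixel / 8;	/* RGB formats */

	return planar ? 1 : 2;	/* luma only vs. packed YUV422 */
}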
@@ -314,31 +677,24 @@ static void tegra_crtc_prepare(struct drm_crtc *crtc)
314 tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER); 677 tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
315 678
316 value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT; 679 value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
317 tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
318
319 value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
320 tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE); 680 tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
681
682 value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
683 tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
321} 684}
322 685
323static void tegra_crtc_commit(struct drm_crtc *crtc) 686static void tegra_crtc_commit(struct drm_crtc *crtc)
324{ 687{
325 struct tegra_dc *dc = to_tegra_dc(crtc); 688 struct tegra_dc *dc = to_tegra_dc(crtc);
326 unsigned long update_mask;
327 unsigned long value; 689 unsigned long value;
328 690
329 update_mask = GENERAL_ACT_REQ | WIN_A_ACT_REQ; 691 value = GENERAL_UPDATE | WIN_A_UPDATE;
330 692 tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
331 tegra_dc_writel(dc, update_mask << 8, DC_CMD_STATE_CONTROL);
332 693
333 value = tegra_dc_readl(dc, DC_CMD_INT_ENABLE); 694 value = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
334 value |= FRAME_END_INT; 695 tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
335 tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
336
337 value = tegra_dc_readl(dc, DC_CMD_INT_MASK);
338 value |= FRAME_END_INT;
339 tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
340 696
341 tegra_dc_writel(dc, update_mask, DC_CMD_STATE_CONTROL); 697 drm_vblank_post_modeset(crtc->dev, dc->pipe);
342} 698}
343 699
344static void tegra_crtc_load_lut(struct drm_crtc *crtc) 700static void tegra_crtc_load_lut(struct drm_crtc *crtc)
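
tegra_dc_setup_window() and the reworked tegra_crtc_commit() both end with the same two-step write to DC_CMD_STATE_CONTROL: the *_UPDATE bits latch the shadowed register values, then the *_ACT_REQ bits request that they take effect at the next frame boundary. Written out as a hypothetical helper (the driver open-codes the two writes):

static void tegra_dc_latch_and_activate(struct tegra_dc *dc,
					unsigned int index)
{
	/* copy the shadow registers of window <index> ... */
	tegra_dc_writel(dc, WIN_A_UPDATE << index, DC_CMD_STATE_CONTROL);
	/* ... and arm them for the next frame */
	tegra_dc_writel(dc, WIN_A_ACT_REQ << index, DC_CMD_STATE_CONTROL);
}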
@@ -346,15 +702,16 @@ static void tegra_crtc_load_lut(struct drm_crtc *crtc)
346} 702}
347 703
348static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = { 704static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
349 .dpms = tegra_crtc_dpms, 705 .disable = tegra_crtc_disable,
350 .mode_fixup = tegra_crtc_mode_fixup, 706 .mode_fixup = tegra_crtc_mode_fixup,
351 .mode_set = tegra_crtc_mode_set, 707 .mode_set = tegra_crtc_mode_set,
708 .mode_set_base = tegra_crtc_mode_set_base,
352 .prepare = tegra_crtc_prepare, 709 .prepare = tegra_crtc_prepare,
353 .commit = tegra_crtc_commit, 710 .commit = tegra_crtc_commit,
354 .load_lut = tegra_crtc_load_lut, 711 .load_lut = tegra_crtc_load_lut,
355}; 712};
356 713
357static irqreturn_t tegra_drm_irq(int irq, void *data) 714static irqreturn_t tegra_dc_irq(int irq, void *data)
358{ 715{
359 struct tegra_dc *dc = data; 716 struct tegra_dc *dc = data;
360 unsigned long status; 717 unsigned long status;
@@ -373,6 +730,7 @@ static irqreturn_t tegra_drm_irq(int irq, void *data)
373 dev_dbg(dc->dev, "%s(): vertical blank\n", __func__); 730 dev_dbg(dc->dev, "%s(): vertical blank\n", __func__);
374 */ 731 */
375 drm_handle_vblank(dc->base.dev, dc->pipe); 732 drm_handle_vblank(dc->base.dev, dc->pipe);
733 tegra_dc_finish_page_flip(dc);
376 } 734 }
377 735
378 if (status & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT)) { 736 if (status & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT)) {
@@ -587,7 +945,7 @@ static int tegra_dc_show_regs(struct seq_file *s, void *data)
587 DUMP_REG(DC_WIN_BLEND_1WIN); 945 DUMP_REG(DC_WIN_BLEND_1WIN);
588 DUMP_REG(DC_WIN_BLEND_2WIN_X); 946 DUMP_REG(DC_WIN_BLEND_2WIN_X);
589 DUMP_REG(DC_WIN_BLEND_2WIN_Y); 947 DUMP_REG(DC_WIN_BLEND_2WIN_Y);
590 DUMP_REG(DC_WIN_BLEND32WIN_XY); 948 DUMP_REG(DC_WIN_BLEND_3WIN_XY);
591 DUMP_REG(DC_WIN_HP_FETCH_CONTROL); 949 DUMP_REG(DC_WIN_HP_FETCH_CONTROL);
592 DUMP_REG(DC_WINBUF_START_ADDR); 950 DUMP_REG(DC_WINBUF_START_ADDR);
593 DUMP_REG(DC_WINBUF_START_ADDR_NS); 951 DUMP_REG(DC_WINBUF_START_ADDR_NS);
@@ -689,13 +1047,17 @@ static int tegra_dc_drm_init(struct host1x_client *client,
689 return err; 1047 return err;
690 } 1048 }
691 1049
1050 err = tegra_dc_add_planes(drm, dc);
1051 if (err < 0)
1052 return err;
1053
692 if (IS_ENABLED(CONFIG_DEBUG_FS)) { 1054 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
693 err = tegra_dc_debugfs_init(dc, drm->primary); 1055 err = tegra_dc_debugfs_init(dc, drm->primary);
694 if (err < 0) 1056 if (err < 0)
695 dev_err(dc->dev, "debugfs setup failed: %d\n", err); 1057 dev_err(dc->dev, "debugfs setup failed: %d\n", err);
696 } 1058 }
697 1059
698 err = devm_request_irq(dc->dev, dc->irq, tegra_drm_irq, 0, 1060 err = devm_request_irq(dc->dev, dc->irq, tegra_dc_irq, 0,
699 dev_name(dc->dev), dc); 1061 dev_name(dc->dev), dc);
700 if (err < 0) { 1062 if (err < 0) {
701 dev_err(dc->dev, "failed to request IRQ#%u: %d\n", dc->irq, 1063 dev_err(dc->dev, "failed to request IRQ#%u: %d\n", dc->irq,
@@ -744,6 +1106,7 @@ static int tegra_dc_probe(struct platform_device *pdev)
744 if (!dc) 1106 if (!dc)
745 return -ENOMEM; 1107 return -ENOMEM;
746 1108
1109 spin_lock_init(&dc->lock);
747 INIT_LIST_HEAD(&dc->list); 1110 INIT_LIST_HEAD(&dc->list);
748 dc->dev = &pdev->dev; 1111 dc->dev = &pdev->dev;
749 1112
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 99977b5d5c36..79eaec9aac77 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -58,6 +58,8 @@
58#define DC_CMD_SIGNAL_RAISE3 0x03e 58#define DC_CMD_SIGNAL_RAISE3 0x03e
59 59
60#define DC_CMD_STATE_ACCESS 0x040 60#define DC_CMD_STATE_ACCESS 0x040
61#define READ_MUX (1 << 0)
62#define WRITE_MUX (1 << 2)
61 63
62#define DC_CMD_STATE_CONTROL 0x041 64#define DC_CMD_STATE_CONTROL 0x041
63#define GENERAL_ACT_REQ (1 << 0) 65#define GENERAL_ACT_REQ (1 << 0)
@@ -290,8 +292,18 @@
290#define DC_DISP_SD_HW_K_VALUES 0x4dd 292#define DC_DISP_SD_HW_K_VALUES 0x4dd
291#define DC_DISP_SD_MAN_K_VALUES 0x4de 293#define DC_DISP_SD_MAN_K_VALUES 0x4de
292 294
295#define DC_WIN_CSC_YOF 0x611
296#define DC_WIN_CSC_KYRGB 0x612
297#define DC_WIN_CSC_KUR 0x613
298#define DC_WIN_CSC_KVR 0x614
299#define DC_WIN_CSC_KUG 0x615
300#define DC_WIN_CSC_KVG 0x616
301#define DC_WIN_CSC_KUB 0x617
302#define DC_WIN_CSC_KVB 0x618
303
293#define DC_WIN_WIN_OPTIONS 0x700 304#define DC_WIN_WIN_OPTIONS 0x700
294#define COLOR_EXPAND (1 << 6) 305#define COLOR_EXPAND (1 << 6)
306#define CSC_ENABLE (1 << 18)
295#define WIN_ENABLE (1 << 30) 307#define WIN_ENABLE (1 << 30)
296 308
297#define DC_WIN_BYTE_SWAP 0x701 309#define DC_WIN_BYTE_SWAP 0x701
@@ -359,7 +371,7 @@
359#define DC_WIN_BLEND_1WIN 0x710 371#define DC_WIN_BLEND_1WIN 0x710
360#define DC_WIN_BLEND_2WIN_X 0x711 372#define DC_WIN_BLEND_2WIN_X 0x711
361#define DC_WIN_BLEND_2WIN_Y 0x712 373#define DC_WIN_BLEND_2WIN_Y 0x712
362#define DC_WIN_BLEND32WIN_XY 0x713 374#define DC_WIN_BLEND_3WIN_XY 0x713
363 375
364#define DC_WIN_HP_FETCH_CONTROL 0x714 376#define DC_WIN_HP_FETCH_CONTROL 0x714
365 377
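
All offsets in this header, including the new CSC block at 0x611-0x618, are 32-bit word indices rather than byte offsets; the accessors shift them left by two before touching the register window, as the tegra_dc_readl() visible in the drm.h hunk further down shows. The write side is symmetric:

static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value,
				   unsigned long reg)
{
	writel(value, dc->regs + (reg << 2));	/* word index -> byte offset */
}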
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index d980dc75788c..9d452df5bcad 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -39,6 +39,10 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
39 if (err < 0) 39 if (err < 0)
40 return err; 40 return err;
41 41
42 err = drm_vblank_init(drm, drm->mode_config.num_crtc);
43 if (err < 0)
44 return err;
45
42 err = tegra_drm_fb_init(drm); 46 err = tegra_drm_fb_init(drm);
43 if (err < 0) 47 if (err < 0)
44 return err; 48 return err;
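
drm_vblank_init() has to run before any consumer of the vblank machinery; the tegra_dc_page_flip() added in dc.c calls drm_vblank_get(), so the allocation happens early in the load path, before fbdev setup can trigger a modeset. A minimal sketch of the ordering constraint (hypothetical load hook):

static int example_load(struct drm_device *drm)
{
	int err;

	/* allocate per-CRTC vblank counters first ... */
	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (err < 0)
		return err;

	/* ... only then are drm_vblank_get()/drm_handle_vblank() safe */
	return 0;
}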
@@ -88,13 +92,112 @@ static const struct file_operations tegra_drm_fops = {
88 .llseek = noop_llseek, 92 .llseek = noop_llseek,
89}; 93};
90 94
95static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
96{
97 struct drm_crtc *crtc;
98
99 list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
100 struct tegra_dc *dc = to_tegra_dc(crtc);
101
102 if (dc->pipe == pipe)
103 return crtc;
104 }
105
106 return NULL;
107}
108
109static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
110{
111 /* TODO: implement real hardware counter using syncpoints */
112 return drm_vblank_count(dev, crtc);
113}
114
115static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
116{
117 struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
118 struct tegra_dc *dc = to_tegra_dc(crtc);
119
120 if (!crtc)
121 return -ENODEV;
122
123 tegra_dc_enable_vblank(dc);
124
125 return 0;
126}
127
128static void tegra_drm_disable_vblank(struct drm_device *drm, int pipe)
129{
130 struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
131 struct tegra_dc *dc = to_tegra_dc(crtc);
132
133 if (crtc)
134 tegra_dc_disable_vblank(dc);
135}
136
137static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
138{
139 struct drm_crtc *crtc;
140
141 list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
142 tegra_dc_cancel_page_flip(crtc, file);
143}
144
145#ifdef CONFIG_DEBUG_FS
146static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
147{
148 struct drm_info_node *node = (struct drm_info_node *)s->private;
149 struct drm_device *drm = node->minor->dev;
150 struct drm_framebuffer *fb;
151
152 mutex_lock(&drm->mode_config.fb_lock);
153
154 list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
155 seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
156 fb->base.id, fb->width, fb->height, fb->depth,
157 fb->bits_per_pixel,
158 atomic_read(&fb->refcount.refcount));
159 }
160
161 mutex_unlock(&drm->mode_config.fb_lock);
162
163 return 0;
164}
165
166static struct drm_info_list tegra_debugfs_list[] = {
167 { "framebuffers", tegra_debugfs_framebuffers, 0 },
168};
169
170static int tegra_debugfs_init(struct drm_minor *minor)
171{
172 return drm_debugfs_create_files(tegra_debugfs_list,
173 ARRAY_SIZE(tegra_debugfs_list),
174 minor->debugfs_root, minor);
175}
176
177static void tegra_debugfs_cleanup(struct drm_minor *minor)
178{
179 drm_debugfs_remove_files(tegra_debugfs_list,
180 ARRAY_SIZE(tegra_debugfs_list), minor);
181}
182#endif
183
91struct drm_driver tegra_drm_driver = { 184struct drm_driver tegra_drm_driver = {
92 .driver_features = DRIVER_BUS_PLATFORM | DRIVER_MODESET | DRIVER_GEM, 185 .driver_features = DRIVER_BUS_PLATFORM | DRIVER_MODESET | DRIVER_GEM,
93 .load = tegra_drm_load, 186 .load = tegra_drm_load,
94 .unload = tegra_drm_unload, 187 .unload = tegra_drm_unload,
95 .open = tegra_drm_open, 188 .open = tegra_drm_open,
189 .preclose = tegra_drm_preclose,
96 .lastclose = tegra_drm_lastclose, 190 .lastclose = tegra_drm_lastclose,
97 191
192 .get_vblank_counter = tegra_drm_get_vblank_counter,
193 .enable_vblank = tegra_drm_enable_vblank,
194 .disable_vblank = tegra_drm_disable_vblank,
195
196#if defined(CONFIG_DEBUG_FS)
197 .debugfs_init = tegra_debugfs_init,
198 .debugfs_cleanup = tegra_debugfs_cleanup,
199#endif
200
98 .gem_free_object = drm_gem_cma_free_object, 201 .gem_free_object = drm_gem_cma_free_object,
99 .gem_vm_ops = &drm_gem_cma_vm_ops, 202 .gem_vm_ops = &drm_gem_cma_vm_ops,
100 .dumb_create = drm_gem_cma_dumb_create, 203 .dumb_create = drm_gem_cma_dumb_create,
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 741b5dc2742c..6dd75a2600eb 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -18,16 +18,6 @@
18#include <drm/drm_fb_cma_helper.h> 18#include <drm/drm_fb_cma_helper.h>
19#include <drm/drm_fixed.h> 19#include <drm/drm_fixed.h>
20 20
21struct tegra_framebuffer {
22 struct drm_framebuffer base;
23 struct drm_gem_cma_object *obj;
24};
25
26static inline struct tegra_framebuffer *to_tegra_fb(struct drm_framebuffer *fb)
27{
28 return container_of(fb, struct tegra_framebuffer, base);
29}
30
31struct host1x { 21struct host1x {
32 struct drm_device *drm; 22 struct drm_device *drm;
33 struct device *dev; 23 struct device *dev;
@@ -44,7 +34,6 @@ struct host1x {
44 struct list_head clients; 34 struct list_head clients;
45 35
46 struct drm_fbdev_cma *fbdev; 36 struct drm_fbdev_cma *fbdev;
47 struct tegra_framebuffer fb;
48}; 37};
49 38
50struct host1x_client; 39struct host1x_client;
@@ -75,6 +64,7 @@ struct tegra_output;
75 64
76struct tegra_dc { 65struct tegra_dc {
77 struct host1x_client client; 66 struct host1x_client client;
67 spinlock_t lock;
78 68
79 struct host1x *host1x; 69 struct host1x *host1x;
80 struct device *dev; 70 struct device *dev;
@@ -94,6 +84,9 @@ struct tegra_dc {
94 struct drm_info_list *debugfs_files; 84 struct drm_info_list *debugfs_files;
95 struct drm_minor *minor; 85 struct drm_minor *minor;
96 struct dentry *debugfs; 86 struct dentry *debugfs;
87
88 /* page-flip handling */
89 struct drm_pending_vblank_event *event;
97}; 90};
98 91
99static inline struct tegra_dc *host1x_client_to_dc(struct host1x_client *client) 92static inline struct tegra_dc *host1x_client_to_dc(struct host1x_client *client)
@@ -118,6 +111,34 @@ static inline unsigned long tegra_dc_readl(struct tegra_dc *dc,
118 return readl(dc->regs + (reg << 2)); 111 return readl(dc->regs + (reg << 2));
119} 112}
120 113
114struct tegra_dc_window {
115 struct {
116 unsigned int x;
117 unsigned int y;
118 unsigned int w;
119 unsigned int h;
120 } src;
121 struct {
122 unsigned int x;
123 unsigned int y;
124 unsigned int w;
125 unsigned int h;
126 } dst;
127 unsigned int bits_per_pixel;
128 unsigned int format;
129 unsigned int stride[2];
130 unsigned long base[3];
131};
132
133/* from dc.c */
134extern unsigned int tegra_dc_format(uint32_t format);
135extern int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
136 const struct tegra_dc_window *window);
137extern void tegra_dc_enable_vblank(struct tegra_dc *dc);
138extern void tegra_dc_disable_vblank(struct tegra_dc *dc);
139extern void tegra_dc_cancel_page_flip(struct drm_crtc *crtc,
140 struct drm_file *file);
141
121struct tegra_output_ops { 142struct tegra_output_ops {
122 int (*enable)(struct tegra_output *output); 143 int (*enable)(struct tegra_output *output);
123 int (*disable)(struct tegra_output *output); 144 int (*disable)(struct tegra_output *output);
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 97993c6835fd..03914953cb1c 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -39,10 +39,6 @@ int tegra_drm_fb_init(struct drm_device *drm)
39 if (IS_ERR(fbdev)) 39 if (IS_ERR(fbdev))
40 return PTR_ERR(fbdev); 40 return PTR_ERR(fbdev);
41 41
42#ifndef CONFIG_FRAMEBUFFER_CONSOLE
43 drm_fbdev_cma_restore_mode(fbdev);
44#endif
45
46 host1x->fbdev = fbdev; 42 host1x->fbdev = fbdev;
47 43
48 return 0; 44 return 0;
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index d4f3fb9f0c29..bb747f6cd1a4 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -10,12 +10,15 @@
10#include <linux/clk.h> 10#include <linux/clk.h>
11#include <linux/debugfs.h> 11#include <linux/debugfs.h>
12#include <linux/gpio.h> 12#include <linux/gpio.h>
13#include <linux/hdmi.h>
13#include <linux/module.h> 14#include <linux/module.h>
14#include <linux/of.h> 15#include <linux/of.h>
15#include <linux/platform_device.h> 16#include <linux/platform_device.h>
16#include <linux/regulator/consumer.h> 17#include <linux/regulator/consumer.h>
17#include <linux/clk/tegra.h> 18#include <linux/clk/tegra.h>
18 19
20#include <drm/drm_edid.h>
21
19#include "hdmi.h" 22#include "hdmi.h"
20#include "drm.h" 23#include "drm.h"
21#include "dc.h" 24#include "dc.h"
@@ -400,54 +403,65 @@ static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk)
400 return 0; 403 return 0;
401} 404}
402 405
403static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi, 406static inline unsigned long tegra_hdmi_subpack(const u8 *ptr, size_t size)
404 unsigned int offset, u8 type,
405 u8 version, void *data, size_t size)
406{ 407{
407 unsigned long value; 408 unsigned long value = 0;
408 u8 *ptr = data;
409 u32 subpack[2];
410 size_t i; 409 size_t i;
411 u8 csum;
412 410
413 /* first byte of data is the checksum */ 411 for (i = size; i > 0; i--)
414 csum = type + version + size - 1; 412 value = (value << 8) | ptr[i - 1];
415 413
416 for (i = 1; i < size; i++) 414 return value;
417 csum += ptr[i]; 415}
418 416
419 ptr[0] = 0x100 - csum; 417static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi, const void *data,
418 size_t size)
419{
420 const u8 *ptr = data;
421 unsigned long offset;
422 unsigned long value;
423 size_t i, j;
420 424
421 value = INFOFRAME_HEADER_TYPE(type) | 425 switch (ptr[0]) {
422 INFOFRAME_HEADER_VERSION(version) | 426 case HDMI_INFOFRAME_TYPE_AVI:
423 INFOFRAME_HEADER_LEN(size - 1); 427 offset = HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER;
424 tegra_hdmi_writel(hdmi, value, offset); 428 break;
425 429
426 /* The audio infoframe only has one set of subpack registers. The hdmi 430 case HDMI_INFOFRAME_TYPE_AUDIO:
427 * block pads the rest of the data as per the spec so we have to fixup 431 offset = HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER;
428 * the length before filling in the subpacks. 432 break;
429 */
430 if (offset == HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER)
431 size = 6;
432 433
433 /* each subpack 7 bytes divided into: 434 case HDMI_INFOFRAME_TYPE_VENDOR:
434 * subpack_low - bytes 0 - 3 435 offset = HDMI_NV_PDISP_HDMI_GENERIC_HEADER;
435 * subpack_high - bytes 4 - 6 (with byte 7 padded to 0x00) 436 break;
436 */ 437
437 for (i = 0; i < size; i++) { 438 default:
438 size_t index = i % 7; 439 dev_err(hdmi->dev, "unsupported infoframe type: %02x\n",
440 ptr[0]);
441 return;
442 }
443
444 value = INFOFRAME_HEADER_TYPE(ptr[0]) |
445 INFOFRAME_HEADER_VERSION(ptr[1]) |
446 INFOFRAME_HEADER_LEN(ptr[2]);
447 tegra_hdmi_writel(hdmi, value, offset);
448 offset++;
439 449
440 if (index == 0) 450 /*
441 memset(subpack, 0x0, sizeof(subpack)); 451 * Each subpack contains 7 bytes, divided into:
452 * - subpack_low: bytes 0 - 3
453 * - subpack_high: bytes 4 - 6 (with byte 7 padded to 0x00)
454 */
455 for (i = 3, j = 0; i < size; i += 7, j += 8) {
456 size_t rem = size - i, num = min_t(size_t, rem, 4);
442 457
443 ((u8 *)subpack)[index] = ptr[i]; 458 value = tegra_hdmi_subpack(&ptr[i], num);
459 tegra_hdmi_writel(hdmi, value, offset++);
444 460
445 if (index == 6 || (i + 1 == size)) { 461 num = min_t(size_t, rem - num, 3);
446 unsigned int reg = offset + 1 + (i / 7) * 2;
447 462
448 tegra_hdmi_writel(hdmi, subpack[0], reg); 463 value = tegra_hdmi_subpack(&ptr[i + 4], num);
449 tegra_hdmi_writel(hdmi, subpack[1], reg + 1); 464 tegra_hdmi_writel(hdmi, value, offset++);
450 }
451 } 465 }
452} 466}
453 467
@@ -455,9 +469,8 @@ static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
455 struct drm_display_mode *mode) 469 struct drm_display_mode *mode)
456{ 470{
457 struct hdmi_avi_infoframe frame; 471 struct hdmi_avi_infoframe frame;
458 unsigned int h_front_porch; 472 u8 buffer[17];
459 unsigned int hsize = 16; 473 ssize_t err;
460 unsigned int vsize = 9;
461 474
462 if (hdmi->dvi) { 475 if (hdmi->dvi) {
463 tegra_hdmi_writel(hdmi, 0, 476 tegra_hdmi_writel(hdmi, 0,
@@ -465,69 +478,19 @@ static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
465 return; 478 return;
466 } 479 }
467 480
468 h_front_porch = mode->hsync_start - mode->hdisplay; 481 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
469 memset(&frame, 0, sizeof(frame)); 482 if (err < 0) {
470 frame.r = HDMI_AVI_R_SAME; 483 dev_err(hdmi->dev, "failed to setup AVI infoframe: %zd\n", err);
471 484 return;
472 switch (mode->vdisplay) { 485 }
473 case 480:
474 if (mode->hdisplay == 640) {
475 frame.m = HDMI_AVI_M_4_3;
476 frame.vic = 1;
477 } else {
478 frame.m = HDMI_AVI_M_16_9;
479 frame.vic = 3;
480 }
481 break;
482
483 case 576:
484 if (((hsize * 10) / vsize) > 14) {
485 frame.m = HDMI_AVI_M_16_9;
486 frame.vic = 18;
487 } else {
488 frame.m = HDMI_AVI_M_4_3;
489 frame.vic = 17;
490 }
491 break;
492
493 case 720:
494 case 1470: /* stereo mode */
495 frame.m = HDMI_AVI_M_16_9;
496
497 if (h_front_porch == 110)
498 frame.vic = 4;
499 else
500 frame.vic = 19;
501 break;
502
503 case 1080:
504 case 2205: /* stereo mode */
505 frame.m = HDMI_AVI_M_16_9;
506
507 switch (h_front_porch) {
508 case 88:
509 frame.vic = 16;
510 break;
511
512 case 528:
513 frame.vic = 31;
514 break;
515
516 default:
517 frame.vic = 32;
518 break;
519 }
520 break;
521 486
522 default: 487 err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
523 frame.m = HDMI_AVI_M_16_9; 488 if (err < 0) {
524 frame.vic = 0; 489 dev_err(hdmi->dev, "failed to pack AVI infoframe: %zd\n", err);
525 break; 490 return;
526 } 491 }
527 492
528 tegra_hdmi_write_infopack(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER, 493 tegra_hdmi_write_infopack(hdmi, buffer, err);
529 HDMI_INFOFRAME_TYPE_AVI, HDMI_AVI_VERSION,
530 &frame, sizeof(frame));
531 494
532 tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE, 495 tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
533 HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL); 496 HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
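
The manual checksum computation the old tegra_hdmi_write_infopack() performed (ptr[0] = 0x100 - csum in the removed lines above) now lives inside hdmi_avi_infoframe_pack() and friends: the helpers fill in the checksum byte so that all infoframe bytes sum to zero modulo 256, and return the total number of bytes written. Equivalent standalone logic, for reference:

#include <stdint.h>
#include <stddef.h>

static uint8_t infoframe_checksum(const uint8_t *ptr, size_t size)
{
	uint8_t csum = 0;
	size_t i;

	/* the checksum byte itself is held at 0 while summing */
	for (i = 0; i < size; i++)
		csum += ptr[i];

	return 0x100 - csum;	/* makes the final sum 0 mod 256 */
}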
@@ -536,6 +499,8 @@ static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
536static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi) 499static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
537{ 500{
538 struct hdmi_audio_infoframe frame; 501 struct hdmi_audio_infoframe frame;
502 u8 buffer[14];
503 ssize_t err;
539 504
540 if (hdmi->dvi) { 505 if (hdmi->dvi) {
541 tegra_hdmi_writel(hdmi, 0, 506 tegra_hdmi_writel(hdmi, 0,
@@ -543,14 +508,29 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
543 return; 508 return;
544 } 509 }
545 510
546 memset(&frame, 0, sizeof(frame)); 511 err = hdmi_audio_infoframe_init(&frame);
547 frame.cc = HDMI_AUDIO_CC_2; 512 if (err < 0) {
513 dev_err(hdmi->dev, "failed to initialize audio infoframe: %d\n",
514 err);
515 return;
516 }
517
518 frame.channels = 2;
519
520 err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
521 if (err < 0) {
522 dev_err(hdmi->dev, "failed to pack audio infoframe: %zd\n",
523 err);
524 return;
525 }
548 526
549 tegra_hdmi_write_infopack(hdmi, 527 /*
550 HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER, 528 * The audio infoframe has only one set of subpack registers, so the
551 HDMI_INFOFRAME_TYPE_AUDIO, 529 * infoframe needs to be truncated. One set of subpack registers can
552 HDMI_AUDIO_VERSION, 530 * contain 7 bytes. Including the 3 byte header only the first 10
553 &frame, sizeof(frame)); 531 * bytes can be programmed.
532 */
533 tegra_hdmi_write_infopack(hdmi, buffer, min(10, err));
554 534
555 tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE, 535 tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
556 HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL); 536 HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
@@ -558,8 +538,10 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
558 538
559static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi) 539static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
560{ 540{
561 struct hdmi_stereo_infoframe frame; 541 struct hdmi_vendor_infoframe frame;
562 unsigned long value; 542 unsigned long value;
543 u8 buffer[10];
544 ssize_t err;
563 545
564 if (!hdmi->stereo) { 546 if (!hdmi->stereo) {
565 value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); 547 value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
@@ -569,22 +551,32 @@ static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
569 } 551 }
570 552
571 memset(&frame, 0, sizeof(frame)); 553 memset(&frame, 0, sizeof(frame));
572 frame.regid0 = 0x03; 554
573 frame.regid1 = 0x0c; 555 frame.type = HDMI_INFOFRAME_TYPE_VENDOR;
574 frame.regid2 = 0x00; 556 frame.version = 0x01;
575 frame.hdmi_video_format = 2; 557 frame.length = 6;
558
559 frame.data[0] = 0x03; /* regid0 */
560 frame.data[1] = 0x0c; /* regid1 */
561 frame.data[2] = 0x00; /* regid2 */
562 frame.data[3] = 0x02 << 5; /* video format */
576 563
577 /* TODO: 74 MHz limit? */ 564 /* TODO: 74 MHz limit? */
578 if (1) { 565 if (1) {
579 frame._3d_structure = 0; 566 frame.data[4] = 0x00 << 4; /* 3D structure */
580 } else { 567 } else {
581 frame._3d_structure = 8; 568 frame.data[4] = 0x08 << 4; /* 3D structure */
582 frame._3d_ext_data = 0; 569 frame.data[5] = 0x00 << 4; /* 3D ext. data */
570 }
571
572 err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
573 if (err < 0) {
574 dev_err(hdmi->dev, "failed to pack vendor infoframe: %zd\n",
575 err);
576 return;
583 } 577 }
584 578
585 tegra_hdmi_write_infopack(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_HEADER, 579 tegra_hdmi_write_infopack(hdmi, buffer, err);
586 HDMI_INFOFRAME_TYPE_VENDOR,
587 HDMI_VENDOR_VERSION, &frame, 6);
588 580
589 value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); 581 value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
590 value |= GENERIC_CTRL_ENABLE; 582 value |= GENERIC_CTRL_ENABLE;
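
The three register-ID bytes written above are the HDMI Licensing LLC IEEE OUI (0x000C03) in the least-significant-byte-first order the vendor infoframe expects; byte 4 carries HDMI_Video_Format in its top three bits (hence the << 5) and byte 5 the 3D_Structure code in its top four bits. The payload the code builds, spelled out:

#include <stdint.h>

uint8_t payload[6] = {
	0x03, 0x0c, 0x00,	/* IEEE OUI 0x000C03, LSB first */
	0x02 << 5,		/* HDMI_Video_Format = 2: 3D field present */
	0x00 << 4,		/* 3D_Structure = 0: frame packing */
	0x00 << 4,		/* 3D_Ext_Data, used only for structures >= 8 */
};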
diff --git a/drivers/gpu/drm/tegra/hdmi.h b/drivers/gpu/drm/tegra/hdmi.h
index 1477f36eb45a..52ac36e08ccb 100644
--- a/drivers/gpu/drm/tegra/hdmi.h
+++ b/drivers/gpu/drm/tegra/hdmi.h
@@ -10,195 +10,6 @@
10#ifndef TEGRA_HDMI_H 10#ifndef TEGRA_HDMI_H
11#define TEGRA_HDMI_H 1 11#define TEGRA_HDMI_H 1
12 12
13#define HDMI_INFOFRAME_TYPE_VENDOR 0x81
14#define HDMI_INFOFRAME_TYPE_AVI 0x82
15#define HDMI_INFOFRAME_TYPE_SPD 0x83
16#define HDMI_INFOFRAME_TYPE_AUDIO 0x84
17#define HDMI_INFOFRAME_TYPE_MPEG_SRC 0x85
18#define HDMI_INFOFRAME_TYPE_NTSC_VBI 0x86
19
20/* all fields little endian */
21struct hdmi_avi_infoframe {
22 /* PB0 */
23 u8 csum;
24
25 /* PB1 */
26 unsigned s:2; /* scan information */
27 unsigned b:2; /* bar info data valid */
28 unsigned a:1; /* active info present */
29 unsigned y:2; /* RGB or YCbCr */
30 unsigned res1:1;
31
32 /* PB2 */
33 unsigned r:4; /* active format aspect ratio */
34 unsigned m:2; /* picture aspect ratio */
35 unsigned c:2; /* colorimetry */
36
37 /* PB3 */
38 unsigned sc:2; /* scan information */
39 unsigned q:2; /* quantization range */
40 unsigned ec:3; /* extended colorimetry */
41 unsigned itc:1; /* it content */
42
43 /* PB4 */
44 unsigned vic:7; /* video format id code */
45 unsigned res4:1;
46
47 /* PB5 */
48 unsigned pr:4; /* pixel repetition factor */
49 unsigned cn:2; /* it content type*/
50 unsigned yq:2; /* ycc quantization range */
51
52 /* PB6-7 */
53 u16 top_bar_end_line;
54
55 /* PB8-9 */
56 u16 bot_bar_start_line;
57
58 /* PB10-11 */
59 u16 left_bar_end_pixel;
60
61 /* PB12-13 */
62 u16 right_bar_start_pixel;
63} __packed;
64
65#define HDMI_AVI_VERSION 0x02
66
67#define HDMI_AVI_Y_RGB 0x0
68#define HDMI_AVI_Y_YCBCR_422 0x1
69#define HDMI_AVI_Y_YCBCR_444 0x2
70
71#define HDMI_AVI_B_VERT 0x1
72#define HDMI_AVI_B_HORIZ 0x2
73
74#define HDMI_AVI_S_NONE 0x0
75#define HDMI_AVI_S_OVERSCAN 0x1
76#define HDMI_AVI_S_UNDERSCAN 0x2
77
78#define HDMI_AVI_C_NONE 0x0
79#define HDMI_AVI_C_SMPTE 0x1
80#define HDMI_AVI_C_ITU_R 0x2
81#define HDMI_AVI_C_EXTENDED 0x4
82
83#define HDMI_AVI_M_4_3 0x1
84#define HDMI_AVI_M_16_9 0x2
85
86#define HDMI_AVI_R_SAME 0x8
87#define HDMI_AVI_R_4_3_CENTER 0x9
88#define HDMI_AVI_R_16_9_CENTER 0xa
89#define HDMI_AVI_R_14_9_CENTER 0xb
90
91/* all fields little endian */
92struct hdmi_audio_infoframe {
93 /* PB0 */
94 u8 csum;
95
96 /* PB1 */
97 unsigned cc:3; /* channel count */
98 unsigned res1:1;
99 unsigned ct:4; /* coding type */
100
101 /* PB2 */
102 unsigned ss:2; /* sample size */
103 unsigned sf:3; /* sample frequency */
104 unsigned res2:3;
105
106 /* PB3 */
107 unsigned cxt:5; /* coding extension type */
108 unsigned res3:3;
109
110 /* PB4 */
111 u8 ca; /* channel/speaker allocation */
112
113 /* PB5 */
114 unsigned res5:3;
115 unsigned lsv:4; /* level shift value */
116 unsigned dm_inh:1; /* downmix inhibit */
117
118 /* PB6-10 reserved */
119 u8 res6;
120 u8 res7;
121 u8 res8;
122 u8 res9;
123 u8 res10;
124} __packed;
125
126#define HDMI_AUDIO_VERSION 0x01
127
128#define HDMI_AUDIO_CC_STREAM 0x0 /* specified by audio stream */
129#define HDMI_AUDIO_CC_2 0x1
130#define HDMI_AUDIO_CC_3 0x2
131#define HDMI_AUDIO_CC_4 0x3
132#define HDMI_AUDIO_CC_5 0x4
133#define HDMI_AUDIO_CC_6 0x5
134#define HDMI_AUDIO_CC_7 0x6
135#define HDMI_AUDIO_CC_8 0x7
136
137#define HDMI_AUDIO_CT_STREAM 0x0 /* specified by audio stream */
138#define HDMI_AUDIO_CT_PCM 0x1
139#define HDMI_AUDIO_CT_AC3 0x2
140#define HDMI_AUDIO_CT_MPEG1 0x3
141#define HDMI_AUDIO_CT_MP3 0x4
142#define HDMI_AUDIO_CT_MPEG2 0x5
143#define HDMI_AUDIO_CT_AAC_LC 0x6
144#define HDMI_AUDIO_CT_DTS 0x7
145#define HDMI_AUDIO_CT_ATRAC 0x8
146#define HDMI_AUDIO_CT_DSD 0x9
147#define HDMI_AUDIO_CT_E_AC3 0xa
148#define HDMI_AUDIO_CT_DTS_HD 0xb
149#define HDMI_AUDIO_CT_MLP 0xc
150#define HDMI_AUDIO_CT_DST 0xd
151#define HDMI_AUDIO_CT_WMA_PRO 0xe
152#define HDMI_AUDIO_CT_CXT 0xf
153
154#define HDMI_AUDIO_SF_STREAM 0x0 /* specified by audio stream */
155#define HDMI_AUIDO_SF_32K 0x1
156#define HDMI_AUDIO_SF_44_1K 0x2
157#define HDMI_AUDIO_SF_48K 0x3
158#define HDMI_AUDIO_SF_88_2K 0x4
159#define HDMI_AUDIO_SF_96K 0x5
160#define HDMI_AUDIO_SF_176_4K 0x6
161#define HDMI_AUDIO_SF_192K 0x7
162
163#define HDMI_AUDIO_SS_STREAM 0x0 /* specified by audio stream */
164#define HDMI_AUDIO_SS_16BIT 0x1
165#define HDMI_AUDIO_SS_20BIT 0x2
166#define HDMI_AUDIO_SS_24BIT 0x3
167
168#define HDMI_AUDIO_CXT_CT 0x0 /* refer to coding in CT */
169#define HDMI_AUDIO_CXT_HE_AAC 0x1
170#define HDMI_AUDIO_CXT_HE_AAC_V2 0x2
171#define HDMI_AUDIO_CXT_MPEG_SURROUND 0x3
172
173/* all fields little endian */
174struct hdmi_stereo_infoframe {
175 /* PB0 */
176 u8 csum;
177
178 /* PB1 */
179 u8 regid0;
180
181 /* PB2 */
182 u8 regid1;
183
184 /* PB3 */
185 u8 regid2;
186
187 /* PB4 */
188 unsigned res1:5;
189 unsigned hdmi_video_format:3;
190
191 /* PB5 */
192 unsigned res2:4;
193 unsigned _3d_structure:4;
194
195 /* PB6*/
196 unsigned res3:4;
197 unsigned _3d_ext_data:4;
198} __packed;
199
200#define HDMI_VENDOR_VERSION 0x01
201
202/* register definitions */ 13/* register definitions */
203#define HDMI_CTXSW 0x00 14#define HDMI_CTXSW 0x00
204 15
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
new file mode 100644
index 000000000000..d24d04013476
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -0,0 +1,13 @@
1config DRM_TILCDC
2 tristate "DRM Support for TI LCDC Display Controller"
3 depends on DRM && OF && ARM
4 select DRM_KMS_HELPER
5 select DRM_KMS_CMA_HELPER
6 select DRM_GEM_CMA_HELPER
7 select OF_VIDEOMODE
8 select OF_DISPLAY_TIMING
9 select BACKLIGHT_CLASS_DEVICE
10 help
11 Choose this option if you have a TI SoC with LCDC display
12 controller, for example AM33xx in beagle-bone, DA8xx, or
13 OMAP-L1xx. This driver replaces the FB_DA8XX fbdev driver.
diff --git a/drivers/gpu/drm/tilcdc/Makefile b/drivers/gpu/drm/tilcdc/Makefile
new file mode 100644
index 000000000000..deda656b10e7
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/Makefile
@@ -0,0 +1,10 @@
1ccflags-y := -Iinclude/drm -Werror
2
3tilcdc-y := \
4 tilcdc_crtc.o \
5 tilcdc_tfp410.o \
6 tilcdc_slave.o \
7 tilcdc_panel.o \
8 tilcdc_drv.o
9
10obj-$(CONFIG_DRM_TILCDC) += tilcdc.o
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
new file mode 100644
index 000000000000..5dd3c7d031d5
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -0,0 +1,602 @@
1/*
2 * Copyright (C) 2012 Texas Instruments
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/kfifo.h>
19
20#include "tilcdc_drv.h"
21#include "tilcdc_regs.h"
22
23struct tilcdc_crtc {
24 struct drm_crtc base;
25
26 const struct tilcdc_panel_info *info;
27 uint32_t dirty;
28 dma_addr_t start, end;
29 struct drm_pending_vblank_event *event;
30 int dpms;
31 wait_queue_head_t frame_done_wq;
32 bool frame_done;
33
34 /* fb currently set to scanout 0/1: */
35 struct drm_framebuffer *scanout[2];
36
37 /* for deferred fb unref's: */
38 DECLARE_KFIFO_PTR(unref_fifo, struct drm_framebuffer *);
39 struct work_struct work;
40};
41#define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base)
42
43static void unref_worker(struct work_struct *work)
44{
45 struct tilcdc_crtc *tilcdc_crtc = container_of(work, struct tilcdc_crtc, work);
46 struct drm_device *dev = tilcdc_crtc->base.dev;
47 struct drm_framebuffer *fb;
48
49 mutex_lock(&dev->mode_config.mutex);
50 while (kfifo_get(&tilcdc_crtc->unref_fifo, &fb))
51 drm_framebuffer_unreference(fb);
52 mutex_unlock(&dev->mode_config.mutex);
53}
54
55static void set_scanout(struct drm_crtc *crtc, int n)
56{
57 static const uint32_t base_reg[] = {
58 LCDC_DMA_FB_BASE_ADDR_0_REG, LCDC_DMA_FB_BASE_ADDR_1_REG,
59 };
60 static const uint32_t ceil_reg[] = {
61 LCDC_DMA_FB_CEILING_ADDR_0_REG, LCDC_DMA_FB_CEILING_ADDR_1_REG,
62 };
63 static const uint32_t stat[] = {
64 LCDC_END_OF_FRAME0, LCDC_END_OF_FRAME1,
65 };
66 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
67 struct drm_device *dev = crtc->dev;
68
69 pm_runtime_get_sync(dev->dev);
70 tilcdc_write(dev, base_reg[n], tilcdc_crtc->start);
71 tilcdc_write(dev, ceil_reg[n], tilcdc_crtc->end);
72 if (tilcdc_crtc->scanout[n]) {
73 if (kfifo_put(&tilcdc_crtc->unref_fifo,
74 (const struct drm_framebuffer **)&tilcdc_crtc->scanout[n])) {
75 struct tilcdc_drm_private *priv = dev->dev_private;
76 queue_work(priv->wq, &tilcdc_crtc->work);
77 } else {
78 dev_err(dev->dev, "unref fifo full!\n");
79 drm_framebuffer_unreference(tilcdc_crtc->scanout[n]);
80 }
81 }
82 tilcdc_crtc->scanout[n] = crtc->fb;
83 drm_framebuffer_reference(tilcdc_crtc->scanout[n]);
84 tilcdc_crtc->dirty &= ~stat[n];
85 pm_runtime_put_sync(dev->dev);
86}
87
88static void update_scanout(struct drm_crtc *crtc)
89{
90 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
91 struct drm_device *dev = crtc->dev;
92 struct drm_framebuffer *fb = crtc->fb;
93 struct drm_gem_cma_object *gem;
94 unsigned int depth, bpp;
95
96 drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
97 gem = drm_fb_cma_get_gem_obj(fb, 0);
98
99 tilcdc_crtc->start = gem->paddr + fb->offsets[0] +
100 (crtc->y * fb->pitches[0]) + (crtc->x * bpp/8);
101
102 tilcdc_crtc->end = tilcdc_crtc->start +
103 (crtc->mode.vdisplay * fb->pitches[0]);
104
105 if (tilcdc_crtc->dpms == DRM_MODE_DPMS_ON) {
106 /* already enabled, so just mark the frames that need
107 * updating and they will be updated on vblank:
108 */
109 tilcdc_crtc->dirty |= LCDC_END_OF_FRAME0 | LCDC_END_OF_FRAME1;
110 drm_vblank_get(dev, 0);
111 } else {
112 /* not enabled yet, so update registers immediately: */
113 set_scanout(crtc, 0);
114 set_scanout(crtc, 1);
115 }
116}
117
118static void start(struct drm_crtc *crtc)
119{
120 struct drm_device *dev = crtc->dev;
121 struct tilcdc_drm_private *priv = dev->dev_private;
122
123 if (priv->rev == 2) {
124 tilcdc_set(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
125 msleep(1);
126 tilcdc_clear(dev, LCDC_CLK_RESET_REG, LCDC_CLK_MAIN_RESET);
127 msleep(1);
128 }
129
130 tilcdc_set(dev, LCDC_DMA_CTRL_REG, LCDC_DUAL_FRAME_BUFFER_ENABLE);
131 tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_PALETTE_LOAD_MODE(DATA_ONLY));
132 tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
133}
134
135static void stop(struct drm_crtc *crtc)
136{
137 struct drm_device *dev = crtc->dev;
138
139 tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
140}
141
142static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
143{
144 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
145
146 WARN_ON(tilcdc_crtc->dpms == DRM_MODE_DPMS_ON);
147
148 drm_crtc_cleanup(crtc);
149 WARN_ON(!kfifo_is_empty(&tilcdc_crtc->unref_fifo));
150 kfifo_free(&tilcdc_crtc->unref_fifo);
151 kfree(tilcdc_crtc);
152}
153
154static int tilcdc_crtc_page_flip(struct drm_crtc *crtc,
155 struct drm_framebuffer *fb,
156 struct drm_pending_vblank_event *event)
157{
158 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
159 struct drm_device *dev = crtc->dev;
160
161 if (tilcdc_crtc->event) {
162 dev_err(dev->dev, "already pending page flip!\n");
163 return -EBUSY;
164 }
165
166 crtc->fb = fb;
167 tilcdc_crtc->event = event;
168 update_scanout(crtc);
169
170 return 0;
171}
172
173static void tilcdc_crtc_dpms(struct drm_crtc *crtc, int mode)
174{
175 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
176 struct drm_device *dev = crtc->dev;
177 struct tilcdc_drm_private *priv = dev->dev_private;
178
179 /* we really only care about on or off: */
180 if (mode != DRM_MODE_DPMS_ON)
181 mode = DRM_MODE_DPMS_OFF;
182
183 if (tilcdc_crtc->dpms == mode)
184 return;
185
186 tilcdc_crtc->dpms = mode;
187
188 pm_runtime_get_sync(dev->dev);
189
190 if (mode == DRM_MODE_DPMS_ON) {
191 pm_runtime_forbid(dev->dev);
192 start(crtc);
193 } else {
194 tilcdc_crtc->frame_done = false;
195 stop(crtc);
196
197 /* if necessary wait for framedone irq which will still come
198 * before putting things to sleep..
199 */
200 if (priv->rev == 2) {
201 int ret = wait_event_timeout(
202 tilcdc_crtc->frame_done_wq,
203 tilcdc_crtc->frame_done,
204 msecs_to_jiffies(50));
205 if (ret == 0)
206 dev_err(dev->dev, "timeout waiting for framedone\n");
207 }
208 pm_runtime_allow(dev->dev);
209 }
210
211 pm_runtime_put_sync(dev->dev);
212}
213
214static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
215 const struct drm_display_mode *mode,
216 struct drm_display_mode *adjusted_mode)
217{
218 return true;
219}
220
221static void tilcdc_crtc_prepare(struct drm_crtc *crtc)
222{
223 tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
224}
225
226static void tilcdc_crtc_commit(struct drm_crtc *crtc)
227{
228 tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
229}
230
231static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
232 struct drm_display_mode *mode,
233 struct drm_display_mode *adjusted_mode,
234 int x, int y,
235 struct drm_framebuffer *old_fb)
236{
237 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
238 struct drm_device *dev = crtc->dev;
239 struct tilcdc_drm_private *priv = dev->dev_private;
240 const struct tilcdc_panel_info *info = tilcdc_crtc->info;
241 uint32_t reg, hbp, hfp, hsw, vbp, vfp, vsw;
242 int ret;
243
244 ret = tilcdc_crtc_mode_valid(crtc, mode);
245 if (WARN_ON(ret))
246 return ret;
247
248 if (WARN_ON(!info))
249 return -EINVAL;
250
251 pm_runtime_get_sync(dev->dev);
252
253 /* Configure the Burst Size and fifo threshold of DMA: */
254 reg = tilcdc_read(dev, LCDC_DMA_CTRL_REG) & ~0x00000770;
255 switch (info->dma_burst_sz) {
256 case 1:
257 reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_1);
258 break;
259 case 2:
260 reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_2);
261 break;
262 case 4:
263 reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_4);
264 break;
265 case 8:
266 reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_8);
267 break;
268 case 16:
269 reg |= LCDC_DMA_BURST_SIZE(LCDC_DMA_BURST_16);
270 break;
271 default:
272 return -EINVAL;
273 }
274 reg |= (info->fifo_th << 8);
275 tilcdc_write(dev, LCDC_DMA_CTRL_REG, reg);
276
277 /* Configure timings: */
278 hbp = mode->htotal - mode->hsync_end;
279 hfp = mode->hsync_start - mode->hdisplay;
280 hsw = mode->hsync_end - mode->hsync_start;
281 vbp = mode->vtotal - mode->vsync_end;
282 vfp = mode->vsync_start - mode->vdisplay;
283 vsw = mode->vsync_end - mode->vsync_start;
284
285 DBG("%dx%d, hbp=%u, hfp=%u, hsw=%u, vbp=%u, vfp=%u, vsw=%u",
286 mode->hdisplay, mode->vdisplay, hbp, hfp, hsw, vbp, vfp, vsw);
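	/* Worked example (editorial): for the standard VESA 800x600@60 mode
	 * (hdisplay 800, hsync 840-968, htotal 1056; vdisplay 600,
	 * vsync 601-605, vtotal 628) the formulas above give hfp=40,
	 * hsw=128, hbp=88 and vfp=1, vsw=4, vbp=23.
	 */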
287
288 /* Configure the AC Bias Period and Number of Transitions per Interrupt: */
289 reg = tilcdc_read(dev, LCDC_RASTER_TIMING_2_REG) & ~0x000fff00;
290 reg |= LCDC_AC_BIAS_FREQUENCY(info->ac_bias) |
291 LCDC_AC_BIAS_TRANSITIONS_PER_INT(info->ac_bias_intrpt);
292 if (priv->rev == 2) {
293 reg |= (hfp & 0x300) >> 8;
294 reg |= (hbp & 0x300) >> 4;
295 reg |= (hsw & 0x3c0) << 21;
296 }
297 tilcdc_write(dev, LCDC_RASTER_TIMING_2_REG, reg);
298
299 reg = (((mode->hdisplay >> 4) - 1) << 4) |
300 ((hbp & 0xff) << 24) |
301 ((hfp & 0xff) << 16) |
302 ((hsw & 0x3f) << 10);
303 if (priv->rev == 2)
304 reg |= (((mode->hdisplay >> 4) - 1) & 0x40) >> 3;
305 tilcdc_write(dev, LCDC_RASTER_TIMING_0_REG, reg);
306
307 reg = ((mode->vdisplay - 1) & 0x3ff) |
308 ((vbp & 0xff) << 24) |
309 ((vfp & 0xff) << 16) |
310 ((vsw & 0x3f) << 10);
311 tilcdc_write(dev, LCDC_RASTER_TIMING_1_REG, reg);
312
313 /* Configure display type: */
314 reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG) &
315 ~(LCDC_TFT_MODE | LCDC_MONO_8BIT_MODE | LCDC_MONOCHROME_MODE |
316 LCDC_V2_TFT_24BPP_MODE | LCDC_V2_TFT_24BPP_UNPACK | 0x000ff000);
317 reg |= LCDC_TFT_MODE; /* no monochrome/passive support */
318 if (info->tft_alt_mode)
319 reg |= LCDC_TFT_ALT_ENABLE;
320 if (priv->rev == 2) {
321 unsigned int depth, bpp;
322
323 drm_fb_get_bpp_depth(crtc->fb->pixel_format, &depth, &bpp);
324 switch (bpp) {
325 case 16:
326 break;
327 case 32:
328 reg |= LCDC_V2_TFT_24BPP_UNPACK;
329 /* fallthrough */
330 case 24:
331 reg |= LCDC_V2_TFT_24BPP_MODE;
332 break;
333 default:
334 dev_err(dev->dev, "invalid pixel format\n");
335 return -EINVAL;
336 }
337 }
338 reg |= info->fdd << 12; /* FIFO DMA request delay (bits 12-19) */
339 tilcdc_write(dev, LCDC_RASTER_CTRL_REG, reg);
340
341 if (info->invert_pxl_clk)
342 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);
343 else
344 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_PIXEL_CLOCK);
345
346 if (info->sync_ctrl)
347 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL);
348 else
349 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_CTRL);
350
351 if (info->sync_edge)
352 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
353 else
354 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
355
356 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
357 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
358 else
359 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
360
361 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
362 tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);
363 else
364 tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_VSYNC);
365
366 if (info->raster_order)
367 tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
368 else
369 tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ORDER);
370
371
372 update_scanout(crtc);
373 tilcdc_crtc_update_clk(crtc);
374
375 pm_runtime_put_sync(dev->dev);
376
377 return 0;
378}
379
380static int tilcdc_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
381 struct drm_framebuffer *old_fb)
382{
383 update_scanout(crtc);
384 return 0;
385}
386
387static void tilcdc_crtc_load_lut(struct drm_crtc *crtc)
388{
389}
390
391static const struct drm_crtc_funcs tilcdc_crtc_funcs = {
392 .destroy = tilcdc_crtc_destroy,
393 .set_config = drm_crtc_helper_set_config,
394 .page_flip = tilcdc_crtc_page_flip,
395};
396
397static const struct drm_crtc_helper_funcs tilcdc_crtc_helper_funcs = {
398 .dpms = tilcdc_crtc_dpms,
399 .mode_fixup = tilcdc_crtc_mode_fixup,
400 .prepare = tilcdc_crtc_prepare,
401 .commit = tilcdc_crtc_commit,
402 .mode_set = tilcdc_crtc_mode_set,
403 .mode_set_base = tilcdc_crtc_mode_set_base,
404 .load_lut = tilcdc_crtc_load_lut,
405};
406
407int tilcdc_crtc_max_width(struct drm_crtc *crtc)
408{
409 struct drm_device *dev = crtc->dev;
410 struct tilcdc_drm_private *priv = dev->dev_private;
411 int max_width = 0;
412
413 if (priv->rev == 1)
414 max_width = 1024;
415 else if (priv->rev == 2)
416 max_width = 2048;
417
418 return max_width;
419}
420
421int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode)
422{
423 struct tilcdc_drm_private *priv = crtc->dev->dev_private;
424 unsigned int bandwidth;
425
426 if (mode->hdisplay > tilcdc_crtc_max_width(crtc))
427 return MODE_VIRTUAL_X;
428
429 /* width must be a multiple of 16 */
430 if (mode->hdisplay & 0xf)
431 return MODE_VIRTUAL_X;
432
433 if (mode->vdisplay > 2048)
434 return MODE_VIRTUAL_Y;
435
436 /* filter out modes that would require too much memory bandwidth: */
437 bandwidth = mode->hdisplay * mode->vdisplay * drm_mode_vrefresh(mode);
438 if (bandwidth > priv->max_bandwidth)
439 return MODE_BAD;
440
441 return MODE_OK;
442}
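/* Editorial example: with the default max_bandwidth of 1280*1024*60
 * (~78.6 Mpixels/s, see tilcdc_load() below), 1280x1024@60 passes the
 * check exactly, while 1920x1080@60 (~124.4 Mpixels/s) is rejected as
 * MODE_BAD even though it satisfies the width and height limits.
 */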
443
444void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
445 const struct tilcdc_panel_info *info)
446{
447 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
448 tilcdc_crtc->info = info;
449}
450
451void tilcdc_crtc_update_clk(struct drm_crtc *crtc)
452{
453 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
454 struct drm_device *dev = crtc->dev;
455 struct tilcdc_drm_private *priv = dev->dev_private;
456 int dpms = tilcdc_crtc->dpms;
457 unsigned int lcd_clk, div;
458 int ret;
459
460 pm_runtime_get_sync(dev->dev);
461
462 if (dpms == DRM_MODE_DPMS_ON)
463 tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
464
465 /* in raster mode, minimum divisor is 2: */
466 ret = clk_set_rate(priv->disp_clk, crtc->mode.clock * 1000 * 2);
467 if (ret) {
468 dev_err(dev->dev, "failed to set display clock rate to: %d\n",
469 crtc->mode.clock);
470 goto out;
471 }
472
473 lcd_clk = clk_get_rate(priv->clk);
474 div = lcd_clk / (crtc->mode.clock * 1000);
475
476 DBG("lcd_clk=%u, mode clock=%d, div=%u", lcd_clk, crtc->mode.clock, div);
477 DBG("fck=%lu, dpll_disp_ck=%lu", clk_get_rate(priv->clk), clk_get_rate(priv->disp_clk));
478
479 /* Configure the LCD clock divisor. */
480 tilcdc_write(dev, LCDC_CTRL_REG, LCDC_CLK_DIVISOR(div) |
481 LCDC_RASTER_MODE);
482
483 if (priv->rev == 2)
484 tilcdc_set(dev, LCDC_CLK_ENABLE_REG,
485 LCDC_V2_DMA_CLK_EN | LCDC_V2_LIDD_CLK_EN |
486 LCDC_V2_CORE_CLK_EN);
487
488 if (dpms == DRM_MODE_DPMS_ON)
489 tilcdc_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
490
491out:
492 pm_runtime_put_sync(dev->dev);
493}
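/* Editorial example: for a 40000 kHz pixel clock (800x600@60) the code
 * above asks the display PLL for 80 MHz (the minimum divisor of 2 times
 * the pixel clock); if the functional clock then reads back as 80 MHz,
 * div = 80000000 / 40000000 = 2 is programmed via LCDC_CLK_DIVISOR().
 */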
494
495irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
496{
497 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
498 struct drm_device *dev = crtc->dev;
499 struct tilcdc_drm_private *priv = dev->dev_private;
500 uint32_t stat = tilcdc_read_irqstatus(dev);
501
502 if ((stat & LCDC_SYNC_LOST) && (stat & LCDC_FIFO_UNDERFLOW)) {
503 stop(crtc);
504 dev_err(dev->dev, "error: %08x\n", stat);
505 tilcdc_clear_irqstatus(dev, stat);
506 start(crtc);
507 } else if (stat & LCDC_PL_LOAD_DONE) {
508 tilcdc_clear_irqstatus(dev, stat);
509 } else {
510 struct drm_pending_vblank_event *event;
511 unsigned long flags;
512 uint32_t dirty = tilcdc_crtc->dirty & stat;
513
514 tilcdc_clear_irqstatus(dev, stat);
515
516 if (dirty & LCDC_END_OF_FRAME0)
517 set_scanout(crtc, 0);
518
519 if (dirty & LCDC_END_OF_FRAME1)
520 set_scanout(crtc, 1);
521
522 drm_handle_vblank(dev, 0);
523
524 spin_lock_irqsave(&dev->event_lock, flags);
525 event = tilcdc_crtc->event;
526 tilcdc_crtc->event = NULL;
527 if (event)
528 drm_send_vblank_event(dev, 0, event);
529 spin_unlock_irqrestore(&dev->event_lock, flags);
530
531 if (dirty && !tilcdc_crtc->dirty)
532 drm_vblank_put(dev, 0);
533 }
534
535 if (priv->rev == 2) {
536 if (stat & LCDC_FRAME_DONE) {
537 tilcdc_crtc->frame_done = true;
538 wake_up(&tilcdc_crtc->frame_done_wq);
539 }
540 tilcdc_write(dev, LCDC_END_OF_INT_IND_REG, 0);
541 }
542
543 return IRQ_HANDLED;
544}
545
546void tilcdc_crtc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
547{
548 struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
549 struct drm_pending_vblank_event *event;
550 struct drm_device *dev = crtc->dev;
551 unsigned long flags;
552
553 /* Destroy the pending vertical blanking event associated with the
554 * pending page flip, if any, and disable vertical blanking interrupts.
555 */
556 spin_lock_irqsave(&dev->event_lock, flags);
557 event = tilcdc_crtc->event;
558 if (event && event->base.file_priv == file) {
559 tilcdc_crtc->event = NULL;
560 event->base.destroy(&event->base);
561 drm_vblank_put(dev, 0);
562 }
563 spin_unlock_irqrestore(&dev->event_lock, flags);
564}
565
566struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
567{
568 struct tilcdc_crtc *tilcdc_crtc;
569 struct drm_crtc *crtc;
570 int ret;
571
572 tilcdc_crtc = kzalloc(sizeof(*tilcdc_crtc), GFP_KERNEL);
573 if (!tilcdc_crtc) {
574 dev_err(dev->dev, "allocation failed\n");
575 return NULL;
576 }
577
578 crtc = &tilcdc_crtc->base;
579
580 tilcdc_crtc->dpms = DRM_MODE_DPMS_OFF;
581 init_waitqueue_head(&tilcdc_crtc->frame_done_wq);
582
583 ret = kfifo_alloc(&tilcdc_crtc->unref_fifo, 16, GFP_KERNEL);
584 if (ret) {
585 dev_err(dev->dev, "could not allocate unref FIFO\n");
586 goto fail;
587 }
588
589 INIT_WORK(&tilcdc_crtc->work, unref_worker);
590
591 ret = drm_crtc_init(dev, crtc, &tilcdc_crtc_funcs);
592 if (ret < 0)
593 goto fail;
594
595 drm_crtc_helper_add(crtc, &tilcdc_crtc_helper_funcs);
596
597 return crtc;
598
599fail:
600 tilcdc_crtc_destroy(crtc);
601 return NULL;
602}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
new file mode 100644
index 000000000000..c5b592dc1970
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -0,0 +1,611 @@
1/*
2 * Copyright (C) 2012 Texas Instruments
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18/* LCDC DRM driver, based on da8xx-fb */
19
20#include "tilcdc_drv.h"
21#include "tilcdc_regs.h"
22#include "tilcdc_tfp410.h"
23#include "tilcdc_slave.h"
24#include "tilcdc_panel.h"
25
26#include "drm_fb_helper.h"
27
28static LIST_HEAD(module_list);
29
30void tilcdc_module_init(struct tilcdc_module *mod, const char *name,
31 const struct tilcdc_module_ops *funcs)
32{
33 mod->name = name;
34 mod->funcs = funcs;
35 INIT_LIST_HEAD(&mod->list);
36 list_add(&mod->list, &module_list);
37}
38
39void tilcdc_module_cleanup(struct tilcdc_module *mod)
40{
41 list_del(&mod->list);
42}
43
44static struct of_device_id tilcdc_of_match[];
45
46static struct drm_framebuffer *tilcdc_fb_create(struct drm_device *dev,
47 struct drm_file *file_priv, struct drm_mode_fb_cmd2 *mode_cmd)
48{
49 return drm_fb_cma_create(dev, file_priv, mode_cmd);
50}
51
52static void tilcdc_fb_output_poll_changed(struct drm_device *dev)
53{
54 struct tilcdc_drm_private *priv = dev->dev_private;
55 if (priv->fbdev)
56 drm_fbdev_cma_hotplug_event(priv->fbdev);
57}
58
59static const struct drm_mode_config_funcs mode_config_funcs = {
60 .fb_create = tilcdc_fb_create,
61 .output_poll_changed = tilcdc_fb_output_poll_changed,
62};
63
64static int modeset_init(struct drm_device *dev)
65{
66 struct tilcdc_drm_private *priv = dev->dev_private;
67 struct tilcdc_module *mod;
68
69 drm_mode_config_init(dev);
70
71 priv->crtc = tilcdc_crtc_create(dev);
72
73 list_for_each_entry(mod, &module_list, list) {
74 DBG("loading module: %s", mod->name);
75 mod->funcs->modeset_init(mod, dev);
76 }
77
78 if ((priv->num_encoders == 0) || (priv->num_connectors == 0)) {
79 /* oh nos! */
80 dev_err(dev->dev, "no encoders/connectors found\n");
81 return -ENXIO;
82 }
83
84 dev->mode_config.min_width = 0;
85 dev->mode_config.min_height = 0;
86 dev->mode_config.max_width = tilcdc_crtc_max_width(priv->crtc);
87 dev->mode_config.max_height = 2048;
88 dev->mode_config.funcs = &mode_config_funcs;
89
90 return 0;
91}
92
93#ifdef CONFIG_CPU_FREQ
94static int cpufreq_transition(struct notifier_block *nb,
95 unsigned long val, void *data)
96{
97 struct tilcdc_drm_private *priv = container_of(nb,
98 struct tilcdc_drm_private, freq_transition);
99 if (val == CPUFREQ_POSTCHANGE) {
100 if (priv->lcd_fck_rate != clk_get_rate(priv->clk)) {
101 priv->lcd_fck_rate = clk_get_rate(priv->clk);
102 tilcdc_crtc_update_clk(priv->crtc);
103 }
104 }
105
106 return 0;
107}
108#endif
109
110/*
111 * DRM operations:
112 */
113
114static int tilcdc_unload(struct drm_device *dev)
115{
116 struct tilcdc_drm_private *priv = dev->dev_private;
117 struct tilcdc_module *mod, *cur;
118
119 drm_kms_helper_poll_fini(dev);
120 drm_mode_config_cleanup(dev);
121 drm_vblank_cleanup(dev);
122
123 pm_runtime_get_sync(dev->dev);
124 drm_irq_uninstall(dev);
125 pm_runtime_put_sync(dev->dev);
126
127#ifdef CONFIG_CPU_FREQ
128 cpufreq_unregister_notifier(&priv->freq_transition,
129 CPUFREQ_TRANSITION_NOTIFIER);
130#endif
131
132 if (priv->clk)
133 clk_put(priv->clk);
134
135 if (priv->mmio)
136 iounmap(priv->mmio);
137
138 flush_workqueue(priv->wq);
139 destroy_workqueue(priv->wq);
140
141 dev->dev_private = NULL;
142
143 pm_runtime_disable(dev->dev);
144
145 list_for_each_entry_safe(mod, cur, &module_list, list) {
146 DBG("destroying module: %s", mod->name);
147 mod->funcs->destroy(mod);
148 }
149
150 kfree(priv);
151
152 return 0;
153}
154
155static int tilcdc_load(struct drm_device *dev, unsigned long flags)
156{
157 struct platform_device *pdev = dev->platformdev;
158 struct device_node *node = pdev->dev.of_node;
159 struct tilcdc_drm_private *priv;
160 struct resource *res;
161 int ret;
162
163 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
164 if (!priv) {
165 dev_err(dev->dev, "failed to allocate private data\n");
166 return -ENOMEM;
167 }
168
169 dev->dev_private = priv;
170
171 priv->wq = alloc_ordered_workqueue("tilcdc", 0);
172
173 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
174 if (!res) {
175 dev_err(dev->dev, "failed to get memory resource\n");
176 ret = -EINVAL;
177 goto fail;
178 }
179
180 priv->mmio = ioremap_nocache(res->start, resource_size(res));
181 if (!priv->mmio) {
182 dev_err(dev->dev, "failed to ioremap\n");
183 ret = -ENOMEM;
184 goto fail;
185 }
186
187 priv->clk = clk_get(dev->dev, "fck");
188 if (IS_ERR(priv->clk)) {
189 dev_err(dev->dev, "failed to get functional clock\n");
190 ret = -ENODEV;
191 goto fail;
192 }
193
194 priv->disp_clk = clk_get(dev->dev, "dpll_disp_ck");
195 if (IS_ERR(priv->disp_clk)) {
196 dev_err(dev->dev, "failed to get display clock\n");
197 ret = -ENODEV;
198 goto fail;
199 }
200
201#ifdef CONFIG_CPU_FREQ
202 priv->lcd_fck_rate = clk_get_rate(priv->clk);
203 priv->freq_transition.notifier_call = cpufreq_transition;
204 ret = cpufreq_register_notifier(&priv->freq_transition,
205 CPUFREQ_TRANSITION_NOTIFIER);
206 if (ret) {
207 dev_err(dev->dev, "failed to register cpufreq notifier\n");
208 goto fail;
209 }
210#endif
211
212 if (of_property_read_u32(node, "max-bandwidth", &priv->max_bandwidth))
213 priv->max_bandwidth = 1280 * 1024 * 60;
214
215 pm_runtime_enable(dev->dev);
216
217 /* Determine LCD IP Version */
218 pm_runtime_get_sync(dev->dev);
219 switch (tilcdc_read(dev, LCDC_PID_REG)) {
220 case 0x4c100102:
221 priv->rev = 1;
222 break;
223 case 0x4f200800:
224 case 0x4f201000:
225 priv->rev = 2;
226 break;
227 default:
228 dev_warn(dev->dev, "Unknown PID Reg value 0x%08x, "
229 "defaulting to LCD revision 1\n",
230 tilcdc_read(dev, LCDC_PID_REG));
231 priv->rev = 1;
232 break;
233 }
234
235 pm_runtime_put_sync(dev->dev);
236
237 ret = modeset_init(dev);
238 if (ret < 0) {
239 dev_err(dev->dev, "failed to initialize mode setting\n");
240 goto fail;
241 }
242
243 ret = drm_vblank_init(dev, 1);
244 if (ret < 0) {
245 dev_err(dev->dev, "failed to initialize vblank\n");
246 goto fail;
247 }
248
249 pm_runtime_get_sync(dev->dev);
250 ret = drm_irq_install(dev);
251 pm_runtime_put_sync(dev->dev);
252 if (ret < 0) {
253 dev_err(dev->dev, "failed to install IRQ handler\n");
254 goto fail;
255 }
256
257 platform_set_drvdata(pdev, dev);
258
259 priv->fbdev = drm_fbdev_cma_init(dev, 16,
260 dev->mode_config.num_crtc,
261 dev->mode_config.num_connector);
262
263 drm_kms_helper_poll_init(dev);
264
265 return 0;
266
267fail:
268 tilcdc_unload(dev);
269 return ret;
270}
271
272static void tilcdc_preclose(struct drm_device *dev, struct drm_file *file)
273{
274 struct tilcdc_drm_private *priv = dev->dev_private;
275
276 tilcdc_crtc_cancel_page_flip(priv->crtc, file);
277}
278
279static void tilcdc_lastclose(struct drm_device *dev)
280{
281 struct tilcdc_drm_private *priv = dev->dev_private;
282 drm_fbdev_cma_restore_mode(priv->fbdev);
283}
284
285static irqreturn_t tilcdc_irq(DRM_IRQ_ARGS)
286{
287 struct drm_device *dev = arg;
288 struct tilcdc_drm_private *priv = dev->dev_private;
289 return tilcdc_crtc_irq(priv->crtc);
290}
291
292static void tilcdc_irq_preinstall(struct drm_device *dev)
293{
294 tilcdc_clear_irqstatus(dev, 0xffffffff);
295}
296
297static int tilcdc_irq_postinstall(struct drm_device *dev)
298{
299 struct tilcdc_drm_private *priv = dev->dev_private;
300
301 /* enable FIFO underflow irq: */
302 if (priv->rev == 1) {
303 tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_V1_UNDERFLOW_INT_ENA);
304 } else {
305 tilcdc_set(dev, LCDC_INT_ENABLE_SET_REG, LCDC_V2_UNDERFLOW_INT_ENA);
306 }
307
308 return 0;
309}
310
311static void tilcdc_irq_uninstall(struct drm_device *dev)
312{
313 struct tilcdc_drm_private *priv = dev->dev_private;
314
315 /* disable irqs that we might have enabled: */
316 if (priv->rev == 1) {
317 tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
318 LCDC_V1_UNDERFLOW_INT_ENA | LCDC_V1_PL_INT_ENA);
319 tilcdc_clear(dev, LCDC_DMA_CTRL_REG, LCDC_V1_END_OF_FRAME_INT_ENA);
320 } else {
321 tilcdc_clear(dev, LCDC_INT_ENABLE_SET_REG,
322 LCDC_V2_UNDERFLOW_INT_ENA | LCDC_V2_PL_INT_ENA |
323 LCDC_V2_END_OF_FRAME0_INT_ENA | LCDC_V2_END_OF_FRAME1_INT_ENA |
324 LCDC_FRAME_DONE);
325 }
326
327}
328
329static void enable_vblank(struct drm_device *dev, bool enable)
330{
331 struct tilcdc_drm_private *priv = dev->dev_private;
332 u32 reg, mask;
333
334 if (priv->rev == 1) {
335 reg = LCDC_DMA_CTRL_REG;
336 mask = LCDC_V1_END_OF_FRAME_INT_ENA;
337 } else {
338 reg = LCDC_INT_ENABLE_SET_REG;
339 mask = LCDC_V2_END_OF_FRAME0_INT_ENA |
340 LCDC_V2_END_OF_FRAME1_INT_ENA | LCDC_FRAME_DONE;
341 }
342
343 if (enable)
344 tilcdc_set(dev, reg, mask);
345 else
346 tilcdc_clear(dev, reg, mask);
347}
348
349static int tilcdc_enable_vblank(struct drm_device *dev, int crtc)
350{
351 enable_vblank(dev, true);
352 return 0;
353}
354
355static void tilcdc_disable_vblank(struct drm_device *dev, int crtc)
356{
357 enable_vblank(dev, false);
358}
359
360#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_PM_SLEEP)
361static const struct {
362 const char *name;
363 uint8_t rev;
364 uint8_t save;
365 uint32_t reg;
366} registers[] = {
367#define REG(rev, save, reg) { #reg, rev, save, reg }
368 /* exists in revision 1: */
369 REG(1, false, LCDC_PID_REG),
370 REG(1, true, LCDC_CTRL_REG),
371 REG(1, false, LCDC_STAT_REG),
372 REG(1, true, LCDC_RASTER_CTRL_REG),
373 REG(1, true, LCDC_RASTER_TIMING_0_REG),
374 REG(1, true, LCDC_RASTER_TIMING_1_REG),
375 REG(1, true, LCDC_RASTER_TIMING_2_REG),
376 REG(1, true, LCDC_DMA_CTRL_REG),
377 REG(1, true, LCDC_DMA_FB_BASE_ADDR_0_REG),
378 REG(1, true, LCDC_DMA_FB_CEILING_ADDR_0_REG),
379 REG(1, true, LCDC_DMA_FB_BASE_ADDR_1_REG),
380 REG(1, true, LCDC_DMA_FB_CEILING_ADDR_1_REG),
381 /* new in revision 2: */
382 REG(2, false, LCDC_RAW_STAT_REG),
383 REG(2, false, LCDC_MASKED_STAT_REG),
384 REG(2, false, LCDC_INT_ENABLE_SET_REG),
385 REG(2, false, LCDC_INT_ENABLE_CLR_REG),
386 REG(2, false, LCDC_END_OF_INT_IND_REG),
387 REG(2, true, LCDC_CLK_ENABLE_REG),
388 REG(2, true, LCDC_INT_ENABLE_SET_REG),
389#undef REG
390};
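/* Editorial note: exactly twelve entries above are marked save=true (ten
 * revision-1 registers plus LCDC_CLK_ENABLE_REG and LCDC_INT_ENABLE_SET_REG
 * for revision 2), matching the saved_register[12] array declared in
 * tilcdc_drv.h that tilcdc_pm_suspend()/resume() fill and drain below.
 */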
391#endif
392
393#ifdef CONFIG_DEBUG_FS
394static int tilcdc_regs_show(struct seq_file *m, void *arg)
395{
396 struct drm_info_node *node = (struct drm_info_node *) m->private;
397 struct drm_device *dev = node->minor->dev;
398 struct tilcdc_drm_private *priv = dev->dev_private;
399 unsigned i;
400
401 pm_runtime_get_sync(dev->dev);
402
403 seq_printf(m, "revision: %d\n", priv->rev);
404
405 for (i = 0; i < ARRAY_SIZE(registers); i++)
406 if (priv->rev >= registers[i].rev)
407 seq_printf(m, "%s:\t %08x\n", registers[i].name,
408 tilcdc_read(dev, registers[i].reg));
409
410 pm_runtime_put_sync(dev->dev);
411
412 return 0;
413}
414
415static int tilcdc_mm_show(struct seq_file *m, void *arg)
416{
417 struct drm_info_node *node = (struct drm_info_node *) m->private;
418 struct drm_device *dev = node->minor->dev;
419 return drm_mm_dump_table(m, dev->mm_private);
420}
421
422static struct drm_info_list tilcdc_debugfs_list[] = {
423 { "regs", tilcdc_regs_show, 0 },
424 { "mm", tilcdc_mm_show, 0 },
425 { "fb", drm_fb_cma_debugfs_show, 0 },
426};
427
428static int tilcdc_debugfs_init(struct drm_minor *minor)
429{
430 struct drm_device *dev = minor->dev;
431 struct tilcdc_module *mod;
432 int ret;
433
434 ret = drm_debugfs_create_files(tilcdc_debugfs_list,
435 ARRAY_SIZE(tilcdc_debugfs_list),
436 minor->debugfs_root, minor);
437
438 list_for_each_entry(mod, &module_list, list)
439 if (mod->funcs->debugfs_init)
440 mod->funcs->debugfs_init(mod, minor);
441
442 if (ret) {
443 dev_err(dev->dev, "could not install tilcdc_debugfs_list\n");
444 return ret;
445 }
446
447 return ret;
448}
449
450static void tilcdc_debugfs_cleanup(struct drm_minor *minor)
451{
452 struct tilcdc_module *mod;
453 drm_debugfs_remove_files(tilcdc_debugfs_list,
454 ARRAY_SIZE(tilcdc_debugfs_list), minor);
455
456 list_for_each_entry(mod, &module_list, list)
457 if (mod->funcs->debugfs_cleanup)
458 mod->funcs->debugfs_cleanup(mod, minor);
459}
460#endif
461
462static const struct file_operations fops = {
463 .owner = THIS_MODULE,
464 .open = drm_open,
465 .release = drm_release,
466 .unlocked_ioctl = drm_ioctl,
467#ifdef CONFIG_COMPAT
468 .compat_ioctl = drm_compat_ioctl,
469#endif
470 .poll = drm_poll,
471 .read = drm_read,
472 .fasync = drm_fasync,
473 .llseek = no_llseek,
474 .mmap = drm_gem_cma_mmap,
475};
476
477static struct drm_driver tilcdc_driver = {
478 .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
479 .load = tilcdc_load,
480 .unload = tilcdc_unload,
481 .preclose = tilcdc_preclose,
482 .lastclose = tilcdc_lastclose,
483 .irq_handler = tilcdc_irq,
484 .irq_preinstall = tilcdc_irq_preinstall,
485 .irq_postinstall = tilcdc_irq_postinstall,
486 .irq_uninstall = tilcdc_irq_uninstall,
487 .get_vblank_counter = drm_vblank_count,
488 .enable_vblank = tilcdc_enable_vblank,
489 .disable_vblank = tilcdc_disable_vblank,
490 .gem_free_object = drm_gem_cma_free_object,
491 .gem_vm_ops = &drm_gem_cma_vm_ops,
492 .dumb_create = drm_gem_cma_dumb_create,
493 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
494 .dumb_destroy = drm_gem_cma_dumb_destroy,
495#ifdef CONFIG_DEBUG_FS
496 .debugfs_init = tilcdc_debugfs_init,
497 .debugfs_cleanup = tilcdc_debugfs_cleanup,
498#endif
499 .fops = &fops,
500 .name = "tilcdc",
501 .desc = "TI LCD Controller DRM",
502 .date = "20121205",
503 .major = 1,
504 .minor = 0,
505};
506
507/*
508 * Power management:
509 */
510
511#ifdef CONFIG_PM_SLEEP
512static int tilcdc_pm_suspend(struct device *dev)
513{
514 struct drm_device *ddev = dev_get_drvdata(dev);
515 struct tilcdc_drm_private *priv = ddev->dev_private;
516 unsigned i, n = 0;
517
518 drm_kms_helper_poll_disable(ddev);
519
520 /* Save register state: */
521 for (i = 0; i < ARRAY_SIZE(registers); i++)
522 if (registers[i].save && (priv->rev >= registers[i].rev))
523 priv->saved_register[n++] = tilcdc_read(ddev, registers[i].reg);
524
525 return 0;
526}
527
528static int tilcdc_pm_resume(struct device *dev)
529{
530 struct drm_device *ddev = dev_get_drvdata(dev);
531 struct tilcdc_drm_private *priv = ddev->dev_private;
532 unsigned i, n = 0;
533
534 /* Restore register state: */
535 for (i = 0; i < ARRAY_SIZE(registers); i++)
536 if (registers[i].save && (priv->rev >= registers[i].rev))
537 tilcdc_write(ddev, registers[i].reg, priv->saved_register[n++]);
538
539 drm_kms_helper_poll_enable(ddev);
540
541 return 0;
542}
543#endif
544
545static const struct dev_pm_ops tilcdc_pm_ops = {
546 SET_SYSTEM_SLEEP_PM_OPS(tilcdc_pm_suspend, tilcdc_pm_resume)
547};
548
549/*
550 * Platform driver:
551 */
552
553static int tilcdc_pdev_probe(struct platform_device *pdev)
554{
555 /* bail out early if no DT data: */
556 if (!pdev->dev.of_node) {
557 dev_err(&pdev->dev, "device-tree data is missing\n");
558 return -ENXIO;
559 }
560
561 return drm_platform_init(&tilcdc_driver, pdev);
562}
563
564static int tilcdc_pdev_remove(struct platform_device *pdev)
565{
566 drm_platform_exit(&tilcdc_driver, pdev);
567
568 return 0;
569}
570
571static struct of_device_id tilcdc_of_match[] = {
572 { .compatible = "ti,am33xx-tilcdc", },
573 { },
574};
575MODULE_DEVICE_TABLE(of, tilcdc_of_match);
576
577static struct platform_driver tilcdc_platform_driver = {
578 .probe = tilcdc_pdev_probe,
579 .remove = tilcdc_pdev_remove,
580 .driver = {
581 .owner = THIS_MODULE,
582 .name = "tilcdc",
583 .pm = &tilcdc_pm_ops,
584 .of_match_table = tilcdc_of_match,
585 },
586};
587
588static int __init tilcdc_drm_init(void)
589{
590 DBG("init");
591 tilcdc_tfp410_init();
592 tilcdc_slave_init();
593 tilcdc_panel_init();
594 return platform_driver_register(&tilcdc_platform_driver);
595}
596
597static void __exit tilcdc_drm_fini(void)
598{
599 DBG("fini");
600 tilcdc_tfp410_fini();
601 tilcdc_slave_fini();
602 tilcdc_panel_fini();
603 platform_driver_unregister(&tilcdc_platform_driver);
604}
605
606late_initcall(tilcdc_drm_init);
607module_exit(tilcdc_drm_fini);
608
609MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
610MODULE_DESCRIPTION("TI LCD Controller DRM Driver");
611MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
new file mode 100644
index 000000000000..8242b5a4307b
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
@@ -0,0 +1,150 @@
1/*
2 * Copyright (C) 2012 Texas Instruments
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __TILCDC_DRV_H__
19#define __TILCDC_DRV_H__
20
21#include <linux/clk.h>
22#include <linux/cpufreq.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
25#include <linux/pm.h>
26#include <linux/pm_runtime.h>
27#include <linux/slab.h>
28#include <linux/of.h>
29#include <linux/of_device.h>
30#include <linux/list.h>
31
32#include <drm/drmP.h>
33#include <drm/drm_crtc_helper.h>
34#include <drm/drm_gem_cma_helper.h>
35#include <drm/drm_fb_cma_helper.h>
36
37struct tilcdc_drm_private {
38 void __iomem *mmio;
39
40 struct clk *disp_clk; /* display dpll */
41 struct clk *clk; /* functional clock */
42 int rev; /* IP revision */
43
44 /* don't attempt resolutions w/ higher W * H * Hz: */
45 uint32_t max_bandwidth;
46
47 /* register contents saved across suspend/resume: */
48 u32 saved_register[12];
49
50#ifdef CONFIG_CPU_FREQ
51 struct notifier_block freq_transition;
52 unsigned int lcd_fck_rate;
53#endif
54
55 struct workqueue_struct *wq;
56
57 struct drm_fbdev_cma *fbdev;
58
59 struct drm_crtc *crtc;
60
61 unsigned int num_encoders;
62 struct drm_encoder *encoders[8];
63
64 unsigned int num_connectors;
65 struct drm_connector *connectors[8];
66};
67
68/* Sub-module for display. Since we don't know at compile time what panels
69 * or display adapter(s) might be present (for example, an off-chip dvi/tfp410,
70 * hdmi encoder, or various lcd panels), the connector/encoder(s) are split into
71 * separate drivers. If they are probed and found to be present, they
72 * register themselves with tilcdc_module_init().
73 */
74struct tilcdc_module;
75
76struct tilcdc_module_ops {
77 /* create appropriate encoders/connectors: */
78 int (*modeset_init)(struct tilcdc_module *mod, struct drm_device *dev);
79 void (*destroy)(struct tilcdc_module *mod);
80#ifdef CONFIG_DEBUG_FS
81 /* create debugfs nodes (can be NULL): */
82 int (*debugfs_init)(struct tilcdc_module *mod, struct drm_minor *minor);
83 /* cleanup debugfs nodes (can be NULL): */
84 void (*debugfs_cleanup)(struct tilcdc_module *mod, struct drm_minor *minor);
85#endif
86};
87
88struct tilcdc_module {
89 const char *name;
90 struct list_head list;
91 const struct tilcdc_module_ops *funcs;
92};
93
94void tilcdc_module_init(struct tilcdc_module *mod, const char *name,
95 const struct tilcdc_module_ops *funcs);
96void tilcdc_module_cleanup(struct tilcdc_module *mod);
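/* Editorial sketch, not in the original header: a sub-module wires itself
 * up from its probe path roughly like this (the foo_* names are
 * hypothetical; tilcdc_panel.c and tilcdc_slave.c below are the real
 * instances):
 *
 *	static const struct tilcdc_module_ops foo_module_ops = {
 *		.modeset_init = foo_modeset_init,
 *		.destroy = foo_destroy,
 *	};
 *
 *	foo_mod = kzalloc(sizeof(*foo_mod), GFP_KERNEL);
 *	tilcdc_module_init(&foo_mod->base, "foo", &foo_module_ops);
 */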
97
98
99/* Panel config that needs to be set in the crtc, but is not coming from
100 * the mode timings. The display module is expected to call
101 * tilcdc_crtc_set_panel_info() to set this during modeset.
102 */
103struct tilcdc_panel_info {
104
105 /* AC Bias Pin Frequency */
106 uint32_t ac_bias;
107
108 /* AC Bias Pin Transitions per Interrupt */
109 uint32_t ac_bias_intrpt;
110
111 /* DMA burst size */
112 uint32_t dma_burst_sz;
113
114 /* Bits per pixel */
115 uint32_t bpp;
116
117 /* FIFO DMA Request Delay */
118 uint32_t fdd;
119
120 /* TFT Alternative Signal Mapping (Only for active) */
121 bool tft_alt_mode;
122
123 /* Invert pixel clock */
124 bool invert_pxl_clk;
125
126 /* Horizontal and Vertical Sync Edge: 0=rising 1=falling */
127 uint32_t sync_edge;
128
129 /* Horizontal and Vertical Sync Control: 0=ignore */
130 uint32_t sync_ctrl;
131
132 /* Raster Data Order Select: 1=Most-to-least 0=Least-to-most */
133 uint32_t raster_order;
134
135 /* DMA FIFO threshold */
136 uint32_t fifo_th;
137};
138
139#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
140
141struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev);
142void tilcdc_crtc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file);
143irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc);
144void tilcdc_crtc_update_clk(struct drm_crtc *crtc);
145void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
146 const struct tilcdc_panel_info *info);
147int tilcdc_crtc_mode_valid(struct drm_crtc *crtc, struct drm_display_mode *mode);
148int tilcdc_crtc_max_width(struct drm_crtc *crtc);
149
150#endif /* __TILCDC_DRV_H__ */
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
new file mode 100644
index 000000000000..580b74e2022b
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -0,0 +1,436 @@
1/*
2 * Copyright (C) 2012 Texas Instruments
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/pinctrl/pinmux.h>
19#include <linux/pinctrl/consumer.h>
20#include <linux/backlight.h>
21#include <video/display_timing.h>
22#include <video/of_display_timing.h>
23#include <video/videomode.h>
24
25#include "tilcdc_drv.h"
26
27struct panel_module {
28 struct tilcdc_module base;
29 struct tilcdc_panel_info *info;
30 struct display_timings *timings;
31 struct backlight_device *backlight;
32};
33#define to_panel_module(x) container_of(x, struct panel_module, base)
34
35
36/*
37 * Encoder:
38 */
39
40struct panel_encoder {
41 struct drm_encoder base;
42 struct panel_module *mod;
43};
44#define to_panel_encoder(x) container_of(x, struct panel_encoder, base)
45
46
47static void panel_encoder_destroy(struct drm_encoder *encoder)
48{
49 struct panel_encoder *panel_encoder = to_panel_encoder(encoder);
50 drm_encoder_cleanup(encoder);
51 kfree(panel_encoder);
52}
53
54static void panel_encoder_dpms(struct drm_encoder *encoder, int mode)
55{
56 struct panel_encoder *panel_encoder = to_panel_encoder(encoder);
57 struct backlight_device *backlight = panel_encoder->mod->backlight;
58
59 if (!backlight)
60 return;
61
62 backlight->props.power = mode == DRM_MODE_DPMS_ON
63 ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
64 backlight_update_status(backlight);
65}
66
67static bool panel_encoder_mode_fixup(struct drm_encoder *encoder,
68 const struct drm_display_mode *mode,
69 struct drm_display_mode *adjusted_mode)
70{
71 /* nothing needed */
72 return true;
73}
74
75static void panel_encoder_prepare(struct drm_encoder *encoder)
76{
77 struct panel_encoder *panel_encoder = to_panel_encoder(encoder);
78 panel_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
79 tilcdc_crtc_set_panel_info(encoder->crtc, panel_encoder->mod->info);
80}
81
82static void panel_encoder_commit(struct drm_encoder *encoder)
83{
84 panel_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
85}
86
87static void panel_encoder_mode_set(struct drm_encoder *encoder,
88 struct drm_display_mode *mode,
89 struct drm_display_mode *adjusted_mode)
90{
91 /* nothing needed */
92}
93
94static const struct drm_encoder_funcs panel_encoder_funcs = {
95 .destroy = panel_encoder_destroy,
96};
97
98static const struct drm_encoder_helper_funcs panel_encoder_helper_funcs = {
99 .dpms = panel_encoder_dpms,
100 .mode_fixup = panel_encoder_mode_fixup,
101 .prepare = panel_encoder_prepare,
102 .commit = panel_encoder_commit,
103 .mode_set = panel_encoder_mode_set,
104};
105
106static struct drm_encoder *panel_encoder_create(struct drm_device *dev,
107 struct panel_module *mod)
108{
109 struct panel_encoder *panel_encoder;
110 struct drm_encoder *encoder;
111 int ret;
112
113 panel_encoder = kzalloc(sizeof(*panel_encoder), GFP_KERNEL);
114 if (!panel_encoder) {
115 dev_err(dev->dev, "allocation failed\n");
116 return NULL;
117 }
118
119 panel_encoder->mod = mod;
120
121 encoder = &panel_encoder->base;
122 encoder->possible_crtcs = 1;
123
124 ret = drm_encoder_init(dev, encoder, &panel_encoder_funcs,
125 DRM_MODE_ENCODER_LVDS);
126 if (ret < 0)
127 goto fail;
128
129 drm_encoder_helper_add(encoder, &panel_encoder_helper_funcs);
130
131 return encoder;
132
133fail:
134 panel_encoder_destroy(encoder);
135 return NULL;
136}
137
138/*
139 * Connector:
140 */
141
142struct panel_connector {
143 struct drm_connector base;
144
145 struct drm_encoder *encoder; /* our connected encoder */
146 struct panel_module *mod;
147};
148#define to_panel_connector(x) container_of(x, struct panel_connector, base)
149
150
151static void panel_connector_destroy(struct drm_connector *connector)
152{
153 struct panel_connector *panel_connector = to_panel_connector(connector);
154 drm_connector_cleanup(connector);
155 kfree(panel_connector);
156}
157
158static enum drm_connector_status panel_connector_detect(
159 struct drm_connector *connector,
160 bool force)
161{
162 return connector_status_connected;
163}
164
165static int panel_connector_get_modes(struct drm_connector *connector)
166{
167 struct drm_device *dev = connector->dev;
168 struct panel_connector *panel_connector = to_panel_connector(connector);
169 struct display_timings *timings = panel_connector->mod->timings;
170 int i;
171
172 for (i = 0; i < timings->num_timings; i++) {
173 struct drm_display_mode *mode = drm_mode_create(dev);
174 struct videomode vm;
175
176 if (videomode_from_timing(timings, &vm, i))
177 break;
178
179 drm_display_mode_from_videomode(&vm, mode);
180
181 mode->type = DRM_MODE_TYPE_DRIVER;
182
183 if (timings->native_mode == i)
184 mode->type |= DRM_MODE_TYPE_PREFERRED;
185
186 drm_mode_set_name(mode);
187 drm_mode_probed_add(connector, mode);
188 }
189
190 return i;
191}
192
193static int panel_connector_mode_valid(struct drm_connector *connector,
194 struct drm_display_mode *mode)
195{
196 struct tilcdc_drm_private *priv = connector->dev->dev_private;
197 /* our only constraints are what the crtc can generate: */
198 return tilcdc_crtc_mode_valid(priv->crtc, mode);
199}
200
201static struct drm_encoder *panel_connector_best_encoder(
202 struct drm_connector *connector)
203{
204 struct panel_connector *panel_connector = to_panel_connector(connector);
205 return panel_connector->encoder;
206}
207
208static const struct drm_connector_funcs panel_connector_funcs = {
209 .destroy = panel_connector_destroy,
210 .dpms = drm_helper_connector_dpms,
211 .detect = panel_connector_detect,
212 .fill_modes = drm_helper_probe_single_connector_modes,
213};
214
215static const struct drm_connector_helper_funcs panel_connector_helper_funcs = {
216 .get_modes = panel_connector_get_modes,
217 .mode_valid = panel_connector_mode_valid,
218 .best_encoder = panel_connector_best_encoder,
219};
220
221static struct drm_connector *panel_connector_create(struct drm_device *dev,
222 struct panel_module *mod, struct drm_encoder *encoder)
223{
224 struct panel_connector *panel_connector;
225 struct drm_connector *connector;
226 int ret;
227
228 panel_connector = kzalloc(sizeof(*panel_connector), GFP_KERNEL);
229 if (!panel_connector) {
230 dev_err(dev->dev, "allocation failed\n");
231 return NULL;
232 }
233
234 panel_connector->encoder = encoder;
235 panel_connector->mod = mod;
236
237 connector = &panel_connector->base;
238
239 drm_connector_init(dev, connector, &panel_connector_funcs,
240 DRM_MODE_CONNECTOR_LVDS);
241 drm_connector_helper_add(connector, &panel_connector_helper_funcs);
242
243 connector->interlace_allowed = 0;
244 connector->doublescan_allowed = 0;
245
246 ret = drm_mode_connector_attach_encoder(connector, encoder);
247 if (ret)
248 goto fail;
249
250 drm_sysfs_connector_add(connector);
251
252 return connector;
253
254fail:
255 panel_connector_destroy(connector);
256 return NULL;
257}
258
259/*
260 * Module:
261 */
262
263static int panel_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
264{
265 struct panel_module *panel_mod = to_panel_module(mod);
266 struct tilcdc_drm_private *priv = dev->dev_private;
267 struct drm_encoder *encoder;
268 struct drm_connector *connector;
269
270 encoder = panel_encoder_create(dev, panel_mod);
271 if (!encoder)
272 return -ENOMEM;
273
274 connector = panel_connector_create(dev, panel_mod, encoder);
275 if (!connector)
276 return -ENOMEM;
277
278 priv->encoders[priv->num_encoders++] = encoder;
279 priv->connectors[priv->num_connectors++] = connector;
280
281 return 0;
282}
283
284static void panel_destroy(struct tilcdc_module *mod)
285{
286 struct panel_module *panel_mod = to_panel_module(mod);
287
288 /* display_timings_release() already frees the timings struct
289 * itself, so a further kfree() here would be a double free: */
290 if (panel_mod->timings)
291 display_timings_release(panel_mod->timings);
292
293 tilcdc_module_cleanup(mod);
294 kfree(panel_mod->info);
295 kfree(panel_mod);
296}
297
298static const struct tilcdc_module_ops panel_module_ops = {
299 .modeset_init = panel_modeset_init,
300 .destroy = panel_destroy,
301};
302
303/*
304 * Device:
305 */
306
307/* maybe move this somewhere common if it is needed by other outputs? */
308static struct tilcdc_panel_info *of_get_panel_info(struct device_node *np)
309{
310 struct device_node *info_np;
311 struct tilcdc_panel_info *info;
312 int ret = 0;
313
314 if (!np) {
315 pr_err("%s: no devicenode given\n", __func__);
316 return NULL;
317 }
318
319 info_np = of_get_child_by_name(np, "panel-info");
320 if (!info_np) {
321 pr_err("%s: could not find panel-info node\n", __func__);
322 return NULL;
323 }
324
325 info = kzalloc(sizeof(*info), GFP_KERNEL);
326 if (!info) {
327 pr_err("%s: allocation failed\n", __func__);
328 return NULL;
329 }
330
331 ret |= of_property_read_u32(info_np, "ac-bias", &info->ac_bias);
332 ret |= of_property_read_u32(info_np, "ac-bias-intrpt", &info->ac_bias_intrpt);
333 ret |= of_property_read_u32(info_np, "dma-burst-sz", &info->dma_burst_sz);
334 ret |= of_property_read_u32(info_np, "bpp", &info->bpp);
335 ret |= of_property_read_u32(info_np, "fdd", &info->fdd);
336 ret |= of_property_read_u32(info_np, "sync-edge", &info->sync_edge);
337 ret |= of_property_read_u32(info_np, "sync-ctrl", &info->sync_ctrl);
338 ret |= of_property_read_u32(info_np, "raster-order", &info->raster_order);
339 ret |= of_property_read_u32(info_np, "fifo-th", &info->fifo_th);
340
341 /* optional: */
342 info->tft_alt_mode = of_property_read_bool(info_np, "tft-alt-mode");
343 info->invert_pxl_clk = of_property_read_bool(info_np, "invert-pxl-clk");
344
345 if (ret) {
346 pr_err("%s: error reading panel-info properties\n", __func__);
347 kfree(info);
348 return NULL;
349 }
350
351 return info;
352}
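/* Editorial example: the panel-info node parsed above would look roughly
 * like this in a board DT (the values mirror the tda998x slave defaults
 * and are illustrative only):
 *
 *	panel-info {
 *		ac-bias = <255>;
 *		ac-bias-intrpt = <0>;
 *		dma-burst-sz = <16>;
 *		bpp = <16>;
 *		fdd = <0x80>;
 *		sync-edge = <0>;
 *		sync-ctrl = <1>;
 *		raster-order = <0>;
 *		fifo-th = <0>;
 *	};
 */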
353
354static struct of_device_id panel_of_match[];
355
356static int panel_probe(struct platform_device *pdev)
357{
358 struct device_node *node = pdev->dev.of_node;
359 struct panel_module *panel_mod;
360 struct tilcdc_module *mod;
361 struct pinctrl *pinctrl;
362 int ret = -EINVAL;
363
364
365 /* bail out early if no DT data: */
366 if (!node) {
367 dev_err(&pdev->dev, "device-tree data is missing\n");
368 return -ENXIO;
369 }
370
371 panel_mod = kzalloc(sizeof(*panel_mod), GFP_KERNEL);
372 if (!panel_mod)
373 return -ENOMEM;
374
375 mod = &panel_mod->base;
376
377 tilcdc_module_init(mod, "panel", &panel_module_ops);
378
379 pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
380 if (IS_ERR(pinctrl))
381 dev_warn(&pdev->dev, "pins are not configured\n");
382
383
384 panel_mod->timings = of_get_display_timings(node);
385 if (!panel_mod->timings) {
386 dev_err(&pdev->dev, "could not get panel timings\n");
387 goto fail;
388 }
389
390 panel_mod->info = of_get_panel_info(node);
391 if (!panel_mod->info) {
392 dev_err(&pdev->dev, "could not get panel info\n");
393 goto fail;
394 }
395
396 panel_mod->backlight = of_find_backlight_by_node(node);
397 if (panel_mod->backlight)
398 dev_info(&pdev->dev, "found backlight\n");
399
400 return 0;
401
402fail:
403 panel_destroy(mod);
404 return ret;
405}
406
407static int panel_remove(struct platform_device *pdev)
408{
409 return 0;
410}
411
412static struct of_device_id panel_of_match[] = {
413 { .compatible = "ti,tilcdc,panel", },
414 { },
415};
416MODULE_DEVICE_TABLE(of, panel_of_match);
417
418struct platform_driver panel_driver = {
419 .probe = panel_probe,
420 .remove = panel_remove,
421 .driver = {
422 .owner = THIS_MODULE,
423 .name = "panel",
424 .of_match_table = panel_of_match,
425 },
426};
427
428int __init tilcdc_panel_init(void)
429{
430 return platform_driver_register(&panel_driver);
431}
432
433void __exit tilcdc_panel_fini(void)
434{
435 platform_driver_unregister(&panel_driver);
436}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.h b/drivers/gpu/drm/tilcdc/tilcdc_panel.h
new file mode 100644
index 000000000000..7db40aacc74a
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.h
@@ -0,0 +1,26 @@
1/*
2 * Copyright (C) 2012 Texas Instruments
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __TILCDC_PANEL_H__
19#define __TILCDC_PANEL_H__
20
21/* sub-module for generic lcd panel output */
22
23int tilcdc_panel_init(void);
24void tilcdc_panel_fini(void);
25
26#endif /* __TILCDC_PANEL_H__ */
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_regs.h b/drivers/gpu/drm/tilcdc/tilcdc_regs.h
new file mode 100644
index 000000000000..17fd1b45428a
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_regs.h
@@ -0,0 +1,154 @@
1/*
2 * Copyright (C) 2012 Texas Instruments
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __TILCDC_REGS_H__
19#define __TILCDC_REGS_H__
20
21/* LCDC register definitions, based on da8xx-fb */
22
23#include <linux/bitops.h>
24
25#include "tilcdc_drv.h"
26
27/* LCDC Status Register */
28#define LCDC_END_OF_FRAME1 BIT(9)
29#define LCDC_END_OF_FRAME0 BIT(8)
30#define LCDC_PL_LOAD_DONE BIT(6)
31#define LCDC_FIFO_UNDERFLOW BIT(5)
32#define LCDC_SYNC_LOST BIT(2)
33#define LCDC_FRAME_DONE BIT(0)
34
35/* LCDC DMA Control Register */
36#define LCDC_DMA_BURST_SIZE(x) ((x) << 4)
37#define LCDC_DMA_BURST_1 0x0
38#define LCDC_DMA_BURST_2 0x1
39#define LCDC_DMA_BURST_4 0x2
40#define LCDC_DMA_BURST_8 0x3
41#define LCDC_DMA_BURST_16 0x4
42#define LCDC_V1_END_OF_FRAME_INT_ENA BIT(2)
43#define LCDC_V2_END_OF_FRAME0_INT_ENA BIT(8)
44#define LCDC_V2_END_OF_FRAME1_INT_ENA BIT(9)
45#define LCDC_DUAL_FRAME_BUFFER_ENABLE BIT(0)
46
47/* LCDC Control Register */
48#define LCDC_CLK_DIVISOR(x) ((x) << 8)
49#define LCDC_RASTER_MODE 0x01
50
51/* LCDC Raster Control Register */
52#define LCDC_PALETTE_LOAD_MODE(x) ((x) << 20)
53#define PALETTE_AND_DATA 0x00
54#define PALETTE_ONLY 0x01
55#define DATA_ONLY 0x02
56
57#define LCDC_MONO_8BIT_MODE BIT(9)
58#define LCDC_RASTER_ORDER BIT(8)
59#define LCDC_TFT_MODE BIT(7)
60#define LCDC_V1_UNDERFLOW_INT_ENA BIT(6)
61#define LCDC_V2_UNDERFLOW_INT_ENA BIT(5)
62#define LCDC_V1_PL_INT_ENA BIT(4)
63#define LCDC_V2_PL_INT_ENA BIT(6)
64#define LCDC_MONOCHROME_MODE BIT(1)
65#define LCDC_RASTER_ENABLE BIT(0)
66#define LCDC_TFT_ALT_ENABLE BIT(23)
67#define LCDC_STN_565_ENABLE BIT(24)
68#define LCDC_V2_DMA_CLK_EN BIT(2)
69#define LCDC_V2_LIDD_CLK_EN BIT(1)
70#define LCDC_V2_CORE_CLK_EN BIT(0)
71#define LCDC_V2_LPP_B10 26
72#define LCDC_V2_TFT_24BPP_MODE BIT(25)
73#define LCDC_V2_TFT_24BPP_UNPACK BIT(26)
74
75/* LCDC Raster Timing 2 Register */
76#define LCDC_AC_BIAS_TRANSITIONS_PER_INT(x) ((x) << 16)
77#define LCDC_AC_BIAS_FREQUENCY(x) ((x) << 8)
78#define LCDC_SYNC_CTRL BIT(25)
79#define LCDC_SYNC_EDGE BIT(24)
80#define LCDC_INVERT_PIXEL_CLOCK BIT(22)
81#define LCDC_INVERT_HSYNC BIT(21)
82#define LCDC_INVERT_VSYNC BIT(20)
83
84/* LCDC Block */
85#define LCDC_PID_REG 0x0
86#define LCDC_CTRL_REG 0x4
87#define LCDC_STAT_REG 0x8
88#define LCDC_RASTER_CTRL_REG 0x28
89#define LCDC_RASTER_TIMING_0_REG 0x2c
90#define LCDC_RASTER_TIMING_1_REG 0x30
91#define LCDC_RASTER_TIMING_2_REG 0x34
92#define LCDC_DMA_CTRL_REG 0x40
93#define LCDC_DMA_FB_BASE_ADDR_0_REG 0x44
94#define LCDC_DMA_FB_CEILING_ADDR_0_REG 0x48
95#define LCDC_DMA_FB_BASE_ADDR_1_REG 0x4c
96#define LCDC_DMA_FB_CEILING_ADDR_1_REG 0x50
97
98/* Interrupt Registers available only in Version 2 */
99#define LCDC_RAW_STAT_REG 0x58
100#define LCDC_MASKED_STAT_REG 0x5c
101#define LCDC_INT_ENABLE_SET_REG 0x60
102#define LCDC_INT_ENABLE_CLR_REG 0x64
103#define LCDC_END_OF_INT_IND_REG 0x68
104
105/* Clock registers available only on Version 2 */
106#define LCDC_CLK_ENABLE_REG 0x6c
107#define LCDC_CLK_RESET_REG 0x70
108#define LCDC_CLK_MAIN_RESET BIT(3)
109
110
111/*
112 * Helpers:
113 */
114
115static inline void tilcdc_write(struct drm_device *dev, u32 reg, u32 data)
116{
117 struct tilcdc_drm_private *priv = dev->dev_private;
118 iowrite32(data, priv->mmio + reg);
119}
120
121static inline u32 tilcdc_read(struct drm_device *dev, u32 reg)
122{
123 struct tilcdc_drm_private *priv = dev->dev_private;
124 return ioread32(priv->mmio + reg);
125}
126
127static inline void tilcdc_set(struct drm_device *dev, u32 reg, u32 mask)
128{
129 tilcdc_write(dev, reg, tilcdc_read(dev, reg) | mask);
130}
131
132static inline void tilcdc_clear(struct drm_device *dev, u32 reg, u32 mask)
133{
134 tilcdc_write(dev, reg, tilcdc_read(dev, reg) & ~mask);
135}
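/* Editorial example: these helpers make register updates single-line
 * read-modify-writes, e.g. enabling or disabling the raster engine:
 *
 *	tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
 *	tilcdc_clear(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
 */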
136
137/* the register to read/clear irqstatus differs between v1 and v2 of the IP */
138static inline u32 tilcdc_irqstatus_reg(struct drm_device *dev)
139{
140 struct tilcdc_drm_private *priv = dev->dev_private;
141 return (priv->rev == 2) ? LCDC_MASKED_STAT_REG : LCDC_STAT_REG;
142}
143
144static inline u32 tilcdc_read_irqstatus(struct drm_device *dev)
145{
146 return tilcdc_read(dev, tilcdc_irqstatus_reg(dev));
147}
148
149static inline void tilcdc_clear_irqstatus(struct drm_device *dev, u32 mask)
150{
151 tilcdc_write(dev, tilcdc_irqstatus_reg(dev), mask);
152}
153
154#endif /* __TILCDC_REGS_H__ */
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
new file mode 100644
index 000000000000..568dc1c08e6c
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
@@ -0,0 +1,376 @@
1/*
2 * Copyright (C) 2012 Texas Instruments
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/i2c.h>
19#include <linux/of_i2c.h>
20#include <linux/pinctrl/pinmux.h>
21#include <linux/pinctrl/consumer.h>
22#include <drm/drm_encoder_slave.h>
23
24#include "tilcdc_drv.h"
25
26struct slave_module {
27 struct tilcdc_module base;
28 struct i2c_adapter *i2c;
29};
30#define to_slave_module(x) container_of(x, struct slave_module, base)
31
32static const struct tilcdc_panel_info slave_info = {
33 .bpp = 16,
34 .ac_bias = 255,
35 .ac_bias_intrpt = 0,
36 .dma_burst_sz = 16,
37 .fdd = 0x80,
38 .tft_alt_mode = 0,
39 .sync_edge = 0,
40 .sync_ctrl = 1,
41 .raster_order = 0,
42};
43
44
45/*
46 * Encoder:
47 */
48
49struct slave_encoder {
50 struct drm_encoder_slave base;
51 struct slave_module *mod;
52};
53#define to_slave_encoder(x) container_of(to_encoder_slave(x), struct slave_encoder, base)
54
55static inline struct drm_encoder_slave_funcs *
56get_slave_funcs(struct drm_encoder *enc)
57{
58 return to_encoder_slave(enc)->slave_funcs;
59}
60
61static void slave_encoder_destroy(struct drm_encoder *encoder)
62{
63 struct slave_encoder *slave_encoder = to_slave_encoder(encoder);
64 if (get_slave_funcs(encoder))
65 get_slave_funcs(encoder)->destroy(encoder);
66 drm_encoder_cleanup(encoder);
67 kfree(slave_encoder);
68}
69
70static void slave_encoder_prepare(struct drm_encoder *encoder)
71{
72 drm_i2c_encoder_prepare(encoder);
73 tilcdc_crtc_set_panel_info(encoder->crtc, &slave_info);
74}
75
76static const struct drm_encoder_funcs slave_encoder_funcs = {
77 .destroy = slave_encoder_destroy,
78};
79
80static const struct drm_encoder_helper_funcs slave_encoder_helper_funcs = {
81 .dpms = drm_i2c_encoder_dpms,
82 .mode_fixup = drm_i2c_encoder_mode_fixup,
83 .prepare = slave_encoder_prepare,
84 .commit = drm_i2c_encoder_commit,
85 .mode_set = drm_i2c_encoder_mode_set,
86 .save = drm_i2c_encoder_save,
87 .restore = drm_i2c_encoder_restore,
88};
89
90static const struct i2c_board_info info = {
91 I2C_BOARD_INFO("tda998x", 0x70)
92};
93
94static struct drm_encoder *slave_encoder_create(struct drm_device *dev,
95 struct slave_module *mod)
96{
97 struct slave_encoder *slave_encoder;
98 struct drm_encoder *encoder;
99 int ret;
100
101 slave_encoder = kzalloc(sizeof(*slave_encoder), GFP_KERNEL);
102 if (!slave_encoder) {
103 dev_err(dev->dev, "allocation failed\n");
104 return NULL;
105 }
106
107 slave_encoder->mod = mod;
108
109 encoder = &slave_encoder->base.base;
110 encoder->possible_crtcs = 1;
111
112 ret = drm_encoder_init(dev, encoder, &slave_encoder_funcs,
113 DRM_MODE_ENCODER_TMDS);
114 if (ret)
115 goto fail;
116
117 drm_encoder_helper_add(encoder, &slave_encoder_helper_funcs);
118
119 ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder), mod->i2c, &info);
120 if (ret)
121 goto fail;
122
123 return encoder;
124
125fail:
126 slave_encoder_destroy(encoder);
127 return NULL;
128}
129
130/*
131 * Connector:
132 */
133
134struct slave_connector {
135 struct drm_connector base;
136
137 struct drm_encoder *encoder; /* our connected encoder */
138 struct slave_module *mod;
139};
140#define to_slave_connector(x) container_of(x, struct slave_connector, base)
141
142static void slave_connector_destroy(struct drm_connector *connector)
143{
144 struct slave_connector *slave_connector = to_slave_connector(connector);
145 drm_connector_cleanup(connector);
146 kfree(slave_connector);
147}
148
149static enum drm_connector_status slave_connector_detect(
150 struct drm_connector *connector,
151 bool force)
152{
153 struct drm_encoder *encoder = to_slave_connector(connector)->encoder;
154 return get_slave_funcs(encoder)->detect(encoder, connector);
155}
156
157static int slave_connector_get_modes(struct drm_connector *connector)
158{
159 struct drm_encoder *encoder = to_slave_connector(connector)->encoder;
160 return get_slave_funcs(encoder)->get_modes(encoder, connector);
161}
162
163static int slave_connector_mode_valid(struct drm_connector *connector,
164 struct drm_display_mode *mode)
165{
166 struct drm_encoder *encoder = to_slave_connector(connector)->encoder;
167 struct tilcdc_drm_private *priv = connector->dev->dev_private;
168 int ret;
169
170 ret = tilcdc_crtc_mode_valid(priv->crtc, mode);
171 if (ret != MODE_OK)
172 return ret;
173
174 return get_slave_funcs(encoder)->mode_valid(encoder, mode);
175}
176
177static struct drm_encoder *slave_connector_best_encoder(
178 struct drm_connector *connector)
179{
180 struct slave_connector *slave_connector = to_slave_connector(connector);
181 return slave_connector->encoder;
182}
183
184static int slave_connector_set_property(struct drm_connector *connector,
185 struct drm_property *property, uint64_t value)
186{
187 struct drm_encoder *encoder = to_slave_connector(connector)->encoder;
188 return get_slave_funcs(encoder)->set_property(encoder,
189 connector, property, value);
190}
191
192static const struct drm_connector_funcs slave_connector_funcs = {
193 .destroy = slave_connector_destroy,
194 .dpms = drm_helper_connector_dpms,
195 .detect = slave_connector_detect,
196 .fill_modes = drm_helper_probe_single_connector_modes,
197 .set_property = slave_connector_set_property,
198};
199
200static const struct drm_connector_helper_funcs slave_connector_helper_funcs = {
201 .get_modes = slave_connector_get_modes,
202 .mode_valid = slave_connector_mode_valid,
203 .best_encoder = slave_connector_best_encoder,
204};
205
206static struct drm_connector *slave_connector_create(struct drm_device *dev,
207 struct slave_module *mod, struct drm_encoder *encoder)
208{
209 struct slave_connector *slave_connector;
210 struct drm_connector *connector;
211 int ret;
212
213 slave_connector = kzalloc(sizeof(*slave_connector), GFP_KERNEL);
214 if (!slave_connector) {
215 dev_err(dev->dev, "allocation failed\n");
216 return NULL;
217 }
218
219 slave_connector->encoder = encoder;
220 slave_connector->mod = mod;
221
222 connector = &slave_connector->base;
223
224 drm_connector_init(dev, connector, &slave_connector_funcs,
225 DRM_MODE_CONNECTOR_HDMIA);
226 drm_connector_helper_add(connector, &slave_connector_helper_funcs);
227
228 connector->polled = DRM_CONNECTOR_POLL_CONNECT |
229 DRM_CONNECTOR_POLL_DISCONNECT;
230
231 connector->interlace_allowed = 0;
232 connector->doublescan_allowed = 0;
233
234 get_slave_funcs(encoder)->create_resources(encoder, connector);
235
236 ret = drm_mode_connector_attach_encoder(connector, encoder);
237 if (ret)
238 goto fail;
239
240 drm_sysfs_connector_add(connector);
241
242 return connector;
243
244fail:
245 slave_connector_destroy(connector);
246 return NULL;
247}
248
249/*
250 * Module:
251 */
252
253static int slave_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
254{
255 struct slave_module *slave_mod = to_slave_module(mod);
256 struct tilcdc_drm_private *priv = dev->dev_private;
257 struct drm_encoder *encoder;
258 struct drm_connector *connector;
259
260 encoder = slave_encoder_create(dev, slave_mod);
261 if (!encoder)
262 return -ENOMEM;
263
264 connector = slave_connector_create(dev, slave_mod, encoder);
265 if (!connector)
266 return -ENOMEM;
267
268 priv->encoders[priv->num_encoders++] = encoder;
269 priv->connectors[priv->num_connectors++] = connector;
270
271 return 0;
272}
273
274static void slave_destroy(struct tilcdc_module *mod)
275{
276 struct slave_module *slave_mod = to_slave_module(mod);
277
278 tilcdc_module_cleanup(mod);
279 kfree(slave_mod);
280}
281
282static const struct tilcdc_module_ops slave_module_ops = {
283 .modeset_init = slave_modeset_init,
284 .destroy = slave_destroy,
285};
286
287/*
288 * Device:
289 */
290
291static struct of_device_id slave_of_match[];
292
293static int slave_probe(struct platform_device *pdev)
294{
295 struct device_node *node = pdev->dev.of_node;
296 struct device_node *i2c_node;
297 struct slave_module *slave_mod;
298 struct tilcdc_module *mod;
299 struct pinctrl *pinctrl;
300 uint32_t i2c_phandle;
301 int ret = -EINVAL;
302
303 /* bail out early if no DT data: */
304 if (!node) {
305 dev_err(&pdev->dev, "device-tree data is missing\n");
306 return -ENXIO;
307 }
308
309 slave_mod = kzalloc(sizeof(*slave_mod), GFP_KERNEL);
310 if (!slave_mod)
311 return -ENOMEM;
312
313 mod = &slave_mod->base;
314
315 tilcdc_module_init(mod, "slave", &slave_module_ops);
316
317 pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
318 if (IS_ERR(pinctrl))
319 dev_warn(&pdev->dev, "pins are not configured\n");
320
321 if (of_property_read_u32(node, "i2c", &i2c_phandle)) {
322 dev_err(&pdev->dev, "could not get i2c bus phandle\n");
323 goto fail;
324 }
325
326 i2c_node = of_find_node_by_phandle(i2c_phandle);
327 if (!i2c_node) {
328 dev_err(&pdev->dev, "could not get i2c bus node\n");
329 goto fail;
330 }
331
332 slave_mod->i2c = of_find_i2c_adapter_by_node(i2c_node);
333 if (!slave_mod->i2c) {
334 dev_err(&pdev->dev, "could not get i2c\n");
335 goto fail;
336 }
337
338 of_node_put(i2c_node);
339
340 return 0;
341
342fail:
343 slave_destroy(mod);
344 return ret;
345}
346
347static int slave_remove(struct platform_device *pdev)
348{
349 return 0;
350}
351
352static struct of_device_id slave_of_match[] = {
353 { .compatible = "ti,tilcdc,slave", },
354 { },
355};
356MODULE_DEVICE_TABLE(of, slave_of_match);
357
358struct platform_driver slave_driver = {
359 .probe = slave_probe,
360 .remove = slave_remove,
361 .driver = {
362 .owner = THIS_MODULE,
363 .name = "slave",
364 .of_match_table = slave_of_match,
365 },
366};
367
368int __init tilcdc_slave_init(void)
369{
370 return platform_driver_register(&slave_driver);
371}
372
373void __exit tilcdc_slave_fini(void)
374{
375 platform_driver_unregister(&slave_driver);
376}
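The init/fini pair above is registered from the main tilcdc driver's module init, so the sub-module's platform driver exists before the LCDC device binds and walks its module list. A minimal sketch of that wiring (assuming a tilcdc_platform_driver as in tilcdc_drv.c; the exact registration order and error handling there may differ):

	/* sketch only -- in tilcdc_drv.c */
	static int __init tilcdc_drm_init(void)
	{
		/* register output sub-modules first, so their
		 * tilcdc_module_init() registrations exist when the
		 * LCDC device probes and calls modeset_init() */
		tilcdc_tfp410_init();
		tilcdc_slave_init();
		return platform_driver_register(&tilcdc_platform_driver);
	}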
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.h b/drivers/gpu/drm/tilcdc/tilcdc_slave.h
new file mode 100644
index 000000000000..2f8504848320
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.h
@@ -0,0 +1,26 @@
1/*
2 * Copyright (C) 2012 Texas Instruments
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __TILCDC_SLAVE_H__
19#define __TILCDC_SLAVE_H__
20
21/* sub-module for i2c slave encoder output */
22
23int tilcdc_slave_init(void);
24void tilcdc_slave_fini(void);
25
26#endif /* __TILCDC_SLAVE_H__ */
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
new file mode 100644
index 000000000000..58d487ba2414
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -0,0 +1,419 @@
1/*
2 * Copyright (C) 2012 Texas Instruments
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/i2c.h>
19#include <linux/of_i2c.h>
20#include <linux/gpio.h>
21#include <linux/of_gpio.h>
22#include <linux/pinctrl/pinmux.h>
23#include <linux/pinctrl/consumer.h>
24
25#include "tilcdc_drv.h"
26
27struct tfp410_module {
28 struct tilcdc_module base;
29 struct i2c_adapter *i2c;
30 int gpio;
31};
32#define to_tfp410_module(x) container_of(x, struct tfp410_module, base)
33
34
35static const struct tilcdc_panel_info dvi_info = {
36 .ac_bias = 255,
37 .ac_bias_intrpt = 0,
38 .dma_burst_sz = 16,
39 .bpp = 16,
40 .fdd = 0x80,
41 .tft_alt_mode = 0,
42 .sync_edge = 0,
43 .sync_ctrl = 1,
44 .raster_order = 0,
45};
46
47/*
48 * Encoder:
49 */
50
51struct tfp410_encoder {
52 struct drm_encoder base;
53 struct tfp410_module *mod;
54 int dpms;
55};
56#define to_tfp410_encoder(x) container_of(x, struct tfp410_encoder, base)
57
58
59static void tfp410_encoder_destroy(struct drm_encoder *encoder)
60{
61 struct tfp410_encoder *tfp410_encoder = to_tfp410_encoder(encoder);
62 drm_encoder_cleanup(encoder);
63 kfree(tfp410_encoder);
64}
65
66static void tfp410_encoder_dpms(struct drm_encoder *encoder, int mode)
67{
68 struct tfp410_encoder *tfp410_encoder = to_tfp410_encoder(encoder);
69
70 if (tfp410_encoder->dpms == mode)
71 return;
72
73 if (mode == DRM_MODE_DPMS_ON) {
74 DBG("Power on");
75 gpio_direction_output(tfp410_encoder->mod->gpio, 1);
76 } else {
77 DBG("Power off");
78 gpio_direction_output(tfp410_encoder->mod->gpio, 0);
79 }
80
81 tfp410_encoder->dpms = mode;
82}
83
84static bool tfp410_encoder_mode_fixup(struct drm_encoder *encoder,
85 const struct drm_display_mode *mode,
86 struct drm_display_mode *adjusted_mode)
87{
88 /* nothing needed */
89 return true;
90}
91
92static void tfp410_encoder_prepare(struct drm_encoder *encoder)
93{
94 tfp410_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
95 tilcdc_crtc_set_panel_info(encoder->crtc, &dvi_info);
96}
97
98static void tfp410_encoder_commit(struct drm_encoder *encoder)
99{
100 tfp410_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
101}
102
103static void tfp410_encoder_mode_set(struct drm_encoder *encoder,
104 struct drm_display_mode *mode,
105 struct drm_display_mode *adjusted_mode)
106{
107 /* nothing needed */
108}
109
110static const struct drm_encoder_funcs tfp410_encoder_funcs = {
111 .destroy = tfp410_encoder_destroy,
112};
113
114static const struct drm_encoder_helper_funcs tfp410_encoder_helper_funcs = {
115 .dpms = tfp410_encoder_dpms,
116 .mode_fixup = tfp410_encoder_mode_fixup,
117 .prepare = tfp410_encoder_prepare,
118 .commit = tfp410_encoder_commit,
119 .mode_set = tfp410_encoder_mode_set,
120};
121
122static struct drm_encoder *tfp410_encoder_create(struct drm_device *dev,
123 struct tfp410_module *mod)
124{
125 struct tfp410_encoder *tfp410_encoder;
126 struct drm_encoder *encoder;
127 int ret;
128
129 tfp410_encoder = kzalloc(sizeof(*tfp410_encoder), GFP_KERNEL);
130 if (!tfp410_encoder) {
131 dev_err(dev->dev, "allocation failed\n");
132 return NULL;
133 }
134
135 tfp410_encoder->dpms = DRM_MODE_DPMS_OFF;
136 tfp410_encoder->mod = mod;
137
138 encoder = &tfp410_encoder->base;
139 encoder->possible_crtcs = 1;
140
141 ret = drm_encoder_init(dev, encoder, &tfp410_encoder_funcs,
142 DRM_MODE_ENCODER_TMDS);
143 if (ret < 0)
144 goto fail;
145
146 drm_encoder_helper_add(encoder, &tfp410_encoder_helper_funcs);
147
148 return encoder;
149
150fail:
151 tfp410_encoder_destroy(encoder);
152 return NULL;
153}
154
155/*
156 * Connector:
157 */
158
159struct tfp410_connector {
160 struct drm_connector base;
161
162 struct drm_encoder *encoder; /* our connected encoder */
163 struct tfp410_module *mod;
164};
165#define to_tfp410_connector(x) container_of(x, struct tfp410_connector, base)
166
167
168static void tfp410_connector_destroy(struct drm_connector *connector)
169{
170 struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
171 drm_connector_cleanup(connector);
172 kfree(tfp410_connector);
173}
174
175static enum drm_connector_status tfp410_connector_detect(
176 struct drm_connector *connector,
177 bool force)
178{
179 struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
180
181 if (drm_probe_ddc(tfp410_connector->mod->i2c))
182 return connector_status_connected;
183
184 return connector_status_unknown;
185}
186
187static int tfp410_connector_get_modes(struct drm_connector *connector)
188{
189 struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
190 struct edid *edid;
191 int ret = 0;
192
193 edid = drm_get_edid(connector, tfp410_connector->mod->i2c);
194
195 drm_mode_connector_update_edid_property(connector, edid);
196
197 if (edid) {
198 ret = drm_add_edid_modes(connector, edid);
199 kfree(edid);
200 }
201
202 return ret;
203}
204
205static int tfp410_connector_mode_valid(struct drm_connector *connector,
206 struct drm_display_mode *mode)
207{
208 struct tilcdc_drm_private *priv = connector->dev->dev_private;
209 /* our only constraints are what the crtc can generate: */
210 return tilcdc_crtc_mode_valid(priv->crtc, mode);
211}
212
213static struct drm_encoder *tfp410_connector_best_encoder(
214 struct drm_connector *connector)
215{
216 struct tfp410_connector *tfp410_connector = to_tfp410_connector(connector);
217 return tfp410_connector->encoder;
218}
219
220static const struct drm_connector_funcs tfp410_connector_funcs = {
221 .destroy = tfp410_connector_destroy,
222 .dpms = drm_helper_connector_dpms,
223 .detect = tfp410_connector_detect,
224 .fill_modes = drm_helper_probe_single_connector_modes,
225};
226
227static const struct drm_connector_helper_funcs tfp410_connector_helper_funcs = {
228 .get_modes = tfp410_connector_get_modes,
229 .mode_valid = tfp410_connector_mode_valid,
230 .best_encoder = tfp410_connector_best_encoder,
231};
232
233static struct drm_connector *tfp410_connector_create(struct drm_device *dev,
234 struct tfp410_module *mod, struct drm_encoder *encoder)
235{
236 struct tfp410_connector *tfp410_connector;
237 struct drm_connector *connector;
238 int ret;
239
240 tfp410_connector = kzalloc(sizeof(*tfp410_connector), GFP_KERNEL);
241 if (!tfp410_connector) {
242 dev_err(dev->dev, "allocation failed\n");
243 return NULL;
244 }
245
246 tfp410_connector->encoder = encoder;
247 tfp410_connector->mod = mod;
248
249 connector = &tfp410_connector->base;
250
251 drm_connector_init(dev, connector, &tfp410_connector_funcs,
252 DRM_MODE_CONNECTOR_DVID);
253 drm_connector_helper_add(connector, &tfp410_connector_helper_funcs);
254
255 connector->polled = DRM_CONNECTOR_POLL_CONNECT |
256 DRM_CONNECTOR_POLL_DISCONNECT;
257
258 connector->interlace_allowed = 0;
259 connector->doublescan_allowed = 0;
260
261 ret = drm_mode_connector_attach_encoder(connector, encoder);
262 if (ret)
263 goto fail;
264
265 drm_sysfs_connector_add(connector);
266
267 return connector;
268
269fail:
270 tfp410_connector_destroy(connector);
271 return NULL;
272}
273
274/*
275 * Module:
276 */
277
278static int tfp410_modeset_init(struct tilcdc_module *mod, struct drm_device *dev)
279{
280 struct tfp410_module *tfp410_mod = to_tfp410_module(mod);
281 struct tilcdc_drm_private *priv = dev->dev_private;
282 struct drm_encoder *encoder;
283 struct drm_connector *connector;
284
285 encoder = tfp410_encoder_create(dev, tfp410_mod);
286 if (!encoder)
287 return -ENOMEM;
288
289 connector = tfp410_connector_create(dev, tfp410_mod, encoder);
290 if (!connector)
291 return -ENOMEM;
292
293 priv->encoders[priv->num_encoders++] = encoder;
294 priv->connectors[priv->num_connectors++] = connector;
295
296 return 0;
297}
298
299static void tfp410_destroy(struct tilcdc_module *mod)
300{
301 struct tfp410_module *tfp410_mod = to_tfp410_module(mod);
302
303 if (tfp410_mod->i2c)
304 i2c_put_adapter(tfp410_mod->i2c);
305
306 if (!IS_ERR_VALUE(tfp410_mod->gpio))
307 gpio_free(tfp410_mod->gpio);
308
309 tilcdc_module_cleanup(mod);
310 kfree(tfp410_mod);
311}
312
313static const struct tilcdc_module_ops tfp410_module_ops = {
314 .modeset_init = tfp410_modeset_init,
315 .destroy = tfp410_destroy,
316};
317
318/*
319 * Device:
320 */
321
322static struct of_device_id tfp410_of_match[];
323
324static int tfp410_probe(struct platform_device *pdev)
325{
326 struct device_node *node = pdev->dev.of_node;
327 struct device_node *i2c_node;
328 struct tfp410_module *tfp410_mod;
329 struct tilcdc_module *mod;
330 struct pinctrl *pinctrl;
331 uint32_t i2c_phandle;
332 int ret = -EINVAL;
333
334 /* bail out early if no DT data: */
335 if (!node) {
336 dev_err(&pdev->dev, "device-tree data is missing\n");
337 return -ENXIO;
338 }
339
340 tfp410_mod = kzalloc(sizeof(*tfp410_mod), GFP_KERNEL);
341 if (!tfp410_mod)
342 return -ENOMEM;
343
344 mod = &tfp410_mod->base;
345
346 tilcdc_module_init(mod, "tfp410", &tfp410_module_ops);
347
348 pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
349 if (IS_ERR(pinctrl))
350 dev_warn(&pdev->dev, "pins are not configured\n");
351
352 if (of_property_read_u32(node, "i2c", &i2c_phandle)) {
353 dev_err(&pdev->dev, "could not get i2c bus phandle\n");
354 goto fail;
355 }
356
357 i2c_node = of_find_node_by_phandle(i2c_phandle);
358 if (!i2c_node) {
359 dev_err(&pdev->dev, "could not get i2c bus node\n");
360 goto fail;
361 }
362
363 tfp410_mod->i2c = of_find_i2c_adapter_by_node(i2c_node);
364 if (!tfp410_mod->i2c) {
365 dev_err(&pdev->dev, "could not get i2c\n");
366 goto fail;
367 }
368
369 of_node_put(i2c_node);
370
371 tfp410_mod->gpio = of_get_named_gpio_flags(node, "powerdn-gpio",
372 0, NULL);
373 if (IS_ERR_VALUE(tfp410_mod->gpio)) {
374 dev_warn(&pdev->dev, "No power down GPIO\n");
375 } else {
376 ret = gpio_request(tfp410_mod->gpio, "DVI_PDn");
377 if (ret) {
378 dev_err(&pdev->dev, "could not get DVI_PDn gpio\n");
379 goto fail;
380 }
381 }
382
383 return 0;
384
385fail:
386 tfp410_destroy(mod);
387 return ret;
388}
389
390static int tfp410_remove(struct platform_device *pdev)
391{
392 return 0;
393}
394
395static struct of_device_id tfp410_of_match[] = {
396 { .compatible = "ti,tilcdc,tfp410", },
397 { },
398};
399MODULE_DEVICE_TABLE(of, tfp410_of_match);
400
401struct platform_driver tfp410_driver = {
402 .probe = tfp410_probe,
403 .remove = tfp410_remove,
404 .driver = {
405 .owner = THIS_MODULE,
406 .name = "tfp410",
407 .of_match_table = tfp410_of_match,
408 },
409};
410
411int __init tilcdc_tfp410_init(void)
412{
413 return platform_driver_register(&tfp410_driver);
414}
415
416void __exit tilcdc_tfp410_fini(void)
417{
418 platform_driver_unregister(&tfp410_driver);
419}
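Both slave_probe() and tfp410_probe() resolve their DDC bus from a bare "i2c" phandle property. Note that on the lookup failure paths above the reference taken by of_find_node_by_phandle() is not dropped; a hedged sketch of the same lookup with the node reference released on every path (helper name hypothetical):

	/* sketch: resolve an "i2c = <&i2cN>;" property to an adapter */
	static struct i2c_adapter *get_adapter_by_phandle(
			struct device_node *np, const char *prop)
	{
		struct device_node *i2c_node;
		struct i2c_adapter *adap;
		uint32_t phandle;

		if (of_property_read_u32(np, prop, &phandle))
			return NULL;

		i2c_node = of_find_node_by_phandle(phandle);
		if (!i2c_node)
			return NULL;

		adap = of_find_i2c_adapter_by_node(i2c_node);
		/* drop the node reference whether or not an adapter was
		 * found; the adapter holds its own references */
		of_node_put(i2c_node);
		return adap;
	}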
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.h b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.h
new file mode 100644
index 000000000000..5b800f1f6aa5
--- /dev/null
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.h
@@ -0,0 +1,26 @@
1/*
2 * Copyright (C) 2012 Texas Instruments
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef __TILCDC_TFP410_H__
19#define __TILCDC_TFP410_H__
20
21/* sub-module for tfp410 dvi adaptor */
22
23int tilcdc_tfp410_init(void);
24void tilcdc_tfp410_fini(void);
25
26#endif /* __TILCDC_TFP410_H__ */
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 52b20b12c83a..9b07b7d44a58 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -158,7 +158,8 @@ static void ttm_bo_release_list(struct kref *list_kref)
158 ttm_mem_global_free(bdev->glob->mem_glob, acc_size); 158 ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
159} 159}
160 160
161int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible) 161static int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
162 bool interruptible)
162{ 163{
163 if (interruptible) { 164 if (interruptible) {
164 return wait_event_interruptible(bo->event_queue, 165 return wait_event_interruptible(bo->event_queue,
@@ -168,7 +169,6 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
168 return 0; 169 return 0;
169 } 170 }
170} 171}
171EXPORT_SYMBOL(ttm_bo_wait_unreserved);
172 172
173void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) 173void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
174{ 174{
@@ -213,14 +213,13 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
213 return put_count; 213 return put_count;
214} 214}
215 215
216int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, 216int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
217 bool interruptible, 217 bool interruptible,
218 bool no_wait, bool use_sequence, uint32_t sequence) 218 bool no_wait, bool use_sequence, uint32_t sequence)
219{ 219{
220 struct ttm_bo_global *glob = bo->glob;
221 int ret; 220 int ret;
222 221
223 while (unlikely(atomic_read(&bo->reserved) != 0)) { 222 while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
224 /** 223 /**
225 * Deadlock avoidance for multi-bo reserving. 224 * Deadlock avoidance for multi-bo reserving.
226 */ 225 */
@@ -241,26 +240,36 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
241 if (no_wait) 240 if (no_wait)
242 return -EBUSY; 241 return -EBUSY;
243 242
244 spin_unlock(&glob->lru_lock);
245 ret = ttm_bo_wait_unreserved(bo, interruptible); 243 ret = ttm_bo_wait_unreserved(bo, interruptible);
246 spin_lock(&glob->lru_lock);
247 244
248 if (unlikely(ret)) 245 if (unlikely(ret))
249 return ret; 246 return ret;
250 } 247 }
251 248
252 atomic_set(&bo->reserved, 1);
253 if (use_sequence) { 249 if (use_sequence) {
250 bool wake_up = false;
254 /** 251 /**
255 * Wake up waiters that may need to recheck for deadlock, 252 * Wake up waiters that may need to recheck for deadlock,
256 * if we decreased the sequence number. 253 * if we decreased the sequence number.
257 */ 254 */
258 if (unlikely((bo->val_seq - sequence < (1 << 31)) 255 if (unlikely((bo->val_seq - sequence < (1 << 31))
259 || !bo->seq_valid)) 256 || !bo->seq_valid))
260 wake_up_all(&bo->event_queue); 257 wake_up = true;
261 258
259 /*
260 * In the worst case with memory ordering these values can be
 261 * seen in the wrong order. However, since we call wake_up_all
 262 * in that case this will hopefully not pose a problem: the
 263 * worst case would only cause someone to accidentally hit
 264 * -EAGAIN in ttm_bo_reserve when they see an old value of
 265 * val_seq. That would only happen if seq_valid was written
 266 * before val_seq was, and it just means some slightly
 267 * increased cpu usage.
268 */
262 bo->val_seq = sequence; 269 bo->val_seq = sequence;
263 bo->seq_valid = true; 270 bo->seq_valid = true;
271 if (wake_up)
272 wake_up_all(&bo->event_queue);
264 } else { 273 } else {
265 bo->seq_valid = false; 274 bo->seq_valid = false;
266 } 275 }
@@ -289,17 +298,64 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
289 int put_count = 0; 298 int put_count = 0;
290 int ret; 299 int ret;
291 300
292 spin_lock(&glob->lru_lock); 301 ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
293 ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence, 302 sequence);
294 sequence); 303 if (likely(ret == 0)) {
295 if (likely(ret == 0)) 304 spin_lock(&glob->lru_lock);
296 put_count = ttm_bo_del_from_lru(bo); 305 put_count = ttm_bo_del_from_lru(bo);
297 spin_unlock(&glob->lru_lock); 306 spin_unlock(&glob->lru_lock);
307 ttm_bo_list_ref_sub(bo, put_count, true);
308 }
298 309
299 ttm_bo_list_ref_sub(bo, put_count, true); 310 return ret;
311}
312
313int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
314 bool interruptible, uint32_t sequence)
315{
316 bool wake_up = false;
317 int ret;
318
319 while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
320 WARN_ON(bo->seq_valid && sequence == bo->val_seq);
321
322 ret = ttm_bo_wait_unreserved(bo, interruptible);
300 323
324 if (unlikely(ret))
325 return ret;
326 }
327
328 if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)
329 wake_up = true;
330
331 /**
332 * Wake up waiters that may need to recheck for deadlock,
333 * if we decreased the sequence number.
334 */
335 bo->val_seq = sequence;
336 bo->seq_valid = true;
337 if (wake_up)
338 wake_up_all(&bo->event_queue);
339
340 return 0;
341}
342
343int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
344 bool interruptible, uint32_t sequence)
345{
346 struct ttm_bo_global *glob = bo->glob;
347 int put_count, ret;
348
349 ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
350 if (likely(!ret)) {
351 spin_lock(&glob->lru_lock);
352 put_count = ttm_bo_del_from_lru(bo);
353 spin_unlock(&glob->lru_lock);
354 ttm_bo_list_ref_sub(bo, put_count, true);
355 }
301 return ret; 356 return ret;
302} 357}
358EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
303 359
304void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo) 360void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
305{ 361{
@@ -511,7 +567,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
511 int ret; 567 int ret;
512 568
513 spin_lock(&glob->lru_lock); 569 spin_lock(&glob->lru_lock);
514 ret = ttm_bo_reserve_locked(bo, false, true, false, 0); 570 ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
515 571
516 spin_lock(&bdev->fence_lock); 572 spin_lock(&bdev->fence_lock);
517 (void) ttm_bo_wait(bo, false, false, true); 573 (void) ttm_bo_wait(bo, false, false, true);
@@ -604,7 +660,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
604 return ret; 660 return ret;
605 661
606 spin_lock(&glob->lru_lock); 662 spin_lock(&glob->lru_lock);
607 ret = ttm_bo_reserve_locked(bo, false, true, false, 0); 663 ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
608 664
609 /* 665 /*
610 * We raced, and lost, someone else holds the reservation now, 666 * We raced, and lost, someone else holds the reservation now,
@@ -668,7 +724,14 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
668 kref_get(&nentry->list_kref); 724 kref_get(&nentry->list_kref);
669 } 725 }
670 726
671 ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0); 727 ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
728 if (remove_all && ret) {
729 spin_unlock(&glob->lru_lock);
730 ret = ttm_bo_reserve_nolru(entry, false, false,
731 false, 0);
732 spin_lock(&glob->lru_lock);
733 }
734
672 if (!ret) 735 if (!ret)
673 ret = ttm_bo_cleanup_refs_and_unlock(entry, false, 736 ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
674 !remove_all); 737 !remove_all);
@@ -816,7 +879,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
816 879
817 spin_lock(&glob->lru_lock); 880 spin_lock(&glob->lru_lock);
818 list_for_each_entry(bo, &man->lru, lru) { 881 list_for_each_entry(bo, &man->lru, lru) {
819 ret = ttm_bo_reserve_locked(bo, false, true, false, 0); 882 ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
820 if (!ret) 883 if (!ret)
821 break; 884 break;
822 } 885 }
@@ -1797,7 +1860,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1797 1860
1798 spin_lock(&glob->lru_lock); 1861 spin_lock(&glob->lru_lock);
1799 list_for_each_entry(bo, &glob->swap_lru, swap) { 1862 list_for_each_entry(bo, &glob->swap_lru, swap) {
1800 ret = ttm_bo_reserve_locked(bo, false, true, false, 0); 1863 ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
1801 if (!ret) 1864 if (!ret)
1802 break; 1865 break;
1803 } 1866 }
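The net effect of this rework: the reservation fast path no longer takes lru_lock at all (an atomic_xchg on bo->reserved), and the -EAGAIN deadlock-avoidance case gets an explicit sleeping slowpath instead of the old wait-and-respin. From a caller's perspective the protocol looks roughly like this (a sketch of the contract, not actual driver code):

	int ret = ttm_bo_reserve(bo, true, false, true, val_seq);
	if (ret == -EAGAIN) {
		/* we lost the sequence tie-break against an older
		 * reservation: back off everything else we hold ... */
		/* ... ttm_bo_unreserve() on other held bos ... */
		/* ... then block until this bo can be reserved */
		ret = ttm_bo_reserve_slowpath(bo, true, val_seq);
		/* on success bo is reserved; retry the full set */
	}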
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index cd9e4523dc56..7b90def15674 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -82,22 +82,6 @@ static void ttm_eu_list_ref_sub(struct list_head *list)
82 } 82 }
83} 83}
84 84
85static int ttm_eu_wait_unreserved_locked(struct list_head *list,
86 struct ttm_buffer_object *bo)
87{
88 struct ttm_bo_global *glob = bo->glob;
89 int ret;
90
91 ttm_eu_del_from_lru_locked(list);
92 spin_unlock(&glob->lru_lock);
93 ret = ttm_bo_wait_unreserved(bo, true);
94 spin_lock(&glob->lru_lock);
95 if (unlikely(ret != 0))
96 ttm_eu_backoff_reservation_locked(list);
97 return ret;
98}
99
100
101void ttm_eu_backoff_reservation(struct list_head *list) 85void ttm_eu_backoff_reservation(struct list_head *list)
102{ 86{
103 struct ttm_validate_buffer *entry; 87 struct ttm_validate_buffer *entry;
@@ -145,47 +129,65 @@ int ttm_eu_reserve_buffers(struct list_head *list)
145 entry = list_first_entry(list, struct ttm_validate_buffer, head); 129 entry = list_first_entry(list, struct ttm_validate_buffer, head);
146 glob = entry->bo->glob; 130 glob = entry->bo->glob;
147 131
148retry:
149 spin_lock(&glob->lru_lock); 132 spin_lock(&glob->lru_lock);
150 val_seq = entry->bo->bdev->val_seq++; 133 val_seq = entry->bo->bdev->val_seq++;
151 134
135retry:
152 list_for_each_entry(entry, list, head) { 136 list_for_each_entry(entry, list, head) {
153 struct ttm_buffer_object *bo = entry->bo; 137 struct ttm_buffer_object *bo = entry->bo;
154 138
155retry_this_bo: 139 /* already slowpath reserved? */
156 ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq); 140 if (entry->reserved)
141 continue;
142
143 ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
157 switch (ret) { 144 switch (ret) {
158 case 0: 145 case 0:
159 break; 146 break;
160 case -EBUSY: 147 case -EBUSY:
161 ret = ttm_eu_wait_unreserved_locked(list, bo); 148 ttm_eu_del_from_lru_locked(list);
162 if (unlikely(ret != 0)) { 149 spin_unlock(&glob->lru_lock);
163 spin_unlock(&glob->lru_lock); 150 ret = ttm_bo_reserve_nolru(bo, true, false,
164 ttm_eu_list_ref_sub(list); 151 true, val_seq);
165 return ret; 152 spin_lock(&glob->lru_lock);
166 } 153 if (!ret)
167 goto retry_this_bo; 154 break;
155
156 if (unlikely(ret != -EAGAIN))
157 goto err;
158
159 /* fallthrough */
168 case -EAGAIN: 160 case -EAGAIN:
169 ttm_eu_backoff_reservation_locked(list); 161 ttm_eu_backoff_reservation_locked(list);
162
163 /*
 164 * temporarily increase the sequence number on every retry
 165 * to prevent us from seeing our old reservation sequence
 166 * when someone else has reserved the buffer but hasn't
 167 * updated the seq_valid/seqno members yet.
168 */
169 val_seq = entry->bo->bdev->val_seq++;
170
170 spin_unlock(&glob->lru_lock); 171 spin_unlock(&glob->lru_lock);
171 ttm_eu_list_ref_sub(list); 172 ttm_eu_list_ref_sub(list);
172 ret = ttm_bo_wait_unreserved(bo, true); 173 ret = ttm_bo_reserve_slowpath_nolru(bo, true, val_seq);
173 if (unlikely(ret != 0)) 174 if (unlikely(ret != 0))
174 return ret; 175 return ret;
176 spin_lock(&glob->lru_lock);
177 entry->reserved = true;
178 if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
179 ret = -EBUSY;
180 goto err;
181 }
175 goto retry; 182 goto retry;
176 default: 183 default:
177 ttm_eu_backoff_reservation_locked(list); 184 goto err;
178 spin_unlock(&glob->lru_lock);
179 ttm_eu_list_ref_sub(list);
180 return ret;
181 } 185 }
182 186
183 entry->reserved = true; 187 entry->reserved = true;
184 if (unlikely(atomic_read(&bo->cpu_writers) > 0)) { 188 if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
185 ttm_eu_backoff_reservation_locked(list); 189 ret = -EBUSY;
186 spin_unlock(&glob->lru_lock); 190 goto err;
187 ttm_eu_list_ref_sub(list);
188 return -EBUSY;
189 } 191 }
190 } 192 }
191 193
@@ -194,6 +196,12 @@ retry_this_bo:
194 ttm_eu_list_ref_sub(list); 196 ttm_eu_list_ref_sub(list);
195 197
196 return 0; 198 return 0;
199
200err:
201 ttm_eu_backoff_reservation_locked(list);
202 spin_unlock(&glob->lru_lock);
203 ttm_eu_list_ref_sub(list);
204 return ret;
197} 205}
198EXPORT_SYMBOL(ttm_eu_reserve_buffers); 206EXPORT_SYMBOL(ttm_eu_reserve_buffers);
199 207
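The driver-facing contract of ttm_eu_reserve_buffers() is unchanged by this rework: on success every entry on the validation list is reserved, and on any error none are. Typical command-submission usage, sketched with error handling trimmed (the fence object is whatever the driver's contemporary callers, e.g. radeon, pass as sync_obj):

	struct ttm_validate_buffer val_buf;
	struct list_head val_list;
	int ret;

	memset(&val_buf, 0, sizeof(val_buf));
	INIT_LIST_HEAD(&val_list);
	val_buf.bo = bo;			/* buffer to validate */
	list_add(&val_buf.head, &val_list);

	ret = ttm_eu_reserve_buffers(&val_list);
	if (ret)
		return ret;			/* nothing left reserved */

	/* ... validate placement, emit commands ... */

	ttm_eu_fence_buffer_objects(&val_list, fence);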
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 87aa5f5d3c88..cc6d90f28c71 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -75,6 +75,8 @@ struct udl_framebuffer {
75 struct drm_framebuffer base; 75 struct drm_framebuffer base;
76 struct udl_gem_object *obj; 76 struct udl_gem_object *obj;
77 bool active_16; /* active on the 16-bit channel */ 77 bool active_16; /* active on the 16-bit channel */
78 int x1, y1, x2, y2; /* dirty rect */
79 spinlock_t dirty_lock;
78}; 80};
79 81
80#define to_udl_fb(x) container_of(x, struct udl_framebuffer, base) 82#define to_udl_fb(x) container_of(x, struct udl_framebuffer, base)
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index d4ab3beaada0..9f4be3d4a02e 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -22,9 +22,9 @@
22 22
23#include <drm/drm_fb_helper.h> 23#include <drm/drm_fb_helper.h>
24 24
25#define DL_DEFIO_WRITE_DELAY 5 /* fb_deferred_io.delay in jiffies */ 25#define DL_DEFIO_WRITE_DELAY (HZ/20) /* fb_deferred_io.delay in jiffies */
26 26
27static int fb_defio = 1; /* Optionally enable experimental fb_defio mmap support */ 27static int fb_defio = 0; /* Optionally enable experimental fb_defio mmap support */
28static int fb_bpp = 16; 28static int fb_bpp = 16;
29 29
30module_param(fb_bpp, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP); 30module_param(fb_bpp, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
@@ -153,6 +153,9 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
153 struct urb *urb; 153 struct urb *urb;
154 int aligned_x; 154 int aligned_x;
155 int bpp = (fb->base.bits_per_pixel / 8); 155 int bpp = (fb->base.bits_per_pixel / 8);
156 int x2, y2;
157 bool store_for_later = false;
158 unsigned long flags;
156 159
157 if (!fb->active_16) 160 if (!fb->active_16)
158 return 0; 161 return 0;
@@ -169,8 +172,6 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
169 } 172 }
170 } 173 }
171 174
172 start_cycles = get_cycles();
173
174 aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long)); 175 aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
175 width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long)); 176 width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
176 x = aligned_x; 177 x = aligned_x;
@@ -180,19 +181,53 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
180 (y + height > fb->base.height)) 181 (y + height > fb->base.height))
181 return -EINVAL; 182 return -EINVAL;
182 183
 184 /* if we are in atomic context, just store the damage info for
 185 later; we can't test in_atomic() inside the spin lock */
186 if (in_atomic())
187 store_for_later = true;
188
189 x2 = x + width - 1;
190 y2 = y + height - 1;
191
192 spin_lock_irqsave(&fb->dirty_lock, flags);
193
194 if (fb->y1 < y)
195 y = fb->y1;
196 if (fb->y2 > y2)
197 y2 = fb->y2;
198 if (fb->x1 < x)
199 x = fb->x1;
200 if (fb->x2 > x2)
201 x2 = fb->x2;
202
203 if (store_for_later) {
204 fb->x1 = x;
205 fb->x2 = x2;
206 fb->y1 = y;
207 fb->y2 = y2;
208 spin_unlock_irqrestore(&fb->dirty_lock, flags);
209 return 0;
210 }
211
212 fb->x1 = fb->y1 = INT_MAX;
213 fb->x2 = fb->y2 = 0;
214
215 spin_unlock_irqrestore(&fb->dirty_lock, flags);
216 start_cycles = get_cycles();
217
183 urb = udl_get_urb(dev); 218 urb = udl_get_urb(dev);
184 if (!urb) 219 if (!urb)
185 return 0; 220 return 0;
186 cmd = urb->transfer_buffer; 221 cmd = urb->transfer_buffer;
187 222
188 for (i = y; i < y + height ; i++) { 223 for (i = y; i <= y2 ; i++) {
189 const int line_offset = fb->base.pitches[0] * i; 224 const int line_offset = fb->base.pitches[0] * i;
190 const int byte_offset = line_offset + (x * bpp); 225 const int byte_offset = line_offset + (x * bpp);
191 const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp); 226 const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
192 if (udl_render_hline(dev, bpp, &urb, 227 if (udl_render_hline(dev, bpp, &urb,
193 (char *) fb->obj->vmapping, 228 (char *) fb->obj->vmapping,
194 &cmd, byte_offset, dev_byte_offset, 229 &cmd, byte_offset, dev_byte_offset,
195 width * bpp, 230 (x2 - x + 1) * bpp,
196 &bytes_identical, &bytes_sent)) 231 &bytes_identical, &bytes_sent))
197 goto error; 232 goto error;
198 } 233 }
@@ -422,7 +457,6 @@ static void udl_user_framebuffer_destroy(struct drm_framebuffer *fb)
422static const struct drm_framebuffer_funcs udlfb_funcs = { 457static const struct drm_framebuffer_funcs udlfb_funcs = {
423 .destroy = udl_user_framebuffer_destroy, 458 .destroy = udl_user_framebuffer_destroy,
424 .dirty = udl_user_framebuffer_dirty, 459 .dirty = udl_user_framebuffer_dirty,
425 .create_handle = NULL,
426}; 460};
427 461
428 462
@@ -434,16 +468,18 @@ udl_framebuffer_init(struct drm_device *dev,
434{ 468{
435 int ret; 469 int ret;
436 470
471 spin_lock_init(&ufb->dirty_lock);
437 ufb->obj = obj; 472 ufb->obj = obj;
438 ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
439 drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd); 473 drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd);
474 ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs);
440 return ret; 475 return ret;
441} 476}
442 477
443 478
444static int udlfb_create(struct udl_fbdev *ufbdev, 479static int udlfb_create(struct drm_fb_helper *helper,
445 struct drm_fb_helper_surface_size *sizes) 480 struct drm_fb_helper_surface_size *sizes)
446{ 481{
482 struct udl_fbdev *ufbdev = (struct udl_fbdev *)helper;
447 struct drm_device *dev = ufbdev->helper.dev; 483 struct drm_device *dev = ufbdev->helper.dev;
448 struct fb_info *info; 484 struct fb_info *info;
449 struct device *device = &dev->usbdev->dev; 485 struct device *device = &dev->usbdev->dev;
@@ -521,27 +557,10 @@ out:
521 return ret; 557 return ret;
522} 558}
523 559
524static int udl_fb_find_or_create_single(struct drm_fb_helper *helper,
525 struct drm_fb_helper_surface_size *sizes)
526{
527 struct udl_fbdev *ufbdev = (struct udl_fbdev *)helper;
528 int new_fb = 0;
529 int ret;
530
531 if (!helper->fb) {
532 ret = udlfb_create(ufbdev, sizes);
533 if (ret)
534 return ret;
535
536 new_fb = 1;
537 }
538 return new_fb;
539}
540
541static struct drm_fb_helper_funcs udl_fb_helper_funcs = { 560static struct drm_fb_helper_funcs udl_fb_helper_funcs = {
542 .gamma_set = udl_crtc_fb_gamma_set, 561 .gamma_set = udl_crtc_fb_gamma_set,
543 .gamma_get = udl_crtc_fb_gamma_get, 562 .gamma_get = udl_crtc_fb_gamma_get,
544 .fb_probe = udl_fb_find_or_create_single, 563 .fb_probe = udlfb_create,
545}; 564};
546 565
547static void udl_fbdev_destroy(struct drm_device *dev, 566static void udl_fbdev_destroy(struct drm_device *dev,
@@ -556,6 +575,7 @@ static void udl_fbdev_destroy(struct drm_device *dev,
556 framebuffer_release(info); 575 framebuffer_release(info);
557 } 576 }
558 drm_fb_helper_fini(&ufbdev->helper); 577 drm_fb_helper_fini(&ufbdev->helper);
578 drm_framebuffer_unregister_private(&ufbdev->ufb.base);
559 drm_framebuffer_cleanup(&ufbdev->ufb.base); 579 drm_framebuffer_cleanup(&ufbdev->ufb.base);
560 drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base); 580 drm_gem_object_unreference_unlocked(&ufbdev->ufb.obj->base);
561} 581}
@@ -583,6 +603,10 @@ int udl_fbdev_init(struct drm_device *dev)
583 } 603 }
584 604
585 drm_fb_helper_single_add_all_connectors(&ufbdev->helper); 605 drm_fb_helper_single_add_all_connectors(&ufbdev->helper);
606
607 /* disable all the possible outputs/crtcs before entering KMS mode */
608 drm_helper_disable_unused_functions(dev);
609
586 drm_fb_helper_initial_config(&ufbdev->helper, bpp_sel); 610 drm_fb_helper_initial_config(&ufbdev->helper, bpp_sel);
587 return 0; 611 return 0;
588} 612}
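The damage handling above coalesces rectangles under dirty_lock: calls from atomic context only grow the stored dirty rect and return, and the next sleepable call flushes the union and resets the rect to the empty sentinel (x1/y1 = INT_MAX, x2/y2 = 0, which any real damage immediately overrides). The accumulation logic, distilled into a standalone sketch:

	#include <linux/kernel.h>	/* min/max, INT_MAX */

	struct dirty_rect { int x1, y1, x2, y2; };

	static void dirty_rect_reset(struct dirty_rect *r)
	{
		/* "empty": the first union shrinks x1/y1, grows x2/y2 */
		r->x1 = r->y1 = INT_MAX;
		r->x2 = r->y2 = 0;
	}

	static void dirty_rect_union(struct dirty_rect *r,
				     int x1, int y1, int x2, int y2)
	{
		r->x1 = min(r->x1, x1);
		r->y1 = min(r->y1, y1);
		r->x2 = max(r->x2, x2);
		r->y2 = max(r->y2, y2);
	}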
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index 142fee5f983f..f343db73e095 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -75,15 +75,19 @@ static int udl_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes)
75} 75}
76#endif 76#endif
77 77
78static inline u16 pixel32_to_be16p(const uint8_t *pixel) 78static inline u16 pixel32_to_be16(const uint32_t pixel)
79{ 79{
80 uint32_t pix = *(uint32_t *)pixel; 80 return (((pixel >> 3) & 0x001f) |
81 u16 retval; 81 ((pixel >> 5) & 0x07e0) |
82 ((pixel >> 8) & 0xf800));
83}
82 84
83 retval = (((pix >> 3) & 0x001f) | 85static bool pixel_repeats(const void *pixel, const uint32_t repeat, int bpp)
84 ((pix >> 5) & 0x07e0) | 86{
85 ((pix >> 8) & 0xf800)); 87 if (bpp == 2)
86 return retval; 88 return *(const uint16_t *)pixel == repeat;
89 else
90 return *(const uint32_t *)pixel == repeat;
87} 91}
88 92
89/* 93/*
@@ -152,29 +156,33 @@ static void udl_compress_hline16(
152 prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp); 156 prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp);
153 157
154 while (pixel < cmd_pixel_end) { 158 while (pixel < cmd_pixel_end) {
155 const u8 * const repeating_pixel = pixel; 159 const u8 *const start = pixel;
156 160 u32 repeating_pixel;
157 if (bpp == 2) 161
158 *(uint16_t *)cmd = cpu_to_be16p((uint16_t *)pixel); 162 if (bpp == 2) {
159 else if (bpp == 4) 163 repeating_pixel = *(uint16_t *)pixel;
160 *(uint16_t *)cmd = cpu_to_be16(pixel32_to_be16p(pixel)); 164 *(uint16_t *)cmd = cpu_to_be16(repeating_pixel);
165 } else {
166 repeating_pixel = *(uint32_t *)pixel;
167 *(uint16_t *)cmd = cpu_to_be16(pixel32_to_be16(repeating_pixel));
168 }
161 169
162 cmd += 2; 170 cmd += 2;
163 pixel += bpp; 171 pixel += bpp;
164 172
165 if (unlikely((pixel < cmd_pixel_end) && 173 if (unlikely((pixel < cmd_pixel_end) &&
166 (!memcmp(pixel, repeating_pixel, bpp)))) { 174 (pixel_repeats(pixel, repeating_pixel, bpp)))) {
167 /* go back and fill in raw pixel count */ 175 /* go back and fill in raw pixel count */
168 *raw_pixels_count_byte = (((repeating_pixel - 176 *raw_pixels_count_byte = (((start -
169 raw_pixel_start) / bpp) + 1) & 0xFF; 177 raw_pixel_start) / bpp) + 1) & 0xFF;
170 178
171 while ((pixel < cmd_pixel_end) 179 while ((pixel < cmd_pixel_end) &&
172 && (!memcmp(pixel, repeating_pixel, bpp))) { 180 (pixel_repeats(pixel, repeating_pixel, bpp))) {
173 pixel += bpp; 181 pixel += bpp;
174 } 182 }
175 183
176 /* immediately after raw data is repeat byte */ 184 /* immediately after raw data is repeat byte */
177 *cmd++ = (((pixel - repeating_pixel) / bpp) - 1) & 0xFF; 185 *cmd++ = (((pixel - start) / bpp) - 1) & 0xFF;
178 186
179 /* Then start another raw pixel span */ 187 /* Then start another raw pixel span */
180 raw_pixel_start = pixel; 188 raw_pixel_start = pixel;
@@ -223,6 +231,8 @@ int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
223 u8 *cmd = *urb_buf_ptr; 231 u8 *cmd = *urb_buf_ptr;
224 u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length; 232 u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;
225 233
234 BUG_ON(!(bpp == 2 || bpp == 4));
235
226 line_start = (u8 *) (front + byte_offset); 236 line_start = (u8 *) (front + byte_offset);
227 next_pixel = line_start; 237 next_pixel = line_start;
228 line_end = next_pixel + byte_width; 238 line_end = next_pixel + byte_width;
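pixel32_to_be16() keeps the top 5/6/5 bits of the blue, green and red channels of an XRGB8888 pixel. A quick worked check of the packing, as a standalone userspace sketch with the same shifts and masks as above:

	#include <stdint.h>
	#include <assert.h>

	static uint16_t xrgb8888_to_rgb565(uint32_t pixel)
	{
		return (((pixel >> 3) & 0x001f) |  /* B bits 7:3   -> 4:0   */
			((pixel >> 5) & 0x07e0) |  /* G bits 15:10 -> 10:5  */
			((pixel >> 8) & 0xf800));  /* R bits 23:19 -> 15:11 */
	}

	int main(void)
	{
		/* R=0xff -> 0x1f<<11, G=0x80 -> 0x20<<5, B=0x40 -> 0x08 */
		assert(xrgb8888_to_rgb565(0x00ff8040) == 0xfc08);
		return 0;
	}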
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 161f8b2549aa..07dfd823cc30 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -829,7 +829,7 @@ static void vmw_lastclose(struct drm_device *dev)
829 829
830 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 830 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
831 set.crtc = crtc; 831 set.crtc = crtc;
832 ret = crtc->funcs->set_config(&set); 832 ret = drm_mode_set_config_internal(&set);
833 WARN_ON(ret != 0); 833 WARN_ON(ret != 0);
834 } 834 }
835 835
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index d9fbbe191071..c509d40c4897 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -131,7 +131,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
131 struct vmw_master *vmaster = vmw_master(file_priv->master); 131 struct vmw_master *vmaster = vmw_master(file_priv->master);
132 struct drm_vmw_rect __user *clips_ptr; 132 struct drm_vmw_rect __user *clips_ptr;
133 struct drm_vmw_rect *clips = NULL; 133 struct drm_vmw_rect *clips = NULL;
134 struct drm_mode_object *obj; 134 struct drm_framebuffer *fb;
135 struct vmw_framebuffer *vfb; 135 struct vmw_framebuffer *vfb;
136 struct vmw_resource *res; 136 struct vmw_resource *res;
137 uint32_t num_clips; 137 uint32_t num_clips;
@@ -163,19 +163,15 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
163 goto out_no_copy; 163 goto out_no_copy;
164 } 164 }
165 165
166 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 166 drm_modeset_lock_all(dev);
167 if (unlikely(ret != 0)) {
168 ret = -ERESTARTSYS;
169 goto out_no_mode_mutex;
170 }
171 167
172 obj = drm_mode_object_find(dev, arg->fb_id, DRM_MODE_OBJECT_FB); 168 fb = drm_framebuffer_lookup(dev, arg->fb_id);
173 if (!obj) { 169 if (!fb) {
174 DRM_ERROR("Invalid framebuffer id.\n"); 170 DRM_ERROR("Invalid framebuffer id.\n");
175 ret = -EINVAL; 171 ret = -EINVAL;
176 goto out_no_fb; 172 goto out_no_fb;
177 } 173 }
178 vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj)); 174 vfb = vmw_framebuffer_to_vfb(fb);
179 175
180 ret = ttm_read_lock(&vmaster->lock, true); 176 ret = ttm_read_lock(&vmaster->lock, true);
181 if (unlikely(ret != 0)) 177 if (unlikely(ret != 0))
@@ -199,9 +195,9 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
199out_no_surface: 195out_no_surface:
200 ttm_read_unlock(&vmaster->lock); 196 ttm_read_unlock(&vmaster->lock);
201out_no_ttm_lock: 197out_no_ttm_lock:
198 drm_framebuffer_unreference(fb);
202out_no_fb: 199out_no_fb:
203 mutex_unlock(&dev->mode_config.mutex); 200 drm_modeset_unlock_all(dev);
204out_no_mode_mutex:
205out_no_copy: 201out_no_copy:
206 kfree(clips); 202 kfree(clips);
207out_clips: 203out_clips:
@@ -220,7 +216,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
220 struct vmw_master *vmaster = vmw_master(file_priv->master); 216 struct vmw_master *vmaster = vmw_master(file_priv->master);
221 struct drm_vmw_rect __user *clips_ptr; 217 struct drm_vmw_rect __user *clips_ptr;
222 struct drm_vmw_rect *clips = NULL; 218 struct drm_vmw_rect *clips = NULL;
223 struct drm_mode_object *obj; 219 struct drm_framebuffer *fb;
224 struct vmw_framebuffer *vfb; 220 struct vmw_framebuffer *vfb;
225 uint32_t num_clips; 221 uint32_t num_clips;
226 int ret; 222 int ret;
@@ -251,24 +247,20 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
251 goto out_no_copy; 247 goto out_no_copy;
252 } 248 }
253 249
254 ret = mutex_lock_interruptible(&dev->mode_config.mutex); 250 drm_modeset_lock_all(dev);
255 if (unlikely(ret != 0)) {
256 ret = -ERESTARTSYS;
257 goto out_no_mode_mutex;
258 }
259 251
260 obj = drm_mode_object_find(dev, arg->fb_id, DRM_MODE_OBJECT_FB); 252 fb = drm_framebuffer_lookup(dev, arg->fb_id);
261 if (!obj) { 253 if (!fb) {
262 DRM_ERROR("Invalid framebuffer id.\n"); 254 DRM_ERROR("Invalid framebuffer id.\n");
263 ret = -EINVAL; 255 ret = -EINVAL;
264 goto out_no_fb; 256 goto out_no_fb;
265 } 257 }
266 258
267 vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj)); 259 vfb = vmw_framebuffer_to_vfb(fb);
268 if (!vfb->dmabuf) { 260 if (!vfb->dmabuf) {
269 DRM_ERROR("Framebuffer not dmabuf backed.\n"); 261 DRM_ERROR("Framebuffer not dmabuf backed.\n");
270 ret = -EINVAL; 262 ret = -EINVAL;
271 goto out_no_fb; 263 goto out_no_ttm_lock;
272 } 264 }
273 265
274 ret = ttm_read_lock(&vmaster->lock, true); 266 ret = ttm_read_lock(&vmaster->lock, true);
@@ -281,9 +273,9 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
281 273
282 ttm_read_unlock(&vmaster->lock); 274 ttm_read_unlock(&vmaster->lock);
283out_no_ttm_lock: 275out_no_ttm_lock:
276 drm_framebuffer_unreference(fb);
284out_no_fb: 277out_no_fb:
285 mutex_unlock(&dev->mode_config.mutex); 278 drm_modeset_unlock_all(dev);
286out_no_mode_mutex:
287out_no_copy: 279out_no_copy:
288 kfree(clips); 280 kfree(clips);
289out_clips: 281out_clips:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 54743943d8b3..3e3c7ab33ca2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -180,16 +180,29 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
180 struct vmw_dma_buffer *dmabuf = NULL; 180 struct vmw_dma_buffer *dmabuf = NULL;
181 int ret; 181 int ret;
182 182
183 /*
184 * FIXME: Unclear whether there's any global state touched by the
185 * cursor_set function, especially vmw_cursor_update_position looks
186 * suspicious. For now take the easy route and reacquire all locks. We
187 * can do this since the caller in the drm core doesn't check anything
 188 * which is protected by any locks.
189 */
190 mutex_unlock(&crtc->mutex);
191 drm_modeset_lock_all(dev_priv->dev);
192
183 /* A lot of the code assumes this */ 193 /* A lot of the code assumes this */
184 if (handle && (width != 64 || height != 64)) 194 if (handle && (width != 64 || height != 64)) {
185 return -EINVAL; 195 ret = -EINVAL;
196 goto out;
197 }
186 198
187 if (handle) { 199 if (handle) {
188 ret = vmw_user_lookup_handle(dev_priv, tfile, 200 ret = vmw_user_lookup_handle(dev_priv, tfile,
189 handle, &surface, &dmabuf); 201 handle, &surface, &dmabuf);
190 if (ret) { 202 if (ret) {
191 DRM_ERROR("failed to find surface or dmabuf: %i\n", ret); 203 DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
192 return -EINVAL; 204 ret = -EINVAL;
205 goto out;
193 } 206 }
194 } 207 }
195 208
@@ -197,7 +210,8 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
197 if (surface && !surface->snooper.image) { 210 if (surface && !surface->snooper.image) {
198 DRM_ERROR("surface not suitable for cursor\n"); 211 DRM_ERROR("surface not suitable for cursor\n");
199 vmw_surface_unreference(&surface); 212 vmw_surface_unreference(&surface);
200 return -EINVAL; 213 ret = -EINVAL;
214 goto out;
201 } 215 }
202 216
203 /* takedown old cursor */ 217 /* takedown old cursor */
@@ -225,14 +239,20 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
225 du->hotspot_x, du->hotspot_y); 239 du->hotspot_x, du->hotspot_y);
226 } else { 240 } else {
227 vmw_cursor_update_position(dev_priv, false, 0, 0); 241 vmw_cursor_update_position(dev_priv, false, 0, 0);
228 return 0; 242 ret = 0;
243 goto out;
229 } 244 }
230 245
231 vmw_cursor_update_position(dev_priv, true, 246 vmw_cursor_update_position(dev_priv, true,
232 du->cursor_x + du->hotspot_x, 247 du->cursor_x + du->hotspot_x,
233 du->cursor_y + du->hotspot_y); 248 du->cursor_y + du->hotspot_y);
234 249
235 return 0; 250 ret = 0;
251out:
252 drm_modeset_unlock_all(dev_priv->dev);
253 mutex_lock(&crtc->mutex);
254
255 return ret;
236} 256}
237 257
238int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) 258int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
@@ -244,10 +264,23 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
244 du->cursor_x = x + crtc->x; 264 du->cursor_x = x + crtc->x;
245 du->cursor_y = y + crtc->y; 265 du->cursor_y = y + crtc->y;
246 266
267 /*
268 * FIXME: Unclear whether there's any global state touched by the
 269 * cursor_move function, especially vmw_cursor_update_position looks
270 * suspicious. For now take the easy route and reacquire all locks. We
271 * can do this since the caller in the drm core doesn't check anything
 272 * which is protected by any locks.
273 */
274 mutex_unlock(&crtc->mutex);
275 drm_modeset_lock_all(dev_priv->dev);
276
247 vmw_cursor_update_position(dev_priv, shown, 277 vmw_cursor_update_position(dev_priv, shown,
248 du->cursor_x + du->hotspot_x, 278 du->cursor_x + du->hotspot_x,
249 du->cursor_y + du->hotspot_y); 279 du->cursor_y + du->hotspot_y);
250 280
281 drm_modeset_unlock_all(dev_priv->dev);
282 mutex_lock(&crtc->mutex);
283
251 return 0; 284 return 0;
252} 285}
253 286
@@ -373,16 +406,6 @@ void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
373 * Generic framebuffer code 406 * Generic framebuffer code
374 */ 407 */
375 408
376int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
377 struct drm_file *file_priv,
378 unsigned int *handle)
379{
380 if (handle)
381 *handle = 0;
382
383 return 0;
384}
385
386/* 409/*
387 * Surface framebuffer code 410 * Surface framebuffer code
388 */ 411 */
@@ -610,7 +633,6 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
610static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = { 633static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
611 .destroy = vmw_framebuffer_surface_destroy, 634 .destroy = vmw_framebuffer_surface_destroy,
612 .dirty = vmw_framebuffer_surface_dirty, 635 .dirty = vmw_framebuffer_surface_dirty,
613 .create_handle = vmw_framebuffer_create_handle,
614}; 636};
615 637
616static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, 638static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
@@ -681,14 +703,10 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
681 goto out_err1; 703 goto out_err1;
682 } 704 }
683 705
684 ret = drm_framebuffer_init(dev, &vfbs->base.base,
685 &vmw_framebuffer_surface_funcs);
686 if (ret)
687 goto out_err2;
688
689 if (!vmw_surface_reference(surface)) { 706 if (!vmw_surface_reference(surface)) {
690 DRM_ERROR("failed to reference surface %p\n", surface); 707 DRM_ERROR("failed to reference surface %p\n", surface);
691 goto out_err3; 708 ret = -EINVAL;
709 goto out_err2;
692 } 710 }
693 711
694 /* XXX get the first 3 from the surface info */ 712 /* XXX get the first 3 from the surface info */
@@ -707,10 +725,15 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
707 725
708 *out = &vfbs->base; 726 *out = &vfbs->base;
709 727
728 ret = drm_framebuffer_init(dev, &vfbs->base.base,
729 &vmw_framebuffer_surface_funcs);
730 if (ret)
731 goto out_err3;
732
710 return 0; 733 return 0;
711 734
712out_err3: 735out_err3:
713 drm_framebuffer_cleanup(&vfbs->base.base); 736 vmw_surface_unreference(&surface);
714out_err2: 737out_err2:
715 kfree(vfbs); 738 kfree(vfbs);
716out_err1: 739out_err1:
@@ -960,7 +983,6 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
960static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = { 983static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
961 .destroy = vmw_framebuffer_dmabuf_destroy, 984 .destroy = vmw_framebuffer_dmabuf_destroy,
962 .dirty = vmw_framebuffer_dmabuf_dirty, 985 .dirty = vmw_framebuffer_dmabuf_dirty,
963 .create_handle = vmw_framebuffer_create_handle,
964}; 986};
965 987
966/** 988/**
@@ -1053,14 +1075,10 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
1053 goto out_err1; 1075 goto out_err1;
1054 } 1076 }
1055 1077
1056 ret = drm_framebuffer_init(dev, &vfbd->base.base,
1057 &vmw_framebuffer_dmabuf_funcs);
1058 if (ret)
1059 goto out_err2;
1060
1061 if (!vmw_dmabuf_reference(dmabuf)) { 1078 if (!vmw_dmabuf_reference(dmabuf)) {
1062 DRM_ERROR("failed to reference dmabuf %p\n", dmabuf); 1079 DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
1063 goto out_err3; 1080 ret = -EINVAL;
1081 goto out_err2;
1064 } 1082 }
1065 1083
1066 vfbd->base.base.bits_per_pixel = mode_cmd->bpp; 1084 vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
@@ -1077,10 +1095,15 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
1077 vfbd->base.user_handle = mode_cmd->handle; 1095 vfbd->base.user_handle = mode_cmd->handle;
1078 *out = &vfbd->base; 1096 *out = &vfbd->base;
1079 1097
1098 ret = drm_framebuffer_init(dev, &vfbd->base.base,
1099 &vmw_framebuffer_dmabuf_funcs);
1100 if (ret)
1101 goto out_err3;
1102
1080 return 0; 1103 return 0;
1081 1104
1082out_err3: 1105out_err3:
1083 drm_framebuffer_cleanup(&vfbd->base.base); 1106 vmw_dmabuf_unreference(&dmabuf);
1084out_err2: 1107out_err2:
1085 kfree(vfbd); 1108 kfree(vfbd);
1086out_err1: 1109out_err1:
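Both framebuffer constructors above are reordered so that drm_framebuffer_init() becomes the last step that can fail: under the new framebuffer reference counting, a successfully initialized fb is registered and visible to lookups, so publishing it last keeps the error unwind to purely private state. The pattern, schematically (names hypothetical, not the vmwgfx code):

	static int my_fb_create(struct drm_device *dev, struct my_fb *mfb)
	{
		int ret;

		/* 1. take private references, fill in every field */

		/* 2. publish last: after this succeeds the fb can be
		 *    looked up, so nothing fallible may follow */
		ret = drm_framebuffer_init(dev, &mfb->base, &my_fb_funcs);
		if (ret) {
			/* still private: plain unwind of step 1 */
		}
		return ret;
	}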
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index e01a17b407b2..16556170fb32 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -959,13 +959,13 @@ void vmw_resource_unreserve(struct vmw_resource *res,
959 if (new_backup && new_backup != res->backup) { 959 if (new_backup && new_backup != res->backup) {
960 960
961 if (res->backup) { 961 if (res->backup) {
962 BUG_ON(atomic_read(&res->backup->base.reserved) == 0); 962 BUG_ON(!ttm_bo_is_reserved(&res->backup->base));
963 list_del_init(&res->mob_head); 963 list_del_init(&res->mob_head);
964 vmw_dmabuf_unreference(&res->backup); 964 vmw_dmabuf_unreference(&res->backup);
965 } 965 }
966 966
967 res->backup = vmw_dmabuf_reference(new_backup); 967 res->backup = vmw_dmabuf_reference(new_backup);
968 BUG_ON(atomic_read(&new_backup->base.reserved) == 0); 968 BUG_ON(!ttm_bo_is_reserved(&new_backup->base));
969 list_add_tail(&res->mob_head, &new_backup->res_list); 969 list_add_tail(&res->mob_head, &new_backup->res_list);
970 } 970 }
971 if (new_backup) 971 if (new_backup)
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig
deleted file mode 100644
index 419917955bf6..000000000000
--- a/drivers/gpu/stub/Kconfig
+++ /dev/null
@@ -1,18 +0,0 @@
1config STUB_POULSBO
2 tristate "Intel GMA500 Stub Driver"
3 depends on PCI
4 depends on NET # for THERMAL
5 # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
6 # but for select to work, need to select ACPI_VIDEO's dependencies, ick
7 select BACKLIGHT_CLASS_DEVICE if ACPI
8 select VIDEO_OUTPUT_CONTROL if ACPI
9 select INPUT if ACPI
10 select ACPI_VIDEO if ACPI
11 select THERMAL if ACPI
12 help
 13 Choose this option if you have a system that has Intel GMA500
 14 (Poulsbo) integrated graphics. If M is selected, the module
 15 will be called poulsbo. This is a stub driver for Poulsbo
 16 that enables the ACPI backlight control sysfs entry file,
 17 because there is no native Poulsbo driver that can support
 18 the Intel opregion.
diff --git a/drivers/gpu/stub/Makefile b/drivers/gpu/stub/Makefile
deleted file mode 100644
index cd940cc9d36d..000000000000
--- a/drivers/gpu/stub/Makefile
+++ /dev/null
@@ -1 +0,0 @@
1obj-$(CONFIG_STUB_POULSBO) += poulsbo.o
diff --git a/drivers/gpu/stub/poulsbo.c b/drivers/gpu/stub/poulsbo.c
deleted file mode 100644
index 7edfd27b8dee..000000000000
--- a/drivers/gpu/stub/poulsbo.c
+++ /dev/null
@@ -1,64 +0,0 @@
1/*
2 * Intel Poulsbo Stub driver
3 *
4 * Copyright (C) 2010 Novell <jlee@novell.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
9 *
10 */
11
12#include <linux/module.h>
13#include <linux/pci.h>
14#include <linux/acpi.h>
15#include <acpi/video.h>
16
17#define DRIVER_NAME "poulsbo"
18
19enum {
20 CHIP_PSB_8108 = 0,
21 CHIP_PSB_8109 = 1,
22};
23
24static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
25 {0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8108}, \
26 {0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PSB_8109}, \
27 {0, 0, 0}
28};
29
30static int poulsbo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
31{
32 return acpi_video_register();
33}
34
35static void poulsbo_remove(struct pci_dev *pdev)
36{
37 acpi_video_unregister();
38}
39
40static struct pci_driver poulsbo_driver = {
41 .name = DRIVER_NAME,
42 .id_table = pciidlist,
43 .probe = poulsbo_probe,
44 .remove = poulsbo_remove,
45};
46
47static int __init poulsbo_init(void)
48{
49 return pci_register_driver(&poulsbo_driver);
50}
51
52static void __exit poulsbo_exit(void)
53{
54 pci_unregister_driver(&poulsbo_driver);
55}
56
57module_init(poulsbo_init);
58module_exit(poulsbo_exit);
59
60MODULE_AUTHOR("Lee, Chun-Yi <jlee@novell.com>");
61MODULE_DESCRIPTION("Poulsbo Stub Driver");
62MODULE_LICENSE("GPL");
63
64MODULE_DEVICE_TABLE(pci, pciidlist);
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index fa60add0ff63..cf787e1d9322 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -25,6 +25,7 @@
25#include <linux/fb.h> 25#include <linux/fb.h>
26 26
27#include <linux/pci.h> 27#include <linux/pci.h>
28#include <linux/console.h>
28#include <linux/vga_switcheroo.h> 29#include <linux/vga_switcheroo.h>
29 30
30#include <linux/vgaarb.h> 31#include <linux/vgaarb.h>
@@ -337,8 +338,10 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
337 338
338 if (new_client->fb_info) { 339 if (new_client->fb_info) {
339 struct fb_event event; 340 struct fb_event event;
341 console_lock();
340 event.info = new_client->fb_info; 342 event.info = new_client->fb_info;
341 fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event); 343 fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event);
344 console_unlock();
342 } 345 }
343 346
344 ret = vgasr_priv.handler->switchto(new_client->id); 347 ret = vgasr_priv.handler->switchto(new_client->id);
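
This hunk is the consumer side of the fbcon locking rework in this series: fb notifier events that can reach fbcon must now be raised with the console lock held. A minimal sketch of the pattern, using only symbols visible in this hunk; the function name is illustrative, not a kernel symbol:

#include <linux/console.h>
#include <linux/fb.h>

/* Raise an fb event under the console lock, as fbcon now expects. */
static void example_remap_consoles(struct fb_info *info)
{
	struct fb_event event;

	console_lock();
	event.info = info;
	fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event);
	console_unlock();
}
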
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 43d5c8b8e7ad..0099667a397e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4255,13 +4255,19 @@ static void quirk_iommu_rwbf(struct pci_dev *dev)
4255{ 4255{
4256 /* 4256 /*
4257 * Mobile 4 Series Chipset neglects to set RWBF capability, 4257 * Mobile 4 Series Chipset neglects to set RWBF capability,
4258 * but needs it: 4258 * but needs it. Same seems to hold for the desktop versions.
4259 */ 4259 */
4260 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n"); 4260 printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
4261 rwbf_quirk = 1; 4261 rwbf_quirk = 1;
4262} 4262}
4263 4263
4264DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf); 4264DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);
4265DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e00, quirk_iommu_rwbf);
4266DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e10, quirk_iommu_rwbf);
4267DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e20, quirk_iommu_rwbf);
4268DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e30, quirk_iommu_rwbf);
4269DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e40, quirk_iommu_rwbf);
4270DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2e90, quirk_iommu_rwbf);
4265 4271
4266#define GGC 0x52 4272#define GGC 0x52
4267#define GGC_MEMORY_SIZE_MASK (0xf << 8) 4273#define GGC_MEMORY_SIZE_MASK (0xf << 8)
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 3a7965d6ac28..093f10c88cce 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -112,8 +112,6 @@ source "drivers/staging/media/Kconfig"
112 112
113source "drivers/staging/net/Kconfig" 113source "drivers/staging/net/Kconfig"
114 114
115source "drivers/staging/omapdrm/Kconfig"
116
117source "drivers/staging/android/Kconfig" 115source "drivers/staging/android/Kconfig"
118 116
119source "drivers/staging/ozwpan/Kconfig" 117source "drivers/staging/ozwpan/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 5971865d0c61..fa41b04cf4cb 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -48,7 +48,6 @@ obj-$(CONFIG_SPEAKUP) += speakup/
48obj-$(CONFIG_TOUCHSCREEN_CLEARPAD_TM1217) += cptm1217/ 48obj-$(CONFIG_TOUCHSCREEN_CLEARPAD_TM1217) += cptm1217/
49obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4) += ste_rmi4/ 49obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4) += ste_rmi4/
50obj-$(CONFIG_MFD_NVEC) += nvec/ 50obj-$(CONFIG_MFD_NVEC) += nvec/
51obj-$(CONFIG_DRM_OMAP) += omapdrm/
52obj-$(CONFIG_ANDROID) += android/ 51obj-$(CONFIG_ANDROID) += android/
53obj-$(CONFIG_USB_WPAN_HCD) += ozwpan/ 52obj-$(CONFIG_USB_WPAN_HCD) += ozwpan/
54obj-$(CONFIG_USB_G_CCG) += ccg/ 53obj-$(CONFIG_USB_G_CCG) += ccg/
diff --git a/drivers/staging/omapdrm/TODO b/drivers/staging/omapdrm/TODO
deleted file mode 100644
index abeeb00aaa12..000000000000
--- a/drivers/staging/omapdrm/TODO
+++ /dev/null
@@ -1,32 +0,0 @@
1TODO
2. add video decode/encode support (via syslink3 + codec-engine)
3 . NOTE: with dmabuf this probably could be split into different driver
4 so perhaps this TODO doesn't belong here
5. where should we do eviction (detatch_pages())? We aren't necessarily
6 accessing the pages via a GART, so maybe we need some other threshold
7 to put a cap on the # of pages that can be pin'd. (It is mostly only
8 of interest in case you have a swap partition/file.. which a lot of
9 these devices do not.. but it doesn't hurt for the driver to do the
10 right thing anyways.)
11 . Use mm_shrinker to trigger unpinning pages. Need to figure out how
12 to handle next issue first (I think?)
13 . Note TTM already has some mm_shrinker stuff.. maybe an argument to
14 move to TTM? Or maybe something that could be factored out in common?
15. GEM/shmem backed pages can have existing mappings (kernel linear map,
16 etc..), which isn't really ideal.
17. Revisit GEM sync object infrastructure.. TTM has some framework for this
18 already. Possibly this could be refactored out and made more common?
19 There should be some way to do this with less wheel-reinvention.
20. Solve PM sequencing on resume. DMM/TILER must be reloaded before any
21 access is made from any component in the system. Which means on suspend
22 CRTC's should be disabled, and on resume the LUT should be reprogrammed
23 before CRTC's are re-enabled, to prevent DSS from trying to DMA from a
24 buffer mapped in DMM/TILER before LUT is reloaded.
25
26Userspace:
27. git://github.com/robclark/xf86-video-omap.git
28
29Currently tested on
30. OMAP3530 beagleboard
31. OMAP4430 pandaboard
32. OMAP4460 pandaboard
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 6c4abeaf690f..fbd447b390f7 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -638,7 +638,7 @@ static inline void save_screen(struct vc_data *vc)
638 * Redrawing of screen 638 * Redrawing of screen
639 */ 639 */
640 640
641static void clear_buffer_attributes(struct vc_data *vc) 641void clear_buffer_attributes(struct vc_data *vc)
642{ 642{
643 unsigned short *p = (unsigned short *)vc->vc_origin; 643 unsigned short *p = (unsigned short *)vc->vc_origin;
644 int count = vc->vc_screenbuf_size / 2; 644 int count = vc->vc_screenbuf_size / 2;
@@ -2987,7 +2987,7 @@ int __init vty_init(const struct file_operations *console_fops)
2987 2987
2988static struct class *vtconsole_class; 2988static struct class *vtconsole_class;
2989 2989
2990static int bind_con_driver(const struct consw *csw, int first, int last, 2990static int do_bind_con_driver(const struct consw *csw, int first, int last,
2991 int deflt) 2991 int deflt)
2992{ 2992{
2993 struct module *owner = csw->owner; 2993 struct module *owner = csw->owner;
@@ -2998,7 +2998,7 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
2998 if (!try_module_get(owner)) 2998 if (!try_module_get(owner))
2999 return -ENODEV; 2999 return -ENODEV;
3000 3000
3001 console_lock(); 3001 WARN_CONSOLE_UNLOCKED();
3002 3002
3003 /* check if driver is registered */ 3003 /* check if driver is registered */
3004 for (i = 0; i < MAX_NR_CON_DRIVER; i++) { 3004 for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
@@ -3083,11 +3083,22 @@ static int bind_con_driver(const struct consw *csw, int first, int last,
3083 3083
3084 retval = 0; 3084 retval = 0;
3085err: 3085err:
3086 console_unlock();
3087 module_put(owner); 3086 module_put(owner);
3088 return retval; 3087 return retval;
3089}; 3088};
3090 3089
3090
3091static int bind_con_driver(const struct consw *csw, int first, int last,
3092 int deflt)
3093{
3094 int ret;
3095
3096 console_lock();
3097 ret = do_bind_con_driver(csw, first, last, deflt);
3098 console_unlock();
3099 return ret;
3100}
3101
3091#ifdef CONFIG_VT_HW_CONSOLE_BINDING 3102#ifdef CONFIG_VT_HW_CONSOLE_BINDING
3092static int con_is_graphics(const struct consw *csw, int first, int last) 3103static int con_is_graphics(const struct consw *csw, int first, int last)
3093{ 3104{
@@ -3124,6 +3135,18 @@ static int con_is_graphics(const struct consw *csw, int first, int last)
3124 */ 3135 */
3125int unbind_con_driver(const struct consw *csw, int first, int last, int deflt) 3136int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
3126{ 3137{
3138 int retval;
3139
3140 console_lock();
3141 retval = do_unbind_con_driver(csw, first, last, deflt);
3142 console_unlock();
3143 return retval;
3144}
3145EXPORT_SYMBOL(unbind_con_driver);
3146
3147/* unlocked version of unbind_con_driver() */
3148int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
3149{
3127 struct module *owner = csw->owner; 3150 struct module *owner = csw->owner;
3128 const struct consw *defcsw = NULL; 3151 const struct consw *defcsw = NULL;
3129 struct con_driver *con_driver = NULL, *con_back = NULL; 3152 struct con_driver *con_driver = NULL, *con_back = NULL;
@@ -3132,7 +3155,7 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
3132 if (!try_module_get(owner)) 3155 if (!try_module_get(owner))
3133 return -ENODEV; 3156 return -ENODEV;
3134 3157
3135 console_lock(); 3158 WARN_CONSOLE_UNLOCKED();
3136 3159
3137 /* check if driver is registered and if it is unbindable */ 3160 /* check if driver is registered and if it is unbindable */
3138 for (i = 0; i < MAX_NR_CON_DRIVER; i++) { 3161 for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
@@ -3145,10 +3168,8 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
3145 } 3168 }
3146 } 3169 }
3147 3170
3148 if (retval) { 3171 if (retval)
3149 console_unlock();
3150 goto err; 3172 goto err;
3151 }
3152 3173
3153 retval = -ENODEV; 3174 retval = -ENODEV;
3154 3175
@@ -3164,15 +3185,11 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
3164 } 3185 }
3165 } 3186 }
3166 3187
3167 if (retval) { 3188 if (retval)
3168 console_unlock();
3169 goto err; 3189 goto err;
3170 }
3171 3190
3172 if (!con_is_bound(csw)) { 3191 if (!con_is_bound(csw))
3173 console_unlock();
3174 goto err; 3192 goto err;
3175 }
3176 3193
3177 first = max(first, con_driver->first); 3194 first = max(first, con_driver->first);
3178 last = min(last, con_driver->last); 3195 last = min(last, con_driver->last);
@@ -3199,15 +3216,14 @@ int unbind_con_driver(const struct consw *csw, int first, int last, int deflt)
3199 if (!con_is_bound(csw)) 3216 if (!con_is_bound(csw))
3200 con_driver->flag &= ~CON_DRIVER_FLAG_INIT; 3217 con_driver->flag &= ~CON_DRIVER_FLAG_INIT;
3201 3218
3202 console_unlock();
3203 /* ignore return value, binding should not fail */ 3219 /* ignore return value, binding should not fail */
3204 bind_con_driver(defcsw, first, last, deflt); 3220 do_bind_con_driver(defcsw, first, last, deflt);
3205err: 3221err:
3206 module_put(owner); 3222 module_put(owner);
3207 return retval; 3223 return retval;
3208 3224
3209} 3225}
3210EXPORT_SYMBOL(unbind_con_driver); 3226EXPORT_SYMBOL_GPL(do_unbind_con_driver);
3211 3227
3212static int vt_bind(struct con_driver *con) 3228static int vt_bind(struct con_driver *con)
3213{ 3229{
@@ -3492,28 +3508,18 @@ int con_debug_leave(void)
3492} 3508}
3493EXPORT_SYMBOL_GPL(con_debug_leave); 3509EXPORT_SYMBOL_GPL(con_debug_leave);
3494 3510
3495/** 3511static int do_register_con_driver(const struct consw *csw, int first, int last)
3496 * register_con_driver - register console driver to console layer
3497 * @csw: console driver
3498 * @first: the first console to take over, minimum value is 0
3499 * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1
3500 *
3501 * DESCRIPTION: This function registers a console driver which can later
3502 * bind to a range of consoles specified by @first and @last. It will
3503 * also initialize the console driver by calling con_startup().
3504 */
3505int register_con_driver(const struct consw *csw, int first, int last)
3506{ 3512{
3507 struct module *owner = csw->owner; 3513 struct module *owner = csw->owner;
3508 struct con_driver *con_driver; 3514 struct con_driver *con_driver;
3509 const char *desc; 3515 const char *desc;
3510 int i, retval = 0; 3516 int i, retval = 0;
3511 3517
3518 WARN_CONSOLE_UNLOCKED();
3519
3512 if (!try_module_get(owner)) 3520 if (!try_module_get(owner))
3513 return -ENODEV; 3521 return -ENODEV;
3514 3522
3515 console_lock();
3516
3517 for (i = 0; i < MAX_NR_CON_DRIVER; i++) { 3523 for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
3518 con_driver = &registered_con_driver[i]; 3524 con_driver = &registered_con_driver[i];
3519 3525
@@ -3566,10 +3572,29 @@ int register_con_driver(const struct consw *csw, int first, int last)
3566 } 3572 }
3567 3573
3568err: 3574err:
3569 console_unlock();
3570 module_put(owner); 3575 module_put(owner);
3571 return retval; 3576 return retval;
3572} 3577}
3578
3579/**
3580 * register_con_driver - register console driver to console layer
3581 * @csw: console driver
3582 * @first: the first console to take over, minimum value is 0
3583 * @last: the last console to take over, maximum value is MAX_NR_CONSOLES -1
3584 *
3585 * DESCRIPTION: This function registers a console driver which can later
3586 * bind to a range of consoles specified by @first and @last. It will
3587 * also initialize the console driver by calling con_startup().
3588 */
3589int register_con_driver(const struct consw *csw, int first, int last)
3590{
3591 int retval;
3592
3593 console_lock();
3594 retval = do_register_con_driver(csw, first, last);
3595 console_unlock();
3596 return retval;
3597}
3573EXPORT_SYMBOL(register_con_driver); 3598EXPORT_SYMBOL(register_con_driver);
3574 3599
3575/** 3600/**
@@ -3585,9 +3610,18 @@ EXPORT_SYMBOL(register_con_driver);
3585 */ 3610 */
3586int unregister_con_driver(const struct consw *csw) 3611int unregister_con_driver(const struct consw *csw)
3587{ 3612{
3588 int i, retval = -ENODEV; 3613 int retval;
3589 3614
3590 console_lock(); 3615 console_lock();
3616 retval = do_unregister_con_driver(csw);
3617 console_unlock();
3618 return retval;
3619}
3620EXPORT_SYMBOL(unregister_con_driver);
3621
3622int do_unregister_con_driver(const struct consw *csw)
3623{
3624 int i, retval = -ENODEV;
3591 3625
3592 /* cannot unregister a bound driver */ 3626 /* cannot unregister a bound driver */
3593 if (con_is_bound(csw)) 3627 if (con_is_bound(csw))
@@ -3613,27 +3647,53 @@ int unregister_con_driver(const struct consw *csw)
3613 } 3647 }
3614 } 3648 }
3615err: 3649err:
3616 console_unlock();
3617 return retval; 3650 return retval;
3618} 3651}
3619EXPORT_SYMBOL(unregister_con_driver); 3652EXPORT_SYMBOL_GPL(do_unregister_con_driver);
3620 3653
3621/* 3654/*
3622 * If we support more console drivers, this function is used 3655 * If we support more console drivers, this function is used
3623 * when a driver wants to take over some existing consoles 3656 * when a driver wants to take over some existing consoles
3624 * and become default driver for newly opened ones. 3657 * and become default driver for newly opened ones.
3625 * 3658 *
3626 * take_over_console is basically a register followed by unbind 3659 * take_over_console is basically a register followed by unbind
3660 */
3661int do_take_over_console(const struct consw *csw, int first, int last, int deflt)
3662{
3663 int err;
3664
3665 err = do_register_con_driver(csw, first, last);
3666 /*
3667 * If we get a busy error we still want to bind the console driver
3668 * and return success, as we may have unbound the console driver
3669 * but not unregistered it.
3670 */
3671 if (err == -EBUSY)
3672 err = 0;
3673 if (!err)
3674 do_bind_con_driver(csw, first, last, deflt);
3675
3676 return err;
3677}
3678EXPORT_SYMBOL_GPL(do_take_over_console);
3679
3680/*
3681 * If we support more console drivers, this function is used
3682 * when a driver wants to take over some existing consoles
3683 * and become default driver for newly opened ones.
3684 *
3685 * take_over_console is basically a register followed by unbind
3627 */ 3686 */
3628int take_over_console(const struct consw *csw, int first, int last, int deflt) 3687int take_over_console(const struct consw *csw, int first, int last, int deflt)
3629{ 3688{
3630 int err; 3689 int err;
3631 3690
3632 err = register_con_driver(csw, first, last); 3691 err = register_con_driver(csw, first, last);
3633 /* if we get an busy error we still want to bind the console driver 3692 /*
3693 * If we get a busy error we still want to bind the console driver
3634 * and return success, as we may have unbound the console driver 3694 * and return success, as we may have unbound the console driver
3635  * but not unregistered it. 3695 * but not unregistered it.
3636 */ 3696 */
3637 if (err == -EBUSY) 3697 if (err == -EBUSY)
3638 err = 0; 3698 err = 0;
3639 if (!err) 3699 if (!err)
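
The vt.c changes above all apply one refactoring: each entry point's body moves into an unlocked do_*() variant that merely asserts WARN_CONSOLE_UNLOCKED(), while the exported name becomes a thin wrapper that takes console_lock() around the call, so callers that already hold the lock (the fb notifier paths) can use the do_*() form directly. A sketch of the pattern under hypothetical names (example_op is not a kernel symbol; the macro definition mirrors the one vt.c uses locally):

#include <linux/bug.h>
#include <linux/console.h>
#include <linux/kernel.h>

#define WARN_CONSOLE_UNLOCKED() \
	WARN_ON(!is_console_locked() && !oops_in_progress)

/* Unlocked worker: the caller must hold the console lock. */
static int do_example_op(int arg)
{
	WARN_CONSOLE_UNLOCKED();
	/* ... work that must be serialized against the console ... */
	return 0;
}

/* Locked wrapper that preserves the old calling convention. */
int example_op(int arg)
{
	int ret;

	console_lock();
	ret = do_example_op(arg);
	console_unlock();
	return ret;
}
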
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 80cbd21b483f..4c1546f71d56 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -21,8 +21,6 @@ source "drivers/gpu/vga/Kconfig"
21 21
22source "drivers/gpu/drm/Kconfig" 22source "drivers/gpu/drm/Kconfig"
23 23
24source "drivers/gpu/stub/Kconfig"
25
26config VGASTATE 24config VGASTATE
27 tristate 25 tristate
28 default n 26 default n
@@ -33,6 +31,30 @@ config VIDEO_OUTPUT_CONTROL
33 This framework adds support for low-level control of the video 31 This framework adds support for low-level control of the video
34 output switch. 32 output switch.
35 33
34config DISPLAY_TIMING
35 bool
36
37config VIDEOMODE
38 bool
39
40config OF_DISPLAY_TIMING
41 bool "Enable device tree display timing support"
42 depends on OF
43 select DISPLAY_TIMING
44 help
45 Helper to parse display timings from the devicetree.
46
47config OF_VIDEOMODE
48 bool "Enable device tree videomode support"
49 depends on OF
50 select VIDEOMODE
51 select OF_DISPLAY_TIMING
52 help
53 Helper to get videomodes from the devicetree.
54
55config HDMI
56 bool
57
36menuconfig FB 58menuconfig FB
37 tristate "Support for frame buffer devices" 59 tristate "Support for frame buffer devices"
38 ---help--- 60 ---help---
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 0577f834fdcd..9df387334cb7 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -5,6 +5,7 @@
5# Each configuration option enables a list of files. 5# Each configuration option enables a list of files.
6 6
7obj-$(CONFIG_VGASTATE) += vgastate.o 7obj-$(CONFIG_VGASTATE) += vgastate.o
8obj-$(CONFIG_HDMI) += hdmi.o
8obj-y += fb_notify.o 9obj-y += fb_notify.o
9obj-$(CONFIG_FB) += fb.o 10obj-$(CONFIG_FB) += fb.o
10fb-y := fbmem.o fbmon.o fbcmap.o fbsysfs.o \ 11fb-y := fbmem.o fbmon.o fbcmap.o fbsysfs.o \
@@ -170,3 +171,7 @@ obj-$(CONFIG_FB_VIRTUAL) += vfb.o
170 171
171#video output switch sysfs driver 172#video output switch sysfs driver
172obj-$(CONFIG_VIDEO_OUTPUT_CONTROL) += output.o 173obj-$(CONFIG_VIDEO_OUTPUT_CONTROL) += output.o
174obj-$(CONFIG_DISPLAY_TIMING) += display_timing.o
175obj-$(CONFIG_OF_DISPLAY_TIMING) += of_display_timing.o
176obj-$(CONFIG_VIDEOMODE) += videomode.o
177obj-$(CONFIG_OF_VIDEOMODE) += of_videomode.o
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index f8a61e210d2e..3cd675927826 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -529,6 +529,33 @@ static int search_for_mapped_con(void)
529 return retval; 529 return retval;
530} 530}
531 531
532static int do_fbcon_takeover(int show_logo)
533{
534 int err, i;
535
536 if (!num_registered_fb)
537 return -ENODEV;
538
539 if (!show_logo)
540 logo_shown = FBCON_LOGO_DONTSHOW;
541
542 for (i = first_fb_vc; i <= last_fb_vc; i++)
543 con2fb_map[i] = info_idx;
544
545 err = do_take_over_console(&fb_con, first_fb_vc, last_fb_vc,
546 fbcon_is_default);
547
548 if (err) {
549 for (i = first_fb_vc; i <= last_fb_vc; i++)
550 con2fb_map[i] = -1;
551 info_idx = -1;
552 } else {
553 fbcon_has_console_bind = 1;
554 }
555
556 return err;
557}
558
532static int fbcon_takeover(int show_logo) 559static int fbcon_takeover(int show_logo)
533{ 560{
534 int err, i; 561 int err, i;
@@ -815,6 +842,8 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info,
815 * 842 *
816 * Maps a virtual console @unit to a frame buffer device 843 * Maps a virtual console @unit to a frame buffer device
817 * @newidx. 844 * @newidx.
845 *
846 * This should be called with the console lock held.
818 */ 847 */
819static int set_con2fb_map(int unit, int newidx, int user) 848static int set_con2fb_map(int unit, int newidx, int user)
820{ 849{
@@ -832,7 +861,7 @@ static int set_con2fb_map(int unit, int newidx, int user)
832 861
833 if (!search_for_mapped_con() || !con_is_bound(&fb_con)) { 862 if (!search_for_mapped_con() || !con_is_bound(&fb_con)) {
834 info_idx = newidx; 863 info_idx = newidx;
835 return fbcon_takeover(0); 864 return do_fbcon_takeover(0);
836 } 865 }
837 866
838 if (oldidx != -1) 867 if (oldidx != -1)
@@ -840,7 +869,6 @@ static int set_con2fb_map(int unit, int newidx, int user)
840 869
841 found = search_fb_in_map(newidx); 870 found = search_fb_in_map(newidx);
842 871
843 console_lock();
844 con2fb_map[unit] = newidx; 872 con2fb_map[unit] = newidx;
845 if (!err && !found) 873 if (!err && !found)
846 err = con2fb_acquire_newinfo(vc, info, unit, oldidx); 874 err = con2fb_acquire_newinfo(vc, info, unit, oldidx);
@@ -867,7 +895,6 @@ static int set_con2fb_map(int unit, int newidx, int user)
867 if (!search_fb_in_map(info_idx)) 895 if (!search_fb_in_map(info_idx))
868 info_idx = newidx; 896 info_idx = newidx;
869 897
870 console_unlock();
871 return err; 898 return err;
872} 899}
873 900
@@ -990,7 +1017,7 @@ static const char *fbcon_startup(void)
990 } 1017 }
991 1018
992 /* Setup default font */ 1019 /* Setup default font */
993 if (!p->fontdata) { 1020 if (!p->fontdata && !vc->vc_font.data) {
994 if (!fontname[0] || !(font = find_font(fontname))) 1021 if (!fontname[0] || !(font = find_font(fontname)))
995 font = get_default_font(info->var.xres, 1022 font = get_default_font(info->var.xres,
996 info->var.yres, 1023 info->var.yres,
@@ -1000,6 +1027,8 @@ static const char *fbcon_startup(void)
1000 vc->vc_font.height = font->height; 1027 vc->vc_font.height = font->height;
1001 vc->vc_font.data = (void *)(p->fontdata = font->data); 1028 vc->vc_font.data = (void *)(p->fontdata = font->data);
1002 vc->vc_font.charcount = 256; /* FIXME Need to support more fonts */ 1029 vc->vc_font.charcount = 256; /* FIXME Need to support more fonts */
1030 } else {
1031 p->fontdata = vc->vc_font.data;
1003 } 1032 }
1004 1033
1005 cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres); 1034 cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
@@ -1159,9 +1188,9 @@ static void fbcon_init(struct vc_data *vc, int init)
1159 ops->p = &fb_display[fg_console]; 1188 ops->p = &fb_display[fg_console];
1160} 1189}
1161 1190
1162static void fbcon_free_font(struct display *p) 1191static void fbcon_free_font(struct display *p, bool freefont)
1163{ 1192{
1164 if (p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0)) 1193 if (freefont && p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
1165 kfree(p->fontdata - FONT_EXTRA_WORDS * sizeof(int)); 1194 kfree(p->fontdata - FONT_EXTRA_WORDS * sizeof(int));
1166 p->fontdata = NULL; 1195 p->fontdata = NULL;
1167 p->userfont = 0; 1196 p->userfont = 0;
@@ -1173,8 +1202,8 @@ static void fbcon_deinit(struct vc_data *vc)
1173 struct fb_info *info; 1202 struct fb_info *info;
1174 struct fbcon_ops *ops; 1203 struct fbcon_ops *ops;
1175 int idx; 1204 int idx;
1205 bool free_font = true;
1176 1206
1177 fbcon_free_font(p);
1178 idx = con2fb_map[vc->vc_num]; 1207 idx = con2fb_map[vc->vc_num];
1179 1208
1180 if (idx == -1) 1209 if (idx == -1)
@@ -1185,6 +1214,8 @@ static void fbcon_deinit(struct vc_data *vc)
1185 if (!info) 1214 if (!info)
1186 goto finished; 1215 goto finished;
1187 1216
1217 if (info->flags & FBINFO_MISC_FIRMWARE)
1218 free_font = false;
1188 ops = info->fbcon_par; 1219 ops = info->fbcon_par;
1189 1220
1190 if (!ops) 1221 if (!ops)
@@ -1196,6 +1227,8 @@ static void fbcon_deinit(struct vc_data *vc)
1196 ops->flags &= ~FBCON_FLAGS_INIT; 1227 ops->flags &= ~FBCON_FLAGS_INIT;
1197finished: 1228finished:
1198 1229
1230 fbcon_free_font(p, free_font);
1231
1199 if (!con_is_bound(&fb_con)) 1232 if (!con_is_bound(&fb_con))
1200 fbcon_exit(); 1233 fbcon_exit();
1201 1234
@@ -2985,7 +3018,7 @@ static int fbcon_unbind(void)
2985{ 3018{
2986 int ret; 3019 int ret;
2987 3020
2988 ret = unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc, 3021 ret = do_unbind_con_driver(&fb_con, first_fb_vc, last_fb_vc,
2989 fbcon_is_default); 3022 fbcon_is_default);
2990 3023
2991 if (!ret) 3024 if (!ret)
@@ -3000,6 +3033,7 @@ static inline int fbcon_unbind(void)
3000} 3033}
3001#endif /* CONFIG_VT_HW_CONSOLE_BINDING */ 3034#endif /* CONFIG_VT_HW_CONSOLE_BINDING */
3002 3035
3036/* called with console_lock held */
3003static int fbcon_fb_unbind(int idx) 3037static int fbcon_fb_unbind(int idx)
3004{ 3038{
3005 int i, new_idx = -1, ret = 0; 3039 int i, new_idx = -1, ret = 0;
@@ -3026,6 +3060,7 @@ static int fbcon_fb_unbind(int idx)
3026 return ret; 3060 return ret;
3027} 3061}
3028 3062
3063/* called with console_lock held */
3029static int fbcon_fb_unregistered(struct fb_info *info) 3064static int fbcon_fb_unregistered(struct fb_info *info)
3030{ 3065{
3031 int i, idx; 3066 int i, idx;
@@ -3058,11 +3093,12 @@ static int fbcon_fb_unregistered(struct fb_info *info)
3058 primary_device = -1; 3093 primary_device = -1;
3059 3094
3060 if (!num_registered_fb) 3095 if (!num_registered_fb)
3061 unregister_con_driver(&fb_con); 3096 do_unregister_con_driver(&fb_con);
3062 3097
3063 return 0; 3098 return 0;
3064} 3099}
3065 3100
3101/* called with console_lock held */
3066static void fbcon_remap_all(int idx) 3102static void fbcon_remap_all(int idx)
3067{ 3103{
3068 int i; 3104 int i;
@@ -3107,6 +3143,7 @@ static inline void fbcon_select_primary(struct fb_info *info)
3107} 3143}
3108#endif /* CONFIG_FRAMEBUFFER_DETECT_PRIMARY */ 3144#endif /* CONFIG_FRAMEBUFFER_DETECT_PRIMARY */
3109 3145
3146/* called with console_lock held */
3110static int fbcon_fb_registered(struct fb_info *info) 3147static int fbcon_fb_registered(struct fb_info *info)
3111{ 3148{
3112 int ret = 0, i, idx; 3149 int ret = 0, i, idx;
@@ -3123,7 +3160,7 @@ static int fbcon_fb_registered(struct fb_info *info)
3123 } 3160 }
3124 3161
3125 if (info_idx != -1) 3162 if (info_idx != -1)
3126 ret = fbcon_takeover(1); 3163 ret = do_fbcon_takeover(1);
3127 } else { 3164 } else {
3128 for (i = first_fb_vc; i <= last_fb_vc; i++) { 3165 for (i = first_fb_vc; i <= last_fb_vc; i++) {
3129 if (con2fb_map_boot[i] == idx) 3166 if (con2fb_map_boot[i] == idx)
@@ -3259,6 +3296,7 @@ static int fbcon_event_notify(struct notifier_block *self,
3259 ret = fbcon_fb_unregistered(info); 3296 ret = fbcon_fb_unregistered(info);
3260 break; 3297 break;
3261 case FB_EVENT_SET_CONSOLE_MAP: 3298 case FB_EVENT_SET_CONSOLE_MAP:
3299 /* called with console lock held */
3262 con2fb = event->data; 3300 con2fb = event->data;
3263 ret = set_con2fb_map(con2fb->console - 1, 3301 ret = set_con2fb_map(con2fb->console - 1,
3264 con2fb->framebuffer, 1); 3302 con2fb->framebuffer, 1);
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index d449a74d4a31..5855d17d19ac 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -1064,7 +1064,7 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
1064 unsigned short video_port_status = vga_video_port_reg + 6; 1064 unsigned short video_port_status = vga_video_port_reg + 6;
1065 int font_select = 0x00, beg, i; 1065 int font_select = 0x00, beg, i;
1066 char *charmap; 1066 char *charmap;
1067 1067 bool clear_attribs = false;
1068 if (vga_video_type != VIDEO_TYPE_EGAM) { 1068 if (vga_video_type != VIDEO_TYPE_EGAM) {
1069 charmap = (char *) VGA_MAP_MEM(colourmap, 0); 1069 charmap = (char *) VGA_MAP_MEM(colourmap, 0);
1070 beg = 0x0e; 1070 beg = 0x0e;
@@ -1169,12 +1169,6 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
1169 1169
1170 /* if 512 char mode is already enabled don't re-enable it. */ 1170 /* if 512 char mode is already enabled don't re-enable it. */
1171 if ((set) && (ch512 != vga_512_chars)) { 1171 if ((set) && (ch512 != vga_512_chars)) {
1172 /* attribute controller */
1173 for (i = 0; i < MAX_NR_CONSOLES; i++) {
1174 struct vc_data *c = vc_cons[i].d;
1175 if (c && c->vc_sw == &vga_con)
1176 c->vc_hi_font_mask = ch512 ? 0x0800 : 0;
1177 }
1178 vga_512_chars = ch512; 1172 vga_512_chars = ch512;
1179 /* 256-char: enable intensity bit 1173 /* 256-char: enable intensity bit
1180 512-char: disable intensity bit */ 1174 512-char: disable intensity bit */
@@ -1185,8 +1179,22 @@ static int vgacon_do_font_op(struct vgastate *state,char *arg,int set,int ch512)
1185 it means, but it works, and it appears necessary */ 1179 it means, but it works, and it appears necessary */
1186 inb_p(video_port_status); 1180 inb_p(video_port_status);
1187 vga_wattr(state->vgabase, VGA_AR_ENABLE_DISPLAY, 0); 1181 vga_wattr(state->vgabase, VGA_AR_ENABLE_DISPLAY, 0);
1182 clear_attribs = true;
1188 } 1183 }
1189 raw_spin_unlock_irq(&vga_lock); 1184 raw_spin_unlock_irq(&vga_lock);
1185
1186 if (clear_attribs) {
1187 for (i = 0; i < MAX_NR_CONSOLES; i++) {
1188 struct vc_data *c = vc_cons[i].d;
1189 if (c && c->vc_sw == &vga_con) {
1190 /* force hi font mask to 0, so we always clear
1191 the bit on either transition */
1192 c->vc_hi_font_mask = 0x00;
1193 clear_buffer_attributes(c);
1194 c->vc_hi_font_mask = ch512 ? 0x0800 : 0;
1195 }
1196 }
1197 }
1190 return 0; 1198 return 0;
1191} 1199}
1192 1200
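
The vgacon fix above splits the font operation in two: the port I/O stays inside raw_spin_lock_irq(&vga_lock), while the walk over all MAX_NR_CONSOLES consoles, which now also calls clear_buffer_attributes(), is deferred past the unlock via the new clear_attribs flag. A generic sketch of this defer-past-the-critical-section pattern; every name here is illustrative:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);

static void example_reprogram(bool mode_changed)
{
	bool do_followup = false;

	raw_spin_lock_irq(&example_lock);
	/* ... short, atomic hardware pokes ... */
	if (mode_changed)
		do_followup = true;	/* record the work, do it later */
	raw_spin_unlock_irq(&example_lock);

	if (do_followup) {
		/* long follow-up that may take other locks safely now */
	}
}
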
diff --git a/drivers/video/display_timing.c b/drivers/video/display_timing.c
new file mode 100644
index 000000000000..5e1822cef571
--- /dev/null
+++ b/drivers/video/display_timing.c
@@ -0,0 +1,24 @@
1/*
2 * generic display timing functions
3 *
4 * Copyright (c) 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>, Pengutronix
5 *
6 * This file is released under the GPLv2
7 */
8
9#include <linux/export.h>
10#include <linux/slab.h>
11#include <video/display_timing.h>
12
13void display_timings_release(struct display_timings *disp)
14{
15 if (disp->timings) {
16 unsigned int i;
17
18 for (i = 0; i < disp->num_timings; i++)
19 kfree(disp->timings[i]);
20 kfree(disp->timings);
21 }
22 kfree(disp);
23}
24EXPORT_SYMBOL_GPL(display_timings_release);
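
display_timings_release() frees each timing, the pointer array, and the container itself, so a successful builder hands the caller ownership of the whole tree and nothing may be freed piecemeal afterwards. A sketch of that contract, assuming only the num_timings/timings fields used throughout this series:

#include <linux/slab.h>
#include <video/display_timing.h>

/* Build a one-entry timing list; the caller disposes of everything
 * with a single display_timings_release(). */
static struct display_timings *example_single_timing(void)
{
	struct display_timings *disp;

	disp = kzalloc(sizeof(*disp), GFP_KERNEL);
	if (!disp)
		return NULL;

	disp->timings = kzalloc(sizeof(*disp->timings), GFP_KERNEL);
	if (!disp->timings)
		goto err_disp;

	disp->timings[0] = kzalloc(sizeof(*disp->timings[0]), GFP_KERNEL);
	if (!disp->timings[0])
		goto err_timings;

	disp->num_timings = 1;
	return disp;

err_timings:
	kfree(disp->timings);
err_disp:
	kfree(disp);
	return NULL;
}
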
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 3ff0105a496a..dc61c12ecf8c 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1177,8 +1177,10 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
1177 event.data = &con2fb; 1177 event.data = &con2fb;
1178 if (!lock_fb_info(info)) 1178 if (!lock_fb_info(info))
1179 return -ENODEV; 1179 return -ENODEV;
1180 console_lock();
1180 event.info = info; 1181 event.info = info;
1181 ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, &event); 1182 ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, &event);
1183 console_unlock();
1182 unlock_fb_info(info); 1184 unlock_fb_info(info);
1183 break; 1185 break;
1184 case FBIOBLANK: 1186 case FBIOBLANK:
@@ -1650,7 +1652,9 @@ static int do_register_framebuffer(struct fb_info *fb_info)
1650 event.info = fb_info; 1652 event.info = fb_info;
1651 if (!lock_fb_info(fb_info)) 1653 if (!lock_fb_info(fb_info))
1652 return -ENODEV; 1654 return -ENODEV;
1655 console_lock();
1653 fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event); 1656 fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event);
1657 console_unlock();
1654 unlock_fb_info(fb_info); 1658 unlock_fb_info(fb_info);
1655 return 0; 1659 return 0;
1656} 1660}
@@ -1666,8 +1670,10 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
1666 1670
1667 if (!lock_fb_info(fb_info)) 1671 if (!lock_fb_info(fb_info))
1668 return -ENODEV; 1672 return -ENODEV;
1673 console_lock();
1669 event.info = fb_info; 1674 event.info = fb_info;
1670 ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event); 1675 ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
1676 console_unlock();
1671 unlock_fb_info(fb_info); 1677 unlock_fb_info(fb_info);
1672 1678
1673 if (ret) 1679 if (ret)
@@ -1682,7 +1688,9 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
1682 num_registered_fb--; 1688 num_registered_fb--;
1683 fb_cleanup_device(fb_info); 1689 fb_cleanup_device(fb_info);
1684 event.info = fb_info; 1690 event.info = fb_info;
1691 console_lock();
1685 fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event); 1692 fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
1693 console_unlock();
1686 1694
1687 /* this may free fb info */ 1695 /* this may free fb info */
1688 put_fb_info(fb_info); 1696 put_fb_info(fb_info);
@@ -1853,11 +1861,8 @@ int fb_new_modelist(struct fb_info *info)
1853 err = 1; 1861 err = 1;
1854 1862
1855 if (!list_empty(&info->modelist)) { 1863 if (!list_empty(&info->modelist)) {
1856 if (!lock_fb_info(info))
1857 return -ENODEV;
1858 event.info = info; 1864 event.info = info;
1859 err = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event); 1865 err = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event);
1860 unlock_fb_info(info);
1861 } 1866 }
1862 1867
1863 return err; 1868 return err;
diff --git a/drivers/video/fbmon.c b/drivers/video/fbmon.c
index cef65574db6c..94ad0f71383c 100644
--- a/drivers/video/fbmon.c
+++ b/drivers/video/fbmon.c
@@ -31,6 +31,8 @@
31#include <linux/pci.h> 31#include <linux/pci.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <video/edid.h> 33#include <video/edid.h>
34#include <video/of_videomode.h>
35#include <video/videomode.h>
34#ifdef CONFIG_PPC_OF 36#ifdef CONFIG_PPC_OF
35#include <asm/prom.h> 37#include <asm/prom.h>
36#include <asm/pci-bridge.h> 38#include <asm/pci-bridge.h>
@@ -1373,6 +1375,98 @@ int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, struct fb_inf
1373 kfree(timings); 1375 kfree(timings);
1374 return err; 1376 return err;
1375} 1377}
1378
1379#if IS_ENABLED(CONFIG_VIDEOMODE)
1380int fb_videomode_from_videomode(const struct videomode *vm,
1381 struct fb_videomode *fbmode)
1382{
1383 unsigned int htotal, vtotal;
1384
1385 fbmode->xres = vm->hactive;
1386 fbmode->left_margin = vm->hback_porch;
1387 fbmode->right_margin = vm->hfront_porch;
1388 fbmode->hsync_len = vm->hsync_len;
1389
1390 fbmode->yres = vm->vactive;
1391 fbmode->upper_margin = vm->vback_porch;
1392 fbmode->lower_margin = vm->vfront_porch;
1393 fbmode->vsync_len = vm->vsync_len;
1394
1395 /* prevent division by zero in KHZ2PICOS macro */
1396 fbmode->pixclock = vm->pixelclock ?
1397 KHZ2PICOS(vm->pixelclock / 1000) : 0;
1398
1399 fbmode->sync = 0;
1400 fbmode->vmode = 0;
1401 if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH)
1402 fbmode->sync |= FB_SYNC_HOR_HIGH_ACT;
1403 if (vm->dmt_flags & VESA_DMT_VSYNC_HIGH)
1404 fbmode->sync |= FB_SYNC_VERT_HIGH_ACT;
1405 if (vm->data_flags & DISPLAY_FLAGS_INTERLACED)
1406 fbmode->vmode |= FB_VMODE_INTERLACED;
1407 if (vm->data_flags & DISPLAY_FLAGS_DOUBLESCAN)
1408 fbmode->vmode |= FB_VMODE_DOUBLE;
1409 fbmode->flag = 0;
1410
1411 htotal = vm->hactive + vm->hfront_porch + vm->hback_porch +
1412 vm->hsync_len;
1413 vtotal = vm->vactive + vm->vfront_porch + vm->vback_porch +
1414 vm->vsync_len;
1415 /* prevent division by zero */
1416 if (htotal && vtotal) {
1417 fbmode->refresh = vm->pixelclock / (htotal * vtotal);
1418 /* a mode must have htotal and vtotal != 0 or it is invalid */
1419 } else {
1420 fbmode->refresh = 0;
1421 return -EINVAL;
1422 }
1423
1424 return 0;
1425}
1426EXPORT_SYMBOL_GPL(fb_videomode_from_videomode);
1427#endif
1428
1429#if IS_ENABLED(CONFIG_OF_VIDEOMODE)
1430static inline void dump_fb_videomode(const struct fb_videomode *m)
1431{
1432 pr_debug("fb_videomode = %ux%u@%uHz (%ukHz) %u %u %u %u %u %u %u %u %u\n",
1433 m->xres, m->yres, m->refresh, m->pixclock, m->left_margin,
1434 m->right_margin, m->upper_margin, m->lower_margin,
1435 m->hsync_len, m->vsync_len, m->sync, m->vmode, m->flag);
1436}
1437
1438/**
1439 * of_get_fb_videomode - get a fb_videomode from devicetree
1440 * @np: device_node with the timing specification
1441 * @fb: will be set to the return value
1442 * @index: index into the list of display timings in devicetree
1443 *
1444 * DESCRIPTION:
1445 * This function is expensive and should only be used if a single mode is to be
1446 * read from DT. To get multiple modes, start with of_get_display_timings and
1447 * work with that instead.
1448 */
1449int of_get_fb_videomode(struct device_node *np, struct fb_videomode *fb,
1450 int index)
1451{
1452 struct videomode vm;
1453 int ret;
1454
1455 ret = of_get_videomode(np, &vm, index);
1456 if (ret)
1457 return ret;
1458
1459 fb_videomode_from_videomode(&vm, fb);
1460
1461 pr_debug("%s: got %dx%d display mode from %s\n",
1462 of_node_full_name(np), vm.hactive, vm.vactive, np->name);
1463 dump_fb_videomode(fb);
1464
1465 return 0;
1466}
1467EXPORT_SYMBOL_GPL(of_get_fb_videomode);
1468#endif
1469
1376#else 1470#else
1377int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var) 1471int fb_parse_edid(unsigned char *edid, struct fb_var_screeninfo *var)
1378{ 1472{
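
fb_videomode_from_videomode() above is a pure conversion between the new generic struct videomode and fbdev's struct fb_videomode; it touches no hardware and fails only when htotal or vtotal would be zero. A usage sketch for a driver that already holds a videomode, for instance from of_get_videomode(); the wrapper name is illustrative:

#include <linux/fb.h>
#include <video/videomode.h>

static int example_vm_to_fb(const struct videomode *vm,
			    struct fb_videomode *fbmode)
{
	int ret;

	ret = fb_videomode_from_videomode(vm, fbmode);
	if (ret)
		return ret;	/* zero htotal/vtotal: invalid mode */

	/* margins, sync flags, pixclock and refresh are now filled in */
	return 0;
}
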
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index a55e3669d135..ef476b02fbe5 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -177,6 +177,8 @@ static ssize_t store_modes(struct device *device,
177 if (i * sizeof(struct fb_videomode) != count) 177 if (i * sizeof(struct fb_videomode) != count)
178 return -EINVAL; 178 return -EINVAL;
179 179
180 if (!lock_fb_info(fb_info))
181 return -ENODEV;
180 console_lock(); 182 console_lock();
181 list_splice(&fb_info->modelist, &old_list); 183 list_splice(&fb_info->modelist, &old_list);
182 fb_videomode_to_modelist((const struct fb_videomode *)buf, i, 184 fb_videomode_to_modelist((const struct fb_videomode *)buf, i,
@@ -188,6 +190,7 @@ static ssize_t store_modes(struct device *device,
188 fb_destroy_modelist(&old_list); 190 fb_destroy_modelist(&old_list);
189 191
190 console_unlock(); 192 console_unlock();
193 unlock_fb_info(fb_info);
191 194
192 return 0; 195 return 0;
193} 196}
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
new file mode 100644
index 000000000000..ab23c9b79143
--- /dev/null
+++ b/drivers/video/hdmi.c
@@ -0,0 +1,308 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/bitops.h>
10#include <linux/errno.h>
11#include <linux/export.h>
12#include <linux/hdmi.h>
13#include <linux/string.h>
14
15static void hdmi_infoframe_checksum(void *buffer, size_t size)
16{
17 u8 *ptr = buffer;
18 u8 csum = 0;
19 size_t i;
20
21 /* compute checksum */
22 for (i = 0; i < size; i++)
23 csum += ptr[i];
24
25 ptr[3] = 256 - csum;
26}
27
28/**
29 * hdmi_avi_infoframe_init() - initialize an HDMI AVI infoframe
30 * @frame: HDMI AVI infoframe
31 *
32 * Returns 0 on success or a negative error code on failure.
33 */
34int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
35{
36 memset(frame, 0, sizeof(*frame));
37
38 frame->type = HDMI_INFOFRAME_TYPE_AVI;
39 frame->version = 2;
40 frame->length = 13;
41
42 return 0;
43}
44EXPORT_SYMBOL(hdmi_avi_infoframe_init);
45
46/**
47 * hdmi_avi_infoframe_pack() - write HDMI AVI infoframe to binary buffer
48 * @frame: HDMI AVI infoframe
49 * @buffer: destination buffer
50 * @size: size of buffer
51 *
52 * Packs the information contained in the @frame structure into a binary
53 * representation that can be written into the corresponding controller
54 * registers. Also computes the checksum as required by section 5.3.5 of
55 * the HDMI 1.4 specification.
56 *
57 * Returns the number of bytes packed into the binary buffer or a negative
58 * error code on failure.
59 */
60ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
61 size_t size)
62{
63 u8 *ptr = buffer;
64 size_t length;
65
66 length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
67
68 if (size < length)
69 return -ENOSPC;
70
71 memset(buffer, 0, length);
72
73 ptr[0] = frame->type;
74 ptr[1] = frame->version;
75 ptr[2] = frame->length;
76 ptr[3] = 0; /* checksum */
77
78 /* start infoframe payload */
79 ptr += HDMI_INFOFRAME_HEADER_SIZE;
80
81 ptr[0] = ((frame->colorspace & 0x3) << 5) | (frame->scan_mode & 0x3);
82
83 if (frame->active_info_valid)
84 ptr[0] |= BIT(4);
85
86 if (frame->horizontal_bar_valid)
87 ptr[0] |= BIT(3);
88
89 if (frame->vertical_bar_valid)
90 ptr[0] |= BIT(2);
91
92 ptr[1] = ((frame->colorimetry & 0x3) << 6) |
93 ((frame->picture_aspect & 0x3) << 4) |
94 (frame->active_aspect & 0xf);
95
96 ptr[2] = ((frame->extended_colorimetry & 0x7) << 4) |
97 ((frame->quantization_range & 0x3) << 2) |
98 (frame->nups & 0x3);
99
100 if (frame->itc)
101 ptr[2] |= BIT(7);
102
103 ptr[3] = frame->video_code & 0x7f;
104
105 ptr[4] = ((frame->ycc_quantization_range & 0x3) << 6) |
106 ((frame->content_type & 0x3) << 4) |
107 (frame->pixel_repeat & 0xf);
108
109 ptr[5] = frame->top_bar & 0xff;
110 ptr[6] = (frame->top_bar >> 8) & 0xff;
111 ptr[7] = frame->bottom_bar & 0xff;
112 ptr[8] = (frame->bottom_bar >> 8) & 0xff;
113 ptr[9] = frame->left_bar & 0xff;
114 ptr[10] = (frame->left_bar >> 8) & 0xff;
115 ptr[11] = frame->right_bar & 0xff;
116 ptr[12] = (frame->right_bar >> 8) & 0xff;
117
118 hdmi_infoframe_checksum(buffer, length);
119
120 return length;
121}
122EXPORT_SYMBOL(hdmi_avi_infoframe_pack);
123
124/**
125 * hdmi_spd_infoframe_init() - initialize an HDMI SPD infoframe
126 * @frame: HDMI SPD infoframe
127 * @vendor: vendor string
128 * @product: product string
129 *
130 * Returns 0 on success or a negative error code on failure.
131 */
132int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
133 const char *vendor, const char *product)
134{
135 memset(frame, 0, sizeof(*frame));
136
137 frame->type = HDMI_INFOFRAME_TYPE_SPD;
138 frame->version = 1;
139 frame->length = 25;
140
141 strncpy(frame->vendor, vendor, sizeof(frame->vendor));
142 strncpy(frame->product, product, sizeof(frame->product));
143
144 return 0;
145}
146EXPORT_SYMBOL(hdmi_spd_infoframe_init);
147
148/**
149 * hdmi_spd_infoframe_pack() - write HDMI SPD infoframe to binary buffer
150 * @frame: HDMI SPD infoframe
151 * @buffer: destination buffer
152 * @size: size of buffer
153 *
154 * Packs the information contained in the @frame structure into a binary
155 * representation that can be written into the corresponding controller
156 * registers. Also computes the checksum as required by section 5.3.5 of
157 * the HDMI 1.4 specification.
158 *
159 * Returns the number of bytes packed into the binary buffer or a negative
160 * error code on failure.
161 */
162ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
163 size_t size)
164{
165 u8 *ptr = buffer;
166 size_t length;
167
168 length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
169
170 if (size < length)
171 return -ENOSPC;
172
173 memset(buffer, 0, length);
174
175 ptr[0] = frame->type;
176 ptr[1] = frame->version;
177 ptr[2] = frame->length;
178 ptr[3] = 0; /* checksum */
179
180 /* start infoframe payload */
181 ptr += HDMI_INFOFRAME_HEADER_SIZE;
182
183 memcpy(ptr, frame->vendor, sizeof(frame->vendor));
184 memcpy(ptr + 8, frame->product, sizeof(frame->product));
185
186 ptr[24] = frame->sdi;
187
188 hdmi_infoframe_checksum(buffer, length);
189
190 return length;
191}
192EXPORT_SYMBOL(hdmi_spd_infoframe_pack);
193
194/**
195 * hdmi_audio_infoframe_init() - initialize an HDMI audio infoframe
196 * @frame: HDMI audio infoframe
197 *
198 * Returns 0 on success or a negative error code on failure.
199 */
200int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame)
201{
202 memset(frame, 0, sizeof(*frame));
203
204 frame->type = HDMI_INFOFRAME_TYPE_AUDIO;
205 frame->version = 1;
206 frame->length = 10;
207
208 return 0;
209}
210EXPORT_SYMBOL(hdmi_audio_infoframe_init);
211
212/**
213 * hdmi_audio_infoframe_pack() - write HDMI audio infoframe to binary buffer
214 * @frame: HDMI audio infoframe
215 * @buffer: destination buffer
216 * @size: size of buffer
217 *
218 * Packs the information contained in the @frame structure into a binary
219 * representation that can be written into the corresponding controller
220 * registers. Also computes the checksum as required by section 5.3.5 of
221 * the HDMI 1.4 specification.
222 *
223 * Returns the number of bytes packed into the binary buffer or a negative
224 * error code on failure.
225 */
226ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
227 void *buffer, size_t size)
228{
229 unsigned char channels;
230 u8 *ptr = buffer;
231 size_t length;
232
233 length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
234
235 if (size < length)
236 return -ENOSPC;
237
238 memset(buffer, 0, length);
239
240 if (frame->channels >= 2)
241 channels = frame->channels - 1;
242 else
243 channels = 0;
244
245 ptr[0] = frame->type;
246 ptr[1] = frame->version;
247 ptr[2] = frame->length;
248 ptr[3] = 0; /* checksum */
249
250 /* start infoframe payload */
251 ptr += HDMI_INFOFRAME_HEADER_SIZE;
252
253 ptr[0] = ((frame->coding_type & 0xf) << 4) | (channels & 0x7);
254 ptr[1] = ((frame->sample_frequency & 0x7) << 2) |
255 (frame->sample_size & 0x3);
256 ptr[2] = frame->coding_type_ext & 0x1f;
257 ptr[3] = frame->channel_allocation;
258 ptr[4] = (frame->level_shift_value & 0xf) << 3;
259
260 if (frame->downmix_inhibit)
261 ptr[4] |= BIT(7);
262
263 hdmi_infoframe_checksum(buffer, length);
264
265 return length;
266}
267EXPORT_SYMBOL(hdmi_audio_infoframe_pack);
268
269/**
270 * hdmi_vendor_infoframe_pack() - write an HDMI vendor infoframe to binary
271 * buffer
272 * @frame: HDMI vendor infoframe
273 * @buffer: destination buffer
274 * @size: size of buffer
275 *
276 * Packs the information contained in the @frame structure into a binary
277 * representation that can be written into the corresponding controller
278 * registers. Also computes the checksum as required by section 5.3.5 of
279 * the HDMI 1.4 specification.
280 *
281 * Returns the number of bytes packed into the binary buffer or a negative
282 * error code on failure.
283 */
284ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
285 void *buffer, size_t size)
286{
287 u8 *ptr = buffer;
288 size_t length;
289
290 length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
291
292 if (size < length)
293 return -ENOSPC;
294
295 memset(buffer, 0, length);
296
297 ptr[0] = frame->type;
298 ptr[1] = frame->version;
299 ptr[2] = frame->length;
300 ptr[3] = 0; /* checksum */
301
302 memcpy(&ptr[HDMI_INFOFRAME_HEADER_SIZE], frame->data, frame->length);
303
304 hdmi_infoframe_checksum(buffer, length);
305
306 return length;
307}
308EXPORT_SYMBOL(hdmi_vendor_infoframe_pack);
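
The helpers above split infoframe handling into an init step, which fills in the mandatory type/version/length header fields, and a pack step, which serializes the payload and computes the section 5.3.5 checksum. A driver-side sketch using only functions from this file; the VIC value and buffer handling are illustrative:

#include <linux/hdmi.h>

static ssize_t example_write_avi(u8 *buf, size_t size)
{
	struct hdmi_avi_infoframe frame;
	ssize_t len;

	hdmi_avi_infoframe_init(&frame);
	frame.video_code = 16;	/* CEA-861 VIC 16: 1920x1080p60 */

	/* header + payload length on success, -ENOSPC if buf is short */
	len = hdmi_avi_infoframe_pack(&frame, buf, size);
	if (len < 0)
		return len;

	/* buf[0..len-1] is now ready for the encoder's registers */
	return len;
}
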
diff --git a/drivers/video/of_display_timing.c b/drivers/video/of_display_timing.c
new file mode 100644
index 000000000000..13ecd9897010
--- /dev/null
+++ b/drivers/video/of_display_timing.c
@@ -0,0 +1,240 @@
1/*
2 * OF helpers for parsing display timings
3 *
4 * Copyright (c) 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>, Pengutronix
5 *
6 * based on of_videomode.c by Sascha Hauer <s.hauer@pengutronix.de>
7 *
8 * This file is released under the GPLv2
9 */
10#include <linux/export.h>
11#include <linux/of.h>
12#include <linux/slab.h>
13#include <video/display_timing.h>
14#include <video/of_display_timing.h>
15
16/**
17 * parse_timing_property - parse timing_entry from device_node
18 * @np: device_node with the property
19 * @name: name of the property
20 * @result: will be set to the return value
21 *
22 * DESCRIPTION:
23 * Every display_timing can be specified with either just the typical value or
24 * a range consisting of min/typ/max. This function helps to handle both cases.
25 **/
26static int parse_timing_property(struct device_node *np, const char *name,
27 struct timing_entry *result)
28{
29 struct property *prop;
30 int length, cells, ret;
31
32 prop = of_find_property(np, name, &length);
33 if (!prop) {
34 pr_err("%s: could not find property %s\n",
35 of_node_full_name(np), name);
36 return -EINVAL;
37 }
38
39 cells = length / sizeof(u32);
40 if (cells == 1) {
41 ret = of_property_read_u32(np, name, &result->typ);
42 result->min = result->typ;
43 result->max = result->typ;
44 } else if (cells == 3) {
45 ret = of_property_read_u32_array(np, name, &result->min, cells);
46 } else {
47 pr_err("%s: illegal timing specification in %s\n",
48 of_node_full_name(np), name);
49 return -EINVAL;
50 }
51
52 return ret;
53}
54
55/**
56 * of_get_display_timing - parse display_timing entry from device_node
57 * @np: device_node with the properties
58 **/
59static struct display_timing *of_get_display_timing(struct device_node *np)
60{
61 struct display_timing *dt;
62 u32 val = 0;
63 int ret = 0;
64
65 dt = kzalloc(sizeof(*dt), GFP_KERNEL);
66 if (!dt) {
67 pr_err("%s: could not allocate display_timing struct\n",
68 of_node_full_name(np));
69 return NULL;
70 }
71
72 ret |= parse_timing_property(np, "hback-porch", &dt->hback_porch);
73 ret |= parse_timing_property(np, "hfront-porch", &dt->hfront_porch);
74 ret |= parse_timing_property(np, "hactive", &dt->hactive);
75 ret |= parse_timing_property(np, "hsync-len", &dt->hsync_len);
76 ret |= parse_timing_property(np, "vback-porch", &dt->vback_porch);
77 ret |= parse_timing_property(np, "vfront-porch", &dt->vfront_porch);
78 ret |= parse_timing_property(np, "vactive", &dt->vactive);
79 ret |= parse_timing_property(np, "vsync-len", &dt->vsync_len);
80 ret |= parse_timing_property(np, "clock-frequency", &dt->pixelclock);
81
82 dt->dmt_flags = 0;
83 dt->data_flags = 0;
84 if (!of_property_read_u32(np, "vsync-active", &val))
85 dt->dmt_flags |= val ? VESA_DMT_VSYNC_HIGH :
86 VESA_DMT_VSYNC_LOW;
87 if (!of_property_read_u32(np, "hsync-active", &val))
88 dt->dmt_flags |= val ? VESA_DMT_HSYNC_HIGH :
89 VESA_DMT_HSYNC_LOW;
90 if (!of_property_read_u32(np, "de-active", &val))
91 dt->data_flags |= val ? DISPLAY_FLAGS_DE_HIGH :
92 DISPLAY_FLAGS_DE_LOW;
93 if (!of_property_read_u32(np, "pixelclk-active", &val))
94 dt->data_flags |= val ? DISPLAY_FLAGS_PIXDATA_POSEDGE :
95 DISPLAY_FLAGS_PIXDATA_NEGEDGE;
96
97 if (of_property_read_bool(np, "interlaced"))
98 dt->data_flags |= DISPLAY_FLAGS_INTERLACED;
99 if (of_property_read_bool(np, "doublescan"))
100 dt->data_flags |= DISPLAY_FLAGS_DOUBLESCAN;
101
102 if (ret) {
103 pr_err("%s: error reading timing properties\n",
104 of_node_full_name(np));
105 kfree(dt);
106 return NULL;
107 }
108
109 return dt;
110}
111
112/**
113 * of_get_display_timings - parse all display_timing entries from a device_node
114 * @np: device_node with the subnodes
115 **/
116struct display_timings *of_get_display_timings(struct device_node *np)
117{
118 struct device_node *timings_np;
119 struct device_node *entry;
120 struct device_node *native_mode;
121 struct display_timings *disp;
122
123 if (!np) {
124 pr_err("%s: no devicenode given\n", of_node_full_name(np));
125 return NULL;
126 }
127
128 timings_np = of_find_node_by_name(np, "display-timings");
129 if (!timings_np) {
130 pr_err("%s: could not find display-timings node\n",
131 of_node_full_name(np));
132 return NULL;
133 }
134
135 disp = kzalloc(sizeof(*disp), GFP_KERNEL);
136 if (!disp) {
137 pr_err("%s: could not allocate struct disp'\n",
138 of_node_full_name(np));
139 goto dispfail;
140 }
141
142 entry = of_parse_phandle(timings_np, "native-mode", 0);
143 /* assume first child as native mode if none provided */
144 if (!entry)
145 entry = of_get_next_child(timings_np, NULL);
146 /* if there is no child, it is useless to go on */
147 if (!entry) {
148 pr_err("%s: no timing specifications given\n",
149 of_node_full_name(np));
150 goto entryfail;
151 }
152
153 pr_debug("%s: using %s as default timing\n",
154 of_node_full_name(np), entry->name);
155
156 native_mode = entry;
157
158 disp->num_timings = of_get_child_count(timings_np);
159 if (disp->num_timings == 0) {
160 /* should never happen, as entry was already found above */
161 pr_err("%s: no timings specified\n", of_node_full_name(np));
162 goto entryfail;
163 }
164
165 disp->timings = kzalloc(sizeof(struct display_timing *) *
166 disp->num_timings, GFP_KERNEL);
167 if (!disp->timings) {
168 pr_err("%s: could not allocate timings array\n",
169 of_node_full_name(np));
170 goto entryfail;
171 }
172
173 disp->num_timings = 0;
174 disp->native_mode = 0;
175
176 for_each_child_of_node(timings_np, entry) {
177 struct display_timing *dt;
178
179 dt = of_get_display_timing(entry);
180 if (!dt) {
181 /*
182 * to not encourage wrong devicetrees, fail in case of
183 * an error
184 */
185 pr_err("%s: error in timing %d\n",
186 of_node_full_name(np), disp->num_timings + 1);
187 goto timingfail;
188 }
189
190 if (native_mode == entry)
191 disp->native_mode = disp->num_timings;
192
193 disp->timings[disp->num_timings] = dt;
194 disp->num_timings++;
195 }
196 of_node_put(timings_np);
197 /*
198 * native_mode points to the device_node returned by of_parse_phandle
199 * therefore call of_node_put on it
200 */
201 of_node_put(native_mode);
202
203 pr_debug("%s: got %d timings. Using timing #%d as default\n",
204 of_node_full_name(np), disp->num_timings,
205 disp->native_mode + 1);
206
207 return disp;
208
209timingfail:
210 if (native_mode)
211 of_node_put(native_mode);
212 display_timings_release(disp);
213 disp = NULL; /* already freed by the release; kfree below is a no-op */
214entryfail:
215 kfree(disp);
216dispfail:
217 of_node_put(timings_np);
218 return NULL;
219}
220EXPORT_SYMBOL_GPL(of_get_display_timings);
221
222/**
223 * of_display_timings_exist - check if a display-timings node is provided
224 * @np: device_node with the timing
225 **/
226int of_display_timings_exist(struct device_node *np)
227{
228 struct device_node *timings_np;
229
230 if (!np)
231 return -EINVAL;
232
233 timings_np = of_parse_phandle(np, "display-timings", 0);
234 if (!timings_np)
235 return -EINVAL;
236
237 of_node_put(timings_np);
238 return 1;
239}
240EXPORT_SYMBOL_GPL(of_display_timings_exist);
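
of_get_display_timings() parses every child of the display-timings node into a struct display_timing and records which child the native-mode phandle selects. A probe-time usage sketch, assuming a panel's device_node np; the timing_entry fields (min/typ/max) are the ones parse_timing_property() fills in:

#include <linux/of.h>
#include <video/display_timing.h>
#include <video/of_display_timing.h>

static int example_parse_panel(struct device_node *np)
{
	struct display_timings *disp;
	struct display_timing *native;

	disp = of_get_display_timings(np);
	if (!disp)
		return -EINVAL;

	/* native_mode indexes the timing the devicetree marked native */
	native = disp->timings[disp->native_mode];
	pr_info("native timing: %ux%u@%ukHz\n", native->hactive.typ,
		native->vactive.typ, native->pixelclock.typ / 1000);

	display_timings_release(disp);
	return 0;
}
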
diff --git a/drivers/video/of_videomode.c b/drivers/video/of_videomode.c
new file mode 100644
index 000000000000..5b8066cd397f
--- /dev/null
+++ b/drivers/video/of_videomode.c
@@ -0,0 +1,53 @@
1/*
2 * generic videomode helper
3 *
4 * Copyright (c) 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>, Pengutronix
5 *
6 * This file is released under the GPLv2
7 */
8#include <linux/errno.h>
9#include <linux/export.h>
10#include <linux/of.h>
11#include <video/display_timing.h>
12#include <video/of_display_timing.h>
13#include <video/of_videomode.h>
14#include <video/videomode.h>
15
16/**
17 * of_get_videomode - get the videomode #<index> from devicetree
18 * @np: device_node with the display_timings
19 * @vm: will be set to the return value
20 * @index: index into the list of display_timings
21 * (Set this to OF_USE_NATIVE_MODE to use whatever mode is
22 * specified as native mode in the DT.)
23 *
24 * DESCRIPTION:
25 * Get a list of all display timings and put the one
26 * specified by index into *vm. This function should only be used if
27 * only one videomode is to be retrieved. A driver that needs to work
28 * with multiple/all videomodes should work with
29 * of_get_display_timings instead.
30 **/
31int of_get_videomode(struct device_node *np, struct videomode *vm,
32 int index)
33{
34 struct display_timings *disp;
35 int ret;
36
37 disp = of_get_display_timings(np);
38 if (!disp) {
39 pr_err("%s: no timings specified\n", of_node_full_name(np));
40 return -EINVAL;
41 }
42
43 if (index == OF_USE_NATIVE_MODE)
44 index = disp->native_mode;
45
46 ret = videomode_from_timing(disp, vm, index);
47
48 /* release the timing list whether the lookup succeeded or not */
49 display_timings_release(disp);
50
51 return ret;
52}
53EXPORT_SYMBOL_GPL(of_get_videomode);
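
of_get_videomode() is the one-shot wrapper: it parses the whole timing list, copies a single entry into *vm, and releases the list again, which is why its comment steers drivers that need several modes toward of_get_display_timings(). A sketch with an illustrative wrapper name:

#include <linux/of.h>
#include <video/of_videomode.h>
#include <video/videomode.h>

static int example_native_videomode(struct device_node *np,
				    struct videomode *vm)
{
	/* OF_USE_NATIVE_MODE selects the timing the DT marks as native */
	return of_get_videomode(np, vm, OF_USE_NATIVE_MODE);
}
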
diff --git a/drivers/video/via/hw.c b/drivers/video/via/hw.c
index 80233dae358a..22450908306c 100644
--- a/drivers/video/via/hw.c
+++ b/drivers/video/via/hw.c
@@ -1467,10 +1467,10 @@ void viafb_set_vclock(u32 clk, int set_iga)
1467 via_write_misc_reg_mask(0x0C, 0x0C); /* select external clock */ 1467 via_write_misc_reg_mask(0x0C, 0x0C); /* select external clock */
1468} 1468}
1469 1469
1470struct display_timing var_to_timing(const struct fb_var_screeninfo *var, 1470struct via_display_timing var_to_timing(const struct fb_var_screeninfo *var,
1471 u16 cxres, u16 cyres) 1471 u16 cxres, u16 cyres)
1472{ 1472{
1473 struct display_timing timing; 1473 struct via_display_timing timing;
1474 u16 dx = (var->xres - cxres) / 2, dy = (var->yres - cyres) / 2; 1474 u16 dx = (var->xres - cxres) / 2, dy = (var->yres - cyres) / 2;
1475 1475
1476 timing.hor_addr = cxres; 1476 timing.hor_addr = cxres;
@@ -1491,7 +1491,7 @@ struct display_timing var_to_timing(const struct fb_var_screeninfo *var,
1491void viafb_fill_crtc_timing(const struct fb_var_screeninfo *var, 1491void viafb_fill_crtc_timing(const struct fb_var_screeninfo *var,
1492 u16 cxres, u16 cyres, int iga) 1492 u16 cxres, u16 cyres, int iga)
1493{ 1493{
1494 struct display_timing crt_reg = var_to_timing(var, 1494 struct via_display_timing crt_reg = var_to_timing(var,
1495 cxres ? cxres : var->xres, cyres ? cyres : var->yres); 1495 cxres ? cxres : var->xres, cyres ? cyres : var->yres);
1496 1496
1497 if (iga == IGA1) 1497 if (iga == IGA1)
diff --git a/drivers/video/via/hw.h b/drivers/video/via/hw.h
index a8205754c736..3be073c58b03 100644
--- a/drivers/video/via/hw.h
+++ b/drivers/video/via/hw.h
@@ -637,7 +637,7 @@ extern int viafb_LCD_ON;
637extern int viafb_DVI_ON; 637extern int viafb_DVI_ON;
638extern int viafb_hotplug; 638extern int viafb_hotplug;
639 639
640struct display_timing var_to_timing(const struct fb_var_screeninfo *var, 640struct via_display_timing var_to_timing(const struct fb_var_screeninfo *var,
641 u16 cxres, u16 cyres); 641 u16 cxres, u16 cyres);
642void viafb_fill_crtc_timing(const struct fb_var_screeninfo *var, 642void viafb_fill_crtc_timing(const struct fb_var_screeninfo *var,
643 u16 cxres, u16 cyres, int iga); 643 u16 cxres, u16 cyres, int iga);
diff --git a/drivers/video/via/lcd.c b/drivers/video/via/lcd.c
index 980ee1b1dcf3..5d21ff436ec8 100644
--- a/drivers/video/via/lcd.c
+++ b/drivers/video/via/lcd.c
@@ -549,7 +549,7 @@ void viafb_lcd_set_mode(const struct fb_var_screeninfo *var, u16 cxres,
549 int panel_hres = plvds_setting_info->lcd_panel_hres; 549 int panel_hres = plvds_setting_info->lcd_panel_hres;
550 int panel_vres = plvds_setting_info->lcd_panel_vres; 550 int panel_vres = plvds_setting_info->lcd_panel_vres;
551 u32 clock; 551 u32 clock;
552 struct display_timing timing; 552 struct via_display_timing timing;
553 struct fb_var_screeninfo panel_var; 553 struct fb_var_screeninfo panel_var;
554 const struct fb_videomode *mode_crt_table, *panel_crt_table; 554 const struct fb_videomode *mode_crt_table, *panel_crt_table;
555 555
diff --git a/drivers/video/via/share.h b/drivers/video/via/share.h
index 3158dfc90bed..65c65c611e0a 100644
--- a/drivers/video/via/share.h
+++ b/drivers/video/via/share.h
@@ -319,7 +319,7 @@ struct crt_mode_table {
319 int refresh_rate; 319 int refresh_rate;
320 int h_sync_polarity; 320 int h_sync_polarity;
321 int v_sync_polarity; 321 int v_sync_polarity;
322 struct display_timing crtc; 322 struct via_display_timing crtc;
323}; 323};
324 324
325struct io_reg { 325struct io_reg {
diff --git a/drivers/video/via/via_modesetting.c b/drivers/video/via/via_modesetting.c
index 0e431aee17bb..0b414b09b9b4 100644
--- a/drivers/video/via/via_modesetting.c
+++ b/drivers/video/via/via_modesetting.c
@@ -30,9 +30,9 @@
30#include "debug.h" 30#include "debug.h"
31 31
32 32
33void via_set_primary_timing(const struct display_timing *timing) 33void via_set_primary_timing(const struct via_display_timing *timing)
34{ 34{
35 struct display_timing raw; 35 struct via_display_timing raw;
36 36
37 raw.hor_total = timing->hor_total / 8 - 5; 37 raw.hor_total = timing->hor_total / 8 - 5;
38 raw.hor_addr = timing->hor_addr / 8 - 1; 38 raw.hor_addr = timing->hor_addr / 8 - 1;
@@ -88,9 +88,9 @@ void via_set_primary_timing(const struct display_timing *timing)
88 via_write_reg_mask(VIACR, 0x17, 0x80, 0x80); 88 via_write_reg_mask(VIACR, 0x17, 0x80, 0x80);
89} 89}
90 90
91void via_set_secondary_timing(const struct display_timing *timing) 91void via_set_secondary_timing(const struct via_display_timing *timing)
92{ 92{
93 struct display_timing raw; 93 struct via_display_timing raw;
94 94
95 raw.hor_total = timing->hor_total - 1; 95 raw.hor_total = timing->hor_total - 1;
96 raw.hor_addr = timing->hor_addr - 1; 96 raw.hor_addr = timing->hor_addr - 1;
diff --git a/drivers/video/via/via_modesetting.h b/drivers/video/via/via_modesetting.h
index 06e09fe351ae..f6a6503da3b3 100644
--- a/drivers/video/via/via_modesetting.h
+++ b/drivers/video/via/via_modesetting.h
@@ -33,7 +33,7 @@
33#define VIA_PITCH_MAX 0x3FF8 33#define VIA_PITCH_MAX 0x3FF8
34 34
35 35
36struct display_timing { 36struct via_display_timing {
37 u16 hor_total; 37 u16 hor_total;
38 u16 hor_addr; 38 u16 hor_addr;
39 u16 hor_blank_start; 39 u16 hor_blank_start;
@@ -49,8 +49,8 @@ struct display_timing {
49}; 49};
50 50
51 51
52void via_set_primary_timing(const struct display_timing *timing); 52void via_set_primary_timing(const struct via_display_timing *timing);
53void via_set_secondary_timing(const struct display_timing *timing); 53void via_set_secondary_timing(const struct via_display_timing *timing);
54void via_set_primary_address(u32 addr); 54void via_set_primary_address(u32 addr);
55void via_set_secondary_address(u32 addr); 55void via_set_secondary_address(u32 addr);
56void via_set_primary_pitch(u32 pitch); 56void via_set_primary_pitch(u32 pitch);
diff --git a/drivers/video/videomode.c b/drivers/video/videomode.c
new file mode 100644
index 000000000000..21c47a202afa
--- /dev/null
+++ b/drivers/video/videomode.c
@@ -0,0 +1,39 @@
1/*
2 * generic display timing functions
3 *
4 * Copyright (c) 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>, Pengutronix
5 *
6 * This file is released under the GPLv2
7 */
8
9#include <linux/errno.h>
10#include <linux/export.h>
11#include <video/display_timing.h>
12#include <video/videomode.h>
13
14int videomode_from_timing(const struct display_timings *disp,
15 struct videomode *vm, unsigned int index)
16{
17 struct display_timing *dt;
18
19 dt = display_timings_get(disp, index);
20 if (!dt)
21 return -EINVAL;
22
23 vm->pixelclock = display_timing_get_value(&dt->pixelclock, TE_TYP);
24 vm->hactive = display_timing_get_value(&dt->hactive, TE_TYP);
25 vm->hfront_porch = display_timing_get_value(&dt->hfront_porch, TE_TYP);
26 vm->hback_porch = display_timing_get_value(&dt->hback_porch, TE_TYP);
27 vm->hsync_len = display_timing_get_value(&dt->hsync_len, TE_TYP);
28
29 vm->vactive = display_timing_get_value(&dt->vactive, TE_TYP);
30 vm->vfront_porch = display_timing_get_value(&dt->vfront_porch, TE_TYP);
31 vm->vback_porch = display_timing_get_value(&dt->vback_porch, TE_TYP);
32 vm->vsync_len = display_timing_get_value(&dt->vsync_len, TE_TYP);
33
34 vm->dmt_flags = dt->dmt_flags;
35 vm->data_flags = dt->data_flags;
36
37 return 0;
38}
39EXPORT_SYMBOL_GPL(videomode_from_timing);
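
A driver that wants all modes, as the of_get_videomode description earlier
suggests, can run this converter once per timing; a sketch
(convert_all_timings is hypothetical):

#include <video/display_timing.h>
#include <video/videomode.h>

static void convert_all_timings(const struct display_timings *disp)
{
	unsigned int i;

	for (i = 0; i < disp->num_timings; i++) {
		struct videomode vm;

		if (videomode_from_timing(disp, &vm, i))
			continue;
		pr_debug("mode %u: %ux%u, %lu Hz pixel clock\n",
			 i, vm.hactive, vm.vactive, vm.pixelclock);
	}
}
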
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index fad21c927a38..2d94d7413d71 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -85,6 +85,9 @@ struct module;
85struct drm_file; 85struct drm_file;
86struct drm_device; 86struct drm_device;
87 87
88struct device_node;
89struct videomode;
90
88#include <drm/drm_os_linux.h> 91#include <drm/drm_os_linux.h>
89#include <drm/drm_hashtab.h> 92#include <drm/drm_hashtab.h>
90#include <drm/drm_mm.h> 93#include <drm/drm_mm.h>
@@ -446,7 +449,15 @@ struct drm_file {
446 int is_master; /* this file private is a master for a minor */ 449 int is_master; /* this file private is a master for a minor */
447 struct drm_master *master; /* master this node is currently associated with 450 struct drm_master *master; /* master this node is currently associated with
448 N.B. not always minor->master */ 451 N.B. not always minor->master */
452
453 /**
454 * fbs - List of framebuffers associated with this file.
455 *
456 * Protected by fbs_lock. Note that the fbs list holds a reference on
457 * the fb object to keep it from disappearing prematurely.
458 */
449 struct list_head fbs; 459 struct list_head fbs;
460 struct mutex fbs_lock;
450 461
451 wait_queue_head_t event_wait; 462 wait_queue_head_t event_wait;
452 struct list_head event_list; 463 struct list_head event_list;
@@ -919,6 +930,14 @@ struct drm_driver {
919 /* import dmabuf -> GEM */ 930 /* import dmabuf -> GEM */
920 struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev, 931 struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
921 struct dma_buf *dma_buf); 932 struct dma_buf *dma_buf);
933 /* low-level interface used by drm_gem_prime_{import,export} */
934 int (*gem_prime_pin)(struct drm_gem_object *obj);
935 struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
936 struct drm_gem_object *(*gem_prime_import_sg_table)(
937 struct drm_device *dev, size_t size,
938 struct sg_table *sgt);
939 void *(*gem_prime_vmap)(struct drm_gem_object *obj);
940 void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr);
922 941
923 /* vga arb irq handler */ 942 /* vga arb irq handler */
924 void (*vgaarb_irq)(struct drm_device *dev, bool state); 943 void (*vgaarb_irq)(struct drm_device *dev, bool state);
@@ -1276,6 +1295,11 @@ static inline int drm_device_is_unplugged(struct drm_device *dev)
1276 return ret; 1295 return ret;
1277} 1296}
1278 1297
1298static inline bool drm_modeset_is_locked(struct drm_device *dev)
1299{
1300 return mutex_is_locked(&dev->mode_config.mutex);
1301}
1302
1279/******************************************************************/ 1303/******************************************************************/
1280/** \name Internal function definitions */ 1304/** \name Internal function definitions */
1281/*@{*/ 1305/*@{*/
@@ -1456,6 +1480,12 @@ extern struct drm_display_mode *
1456drm_mode_create_from_cmdline_mode(struct drm_device *dev, 1480drm_mode_create_from_cmdline_mode(struct drm_device *dev,
1457 struct drm_cmdline_mode *cmd); 1481 struct drm_cmdline_mode *cmd);
1458 1482
1483extern int drm_display_mode_from_videomode(const struct videomode *vm,
1484 struct drm_display_mode *dmode);
1485extern int of_get_drm_display_mode(struct device_node *np,
1486 struct drm_display_mode *dmode,
1487 int index);
1488
1459/* Modesetting support */ 1489/* Modesetting support */
1460extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); 1490extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
1461extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc); 1491extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
@@ -1540,9 +1570,13 @@ extern int drm_clients_info(struct seq_file *m, void* data);
1540extern int drm_gem_name_info(struct seq_file *m, void *data); 1570extern int drm_gem_name_info(struct seq_file *m, void *data);
1541 1571
1542 1572
1573extern struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
1574 struct drm_gem_object *obj, int flags);
1543extern int drm_gem_prime_handle_to_fd(struct drm_device *dev, 1575extern int drm_gem_prime_handle_to_fd(struct drm_device *dev,
1544 struct drm_file *file_priv, uint32_t handle, uint32_t flags, 1576 struct drm_file *file_priv, uint32_t handle, uint32_t flags,
1545 int *prime_fd); 1577 int *prime_fd);
1578extern struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
1579 struct dma_buf *dma_buf);
1546extern int drm_gem_prime_fd_to_handle(struct drm_device *dev, 1580extern int drm_gem_prime_fd_to_handle(struct drm_device *dev,
1547 struct drm_file *file_priv, int prime_fd, uint32_t *handle); 1581 struct drm_file *file_priv, int prime_fd, uint32_t *handle);
1548 1582
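
The new gem_prime_* hooks let a driver plug the generic
drm_gem_prime_export/import helpers declared above into its drm_driver and
supply only the low-level pieces. A sketch with hypothetical example_*
stubs:

static int example_gem_pin(struct drm_gem_object *obj)
{
	return 0;	/* pin the object's backing storage here */
}

static struct sg_table *example_gem_get_sgt(struct drm_gem_object *obj)
{
	return NULL;	/* build and return the object's sg table here */
}

static struct drm_driver example_driver = {
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,
	/* the generic helpers call back into these */
	.gem_prime_pin = example_gem_pin,
	.gem_prime_get_sg_table = example_gem_get_sgt,
};
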
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 00d78b5161c0..8839b3a24660 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -38,7 +38,8 @@ struct drm_device;
38struct drm_mode_set; 38struct drm_mode_set;
39struct drm_framebuffer; 39struct drm_framebuffer;
40struct drm_object_properties; 40struct drm_object_properties;
41 41struct drm_file;
42struct drm_clip_rect;
42 43
43#define DRM_MODE_OBJECT_CRTC 0xcccccccc 44#define DRM_MODE_OBJECT_CRTC 0xcccccccc
44#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0 45#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
@@ -254,6 +255,10 @@ struct drm_framebuffer {
254 * userspace perspective. 255 * userspace perspective.
255 */ 256 */
256 struct kref refcount; 257 struct kref refcount;
258 /*
259 * Place on the dev->mode_config.fb_list, access protected by
260 * dev->mode_config.fb_lock.
261 */
257 struct list_head head; 262 struct list_head head;
258 struct drm_mode_object base; 263 struct drm_mode_object base;
259 const struct drm_framebuffer_funcs *funcs; 264 const struct drm_framebuffer_funcs *funcs;
@@ -390,6 +395,15 @@ struct drm_crtc {
390 struct drm_device *dev; 395 struct drm_device *dev;
391 struct list_head head; 396 struct list_head head;
392 397
398 /**
399 * crtc mutex
400 *
401 * This provides a read lock for the overall crtc state (mode, dpms
402 * state, ...) and a write lock for everything which can be updated
403 * without a full modeset (fb, cursor data, ...)
404 */
405 struct mutex mutex;
406
393 struct drm_mode_object base; 407 struct drm_mode_object base;
394 408
395 /* framebuffer the connector is currently bound to */ 409 /* framebuffer the connector is currently bound to */
@@ -771,8 +785,18 @@ struct drm_mode_config {
771 struct mutex idr_mutex; /* for IDR management */ 785 struct mutex idr_mutex; /* for IDR management */
772 struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */ 786 struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
773 /* this is limited to one for now */ 787 /* this is limited to one for now */
788
789
790 /**
791 * fb_lock - mutex to protect fb state
792 *
793 * Besides the global fb list this also protects the fbs list in the
794 * file_priv
795 */
796 struct mutex fb_lock;
774 int num_fb; 797 int num_fb;
775 struct list_head fb_list; 798 struct list_head fb_list;
799
776 int num_connector; 800 int num_connector;
777 struct list_head connector_list; 801 struct list_head connector_list;
778 int num_encoder; 802 int num_encoder;
@@ -842,6 +866,10 @@ struct drm_prop_enum_list {
842 char *name; 866 char *name;
843}; 867};
844 868
869extern void drm_modeset_lock_all(struct drm_device *dev);
870extern void drm_modeset_unlock_all(struct drm_device *dev);
871extern void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
872
845extern int drm_crtc_init(struct drm_device *dev, 873extern int drm_crtc_init(struct drm_device *dev,
846 struct drm_crtc *crtc, 874 struct drm_crtc *crtc,
847 const struct drm_crtc_funcs *funcs); 875 const struct drm_crtc_funcs *funcs);
@@ -932,10 +960,13 @@ extern void drm_framebuffer_set_object(struct drm_device *dev,
932extern int drm_framebuffer_init(struct drm_device *dev, 960extern int drm_framebuffer_init(struct drm_device *dev,
933 struct drm_framebuffer *fb, 961 struct drm_framebuffer *fb,
934 const struct drm_framebuffer_funcs *funcs); 962 const struct drm_framebuffer_funcs *funcs);
963extern struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
964 uint32_t id);
935extern void drm_framebuffer_unreference(struct drm_framebuffer *fb); 965extern void drm_framebuffer_unreference(struct drm_framebuffer *fb);
936extern void drm_framebuffer_reference(struct drm_framebuffer *fb); 966extern void drm_framebuffer_reference(struct drm_framebuffer *fb);
937extern void drm_framebuffer_remove(struct drm_framebuffer *fb); 967extern void drm_framebuffer_remove(struct drm_framebuffer *fb);
938extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb); 968extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
969extern void drm_framebuffer_unregister_private(struct drm_framebuffer *fb);
939extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc); 970extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
940extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb); 971extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
941extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY); 972extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
@@ -985,6 +1016,7 @@ extern int drm_mode_getcrtc(struct drm_device *dev,
985 void *data, struct drm_file *file_priv); 1016 void *data, struct drm_file *file_priv);
986extern int drm_mode_getconnector(struct drm_device *dev, 1017extern int drm_mode_getconnector(struct drm_device *dev,
987 void *data, struct drm_file *file_priv); 1018 void *data, struct drm_file *file_priv);
1019extern int drm_mode_set_config_internal(struct drm_mode_set *set);
988extern int drm_mode_setcrtc(struct drm_device *dev, 1020extern int drm_mode_setcrtc(struct drm_device *dev,
989 void *data, struct drm_file *file_priv); 1021 void *data, struct drm_file *file_priv);
990extern int drm_mode_getplane(struct drm_device *dev, 1022extern int drm_mode_getplane(struct drm_device *dev,
@@ -1030,9 +1062,10 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
1030extern int drm_mode_gamma_set_ioctl(struct drm_device *dev, 1062extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
1031 void *data, struct drm_file *file_priv); 1063 void *data, struct drm_file *file_priv);
1032extern u8 *drm_find_cea_extension(struct edid *edid); 1064extern u8 *drm_find_cea_extension(struct edid *edid);
1033extern u8 drm_match_cea_mode(struct drm_display_mode *to_match); 1065extern u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
1034extern bool drm_detect_hdmi_monitor(struct edid *edid); 1066extern bool drm_detect_hdmi_monitor(struct edid *edid);
1035extern bool drm_detect_monitor_audio(struct edid *edid); 1067extern bool drm_detect_monitor_audio(struct edid *edid);
1068extern bool drm_rgb_quant_range_selectable(struct edid *edid);
1036extern int drm_mode_page_flip_ioctl(struct drm_device *dev, 1069extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
1037 void *data, struct drm_file *file_priv); 1070 void *data, struct drm_file *file_priv);
1038extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, 1071extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
@@ -1047,7 +1080,6 @@ extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
1047 int GTF_2C, int GTF_K, int GTF_2J); 1080 int GTF_2C, int GTF_K, int GTF_2J);
1048extern int drm_add_modes_noedid(struct drm_connector *connector, 1081extern int drm_add_modes_noedid(struct drm_connector *connector,
1049 int hdisplay, int vdisplay); 1082 int hdisplay, int vdisplay);
1050extern uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode);
1051 1083
1052extern int drm_edid_header_is_valid(const u8 *raw_edid); 1084extern int drm_edid_header_is_valid(const u8 *raw_edid);
1053extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid); 1085extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
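
With the framebuffer refcounting rework, drm_framebuffer_lookup() is
expected to return a reference that the caller drops with
drm_framebuffer_unreference(); a sketch (fb_exists is hypothetical):

static bool fb_exists(struct drm_device *dev, uint32_t id)
{
	struct drm_framebuffer *fb;

	fb = drm_framebuffer_lookup(dev, id);
	if (!fb)
		return false;

	drm_framebuffer_unreference(fb);	/* drop the lookup reference */
	return true;
}
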
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 0cac551c5347..5da1b4ae7d84 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -247,6 +247,8 @@ struct edid {
247struct drm_encoder; 247struct drm_encoder;
248struct drm_connector; 248struct drm_connector;
249struct drm_display_mode; 249struct drm_display_mode;
250struct hdmi_avi_infoframe;
251
250void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid); 252void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid);
251int drm_av_sync_delay(struct drm_connector *connector, 253int drm_av_sync_delay(struct drm_connector *connector,
252 struct drm_display_mode *mode); 254 struct drm_display_mode *mode);
@@ -254,4 +256,8 @@ struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
254 struct drm_display_mode *mode); 256 struct drm_display_mode *mode);
255int drm_load_edid_firmware(struct drm_connector *connector); 257int drm_load_edid_firmware(struct drm_connector *connector);
256 258
259int
260drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
261 const struct drm_display_mode *mode);
262
257#endif /* __DRM_EDID_H__ */ 263#endif /* __DRM_EDID_H__ */
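
The new helper pairs with the packing routines from <linux/hdmi.h> added
in this series; a sketch (pack_avi_for_mode is hypothetical):

#include <linux/hdmi.h>
#include <drm/drm_edid.h>

static ssize_t pack_avi_for_mode(const struct drm_display_mode *mode,
				 void *buf, size_t len)
{
	struct hdmi_avi_infoframe frame;
	int ret;

	ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
	if (ret < 0)
		return ret;

	return hdmi_avi_infoframe_pack(&frame, buf, len);
}
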
diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h
index b0c11a7809bb..8b9cc3671858 100644
--- a/include/drm/drm_encoder_slave.h
+++ b/include/drm/drm_encoder_slave.h
@@ -159,4 +159,24 @@ static inline void drm_i2c_encoder_unregister(struct drm_i2c_encoder_driver *dri
159 159
160void drm_i2c_encoder_destroy(struct drm_encoder *encoder); 160void drm_i2c_encoder_destroy(struct drm_encoder *encoder);
161 161
162
163/*
164 * Wrapper functions which can be plugged into drm_encoder_helper_funcs:
165 */
166
167void drm_i2c_encoder_dpms(struct drm_encoder *encoder, int mode);
168bool drm_i2c_encoder_mode_fixup(struct drm_encoder *encoder,
169 const struct drm_display_mode *mode,
170 struct drm_display_mode *adjusted_mode);
171void drm_i2c_encoder_prepare(struct drm_encoder *encoder);
172void drm_i2c_encoder_commit(struct drm_encoder *encoder);
173void drm_i2c_encoder_mode_set(struct drm_encoder *encoder,
174 struct drm_display_mode *mode,
175 struct drm_display_mode *adjusted_mode);
176enum drm_connector_status drm_i2c_encoder_detect(struct drm_encoder *encoder,
177 struct drm_connector *connector);
178void drm_i2c_encoder_save(struct drm_encoder *encoder);
179void drm_i2c_encoder_restore(struct drm_encoder *encoder);
180
181
162#endif 182#endif
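
These wrappers are meant to be dropped into a drm_encoder_helper_funcs
table (from drm_crtc_helper.h) verbatim; a sketch, assuming the helper
vtable of this era still carries save/restore/detect:

static const struct drm_encoder_helper_funcs example_helper_funcs = {
	.dpms		= drm_i2c_encoder_dpms,
	.save		= drm_i2c_encoder_save,
	.restore	= drm_i2c_encoder_restore,
	.mode_fixup	= drm_i2c_encoder_mode_fixup,
	.prepare	= drm_i2c_encoder_prepare,
	.commit		= drm_i2c_encoder_commit,
	.mode_set	= drm_i2c_encoder_mode_set,
	.detect		= drm_i2c_encoder_detect,
};
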
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index 76c709837543..4a3fc244301c 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -23,5 +23,10 @@ struct drm_framebuffer *drm_fb_cma_create(struct drm_device *dev,
23struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb, 23struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
24 unsigned int plane); 24 unsigned int plane);
25 25
26#ifdef CONFIG_DEBUG_FS
27void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m);
28int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg);
29#endif
30
26#endif 31#endif
27 32
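
drm_fb_cma_debugfs_show() follows the drm_info_list show() convention, so
under CONFIG_DEBUG_FS a driver can expose it directly; a sketch
(example_debugfs_list is hypothetical):

static struct drm_info_list example_debugfs_list[] = {
	{ "fb", drm_fb_cma_debugfs_show, 0 },
};
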
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 5120b01c2eeb..c09511625a11 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -48,6 +48,18 @@ struct drm_fb_helper_surface_size {
48 u32 surface_depth; 48 u32 surface_depth;
49}; 49};
50 50
51/**
52 * struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library
53 * @gamma_set: Set the given gamma lut register on the given crtc.
54 * @gamma_get: Read the given gamma lut register on the given crtc, used to
55 * save the current lut when force-restoring the fbdev for e.g.
56 * kdb.
57 * @fb_probe: Driver callback to allocate and initialize the fbdev info
58 * structure. Furthermore it also needs to allocate the drm
59 * framebuffer used to back the fbdev.
60 *
61 * Driver callbacks used by the fbdev emulation helper library.
62 */
51struct drm_fb_helper_funcs { 63struct drm_fb_helper_funcs {
52 void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green, 64 void (*gamma_set)(struct drm_crtc *crtc, u16 red, u16 green,
53 u16 blue, int regno); 65 u16 blue, int regno);
@@ -65,9 +77,7 @@ struct drm_fb_helper_connector {
65 77
66struct drm_fb_helper { 78struct drm_fb_helper {
67 struct drm_framebuffer *fb; 79 struct drm_framebuffer *fb;
68 struct drm_framebuffer *saved_fb;
69 struct drm_device *dev; 80 struct drm_device *dev;
70 struct drm_display_mode *mode;
71 int crtc_count; 81 int crtc_count;
72 struct drm_fb_helper_crtc *crtc_info; 82 struct drm_fb_helper_crtc *crtc_info;
73 int connector_count; 83 int connector_count;
@@ -82,9 +92,6 @@ struct drm_fb_helper {
82 bool delayed_hotplug; 92 bool delayed_hotplug;
83}; 93};
84 94
85int drm_fb_helper_single_fb_probe(struct drm_fb_helper *helper,
86 int preferred_bpp);
87
88int drm_fb_helper_init(struct drm_device *dev, 95int drm_fb_helper_init(struct drm_device *dev,
89 struct drm_fb_helper *helper, int crtc_count, 96 struct drm_fb_helper *helper, int crtc_count,
90 int max_conn); 97 int max_conn);
@@ -103,7 +110,6 @@ int drm_fb_helper_setcolreg(unsigned regno,
103 struct fb_info *info); 110 struct fb_info *info);
104 111
105bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper); 112bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper);
106void drm_fb_helper_restore(void);
107void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, 113void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
108 uint32_t fb_width, uint32_t fb_height); 114 uint32_t fb_width, uint32_t fb_height);
109void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, 115void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
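
A driver supplies exactly the three callbacks documented above to the
fbdev helper; a sketch with hypothetical example_* stubs:

static void example_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
			      u16 blue, int regno)
{
	/* write one lut entry to hardware */
}

static void example_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
			      u16 *blue, int regno)
{
	/* read one lut entry back for save/restore */
}

static int example_fb_probe(struct drm_fb_helper *helper,
			    struct drm_fb_helper_surface_size *sizes)
{
	return 0;	/* allocate fb_info and the backing drm framebuffer */
}

static struct drm_fb_helper_funcs example_fb_funcs = {
	.gamma_set = example_gamma_set,
	.gamma_get = example_gamma_get,
	.fb_probe = example_fb_probe,
};
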
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index f0f6b1af25ad..63397ced9254 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -41,4 +41,8 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
41 41
42extern const struct vm_operations_struct drm_gem_cma_vm_ops; 42extern const struct vm_operations_struct drm_gem_cma_vm_ops;
43 43
44#ifdef CONFIG_DEBUG_FS
45void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m);
46#endif
47
44#endif /* __DRM_GEM_CMA_HELPER_H__ */ 48#endif /* __DRM_GEM_CMA_HELPER_H__ */
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 3527fb3f75bb..88591ef8fa24 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -89,6 +89,29 @@ static inline bool drm_mm_initialized(struct drm_mm *mm)
89{ 89{
90 return mm->hole_stack.next; 90 return mm->hole_stack.next;
91} 91}
92
93static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
94{
95 return hole_node->start + hole_node->size;
96}
97
98static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
99{
100 BUG_ON(!hole_node->hole_follows);
101 return __drm_mm_hole_node_start(hole_node);
102}
103
104static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
105{
106 return list_entry(hole_node->node_list.next,
107 struct drm_mm_node, node_list)->start;
108}
109
110static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
111{
112 return __drm_mm_hole_node_end(hole_node);
113}
114
92#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \ 115#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
93 &(mm)->head_node.node_list, \ 116 &(mm)->head_node.node_list, \
94 node_list) 117 node_list)
@@ -99,9 +122,26 @@ static inline bool drm_mm_initialized(struct drm_mm *mm)
99 entry != NULL; entry = next, \ 122 entry != NULL; entry = next, \
100 next = entry ? list_entry(entry->node_list.next, \ 123 next = entry ? list_entry(entry->node_list.next, \
101 struct drm_mm_node, node_list) : NULL) \ 124 struct drm_mm_node, node_list) : NULL) \
125
126/* Note that we need to unroll list_for_each_entry in order to inline
127 * setting hole_start and hole_end on each iteration and keep the
128 * macro sane.
129 */
130#define drm_mm_for_each_hole(entry, mm, hole_start, hole_end) \
131 for (entry = list_entry((mm)->hole_stack.next, struct drm_mm_node, hole_stack); \
132 &entry->hole_stack != &(mm)->hole_stack ? \
133 hole_start = drm_mm_hole_node_start(entry), \
134 hole_end = drm_mm_hole_node_end(entry), \
135 1 : 0; \
136 entry = list_entry(entry->hole_stack.next, struct drm_mm_node, hole_stack))
137
102/* 138/*
103 * Basic range manager support (drm_mm.c) 139 * Basic range manager support (drm_mm.c)
104 */ 140 */
141extern struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
142 unsigned long start,
143 unsigned long size,
144 bool atomic);
105extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node, 145extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
106 unsigned long size, 146 unsigned long size,
107 unsigned alignment, 147 unsigned alignment,
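
The hole helpers above derive hole bounds from the preceding node, and the
iterator inlines hole_start/hole_end on every pass; a sketch of a debug
walk (dump_holes is hypothetical):

static void dump_holes(struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long hole_start, hole_end;

	drm_mm_for_each_hole(entry, mm, hole_start, hole_end)
		pr_debug("hole %#lx-%#lx (%lu bytes)\n",
			 hole_start, hole_end, hole_end - hole_start);
}
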
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index c5c35e629426..a386b0b654cc 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -139,6 +139,19 @@
139 {0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ 139 {0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
140 {0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ 140 {0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
141 {0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ 141 {0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
142 {0x1002, 0x6600, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
143 {0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
144 {0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
145 {0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
146 {0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
147 {0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
148 {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
149 {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
150 {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
151 {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
152 {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
153 {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
154 {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
142 {0x1002, 0x6700, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ 155 {0x1002, 0x6700, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
143 {0x1002, 0x6701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ 156 {0x1002, 0x6701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
144 {0x1002, 0x6702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \ 157 {0x1002, 0x6702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index 6eb76a1f11ab..b08bdade6002 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -3,26 +3,8 @@
3#ifndef _DRM_INTEL_GTT_H 3#ifndef _DRM_INTEL_GTT_H
4#define _DRM_INTEL_GTT_H 4#define _DRM_INTEL_GTT_H
5 5
6struct intel_gtt { 6void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
7 /* Size of memory reserved for graphics by the BIOS */ 7 phys_addr_t *mappable_base, unsigned long *mappable_end);
8 unsigned int stolen_size;
9 /* Total number of gtt entries. */
10 unsigned int gtt_total_entries;
11 /* Part of the gtt that is mappable by the cpu, for those chips where
12 * this is not the full gtt. */
13 unsigned int gtt_mappable_entries;
14 /* Whether i915 needs to use the dmar apis or not. */
15 unsigned int needs_dmar : 1;
16 /* Whether we idle the gpu before mapping/unmapping */
17 unsigned int do_idle_maps : 1;
18 /* Share the scratch page dma with ppgtts. */
19 dma_addr_t scratch_page_dma;
20 struct page *scratch_page;
21 /* for ppgtt PDE access */
22 u32 __iomem *gtt;
23 /* needed for ioremap in drm/i915 */
24 phys_addr_t gma_bus_addr;
25} *intel_gtt_get(void);
26 8
27int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev, 9int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
28 struct agp_bridge_data *bridge); 10 struct agp_bridge_data *bridge);
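
The out-parameter form of intel_gtt_get() replaces the old exported
struct; a sketch (example_probe_gtt is hypothetical):

static void example_probe_gtt(void)
{
	size_t gtt_total, stolen_size;
	phys_addr_t mappable_base;
	unsigned long mappable_end;

	intel_gtt_get(&gtt_total, &stolen_size, &mappable_base,
		      &mappable_end);
	pr_info("GTT: %zu bytes total, %zu bytes stolen\n",
		gtt_total, stolen_size);
}
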
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e3a43a47d78c..0fbd046e7c93 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -790,16 +790,7 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
790 * to make room for a buffer already reserved. (Buffers are reserved before 790 * to make room for a buffer already reserved. (Buffers are reserved before
791 * they are evicted). The following algorithm prevents such deadlocks from 791 * they are evicted). The following algorithm prevents such deadlocks from
792 * occurring: 792 * occurring:
793 * 1) Buffers are reserved with the lru spinlock held. Upon successful 793 * Processes attempting to reserve multiple buffers other than for eviction,
794 * reservation they are removed from the lru list. This stops a reserved buffer
795 * from being evicted. However the lru spinlock is released between the time
796 * a buffer is selected for eviction and the time it is reserved.
797 * Therefore a check is made when a buffer is reserved for eviction, that it
798 * is still the first buffer in the lru list, before it is removed from the
799 * list. @check_lru == 1 forces this check. If it fails, the function returns
800 * -EINVAL, and the caller should then choose a new buffer to evict and repeat
801 * the procedure.
802 * 2) Processes attempting to reserve multiple buffers other than for eviction,
803 * (typically execbuf), should first obtain a unique 32-bit 794 * (typically execbuf), should first obtain a unique 32-bit
804 * validation sequence number, 795 * validation sequence number,
805 * and call this function with @use_sequence == 1 and @sequence == the unique 796 * and call this function with @use_sequence == 1 and @sequence == the unique
@@ -830,9 +821,39 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
830 bool interruptible, 821 bool interruptible,
831 bool no_wait, bool use_sequence, uint32_t sequence); 822 bool no_wait, bool use_sequence, uint32_t sequence);
832 823
824/**
825 * ttm_bo_reserve_slowpath_nolru:
826 * @bo: A pointer to a struct ttm_buffer_object.
827 * @interruptible: Sleep interruptible if waiting.
828 * @sequence: Set (@bo)->sequence to this value after lock
829 *
830 * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
831 * from all our other reservations. Because there are no other reservations
832 * held by us, this function cannot deadlock any more.
833 *
834 * Will not remove reserved buffers from the lru lists.
835 * Otherwise identical to ttm_bo_reserve_slowpath.
836 */
837extern int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
838 bool interruptible,
839 uint32_t sequence);
840
833 841
834/** 842/**
835 * ttm_bo_reserve_locked: 843 * ttm_bo_reserve_slowpath:
844 * @bo: A pointer to a struct ttm_buffer_object.
845 * @interruptible: Sleep interruptible if waiting.
846 * @sequence: Set (@bo)->sequence to this value after lock
847 *
848 * This is called after ttm_bo_reserve returns -EAGAIN and we backed off
849 * from all our other reservations. Because there are no other reservations
850 * held by us, this function cannot deadlock any more.
851 */
852extern int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
853 bool interruptible, uint32_t sequence);
854
855/**
856 * ttm_bo_reserve_nolru:
836 * 857 *
837 * @bo: A pointer to a struct ttm_buffer_object. 858 * @bo: A pointer to a struct ttm_buffer_object.
838 * @interruptible: Sleep interruptible if waiting. 859 * @interruptible: Sleep interruptible if waiting.
@@ -840,9 +861,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
840 * @use_sequence: If @bo is already reserved, Only sleep waiting for 861 * @use_sequence: If @bo is already reserved, Only sleep waiting for
841 * it to become unreserved if @sequence < (@bo)->sequence. 862 * it to become unreserved if @sequence < (@bo)->sequence.
842 * 863 *
843 * Must be called with struct ttm_bo_global::lru_lock held, 864 * Will not remove reserved buffers from the lru lists.
844 * and will not remove reserved buffers from the lru lists.
845 * The function may release the LRU spinlock if it needs to sleep.
846 * Otherwise identical to ttm_bo_reserve. 865 * Otherwise identical to ttm_bo_reserve.
847 * 866 *
848 * Returns: 867 * Returns:
@@ -855,7 +874,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
855 * -EDEADLK: Bo already reserved using @sequence. This error code will only 874 * -EDEADLK: Bo already reserved using @sequence. This error code will only
856 * be returned if @use_sequence is set to true. 875 * be returned if @use_sequence is set to true.
857 */ 876 */
858extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, 877extern int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
859 bool interruptible, 878 bool interruptible,
860 bool no_wait, bool use_sequence, 879 bool no_wait, bool use_sequence,
861 uint32_t sequence); 880 uint32_t sequence);
@@ -879,18 +898,6 @@ extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
879 */ 898 */
880extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo); 899extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo);
881 900
882/**
883 * ttm_bo_wait_unreserved
884 *
885 * @bo: A pointer to a struct ttm_buffer_object.
886 *
887 * Wait for a struct ttm_buffer_object to become unreserved.
888 * This is typically used in the execbuf code to relax cpu-usage when
889 * a potential deadlock condition backoff.
890 */
891extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
892 bool interruptible);
893
894/* 901/*
895 * ttm_bo_util.c 902 * ttm_bo_util.c
896 */ 903 */
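
A sketch of the backoff the comments above describe: after -EAGAIN the
caller drops every reservation it holds, then retries the contended buffer
alone via the slowpath (reserve_with_backoff is hypothetical):

static int reserve_with_backoff(struct ttm_buffer_object *bo,
				uint32_t sequence)
{
	int ret;

	ret = ttm_bo_reserve(bo, true, false, true, sequence);
	if (ret == -EAGAIN) {
		/* all other reservations must have been dropped first */
		ret = ttm_bo_reserve_slowpath(bo, true, sequence);
	}
	return ret;
}
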
diff --git a/include/linux/console.h b/include/linux/console.h
index 3b709da1786e..29680a8cda99 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -77,7 +77,9 @@ extern const struct consw prom_con; /* SPARC PROM console */
77int con_is_bound(const struct consw *csw); 77int con_is_bound(const struct consw *csw);
78int register_con_driver(const struct consw *csw, int first, int last); 78int register_con_driver(const struct consw *csw, int first, int last);
79int unregister_con_driver(const struct consw *csw); 79int unregister_con_driver(const struct consw *csw);
80int do_unregister_con_driver(const struct consw *csw);
80int take_over_console(const struct consw *sw, int first, int last, int deflt); 81int take_over_console(const struct consw *sw, int first, int last, int deflt);
82int do_take_over_console(const struct consw *sw, int first, int last, int deflt);
81void give_up_console(const struct consw *sw); 83void give_up_console(const struct consw *sw);
82#ifdef CONFIG_HW_CONSOLE 84#ifdef CONFIG_HW_CONSOLE
83int con_debug_enter(struct vc_data *vc); 85int con_debug_enter(struct vc_data *vc);
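
The do_* variants appear to be counterparts of the existing calls for
contexts that already hold the console lock; a sketch
(switch_console_driver is hypothetical, MAX_NR_CONSOLES is from
<linux/vt.h>):

static int switch_console_driver(const struct consw *newcon)
{
	int ret;

	console_lock();
	ret = do_take_over_console(newcon, 0, MAX_NR_CONSOLES - 1, 1);
	console_unlock();
	return ret;
}
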
diff --git a/include/linux/fb.h b/include/linux/fb.h
index c7a95714b1fe..58b98606ac26 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -19,6 +19,8 @@ struct vm_area_struct;
19struct fb_info; 19struct fb_info;
20struct device; 20struct device;
21struct file; 21struct file;
22struct videomode;
23struct device_node;
22 24
23/* Definitions below are used in the parsed monitor specs */ 25/* Definitions below are used in the parsed monitor specs */
24#define FB_DPMS_ACTIVE_OFF 1 26#define FB_DPMS_ACTIVE_OFF 1
@@ -714,6 +716,12 @@ extern void fb_destroy_modedb(struct fb_videomode *modedb);
714extern int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb); 716extern int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb);
715extern unsigned char *fb_ddc_read(struct i2c_adapter *adapter); 717extern unsigned char *fb_ddc_read(struct i2c_adapter *adapter);
716 718
719extern int of_get_fb_videomode(struct device_node *np,
720 struct fb_videomode *fb,
721 int index);
722extern int fb_videomode_from_videomode(const struct videomode *vm,
723 struct fb_videomode *fbmode);
724
717/* drivers/video/modedb.c */ 725/* drivers/video/modedb.c */
718#define VESA_MODEDB_SIZE 34 726#define VESA_MODEDB_SIZE 34
719extern void fb_var_to_videomode(struct fb_videomode *mode, 727extern void fb_var_to_videomode(struct fb_videomode *mode,
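
A sketch of what of_get_fb_videomode() presumably chains internally, done
by hand (example_fb_mode_from_dt is hypothetical):

#include <linux/fb.h>
#include <video/of_display_timing.h>	/* OF_USE_NATIVE_MODE */
#include <video/of_videomode.h>
#include <video/videomode.h>

static int example_fb_mode_from_dt(struct device_node *np,
				   struct fb_videomode *fbmode)
{
	struct videomode vm;
	int ret;

	ret = of_get_videomode(np, &vm, OF_USE_NATIVE_MODE);
	if (ret)
		return ret;

	return fb_videomode_from_videomode(&vm, fbmode);
}
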
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
new file mode 100644
index 000000000000..3b589440ecfe
--- /dev/null
+++ b/include/linux/hdmi.h
@@ -0,0 +1,231 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef __LINUX_HDMI_H_
10#define __LINUX_HDMI_H_
11
12#include <linux/types.h>
13
14enum hdmi_infoframe_type {
15 HDMI_INFOFRAME_TYPE_VENDOR = 0x81,
16 HDMI_INFOFRAME_TYPE_AVI = 0x82,
17 HDMI_INFOFRAME_TYPE_SPD = 0x83,
18 HDMI_INFOFRAME_TYPE_AUDIO = 0x84,
19};
20
21#define HDMI_INFOFRAME_HEADER_SIZE 4
22#define HDMI_AVI_INFOFRAME_SIZE 13
23#define HDMI_SPD_INFOFRAME_SIZE 25
24#define HDMI_AUDIO_INFOFRAME_SIZE 10
25
26enum hdmi_colorspace {
27 HDMI_COLORSPACE_RGB,
28 HDMI_COLORSPACE_YUV422,
29 HDMI_COLORSPACE_YUV444,
30};
31
32enum hdmi_scan_mode {
33 HDMI_SCAN_MODE_NONE,
34 HDMI_SCAN_MODE_OVERSCAN,
35 HDMI_SCAN_MODE_UNDERSCAN,
36};
37
38enum hdmi_colorimetry {
39 HDMI_COLORIMETRY_NONE,
40 HDMI_COLORIMETRY_ITU_601,
41 HDMI_COLORIMETRY_ITU_709,
42 HDMI_COLORIMETRY_EXTENDED,
43};
44
45enum hdmi_picture_aspect {
46 HDMI_PICTURE_ASPECT_NONE,
47 HDMI_PICTURE_ASPECT_4_3,
48 HDMI_PICTURE_ASPECT_16_9,
49};
50
51enum hdmi_active_aspect {
52 HDMI_ACTIVE_ASPECT_16_9_TOP = 2,
53 HDMI_ACTIVE_ASPECT_14_9_TOP = 3,
54 HDMI_ACTIVE_ASPECT_16_9_CENTER = 4,
55 HDMI_ACTIVE_ASPECT_PICTURE = 8,
56 HDMI_ACTIVE_ASPECT_4_3 = 9,
57 HDMI_ACTIVE_ASPECT_16_9 = 10,
58 HDMI_ACTIVE_ASPECT_14_9 = 11,
59 HDMI_ACTIVE_ASPECT_4_3_SP_14_9 = 13,
60 HDMI_ACTIVE_ASPECT_16_9_SP_14_9 = 14,
61 HDMI_ACTIVE_ASPECT_16_9_SP_4_3 = 15,
62};
63
64enum hdmi_extended_colorimetry {
65 HDMI_EXTENDED_COLORIMETRY_XV_YCC_601,
66 HDMI_EXTENDED_COLORIMETRY_XV_YCC_709,
67 HDMI_EXTENDED_COLORIMETRY_S_YCC_601,
68 HDMI_EXTENDED_COLORIMETRY_ADOBE_YCC_601,
69 HDMI_EXTENDED_COLORIMETRY_ADOBE_RGB,
70};
71
72enum hdmi_quantization_range {
73 HDMI_QUANTIZATION_RANGE_DEFAULT,
74 HDMI_QUANTIZATION_RANGE_LIMITED,
75 HDMI_QUANTIZATION_RANGE_FULL,
76};
77
78/* non-uniform picture scaling */
79enum hdmi_nups {
80 HDMI_NUPS_UNKNOWN,
81 HDMI_NUPS_HORIZONTAL,
82 HDMI_NUPS_VERTICAL,
83 HDMI_NUPS_BOTH,
84};
85
86enum hdmi_ycc_quantization_range {
87 HDMI_YCC_QUANTIZATION_RANGE_LIMITED,
88 HDMI_YCC_QUANTIZATION_RANGE_FULL,
89};
90
91enum hdmi_content_type {
92 HDMI_CONTENT_TYPE_NONE,
93 HDMI_CONTENT_TYPE_PHOTO,
94 HDMI_CONTENT_TYPE_CINEMA,
95 HDMI_CONTENT_TYPE_GAME,
96};
97
98struct hdmi_avi_infoframe {
99 enum hdmi_infoframe_type type;
100 unsigned char version;
101 unsigned char length;
102 enum hdmi_colorspace colorspace;
103 bool active_info_valid;
104 bool horizontal_bar_valid;
105 bool vertical_bar_valid;
106 enum hdmi_scan_mode scan_mode;
107 enum hdmi_colorimetry colorimetry;
108 enum hdmi_picture_aspect picture_aspect;
109 enum hdmi_active_aspect active_aspect;
110 bool itc;
111 enum hdmi_extended_colorimetry extended_colorimetry;
112 enum hdmi_quantization_range quantization_range;
113 enum hdmi_nups nups;
114 unsigned char video_code;
115 enum hdmi_ycc_quantization_range ycc_quantization_range;
116 enum hdmi_content_type content_type;
117 unsigned char pixel_repeat;
118 unsigned short top_bar;
119 unsigned short bottom_bar;
120 unsigned short left_bar;
121 unsigned short right_bar;
122};
123
124int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame);
125ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
126 size_t size);
127
128enum hdmi_spd_sdi {
129 HDMI_SPD_SDI_UNKNOWN,
130 HDMI_SPD_SDI_DSTB,
131 HDMI_SPD_SDI_DVDP,
132 HDMI_SPD_SDI_DVHS,
133 HDMI_SPD_SDI_HDDVR,
134 HDMI_SPD_SDI_DVC,
135 HDMI_SPD_SDI_DSC,
136 HDMI_SPD_SDI_VCD,
137 HDMI_SPD_SDI_GAME,
138 HDMI_SPD_SDI_PC,
139 HDMI_SPD_SDI_BD,
140 HDMI_SPD_SDI_SACD,
141 HDMI_SPD_SDI_HDDVD,
142 HDMI_SPD_SDI_PMP,
143};
144
145struct hdmi_spd_infoframe {
146 enum hdmi_infoframe_type type;
147 unsigned char version;
148 unsigned char length;
149 char vendor[8];
150 char product[16];
151 enum hdmi_spd_sdi sdi;
152};
153
154int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
155 const char *vendor, const char *product);
156ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
157 size_t size);
158
159enum hdmi_audio_coding_type {
160 HDMI_AUDIO_CODING_TYPE_STREAM,
161 HDMI_AUDIO_CODING_TYPE_PCM,
162 HDMI_AUDIO_CODING_TYPE_AC3,
163 HDMI_AUDIO_CODING_TYPE_MPEG1,
164 HDMI_AUDIO_CODING_TYPE_MP3,
165 HDMI_AUDIO_CODING_TYPE_MPEG2,
166 HDMI_AUDIO_CODING_TYPE_AAC_LC,
167 HDMI_AUDIO_CODING_TYPE_DTS,
168 HDMI_AUDIO_CODING_TYPE_ATRAC,
169 HDMI_AUDIO_CODING_TYPE_DSD,
170 HDMI_AUDIO_CODING_TYPE_EAC3,
171 HDMI_AUDIO_CODING_TYPE_DTS_HD,
172 HDMI_AUDIO_CODING_TYPE_MLP,
173 HDMI_AUDIO_CODING_TYPE_DST,
174 HDMI_AUDIO_CODING_TYPE_WMA_PRO,
175};
176
177enum hdmi_audio_sample_size {
178 HDMI_AUDIO_SAMPLE_SIZE_STREAM,
179 HDMI_AUDIO_SAMPLE_SIZE_16,
180 HDMI_AUDIO_SAMPLE_SIZE_20,
181 HDMI_AUDIO_SAMPLE_SIZE_24,
182};
183
184enum hdmi_audio_sample_frequency {
185 HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM,
186 HDMI_AUDIO_SAMPLE_FREQUENCY_32000,
187 HDMI_AUDIO_SAMPLE_FREQUENCY_44100,
188 HDMI_AUDIO_SAMPLE_FREQUENCY_48000,
189 HDMI_AUDIO_SAMPLE_FREQUENCY_88200,
190 HDMI_AUDIO_SAMPLE_FREQUENCY_96000,
191 HDMI_AUDIO_SAMPLE_FREQUENCY_176400,
192 HDMI_AUDIO_SAMPLE_FREQUENCY_192000,
193};
194
195enum hdmi_audio_coding_type_ext {
196 HDMI_AUDIO_CODING_TYPE_EXT_STREAM,
197 HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC,
198 HDMI_AUDIO_CODING_TYPE_EXT_HE_AAC_V2,
199 HDMI_AUDIO_CODING_TYPE_EXT_MPEG_SURROUND,
200};
201
202struct hdmi_audio_infoframe {
203 enum hdmi_infoframe_type type;
204 unsigned char version;
205 unsigned char length;
206 unsigned char channels;
207 enum hdmi_audio_coding_type coding_type;
208 enum hdmi_audio_sample_size sample_size;
209 enum hdmi_audio_sample_frequency sample_frequency;
210 enum hdmi_audio_coding_type_ext coding_type_ext;
211 unsigned char channel_allocation;
212 unsigned char level_shift_value;
213 bool downmix_inhibit;
214
215};
216
217int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame);
218ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
219 void *buffer, size_t size);
220
221struct hdmi_vendor_infoframe {
222 enum hdmi_infoframe_type type;
223 unsigned char version;
224 unsigned char length;
225 u8 data[27];
226};
227
228ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
229 void *buffer, size_t size);
230
231#endif /* __LINUX_HDMI_H_ */
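
All three infoframe types share the init-then-pack pattern; a sketch for
an SPD frame (example_pack_spd and the buffer sizing are assumptions):

static ssize_t example_pack_spd(void *buf, size_t len)
{
	struct hdmi_spd_infoframe spd;
	int ret;

	ret = hdmi_spd_infoframe_init(&spd, "ACME", "HDMI Encoder");
	if (ret < 0)
		return ret;

	/* len should cover HDMI_INFOFRAME_HEADER_SIZE plus the payload */
	return hdmi_spd_infoframe_pack(&spd, buf, len);
}
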
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index 50ae7d0c279e..e8d65718560b 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -47,6 +47,7 @@ int con_set_cmap(unsigned char __user *cmap);
47int con_get_cmap(unsigned char __user *cmap); 47int con_get_cmap(unsigned char __user *cmap);
48void scrollback(struct vc_data *vc, int lines); 48void scrollback(struct vc_data *vc, int lines);
49void scrollfront(struct vc_data *vc, int lines); 49void scrollfront(struct vc_data *vc, int lines);
50void clear_buffer_attributes(struct vc_data *vc);
50void update_region(struct vc_data *vc, unsigned long start, int count); 51void update_region(struct vc_data *vc, unsigned long start, int count);
51void redraw_screen(struct vc_data *vc, int is_switch); 52void redraw_screen(struct vc_data *vc, int is_switch);
52#define update_screen(x) redraw_screen(x, 0) 53#define update_screen(x) redraw_screen(x, 0)
@@ -130,6 +131,8 @@ void vt_event_post(unsigned int event, unsigned int old, unsigned int new);
130int vt_waitactive(int n); 131int vt_waitactive(int n);
131void change_console(struct vc_data *new_vc); 132void change_console(struct vc_data *new_vc);
132void reset_vc(struct vc_data *vc); 133void reset_vc(struct vc_data *vc);
134extern int do_unbind_con_driver(const struct consw *csw, int first, int last,
135 int deflt);
133extern int unbind_con_driver(const struct consw *csw, int first, int last, 136extern int unbind_con_driver(const struct consw *csw, int first, int last,
134 int deflt); 137 int deflt);
135int vty_init(const struct file_operations *console_fops); 138int vty_init(const struct file_operations *console_fops);
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index c4d2e9c74002..07d59419fe6b 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -308,6 +308,8 @@ typedef struct drm_i915_irq_wait {
308#define I915_PARAM_RSVD_FOR_FUTURE_USE 22 308#define I915_PARAM_RSVD_FOR_FUTURE_USE 22
309#define I915_PARAM_HAS_SECURE_BATCHES 23 309#define I915_PARAM_HAS_SECURE_BATCHES 23
310#define I915_PARAM_HAS_PINNED_BATCHES 24 310#define I915_PARAM_HAS_PINNED_BATCHES 24
311#define I915_PARAM_HAS_EXEC_NO_RELOC 25
312#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26
311 313
312typedef struct drm_i915_getparam { 314typedef struct drm_i915_getparam {
313 int param; 315 int param;
@@ -628,7 +630,11 @@ struct drm_i915_gem_exec_object2 {
628 __u64 offset; 630 __u64 offset;
629 631
630#define EXEC_OBJECT_NEEDS_FENCE (1<<0) 632#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
633#define EXEC_OBJECT_NEEDS_GTT (1<<1)
634#define EXEC_OBJECT_WRITE (1<<2)
635#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_WRITE<<1)
631 __u64 flags; 636 __u64 flags;
637
632 __u64 rsvd1; 638 __u64 rsvd1;
633 __u64 rsvd2; 639 __u64 rsvd2;
634}; 640};
@@ -687,6 +693,20 @@ struct drm_i915_gem_execbuffer2 {
687 */ 693 */
688#define I915_EXEC_IS_PINNED (1<<10) 694#define I915_EXEC_IS_PINNED (1<<10)
689 695
696/** Provide a hint to the kernel that the command stream and auxiliary
697 * state buffers already hold the correct presumed addresses and so the
698 * relocation process may be skipped if no buffers need to be moved in
699 * preparation for the execbuffer.
700 */
701#define I915_EXEC_NO_RELOC (1<<11)
702
703/** Use the reloc.handle as an index into the exec object array rather
704 * than as the per-file handle.
705 */
706#define I915_EXEC_HANDLE_LUT (1<<12)
707
708#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_HANDLE_LUT<<1)
709
690#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) 710#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
691#define i915_execbuffer2_set_context_id(eb2, context) \ 711#define i915_execbuffer2_set_context_id(eb2, context) \
692 (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK 712 (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
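
A userspace sketch of opting in to the two new flags; the surrounding
execbuffer setup is elided:

#include <drm/i915_drm.h>

static void example_set_exec_flags(struct drm_i915_gem_execbuffer2 *eb)
{
	eb->flags |= I915_EXEC_NO_RELOC;	/* presumed offsets are valid */
	eb->flags |= I915_EXEC_HANDLE_LUT;	/* reloc handles index the exec array */
}
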
diff --git a/drivers/staging/omapdrm/omap_drm.h b/include/uapi/drm/omap_drm.h
index f0ac34a8973e..1d0b1172664e 100644
--- a/drivers/staging/omapdrm/omap_drm.h
+++ b/include/uapi/drm/omap_drm.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * include/drm/omap_drm.h 2 * include/uapi/drm/omap_drm.h
3 * 3 *
4 * Copyright (C) 2011 Texas Instruments 4 * Copyright (C) 2011 Texas Instruments
5 * Author: Rob Clark <rob@ti.com> 5 * Author: Rob Clark <rob@ti.com>
diff --git a/include/video/display_timing.h b/include/video/display_timing.h
new file mode 100644
index 000000000000..71e9a383a981
--- /dev/null
+++ b/include/video/display_timing.h
@@ -0,0 +1,124 @@
1/*
2 * Copyright 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>
3 *
4 * description of display timings
5 *
6 * This file is released under the GPLv2
7 */
8
9#ifndef __LINUX_DISPLAY_TIMING_H
10#define __LINUX_DISPLAY_TIMING_H
11
12#include <linux/bitops.h>
13#include <linux/types.h>
14
15/* VESA display monitor timing parameters */
16#define VESA_DMT_HSYNC_LOW BIT(0)
17#define VESA_DMT_HSYNC_HIGH BIT(1)
18#define VESA_DMT_VSYNC_LOW BIT(2)
19#define VESA_DMT_VSYNC_HIGH BIT(3)
20
21/* display specific flags */
22#define DISPLAY_FLAGS_DE_LOW BIT(0) /* data enable flag */
23#define DISPLAY_FLAGS_DE_HIGH BIT(1)
24#define DISPLAY_FLAGS_PIXDATA_POSEDGE BIT(2) /* drive data on pos. edge */
25#define DISPLAY_FLAGS_PIXDATA_NEGEDGE BIT(3) /* drive data on neg. edge */
26#define DISPLAY_FLAGS_INTERLACED BIT(4)
27#define DISPLAY_FLAGS_DOUBLESCAN BIT(5)
28
29/*
30 * A single signal can be specified via a range of minimal and maximal values
31 * with a typical value that lies somewhere in between.
32 */
33struct timing_entry {
34 u32 min;
35 u32 typ;
36 u32 max;
37};
38
39enum timing_entry_index {
40 TE_MIN = 0,
41 TE_TYP = 1,
42 TE_MAX = 2,
43};
44
45/*
46 * Single "mode" entry. This describes one set of signal timings a display can
47 * have in one setting. This struct can later be converted to struct videomode
48 * (see include/video/videomode.h). As each timing_entry can be defined as a
49 * range, one struct display_timing may become multiple struct videomodes.
50 *
51 * Example: hsync active high, vsync active low
52 *
53 * Active Video
54 * Video ______________________XXXXXXXXXXXXXXXXXXXXXX_____________________
55 * |<- sync ->|<- back ->|<----- active ----->|<- front ->|<- sync..
56 * | | porch | | porch |
57 *
58 * HSync _|¯¯¯¯¯¯¯¯¯¯|___________________________________________|¯¯¯¯¯¯¯¯¯
59 *
60 * VSync ¯|__________|¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯¯|_________
61 */
62struct display_timing {
63 struct timing_entry pixelclock;
64
65 struct timing_entry hactive; /* hor. active video */
66 struct timing_entry hfront_porch; /* hor. front porch */
67 struct timing_entry hback_porch; /* hor. back porch */
68 struct timing_entry hsync_len; /* hor. sync len */
69
70 struct timing_entry vactive; /* ver. active video */
71 struct timing_entry vfront_porch; /* ver. front porch */
72 struct timing_entry vback_porch; /* ver. back porch */
73 struct timing_entry vsync_len; /* ver. sync len */
74
75 unsigned int dmt_flags; /* VESA DMT flags */
76 unsigned int data_flags; /* video data flags */
77};
78
79/*
80 * This describes all timing settings a display provides.
81 * The native_mode is the default setting for this display.
82 * Drivers that can handle multiple videomodes should work with this struct and
83 * convert each entry to the desired end result.
84 */
85struct display_timings {
86 unsigned int num_timings;
87 unsigned int native_mode;
88
89 struct display_timing **timings;
90};
91
92/* get value specified by index from struct timing_entry */
93static inline u32 display_timing_get_value(const struct timing_entry *te,
94 enum timing_entry_index index)
95{
96 switch (index) {
97 case TE_MIN:
98 return te->min;
99 break;
100 case TE_TYP:
101 return te->typ;
102 break;
103 case TE_MAX:
104 return te->max;
105 break;
106 default:
107 return te->typ;
108 }
109}
110
111/* get one entry from struct display_timings */
112static inline struct display_timing *display_timings_get(const struct
113 display_timings *disp,
114 unsigned int index)
115{
116 if (disp->num_timings > index)
117 return disp->timings[index];
118 else
119 return NULL;
120}
121
122void display_timings_release(struct display_timings *disp);
123
124#endif
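
A sketch using the min/typ/max accessor above, for a driver that wants the
slowest clock a panel's range allows (slowest_pixclock is hypothetical):

static u32 slowest_pixclock(const struct display_timing *dt)
{
	return display_timing_get_value(&dt->pixelclock, TE_MIN);
}
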
diff --git a/include/video/of_display_timing.h b/include/video/of_display_timing.h
new file mode 100644
index 000000000000..8016eb727cf3
--- /dev/null
+++ b/include/video/of_display_timing.h
@@ -0,0 +1,20 @@
1/*
2 * Copyright 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>
3 *
4 * OF helpers for display timings
5 *
6 * This file is released under the GPLv2
7 */
8
9#ifndef __LINUX_OF_DISPLAY_TIMING_H
10#define __LINUX_OF_DISPLAY_TIMING_H
11
12struct device_node;
13struct display_timings;
14
15#define OF_USE_NATIVE_MODE -1
16
17struct display_timings *of_get_display_timings(struct device_node *np);
18int of_display_timings_exist(struct device_node *np);
19
20#endif
diff --git a/include/video/of_videomode.h b/include/video/of_videomode.h
new file mode 100644
index 000000000000..a07efcc51424
--- /dev/null
+++ b/include/video/of_videomode.h
@@ -0,0 +1,18 @@
1/*
2 * Copyright 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>
3 *
4 * OF helpers for videomode
5 *
6 * This file is released under the GPLv2
7 */
8
9#ifndef __LINUX_OF_VIDEOMODE_H
10#define __LINUX_OF_VIDEOMODE_H
11
12struct device_node;
13struct videomode;
14
15int of_get_videomode(struct device_node *np, struct videomode *vm,
16 int index);
17
18#endif /* __LINUX_OF_VIDEOMODE_H */
diff --git a/include/video/videomode.h b/include/video/videomode.h
new file mode 100644
index 000000000000..a42156234dd4
--- /dev/null
+++ b/include/video/videomode.h
@@ -0,0 +1,48 @@
1/*
2 * Copyright 2012 Steffen Trumtrar <s.trumtrar@pengutronix.de>
3 *
4 * generic videomode description
5 *
6 * This file is released under the GPLv2
7 */
8
9#ifndef __LINUX_VIDEOMODE_H
10#define __LINUX_VIDEOMODE_H
11
12#include <linux/types.h>
13#include <video/display_timing.h>
14
15/*
16 * Subsystem independent description of a videomode.
17 * Can be generated from struct display_timing.
18 */
19struct videomode {
20 unsigned long pixelclock; /* pixelclock in Hz */
21
22 u32 hactive;
23 u32 hfront_porch;
24 u32 hback_porch;
25 u32 hsync_len;
26
27 u32 vactive;
28 u32 vfront_porch;
29 u32 vback_porch;
30 u32 vsync_len;
31
32 unsigned int dmt_flags; /* VESA DMT flags */
33 unsigned int data_flags; /* video data flags */
34};
35
36/**
37 * videomode_from_timing - convert display timing to videomode
38 * @disp: structure with all possible timing entries
39 * @vm: return value
40 * @index: index into the list of display timings in devicetree
41 *
42 * DESCRIPTION:
43 * This function converts a struct display_timing to a struct videomode.
44 */
45int videomode_from_timing(const struct display_timings *disp,
46 struct videomode *vm, unsigned int index);
47
48#endif
diff --git a/kernel/printk.c b/kernel/printk.c
index f24633afa46a..0b31715f335a 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -88,6 +88,12 @@ static DEFINE_SEMAPHORE(console_sem);
88struct console *console_drivers; 88struct console *console_drivers;
89EXPORT_SYMBOL_GPL(console_drivers); 89EXPORT_SYMBOL_GPL(console_drivers);
90 90
91#ifdef CONFIG_LOCKDEP
92static struct lockdep_map console_lock_dep_map = {
93 .name = "console_lock"
94};
95#endif
96
91/* 97/*
92 * This is used for debugging the mess that is the VT code by 98 * This is used for debugging the mess that is the VT code by
93 * keeping track if we have the console semaphore held. It's 99 * keeping track if we have the console semaphore held. It's
@@ -1919,6 +1925,7 @@ void console_lock(void)
1919 return; 1925 return;
1920 console_locked = 1; 1926 console_locked = 1;
1921 console_may_schedule = 1; 1927 console_may_schedule = 1;
1928 mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);
1922} 1929}
1923EXPORT_SYMBOL(console_lock); 1930EXPORT_SYMBOL(console_lock);
1924 1931
@@ -1940,6 +1947,7 @@ int console_trylock(void)
1940 } 1947 }
1941 console_locked = 1; 1948 console_locked = 1;
1942 console_may_schedule = 0; 1949 console_may_schedule = 0;
1950 mutex_acquire(&console_lock_dep_map, 0, 1, _RET_IP_);
1943 return 1; 1951 return 1;
1944} 1952}
1945EXPORT_SYMBOL(console_trylock); 1953EXPORT_SYMBOL(console_trylock);
@@ -2102,6 +2110,7 @@ skip:
2102 local_irq_restore(flags); 2110 local_irq_restore(flags);
2103 } 2111 }
2104 console_locked = 0; 2112 console_locked = 0;
2113 mutex_release(&console_lock_dep_map, 1, _RET_IP_);
2105 2114
2106 /* Release the exclusive_console once it is used */ 2115 /* Release the exclusive_console once it is used */
2107 if (unlikely(exclusive_console)) 2116 if (unlikely(exclusive_console))
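
The same pattern can teach lockdep about any homemade lock it cannot
otherwise see; a sketch with hypothetical names, assuming CONFIG_LOCKDEP:

#include <linux/lockdep.h>

#ifdef CONFIG_LOCKDEP
static struct lockdep_map my_lock_dep_map = {
	.name = "my_lock"
};
#endif

static void my_lock_annotated(void)
{
	/* ...acquire the real locking primitive first... */
	mutex_acquire(&my_lock_dep_map, 0, 0, _RET_IP_);
}
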