aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/Kconfig2
-rw-r--r--drivers/gpu/drm/Makefile6
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c3
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c12
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c17
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c12
-rw-r--r--drivers/gpu/drm/drm_crtc.c63
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c161
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c (renamed from drivers/gpu/drm/drm_dp_i2c_helper.c)146
-rw-r--r--drivers/gpu/drm/drm_edid.c48
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c76
-rw-r--r--drivers/gpu/drm/drm_fops.c44
-rw-r--r--drivers/gpu/drm/drm_hashtab.c38
-rw-r--r--drivers/gpu/drm/drm_ioctl.c3
-rw-r--r--drivers/gpu/drm/drm_irq.c120
-rw-r--r--drivers/gpu/drm/drm_mm.c86
-rw-r--r--drivers/gpu/drm/drm_modes.c8
-rw-r--r--drivers/gpu/drm/drm_pci.c2
-rw-r--r--drivers/gpu/drm/drm_stub.c37
-rw-r--r--drivers/gpu/drm/drm_sysfs.c6
-rw-r--r--drivers/gpu/drm/exynos/Kconfig32
-rw-r--r--drivers/gpu/drm/exynos/Makefile5
-rw-r--r--drivers/gpu/drm/exynos/exynos_ddc.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.c178
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.h26
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.c23
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.h22
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_core.c22
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c57
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.h23
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.c179
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.h22
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c141
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h65
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.c97
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.h23
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c116
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.h22
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c109
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.h22
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c1955
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.h23
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c240
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c501
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c457
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h80
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c1838
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.h24
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.c65
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.h25
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.c136
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.h71
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c2050
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.h252
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c13
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c839
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.h19
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c60
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.h22
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c332
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.h22
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmiphy.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c421
-rw-r--r--drivers/gpu/drm/exynos/regs-fimc.h669
-rw-r--r--drivers/gpu/drm/exynos/regs-gsc.h284
-rw-r--r--drivers/gpu/drm/exynos/regs-hdmi.h22
-rw-r--r--drivers/gpu/drm/exynos/regs-rotator.h73
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c4
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c6
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c10
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.c12
-rw-r--r--drivers/gpu/drm/gma500/mdfld_intel_display.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail.h6
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c10
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_device.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c365
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c8
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c10
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c24
-rw-r--r--drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c4
-rw-r--r--drivers/gpu/drm/i2c/ch7006_drv.c20
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c66
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c101
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c139
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h480
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c364
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c7
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c66
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c420
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c98
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h312
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c763
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c45
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h10
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c14
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c64
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c1091
-rw-r--r--drivers/gpu/drm/i915/intel_display.c2000
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c961
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h123
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c135
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c9
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c235
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c11
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c2
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c14
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c92
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c694
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c318
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h37
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c229
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo_regs.h2
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c99
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c21
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c3
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c12
-rw-r--r--drivers/gpu/drm/nouveau/Makefile38
-rw-r--r--drivers/gpu/drm/nouveau/core/core/client.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/core/engctx.c15
-rw-r--r--drivers/gpu/drm/nouveau/core/core/falcon.c247
-rw-r--r--drivers/gpu/drm/nouveau/core/core/gpuobj.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/core/handle.c5
-rw-r--r--drivers/gpu/drm/nouveau/core/core/mm.c17
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c108
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c110
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c110
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nva3.c124
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c167
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/copy/nve0.c54
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c46
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c83
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c88
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c48
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c53
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c66
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c66
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c62
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c1179
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.h142
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv84.c98
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv94.c109
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva0.c88
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nva3.c111
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c884
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nve0.c94
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c112
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c190
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c126
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c71
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c68
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c126
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c104
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c122
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/base.c19
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c17
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c36
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c60
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c26
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c21
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c12
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h17
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h147
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc13
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h157
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv04.c184
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv10.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv20.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv40.c46
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv40.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv50.c83
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c13
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nve0.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/regs.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c107
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c110
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv10.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nv50.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/software/nvc0.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/vp/nv84.c108
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c110
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/vp/nve0.c110
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/class.h225
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/client.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/engctx.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/falcon.h81
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/gpuobj.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/mm.h6
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/object.h55
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/parent.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/bsp.h41
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/copy.h39
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/crypt.h39
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/disp.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h29
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/fifo.h6
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/ppp.h40
-rw-r--r--drivers/gpu/drm/nouveau/core/include/engine/vp.h41
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h34
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h48
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h32
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h8
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/clock.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/fb.h43
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/gpio.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bar/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/base.c37
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c65
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/disp.c178
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/dp.c182
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c128
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c79
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c19
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/base.c28
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv10.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv20.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv30.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv40.c28
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nv50.c26
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c64
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/device/nve0.c45
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c34
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/base.c92
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c62
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c52
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c89
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c86
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c81
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c51
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c82
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c82
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c131
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c106
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c114
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c79
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c66
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c84
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c72
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c393
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c132
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/base.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/base.c35
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/base.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mxm/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c29
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c30
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c235
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c67
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c57
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_crtc.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c34
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c141
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c112
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hdmi.c261
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fence.c15
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c16
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c764
-rw-r--r--drivers/gpu/drm/nouveau/nv50_cursor.c136
-rw-r--r--drivers/gpu/drm/nouveau/nv50_dac.c321
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c2547
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.h71
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.c403
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.h120
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv50_pm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c530
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fence.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvd0_display.c2141
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c46
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c149
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c4
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c308
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c774
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h149
-rw-r--r--drivers/gpu/drm/radeon/ni.c467
-rw-r--r--drivers/gpu/drm/radeon/nid.h87
-rw-r--r--drivers/gpu/drm/radeon/r100.c23
-rw-r--r--drivers/gpu/drm/radeon/r600.c565
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c7
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c405
-rw-r--r--drivers/gpu/drm/radeon/r600_reg.h9
-rw-r--r--drivers/gpu/drm/radeon/r600d.h86
-rw-r--r--drivers/gpu/drm/radeon/radeon.h46
-rw-r--r--drivers/gpu/drm/radeon/radeon_agp.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c198
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h38
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c57
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c100
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c46
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c23
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c51
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c183
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h7
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c52
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c28
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c37
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c40
-rw-r--r--drivers/gpu/drm/radeon/rv515.c122
-rw-r--r--drivers/gpu/drm/radeon/rv770.c105
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h71
-rw-r--r--drivers/gpu/drm/radeon/si.c440
-rw-r--r--drivers/gpu/drm/radeon/sid.h138
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c2
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c10
-rw-r--r--drivers/gpu/drm/tegra/Kconfig23
-rw-r--r--drivers/gpu/drm/tegra/Makefile7
-rw-r--r--drivers/gpu/drm/tegra/dc.c833
-rw-r--r--drivers/gpu/drm/tegra/dc.h388
-rw-r--r--drivers/gpu/drm/tegra/drm.c115
-rw-r--r--drivers/gpu/drm/tegra/drm.h216
-rw-r--r--drivers/gpu/drm/tegra/fb.c56
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c1321
-rw-r--r--drivers/gpu/drm/tegra/hdmi.h575
-rw-r--r--drivers/gpu/drm/tegra/host1x.c327
-rw-r--r--drivers/gpu/drm/tegra/output.c272
-rw-r--r--drivers/gpu/drm/tegra/rgb.c228
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c321
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c12
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c10
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c51
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c5
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c4
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c31
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h2
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c12
-rw-r--r--drivers/gpu/drm/udl/udl_transfer.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile3
-rw-r--r--drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h909
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c23
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c274
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c24
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c97
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h153
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c917
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c21
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c2019
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h84
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c893
-rw-r--r--drivers/gpu/vga/Kconfig2
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c6
402 files changed, 40238 insertions, 15628 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 18321b68b880..983201b450f1 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -210,3 +210,5 @@ source "drivers/gpu/drm/mgag200/Kconfig"
210source "drivers/gpu/drm/cirrus/Kconfig" 210source "drivers/gpu/drm/cirrus/Kconfig"
211 211
212source "drivers/gpu/drm/shmobile/Kconfig" 212source "drivers/gpu/drm/shmobile/Kconfig"
213
214source "drivers/gpu/drm/tegra/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 2ff5cefe9ead..6f58c81cfcbc 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -8,7 +8,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
8 drm_context.o drm_dma.o \ 8 drm_context.o drm_dma.o \
9 drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \ 9 drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
10 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ 10 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
11 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ 11 drm_agpsupport.o drm_scatter.o drm_pci.o \
12 drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \ 12 drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
13 drm_crtc.o drm_modes.o drm_edid.o \ 13 drm_crtc.o drm_modes.o drm_edid.o \
14 drm_info.o drm_debugfs.o drm_encoder_slave.o \ 14 drm_info.o drm_debugfs.o drm_encoder_slave.o \
@@ -16,10 +16,11 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
16 16
17drm-$(CONFIG_COMPAT) += drm_ioc32.o 17drm-$(CONFIG_COMPAT) += drm_ioc32.o
18drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o 18drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
19drm-$(CONFIG_PCI) += ati_pcigart.o
19 20
20drm-usb-y := drm_usb.o 21drm-usb-y := drm_usb.o
21 22
22drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o 23drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_helper.o
23drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o 24drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
24drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o 25drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
25 26
@@ -48,4 +49,5 @@ obj-$(CONFIG_DRM_GMA500) += gma500/
48obj-$(CONFIG_DRM_UDL) += udl/ 49obj-$(CONFIG_DRM_UDL) += udl/
49obj-$(CONFIG_DRM_AST) += ast/ 50obj-$(CONFIG_DRM_AST) += ast/
50obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/ 51obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
52obj-$(CONFIG_DRM_TEGRA) += tegra/
51obj-y += i2c/ 53obj-y += i2c/
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 31123b6a0be5..2d2c2f8d6dc6 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -60,8 +60,7 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
60 60
61MODULE_DEVICE_TABLE(pci, pciidlist); 61MODULE_DEVICE_TABLE(pci, pciidlist);
62 62
63static int __devinit 63static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
64ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
65{ 64{
66 return drm_get_pci_dev(pdev, ent, &driver); 65 return drm_get_pci_dev(pdev, ent, &driver);
67} 66}
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 1a026ac2dfb4..3602731a6112 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -186,11 +186,11 @@ static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *
186 186
187static int ast_bo_move(struct ttm_buffer_object *bo, 187static int ast_bo_move(struct ttm_buffer_object *bo,
188 bool evict, bool interruptible, 188 bool evict, bool interruptible,
189 bool no_wait_reserve, bool no_wait_gpu, 189 bool no_wait_gpu,
190 struct ttm_mem_reg *new_mem) 190 struct ttm_mem_reg *new_mem)
191{ 191{
192 int r; 192 int r;
193 r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 193 r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
194 return r; 194 return r;
195} 195}
196 196
@@ -356,7 +356,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
356 356
357 ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size, 357 ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size,
358 ttm_bo_type_device, &astbo->placement, 358 ttm_bo_type_device, &astbo->placement,
359 align >> PAGE_SHIFT, 0, false, NULL, acc_size, 359 align >> PAGE_SHIFT, false, NULL, acc_size,
360 NULL, ast_bo_ttm_destroy); 360 NULL, ast_bo_ttm_destroy);
361 if (ret) 361 if (ret)
362 return ret; 362 return ret;
@@ -383,7 +383,7 @@ int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
383 ast_ttm_placement(bo, pl_flag); 383 ast_ttm_placement(bo, pl_flag);
384 for (i = 0; i < bo->placement.num_placement; i++) 384 for (i = 0; i < bo->placement.num_placement; i++)
385 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 385 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
386 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 386 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
387 if (ret) 387 if (ret)
388 return ret; 388 return ret;
389 389
@@ -406,7 +406,7 @@ int ast_bo_unpin(struct ast_bo *bo)
406 406
407 for (i = 0; i < bo->placement.num_placement ; i++) 407 for (i = 0; i < bo->placement.num_placement ; i++)
408 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; 408 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
409 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 409 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
410 if (ret) 410 if (ret)
411 return ret; 411 return ret;
412 412
@@ -431,7 +431,7 @@ int ast_bo_push_sysram(struct ast_bo *bo)
431 for (i = 0; i < bo->placement.num_placement ; i++) 431 for (i = 0; i < bo->placement.num_placement ; i++)
432 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 432 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
433 433
434 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 434 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
435 if (ret) { 435 if (ret) {
436 DRM_ERROR("pushing to VRAM failed\n"); 436 DRM_ERROR("pushing to VRAM failed\n");
437 return ret; 437 return ret;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 101e423c8991..8ecb601152ef 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -35,12 +35,15 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
35}; 35};
36 36
37 37
38static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev) 38static int cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
39{ 39{
40 struct apertures_struct *ap; 40 struct apertures_struct *ap;
41 bool primary = false; 41 bool primary = false;
42 42
43 ap = alloc_apertures(1); 43 ap = alloc_apertures(1);
44 if (!ap)
45 return -ENOMEM;
46
44 ap->ranges[0].base = pci_resource_start(pdev, 0); 47 ap->ranges[0].base = pci_resource_start(pdev, 0);
45 ap->ranges[0].size = pci_resource_len(pdev, 0); 48 ap->ranges[0].size = pci_resource_len(pdev, 0);
46 49
@@ -49,12 +52,18 @@ static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
49#endif 52#endif
50 remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary); 53 remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
51 kfree(ap); 54 kfree(ap);
55
56 return 0;
52} 57}
53 58
54static int __devinit 59static int cirrus_pci_probe(struct pci_dev *pdev,
55cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 60 const struct pci_device_id *ent)
56{ 61{
57 cirrus_kick_out_firmware_fb(pdev); 62 int ret;
63
64 ret = cirrus_kick_out_firmware_fb(pdev);
65 if (ret)
66 return ret;
58 67
59 return drm_get_pci_dev(pdev, ent, &driver); 68 return drm_get_pci_dev(pdev, ent, &driver);
60} 69}
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index bc83f835c830..1413a26e4905 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -186,11 +186,11 @@ static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
186 186
187static int cirrus_bo_move(struct ttm_buffer_object *bo, 187static int cirrus_bo_move(struct ttm_buffer_object *bo,
188 bool evict, bool interruptible, 188 bool evict, bool interruptible,
189 bool no_wait_reserve, bool no_wait_gpu, 189 bool no_wait_gpu,
190 struct ttm_mem_reg *new_mem) 190 struct ttm_mem_reg *new_mem)
191{ 191{
192 int r; 192 int r;
193 r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 193 r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
194 return r; 194 return r;
195} 195}
196 196
@@ -361,7 +361,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
361 361
362 ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size, 362 ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size,
363 ttm_bo_type_device, &cirrusbo->placement, 363 ttm_bo_type_device, &cirrusbo->placement,
364 align >> PAGE_SHIFT, 0, false, NULL, acc_size, 364 align >> PAGE_SHIFT, false, NULL, acc_size,
365 NULL, cirrus_bo_ttm_destroy); 365 NULL, cirrus_bo_ttm_destroy);
366 if (ret) 366 if (ret)
367 return ret; 367 return ret;
@@ -388,7 +388,7 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
388 cirrus_ttm_placement(bo, pl_flag); 388 cirrus_ttm_placement(bo, pl_flag);
389 for (i = 0; i < bo->placement.num_placement; i++) 389 for (i = 0; i < bo->placement.num_placement; i++)
390 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 390 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
391 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 391 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
392 if (ret) 392 if (ret)
393 return ret; 393 return ret;
394 394
@@ -411,7 +411,7 @@ int cirrus_bo_unpin(struct cirrus_bo *bo)
411 411
412 for (i = 0; i < bo->placement.num_placement ; i++) 412 for (i = 0; i < bo->placement.num_placement ; i++)
413 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; 413 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
414 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 414 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
415 if (ret) 415 if (ret)
416 return ret; 416 return ret;
417 417
@@ -436,7 +436,7 @@ int cirrus_bo_push_sysram(struct cirrus_bo *bo)
436 for (i = 0; i < bo->placement.num_placement ; i++) 436 for (i = 0; i < bo->placement.num_placement ; i++)
437 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 437 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
438 438
439 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 439 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
440 if (ret) { 440 if (ret) {
441 DRM_ERROR("pushing to VRAM failed\n"); 441 DRM_ERROR("pushing to VRAM failed\n");
442 return ret; 442 return ret;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index ef1b22144d37..f2d667b8bee2 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -470,10 +470,8 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
470{ 470{
471 struct drm_device *dev = crtc->dev; 471 struct drm_device *dev = crtc->dev;
472 472
473 if (crtc->gamma_store) { 473 kfree(crtc->gamma_store);
474 kfree(crtc->gamma_store); 474 crtc->gamma_store = NULL;
475 crtc->gamma_store = NULL;
476 }
477 475
478 drm_mode_object_put(dev, &crtc->base); 476 drm_mode_object_put(dev, &crtc->base);
479 list_del(&crtc->head); 477 list_del(&crtc->head);
@@ -555,16 +553,17 @@ int drm_connector_init(struct drm_device *dev,
555 INIT_LIST_HEAD(&connector->probed_modes); 553 INIT_LIST_HEAD(&connector->probed_modes);
556 INIT_LIST_HEAD(&connector->modes); 554 INIT_LIST_HEAD(&connector->modes);
557 connector->edid_blob_ptr = NULL; 555 connector->edid_blob_ptr = NULL;
556 connector->status = connector_status_unknown;
558 557
559 list_add_tail(&connector->head, &dev->mode_config.connector_list); 558 list_add_tail(&connector->head, &dev->mode_config.connector_list);
560 dev->mode_config.num_connector++; 559 dev->mode_config.num_connector++;
561 560
562 if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL) 561 if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
563 drm_connector_attach_property(connector, 562 drm_object_attach_property(&connector->base,
564 dev->mode_config.edid_property, 563 dev->mode_config.edid_property,
565 0); 564 0);
566 565
567 drm_connector_attach_property(connector, 566 drm_object_attach_property(&connector->base,
568 dev->mode_config.dpms_property, 0); 567 dev->mode_config.dpms_property, 0);
569 568
570 out: 569 out:
@@ -2280,13 +2279,21 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
2280 2279
2281 for (i = 0; i < num_planes; i++) { 2280 for (i = 0; i < num_planes; i++) {
2282 unsigned int width = r->width / (i != 0 ? hsub : 1); 2281 unsigned int width = r->width / (i != 0 ? hsub : 1);
2282 unsigned int height = r->height / (i != 0 ? vsub : 1);
2283 unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
2283 2284
2284 if (!r->handles[i]) { 2285 if (!r->handles[i]) {
2285 DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i); 2286 DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
2286 return -EINVAL; 2287 return -EINVAL;
2287 } 2288 }
2288 2289
2289 if (r->pitches[i] < drm_format_plane_cpp(r->pixel_format, i) * width) { 2290 if ((uint64_t) width * cpp > UINT_MAX)
2291 return -ERANGE;
2292
2293 if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX)
2294 return -ERANGE;
2295
2296 if (r->pitches[i] < width * cpp) {
2290 DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i); 2297 DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
2291 return -EINVAL; 2298 return -EINVAL;
2292 } 2299 }
@@ -2323,6 +2330,11 @@ int drm_mode_addfb2(struct drm_device *dev,
2323 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 2330 if (!drm_core_check_feature(dev, DRIVER_MODESET))
2324 return -EINVAL; 2331 return -EINVAL;
2325 2332
2333 if (r->flags & ~DRM_MODE_FB_INTERLACED) {
2334 DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
2335 return -EINVAL;
2336 }
2337
2326 if ((config->min_width > r->width) || (r->width > config->max_width)) { 2338 if ((config->min_width > r->width) || (r->width > config->max_width)) {
2327 DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n", 2339 DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
2328 r->width, config->min_width, config->max_width); 2340 r->width, config->min_width, config->max_width);
@@ -2916,27 +2928,6 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
2916} 2928}
2917EXPORT_SYMBOL(drm_property_destroy); 2929EXPORT_SYMBOL(drm_property_destroy);
2918 2930
2919void drm_connector_attach_property(struct drm_connector *connector,
2920 struct drm_property *property, uint64_t init_val)
2921{
2922 drm_object_attach_property(&connector->base, property, init_val);
2923}
2924EXPORT_SYMBOL(drm_connector_attach_property);
2925
2926int drm_connector_property_set_value(struct drm_connector *connector,
2927 struct drm_property *property, uint64_t value)
2928{
2929 return drm_object_property_set_value(&connector->base, property, value);
2930}
2931EXPORT_SYMBOL(drm_connector_property_set_value);
2932
2933int drm_connector_property_get_value(struct drm_connector *connector,
2934 struct drm_property *property, uint64_t *val)
2935{
2936 return drm_object_property_get_value(&connector->base, property, val);
2937}
2938EXPORT_SYMBOL(drm_connector_property_get_value);
2939
2940void drm_object_attach_property(struct drm_mode_object *obj, 2931void drm_object_attach_property(struct drm_mode_object *obj,
2941 struct drm_property *property, 2932 struct drm_property *property,
2942 uint64_t init_val) 2933 uint64_t init_val)
@@ -3173,15 +3164,17 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
3173 /* Delete edid, when there is none. */ 3164 /* Delete edid, when there is none. */
3174 if (!edid) { 3165 if (!edid) {
3175 connector->edid_blob_ptr = NULL; 3166 connector->edid_blob_ptr = NULL;
3176 ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0); 3167 ret = drm_object_property_set_value(&connector->base, dev->mode_config.edid_property, 0);
3177 return ret; 3168 return ret;
3178 } 3169 }
3179 3170
3180 size = EDID_LENGTH * (1 + edid->extensions); 3171 size = EDID_LENGTH * (1 + edid->extensions);
3181 connector->edid_blob_ptr = drm_property_create_blob(connector->dev, 3172 connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
3182 size, edid); 3173 size, edid);
3174 if (!connector->edid_blob_ptr)
3175 return -EINVAL;
3183 3176
3184 ret = drm_connector_property_set_value(connector, 3177 ret = drm_object_property_set_value(&connector->base,
3185 dev->mode_config.edid_property, 3178 dev->mode_config.edid_property,
3186 connector->edid_blob_ptr->base.id); 3179 connector->edid_blob_ptr->base.id);
3187 3180
@@ -3204,6 +3197,9 @@ static bool drm_property_change_is_valid(struct drm_property *property,
3204 for (i = 0; i < property->num_values; i++) 3197 for (i = 0; i < property->num_values; i++)
3205 valid_mask |= (1ULL << property->values[i]); 3198 valid_mask |= (1ULL << property->values[i]);
3206 return !(value & ~valid_mask); 3199 return !(value & ~valid_mask);
3200 } else if (property->flags & DRM_MODE_PROP_BLOB) {
3201 /* Only the driver knows */
3202 return true;
3207 } else { 3203 } else {
3208 int i; 3204 int i;
3209 for (i = 0; i < property->num_values; i++) 3205 for (i = 0; i < property->num_values; i++)
@@ -3245,7 +3241,7 @@ static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
3245 3241
3246 /* store the property value if successful */ 3242 /* store the property value if successful */
3247 if (!ret) 3243 if (!ret)
3248 drm_connector_property_set_value(connector, property, value); 3244 drm_object_property_set_value(&connector->base, property, value);
3249 return ret; 3245 return ret;
3250} 3246}
3251 3247
@@ -3656,9 +3652,12 @@ void drm_mode_config_reset(struct drm_device *dev)
3656 if (encoder->funcs->reset) 3652 if (encoder->funcs->reset)
3657 encoder->funcs->reset(encoder); 3653 encoder->funcs->reset(encoder);
3658 3654
3659 list_for_each_entry(connector, &dev->mode_config.connector_list, head) 3655 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
3656 connector->status = connector_status_unknown;
3657
3660 if (connector->funcs->reset) 3658 if (connector->funcs->reset)
3661 connector->funcs->reset(connector); 3659 connector->funcs->reset(connector);
3660 }
3662} 3661}
3663EXPORT_SYMBOL(drm_mode_config_reset); 3662EXPORT_SYMBOL(drm_mode_config_reset);
3664 3663
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 1227adf74dbc..7b2d378b2576 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -39,6 +39,35 @@
39#include <drm/drm_fb_helper.h> 39#include <drm/drm_fb_helper.h>
40#include <drm/drm_edid.h> 40#include <drm/drm_edid.h>
41 41
42/**
43 * drm_helper_move_panel_connectors_to_head() - move panels to the front in the
44 * connector list
45 * @dev: drm device to operate on
46 *
47 * Some userspace presumes that the first connected connector is the main
48 * display, where it's supposed to display e.g. the login screen. For
49 * laptops, this should be the main panel. Use this function to sort all
50 * (eDP/LVDS) panels to the front of the connector list, instead of
51 * painstakingly trying to initialize them in the right order.
52 */
53void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
54{
55 struct drm_connector *connector, *tmp;
56 struct list_head panel_list;
57
58 INIT_LIST_HEAD(&panel_list);
59
60 list_for_each_entry_safe(connector, tmp,
61 &dev->mode_config.connector_list, head) {
62 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
63 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
64 list_move_tail(&connector->head, &panel_list);
65 }
66
67 list_splice(&panel_list, &dev->mode_config.connector_list);
68}
69EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
70
42static bool drm_kms_helper_poll = true; 71static bool drm_kms_helper_poll = true;
43module_param_named(poll, drm_kms_helper_poll, bool, 0600); 72module_param_named(poll, drm_kms_helper_poll, bool, 0600);
44 73
@@ -64,22 +93,21 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
64 93
65/** 94/**
66 * drm_helper_probe_single_connector_modes - get complete set of display modes 95 * drm_helper_probe_single_connector_modes - get complete set of display modes
67 * @dev: DRM device 96 * @connector: connector to probe
68 * @maxX: max width for modes 97 * @maxX: max width for modes
69 * @maxY: max height for modes 98 * @maxY: max height for modes
70 * 99 *
71 * LOCKING: 100 * LOCKING:
72 * Caller must hold mode config lock. 101 * Caller must hold mode config lock.
73 * 102 *
74 * Based on @dev's mode_config layout, scan all the connectors and try to detect 103 * Based on the helper callbacks implemented by @connector try to detect all
75 * modes on them. Modes will first be added to the connector's probed_modes 104 * valid modes. Modes will first be added to the connector's probed_modes list,
76 * list, then culled (based on validity and the @maxX, @maxY parameters) and 105 * then culled (based on validity and the @maxX, @maxY parameters) and put into
77 * put into the normal modes list. 106 * the normal modes list.
78 * 107 *
79 * Intended to be used either at bootup time or when major configuration 108 * Intended to be use as a generic implementation of the ->probe() @connector
80 * changes have occurred. 109 * callback for drivers that use the crtc helpers for output mode filtering and
81 * 110 * detection.
82 * FIXME: take into account monitor limits
83 * 111 *
84 * RETURNS: 112 * RETURNS:
85 * Number of modes found on @connector. 113 * Number of modes found on @connector.
@@ -109,9 +137,14 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
109 connector->funcs->force(connector); 137 connector->funcs->force(connector);
110 } else { 138 } else {
111 connector->status = connector->funcs->detect(connector, true); 139 connector->status = connector->funcs->detect(connector, true);
112 drm_kms_helper_poll_enable(dev);
113 } 140 }
114 141
142 /* Re-enable polling in case the global poll config changed. */
143 if (drm_kms_helper_poll != dev->mode_config.poll_running)
144 drm_kms_helper_poll_enable(dev);
145
146 dev->mode_config.poll_running = drm_kms_helper_poll;
147
115 if (connector->status == connector_status_disconnected) { 148 if (connector->status == connector_status_disconnected) {
116 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n", 149 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
117 connector->base.id, drm_get_connector_name(connector)); 150 connector->base.id, drm_get_connector_name(connector));
@@ -325,17 +358,24 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
325} 358}
326 359
327/** 360/**
328 * drm_crtc_set_mode - set a mode 361 * drm_crtc_helper_set_mode - internal helper to set a mode
329 * @crtc: CRTC to program 362 * @crtc: CRTC to program
330 * @mode: mode to use 363 * @mode: mode to use
331 * @x: width of mode 364 * @x: horizontal offset into the surface
332 * @y: height of mode 365 * @y: vertical offset into the surface
366 * @old_fb: old framebuffer, for cleanup
333 * 367 *
334 * LOCKING: 368 * LOCKING:
335 * Caller must hold mode config lock. 369 * Caller must hold mode config lock.
336 * 370 *
337 * Try to set @mode on @crtc. Give @crtc and its associated connectors a chance 371 * Try to set @mode on @crtc. Give @crtc and its associated connectors a chance
338 * to fixup or reject the mode prior to trying to set it. 372 * to fixup or reject the mode prior to trying to set it. This is an internal
373 * helper that drivers could e.g. use to update properties that require the
374 * entire output pipe to be disabled and re-enabled in a new configuration. For
375 * example for changing whether audio is enabled on a hdmi link or for changing
376 * panel fitter or dither attributes. It is also called by the
377 * drm_crtc_helper_set_config() helper function to drive the mode setting
378 * sequence.
339 * 379 *
340 * RETURNS: 380 * RETURNS:
341 * True if the mode was set successfully, or false otherwise. 381 * True if the mode was set successfully, or false otherwise.
@@ -491,20 +531,19 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
491 531
492/** 532/**
493 * drm_crtc_helper_set_config - set a new config from userspace 533 * drm_crtc_helper_set_config - set a new config from userspace
494 * @crtc: CRTC to setup 534 * @set: mode set configuration
495 * @crtc_info: user provided configuration
496 * @new_mode: new mode to set
497 * @connector_set: set of connectors for the new config
498 * @fb: new framebuffer
499 * 535 *
500 * LOCKING: 536 * LOCKING:
501 * Caller must hold mode config lock. 537 * Caller must hold mode config lock.
502 * 538 *
503 * Setup a new configuration, provided by the user in @crtc_info, and enable 539 * Setup a new configuration, provided by the upper layers (either an ioctl call
504 * it. 540 * from userspace or internally e.g. from the fbdev suppport code) in @set, and
541 * enable it. This is the main helper functions for drivers that implement
542 * kernel mode setting with the crtc helper functions and the assorted
543 * ->prepare(), ->modeset() and ->commit() helper callbacks.
505 * 544 *
506 * RETURNS: 545 * RETURNS:
507 * Zero. (FIXME) 546 * Returns 0 on success, -ERRNO on failure.
508 */ 547 */
509int drm_crtc_helper_set_config(struct drm_mode_set *set) 548int drm_crtc_helper_set_config(struct drm_mode_set *set)
510{ 549{
@@ -800,12 +839,14 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
800} 839}
801 840
802/** 841/**
803 * drm_helper_connector_dpms 842 * drm_helper_connector_dpms() - connector dpms helper implementation
804 * @connector affected connector 843 * @connector: affected connector
805 * @mode DPMS mode 844 * @mode: DPMS mode
806 * 845 *
807 * Calls the low-level connector DPMS function, then 846 * This is the main helper function provided by the crtc helper framework for
808 * calls appropriate encoder and crtc DPMS functions as well 847 * implementing the DPMS connector attribute. It computes the new desired DPMS
848 * state for all encoders and crtcs in the output mesh and calls the ->dpms()
849 * callback provided by the driver appropriately.
809 */ 850 */
810void drm_helper_connector_dpms(struct drm_connector *connector, int mode) 851void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
811{ 852{
@@ -918,6 +959,15 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
918} 959}
919EXPORT_SYMBOL(drm_helper_resume_force_mode); 960EXPORT_SYMBOL(drm_helper_resume_force_mode);
920 961
962void drm_kms_helper_hotplug_event(struct drm_device *dev)
963{
964 /* send a uevent + call fbdev */
965 drm_sysfs_hotplug_event(dev);
966 if (dev->mode_config.funcs->output_poll_changed)
967 dev->mode_config.funcs->output_poll_changed(dev);
968}
969EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
970
921#define DRM_OUTPUT_POLL_PERIOD (10*HZ) 971#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
922static void output_poll_execute(struct work_struct *work) 972static void output_poll_execute(struct work_struct *work)
923{ 973{
@@ -933,20 +983,22 @@ static void output_poll_execute(struct work_struct *work)
933 mutex_lock(&dev->mode_config.mutex); 983 mutex_lock(&dev->mode_config.mutex);
934 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 984 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
935 985
936 /* if this is HPD or polled don't check it - 986 /* Ignore forced connectors. */
937 TV out for instance */ 987 if (connector->force)
938 if (!connector->polled)
939 continue; 988 continue;
940 989
941 else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT)) 990 /* Ignore HPD capable connectors and connectors where we don't
942 repoll = true; 991 * want any hotplug detection at all for polling. */
992 if (!connector->polled || connector->polled == DRM_CONNECTOR_POLL_HPD)
993 continue;
994
995 repoll = true;
943 996
944 old_status = connector->status; 997 old_status = connector->status;
945 /* if we are connected and don't want to poll for disconnect 998 /* if we are connected and don't want to poll for disconnect
946 skip it */ 999 skip it */
947 if (old_status == connector_status_connected && 1000 if (old_status == connector_status_connected &&
948 !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) && 1001 !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT))
949 !(connector->polled & DRM_CONNECTOR_POLL_HPD))
950 continue; 1002 continue;
951 1003
952 connector->status = connector->funcs->detect(connector, false); 1004 connector->status = connector->funcs->detect(connector, false);
@@ -960,12 +1012,8 @@ static void output_poll_execute(struct work_struct *work)
960 1012
961 mutex_unlock(&dev->mode_config.mutex); 1013 mutex_unlock(&dev->mode_config.mutex);
962 1014
963 if (changed) { 1015 if (changed)
964 /* send a uevent + call fbdev */ 1016 drm_kms_helper_hotplug_event(dev);
965 drm_sysfs_hotplug_event(dev);
966 if (dev->mode_config.funcs->output_poll_changed)
967 dev->mode_config.funcs->output_poll_changed(dev);
968 }
969 1017
970 if (repoll) 1018 if (repoll)
971 schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD); 1019 schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
@@ -988,7 +1036,8 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
988 return; 1036 return;
989 1037
990 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1038 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
991 if (connector->polled) 1039 if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
1040 DRM_CONNECTOR_POLL_DISCONNECT))
992 poll = true; 1041 poll = true;
993 } 1042 }
994 1043
@@ -1014,12 +1063,34 @@ EXPORT_SYMBOL(drm_kms_helper_poll_fini);
1014 1063
1015void drm_helper_hpd_irq_event(struct drm_device *dev) 1064void drm_helper_hpd_irq_event(struct drm_device *dev)
1016{ 1065{
1066 struct drm_connector *connector;
1067 enum drm_connector_status old_status;
1068 bool changed = false;
1069
1017 if (!dev->mode_config.poll_enabled) 1070 if (!dev->mode_config.poll_enabled)
1018 return; 1071 return;
1019 1072
1020 /* kill timer and schedule immediate execution, this doesn't block */ 1073 mutex_lock(&dev->mode_config.mutex);
1021 cancel_delayed_work(&dev->mode_config.output_poll_work); 1074 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1022 if (drm_kms_helper_poll) 1075
1023 schedule_delayed_work(&dev->mode_config.output_poll_work, 0); 1076 /* Only handle HPD capable connectors. */
1077 if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
1078 continue;
1079
1080 old_status = connector->status;
1081
1082 connector->status = connector->funcs->detect(connector, false);
1083 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
1084 connector->base.id,
1085 drm_get_connector_name(connector),
1086 old_status, connector->status);
1087 if (old_status != connector->status)
1088 changed = true;
1089 }
1090
1091 mutex_unlock(&dev->mode_config.mutex);
1092
1093 if (changed)
1094 drm_kms_helper_hotplug_event(dev);
1024} 1095}
1025EXPORT_SYMBOL(drm_helper_hpd_irq_event); 1096EXPORT_SYMBOL(drm_helper_hpd_irq_event);
diff --git a/drivers/gpu/drm/drm_dp_i2c_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 7f246f212457..89e196627160 100644
--- a/drivers/gpu/drm/drm_dp_i2c_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -30,6 +30,15 @@
30#include <drm/drm_dp_helper.h> 30#include <drm/drm_dp_helper.h>
31#include <drm/drmP.h> 31#include <drm/drmP.h>
32 32
33/**
34 * DOC: dp helpers
35 *
36 * These functions contain some common logic and helpers at various abstraction
37 * levels to deal with Display Port sink devices and related things like DP aux
38 * channel transfers, EDID reading over DP aux channels, decoding certain DPCD
39 * blocks, ...
40 */
41
33/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */ 42/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
34static int 43static int
35i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode, 44i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
@@ -37,7 +46,7 @@ i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
37{ 46{
38 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; 47 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
39 int ret; 48 int ret;
40 49
41 ret = (*algo_data->aux_ch)(adapter, mode, 50 ret = (*algo_data->aux_ch)(adapter, mode,
42 write_byte, read_byte); 51 write_byte, read_byte);
43 return ret; 52 return ret;
@@ -182,7 +191,6 @@ i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
182{ 191{
183 (void) i2c_algo_dp_aux_address(adapter, 0, false); 192 (void) i2c_algo_dp_aux_address(adapter, 0, false);
184 (void) i2c_algo_dp_aux_stop(adapter, false); 193 (void) i2c_algo_dp_aux_stop(adapter, false);
185
186} 194}
187 195
188static int 196static int
@@ -194,11 +202,23 @@ i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
194 return 0; 202 return 0;
195} 203}
196 204
205/**
206 * i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper
207 * @adapter: i2c adapter to register
208 *
209 * This registers an i2c adapater that uses dp aux channel as it's underlaying
210 * transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure
211 * and store it in the algo_data member of the @adapter argument. This will be
212 * used by the i2c over dp aux algorithm to drive the hardware.
213 *
214 * RETURNS:
215 * 0 on success, -ERRNO on failure.
216 */
197int 217int
198i2c_dp_aux_add_bus(struct i2c_adapter *adapter) 218i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
199{ 219{
200 int error; 220 int error;
201 221
202 error = i2c_dp_aux_prepare_bus(adapter); 222 error = i2c_dp_aux_prepare_bus(adapter);
203 if (error) 223 if (error)
204 return error; 224 return error;
@@ -206,3 +226,123 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
206 return error; 226 return error;
207} 227}
208EXPORT_SYMBOL(i2c_dp_aux_add_bus); 228EXPORT_SYMBOL(i2c_dp_aux_add_bus);
229
230/* Helpers for DP link training */
231static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
232{
233 return link_status[r - DP_LANE0_1_STATUS];
234}
235
236static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
237 int lane)
238{
239 int i = DP_LANE0_1_STATUS + (lane >> 1);
240 int s = (lane & 1) * 4;
241 u8 l = dp_link_status(link_status, i);
242 return (l >> s) & 0xf;
243}
244
245bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
246 int lane_count)
247{
248 u8 lane_align;
249 u8 lane_status;
250 int lane;
251
252 lane_align = dp_link_status(link_status,
253 DP_LANE_ALIGN_STATUS_UPDATED);
254 if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
255 return false;
256 for (lane = 0; lane < lane_count; lane++) {
257 lane_status = dp_get_lane_status(link_status, lane);
258 if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
259 return false;
260 }
261 return true;
262}
263EXPORT_SYMBOL(drm_dp_channel_eq_ok);
264
265bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
266 int lane_count)
267{
268 int lane;
269 u8 lane_status;
270
271 for (lane = 0; lane < lane_count; lane++) {
272 lane_status = dp_get_lane_status(link_status, lane);
273 if ((lane_status & DP_LANE_CR_DONE) == 0)
274 return false;
275 }
276 return true;
277}
278EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
279
280u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
281 int lane)
282{
283 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
284 int s = ((lane & 1) ?
285 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
286 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
287 u8 l = dp_link_status(link_status, i);
288
289 return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
290}
291EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
292
293u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
294 int lane)
295{
296 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
297 int s = ((lane & 1) ?
298 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
299 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
300 u8 l = dp_link_status(link_status, i);
301
302 return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
303}
304EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
305
306void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
307 if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
308 udelay(100);
309 else
310 mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
311}
312EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
313
314void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
315 if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
316 udelay(400);
317 else
318 mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
319}
320EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
321
322u8 drm_dp_link_rate_to_bw_code(int link_rate)
323{
324 switch (link_rate) {
325 case 162000:
326 default:
327 return DP_LINK_BW_1_62;
328 case 270000:
329 return DP_LINK_BW_2_7;
330 case 540000:
331 return DP_LINK_BW_5_4;
332 }
333}
334EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
335
336int drm_dp_bw_code_to_link_rate(u8 link_bw)
337{
338 switch (link_bw) {
339 case DP_LINK_BW_1_62:
340 default:
341 return 162000;
342 case DP_LINK_BW_2_7:
343 return 270000;
344 case DP_LINK_BW_5_4:
345 return 540000;
346 }
347}
348EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index fadcd44ff196..5a3770fbd770 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -307,12 +307,9 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
307 307
308static bool drm_edid_is_zero(u8 *in_edid, int length) 308static bool drm_edid_is_zero(u8 *in_edid, int length)
309{ 309{
310 int i; 310 if (memchr_inv(in_edid, 0, length))
311 u32 *raw_edid = (u32 *)in_edid; 311 return false;
312 312
313 for (i = 0; i < length / 4; i++)
314 if (*(raw_edid + i) != 0)
315 return false;
316 return true; 313 return true;
317} 314}
318 315
@@ -1516,6 +1513,26 @@ u8 *drm_find_cea_extension(struct edid *edid)
1516} 1513}
1517EXPORT_SYMBOL(drm_find_cea_extension); 1514EXPORT_SYMBOL(drm_find_cea_extension);
1518 1515
1516/*
1517 * Looks for a CEA mode matching given drm_display_mode.
1518 * Returns its CEA Video ID code, or 0 if not found.
1519 */
1520u8 drm_match_cea_mode(struct drm_display_mode *to_match)
1521{
1522 struct drm_display_mode *cea_mode;
1523 u8 mode;
1524
1525 for (mode = 0; mode < drm_num_cea_modes; mode++) {
1526 cea_mode = (struct drm_display_mode *)&edid_cea_modes[mode];
1527
1528 if (drm_mode_equal(to_match, cea_mode))
1529 return mode + 1;
1530 }
1531 return 0;
1532}
1533EXPORT_SYMBOL(drm_match_cea_mode);
1534
1535
1519static int 1536static int
1520do_cea_modes (struct drm_connector *connector, u8 *db, u8 len) 1537do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
1521{ 1538{
@@ -1622,7 +1639,7 @@ parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
1622 if (len >= 12) 1639 if (len >= 12)
1623 connector->audio_latency[1] = db[12]; 1640 connector->audio_latency[1] = db[12];
1624 1641
1625 DRM_LOG_KMS("HDMI: DVI dual %d, " 1642 DRM_DEBUG_KMS("HDMI: DVI dual %d, "
1626 "max TMDS clock %d, " 1643 "max TMDS clock %d, "
1627 "latency present %d %d, " 1644 "latency present %d %d, "
1628 "video latency %d %d, " 1645 "video latency %d %d, "
@@ -2062,3 +2079,22 @@ int drm_add_modes_noedid(struct drm_connector *connector,
2062 return num_modes; 2079 return num_modes;
2063} 2080}
2064EXPORT_SYMBOL(drm_add_modes_noedid); 2081EXPORT_SYMBOL(drm_add_modes_noedid);
2082
2083/**
2084 * drm_mode_cea_vic - return the CEA-861 VIC of a given mode
2085 * @mode: mode
2086 *
2087 * RETURNS:
2088 * The VIC number, 0 in case it's not a CEA-861 mode.
2089 */
2090uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode)
2091{
2092 uint8_t i;
2093
2094 for (i = 0; i < drm_num_cea_modes; i++)
2095 if (drm_mode_equal(mode, &edid_cea_modes[i]))
2096 return i + 1;
2097
2098 return 0;
2099}
2100EXPORT_SYMBOL(drm_mode_cea_vic);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 4d58d7e6af3f..954d175bd7fa 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -27,6 +27,8 @@
27 * Dave Airlie <airlied@linux.ie> 27 * Dave Airlie <airlied@linux.ie>
28 * Jesse Barnes <jesse.barnes@intel.com> 28 * Jesse Barnes <jesse.barnes@intel.com>
29 */ 29 */
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
30#include <linux/kernel.h> 32#include <linux/kernel.h>
31#include <linux/sysrq.h> 33#include <linux/sysrq.h>
32#include <linux/slab.h> 34#include <linux/slab.h>
@@ -43,6 +45,15 @@ MODULE_LICENSE("GPL and additional rights");
43 45
44static LIST_HEAD(kernel_fb_helper_list); 46static LIST_HEAD(kernel_fb_helper_list);
45 47
48/**
49 * DOC: fbdev helpers
50 *
51 * The fb helper functions are useful to provide an fbdev on top of a drm kernel
52 * mode setting driver. They can be used mostly independantely from the crtc
53 * helper functions used by many drivers to implement the kernel mode setting
54 * interfaces.
55 */
56
46/* simple single crtc case helper function */ 57/* simple single crtc case helper function */
47int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) 58int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
48{ 59{
@@ -95,10 +106,16 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
95 if (mode->force) { 106 if (mode->force) {
96 const char *s; 107 const char *s;
97 switch (mode->force) { 108 switch (mode->force) {
98 case DRM_FORCE_OFF: s = "OFF"; break; 109 case DRM_FORCE_OFF:
99 case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break; 110 s = "OFF";
111 break;
112 case DRM_FORCE_ON_DIGITAL:
113 s = "ON - dig";
114 break;
100 default: 115 default:
101 case DRM_FORCE_ON: s = "ON"; break; 116 case DRM_FORCE_ON:
117 s = "ON";
118 break;
102 } 119 }
103 120
104 DRM_INFO("forcing %s connector %s\n", 121 DRM_INFO("forcing %s connector %s\n",
@@ -265,7 +282,7 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
265 if (panic_timeout < 0) 282 if (panic_timeout < 0)
266 return 0; 283 return 0;
267 284
268 printk(KERN_ERR "panic occurred, switching back to text console\n"); 285 pr_err("panic occurred, switching back to text console\n");
269 return drm_fb_helper_force_kernel_mode(); 286 return drm_fb_helper_force_kernel_mode();
270} 287}
271EXPORT_SYMBOL(drm_fb_helper_panic); 288EXPORT_SYMBOL(drm_fb_helper_panic);
@@ -331,7 +348,7 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
331 for (j = 0; j < fb_helper->connector_count; j++) { 348 for (j = 0; j < fb_helper->connector_count; j++) {
332 connector = fb_helper->connector_info[j]->connector; 349 connector = fb_helper->connector_info[j]->connector;
333 connector->funcs->dpms(connector, dpms_mode); 350 connector->funcs->dpms(connector, dpms_mode);
334 drm_connector_property_set_value(connector, 351 drm_object_property_set_value(&connector->base,
335 dev->mode_config.dpms_property, dpms_mode); 352 dev->mode_config.dpms_property, dpms_mode);
336 } 353 }
337 } 354 }
@@ -433,7 +450,7 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
433 if (!list_empty(&fb_helper->kernel_fb_list)) { 450 if (!list_empty(&fb_helper->kernel_fb_list)) {
434 list_del(&fb_helper->kernel_fb_list); 451 list_del(&fb_helper->kernel_fb_list);
435 if (list_empty(&kernel_fb_helper_list)) { 452 if (list_empty(&kernel_fb_helper_list)) {
436 printk(KERN_INFO "drm: unregistered panic notifier\n"); 453 pr_info("drm: unregistered panic notifier\n");
437 atomic_notifier_chain_unregister(&panic_notifier_list, 454 atomic_notifier_chain_unregister(&panic_notifier_list,
438 &paniced); 455 &paniced);
439 unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); 456 unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
@@ -724,9 +741,9 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
724 741
725 /* if driver picks 8 or 16 by default use that 742 /* if driver picks 8 or 16 by default use that
726 for both depth/bpp */ 743 for both depth/bpp */
727 if (preferred_bpp != sizes.surface_bpp) { 744 if (preferred_bpp != sizes.surface_bpp)
728 sizes.surface_depth = sizes.surface_bpp = preferred_bpp; 745 sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
729 } 746
730 /* first up get a count of crtcs now in use and new min/maxes width/heights */ 747 /* first up get a count of crtcs now in use and new min/maxes width/heights */
731 for (i = 0; i < fb_helper->connector_count; i++) { 748 for (i = 0; i < fb_helper->connector_count; i++) {
732 struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i]; 749 struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
@@ -794,18 +811,16 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
794 info = fb_helper->fbdev; 811 info = fb_helper->fbdev;
795 812
796 /* set the fb pointer */ 813 /* set the fb pointer */
797 for (i = 0; i < fb_helper->crtc_count; i++) { 814 for (i = 0; i < fb_helper->crtc_count; i++)
798 fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb; 815 fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
799 }
800 816
801 if (new_fb) { 817 if (new_fb) {
802 info->var.pixclock = 0; 818 info->var.pixclock = 0;
803 if (register_framebuffer(info) < 0) { 819 if (register_framebuffer(info) < 0)
804 return -EINVAL; 820 return -EINVAL;
805 }
806 821
807 printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, 822 dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
808 info->fix.id); 823 info->node, info->fix.id);
809 824
810 } else { 825 } else {
811 drm_fb_helper_set_par(info); 826 drm_fb_helper_set_par(info);
@@ -814,7 +829,7 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
814 /* Switch back to kernel console on panic */ 829 /* Switch back to kernel console on panic */
815 /* multi card linked list maybe */ 830 /* multi card linked list maybe */
816 if (list_empty(&kernel_fb_helper_list)) { 831 if (list_empty(&kernel_fb_helper_list)) {
817 printk(KERN_INFO "drm: registered panic notifier\n"); 832 dev_info(fb_helper->dev->dev, "registered panic notifier\n");
818 atomic_notifier_chain_register(&panic_notifier_list, 833 atomic_notifier_chain_register(&panic_notifier_list,
819 &paniced); 834 &paniced);
820 register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); 835 register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
@@ -1002,11 +1017,11 @@ static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
1002{ 1017{
1003 bool enable; 1018 bool enable;
1004 1019
1005 if (strict) { 1020 if (strict)
1006 enable = connector->status == connector_status_connected; 1021 enable = connector->status == connector_status_connected;
1007 } else { 1022 else
1008 enable = connector->status != connector_status_disconnected; 1023 enable = connector->status != connector_status_disconnected;
1009 } 1024
1010 return enable; 1025 return enable;
1011} 1026}
1012 1027
@@ -1191,9 +1206,8 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
1191 for (c = 0; c < fb_helper->crtc_count; c++) { 1206 for (c = 0; c < fb_helper->crtc_count; c++) {
1192 crtc = &fb_helper->crtc_info[c]; 1207 crtc = &fb_helper->crtc_info[c];
1193 1208
1194 if ((encoder->possible_crtcs & (1 << c)) == 0) { 1209 if ((encoder->possible_crtcs & (1 << c)) == 0)
1195 continue; 1210 continue;
1196 }
1197 1211
1198 for (o = 0; o < n; o++) 1212 for (o = 0; o < n; o++)
1199 if (best_crtcs[o] == crtc) 1213 if (best_crtcs[o] == crtc)
@@ -1246,6 +1260,11 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
1246 sizeof(struct drm_display_mode *), GFP_KERNEL); 1260 sizeof(struct drm_display_mode *), GFP_KERNEL);
1247 enabled = kcalloc(dev->mode_config.num_connector, 1261 enabled = kcalloc(dev->mode_config.num_connector,
1248 sizeof(bool), GFP_KERNEL); 1262 sizeof(bool), GFP_KERNEL);
1263 if (!crtcs || !modes || !enabled) {
1264 DRM_ERROR("Memory allocation failed\n");
1265 goto out;
1266 }
1267
1249 1268
1250 drm_enable_connectors(fb_helper, enabled); 1269 drm_enable_connectors(fb_helper, enabled);
1251 1270
@@ -1284,6 +1303,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
1284 } 1303 }
1285 } 1304 }
1286 1305
1306out:
1287 kfree(crtcs); 1307 kfree(crtcs);
1288 kfree(modes); 1308 kfree(modes);
1289 kfree(enabled); 1309 kfree(enabled);
@@ -1291,12 +1311,14 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
1291 1311
1292/** 1312/**
1293 * drm_helper_initial_config - setup a sane initial connector configuration 1313 * drm_helper_initial_config - setup a sane initial connector configuration
1294 * @dev: DRM device 1314 * @fb_helper: fb_helper device struct
1315 * @bpp_sel: bpp value to use for the framebuffer configuration
1295 * 1316 *
1296 * LOCKING: 1317 * LOCKING:
1297 * Called at init time, must take mode config lock. 1318 * Called at init time by the driver to set up the @fb_helper initial
1319 * configuration, must take the mode config lock.
1298 * 1320 *
1299 * Scan the CRTCs and connectors and try to put together an initial setup. 1321 * Scans the CRTCs and connectors and tries to put together an initial setup.
1300 * At the moment, this is a cloned configuration across all heads with 1322 * At the moment, this is a cloned configuration across all heads with
1301 * a new framebuffer object as the backing store. 1323 * a new framebuffer object as the backing store.
1302 * 1324 *
@@ -1319,9 +1341,9 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
1319 /* 1341 /*
1320 * we shouldn't end up with no modes here. 1342 * we shouldn't end up with no modes here.
1321 */ 1343 */
1322 if (count == 0) { 1344 if (count == 0)
1323 printk(KERN_INFO "No connectors reported connected with modes\n"); 1345 dev_info(fb_helper->dev->dev, "No connectors reported connected with modes\n");
1324 } 1346
1325 drm_setup_crtcs(fb_helper); 1347 drm_setup_crtcs(fb_helper);
1326 1348
1327 return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); 1349 return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
@@ -1330,7 +1352,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
1330 1352
1331/** 1353/**
1332 * drm_fb_helper_hotplug_event - respond to a hotplug notification by 1354 * drm_fb_helper_hotplug_event - respond to a hotplug notification by
1333 * probing all the outputs attached to the fb. 1355 * probing all the outputs attached to the fb
1334 * @fb_helper: the drm_fb_helper 1356 * @fb_helper: the drm_fb_helper
1335 * 1357 *
1336 * LOCKING: 1358 * LOCKING:
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 7ef1b673e1be..133b4132983e 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -121,6 +121,8 @@ int drm_open(struct inode *inode, struct file *filp)
121 int minor_id = iminor(inode); 121 int minor_id = iminor(inode);
122 struct drm_minor *minor; 122 struct drm_minor *minor;
123 int retcode = 0; 123 int retcode = 0;
124 int need_setup = 0;
125 struct address_space *old_mapping;
124 126
125 minor = idr_find(&drm_minors_idr, minor_id); 127 minor = idr_find(&drm_minors_idr, minor_id);
126 if (!minor) 128 if (!minor)
@@ -132,23 +134,37 @@ int drm_open(struct inode *inode, struct file *filp)
132 if (drm_device_is_unplugged(dev)) 134 if (drm_device_is_unplugged(dev))
133 return -ENODEV; 135 return -ENODEV;
134 136
137 if (!dev->open_count++)
138 need_setup = 1;
139 mutex_lock(&dev->struct_mutex);
140 old_mapping = dev->dev_mapping;
141 if (old_mapping == NULL)
142 dev->dev_mapping = &inode->i_data;
143 /* ihold ensures nobody can remove inode with our i_data */
144 ihold(container_of(dev->dev_mapping, struct inode, i_data));
145 inode->i_mapping = dev->dev_mapping;
146 filp->f_mapping = dev->dev_mapping;
147 mutex_unlock(&dev->struct_mutex);
148
135 retcode = drm_open_helper(inode, filp, dev); 149 retcode = drm_open_helper(inode, filp, dev);
136 if (!retcode) { 150 if (retcode)
137 atomic_inc(&dev->counts[_DRM_STAT_OPENS]); 151 goto err_undo;
138 if (!dev->open_count++) 152 atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
139 retcode = drm_setup(dev); 153 if (need_setup) {
140 } 154 retcode = drm_setup(dev);
141 if (!retcode) { 155 if (retcode)
142 mutex_lock(&dev->struct_mutex); 156 goto err_undo;
143 if (dev->dev_mapping == NULL)
144 dev->dev_mapping = &inode->i_data;
145 /* ihold ensures nobody can remove inode with our i_data */
146 ihold(container_of(dev->dev_mapping, struct inode, i_data));
147 inode->i_mapping = dev->dev_mapping;
148 filp->f_mapping = dev->dev_mapping;
149 mutex_unlock(&dev->struct_mutex);
150 } 157 }
158 return 0;
151 159
160err_undo:
161 mutex_lock(&dev->struct_mutex);
162 filp->f_mapping = old_mapping;
163 inode->i_mapping = old_mapping;
164 iput(container_of(dev->dev_mapping, struct inode, i_data));
165 dev->dev_mapping = old_mapping;
166 mutex_unlock(&dev->struct_mutex);
167 dev->open_count--;
152 return retcode; 168 return retcode;
153} 169}
154EXPORT_SYMBOL(drm_open); 170EXPORT_SYMBOL(drm_open);
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index c3745c4d46d8..80254547a3f8 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -67,10 +67,8 @@ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
67 hashed_key = hash_long(key, ht->order); 67 hashed_key = hash_long(key, ht->order);
68 DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key); 68 DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
69 h_list = &ht->table[hashed_key]; 69 h_list = &ht->table[hashed_key];
70 hlist_for_each(list, h_list) { 70 hlist_for_each_entry(entry, list, h_list, head)
71 entry = hlist_entry(list, struct drm_hash_item, head);
72 DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key); 71 DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
73 }
74} 72}
75 73
76static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht, 74static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
@@ -83,8 +81,7 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
83 81
84 hashed_key = hash_long(key, ht->order); 82 hashed_key = hash_long(key, ht->order);
85 h_list = &ht->table[hashed_key]; 83 h_list = &ht->table[hashed_key];
86 hlist_for_each(list, h_list) { 84 hlist_for_each_entry(entry, list, h_list, head) {
87 entry = hlist_entry(list, struct drm_hash_item, head);
88 if (entry->key == key) 85 if (entry->key == key)
89 return list; 86 return list;
90 if (entry->key > key) 87 if (entry->key > key)
@@ -93,6 +90,24 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
93 return NULL; 90 return NULL;
94} 91}
95 92
93static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
94 unsigned long key)
95{
96 struct drm_hash_item *entry;
97 struct hlist_head *h_list;
98 struct hlist_node *list;
99 unsigned int hashed_key;
100
101 hashed_key = hash_long(key, ht->order);
102 h_list = &ht->table[hashed_key];
103 hlist_for_each_entry_rcu(entry, list, h_list, head) {
104 if (entry->key == key)
105 return list;
106 if (entry->key > key)
107 break;
108 }
109 return NULL;
110}
96 111
97int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item) 112int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
98{ 113{
@@ -105,8 +120,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
105 hashed_key = hash_long(key, ht->order); 120 hashed_key = hash_long(key, ht->order);
106 h_list = &ht->table[hashed_key]; 121 h_list = &ht->table[hashed_key];
107 parent = NULL; 122 parent = NULL;
108 hlist_for_each(list, h_list) { 123 hlist_for_each_entry(entry, list, h_list, head) {
109 entry = hlist_entry(list, struct drm_hash_item, head);
110 if (entry->key == key) 124 if (entry->key == key)
111 return -EINVAL; 125 return -EINVAL;
112 if (entry->key > key) 126 if (entry->key > key)
@@ -114,9 +128,9 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
114 parent = list; 128 parent = list;
115 } 129 }
116 if (parent) { 130 if (parent) {
117 hlist_add_after(parent, &item->head); 131 hlist_add_after_rcu(parent, &item->head);
118 } else { 132 } else {
119 hlist_add_head(&item->head, h_list); 133 hlist_add_head_rcu(&item->head, h_list);
120 } 134 }
121 return 0; 135 return 0;
122} 136}
@@ -156,7 +170,7 @@ int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
156{ 170{
157 struct hlist_node *list; 171 struct hlist_node *list;
158 172
159 list = drm_ht_find_key(ht, key); 173 list = drm_ht_find_key_rcu(ht, key);
160 if (!list) 174 if (!list)
161 return -EINVAL; 175 return -EINVAL;
162 176
@@ -171,7 +185,7 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
171 185
172 list = drm_ht_find_key(ht, key); 186 list = drm_ht_find_key(ht, key);
173 if (list) { 187 if (list) {
174 hlist_del_init(list); 188 hlist_del_init_rcu(list);
175 return 0; 189 return 0;
176 } 190 }
177 return -EINVAL; 191 return -EINVAL;
@@ -179,7 +193,7 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
179 193
180int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item) 194int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
181{ 195{
182 hlist_del_init(&item->head); 196 hlist_del_init_rcu(&item->head);
183 return 0; 197 return 0;
184} 198}
185EXPORT_SYMBOL(drm_ht_remove_item); 199EXPORT_SYMBOL(drm_ht_remove_item);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 23dd97506f28..e77bd8b57df2 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -287,6 +287,9 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
287 req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0; 287 req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
288 req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0; 288 req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
289 break; 289 break;
290 case DRM_CAP_TIMESTAMP_MONOTONIC:
291 req->value = drm_timestamp_monotonic;
292 break;
290 default: 293 default:
291 return -EINVAL; 294 return -EINVAL;
292 } 295 }
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 3a3d0ce891b9..19c01ca3cc76 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -106,6 +106,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
106 s64 diff_ns; 106 s64 diff_ns;
107 int vblrc; 107 int vblrc;
108 struct timeval tvblank; 108 struct timeval tvblank;
109 int count = DRM_TIMESTAMP_MAXRETRIES;
109 110
110 /* Prevent vblank irq processing while disabling vblank irqs, 111 /* Prevent vblank irq processing while disabling vblank irqs,
111 * so no updates of timestamps or count can happen after we've 112 * so no updates of timestamps or count can happen after we've
@@ -131,7 +132,10 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
131 do { 132 do {
132 dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc); 133 dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
133 vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0); 134 vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
134 } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc)); 135 } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
136
137 if (!count)
138 vblrc = 0;
135 139
136 /* Compute time difference to stored timestamp of last vblank 140 /* Compute time difference to stored timestamp of last vblank
137 * as updated by last invocation of drm_handle_vblank() in vblank irq. 141 * as updated by last invocation of drm_handle_vblank() in vblank irq.
@@ -576,7 +580,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
576 unsigned flags, 580 unsigned flags,
577 struct drm_crtc *refcrtc) 581 struct drm_crtc *refcrtc)
578{ 582{
579 struct timeval stime, raw_time; 583 ktime_t stime, etime, mono_time_offset;
584 struct timeval tv_etime;
580 struct drm_display_mode *mode; 585 struct drm_display_mode *mode;
581 int vbl_status, vtotal, vdisplay; 586 int vbl_status, vtotal, vdisplay;
582 int vpos, hpos, i; 587 int vpos, hpos, i;
@@ -625,13 +630,15 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
625 preempt_disable(); 630 preempt_disable();
626 631
627 /* Get system timestamp before query. */ 632 /* Get system timestamp before query. */
628 do_gettimeofday(&stime); 633 stime = ktime_get();
629 634
630 /* Get vertical and horizontal scanout pos. vpos, hpos. */ 635 /* Get vertical and horizontal scanout pos. vpos, hpos. */
631 vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos); 636 vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
632 637
633 /* Get system timestamp after query. */ 638 /* Get system timestamp after query. */
634 do_gettimeofday(&raw_time); 639 etime = ktime_get();
640 if (!drm_timestamp_monotonic)
641 mono_time_offset = ktime_get_monotonic_offset();
635 642
636 preempt_enable(); 643 preempt_enable();
637 644
@@ -642,7 +649,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
642 return -EIO; 649 return -EIO;
643 } 650 }
644 651
645 duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime); 652 duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
646 653
647 /* Accept result with < max_error nsecs timing uncertainty. */ 654 /* Accept result with < max_error nsecs timing uncertainty. */
648 if (duration_ns <= (s64) *max_error) 655 if (duration_ns <= (s64) *max_error)
@@ -689,14 +696,20 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
689 vbl_status |= 0x8; 696 vbl_status |= 0x8;
690 } 697 }
691 698
699 if (!drm_timestamp_monotonic)
700 etime = ktime_sub(etime, mono_time_offset);
701
702 /* save this only for debugging purposes */
703 tv_etime = ktime_to_timeval(etime);
692 /* Subtract time delta from raw timestamp to get final 704 /* Subtract time delta from raw timestamp to get final
693 * vblank_time timestamp for end of vblank. 705 * vblank_time timestamp for end of vblank.
694 */ 706 */
695 *vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns); 707 etime = ktime_sub_ns(etime, delta_ns);
708 *vblank_time = ktime_to_timeval(etime);
696 709
697 DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n", 710 DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
698 crtc, (int)vbl_status, hpos, vpos, 711 crtc, (int)vbl_status, hpos, vpos,
699 (long)raw_time.tv_sec, (long)raw_time.tv_usec, 712 (long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
700 (long)vblank_time->tv_sec, (long)vblank_time->tv_usec, 713 (long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
701 (int)duration_ns/1000, i); 714 (int)duration_ns/1000, i);
702 715
@@ -708,6 +721,17 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
708} 721}
709EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos); 722EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
710 723
724static struct timeval get_drm_timestamp(void)
725{
726 ktime_t now;
727
728 now = ktime_get();
729 if (!drm_timestamp_monotonic)
730 now = ktime_sub(now, ktime_get_monotonic_offset());
731
732 return ktime_to_timeval(now);
733}
734
711/** 735/**
712 * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent 736 * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
713 * vblank interval. 737 * vblank interval.
@@ -745,9 +769,9 @@ u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
745 } 769 }
746 770
747 /* GPU high precision timestamp query unsupported or failed. 771 /* GPU high precision timestamp query unsupported or failed.
748 * Return gettimeofday timestamp as best estimate. 772 * Return current monotonic/gettimeofday timestamp as best estimate.
749 */ 773 */
750 do_gettimeofday(tvblank); 774 *tvblank = get_drm_timestamp();
751 775
752 return 0; 776 return 0;
753} 777}
@@ -802,6 +826,47 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
802} 826}
803EXPORT_SYMBOL(drm_vblank_count_and_time); 827EXPORT_SYMBOL(drm_vblank_count_and_time);
804 828
829static void send_vblank_event(struct drm_device *dev,
830 struct drm_pending_vblank_event *e,
831 unsigned long seq, struct timeval *now)
832{
833 WARN_ON_SMP(!spin_is_locked(&dev->event_lock));
834 e->event.sequence = seq;
835 e->event.tv_sec = now->tv_sec;
836 e->event.tv_usec = now->tv_usec;
837
838 list_add_tail(&e->base.link,
839 &e->base.file_priv->event_list);
840 wake_up_interruptible(&e->base.file_priv->event_wait);
841 trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
842 e->event.sequence);
843}
844
845/**
846 * drm_send_vblank_event - helper to send vblank event after pageflip
847 * @dev: DRM device
848 * @crtc: CRTC in question
849 * @e: the event to send
850 *
851 * Updates sequence # and timestamp on event, and sends it to userspace.
852 * Caller must hold event lock.
853 */
854void drm_send_vblank_event(struct drm_device *dev, int crtc,
855 struct drm_pending_vblank_event *e)
856{
857 struct timeval now;
858 unsigned int seq;
859 if (crtc >= 0) {
860 seq = drm_vblank_count_and_time(dev, crtc, &now);
861 } else {
862 seq = 0;
863
864 now = get_drm_timestamp();
865 }
866 send_vblank_event(dev, e, seq, &now);
867}
868EXPORT_SYMBOL(drm_send_vblank_event);
869
805/** 870/**
806 * drm_update_vblank_count - update the master vblank counter 871 * drm_update_vblank_count - update the master vblank counter
807 * @dev: DRM device 872 * @dev: DRM device
@@ -936,6 +1001,13 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
936} 1001}
937EXPORT_SYMBOL(drm_vblank_put); 1002EXPORT_SYMBOL(drm_vblank_put);
938 1003
1004/**
1005 * drm_vblank_off - disable vblank events on a CRTC
1006 * @dev: DRM device
1007 * @crtc: CRTC in question
1008 *
1009 * Caller must hold event lock.
1010 */
939void drm_vblank_off(struct drm_device *dev, int crtc) 1011void drm_vblank_off(struct drm_device *dev, int crtc)
940{ 1012{
941 struct drm_pending_vblank_event *e, *t; 1013 struct drm_pending_vblank_event *e, *t;
@@ -949,22 +1021,19 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
949 1021
950 /* Send any queued vblank events, lest the natives grow disquiet */ 1022 /* Send any queued vblank events, lest the natives grow disquiet */
951 seq = drm_vblank_count_and_time(dev, crtc, &now); 1023 seq = drm_vblank_count_and_time(dev, crtc, &now);
1024
1025 spin_lock(&dev->event_lock);
952 list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { 1026 list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
953 if (e->pipe != crtc) 1027 if (e->pipe != crtc)
954 continue; 1028 continue;
955 DRM_DEBUG("Sending premature vblank event on disable: \ 1029 DRM_DEBUG("Sending premature vblank event on disable: \
956 wanted %d, current %d\n", 1030 wanted %d, current %d\n",
957 e->event.sequence, seq); 1031 e->event.sequence, seq);
958 1032 list_del(&e->base.link);
959 e->event.sequence = seq;
960 e->event.tv_sec = now.tv_sec;
961 e->event.tv_usec = now.tv_usec;
962 drm_vblank_put(dev, e->pipe); 1033 drm_vblank_put(dev, e->pipe);
963 list_move_tail(&e->base.link, &e->base.file_priv->event_list); 1034 send_vblank_event(dev, e, seq, &now);
964 wake_up_interruptible(&e->base.file_priv->event_wait);
965 trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
966 e->event.sequence);
967 } 1035 }
1036 spin_unlock(&dev->event_lock);
968 1037
969 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 1038 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
970} 1039}
@@ -1107,15 +1176,9 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
1107 1176
1108 e->event.sequence = vblwait->request.sequence; 1177 e->event.sequence = vblwait->request.sequence;
1109 if ((seq - vblwait->request.sequence) <= (1 << 23)) { 1178 if ((seq - vblwait->request.sequence) <= (1 << 23)) {
1110 e->event.sequence = seq;
1111 e->event.tv_sec = now.tv_sec;
1112 e->event.tv_usec = now.tv_usec;
1113 drm_vblank_put(dev, pipe); 1179 drm_vblank_put(dev, pipe);
1114 list_add_tail(&e->base.link, &e->base.file_priv->event_list); 1180 send_vblank_event(dev, e, seq, &now);
1115 wake_up_interruptible(&e->base.file_priv->event_wait);
1116 vblwait->reply.sequence = seq; 1181 vblwait->reply.sequence = seq;
1117 trace_drm_vblank_event_delivered(current->pid, pipe,
1118 vblwait->request.sequence);
1119 } else { 1182 } else {
1120 /* drm_handle_vblank_events will call drm_vblank_put */ 1183 /* drm_handle_vblank_events will call drm_vblank_put */
1121 list_add_tail(&e->base.link, &dev->vblank_event_list); 1184 list_add_tail(&e->base.link, &dev->vblank_event_list);
@@ -1256,14 +1319,9 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
1256 DRM_DEBUG("vblank event on %d, current %d\n", 1319 DRM_DEBUG("vblank event on %d, current %d\n",
1257 e->event.sequence, seq); 1320 e->event.sequence, seq);
1258 1321
1259 e->event.sequence = seq; 1322 list_del(&e->base.link);
1260 e->event.tv_sec = now.tv_sec;
1261 e->event.tv_usec = now.tv_usec;
1262 drm_vblank_put(dev, e->pipe); 1323 drm_vblank_put(dev, e->pipe);
1263 list_move_tail(&e->base.link, &e->base.file_priv->event_list); 1324 send_vblank_event(dev, e, seq, &now);
1264 wake_up_interruptible(&e->base.file_priv->event_wait);
1265 trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
1266 e->event.sequence);
1267 } 1325 }
1268 1326
1269 spin_unlock_irqrestore(&dev->event_lock, flags); 1327 spin_unlock_irqrestore(&dev->event_lock, flags);
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 0761a03cdbb2..2aa331499f81 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -184,19 +184,27 @@ EXPORT_SYMBOL(drm_mm_get_block_generic);
184 * -ENOSPC if no suitable free area is available. The preallocated memory node 184 * -ENOSPC if no suitable free area is available. The preallocated memory node
185 * must be cleared. 185 * must be cleared.
186 */ 186 */
187int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node, 187int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
188 unsigned long size, unsigned alignment) 188 unsigned long size, unsigned alignment,
189 unsigned long color)
189{ 190{
190 struct drm_mm_node *hole_node; 191 struct drm_mm_node *hole_node;
191 192
192 hole_node = drm_mm_search_free(mm, size, alignment, false); 193 hole_node = drm_mm_search_free_generic(mm, size, alignment,
194 color, 0);
193 if (!hole_node) 195 if (!hole_node)
194 return -ENOSPC; 196 return -ENOSPC;
195 197
196 drm_mm_insert_helper(hole_node, node, size, alignment, 0); 198 drm_mm_insert_helper(hole_node, node, size, alignment, color);
197
198 return 0; 199 return 0;
199} 200}
201EXPORT_SYMBOL(drm_mm_insert_node_generic);
202
203int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
204 unsigned long size, unsigned alignment)
205{
206 return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
207}
200EXPORT_SYMBOL(drm_mm_insert_node); 208EXPORT_SYMBOL(drm_mm_insert_node);
201 209
202static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, 210static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
@@ -213,11 +221,13 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
213 221
214 BUG_ON(!hole_node->hole_follows || node->allocated); 222 BUG_ON(!hole_node->hole_follows || node->allocated);
215 223
216 if (mm->color_adjust)
217 mm->color_adjust(hole_node, color, &adj_start, &adj_end);
218
219 if (adj_start < start) 224 if (adj_start < start)
220 adj_start = start; 225 adj_start = start;
226 if (adj_end > end)
227 adj_end = end;
228
229 if (mm->color_adjust)
230 mm->color_adjust(hole_node, color, &adj_start, &adj_end);
221 231
222 if (alignment) { 232 if (alignment) {
223 unsigned tmp = adj_start % alignment; 233 unsigned tmp = adj_start % alignment;
@@ -275,22 +285,31 @@ EXPORT_SYMBOL(drm_mm_get_block_range_generic);
275 * -ENOSPC if no suitable free area is available. This is for range 285 * -ENOSPC if no suitable free area is available. This is for range
276 * restricted allocations. The preallocated memory node must be cleared. 286 * restricted allocations. The preallocated memory node must be cleared.
277 */ 287 */
278int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node, 288int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
279 unsigned long size, unsigned alignment, 289 unsigned long size, unsigned alignment, unsigned long color,
280 unsigned long start, unsigned long end) 290 unsigned long start, unsigned long end)
281{ 291{
282 struct drm_mm_node *hole_node; 292 struct drm_mm_node *hole_node;
283 293
284 hole_node = drm_mm_search_free_in_range(mm, size, alignment, 294 hole_node = drm_mm_search_free_in_range_generic(mm,
285 start, end, false); 295 size, alignment, color,
296 start, end, 0);
286 if (!hole_node) 297 if (!hole_node)
287 return -ENOSPC; 298 return -ENOSPC;
288 299
289 drm_mm_insert_helper_range(hole_node, node, size, alignment, 0, 300 drm_mm_insert_helper_range(hole_node, node,
301 size, alignment, color,
290 start, end); 302 start, end);
291
292 return 0; 303 return 0;
293} 304}
305EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
306
307int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
308 unsigned long size, unsigned alignment,
309 unsigned long start, unsigned long end)
310{
311 return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
312}
294EXPORT_SYMBOL(drm_mm_insert_node_in_range); 313EXPORT_SYMBOL(drm_mm_insert_node_in_range);
295 314
296/** 315/**
@@ -489,7 +508,7 @@ void drm_mm_init_scan(struct drm_mm *mm,
489 mm->scan_size = size; 508 mm->scan_size = size;
490 mm->scanned_blocks = 0; 509 mm->scanned_blocks = 0;
491 mm->scan_hit_start = 0; 510 mm->scan_hit_start = 0;
492 mm->scan_hit_size = 0; 511 mm->scan_hit_end = 0;
493 mm->scan_check_range = 0; 512 mm->scan_check_range = 0;
494 mm->prev_scanned_node = NULL; 513 mm->prev_scanned_node = NULL;
495} 514}
@@ -516,7 +535,7 @@ void drm_mm_init_scan_with_range(struct drm_mm *mm,
516 mm->scan_size = size; 535 mm->scan_size = size;
517 mm->scanned_blocks = 0; 536 mm->scanned_blocks = 0;
518 mm->scan_hit_start = 0; 537 mm->scan_hit_start = 0;
519 mm->scan_hit_size = 0; 538 mm->scan_hit_end = 0;
520 mm->scan_start = start; 539 mm->scan_start = start;
521 mm->scan_end = end; 540 mm->scan_end = end;
522 mm->scan_check_range = 1; 541 mm->scan_check_range = 1;
@@ -535,8 +554,7 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
535 struct drm_mm *mm = node->mm; 554 struct drm_mm *mm = node->mm;
536 struct drm_mm_node *prev_node; 555 struct drm_mm_node *prev_node;
537 unsigned long hole_start, hole_end; 556 unsigned long hole_start, hole_end;
538 unsigned long adj_start; 557 unsigned long adj_start, adj_end;
539 unsigned long adj_end;
540 558
541 mm->scanned_blocks++; 559 mm->scanned_blocks++;
542 560
@@ -553,14 +571,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
553 node->node_list.next = &mm->prev_scanned_node->node_list; 571 node->node_list.next = &mm->prev_scanned_node->node_list;
554 mm->prev_scanned_node = node; 572 mm->prev_scanned_node = node;
555 573
556 hole_start = drm_mm_hole_node_start(prev_node); 574 adj_start = hole_start = drm_mm_hole_node_start(prev_node);
557 hole_end = drm_mm_hole_node_end(prev_node); 575 adj_end = hole_end = drm_mm_hole_node_end(prev_node);
558
559 adj_start = hole_start;
560 adj_end = hole_end;
561
562 if (mm->color_adjust)
563 mm->color_adjust(prev_node, mm->scan_color, &adj_start, &adj_end);
564 576
565 if (mm->scan_check_range) { 577 if (mm->scan_check_range) {
566 if (adj_start < mm->scan_start) 578 if (adj_start < mm->scan_start)
@@ -569,11 +581,14 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
569 adj_end = mm->scan_end; 581 adj_end = mm->scan_end;
570 } 582 }
571 583
584 if (mm->color_adjust)
585 mm->color_adjust(prev_node, mm->scan_color,
586 &adj_start, &adj_end);
587
572 if (check_free_hole(adj_start, adj_end, 588 if (check_free_hole(adj_start, adj_end,
573 mm->scan_size, mm->scan_alignment)) { 589 mm->scan_size, mm->scan_alignment)) {
574 mm->scan_hit_start = hole_start; 590 mm->scan_hit_start = hole_start;
575 mm->scan_hit_size = hole_end; 591 mm->scan_hit_end = hole_end;
576
577 return 1; 592 return 1;
578 } 593 }
579 594
@@ -609,19 +624,10 @@ int drm_mm_scan_remove_block(struct drm_mm_node *node)
609 node_list); 624 node_list);
610 625
611 prev_node->hole_follows = node->scanned_preceeds_hole; 626 prev_node->hole_follows = node->scanned_preceeds_hole;
612 INIT_LIST_HEAD(&node->node_list);
613 list_add(&node->node_list, &prev_node->node_list); 627 list_add(&node->node_list, &prev_node->node_list);
614 628
615 /* Only need to check for containement because start&size for the 629 return (drm_mm_hole_node_end(node) > mm->scan_hit_start &&
616 * complete resulting free block (not just the desired part) is 630 node->start < mm->scan_hit_end);
617 * stored. */
618 if (node->start >= mm->scan_hit_start &&
619 node->start + node->size
620 <= mm->scan_hit_start + mm->scan_hit_size) {
621 return 1;
622 }
623
624 return 0;
625} 631}
626EXPORT_SYMBOL(drm_mm_scan_remove_block); 632EXPORT_SYMBOL(drm_mm_scan_remove_block);
627 633
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 59450f39bf96..d8da30e90db5 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -46,7 +46,7 @@
46 * 46 *
47 * Describe @mode using DRM_DEBUG. 47 * Describe @mode using DRM_DEBUG.
48 */ 48 */
49void drm_mode_debug_printmodeline(struct drm_display_mode *mode) 49void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
50{ 50{
51 DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d " 51 DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
52 "0x%x 0x%x\n", 52 "0x%x 0x%x\n",
@@ -558,7 +558,7 @@ EXPORT_SYMBOL(drm_mode_list_concat);
558 * RETURNS: 558 * RETURNS:
559 * @mode->hdisplay 559 * @mode->hdisplay
560 */ 560 */
561int drm_mode_width(struct drm_display_mode *mode) 561int drm_mode_width(const struct drm_display_mode *mode)
562{ 562{
563 return mode->hdisplay; 563 return mode->hdisplay;
564 564
@@ -579,7 +579,7 @@ EXPORT_SYMBOL(drm_mode_width);
579 * RETURNS: 579 * RETURNS:
580 * @mode->vdisplay 580 * @mode->vdisplay
581 */ 581 */
582int drm_mode_height(struct drm_display_mode *mode) 582int drm_mode_height(const struct drm_display_mode *mode)
583{ 583{
584 return mode->vdisplay; 584 return mode->vdisplay;
585} 585}
@@ -768,7 +768,7 @@ EXPORT_SYMBOL(drm_mode_duplicate);
768 * RETURNS: 768 * RETURNS:
769 * True if the modes are equal, false otherwise. 769 * True if the modes are equal, false otherwise.
770 */ 770 */
771bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2) 771bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
772{ 772{
773 /* do clock check convert to PICOS so fb modes get matched 773 /* do clock check convert to PICOS so fb modes get matched
774 * the same */ 774 * the same */
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index ba33144257e5..754bc96e10c7 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -470,7 +470,7 @@ int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
470{ 470{
471 struct pci_dev *root; 471 struct pci_dev *root;
472 int pos; 472 int pos;
473 u32 lnkcap, lnkcap2; 473 u32 lnkcap = 0, lnkcap2 = 0;
474 474
475 *mask = 0; 475 *mask = 0;
476 if (!dev->pdev) 476 if (!dev->pdev)
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index c236fd27eba6..200e104f1fa0 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -46,16 +46,24 @@ EXPORT_SYMBOL(drm_vblank_offdelay);
46unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */ 46unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
47EXPORT_SYMBOL(drm_timestamp_precision); 47EXPORT_SYMBOL(drm_timestamp_precision);
48 48
49/*
50 * Default to use monotonic timestamps for wait-for-vblank and page-flip
51 * complete events.
52 */
53unsigned int drm_timestamp_monotonic = 1;
54
49MODULE_AUTHOR(CORE_AUTHOR); 55MODULE_AUTHOR(CORE_AUTHOR);
50MODULE_DESCRIPTION(CORE_DESC); 56MODULE_DESCRIPTION(CORE_DESC);
51MODULE_LICENSE("GPL and additional rights"); 57MODULE_LICENSE("GPL and additional rights");
52MODULE_PARM_DESC(debug, "Enable debug output"); 58MODULE_PARM_DESC(debug, "Enable debug output");
53MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]"); 59MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
54MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]"); 60MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
61MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
55 62
56module_param_named(debug, drm_debug, int, 0600); 63module_param_named(debug, drm_debug, int, 0600);
57module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600); 64module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
58module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600); 65module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
66module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
59 67
60struct idr drm_minors_idr; 68struct idr drm_minors_idr;
61 69
@@ -221,20 +229,20 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
221 if (!file_priv->master) 229 if (!file_priv->master)
222 return -EINVAL; 230 return -EINVAL;
223 231
224 if (!file_priv->minor->master && 232 if (file_priv->minor->master)
225 file_priv->minor->master != file_priv->master) { 233 return -EINVAL;
226 mutex_lock(&dev->struct_mutex); 234
227 file_priv->minor->master = drm_master_get(file_priv->master); 235 mutex_lock(&dev->struct_mutex);
228 file_priv->is_master = 1; 236 file_priv->minor->master = drm_master_get(file_priv->master);
229 if (dev->driver->master_set) { 237 file_priv->is_master = 1;
230 ret = dev->driver->master_set(dev, file_priv, false); 238 if (dev->driver->master_set) {
231 if (unlikely(ret != 0)) { 239 ret = dev->driver->master_set(dev, file_priv, false);
232 file_priv->is_master = 0; 240 if (unlikely(ret != 0)) {
233 drm_master_put(&file_priv->minor->master); 241 file_priv->is_master = 0;
234 } 242 drm_master_put(&file_priv->minor->master);
235 } 243 }
236 mutex_unlock(&dev->struct_mutex);
237 } 244 }
245 mutex_unlock(&dev->struct_mutex);
238 246
239 return 0; 247 return 0;
240} 248}
@@ -492,10 +500,7 @@ void drm_put_dev(struct drm_device *dev)
492 drm_put_minor(&dev->primary); 500 drm_put_minor(&dev->primary);
493 501
494 list_del(&dev->driver_item); 502 list_del(&dev->driver_item);
495 if (dev->devname) { 503 kfree(dev->devname);
496 kfree(dev->devname);
497 dev->devname = NULL;
498 }
499 kfree(dev); 504 kfree(dev);
500} 505}
501EXPORT_SYMBOL(drm_put_dev); 506EXPORT_SYMBOL(drm_put_dev);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 05cd8fe062af..02296653a058 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -182,7 +182,7 @@ static ssize_t dpms_show(struct device *device,
182 uint64_t dpms_status; 182 uint64_t dpms_status;
183 int ret; 183 int ret;
184 184
185 ret = drm_connector_property_get_value(connector, 185 ret = drm_object_property_get_value(&connector->base,
186 dev->mode_config.dpms_property, 186 dev->mode_config.dpms_property,
187 &dpms_status); 187 &dpms_status);
188 if (ret) 188 if (ret)
@@ -277,7 +277,7 @@ static ssize_t subconnector_show(struct device *device,
277 return 0; 277 return 0;
278 } 278 }
279 279
280 ret = drm_connector_property_get_value(connector, prop, &subconnector); 280 ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
281 if (ret) 281 if (ret)
282 return 0; 282 return 0;
283 283
@@ -318,7 +318,7 @@ static ssize_t select_subconnector_show(struct device *device,
318 return 0; 318 return 0;
319 } 319 }
320 320
321 ret = drm_connector_property_get_value(connector, prop, &subconnector); 321 ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
322 if (ret) 322 if (ret)
323 return 0; 323 return 0;
324 324
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 59a26e577b57..1d1f1e5e33f0 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -1,6 +1,6 @@
1config DRM_EXYNOS 1config DRM_EXYNOS
2 tristate "DRM Support for Samsung SoC EXYNOS Series" 2 tristate "DRM Support for Samsung SoC EXYNOS Series"
3 depends on DRM && PLAT_SAMSUNG 3 depends on DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM)
4 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
5 select FB_CFB_FILLRECT 5 select FB_CFB_FILLRECT
6 select FB_CFB_COPYAREA 6 select FB_CFB_COPYAREA
@@ -10,6 +10,12 @@ config DRM_EXYNOS
10 Choose this option if you have a Samsung SoC EXYNOS chipset. 10 Choose this option if you have a Samsung SoC EXYNOS chipset.
11 If M is selected the module will be called exynosdrm. 11 If M is selected the module will be called exynosdrm.
12 12
13config DRM_EXYNOS_IOMMU
14 bool "EXYNOS DRM IOMMU Support"
15 depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
16 help
17 Choose this option if you want to use IOMMU feature for DRM.
18
13config DRM_EXYNOS_DMABUF 19config DRM_EXYNOS_DMABUF
14 bool "EXYNOS DRM DMABUF" 20 bool "EXYNOS DRM DMABUF"
15 depends on DRM_EXYNOS 21 depends on DRM_EXYNOS
@@ -39,3 +45,27 @@ config DRM_EXYNOS_G2D
39 depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D 45 depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
40 help 46 help
41 Choose this option if you want to use Exynos G2D for DRM. 47 Choose this option if you want to use Exynos G2D for DRM.
48
49config DRM_EXYNOS_IPP
50 bool "Exynos DRM IPP"
51 depends on DRM_EXYNOS
52 help
53 Choose this option if you want to use IPP feature for DRM.
54
55config DRM_EXYNOS_FIMC
56 bool "Exynos DRM FIMC"
57 depends on DRM_EXYNOS_IPP
58 help
59 Choose this option if you want to use Exynos FIMC for DRM.
60
61config DRM_EXYNOS_ROTATOR
62 bool "Exynos DRM Rotator"
63 depends on DRM_EXYNOS_IPP
64 help
65 Choose this option if you want to use Exynos Rotator for DRM.
66
67config DRM_EXYNOS_GSC
68 bool "Exynos DRM GSC"
69 depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5
70 help
71 Choose this option if you want to use Exynos GSC for DRM.
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index eb651ca8e2a8..639b49e1ec05 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -8,6 +8,7 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
8 exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \ 8 exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
9 exynos_drm_plane.o 9 exynos_drm_plane.o
10 10
11exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
11exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o 12exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
12exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o 13exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
13exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \ 14exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
@@ -15,5 +16,9 @@ exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
15 exynos_drm_hdmi.o 16 exynos_drm_hdmi.o
16exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o 17exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o
17exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o 18exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o
19exynosdrm-$(CONFIG_DRM_EXYNOS_IPP) += exynos_drm_ipp.o
20exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC) += exynos_drm_fimc.o
21exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR) += exynos_drm_rotator.o
22exynosdrm-$(CONFIG_DRM_EXYNOS_GSC) += exynos_drm_gsc.o
18 23
19obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o 24obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
index 37e6ec704e1d..4e9b5ba8edff 100644
--- a/drivers/gpu/drm/exynos/exynos_ddc.c
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -48,6 +48,7 @@ static struct i2c_device_id ddc_idtable[] = {
48 { }, 48 { },
49}; 49};
50 50
51#ifdef CONFIG_OF
51static struct of_device_id hdmiddc_match_types[] = { 52static struct of_device_id hdmiddc_match_types[] = {
52 { 53 {
53 .compatible = "samsung,exynos5-hdmiddc", 54 .compatible = "samsung,exynos5-hdmiddc",
@@ -55,15 +56,16 @@ static struct of_device_id hdmiddc_match_types[] = {
55 /* end node */ 56 /* end node */
56 } 57 }
57}; 58};
59#endif
58 60
59struct i2c_driver ddc_driver = { 61struct i2c_driver ddc_driver = {
60 .driver = { 62 .driver = {
61 .name = "exynos-hdmiddc", 63 .name = "exynos-hdmiddc",
62 .owner = THIS_MODULE, 64 .owner = THIS_MODULE,
63 .of_match_table = hdmiddc_match_types, 65 .of_match_table = of_match_ptr(hdmiddc_match_types),
64 }, 66 },
65 .id_table = ddc_idtable, 67 .id_table = ddc_idtable,
66 .probe = s5p_ddc_probe, 68 .probe = s5p_ddc_probe,
67 .remove = __devexit_p(s5p_ddc_remove), 69 .remove = s5p_ddc_remove,
68 .command = NULL, 70 .command = NULL,
69}; 71};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 118c117b3226..57affae9568b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -3,24 +3,10 @@
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd. 3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * Author: Inki Dae <inki.dae@samsung.com> 4 * Author: Inki Dae <inki.dae@samsung.com>
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * This program is free software; you can redistribute it and/or modify it
7 * copy of this software and associated documentation files (the "Software"), 7 * under the terms of the GNU General Public License as published by the
8 * to deal in the Software without restriction, including without limitation 8 * Free Software Foundation; either version 2 of the License, or (at your
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * option) any later version.
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */ 10 */
25 11
26#include <drm/drmP.h> 12#include <drm/drmP.h>
@@ -29,93 +15,103 @@
29#include "exynos_drm_drv.h" 15#include "exynos_drm_drv.h"
30#include "exynos_drm_gem.h" 16#include "exynos_drm_gem.h"
31#include "exynos_drm_buf.h" 17#include "exynos_drm_buf.h"
18#include "exynos_drm_iommu.h"
32 19
33static int lowlevel_buffer_allocate(struct drm_device *dev, 20static int lowlevel_buffer_allocate(struct drm_device *dev,
34 unsigned int flags, struct exynos_drm_gem_buf *buf) 21 unsigned int flags, struct exynos_drm_gem_buf *buf)
35{ 22{
36 dma_addr_t start_addr;
37 unsigned int npages, i = 0;
38 struct scatterlist *sgl;
39 int ret = 0; 23 int ret = 0;
24 enum dma_attr attr;
25 unsigned int nr_pages;
40 26
41 DRM_DEBUG_KMS("%s\n", __FILE__); 27 DRM_DEBUG_KMS("%s\n", __FILE__);
42 28
43 if (IS_NONCONTIG_BUFFER(flags)) {
44 DRM_DEBUG_KMS("not support allocation type.\n");
45 return -EINVAL;
46 }
47
48 if (buf->dma_addr) { 29 if (buf->dma_addr) {
49 DRM_DEBUG_KMS("already allocated.\n"); 30 DRM_DEBUG_KMS("already allocated.\n");
50 return 0; 31 return 0;
51 } 32 }
52 33
53 if (buf->size >= SZ_1M) { 34 init_dma_attrs(&buf->dma_attrs);
54 npages = buf->size >> SECTION_SHIFT;
55 buf->page_size = SECTION_SIZE;
56 } else if (buf->size >= SZ_64K) {
57 npages = buf->size >> 16;
58 buf->page_size = SZ_64K;
59 } else {
60 npages = buf->size >> PAGE_SHIFT;
61 buf->page_size = PAGE_SIZE;
62 }
63 35
64 buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL); 36 /*
65 if (!buf->sgt) { 37 * if EXYNOS_BO_CONTIG, fully physically contiguous memory
66 DRM_ERROR("failed to allocate sg table.\n"); 38 * region will be allocated else physically contiguous
67 return -ENOMEM; 39 * as possible.
68 } 40 */
41 if (!(flags & EXYNOS_BO_NONCONTIG))
42 dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);
69 43
70 ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL); 44 /*
71 if (ret < 0) { 45 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
72 DRM_ERROR("failed to initialize sg table.\n"); 46 * else cachable mapping.
73 kfree(buf->sgt); 47 */
74 buf->sgt = NULL; 48 if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
75 return -ENOMEM; 49 attr = DMA_ATTR_WRITE_COMBINE;
76 } 50 else
51 attr = DMA_ATTR_NON_CONSISTENT;
52
53 dma_set_attr(attr, &buf->dma_attrs);
54 dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
55
56 nr_pages = buf->size >> PAGE_SHIFT;
57
58 if (!is_drm_iommu_supported(dev)) {
59 dma_addr_t start_addr;
60 unsigned int i = 0;
61
62 buf->pages = kzalloc(sizeof(struct page) * nr_pages,
63 GFP_KERNEL);
64 if (!buf->pages) {
65 DRM_ERROR("failed to allocate pages.\n");
66 return -ENOMEM;
67 }
68
69 buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
70 &buf->dma_addr, GFP_KERNEL,
71 &buf->dma_attrs);
72 if (!buf->kvaddr) {
73 DRM_ERROR("failed to allocate buffer.\n");
74 kfree(buf->pages);
75 return -ENOMEM;
76 }
77
78 start_addr = buf->dma_addr;
79 while (i < nr_pages) {
80 buf->pages[i] = phys_to_page(start_addr);
81 start_addr += PAGE_SIZE;
82 i++;
83 }
84 } else {
77 85
78 buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size, 86 buf->pages = dma_alloc_attrs(dev->dev, buf->size,
79 &buf->dma_addr, GFP_KERNEL); 87 &buf->dma_addr, GFP_KERNEL,
80 if (!buf->kvaddr) { 88 &buf->dma_attrs);
81 DRM_ERROR("failed to allocate buffer.\n"); 89 if (!buf->pages) {
82 ret = -ENOMEM; 90 DRM_ERROR("failed to allocate buffer.\n");
83 goto err1; 91 return -ENOMEM;
92 }
84 } 93 }
85 94
86 buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL); 95 buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
87 if (!buf->pages) { 96 if (!buf->sgt) {
88 DRM_ERROR("failed to allocate pages.\n"); 97 DRM_ERROR("failed to get sg table.\n");
89 ret = -ENOMEM; 98 ret = -ENOMEM;
90 goto err2; 99 goto err_free_attrs;
91 }
92
93 sgl = buf->sgt->sgl;
94 start_addr = buf->dma_addr;
95
96 while (i < npages) {
97 buf->pages[i] = phys_to_page(start_addr);
98 sg_set_page(sgl, buf->pages[i], buf->page_size, 0);
99 sg_dma_address(sgl) = start_addr;
100 start_addr += buf->page_size;
101 sgl = sg_next(sgl);
102 i++;
103 } 100 }
104 101
105 DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n", 102 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
106 (unsigned long)buf->kvaddr,
107 (unsigned long)buf->dma_addr, 103 (unsigned long)buf->dma_addr,
108 buf->size); 104 buf->size);
109 105
110 return ret; 106 return ret;
111err2: 107
112 dma_free_writecombine(dev->dev, buf->size, buf->kvaddr, 108err_free_attrs:
113 (dma_addr_t)buf->dma_addr); 109 dma_free_attrs(dev->dev, buf->size, buf->pages,
110 (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
114 buf->dma_addr = (dma_addr_t)NULL; 111 buf->dma_addr = (dma_addr_t)NULL;
115err1: 112
116 sg_free_table(buf->sgt); 113 if (!is_drm_iommu_supported(dev))
117 kfree(buf->sgt); 114 kfree(buf->pages);
118 buf->sgt = NULL;
119 115
120 return ret; 116 return ret;
121} 117}
@@ -125,23 +121,12 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
125{ 121{
126 DRM_DEBUG_KMS("%s.\n", __FILE__); 122 DRM_DEBUG_KMS("%s.\n", __FILE__);
127 123
128 /*
129 * release only physically continuous memory and
130 * non-continuous memory would be released by exynos
131 * gem framework.
132 */
133 if (IS_NONCONTIG_BUFFER(flags)) {
134 DRM_DEBUG_KMS("not support allocation type.\n");
135 return;
136 }
137
138 if (!buf->dma_addr) { 124 if (!buf->dma_addr) {
139 DRM_DEBUG_KMS("dma_addr is invalid.\n"); 125 DRM_DEBUG_KMS("dma_addr is invalid.\n");
140 return; 126 return;
141 } 127 }
142 128
143 DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n", 129 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
144 (unsigned long)buf->kvaddr,
145 (unsigned long)buf->dma_addr, 130 (unsigned long)buf->dma_addr,
146 buf->size); 131 buf->size);
147 132
@@ -150,11 +135,14 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
150 kfree(buf->sgt); 135 kfree(buf->sgt);
151 buf->sgt = NULL; 136 buf->sgt = NULL;
152 137
153 kfree(buf->pages); 138 if (!is_drm_iommu_supported(dev)) {
154 buf->pages = NULL; 139 dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
140 (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
141 kfree(buf->pages);
142 } else
143 dma_free_attrs(dev->dev, buf->size, buf->pages,
144 (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
155 145
156 dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
157 (dma_addr_t)buf->dma_addr);
158 buf->dma_addr = (dma_addr_t)NULL; 146 buf->dma_addr = (dma_addr_t)NULL;
159} 147}
160 148
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
index 3388e4eb4ba2..a6412f19673c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -3,24 +3,10 @@
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd. 3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * Author: Inki Dae <inki.dae@samsung.com> 4 * Author: Inki Dae <inki.dae@samsung.com>
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * This program is free software; you can redistribute it and/or modify it
7 * copy of this software and associated documentation files (the "Software"), 7 * under the terms of the GNU General Public License as published by the
8 * to deal in the Software without restriction, including without limitation 8 * Free Software Foundation; either version 2 of the License, or (at your
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * option) any later version.
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */ 10 */
25 11
26#ifndef _EXYNOS_DRM_BUF_H_ 12#ifndef _EXYNOS_DRM_BUF_H_
@@ -34,12 +20,12 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
34void exynos_drm_fini_buf(struct drm_device *dev, 20void exynos_drm_fini_buf(struct drm_device *dev,
35 struct exynos_drm_gem_buf *buffer); 21 struct exynos_drm_gem_buf *buffer);
36 22
37/* allocate physical memory region and setup sgt and pages. */ 23/* allocate physical memory region and setup sgt. */
38int exynos_drm_alloc_buf(struct drm_device *dev, 24int exynos_drm_alloc_buf(struct drm_device *dev,
39 struct exynos_drm_gem_buf *buf, 25 struct exynos_drm_gem_buf *buf,
40 unsigned int flags); 26 unsigned int flags);
41 27
42/* release physical memory region, sgt and pages. */ 28/* release physical memory region, and sgt. */
43void exynos_drm_free_buf(struct drm_device *dev, 29void exynos_drm_free_buf(struct drm_device *dev,
44 unsigned int flags, 30 unsigned int flags,
45 struct exynos_drm_gem_buf *buffer); 31 struct exynos_drm_gem_buf *buffer);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 18c271862ca8..ab37437bad8a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -5,24 +5,10 @@
5 * Joonyoung Shim <jy0922.shim@samsung.com> 5 * Joonyoung Shim <jy0922.shim@samsung.com>
6 * Seung-Woo Kim <sw0312.kim@samsung.com> 6 * Seung-Woo Kim <sw0312.kim@samsung.com>
7 * 7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 8 * This program is free software; you can redistribute it and/or modify it
9 * copy of this software and associated documentation files (the "Software"), 9 * under the terms of the GNU General Public License as published by the
10 * to deal in the Software without restriction, including without limitation 10 * Free Software Foundation; either version 2 of the License, or (at your
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 11 * option) any later version.
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 */ 12 */
27 13
28#include <drm/drmP.h> 14#include <drm/drmP.h>
@@ -374,6 +360,7 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
374 exynos_connector->encoder_id = encoder->base.id; 360 exynos_connector->encoder_id = encoder->base.id;
375 exynos_connector->manager = manager; 361 exynos_connector->manager = manager;
376 exynos_connector->dpms = DRM_MODE_DPMS_OFF; 362 exynos_connector->dpms = DRM_MODE_DPMS_OFF;
363 connector->dpms = DRM_MODE_DPMS_OFF;
377 connector->encoder = encoder; 364 connector->encoder = encoder;
378 365
379 err = drm_mode_connector_attach_encoder(connector, encoder); 366 err = drm_mode_connector_attach_encoder(connector, encoder);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.h b/drivers/gpu/drm/exynos/exynos_drm_connector.h
index 22f6cc442c3d..547c6b590357 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.h
@@ -5,24 +5,10 @@
5 * Joonyoung Shim <jy0922.shim@samsung.com> 5 * Joonyoung Shim <jy0922.shim@samsung.com>
6 * Seung-Woo Kim <sw0312.kim@samsung.com> 6 * Seung-Woo Kim <sw0312.kim@samsung.com>
7 * 7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 8 * This program is free software; you can redistribute it and/or modify it
9 * copy of this software and associated documentation files (the "Software"), 9 * under the terms of the GNU General Public License as published by the
10 * to deal in the Software without restriction, including without limitation 10 * Free Software Foundation; either version 2 of the License, or (at your
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 11 * option) any later version.
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 */ 12 */
27 13
28#ifndef _EXYNOS_DRM_CONNECTOR_H_ 14#ifndef _EXYNOS_DRM_CONNECTOR_H_
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 94026ad76a77..4667c9f67acd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -6,24 +6,10 @@
6 * Joonyoung Shim <jy0922.shim@samsung.com> 6 * Joonyoung Shim <jy0922.shim@samsung.com>
7 * Seung-Woo Kim <sw0312.kim@samsung.com> 7 * Seung-Woo Kim <sw0312.kim@samsung.com>
8 * 8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a 9 * This program is free software; you can redistribute it and/or modify it
10 * copy of this software and associated documentation files (the "Software"), 10 * under the terms of the GNU General Public License as published by the
11 * to deal in the Software without restriction, including without limitation 11 * Free Software Foundation; either version 2 of the License, or (at your
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 * option) any later version.
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */ 13 */
28 14
29#include <drm/drmP.h> 15#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index fce245f64c4f..e8894bc9e6d5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -6,24 +6,10 @@
6 * Joonyoung Shim <jy0922.shim@samsung.com> 6 * Joonyoung Shim <jy0922.shim@samsung.com>
7 * Seung-Woo Kim <sw0312.kim@samsung.com> 7 * Seung-Woo Kim <sw0312.kim@samsung.com>
8 * 8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a 9 * This program is free software; you can redistribute it and/or modify it
10 * copy of this software and associated documentation files (the "Software"), 10 * under the terms of the GNU General Public License as published by the
11 * to deal in the Software without restriction, including without limitation 11 * Free Software Foundation; either version 2 of the License, or (at your
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 * option) any later version.
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */ 13 */
28 14
29#include <drm/drmP.h> 15#include <drm/drmP.h>
@@ -236,16 +222,21 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
236 goto out; 222 goto out;
237 } 223 }
238 224
225 spin_lock_irq(&dev->event_lock);
239 list_add_tail(&event->base.link, 226 list_add_tail(&event->base.link,
240 &dev_priv->pageflip_event_list); 227 &dev_priv->pageflip_event_list);
228 spin_unlock_irq(&dev->event_lock);
241 229
242 crtc->fb = fb; 230 crtc->fb = fb;
243 ret = exynos_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y, 231 ret = exynos_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y,
244 NULL); 232 NULL);
245 if (ret) { 233 if (ret) {
246 crtc->fb = old_fb; 234 crtc->fb = old_fb;
235
236 spin_lock_irq(&dev->event_lock);
247 drm_vblank_put(dev, exynos_crtc->pipe); 237 drm_vblank_put(dev, exynos_crtc->pipe);
248 list_del(&event->base.link); 238 list_del(&event->base.link);
239 spin_unlock_irq(&dev->event_lock);
249 240
250 goto out; 241 goto out;
251 } 242 }
@@ -402,3 +393,33 @@ void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
402 exynos_drm_fn_encoder(private->crtc[crtc], &crtc, 393 exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
403 exynos_drm_disable_vblank); 394 exynos_drm_disable_vblank);
404} 395}
396
397void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int crtc)
398{
399 struct exynos_drm_private *dev_priv = dev->dev_private;
400 struct drm_pending_vblank_event *e, *t;
401 struct timeval now;
402 unsigned long flags;
403
404 DRM_DEBUG_KMS("%s\n", __FILE__);
405
406 spin_lock_irqsave(&dev->event_lock, flags);
407
408 list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
409 base.link) {
410 /* if event's pipe isn't same as crtc then ignore it. */
411 if (crtc != e->pipe)
412 continue;
413
414 do_gettimeofday(&now);
415 e->event.sequence = 0;
416 e->event.tv_sec = now.tv_sec;
417 e->event.tv_usec = now.tv_usec;
418
419 list_move_tail(&e->base.link, &e->base.file_priv->event_list);
420 wake_up_interruptible(&e->base.file_priv->event_wait);
421 drm_vblank_put(dev, crtc);
422 }
423
424 spin_unlock_irqrestore(&dev->event_lock, flags);
425}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 6bae8d8c250e..3e197e6ae7d9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -6,24 +6,10 @@
6 * Joonyoung Shim <jy0922.shim@samsung.com> 6 * Joonyoung Shim <jy0922.shim@samsung.com>
7 * Seung-Woo Kim <sw0312.kim@samsung.com> 7 * Seung-Woo Kim <sw0312.kim@samsung.com>
8 * 8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a 9 * This program is free software; you can redistribute it and/or modify it
10 * copy of this software and associated documentation files (the "Software"), 10 * under the terms of the GNU General Public License as published by the
11 * to deal in the Software without restriction, including without limitation 11 * Free Software Foundation; either version 2 of the License, or (at your
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 * option) any later version.
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */ 13 */
28 14
29#ifndef _EXYNOS_DRM_CRTC_H_ 15#ifndef _EXYNOS_DRM_CRTC_H_
@@ -32,5 +18,6 @@
32int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr); 18int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr);
33int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc); 19int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
34void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc); 20void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
21void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int crtc);
35 22
36#endif 23#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index fae1f2ec886c..9df97714b6c0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -3,24 +3,10 @@
3 * Copyright (c) 2012 Samsung Electronics Co., Ltd. 3 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
4 * Author: Inki Dae <inki.dae@samsung.com> 4 * Author: Inki Dae <inki.dae@samsung.com>
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * This program is free software; you can redistribute it and/or modify it
7 * copy of this software and associated documentation files (the "Software"), 7 * under the terms of the GNU General Public License as published by the
8 * to deal in the Software without restriction, including without limitation 8 * Free Software Foundation; either version 2 of the License, or (at your
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * option) any later version.
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */ 10 */
25 11
26#include <drm/drmP.h> 12#include <drm/drmP.h>
@@ -30,70 +16,108 @@
30 16
31#include <linux/dma-buf.h> 17#include <linux/dma-buf.h>
32 18
33static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages, 19struct exynos_drm_dmabuf_attachment {
34 unsigned int page_size) 20 struct sg_table sgt;
21 enum dma_data_direction dir;
22};
23
24static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
25 struct device *dev,
26 struct dma_buf_attachment *attach)
35{ 27{
36 struct sg_table *sgt = NULL; 28 struct exynos_drm_dmabuf_attachment *exynos_attach;
37 struct scatterlist *sgl;
38 int i, ret;
39 29
40 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); 30 exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
41 if (!sgt) 31 if (!exynos_attach)
42 goto out; 32 return -ENOMEM;
43 33
44 ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL); 34 exynos_attach->dir = DMA_NONE;
45 if (ret) 35 attach->priv = exynos_attach;
46 goto err_free_sgt;
47 36
48 if (page_size < PAGE_SIZE) 37 return 0;
49 page_size = PAGE_SIZE; 38}
50 39
51 for_each_sg(sgt->sgl, sgl, nr_pages, i) 40static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
52 sg_set_page(sgl, pages[i], page_size, 0); 41 struct dma_buf_attachment *attach)
42{
43 struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
44 struct sg_table *sgt;
53 45
54 return sgt; 46 if (!exynos_attach)
47 return;
55 48
56err_free_sgt: 49 sgt = &exynos_attach->sgt;
57 kfree(sgt); 50
58 sgt = NULL; 51 if (exynos_attach->dir != DMA_NONE)
59out: 52 dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
60 return NULL; 53 exynos_attach->dir);
54
55 sg_free_table(sgt);
56 kfree(exynos_attach);
57 attach->priv = NULL;
61} 58}
62 59
63static struct sg_table * 60static struct sg_table *
64 exynos_gem_map_dma_buf(struct dma_buf_attachment *attach, 61 exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
65 enum dma_data_direction dir) 62 enum dma_data_direction dir)
66{ 63{
64 struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
67 struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv; 65 struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
68 struct drm_device *dev = gem_obj->base.dev; 66 struct drm_device *dev = gem_obj->base.dev;
69 struct exynos_drm_gem_buf *buf; 67 struct exynos_drm_gem_buf *buf;
68 struct scatterlist *rd, *wr;
70 struct sg_table *sgt = NULL; 69 struct sg_table *sgt = NULL;
71 unsigned int npages; 70 unsigned int i;
72 int nents; 71 int nents, ret;
73 72
74 DRM_DEBUG_PRIME("%s\n", __FILE__); 73 DRM_DEBUG_PRIME("%s\n", __FILE__);
75 74
76 mutex_lock(&dev->struct_mutex); 75 if (WARN_ON(dir == DMA_NONE))
76 return ERR_PTR(-EINVAL);
77
78 /* just return current sgt if already requested. */
79 if (exynos_attach->dir == dir)
80 return &exynos_attach->sgt;
81
82 /* reattaching is not allowed. */
83 if (WARN_ON(exynos_attach->dir != DMA_NONE))
84 return ERR_PTR(-EBUSY);
77 85
78 buf = gem_obj->buffer; 86 buf = gem_obj->buffer;
87 if (!buf) {
88 DRM_ERROR("buffer is null.\n");
89 return ERR_PTR(-ENOMEM);
90 }
79 91
80 /* there should always be pages allocated. */ 92 sgt = &exynos_attach->sgt;
81 if (!buf->pages) { 93
82 DRM_ERROR("pages is null.\n"); 94 ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
83 goto err_unlock; 95 if (ret) {
96 DRM_ERROR("failed to alloc sgt.\n");
97 return ERR_PTR(-ENOMEM);
84 } 98 }
85 99
86 npages = buf->size / buf->page_size; 100 mutex_lock(&dev->struct_mutex);
101
102 rd = buf->sgt->sgl;
103 wr = sgt->sgl;
104 for (i = 0; i < sgt->orig_nents; ++i) {
105 sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
106 rd = sg_next(rd);
107 wr = sg_next(wr);
108 }
87 109
88 sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size); 110 nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
89 if (!sgt) { 111 if (!nents) {
90 DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n"); 112 DRM_ERROR("failed to map sgl with iommu.\n");
113 sgt = ERR_PTR(-EIO);
91 goto err_unlock; 114 goto err_unlock;
92 } 115 }
93 nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
94 116
95 DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n", 117 exynos_attach->dir = dir;
96 npages, buf->size, buf->page_size); 118 attach->priv = exynos_attach;
119
120 DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
97 121
98err_unlock: 122err_unlock:
99 mutex_unlock(&dev->struct_mutex); 123 mutex_unlock(&dev->struct_mutex);
@@ -104,10 +128,7 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
104 struct sg_table *sgt, 128 struct sg_table *sgt,
105 enum dma_data_direction dir) 129 enum dma_data_direction dir)
106{ 130{
107 dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); 131 /* Nothing to do. */
108 sg_free_table(sgt);
109 kfree(sgt);
110 sgt = NULL;
111} 132}
112 133
113static void exynos_dmabuf_release(struct dma_buf *dmabuf) 134static void exynos_dmabuf_release(struct dma_buf *dmabuf)
@@ -169,6 +190,8 @@ static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
169} 190}
170 191
171static struct dma_buf_ops exynos_dmabuf_ops = { 192static struct dma_buf_ops exynos_dmabuf_ops = {
193 .attach = exynos_gem_attach_dma_buf,
194 .detach = exynos_gem_detach_dma_buf,
172 .map_dma_buf = exynos_gem_map_dma_buf, 195 .map_dma_buf = exynos_gem_map_dma_buf,
173 .unmap_dma_buf = exynos_gem_unmap_dma_buf, 196 .unmap_dma_buf = exynos_gem_unmap_dma_buf,
174 .kmap = exynos_gem_dmabuf_kmap, 197 .kmap = exynos_gem_dmabuf_kmap,
@@ -185,7 +208,7 @@ struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
185 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); 208 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
186 209
187 return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops, 210 return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops,
188 exynos_gem_obj->base.size, 0600); 211 exynos_gem_obj->base.size, flags);
189} 212}
190 213
191struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev, 214struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
@@ -196,7 +219,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
196 struct scatterlist *sgl; 219 struct scatterlist *sgl;
197 struct exynos_drm_gem_obj *exynos_gem_obj; 220 struct exynos_drm_gem_obj *exynos_gem_obj;
198 struct exynos_drm_gem_buf *buffer; 221 struct exynos_drm_gem_buf *buffer;
199 struct page *page;
200 int ret; 222 int ret;
201 223
202 DRM_DEBUG_PRIME("%s\n", __FILE__); 224 DRM_DEBUG_PRIME("%s\n", __FILE__);
@@ -210,7 +232,12 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
210 232
211 /* is it from our device? */ 233 /* is it from our device? */
212 if (obj->dev == drm_dev) { 234 if (obj->dev == drm_dev) {
235 /*
236 * Importing dmabuf exported from out own gem increases
237 * refcount on gem itself instead of f_count of dmabuf.
238 */
213 drm_gem_object_reference(obj); 239 drm_gem_object_reference(obj);
240 dma_buf_put(dma_buf);
214 return obj; 241 return obj;
215 } 242 }
216 } 243 }
@@ -233,38 +260,27 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
233 goto err_unmap_attach; 260 goto err_unmap_attach;
234 } 261 }
235 262
236 buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
237 if (!buffer->pages) {
238 DRM_ERROR("failed to allocate pages.\n");
239 ret = -ENOMEM;
240 goto err_free_buffer;
241 }
242
243 exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size); 263 exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
244 if (!exynos_gem_obj) { 264 if (!exynos_gem_obj) {
245 ret = -ENOMEM; 265 ret = -ENOMEM;
246 goto err_free_pages; 266 goto err_free_buffer;
247 } 267 }
248 268
249 sgl = sgt->sgl; 269 sgl = sgt->sgl;
250 270
251 if (sgt->nents == 1) { 271 buffer->size = dma_buf->size;
252 buffer->dma_addr = sg_dma_address(sgt->sgl); 272 buffer->dma_addr = sg_dma_address(sgl);
253 buffer->size = sg_dma_len(sgt->sgl);
254 273
274 if (sgt->nents == 1) {
255 /* always physically continuous memory if sgt->nents is 1. */ 275 /* always physically continuous memory if sgt->nents is 1. */
256 exynos_gem_obj->flags |= EXYNOS_BO_CONTIG; 276 exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
257 } else { 277 } else {
258 unsigned int i = 0; 278 /*
259 279 * this case could be CONTIG or NONCONTIG type but for now
260 buffer->dma_addr = sg_dma_address(sgl); 280 * sets NONCONTIG.
261 while (i < sgt->nents) { 281 * TODO. we have to find a way that exporter can notify
262 buffer->pages[i] = sg_page(sgl); 282 * the type of its own buffer to importer.
263 buffer->size += sg_dma_len(sgl); 283 */
264 sgl = sg_next(sgl);
265 i++;
266 }
267
268 exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG; 284 exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
269 } 285 }
270 286
@@ -277,9 +293,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
277 293
278 return &exynos_gem_obj->base; 294 return &exynos_gem_obj->base;
279 295
280err_free_pages:
281 kfree(buffer->pages);
282 buffer->pages = NULL;
283err_free_buffer: 296err_free_buffer:
284 kfree(buffer); 297 kfree(buffer);
285 buffer = NULL; 298 buffer = NULL;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
index 662a8f98ccdb..49acfafb4fdb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.h
@@ -3,24 +3,10 @@
3 * Copyright (c) 2012 Samsung Electronics Co., Ltd. 3 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
4 * Author: Inki Dae <inki.dae@samsung.com> 4 * Author: Inki Dae <inki.dae@samsung.com>
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * This program is free software; you can redistribute it and/or modify it
7 * copy of this software and associated documentation files (the "Software"), 7 * under the terms of the GNU General Public License as published by the
8 * to deal in the Software without restriction, including without limitation 8 * Free Software Foundation; either version 2 of the License, or (at your
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * option) any later version.
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */ 10 */
25 11
26#ifndef _EXYNOS_DRM_DMABUF_H_ 12#ifndef _EXYNOS_DRM_DMABUF_H_
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 1de7baafddd0..3da5c2d214d8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -5,24 +5,10 @@
5 * Joonyoung Shim <jy0922.shim@samsung.com> 5 * Joonyoung Shim <jy0922.shim@samsung.com>
6 * Seung-Woo Kim <sw0312.kim@samsung.com> 6 * Seung-Woo Kim <sw0312.kim@samsung.com>
7 * 7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 8 * This program is free software; you can redistribute it and/or modify it
9 * copy of this software and associated documentation files (the "Software"), 9 * under the terms of the GNU General Public License as published by the
10 * to deal in the Software without restriction, including without limitation 10 * Free Software Foundation; either version 2 of the License, or (at your
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 11 * option) any later version.
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 */ 12 */
27 13
28#include <drm/drmP.h> 14#include <drm/drmP.h>
@@ -40,6 +26,8 @@
40#include "exynos_drm_vidi.h" 26#include "exynos_drm_vidi.h"
41#include "exynos_drm_dmabuf.h" 27#include "exynos_drm_dmabuf.h"
42#include "exynos_drm_g2d.h" 28#include "exynos_drm_g2d.h"
29#include "exynos_drm_ipp.h"
30#include "exynos_drm_iommu.h"
43 31
44#define DRIVER_NAME "exynos" 32#define DRIVER_NAME "exynos"
45#define DRIVER_DESC "Samsung SoC DRM" 33#define DRIVER_DESC "Samsung SoC DRM"
@@ -49,6 +37,9 @@
49 37
50#define VBLANK_OFF_DELAY 50000 38#define VBLANK_OFF_DELAY 50000
51 39
40/* platform device pointer for eynos drm device. */
41static struct platform_device *exynos_drm_pdev;
42
52static int exynos_drm_load(struct drm_device *dev, unsigned long flags) 43static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
53{ 44{
54 struct exynos_drm_private *private; 45 struct exynos_drm_private *private;
@@ -66,6 +57,18 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
66 INIT_LIST_HEAD(&private->pageflip_event_list); 57 INIT_LIST_HEAD(&private->pageflip_event_list);
67 dev->dev_private = (void *)private; 58 dev->dev_private = (void *)private;
68 59
60 /*
61 * create mapping to manage iommu table and set a pointer to iommu
62 * mapping structure to iommu_mapping of private data.
63 * also this iommu_mapping can be used to check if iommu is supported
64 * or not.
65 */
66 ret = drm_create_iommu_mapping(dev);
67 if (ret < 0) {
68 DRM_ERROR("failed to create iommu mapping.\n");
69 goto err_crtc;
70 }
71
69 drm_mode_config_init(dev); 72 drm_mode_config_init(dev);
70 73
71 /* init kms poll for handling hpd */ 74 /* init kms poll for handling hpd */
@@ -80,7 +83,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
80 for (nr = 0; nr < MAX_CRTC; nr++) { 83 for (nr = 0; nr < MAX_CRTC; nr++) {
81 ret = exynos_drm_crtc_create(dev, nr); 84 ret = exynos_drm_crtc_create(dev, nr);
82 if (ret) 85 if (ret)
83 goto err_crtc; 86 goto err_release_iommu_mapping;
84 } 87 }
85 88
86 for (nr = 0; nr < MAX_PLANE; nr++) { 89 for (nr = 0; nr < MAX_PLANE; nr++) {
@@ -89,12 +92,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
89 92
90 plane = exynos_plane_init(dev, possible_crtcs, false); 93 plane = exynos_plane_init(dev, possible_crtcs, false);
91 if (!plane) 94 if (!plane)
92 goto err_crtc; 95 goto err_release_iommu_mapping;
93 } 96 }
94 97
95 ret = drm_vblank_init(dev, MAX_CRTC); 98 ret = drm_vblank_init(dev, MAX_CRTC);
96 if (ret) 99 if (ret)
97 goto err_crtc; 100 goto err_release_iommu_mapping;
98 101
99 /* 102 /*
100 * probe sub drivers such as display controller and hdmi driver, 103 * probe sub drivers such as display controller and hdmi driver,
@@ -126,6 +129,8 @@ err_drm_device:
126 exynos_drm_device_unregister(dev); 129 exynos_drm_device_unregister(dev);
127err_vblank: 130err_vblank:
128 drm_vblank_cleanup(dev); 131 drm_vblank_cleanup(dev);
132err_release_iommu_mapping:
133 drm_release_iommu_mapping(dev);
129err_crtc: 134err_crtc:
130 drm_mode_config_cleanup(dev); 135 drm_mode_config_cleanup(dev);
131 kfree(private); 136 kfree(private);
@@ -142,6 +147,8 @@ static int exynos_drm_unload(struct drm_device *dev)
142 drm_vblank_cleanup(dev); 147 drm_vblank_cleanup(dev);
143 drm_kms_helper_poll_fini(dev); 148 drm_kms_helper_poll_fini(dev);
144 drm_mode_config_cleanup(dev); 149 drm_mode_config_cleanup(dev);
150
151 drm_release_iommu_mapping(dev);
145 kfree(dev->dev_private); 152 kfree(dev->dev_private);
146 153
147 dev->dev_private = NULL; 154 dev->dev_private = NULL;
@@ -229,6 +236,14 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
229 exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH), 236 exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH),
230 DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, 237 DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC,
231 exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH), 238 exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH),
239 DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY,
240 exynos_drm_ipp_get_property, DRM_UNLOCKED | DRM_AUTH),
241 DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY,
242 exynos_drm_ipp_set_property, DRM_UNLOCKED | DRM_AUTH),
243 DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF,
244 exynos_drm_ipp_queue_buf, DRM_UNLOCKED | DRM_AUTH),
245 DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL,
246 exynos_drm_ipp_cmd_ctrl, DRM_UNLOCKED | DRM_AUTH),
232}; 247};
233 248
234static const struct file_operations exynos_drm_driver_fops = { 249static const struct file_operations exynos_drm_driver_fops = {
@@ -279,6 +294,7 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
279{ 294{
280 DRM_DEBUG_DRIVER("%s\n", __FILE__); 295 DRM_DEBUG_DRIVER("%s\n", __FILE__);
281 296
297 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
282 exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls); 298 exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls);
283 299
284 return drm_platform_init(&exynos_drm_driver, pdev); 300 return drm_platform_init(&exynos_drm_driver, pdev);
@@ -295,7 +311,7 @@ static int exynos_drm_platform_remove(struct platform_device *pdev)
295 311
296static struct platform_driver exynos_drm_platform_driver = { 312static struct platform_driver exynos_drm_platform_driver = {
297 .probe = exynos_drm_platform_probe, 313 .probe = exynos_drm_platform_probe,
298 .remove = __devexit_p(exynos_drm_platform_remove), 314 .remove = exynos_drm_platform_remove,
299 .driver = { 315 .driver = {
300 .owner = THIS_MODULE, 316 .owner = THIS_MODULE,
301 .name = "exynos-drm", 317 .name = "exynos-drm",
@@ -324,6 +340,10 @@ static int __init exynos_drm_init(void)
324 ret = platform_driver_register(&exynos_drm_common_hdmi_driver); 340 ret = platform_driver_register(&exynos_drm_common_hdmi_driver);
325 if (ret < 0) 341 if (ret < 0)
326 goto out_common_hdmi; 342 goto out_common_hdmi;
343
344 ret = exynos_platform_device_hdmi_register();
345 if (ret < 0)
346 goto out_common_hdmi_dev;
327#endif 347#endif
328 348
329#ifdef CONFIG_DRM_EXYNOS_VIDI 349#ifdef CONFIG_DRM_EXYNOS_VIDI
@@ -338,24 +358,80 @@ static int __init exynos_drm_init(void)
338 goto out_g2d; 358 goto out_g2d;
339#endif 359#endif
340 360
361#ifdef CONFIG_DRM_EXYNOS_FIMC
362 ret = platform_driver_register(&fimc_driver);
363 if (ret < 0)
364 goto out_fimc;
365#endif
366
367#ifdef CONFIG_DRM_EXYNOS_ROTATOR
368 ret = platform_driver_register(&rotator_driver);
369 if (ret < 0)
370 goto out_rotator;
371#endif
372
373#ifdef CONFIG_DRM_EXYNOS_GSC
374 ret = platform_driver_register(&gsc_driver);
375 if (ret < 0)
376 goto out_gsc;
377#endif
378
379#ifdef CONFIG_DRM_EXYNOS_IPP
380 ret = platform_driver_register(&ipp_driver);
381 if (ret < 0)
382 goto out_ipp;
383#endif
384
341 ret = platform_driver_register(&exynos_drm_platform_driver); 385 ret = platform_driver_register(&exynos_drm_platform_driver);
342 if (ret < 0) 386 if (ret < 0)
387 goto out_drm;
388
389 exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
390 NULL, 0);
391 if (IS_ERR_OR_NULL(exynos_drm_pdev)) {
392 ret = PTR_ERR(exynos_drm_pdev);
343 goto out; 393 goto out;
394 }
344 395
345 return 0; 396 return 0;
346 397
347out: 398out:
399 platform_driver_unregister(&exynos_drm_platform_driver);
400
401out_drm:
402#ifdef CONFIG_DRM_EXYNOS_IPP
403 platform_driver_unregister(&ipp_driver);
404out_ipp:
405#endif
406
407#ifdef CONFIG_DRM_EXYNOS_GSC
408 platform_driver_unregister(&gsc_driver);
409out_gsc:
410#endif
411
412#ifdef CONFIG_DRM_EXYNOS_ROTATOR
413 platform_driver_unregister(&rotator_driver);
414out_rotator:
415#endif
416
417#ifdef CONFIG_DRM_EXYNOS_FIMC
418 platform_driver_unregister(&fimc_driver);
419out_fimc:
420#endif
421
348#ifdef CONFIG_DRM_EXYNOS_G2D 422#ifdef CONFIG_DRM_EXYNOS_G2D
349 platform_driver_unregister(&g2d_driver); 423 platform_driver_unregister(&g2d_driver);
350out_g2d: 424out_g2d:
351#endif 425#endif
352 426
353#ifdef CONFIG_DRM_EXYNOS_VIDI 427#ifdef CONFIG_DRM_EXYNOS_VIDI
354out_vidi:
355 platform_driver_unregister(&vidi_driver); 428 platform_driver_unregister(&vidi_driver);
429out_vidi:
356#endif 430#endif
357 431
358#ifdef CONFIG_DRM_EXYNOS_HDMI 432#ifdef CONFIG_DRM_EXYNOS_HDMI
433 exynos_platform_device_hdmi_unregister();
434out_common_hdmi_dev:
359 platform_driver_unregister(&exynos_drm_common_hdmi_driver); 435 platform_driver_unregister(&exynos_drm_common_hdmi_driver);
360out_common_hdmi: 436out_common_hdmi:
361 platform_driver_unregister(&mixer_driver); 437 platform_driver_unregister(&mixer_driver);
@@ -375,13 +451,32 @@ static void __exit exynos_drm_exit(void)
375{ 451{
376 DRM_DEBUG_DRIVER("%s\n", __FILE__); 452 DRM_DEBUG_DRIVER("%s\n", __FILE__);
377 453
454 platform_device_unregister(exynos_drm_pdev);
455
378 platform_driver_unregister(&exynos_drm_platform_driver); 456 platform_driver_unregister(&exynos_drm_platform_driver);
379 457
458#ifdef CONFIG_DRM_EXYNOS_IPP
459 platform_driver_unregister(&ipp_driver);
460#endif
461
462#ifdef CONFIG_DRM_EXYNOS_GSC
463 platform_driver_unregister(&gsc_driver);
464#endif
465
466#ifdef CONFIG_DRM_EXYNOS_ROTATOR
467 platform_driver_unregister(&rotator_driver);
468#endif
469
470#ifdef CONFIG_DRM_EXYNOS_FIMC
471 platform_driver_unregister(&fimc_driver);
472#endif
473
380#ifdef CONFIG_DRM_EXYNOS_G2D 474#ifdef CONFIG_DRM_EXYNOS_G2D
381 platform_driver_unregister(&g2d_driver); 475 platform_driver_unregister(&g2d_driver);
382#endif 476#endif
383 477
384#ifdef CONFIG_DRM_EXYNOS_HDMI 478#ifdef CONFIG_DRM_EXYNOS_HDMI
479 exynos_platform_device_hdmi_unregister();
385 platform_driver_unregister(&exynos_drm_common_hdmi_driver); 480 platform_driver_unregister(&exynos_drm_common_hdmi_driver);
386 platform_driver_unregister(&mixer_driver); 481 platform_driver_unregister(&mixer_driver);
387 platform_driver_unregister(&hdmi_driver); 482 platform_driver_unregister(&hdmi_driver);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index a34231036496..b9e51bc09e81 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -6,24 +6,10 @@
6 * Joonyoung Shim <jy0922.shim@samsung.com> 6 * Joonyoung Shim <jy0922.shim@samsung.com>
7 * Seung-Woo Kim <sw0312.kim@samsung.com> 7 * Seung-Woo Kim <sw0312.kim@samsung.com>
8 * 8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a 9 * This program is free software; you can redistribute it and/or modify it
10 * copy of this software and associated documentation files (the "Software"), 10 * under the terms of the GNU General Public License as published by the
11 * to deal in the Software without restriction, including without limitation 11 * Free Software Foundation; either version 2 of the License, or (at your
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 * option) any later version.
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */ 13 */
28 14
29#ifndef _EXYNOS_DRM_DRV_H_ 15#ifndef _EXYNOS_DRM_DRV_H_
@@ -74,8 +60,6 @@ enum exynos_drm_output_type {
74 * @commit: apply hardware specific overlay data to registers. 60 * @commit: apply hardware specific overlay data to registers.
75 * @enable: enable hardware specific overlay. 61 * @enable: enable hardware specific overlay.
76 * @disable: disable hardware specific overlay. 62 * @disable: disable hardware specific overlay.
77 * @wait_for_vblank: wait for vblank interrupt to make sure that
78 * hardware overlay is disabled.
79 */ 63 */
80struct exynos_drm_overlay_ops { 64struct exynos_drm_overlay_ops {
81 void (*mode_set)(struct device *subdrv_dev, 65 void (*mode_set)(struct device *subdrv_dev,
@@ -83,7 +67,6 @@ struct exynos_drm_overlay_ops {
83 void (*commit)(struct device *subdrv_dev, int zpos); 67 void (*commit)(struct device *subdrv_dev, int zpos);
84 void (*enable)(struct device *subdrv_dev, int zpos); 68 void (*enable)(struct device *subdrv_dev, int zpos);
85 void (*disable)(struct device *subdrv_dev, int zpos); 69 void (*disable)(struct device *subdrv_dev, int zpos);
86 void (*wait_for_vblank)(struct device *subdrv_dev);
87}; 70};
88 71
89/* 72/*
@@ -110,7 +93,6 @@ struct exynos_drm_overlay_ops {
110 * @pixel_format: fourcc pixel format of this overlay 93 * @pixel_format: fourcc pixel format of this overlay
111 * @dma_addr: array of bus(accessed by dma) address to the memory region 94 * @dma_addr: array of bus(accessed by dma) address to the memory region
112 * allocated for a overlay. 95 * allocated for a overlay.
113 * @vaddr: array of virtual memory addresss to this overlay.
114 * @zpos: order of overlay layer(z position). 96 * @zpos: order of overlay layer(z position).
115 * @default_win: a window to be enabled. 97 * @default_win: a window to be enabled.
116 * @color_key: color key on or off. 98 * @color_key: color key on or off.
@@ -142,7 +124,6 @@ struct exynos_drm_overlay {
142 unsigned int pitch; 124 unsigned int pitch;
143 uint32_t pixel_format; 125 uint32_t pixel_format;
144 dma_addr_t dma_addr[MAX_FB_BUFFER]; 126 dma_addr_t dma_addr[MAX_FB_BUFFER];
145 void __iomem *vaddr[MAX_FB_BUFFER];
146 int zpos; 127 int zpos;
147 128
148 bool default_win; 129 bool default_win;
@@ -186,6 +167,8 @@ struct exynos_drm_display_ops {
186 * @commit: set current hw specific display mode to hw. 167 * @commit: set current hw specific display mode to hw.
187 * @enable_vblank: specific driver callback for enabling vblank interrupt. 168 * @enable_vblank: specific driver callback for enabling vblank interrupt.
188 * @disable_vblank: specific driver callback for disabling vblank interrupt. 169 * @disable_vblank: specific driver callback for disabling vblank interrupt.
170 * @wait_for_vblank: wait for vblank interrupt to make sure that
171 * hardware overlay is updated.
189 */ 172 */
190struct exynos_drm_manager_ops { 173struct exynos_drm_manager_ops {
191 void (*dpms)(struct device *subdrv_dev, int mode); 174 void (*dpms)(struct device *subdrv_dev, int mode);
@@ -200,6 +183,7 @@ struct exynos_drm_manager_ops {
200 void (*commit)(struct device *subdrv_dev); 183 void (*commit)(struct device *subdrv_dev);
201 int (*enable_vblank)(struct device *subdrv_dev); 184 int (*enable_vblank)(struct device *subdrv_dev);
202 void (*disable_vblank)(struct device *subdrv_dev); 185 void (*disable_vblank)(struct device *subdrv_dev);
186 void (*wait_for_vblank)(struct device *subdrv_dev);
203}; 187};
204 188
205/* 189/*
@@ -231,16 +215,28 @@ struct exynos_drm_g2d_private {
231 struct device *dev; 215 struct device *dev;
232 struct list_head inuse_cmdlist; 216 struct list_head inuse_cmdlist;
233 struct list_head event_list; 217 struct list_head event_list;
234 struct list_head gem_list; 218 struct list_head userptr_list;
235 unsigned int gem_nr; 219};
220
221struct exynos_drm_ipp_private {
222 struct device *dev;
223 struct list_head event_list;
236}; 224};
237 225
238struct drm_exynos_file_private { 226struct drm_exynos_file_private {
239 struct exynos_drm_g2d_private *g2d_priv; 227 struct exynos_drm_g2d_private *g2d_priv;
228 struct exynos_drm_ipp_private *ipp_priv;
240}; 229};
241 230
242/* 231/*
243 * Exynos drm private structure. 232 * Exynos drm private structure.
233 *
234 * @da_start: start address to device address space.
235 * with iommu, device address space starts from this address
236 * otherwise default one.
237 * @da_space_size: size of device address space.
238 * if 0 then default value is used for it.
239 * @da_space_order: order to device address space.
244 */ 240 */
245struct exynos_drm_private { 241struct exynos_drm_private {
246 struct drm_fb_helper *fb_helper; 242 struct drm_fb_helper *fb_helper;
@@ -255,6 +251,10 @@ struct exynos_drm_private {
255 struct drm_crtc *crtc[MAX_CRTC]; 251 struct drm_crtc *crtc[MAX_CRTC];
256 struct drm_property *plane_zpos_property; 252 struct drm_property *plane_zpos_property;
257 struct drm_property *crtc_mode_property; 253 struct drm_property *crtc_mode_property;
254
255 unsigned long da_start;
256 unsigned long da_space_size;
257 unsigned long da_space_order;
258}; 258};
259 259
260/* 260/*
@@ -318,10 +318,25 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv);
318int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file); 318int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
319void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file); 319void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
320 320
321/*
322 * this function registers exynos drm hdmi platform device. It ensures only one
323 * instance of the device is created.
324 */
325extern int exynos_platform_device_hdmi_register(void);
326
327/*
328 * this function unregisters exynos drm hdmi platform device if it exists.
329 */
330void exynos_platform_device_hdmi_unregister(void);
331
321extern struct platform_driver fimd_driver; 332extern struct platform_driver fimd_driver;
322extern struct platform_driver hdmi_driver; 333extern struct platform_driver hdmi_driver;
323extern struct platform_driver mixer_driver; 334extern struct platform_driver mixer_driver;
324extern struct platform_driver exynos_drm_common_hdmi_driver; 335extern struct platform_driver exynos_drm_common_hdmi_driver;
325extern struct platform_driver vidi_driver; 336extern struct platform_driver vidi_driver;
326extern struct platform_driver g2d_driver; 337extern struct platform_driver g2d_driver;
338extern struct platform_driver fimc_driver;
339extern struct platform_driver rotator_driver;
340extern struct platform_driver gsc_driver;
341extern struct platform_driver ipp_driver;
327#endif 342#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index e51503fbaf2b..c63721f64aec 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -6,24 +6,10 @@
6 * Joonyoung Shim <jy0922.shim@samsung.com> 6 * Joonyoung Shim <jy0922.shim@samsung.com>
7 * Seung-Woo Kim <sw0312.kim@samsung.com> 7 * Seung-Woo Kim <sw0312.kim@samsung.com>
8 * 8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a 9 * This program is free software; you can redistribute it and/or modify it
10 * copy of this software and associated documentation files (the "Software"), 10 * under the terms of the GNU General Public License as published by the
11 * to deal in the Software without restriction, including without limitation 11 * Free Software Foundation; either version 2 of the License, or (at your
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 * option) any later version.
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */ 13 */
28 14
29#include <drm/drmP.h> 15#include <drm/drmP.h>
@@ -43,12 +29,14 @@
43 * @manager: specific encoder has its own manager to control a hardware 29 * @manager: specific encoder has its own manager to control a hardware
44 * appropriately and we can access a hardware drawing on this manager. 30 * appropriately and we can access a hardware drawing on this manager.
45 * @dpms: store the encoder dpms value. 31 * @dpms: store the encoder dpms value.
32 * @updated: indicate whether overlay data updating is needed or not.
46 */ 33 */
47struct exynos_drm_encoder { 34struct exynos_drm_encoder {
48 struct drm_crtc *old_crtc; 35 struct drm_crtc *old_crtc;
49 struct drm_encoder drm_encoder; 36 struct drm_encoder drm_encoder;
50 struct exynos_drm_manager *manager; 37 struct exynos_drm_manager *manager;
51 int dpms; 38 int dpms;
39 bool updated;
52}; 40};
53 41
54static void exynos_drm_connector_power(struct drm_encoder *encoder, int mode) 42static void exynos_drm_connector_power(struct drm_encoder *encoder, int mode)
@@ -85,7 +73,9 @@ static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
85 switch (mode) { 73 switch (mode) {
86 case DRM_MODE_DPMS_ON: 74 case DRM_MODE_DPMS_ON:
87 if (manager_ops && manager_ops->apply) 75 if (manager_ops && manager_ops->apply)
88 manager_ops->apply(manager->dev); 76 if (!exynos_encoder->updated)
77 manager_ops->apply(manager->dev);
78
89 exynos_drm_connector_power(encoder, mode); 79 exynos_drm_connector_power(encoder, mode);
90 exynos_encoder->dpms = mode; 80 exynos_encoder->dpms = mode;
91 break; 81 break;
@@ -94,6 +84,7 @@ static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
94 case DRM_MODE_DPMS_OFF: 84 case DRM_MODE_DPMS_OFF:
95 exynos_drm_connector_power(encoder, mode); 85 exynos_drm_connector_power(encoder, mode);
96 exynos_encoder->dpms = mode; 86 exynos_encoder->dpms = mode;
87 exynos_encoder->updated = false;
97 break; 88 break;
98 default: 89 default:
99 DRM_ERROR("unspecified mode %d\n", mode); 90 DRM_ERROR("unspecified mode %d\n", mode);
@@ -205,15 +196,56 @@ static void exynos_drm_encoder_prepare(struct drm_encoder *encoder)
205 196
206static void exynos_drm_encoder_commit(struct drm_encoder *encoder) 197static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
207{ 198{
208 struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder); 199 struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
200 struct exynos_drm_manager *manager = exynos_encoder->manager;
209 struct exynos_drm_manager_ops *manager_ops = manager->ops; 201 struct exynos_drm_manager_ops *manager_ops = manager->ops;
210 202
211 DRM_DEBUG_KMS("%s\n", __FILE__); 203 DRM_DEBUG_KMS("%s\n", __FILE__);
212 204
213 if (manager_ops && manager_ops->commit) 205 if (manager_ops && manager_ops->commit)
214 manager_ops->commit(manager->dev); 206 manager_ops->commit(manager->dev);
207
208 /*
209 * this will avoid one issue that overlay data is updated to
210 * real hardware two times.
211 * And this variable will be used to check if the data was
212 * already updated or not by exynos_drm_encoder_dpms function.
213 */
214 exynos_encoder->updated = true;
215
216 /*
217 * In case of setcrtc, there is no way to update encoder's dpms
218 * so update it here.
219 */
220 exynos_encoder->dpms = DRM_MODE_DPMS_ON;
221}
222
223void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb)
224{
225 struct exynos_drm_encoder *exynos_encoder;
226 struct exynos_drm_manager_ops *ops;
227 struct drm_device *dev = fb->dev;
228 struct drm_encoder *encoder;
229
230 /*
231 * make sure that overlay data are updated to real hardware
232 * for all encoders.
233 */
234 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
235 exynos_encoder = to_exynos_encoder(encoder);
236 ops = exynos_encoder->manager->ops;
237
238 /*
239 * wait for vblank interrupt
240 * - this makes sure that overlay data are updated to
241 * real hardware.
242 */
243 if (ops->wait_for_vblank)
244 ops->wait_for_vblank(exynos_encoder->manager->dev);
245 }
215} 246}
216 247
248
217static void exynos_drm_encoder_disable(struct drm_encoder *encoder) 249static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
218{ 250{
219 struct drm_plane *plane; 251 struct drm_plane *plane;
@@ -401,19 +433,6 @@ void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data)
401 manager_ops->dpms(manager->dev, mode); 433 manager_ops->dpms(manager->dev, mode);
402 434
403 /* 435 /*
404 * set current mode to new one so that data aren't updated into
405 * registers by drm_helper_connector_dpms two times.
406 *
407 * in case that drm_crtc_helper_set_mode() is called,
408 * overlay_ops->commit() and manager_ops->commit() callbacks
409 * can be called two times, first at drm_crtc_helper_set_mode()
410 * and second at drm_helper_connector_dpms().
411 * so with this setting, when drm_helper_connector_dpms() is called
412 * encoder->funcs->dpms() will be ignored.
413 */
414 exynos_encoder->dpms = mode;
415
416 /*
417 * if this condition is ok then it means that the crtc is already 436 * if this condition is ok then it means that the crtc is already
418 * detached from encoder and last function for detaching is properly 437 * detached from encoder and last function for detaching is properly
419 * done, so clear pipe from manager to prevent repeated call. 438 * done, so clear pipe from manager to prevent repeated call.
@@ -498,14 +517,4 @@ void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)
498 517
499 if (overlay_ops && overlay_ops->disable) 518 if (overlay_ops && overlay_ops->disable)
500 overlay_ops->disable(manager->dev, zpos); 519 overlay_ops->disable(manager->dev, zpos);
501
502 /*
503 * wait for vblank interrupt
504 * - this makes sure that hardware overlay is disabled to avoid
505 * for the dma accesses to memory after gem buffer was released
506 * because the setting for disabling the overlay will be updated
507 * at vsync.
508 */
509 if (overlay_ops->wait_for_vblank)
510 overlay_ops->wait_for_vblank(manager->dev);
511} 520}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index 6470d9ddf5a1..89e2fb0770af 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -5,24 +5,10 @@
5 * Joonyoung Shim <jy0922.shim@samsung.com> 5 * Joonyoung Shim <jy0922.shim@samsung.com>
6 * Seung-Woo Kim <sw0312.kim@samsung.com> 6 * Seung-Woo Kim <sw0312.kim@samsung.com>
7 * 7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 8 * This program is free software; you can redistribute it and/or modify it
9 * copy of this software and associated documentation files (the "Software"), 9 * under the terms of the GNU General Public License as published by the
10 * to deal in the Software without restriction, including without limitation 10 * Free Software Foundation; either version 2 of the License, or (at your
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 11 * option) any later version.
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 */ 12 */
27 13
28#ifndef _EXYNOS_DRM_ENCODER_H_ 14#ifndef _EXYNOS_DRM_ENCODER_H_
@@ -46,5 +32,6 @@ void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data);
46void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data); 32void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data);
47void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data); 33void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data);
48void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data); 34void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data);
35void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb);
49 36
50#endif 37#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 4ef4cd3f9936..294c0513f587 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -6,34 +6,23 @@
6 * Joonyoung Shim <jy0922.shim@samsung.com> 6 * Joonyoung Shim <jy0922.shim@samsung.com>
7 * Seung-Woo Kim <sw0312.kim@samsung.com> 7 * Seung-Woo Kim <sw0312.kim@samsung.com>
8 * 8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a 9 * This program is free software; you can redistribute it and/or modify it
10 * copy of this software and associated documentation files (the "Software"), 10 * under the terms of the GNU General Public License as published by the
11 * to deal in the Software without restriction, including without limitation 11 * Free Software Foundation; either version 2 of the License, or (at your
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 * option) any later version.
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */ 13 */
28 14
29#include <drm/drmP.h> 15#include <drm/drmP.h>
30#include <drm/drm_crtc.h> 16#include <drm/drm_crtc.h>
31#include <drm/drm_crtc_helper.h> 17#include <drm/drm_crtc_helper.h>
32#include <drm/drm_fb_helper.h> 18#include <drm/drm_fb_helper.h>
19#include <uapi/drm/exynos_drm.h>
33 20
34#include "exynos_drm_drv.h" 21#include "exynos_drm_drv.h"
35#include "exynos_drm_fb.h" 22#include "exynos_drm_fb.h"
36#include "exynos_drm_gem.h" 23#include "exynos_drm_gem.h"
24#include "exynos_drm_iommu.h"
25#include "exynos_drm_encoder.h"
37 26
38#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb) 27#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb)
39 28
@@ -50,6 +39,32 @@ struct exynos_drm_fb {
50 struct exynos_drm_gem_obj *exynos_gem_obj[MAX_FB_BUFFER]; 39 struct exynos_drm_gem_obj *exynos_gem_obj[MAX_FB_BUFFER];
51}; 40};
52 41
42static int check_fb_gem_memory_type(struct drm_device *drm_dev,
43 struct exynos_drm_gem_obj *exynos_gem_obj)
44{
45 unsigned int flags;
46
47 /*
48 * if exynos drm driver supports iommu then framebuffer can use
49 * all the buffer types.
50 */
51 if (is_drm_iommu_supported(drm_dev))
52 return 0;
53
54 flags = exynos_gem_obj->flags;
55
56 /*
57 * without iommu support, not support physically non-continuous memory
58 * for framebuffer.
59 */
60 if (IS_NONCONTIG_BUFFER(flags)) {
61 DRM_ERROR("cannot use this gem memory type for fb.\n");
62 return -EINVAL;
63 }
64
65 return 0;
66}
67
53static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) 68static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
54{ 69{
55 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); 70 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
@@ -57,6 +72,9 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
57 72
58 DRM_DEBUG_KMS("%s\n", __FILE__); 73 DRM_DEBUG_KMS("%s\n", __FILE__);
59 74
75 /* make sure that overlay data are updated before relesing fb. */
76 exynos_drm_encoder_complete_scanout(fb);
77
60 drm_framebuffer_cleanup(fb); 78 drm_framebuffer_cleanup(fb);
61 79
62 for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) { 80 for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) {
@@ -128,23 +146,32 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
128 struct drm_gem_object *obj) 146 struct drm_gem_object *obj)
129{ 147{
130 struct exynos_drm_fb *exynos_fb; 148 struct exynos_drm_fb *exynos_fb;
149 struct exynos_drm_gem_obj *exynos_gem_obj;
131 int ret; 150 int ret;
132 151
152 exynos_gem_obj = to_exynos_gem_obj(obj);
153
154 ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
155 if (ret < 0) {
156 DRM_ERROR("cannot use this gem memory type for fb.\n");
157 return ERR_PTR(-EINVAL);
158 }
159
133 exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); 160 exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
134 if (!exynos_fb) { 161 if (!exynos_fb) {
135 DRM_ERROR("failed to allocate exynos drm framebuffer\n"); 162 DRM_ERROR("failed to allocate exynos drm framebuffer\n");
136 return ERR_PTR(-ENOMEM); 163 return ERR_PTR(-ENOMEM);
137 } 164 }
138 165
166 drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
167 exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
168
139 ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs); 169 ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
140 if (ret) { 170 if (ret) {
141 DRM_ERROR("failed to initialize framebuffer\n"); 171 DRM_ERROR("failed to initialize framebuffer\n");
142 return ERR_PTR(ret); 172 return ERR_PTR(ret);
143 } 173 }
144 174
145 drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
146 exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
147
148 return &exynos_fb->fb; 175 return &exynos_fb->fb;
149} 176}
150 177
@@ -190,9 +217,8 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
190 struct drm_mode_fb_cmd2 *mode_cmd) 217 struct drm_mode_fb_cmd2 *mode_cmd)
191{ 218{
192 struct drm_gem_object *obj; 219 struct drm_gem_object *obj;
193 struct drm_framebuffer *fb;
194 struct exynos_drm_fb *exynos_fb; 220 struct exynos_drm_fb *exynos_fb;
195 int i; 221 int i, ret;
196 222
197 DRM_DEBUG_KMS("%s\n", __FILE__); 223 DRM_DEBUG_KMS("%s\n", __FILE__);
198 224
@@ -202,30 +228,56 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
202 return ERR_PTR(-ENOENT); 228 return ERR_PTR(-ENOENT);
203 } 229 }
204 230
205 fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj); 231 exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
206 if (IS_ERR(fb)) { 232 if (!exynos_fb) {
207 drm_gem_object_unreference_unlocked(obj); 233 DRM_ERROR("failed to allocate exynos drm framebuffer\n");
208 return fb; 234 return ERR_PTR(-ENOMEM);
209 } 235 }
210 236
211 exynos_fb = to_exynos_fb(fb); 237 drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
238 exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
212 exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd); 239 exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd);
213 240
214 DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt); 241 DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
215 242
216 for (i = 1; i < exynos_fb->buf_cnt; i++) { 243 for (i = 1; i < exynos_fb->buf_cnt; i++) {
244 struct exynos_drm_gem_obj *exynos_gem_obj;
245 int ret;
246
217 obj = drm_gem_object_lookup(dev, file_priv, 247 obj = drm_gem_object_lookup(dev, file_priv,
218 mode_cmd->handles[i]); 248 mode_cmd->handles[i]);
219 if (!obj) { 249 if (!obj) {
220 DRM_ERROR("failed to lookup gem object\n"); 250 DRM_ERROR("failed to lookup gem object\n");
221 exynos_drm_fb_destroy(fb); 251 kfree(exynos_fb);
222 return ERR_PTR(-ENOENT); 252 return ERR_PTR(-ENOENT);
223 } 253 }
224 254
255 exynos_gem_obj = to_exynos_gem_obj(obj);
256
257 ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
258 if (ret < 0) {
259 DRM_ERROR("cannot use this gem memory type for fb.\n");
260 kfree(exynos_fb);
261 return ERR_PTR(ret);
262 }
263
225 exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj); 264 exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
226 } 265 }
227 266
228 return fb; 267 ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
268 if (ret) {
269 for (i = 0; i < exynos_fb->buf_cnt; i++) {
270 struct exynos_drm_gem_obj *gem_obj;
271
272 gem_obj = exynos_fb->exynos_gem_obj[i];
273 drm_gem_object_unreference_unlocked(&gem_obj->base);
274 }
275
276 kfree(exynos_fb);
277 return ERR_PTR(ret);
278 }
279
280 return &exynos_fb->fb;
229} 281}
230 282
231struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb, 283struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
@@ -243,9 +295,7 @@ struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
243 if (!buffer) 295 if (!buffer)
244 return NULL; 296 return NULL;
245 297
246 DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n", 298 DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)buffer->dma_addr);
247 (unsigned long)buffer->kvaddr,
248 (unsigned long)buffer->dma_addr);
249 299
250 return buffer; 300 return buffer;
251} 301}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
index 96262e54f76d..517471b37566 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -5,24 +5,10 @@
5 * Joonyoung Shim <jy0922.shim@samsung.com> 5 * Joonyoung Shim <jy0922.shim@samsung.com>
6 * Seung-Woo Kim <sw0312.kim@samsung.com> 6 * Seung-Woo Kim <sw0312.kim@samsung.com>
7 * 7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 8 * This program is free software; you can redistribute it and/or modify it
9 * copy of this software and associated documentation files (the "Software"), 9 * under the terms of the GNU General Public License as published by the
10 * to deal in the Software without restriction, including without limitation 10 * Free Software Foundation; either version 2 of the License, or (at your
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 11 * option) any later version.
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 */ 12 */
27 13
28#ifndef _EXYNOS_DRM_FB_H_ 14#ifndef _EXYNOS_DRM_FB_H_
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 67eb6ba56edf..71f867340a88 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -6,24 +6,10 @@
6 * Joonyoung Shim <jy0922.shim@samsung.com> 6 * Joonyoung Shim <jy0922.shim@samsung.com>
7 * Seung-Woo Kim <sw0312.kim@samsung.com> 7 * Seung-Woo Kim <sw0312.kim@samsung.com>
8 * 8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a 9 * This program is free software; you can redistribute it and/or modify it
10 * copy of this software and associated documentation files (the "Software"), 10 * under the terms of the GNU General Public License as published by the
11 * to deal in the Software without restriction, including without limitation 11 * Free Software Foundation; either version 2 of the License, or (at your
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 * option) any later version.
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */ 13 */
28 14
29#include <drm/drmP.h> 15#include <drm/drmP.h>
@@ -34,6 +20,7 @@
34#include "exynos_drm_drv.h" 20#include "exynos_drm_drv.h"
35#include "exynos_drm_fb.h" 21#include "exynos_drm_fb.h"
36#include "exynos_drm_gem.h" 22#include "exynos_drm_gem.h"
23#include "exynos_drm_iommu.h"
37 24
38#define MAX_CONNECTOR 4 25#define MAX_CONNECTOR 4
39#define PREFERRED_BPP 32 26#define PREFERRED_BPP 32
@@ -46,8 +33,38 @@ struct exynos_drm_fbdev {
46 struct exynos_drm_gem_obj *exynos_gem_obj; 33 struct exynos_drm_gem_obj *exynos_gem_obj;
47}; 34};
48 35
36static int exynos_drm_fb_mmap(struct fb_info *info,
37 struct vm_area_struct *vma)
38{
39 struct drm_fb_helper *helper = info->par;
40 struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
41 struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
42 struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
43 unsigned long vm_size;
44 int ret;
45
46 DRM_DEBUG_KMS("%s\n", __func__);
47
48 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
49
50 vm_size = vma->vm_end - vma->vm_start;
51
52 if (vm_size > buffer->size)
53 return -EINVAL;
54
55 ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
56 buffer->dma_addr, buffer->size, &buffer->dma_attrs);
57 if (ret < 0) {
58 DRM_ERROR("failed to mmap.\n");
59 return ret;
60 }
61
62 return 0;
63}
64
49static struct fb_ops exynos_drm_fb_ops = { 65static struct fb_ops exynos_drm_fb_ops = {
50 .owner = THIS_MODULE, 66 .owner = THIS_MODULE,
67 .fb_mmap = exynos_drm_fb_mmap,
51 .fb_fillrect = cfb_fillrect, 68 .fb_fillrect = cfb_fillrect,
52 .fb_copyarea = cfb_copyarea, 69 .fb_copyarea = cfb_copyarea,
53 .fb_imageblit = cfb_imageblit, 70 .fb_imageblit = cfb_imageblit,
@@ -79,6 +96,26 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
79 return -EFAULT; 96 return -EFAULT;
80 } 97 }
81 98
99 /* map pages with kernel virtual space. */
100 if (!buffer->kvaddr) {
101 if (is_drm_iommu_supported(dev)) {
102 unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
103
104 buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP,
105 pgprot_writecombine(PAGE_KERNEL));
106 } else {
107 phys_addr_t dma_addr = buffer->dma_addr;
108 if (dma_addr)
109 buffer->kvaddr = phys_to_virt(dma_addr);
110 else
111 buffer->kvaddr = (void __iomem *)NULL;
112 }
113 if (!buffer->kvaddr) {
114 DRM_ERROR("failed to map pages to kernel space.\n");
115 return -EIO;
116 }
117 }
118
82 /* buffer count to framebuffer always is 1 at booting time. */ 119 /* buffer count to framebuffer always is 1 at booting time. */
83 exynos_drm_fb_set_buf_cnt(fb, 1); 120 exynos_drm_fb_set_buf_cnt(fb, 1);
84 121
@@ -87,7 +124,12 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
87 124
88 dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr; 125 dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
89 fbi->screen_base = buffer->kvaddr + offset; 126 fbi->screen_base = buffer->kvaddr + offset;
90 fbi->fix.smem_start = (unsigned long)(buffer->dma_addr + offset); 127 if (is_drm_iommu_supported(dev))
128 fbi->fix.smem_start = (unsigned long)
129 (page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
130 else
131 fbi->fix.smem_start = (unsigned long)buffer->dma_addr;
132
91 fbi->screen_size = size; 133 fbi->screen_size = size;
92 fbi->fix.smem_len = size; 134 fbi->fix.smem_len = size;
93 135
@@ -133,7 +175,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
133 exynos_gem_obj = exynos_drm_gem_create(dev, 0, size); 175 exynos_gem_obj = exynos_drm_gem_create(dev, 0, size);
134 if (IS_ERR(exynos_gem_obj)) { 176 if (IS_ERR(exynos_gem_obj)) {
135 ret = PTR_ERR(exynos_gem_obj); 177 ret = PTR_ERR(exynos_gem_obj);
136 goto out; 178 goto err_release_framebuffer;
137 } 179 }
138 180
139 exynos_fbdev->exynos_gem_obj = exynos_gem_obj; 181 exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
@@ -143,7 +185,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
143 if (IS_ERR_OR_NULL(helper->fb)) { 185 if (IS_ERR_OR_NULL(helper->fb)) {
144 DRM_ERROR("failed to create drm framebuffer.\n"); 186 DRM_ERROR("failed to create drm framebuffer.\n");
145 ret = PTR_ERR(helper->fb); 187 ret = PTR_ERR(helper->fb);
146 goto out; 188 goto err_destroy_gem;
147 } 189 }
148 190
149 helper->fbdev = fbi; 191 helper->fbdev = fbi;
@@ -155,14 +197,24 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
155 ret = fb_alloc_cmap(&fbi->cmap, 256, 0); 197 ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
156 if (ret) { 198 if (ret) {
157 DRM_ERROR("failed to allocate cmap.\n"); 199 DRM_ERROR("failed to allocate cmap.\n");
158 goto out; 200 goto err_destroy_framebuffer;
159 } 201 }
160 202
161 ret = exynos_drm_fbdev_update(helper, helper->fb); 203 ret = exynos_drm_fbdev_update(helper, helper->fb);
162 if (ret < 0) { 204 if (ret < 0)
163 fb_dealloc_cmap(&fbi->cmap); 205 goto err_dealloc_cmap;
164 goto out; 206
165 } 207 mutex_unlock(&dev->struct_mutex);
208 return ret;
209
210err_dealloc_cmap:
211 fb_dealloc_cmap(&fbi->cmap);
212err_destroy_framebuffer:
213 drm_framebuffer_cleanup(helper->fb);
214err_destroy_gem:
215 exynos_drm_gem_destroy(exynos_gem_obj);
216err_release_framebuffer:
217 framebuffer_release(fbi);
166 218
167/* 219/*
168 * if failed, all resources allocated above would be released by 220 * if failed, all resources allocated above would be released by
@@ -264,8 +316,13 @@ err_init:
264static void exynos_drm_fbdev_destroy(struct drm_device *dev, 316static void exynos_drm_fbdev_destroy(struct drm_device *dev,
265 struct drm_fb_helper *fb_helper) 317 struct drm_fb_helper *fb_helper)
266{ 318{
319 struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
320 struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
267 struct drm_framebuffer *fb; 321 struct drm_framebuffer *fb;
268 322
323 if (is_drm_iommu_supported(dev) && exynos_gem_obj->buffer->kvaddr)
324 vunmap(exynos_gem_obj->buffer->kvaddr);
325
269 /* release drm framebuffer and real buffer */ 326 /* release drm framebuffer and real buffer */
270 if (fb_helper->fb && fb_helper->fb->funcs) { 327 if (fb_helper->fb && fb_helper->fb->funcs) {
271 fb = fb_helper->fb; 328 fb = fb_helper->fb;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
index ccfce8a1a451..e16d7f0ae192 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
@@ -6,24 +6,10 @@
6 * Joonyoung Shim <jy0922.shim@samsung.com> 6 * Joonyoung Shim <jy0922.shim@samsung.com>
7 * Seung-Woo Kim <sw0312.kim@samsung.com> 7 * Seung-Woo Kim <sw0312.kim@samsung.com>
8 * 8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a 9 * This program is free software; you can redistribute it and/or modify it
10 * copy of this software and associated documentation files (the "Software"), 10 * under the terms of the GNU General Public License as published by the
11 * to deal in the Software without restriction, including without limitation 11 * Free Software Foundation; either version 2 of the License, or (at your
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 * option) any later version.
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */ 13 */
28 14
29#ifndef _EXYNOS_DRM_FBDEV_H_ 15#ifndef _EXYNOS_DRM_FBDEV_H_
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
new file mode 100644
index 000000000000..67a83e69544b
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -0,0 +1,1955 @@
1/*
2 * Copyright (C) 2012 Samsung Electronics Co.Ltd
3 * Authors:
4 * Eunchul Kim <chulspro.kim@samsung.com>
5 * Jinyoung Jeon <jy0.jeon@samsung.com>
6 * Sangmin Lee <lsmin.lee@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 */
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/clk.h>
18#include <linux/pm_runtime.h>
19#include <plat/map-base.h>
20
21#include <drm/drmP.h>
22#include <drm/exynos_drm.h>
23#include "regs-fimc.h"
24#include "exynos_drm_ipp.h"
25#include "exynos_drm_fimc.h"
26
27/*
28 * FIMC stands for Fully Interactive Mobile Camera and
29 * supports image scaler/rotator and input/output DMA operations.
30 * input DMA reads image data from the memory.
31 * output DMA writes image data to memory.
32 * FIMC supports image rotation and image effect functions.
33 *
34 * M2M operation : supports crop/scale/rotation/csc so on.
35 * Memory ----> FIMC H/W ----> Memory.
36 * Writeback operation : supports cloned screen with FIMD.
37 * FIMD ----> FIMC H/W ----> Memory.
38 * Output operation : supports direct display using local path.
39 * Memory ----> FIMC H/W ----> FIMD.
40 */
41
42/*
43 * TODO
44 * 1. check suspend/resume api if needed.
45 * 2. need to check use case platform_device_id.
46 * 3. check src/dst size with, height.
47 * 4. added check_prepare api for right register.
48 * 5. need to add supported list in prop_list.
49 * 6. check prescaler/scaler optimization.
50 */
51
52#define FIMC_MAX_DEVS 4
53#define FIMC_MAX_SRC 2
54#define FIMC_MAX_DST 32
55#define FIMC_SHFACTOR 10
56#define FIMC_BUF_STOP 1
57#define FIMC_BUF_START 2
58#define FIMC_REG_SZ 32
59#define FIMC_WIDTH_ITU_709 1280
60#define FIMC_REFRESH_MAX 60
61#define FIMC_REFRESH_MIN 12
62#define FIMC_CROP_MAX 8192
63#define FIMC_CROP_MIN 32
64#define FIMC_SCALE_MAX 4224
65#define FIMC_SCALE_MIN 32
66
67#define get_fimc_context(dev) platform_get_drvdata(to_platform_device(dev))
68#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
69 struct fimc_context, ippdrv);
70#define fimc_read(offset) readl(ctx->regs + (offset))
71#define fimc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
72
73enum fimc_wb {
74 FIMC_WB_NONE,
75 FIMC_WB_A,
76 FIMC_WB_B,
77};
78
79/*
80 * A structure of scaler.
81 *
82 * @range: narrow, wide.
83 * @bypass: unused scaler path.
84 * @up_h: horizontal scale up.
85 * @up_v: vertical scale up.
86 * @hratio: horizontal ratio.
87 * @vratio: vertical ratio.
88 */
89struct fimc_scaler {
90 bool range;
91 bool bypass;
92 bool up_h;
93 bool up_v;
94 u32 hratio;
95 u32 vratio;
96};
97
98/*
99 * A structure of scaler capability.
100 *
101 * find user manual table 43-1.
102 * @in_hori: scaler input horizontal size.
103 * @bypass: scaler bypass mode.
104 * @dst_h_wo_rot: target horizontal size without output rotation.
105 * @dst_h_rot: target horizontal size with output rotation.
106 * @rl_w_wo_rot: real width without input rotation.
107 * @rl_h_rot: real height without output rotation.
108 */
109struct fimc_capability {
110 /* scaler */
111 u32 in_hori;
112 u32 bypass;
113 /* output rotator */
114 u32 dst_h_wo_rot;
115 u32 dst_h_rot;
116 /* input rotator */
117 u32 rl_w_wo_rot;
118 u32 rl_h_rot;
119};
120
121/*
122 * A structure of fimc driver data.
123 *
124 * @parent_clk: name of parent clock.
125 */
126struct fimc_driverdata {
127 char *parent_clk;
128};
129
130/*
131 * A structure of fimc context.
132 *
133 * @ippdrv: prepare initialization using ippdrv.
134 * @regs_res: register resources.
135 * @regs: memory mapped io registers.
136 * @lock: locking of operations.
137 * @sclk_fimc_clk: fimc source clock.
138 * @fimc_clk: fimc clock.
139 * @wb_clk: writeback a clock.
140 * @wb_b_clk: writeback b clock.
141 * @sc: scaler infomations.
142 * @odr: ordering of YUV.
143 * @ver: fimc version.
144 * @pol: porarity of writeback.
145 * @id: fimc id.
146 * @irq: irq number.
147 * @suspended: qos operations.
148 */
149struct fimc_context {
150 struct exynos_drm_ippdrv ippdrv;
151 struct resource *regs_res;
152 void __iomem *regs;
153 struct mutex lock;
154 struct clk *sclk_fimc_clk;
155 struct clk *fimc_clk;
156 struct clk *wb_clk;
157 struct clk *wb_b_clk;
158 struct fimc_scaler sc;
159 struct fimc_driverdata *ddata;
160 struct exynos_drm_ipp_pol pol;
161 int id;
162 int irq;
163 bool suspended;
164};
165
166static void fimc_sw_reset(struct fimc_context *ctx)
167{
168 u32 cfg;
169
170 DRM_DEBUG_KMS("%s\n", __func__);
171
172 /* stop dma operation */
173 cfg = fimc_read(EXYNOS_CISTATUS);
174 if (EXYNOS_CISTATUS_GET_ENVID_STATUS(cfg)) {
175 cfg = fimc_read(EXYNOS_MSCTRL);
176 cfg &= ~EXYNOS_MSCTRL_ENVID;
177 fimc_write(cfg, EXYNOS_MSCTRL);
178 }
179
180 cfg = fimc_read(EXYNOS_CISRCFMT);
181 cfg |= EXYNOS_CISRCFMT_ITU601_8BIT;
182 fimc_write(cfg, EXYNOS_CISRCFMT);
183
184 /* disable image capture */
185 cfg = fimc_read(EXYNOS_CIIMGCPT);
186 cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
187 fimc_write(cfg, EXYNOS_CIIMGCPT);
188
189 /* s/w reset */
190 cfg = fimc_read(EXYNOS_CIGCTRL);
191 cfg |= (EXYNOS_CIGCTRL_SWRST);
192 fimc_write(cfg, EXYNOS_CIGCTRL);
193
194 /* s/w reset complete */
195 cfg = fimc_read(EXYNOS_CIGCTRL);
196 cfg &= ~EXYNOS_CIGCTRL_SWRST;
197 fimc_write(cfg, EXYNOS_CIGCTRL);
198
199 /* reset sequence */
200 fimc_write(0x0, EXYNOS_CIFCNTSEQ);
201}
202
203static void fimc_set_camblk_fimd0_wb(struct fimc_context *ctx)
204{
205 u32 camblk_cfg;
206
207 DRM_DEBUG_KMS("%s\n", __func__);
208
209 camblk_cfg = readl(SYSREG_CAMERA_BLK);
210 camblk_cfg &= ~(SYSREG_FIMD0WB_DEST_MASK);
211 camblk_cfg |= ctx->id << (SYSREG_FIMD0WB_DEST_SHIFT);
212
213 writel(camblk_cfg, SYSREG_CAMERA_BLK);
214}
215
216static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
217{
218 u32 cfg;
219
220 DRM_DEBUG_KMS("%s:wb[%d]\n", __func__, wb);
221
222 cfg = fimc_read(EXYNOS_CIGCTRL);
223 cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK |
224 EXYNOS_CIGCTRL_SELCAM_ITU_MASK |
225 EXYNOS_CIGCTRL_SELCAM_MIPI_MASK |
226 EXYNOS_CIGCTRL_SELCAM_FIMC_MASK |
227 EXYNOS_CIGCTRL_SELWB_CAMIF_MASK |
228 EXYNOS_CIGCTRL_SELWRITEBACK_MASK);
229
230 switch (wb) {
231 case FIMC_WB_A:
232 cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_A |
233 EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
234 break;
235 case FIMC_WB_B:
236 cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_B |
237 EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
238 break;
239 case FIMC_WB_NONE:
240 default:
241 cfg |= (EXYNOS_CIGCTRL_SELCAM_ITU_A |
242 EXYNOS_CIGCTRL_SELWRITEBACK_A |
243 EXYNOS_CIGCTRL_SELCAM_MIPI_A |
244 EXYNOS_CIGCTRL_SELCAM_FIMC_ITU);
245 break;
246 }
247
248 fimc_write(cfg, EXYNOS_CIGCTRL);
249}
250
251static void fimc_set_polarity(struct fimc_context *ctx,
252 struct exynos_drm_ipp_pol *pol)
253{
254 u32 cfg;
255
256 DRM_DEBUG_KMS("%s:inv_pclk[%d]inv_vsync[%d]\n",
257 __func__, pol->inv_pclk, pol->inv_vsync);
258 DRM_DEBUG_KMS("%s:inv_href[%d]inv_hsync[%d]\n",
259 __func__, pol->inv_href, pol->inv_hsync);
260
261 cfg = fimc_read(EXYNOS_CIGCTRL);
262 cfg &= ~(EXYNOS_CIGCTRL_INVPOLPCLK | EXYNOS_CIGCTRL_INVPOLVSYNC |
263 EXYNOS_CIGCTRL_INVPOLHREF | EXYNOS_CIGCTRL_INVPOLHSYNC);
264
265 if (pol->inv_pclk)
266 cfg |= EXYNOS_CIGCTRL_INVPOLPCLK;
267 if (pol->inv_vsync)
268 cfg |= EXYNOS_CIGCTRL_INVPOLVSYNC;
269 if (pol->inv_href)
270 cfg |= EXYNOS_CIGCTRL_INVPOLHREF;
271 if (pol->inv_hsync)
272 cfg |= EXYNOS_CIGCTRL_INVPOLHSYNC;
273
274 fimc_write(cfg, EXYNOS_CIGCTRL);
275}
276
277static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
278{
279 u32 cfg;
280
281 DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
282
283 cfg = fimc_read(EXYNOS_CIGCTRL);
284 if (enable)
285 cfg |= EXYNOS_CIGCTRL_CAM_JPEG;
286 else
287 cfg &= ~EXYNOS_CIGCTRL_CAM_JPEG;
288
289 fimc_write(cfg, EXYNOS_CIGCTRL);
290}
291
292static void fimc_handle_irq(struct fimc_context *ctx, bool enable,
293 bool overflow, bool level)
294{
295 u32 cfg;
296
297 DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__,
298 enable, overflow, level);
299
300 cfg = fimc_read(EXYNOS_CIGCTRL);
301 if (enable) {
302 cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_LEVEL);
303 cfg |= EXYNOS_CIGCTRL_IRQ_ENABLE;
304 if (overflow)
305 cfg |= EXYNOS_CIGCTRL_IRQ_OVFEN;
306 if (level)
307 cfg |= EXYNOS_CIGCTRL_IRQ_LEVEL;
308 } else
309 cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_ENABLE);
310
311 fimc_write(cfg, EXYNOS_CIGCTRL);
312}
313
314static void fimc_clear_irq(struct fimc_context *ctx)
315{
316 u32 cfg;
317
318 DRM_DEBUG_KMS("%s\n", __func__);
319
320 cfg = fimc_read(EXYNOS_CIGCTRL);
321 cfg |= EXYNOS_CIGCTRL_IRQ_CLR;
322 fimc_write(cfg, EXYNOS_CIGCTRL);
323}
324
325static bool fimc_check_ovf(struct fimc_context *ctx)
326{
327 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
328 u32 cfg, status, flag;
329
330 status = fimc_read(EXYNOS_CISTATUS);
331 flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB |
332 EXYNOS_CISTATUS_OVFICR;
333
334 DRM_DEBUG_KMS("%s:flag[0x%x]\n", __func__, flag);
335
336 if (status & flag) {
337 cfg = fimc_read(EXYNOS_CIWDOFST);
338 cfg |= (EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
339 EXYNOS_CIWDOFST_CLROVFICR);
340
341 fimc_write(cfg, EXYNOS_CIWDOFST);
342
343 cfg = fimc_read(EXYNOS_CIWDOFST);
344 cfg &= ~(EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
345 EXYNOS_CIWDOFST_CLROVFICR);
346
347 fimc_write(cfg, EXYNOS_CIWDOFST);
348
349 dev_err(ippdrv->dev, "occured overflow at %d, status 0x%x.\n",
350 ctx->id, status);
351 return true;
352 }
353
354 return false;
355}
356
357static bool fimc_check_frame_end(struct fimc_context *ctx)
358{
359 u32 cfg;
360
361 cfg = fimc_read(EXYNOS_CISTATUS);
362
363 DRM_DEBUG_KMS("%s:cfg[0x%x]\n", __func__, cfg);
364
365 if (!(cfg & EXYNOS_CISTATUS_FRAMEEND))
366 return false;
367
368 cfg &= ~(EXYNOS_CISTATUS_FRAMEEND);
369 fimc_write(cfg, EXYNOS_CISTATUS);
370
371 return true;
372}
373
374static int fimc_get_buf_id(struct fimc_context *ctx)
375{
376 u32 cfg;
377 int frame_cnt, buf_id;
378
379 DRM_DEBUG_KMS("%s\n", __func__);
380
381 cfg = fimc_read(EXYNOS_CISTATUS2);
382 frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg);
383
384 if (frame_cnt == 0)
385 frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg);
386
387 DRM_DEBUG_KMS("%s:present[%d]before[%d]\n", __func__,
388 EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg),
389 EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg));
390
391 if (frame_cnt == 0) {
392 DRM_ERROR("failed to get frame count.\n");
393 return -EIO;
394 }
395
396 buf_id = frame_cnt - 1;
397 DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
398
399 return buf_id;
400}
401
402static void fimc_handle_lastend(struct fimc_context *ctx, bool enable)
403{
404 u32 cfg;
405
406 DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
407
408 cfg = fimc_read(EXYNOS_CIOCTRL);
409 if (enable)
410 cfg |= EXYNOS_CIOCTRL_LASTENDEN;
411 else
412 cfg &= ~EXYNOS_CIOCTRL_LASTENDEN;
413
414 fimc_write(cfg, EXYNOS_CIOCTRL);
415}
416
417
418static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
419{
420 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
421 u32 cfg;
422
423 DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
424
425 /* RGB */
426 cfg = fimc_read(EXYNOS_CISCCTRL);
427 cfg &= ~EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK;
428
429 switch (fmt) {
430 case DRM_FORMAT_RGB565:
431 cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565;
432 fimc_write(cfg, EXYNOS_CISCCTRL);
433 return 0;
434 case DRM_FORMAT_RGB888:
435 case DRM_FORMAT_XRGB8888:
436 cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB888;
437 fimc_write(cfg, EXYNOS_CISCCTRL);
438 return 0;
439 default:
440 /* bypass */
441 break;
442 }
443
444 /* YUV */
445 cfg = fimc_read(EXYNOS_MSCTRL);
446 cfg &= ~(EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK |
447 EXYNOS_MSCTRL_C_INT_IN_2PLANE |
448 EXYNOS_MSCTRL_ORDER422_YCBYCR);
449
450 switch (fmt) {
451 case DRM_FORMAT_YUYV:
452 cfg |= EXYNOS_MSCTRL_ORDER422_YCBYCR;
453 break;
454 case DRM_FORMAT_YVYU:
455 cfg |= EXYNOS_MSCTRL_ORDER422_YCRYCB;
456 break;
457 case DRM_FORMAT_UYVY:
458 cfg |= EXYNOS_MSCTRL_ORDER422_CBYCRY;
459 break;
460 case DRM_FORMAT_VYUY:
461 case DRM_FORMAT_YUV444:
462 cfg |= EXYNOS_MSCTRL_ORDER422_CRYCBY;
463 break;
464 case DRM_FORMAT_NV21:
465 case DRM_FORMAT_NV61:
466 cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CRCB |
467 EXYNOS_MSCTRL_C_INT_IN_2PLANE);
468 break;
469 case DRM_FORMAT_YUV422:
470 case DRM_FORMAT_YUV420:
471 case DRM_FORMAT_YVU420:
472 cfg |= EXYNOS_MSCTRL_C_INT_IN_3PLANE;
473 break;
474 case DRM_FORMAT_NV12:
475 case DRM_FORMAT_NV12MT:
476 case DRM_FORMAT_NV16:
477 cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CBCR |
478 EXYNOS_MSCTRL_C_INT_IN_2PLANE);
479 break;
480 default:
481 dev_err(ippdrv->dev, "inavlid source yuv order 0x%x.\n", fmt);
482 return -EINVAL;
483 }
484
485 fimc_write(cfg, EXYNOS_MSCTRL);
486
487 return 0;
488}
489
490static int fimc_src_set_fmt(struct device *dev, u32 fmt)
491{
492 struct fimc_context *ctx = get_fimc_context(dev);
493 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
494 u32 cfg;
495
496 DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
497
498 cfg = fimc_read(EXYNOS_MSCTRL);
499 cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB;
500
501 switch (fmt) {
502 case DRM_FORMAT_RGB565:
503 case DRM_FORMAT_RGB888:
504 case DRM_FORMAT_XRGB8888:
505 cfg |= EXYNOS_MSCTRL_INFORMAT_RGB;
506 break;
507 case DRM_FORMAT_YUV444:
508 cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
509 break;
510 case DRM_FORMAT_YUYV:
511 case DRM_FORMAT_YVYU:
512 case DRM_FORMAT_UYVY:
513 case DRM_FORMAT_VYUY:
514 cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE;
515 break;
516 case DRM_FORMAT_NV16:
517 case DRM_FORMAT_NV61:
518 case DRM_FORMAT_YUV422:
519 cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422;
520 break;
521 case DRM_FORMAT_YUV420:
522 case DRM_FORMAT_YVU420:
523 case DRM_FORMAT_NV12:
524 case DRM_FORMAT_NV21:
525 case DRM_FORMAT_NV12MT:
526 cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
527 break;
528 default:
529 dev_err(ippdrv->dev, "inavlid source format 0x%x.\n", fmt);
530 return -EINVAL;
531 }
532
533 fimc_write(cfg, EXYNOS_MSCTRL);
534
535 cfg = fimc_read(EXYNOS_CIDMAPARAM);
536 cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK;
537
538 if (fmt == DRM_FORMAT_NV12MT)
539 cfg |= EXYNOS_CIDMAPARAM_R_MODE_64X32;
540 else
541 cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR;
542
543 fimc_write(cfg, EXYNOS_CIDMAPARAM);
544
545 return fimc_src_set_fmt_order(ctx, fmt);
546}
547
548static int fimc_src_set_transf(struct device *dev,
549 enum drm_exynos_degree degree,
550 enum drm_exynos_flip flip, bool *swap)
551{
552 struct fimc_context *ctx = get_fimc_context(dev);
553 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
554 u32 cfg1, cfg2;
555
556 DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
557 degree, flip);
558
559 cfg1 = fimc_read(EXYNOS_MSCTRL);
560 cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR |
561 EXYNOS_MSCTRL_FLIP_Y_MIRROR);
562
563 cfg2 = fimc_read(EXYNOS_CITRGFMT);
564 cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
565
566 switch (degree) {
567 case EXYNOS_DRM_DEGREE_0:
568 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
569 cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
570 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
571 cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
572 break;
573 case EXYNOS_DRM_DEGREE_90:
574 cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
575 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
576 cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
577 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
578 cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
579 break;
580 case EXYNOS_DRM_DEGREE_180:
581 cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
582 EXYNOS_MSCTRL_FLIP_Y_MIRROR);
583 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
584 cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
585 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
586 cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
587 break;
588 case EXYNOS_DRM_DEGREE_270:
589 cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
590 EXYNOS_MSCTRL_FLIP_Y_MIRROR);
591 cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
592 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
593 cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
594 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
595 cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
596 break;
597 default:
598 dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
599 return -EINVAL;
600 }
601
602 fimc_write(cfg1, EXYNOS_MSCTRL);
603 fimc_write(cfg2, EXYNOS_CITRGFMT);
604 *swap = (cfg2 & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) ? 1 : 0;
605
606 return 0;
607}
608
609static int fimc_set_window(struct fimc_context *ctx,
610 struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
611{
612 u32 cfg, h1, h2, v1, v2;
613
614 /* cropped image */
615 h1 = pos->x;
616 h2 = sz->hsize - pos->w - pos->x;
617 v1 = pos->y;
618 v2 = sz->vsize - pos->h - pos->y;
619
620 DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
621 __func__, pos->x, pos->y, pos->w, pos->h, sz->hsize, sz->vsize);
622 DRM_DEBUG_KMS("%s:h1[%d]h2[%d]v1[%d]v2[%d]\n", __func__,
623 h1, h2, v1, v2);
624
625 /*
626 * set window offset 1, 2 size
627 * check figure 43-21 in user manual
628 */
629 cfg = fimc_read(EXYNOS_CIWDOFST);
630 cfg &= ~(EXYNOS_CIWDOFST_WINHOROFST_MASK |
631 EXYNOS_CIWDOFST_WINVEROFST_MASK);
632 cfg |= (EXYNOS_CIWDOFST_WINHOROFST(h1) |
633 EXYNOS_CIWDOFST_WINVEROFST(v1));
634 cfg |= EXYNOS_CIWDOFST_WINOFSEN;
635 fimc_write(cfg, EXYNOS_CIWDOFST);
636
637 cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) |
638 EXYNOS_CIWDOFST2_WINVEROFST2(v2));
639 fimc_write(cfg, EXYNOS_CIWDOFST2);
640
641 return 0;
642}
643
644static int fimc_src_set_size(struct device *dev, int swap,
645 struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
646{
647 struct fimc_context *ctx = get_fimc_context(dev);
648 struct drm_exynos_pos img_pos = *pos;
649 struct drm_exynos_sz img_sz = *sz;
650 u32 cfg;
651
652 DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
653 __func__, swap, sz->hsize, sz->vsize);
654
655 /* original size */
656 cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) |
657 EXYNOS_ORGISIZE_VERTICAL(img_sz.vsize));
658
659 fimc_write(cfg, EXYNOS_ORGISIZE);
660
661 DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n", __func__,
662 pos->x, pos->y, pos->w, pos->h);
663
664 if (swap) {
665 img_pos.w = pos->h;
666 img_pos.h = pos->w;
667 img_sz.hsize = sz->vsize;
668 img_sz.vsize = sz->hsize;
669 }
670
671 /* set input DMA image size */
672 cfg = fimc_read(EXYNOS_CIREAL_ISIZE);
673 cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK |
674 EXYNOS_CIREAL_ISIZE_WIDTH_MASK);
675 cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(img_pos.w) |
676 EXYNOS_CIREAL_ISIZE_HEIGHT(img_pos.h));
677 fimc_write(cfg, EXYNOS_CIREAL_ISIZE);
678
679 /*
680 * set input FIFO image size
681 * for now, we support only ITU601 8 bit mode
682 */
683 cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
684 EXYNOS_CISRCFMT_SOURCEHSIZE(img_sz.hsize) |
685 EXYNOS_CISRCFMT_SOURCEVSIZE(img_sz.vsize));
686 fimc_write(cfg, EXYNOS_CISRCFMT);
687
688 /* offset Y(RGB), Cb, Cr */
689 cfg = (EXYNOS_CIIYOFF_HORIZONTAL(img_pos.x) |
690 EXYNOS_CIIYOFF_VERTICAL(img_pos.y));
691 fimc_write(cfg, EXYNOS_CIIYOFF);
692 cfg = (EXYNOS_CIICBOFF_HORIZONTAL(img_pos.x) |
693 EXYNOS_CIICBOFF_VERTICAL(img_pos.y));
694 fimc_write(cfg, EXYNOS_CIICBOFF);
695 cfg = (EXYNOS_CIICROFF_HORIZONTAL(img_pos.x) |
696 EXYNOS_CIICROFF_VERTICAL(img_pos.y));
697 fimc_write(cfg, EXYNOS_CIICROFF);
698
699 return fimc_set_window(ctx, &img_pos, &img_sz);
700}
701
702static int fimc_src_set_addr(struct device *dev,
703 struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
704 enum drm_exynos_ipp_buf_type buf_type)
705{
706 struct fimc_context *ctx = get_fimc_context(dev);
707 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
708 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
709 struct drm_exynos_ipp_property *property;
710 struct drm_exynos_ipp_config *config;
711
712 if (!c_node) {
713 DRM_ERROR("failed to get c_node.\n");
714 return -EINVAL;
715 }
716
717 property = &c_node->property;
718
719 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
720 property->prop_id, buf_id, buf_type);
721
722 if (buf_id > FIMC_MAX_SRC) {
723 dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
724 return -ENOMEM;
725 }
726
727 /* address register set */
728 switch (buf_type) {
729 case IPP_BUF_ENQUEUE:
730 config = &property->config[EXYNOS_DRM_OPS_SRC];
731 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
732 EXYNOS_CIIYSA(buf_id));
733
734 if (config->fmt == DRM_FORMAT_YVU420) {
735 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
736 EXYNOS_CIICBSA(buf_id));
737 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
738 EXYNOS_CIICRSA(buf_id));
739 } else {
740 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
741 EXYNOS_CIICBSA(buf_id));
742 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
743 EXYNOS_CIICRSA(buf_id));
744 }
745 break;
746 case IPP_BUF_DEQUEUE:
747 fimc_write(0x0, EXYNOS_CIIYSA(buf_id));
748 fimc_write(0x0, EXYNOS_CIICBSA(buf_id));
749 fimc_write(0x0, EXYNOS_CIICRSA(buf_id));
750 break;
751 default:
752 /* bypass */
753 break;
754 }
755
756 return 0;
757}
758
759static struct exynos_drm_ipp_ops fimc_src_ops = {
760 .set_fmt = fimc_src_set_fmt,
761 .set_transf = fimc_src_set_transf,
762 .set_size = fimc_src_set_size,
763 .set_addr = fimc_src_set_addr,
764};
765
766static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
767{
768 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
769 u32 cfg;
770
771 DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
772
773 /* RGB */
774 cfg = fimc_read(EXYNOS_CISCCTRL);
775 cfg &= ~EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK;
776
777 switch (fmt) {
778 case DRM_FORMAT_RGB565:
779 cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565;
780 fimc_write(cfg, EXYNOS_CISCCTRL);
781 return 0;
782 case DRM_FORMAT_RGB888:
783 cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888;
784 fimc_write(cfg, EXYNOS_CISCCTRL);
785 return 0;
786 case DRM_FORMAT_XRGB8888:
787 cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 |
788 EXYNOS_CISCCTRL_EXTRGB_EXTENSION);
789 fimc_write(cfg, EXYNOS_CISCCTRL);
790 break;
791 default:
792 /* bypass */
793 break;
794 }
795
796 /* YUV */
797 cfg = fimc_read(EXYNOS_CIOCTRL);
798 cfg &= ~(EXYNOS_CIOCTRL_ORDER2P_MASK |
799 EXYNOS_CIOCTRL_ORDER422_MASK |
800 EXYNOS_CIOCTRL_YCBCR_PLANE_MASK);
801
802 switch (fmt) {
803 case DRM_FORMAT_XRGB8888:
804 cfg |= EXYNOS_CIOCTRL_ALPHA_OUT;
805 break;
806 case DRM_FORMAT_YUYV:
807 cfg |= EXYNOS_CIOCTRL_ORDER422_YCBYCR;
808 break;
809 case DRM_FORMAT_YVYU:
810 cfg |= EXYNOS_CIOCTRL_ORDER422_YCRYCB;
811 break;
812 case DRM_FORMAT_UYVY:
813 cfg |= EXYNOS_CIOCTRL_ORDER422_CBYCRY;
814 break;
815 case DRM_FORMAT_VYUY:
816 cfg |= EXYNOS_CIOCTRL_ORDER422_CRYCBY;
817 break;
818 case DRM_FORMAT_NV21:
819 case DRM_FORMAT_NV61:
820 cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB;
821 cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
822 break;
823 case DRM_FORMAT_YUV422:
824 case DRM_FORMAT_YUV420:
825 case DRM_FORMAT_YVU420:
826 cfg |= EXYNOS_CIOCTRL_YCBCR_3PLANE;
827 break;
828 case DRM_FORMAT_NV12:
829 case DRM_FORMAT_NV12MT:
830 case DRM_FORMAT_NV16:
831 cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR;
832 cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
833 break;
834 default:
835 dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
836 return -EINVAL;
837 }
838
839 fimc_write(cfg, EXYNOS_CIOCTRL);
840
841 return 0;
842}
843
844static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
845{
846 struct fimc_context *ctx = get_fimc_context(dev);
847 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
848 u32 cfg;
849
850 DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
851
852 cfg = fimc_read(EXYNOS_CIEXTEN);
853
854 if (fmt == DRM_FORMAT_AYUV) {
855 cfg |= EXYNOS_CIEXTEN_YUV444_OUT;
856 fimc_write(cfg, EXYNOS_CIEXTEN);
857 } else {
858 cfg &= ~EXYNOS_CIEXTEN_YUV444_OUT;
859 fimc_write(cfg, EXYNOS_CIEXTEN);
860
861 cfg = fimc_read(EXYNOS_CITRGFMT);
862 cfg &= ~EXYNOS_CITRGFMT_OUTFORMAT_MASK;
863
864 switch (fmt) {
865 case DRM_FORMAT_RGB565:
866 case DRM_FORMAT_RGB888:
867 case DRM_FORMAT_XRGB8888:
868 cfg |= EXYNOS_CITRGFMT_OUTFORMAT_RGB;
869 break;
870 case DRM_FORMAT_YUYV:
871 case DRM_FORMAT_YVYU:
872 case DRM_FORMAT_UYVY:
873 case DRM_FORMAT_VYUY:
874 cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE;
875 break;
876 case DRM_FORMAT_NV16:
877 case DRM_FORMAT_NV61:
878 case DRM_FORMAT_YUV422:
879 cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422;
880 break;
881 case DRM_FORMAT_YUV420:
882 case DRM_FORMAT_YVU420:
883 case DRM_FORMAT_NV12:
884 case DRM_FORMAT_NV12MT:
885 case DRM_FORMAT_NV21:
886 cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420;
887 break;
888 default:
889 dev_err(ippdrv->dev, "inavlid target format 0x%x.\n",
890 fmt);
891 return -EINVAL;
892 }
893
894 fimc_write(cfg, EXYNOS_CITRGFMT);
895 }
896
897 cfg = fimc_read(EXYNOS_CIDMAPARAM);
898 cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK;
899
900 if (fmt == DRM_FORMAT_NV12MT)
901 cfg |= EXYNOS_CIDMAPARAM_W_MODE_64X32;
902 else
903 cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR;
904
905 fimc_write(cfg, EXYNOS_CIDMAPARAM);
906
907 return fimc_dst_set_fmt_order(ctx, fmt);
908}
909
910static int fimc_dst_set_transf(struct device *dev,
911 enum drm_exynos_degree degree,
912 enum drm_exynos_flip flip, bool *swap)
913{
914 struct fimc_context *ctx = get_fimc_context(dev);
915 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
916 u32 cfg;
917
918 DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
919 degree, flip);
920
921 cfg = fimc_read(EXYNOS_CITRGFMT);
922 cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK;
923 cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
924
925 switch (degree) {
926 case EXYNOS_DRM_DEGREE_0:
927 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
928 cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
929 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
930 cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
931 break;
932 case EXYNOS_DRM_DEGREE_90:
933 cfg |= EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
934 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
935 cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
936 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
937 cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
938 break;
939 case EXYNOS_DRM_DEGREE_180:
940 cfg |= (EXYNOS_CITRGFMT_FLIP_X_MIRROR |
941 EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
942 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
943 cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
944 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
945 cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
946 break;
947 case EXYNOS_DRM_DEGREE_270:
948 cfg |= (EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE |
949 EXYNOS_CITRGFMT_FLIP_X_MIRROR |
950 EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
951 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
952 cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
953 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
954 cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
955 break;
956 default:
957 dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
958 return -EINVAL;
959 }
960
961 fimc_write(cfg, EXYNOS_CITRGFMT);
962 *swap = (cfg & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) ? 1 : 0;
963
964 return 0;
965}
966
967static int fimc_get_ratio_shift(u32 src, u32 dst, u32 *ratio, u32 *shift)
968{
969 DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
970
971 if (src >= dst * 64) {
972 DRM_ERROR("failed to make ratio and shift.\n");
973 return -EINVAL;
974 } else if (src >= dst * 32) {
975 *ratio = 32;
976 *shift = 5;
977 } else if (src >= dst * 16) {
978 *ratio = 16;
979 *shift = 4;
980 } else if (src >= dst * 8) {
981 *ratio = 8;
982 *shift = 3;
983 } else if (src >= dst * 4) {
984 *ratio = 4;
985 *shift = 2;
986 } else if (src >= dst * 2) {
987 *ratio = 2;
988 *shift = 1;
989 } else {
990 *ratio = 1;
991 *shift = 0;
992 }
993
994 return 0;
995}
996
/*
 * fimc_set_prescaler - program the FIMC pre-scaler and compute main
 * scaler ratios.
 * @ctx: FIMC context
 * @sc: scaler state; hratio/vratio/up_h/up_v are filled in here
 * @src: source rectangle
 * @dst: destination rectangle
 *
 * Effective source/destination extents are swapped when the respective
 * INROT90/OUTROT90 bit is set in CITRGFMT.  The pre-scaler ratios come
 * from fimc_get_ratio_shift(); the main-scaler fixed-point ratios
 * (src << 14 / dst, adjusted by the pre-scale factor) are stored in @sc
 * for fimc_set_scaler() to program later.
 *
 * Returns 0 on success or the error from fimc_get_ratio_shift().
 */
static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
		struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
{
	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
	u32 cfg, cfg_ext, shfactor;
	u32 pre_dst_width, pre_dst_height;
	u32 pre_hratio, hfactor, pre_vratio, vfactor;
	int ret = 0;
	u32 src_w, src_h, dst_w, dst_h;

	/* input rotation swaps the effective source width/height */
	cfg_ext = fimc_read(EXYNOS_CITRGFMT);
	if (cfg_ext & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) {
		src_w = src->h;
		src_h = src->w;
	} else {
		src_w = src->w;
		src_h = src->h;
	}

	/* output rotation swaps the effective destination width/height */
	if (cfg_ext & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) {
		dst_w = dst->h;
		dst_h = dst->w;
	} else {
		dst_w = dst->w;
		dst_h = dst->h;
	}

	ret = fimc_get_ratio_shift(src_w, dst_w, &pre_hratio, &hfactor);
	if (ret) {
		dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
		return ret;
	}

	ret = fimc_get_ratio_shift(src_h, dst_h, &pre_vratio, &vfactor);
	if (ret) {
		dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
		return ret;
	}

	pre_dst_width = src_w / pre_hratio;
	pre_dst_height = src_h / pre_vratio;
	DRM_DEBUG_KMS("%s:pre_dst_width[%d]pre_dst_height[%d]\n", __func__,
		pre_dst_width, pre_dst_height);
	DRM_DEBUG_KMS("%s:pre_hratio[%d]hfactor[%d]pre_vratio[%d]vfactor[%d]\n",
		__func__, pre_hratio, hfactor, pre_vratio, vfactor);

	/* main scaler ratios in 14-bit fixed point, after the pre-scale */
	sc->hratio = (src_w << 14) / (dst_w << hfactor);
	sc->vratio = (src_h << 14) / (dst_h << vfactor);
	sc->up_h = (dst_w >= src_w) ? true : false;
	sc->up_v = (dst_h >= src_h) ? true : false;
	DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n",
		__func__, sc->hratio, sc->vratio, sc->up_h, sc->up_v);

	shfactor = FIMC_SHFACTOR - (hfactor + vfactor);
	DRM_DEBUG_KMS("%s:shfactor[%d]\n", __func__, shfactor);

	cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) |
		EXYNOS_CISCPRERATIO_PREHORRATIO(pre_hratio) |
		EXYNOS_CISCPRERATIO_PREVERRATIO(pre_vratio));
	fimc_write(cfg, EXYNOS_CISCPRERATIO);

	cfg = (EXYNOS_CISCPREDST_PREDSTWIDTH(pre_dst_width) |
		EXYNOS_CISCPREDST_PREDSTHEIGHT(pre_dst_height));
	fimc_write(cfg, EXYNOS_CISCPREDST);

	return ret;
}
1064
/*
 * fimc_set_scaler - program the main scaler from the state computed by
 * fimc_set_prescaler().
 * @ctx: FIMC context
 * @sc: scaler state (range/bypass flags and fixed-point ratios)
 *
 * The upper bits of the 14-bit ratios go into CISCCTRL, the low 6 bits
 * into the CIEXTEN extension fields.
 */
static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
{
	u32 cfg, cfg_ext;

	DRM_DEBUG_KMS("%s:range[%d]bypass[%d]up_h[%d]up_v[%d]\n",
		__func__, sc->range, sc->bypass, sc->up_h, sc->up_v);
	DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]\n",
		__func__, sc->hratio, sc->vratio);

	cfg = fimc_read(EXYNOS_CISCCTRL);
	cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS |
		EXYNOS_CISCCTRL_SCALEUP_H | EXYNOS_CISCCTRL_SCALEUP_V |
		EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK |
		EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK |
		EXYNOS_CISCCTRL_CSCR2Y_WIDE |
		EXYNOS_CISCCTRL_CSCY2R_WIDE);

	/* wide-range (full-swing) colour space conversion both ways */
	if (sc->range)
		cfg |= (EXYNOS_CISCCTRL_CSCR2Y_WIDE |
			EXYNOS_CISCCTRL_CSCY2R_WIDE);
	if (sc->bypass)
		cfg |= EXYNOS_CISCCTRL_SCALERBYPASS;
	if (sc->up_h)
		cfg |= EXYNOS_CISCCTRL_SCALEUP_H;
	if (sc->up_v)
		cfg |= EXYNOS_CISCCTRL_SCALEUP_V;

	/* main ratio: high bits here, low 6 bits in CIEXTEN below */
	cfg |= (EXYNOS_CISCCTRL_MAINHORRATIO((sc->hratio >> 6)) |
		EXYNOS_CISCCTRL_MAINVERRATIO((sc->vratio >> 6)));
	fimc_write(cfg, EXYNOS_CISCCTRL);

	cfg_ext = fimc_read(EXYNOS_CIEXTEN);
	cfg_ext &= ~EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK;
	cfg_ext &= ~EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK;
	cfg_ext |= (EXYNOS_CIEXTEN_MAINHORRATIO_EXT(sc->hratio) |
		EXYNOS_CIEXTEN_MAINVERRATIO_EXT(sc->vratio));
	fimc_write(cfg_ext, EXYNOS_CIEXTEN);
}
1103
/*
 * fimc_dst_set_size - program destination buffer geometry.
 * @dev: FIMC platform device
 * @swap: non-zero when a 90/270 rotation swaps width and height
 * @pos: target rectangle within the buffer
 * @sz: full buffer dimensions
 *
 * Writes the original buffer size, selects the CSC coefficients
 * (ITU-R 709 for wide buffers, 601 otherwise), then programs the
 * target size/area and the Y/Cb/Cr start offsets.  When @swap is set,
 * the rectangle and buffer dimensions are exchanged before the target
 * registers are written.
 *
 * Always returns 0.
 */
static int fimc_dst_set_size(struct device *dev, int swap,
		struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
{
	struct fimc_context *ctx = get_fimc_context(dev);
	struct drm_exynos_pos img_pos = *pos;
	struct drm_exynos_sz img_sz = *sz;
	u32 cfg;

	DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
		__func__, swap, sz->hsize, sz->vsize);

	/* original size */
	cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) |
		EXYNOS_ORGOSIZE_VERTICAL(img_sz.vsize));

	fimc_write(cfg, EXYNOS_ORGOSIZE);

	DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n",
		__func__, pos->x, pos->y, pos->w, pos->h);

	/* CSC ITU: pick 709 coefficients for HD-sized buffers */
	cfg = fimc_read(EXYNOS_CIGCTRL);
	cfg &= ~EXYNOS_CIGCTRL_CSC_MASK;

	if (sz->hsize >= FIMC_WIDTH_ITU_709)
		cfg |= EXYNOS_CIGCTRL_CSC_ITU709;
	else
		cfg |= EXYNOS_CIGCTRL_CSC_ITU601;

	fimc_write(cfg, EXYNOS_CIGCTRL);

	/* rotation swaps the local copies only; caller's structs untouched */
	if (swap) {
		img_pos.w = pos->h;
		img_pos.h = pos->w;
		img_sz.hsize = sz->vsize;
		img_sz.vsize = sz->hsize;
	}

	/* target image size */
	cfg = fimc_read(EXYNOS_CITRGFMT);
	cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK |
		EXYNOS_CITRGFMT_TARGETV_MASK);
	cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(img_pos.w) |
		EXYNOS_CITRGFMT_TARGETVSIZE(img_pos.h));
	fimc_write(cfg, EXYNOS_CITRGFMT);

	/* target area */
	cfg = EXYNOS_CITAREA_TARGET_AREA(img_pos.w * img_pos.h);
	fimc_write(cfg, EXYNOS_CITAREA);

	/* offset Y(RGB), Cb, Cr */
	cfg = (EXYNOS_CIOYOFF_HORIZONTAL(img_pos.x) |
		EXYNOS_CIOYOFF_VERTICAL(img_pos.y));
	fimc_write(cfg, EXYNOS_CIOYOFF);
	cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(img_pos.x) |
		EXYNOS_CIOCBOFF_VERTICAL(img_pos.y));
	fimc_write(cfg, EXYNOS_CIOCBOFF);
	cfg = (EXYNOS_CIOCROFF_HORIZONTAL(img_pos.x) |
		EXYNOS_CIOCROFF_VERTICAL(img_pos.y));
	fimc_write(cfg, EXYNOS_CIOCROFF);

	return 0;
}
1167
1168static int fimc_dst_get_buf_seq(struct fimc_context *ctx)
1169{
1170 u32 cfg, i, buf_num = 0;
1171 u32 mask = 0x00000001;
1172
1173 cfg = fimc_read(EXYNOS_CIFCNTSEQ);
1174
1175 for (i = 0; i < FIMC_REG_SZ; i++)
1176 if (cfg & (mask << i))
1177 buf_num++;
1178
1179 DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
1180
1181 return buf_num;
1182}
1183
/*
 * fimc_dst_set_buf_seq - enable or disable one destination buffer slot
 * in the CIFCNTSEQ sequence mask and keep the frame-end interrupt in
 * step with the number of queued buffers.
 * @ctx: FIMC context
 * @buf_id: buffer slot to update
 * @buf_type: IPP_BUF_ENQUEUE enables the slot, IPP_BUF_DEQUEUE disables it
 *
 * Serialised with ctx->lock so concurrent queue/dequeue (including the
 * irq path) see a consistent mask.  The interrupt is switched on once
 * enough buffers are queued (FIMC_BUF_START) and off again when the
 * count drops to FIMC_BUF_STOP.
 *
 * Returns 0 on success or -EINVAL for an unknown buffer type.
 */
static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
		enum drm_exynos_ipp_buf_type buf_type)
{
	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
	bool enable;
	u32 cfg;
	u32 mask = 0x00000001 << buf_id;
	int ret = 0;

	DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
		buf_id, buf_type);

	mutex_lock(&ctx->lock);

	/* mask register set */
	cfg = fimc_read(EXYNOS_CIFCNTSEQ);

	switch (buf_type) {
	case IPP_BUF_ENQUEUE:
		enable = true;
		break;
	case IPP_BUF_DEQUEUE:
		enable = false;
		break;
	default:
		dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
		ret = -EINVAL;
		goto err_unlock;
	}

	/* sequence id */
	cfg &= ~mask;
	cfg |= (enable << buf_id);
	fimc_write(cfg, EXYNOS_CIFCNTSEQ);

	/* interrupt enable */
	if (buf_type == IPP_BUF_ENQUEUE &&
	    fimc_dst_get_buf_seq(ctx) >= FIMC_BUF_START)
		fimc_handle_irq(ctx, true, false, true);

	/* interrupt disable */
	if (buf_type == IPP_BUF_DEQUEUE &&
	    fimc_dst_get_buf_seq(ctx) <= FIMC_BUF_STOP)
		fimc_handle_irq(ctx, false, false, true);

err_unlock:
	mutex_unlock(&ctx->lock);
	return ret;
}
1233
1234static int fimc_dst_set_addr(struct device *dev,
1235 struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
1236 enum drm_exynos_ipp_buf_type buf_type)
1237{
1238 struct fimc_context *ctx = get_fimc_context(dev);
1239 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1240 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
1241 struct drm_exynos_ipp_property *property;
1242 struct drm_exynos_ipp_config *config;
1243
1244 if (!c_node) {
1245 DRM_ERROR("failed to get c_node.\n");
1246 return -EINVAL;
1247 }
1248
1249 property = &c_node->property;
1250
1251 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
1252 property->prop_id, buf_id, buf_type);
1253
1254 if (buf_id > FIMC_MAX_DST) {
1255 dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
1256 return -ENOMEM;
1257 }
1258
1259 /* address register set */
1260 switch (buf_type) {
1261 case IPP_BUF_ENQUEUE:
1262 config = &property->config[EXYNOS_DRM_OPS_DST];
1263
1264 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
1265 EXYNOS_CIOYSA(buf_id));
1266
1267 if (config->fmt == DRM_FORMAT_YVU420) {
1268 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
1269 EXYNOS_CIOCBSA(buf_id));
1270 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
1271 EXYNOS_CIOCRSA(buf_id));
1272 } else {
1273 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
1274 EXYNOS_CIOCBSA(buf_id));
1275 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
1276 EXYNOS_CIOCRSA(buf_id));
1277 }
1278 break;
1279 case IPP_BUF_DEQUEUE:
1280 fimc_write(0x0, EXYNOS_CIOYSA(buf_id));
1281 fimc_write(0x0, EXYNOS_CIOCBSA(buf_id));
1282 fimc_write(0x0, EXYNOS_CIOCRSA(buf_id));
1283 break;
1284 default:
1285 /* bypass */
1286 break;
1287 }
1288
1289 return fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
1290}
1291
/* destination (output DMA) side operations registered with the IPP core */
static struct exynos_drm_ipp_ops fimc_dst_ops = {
	.set_fmt = fimc_dst_set_fmt,
	.set_transf = fimc_dst_set_transf,
	.set_size = fimc_dst_set_size,
	.set_addr = fimc_dst_set_addr,
};
1298
1299static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
1300{
1301 DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
1302
1303 if (enable) {
1304 clk_enable(ctx->sclk_fimc_clk);
1305 clk_enable(ctx->fimc_clk);
1306 clk_enable(ctx->wb_clk);
1307 ctx->suspended = false;
1308 } else {
1309 clk_disable(ctx->sclk_fimc_clk);
1310 clk_disable(ctx->fimc_clk);
1311 clk_disable(ctx->wb_clk);
1312 ctx->suspended = true;
1313 }
1314
1315 return 0;
1316}
1317
1318static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
1319{
1320 struct fimc_context *ctx = dev_id;
1321 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1322 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
1323 struct drm_exynos_ipp_event_work *event_work =
1324 c_node->event_work;
1325 int buf_id;
1326
1327 DRM_DEBUG_KMS("%s:fimc id[%d]\n", __func__, ctx->id);
1328
1329 fimc_clear_irq(ctx);
1330 if (fimc_check_ovf(ctx))
1331 return IRQ_NONE;
1332
1333 if (!fimc_check_frame_end(ctx))
1334 return IRQ_NONE;
1335
1336 buf_id = fimc_get_buf_id(ctx);
1337 if (buf_id < 0)
1338 return IRQ_HANDLED;
1339
1340 DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
1341
1342 if (fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE) < 0) {
1343 DRM_ERROR("failed to dequeue.\n");
1344 return IRQ_HANDLED;
1345 }
1346
1347 event_work->ippdrv = ippdrv;
1348 event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
1349 queue_work(ippdrv->event_workq, (struct work_struct *)event_work);
1350
1351 return IRQ_HANDLED;
1352}
1353
1354static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
1355{
1356 struct drm_exynos_ipp_prop_list *prop_list;
1357
1358 DRM_DEBUG_KMS("%s\n", __func__);
1359
1360 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
1361 if (!prop_list) {
1362 DRM_ERROR("failed to alloc property list.\n");
1363 return -ENOMEM;
1364 }
1365
1366 prop_list->version = 1;
1367 prop_list->writeback = 1;
1368 prop_list->refresh_min = FIMC_REFRESH_MIN;
1369 prop_list->refresh_max = FIMC_REFRESH_MAX;
1370 prop_list->flip = (1 << EXYNOS_DRM_FLIP_NONE) |
1371 (1 << EXYNOS_DRM_FLIP_VERTICAL) |
1372 (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
1373 prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
1374 (1 << EXYNOS_DRM_DEGREE_90) |
1375 (1 << EXYNOS_DRM_DEGREE_180) |
1376 (1 << EXYNOS_DRM_DEGREE_270);
1377 prop_list->csc = 1;
1378 prop_list->crop = 1;
1379 prop_list->crop_max.hsize = FIMC_CROP_MAX;
1380 prop_list->crop_max.vsize = FIMC_CROP_MAX;
1381 prop_list->crop_min.hsize = FIMC_CROP_MIN;
1382 prop_list->crop_min.vsize = FIMC_CROP_MIN;
1383 prop_list->scale = 1;
1384 prop_list->scale_max.hsize = FIMC_SCALE_MAX;
1385 prop_list->scale_max.vsize = FIMC_SCALE_MAX;
1386 prop_list->scale_min.hsize = FIMC_SCALE_MIN;
1387 prop_list->scale_min.vsize = FIMC_SCALE_MIN;
1388
1389 ippdrv->prop_list = prop_list;
1390
1391 return 0;
1392}
1393
1394static inline bool fimc_check_drm_flip(enum drm_exynos_flip flip)
1395{
1396 switch (flip) {
1397 case EXYNOS_DRM_FLIP_NONE:
1398 case EXYNOS_DRM_FLIP_VERTICAL:
1399 case EXYNOS_DRM_FLIP_HORIZONTAL:
1400 case EXYNOS_DRM_FLIP_BOTH:
1401 return true;
1402 default:
1403 DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
1404 return false;
1405 }
1406}
1407
/*
 * fimc_ippdrv_check_property - validate a requested IPP operation
 * against this driver's advertised capabilities.
 * @dev: FIMC platform device
 * @property: requested source/destination configuration
 *
 * For each side (source is skipped for writeback commands, where the
 * input comes from FIMD rather than memory) this checks the flip and
 * rotation values, that the position rectangle lies within the buffer,
 * and that the crop (source) / scale (destination) limits hold.  When
 * the rotation is 90/270 the width/height roles are swapped before the
 * limit comparison.  On any failure every config is dumped and -EINVAL
 * is returned.
 */
static int fimc_ippdrv_check_property(struct device *dev,
		struct drm_exynos_ipp_property *property)
{
	struct fimc_context *ctx = get_fimc_context(dev);
	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
	struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
	struct drm_exynos_ipp_config *config;
	struct drm_exynos_pos *pos;
	struct drm_exynos_sz *sz;
	bool swap;
	int i;

	DRM_DEBUG_KMS("%s\n", __func__);

	for_each_ipp_ops(i) {
		/* writeback sources come from FIMD, not from a user buffer */
		if ((i == EXYNOS_DRM_OPS_SRC) &&
			(property->cmd == IPP_CMD_WB))
			continue;

		config = &property->config[i];
		pos = &config->pos;
		sz = &config->sz;

		/* check for flip */
		if (!fimc_check_drm_flip(config->flip)) {
			DRM_ERROR("invalid flip.\n");
			goto err_property;
		}

		/* check for degree */
		switch (config->degree) {
		case EXYNOS_DRM_DEGREE_90:
		case EXYNOS_DRM_DEGREE_270:
			swap = true;
			break;
		case EXYNOS_DRM_DEGREE_0:
		case EXYNOS_DRM_DEGREE_180:
			swap = false;
			break;
		default:
			DRM_ERROR("invalid degree.\n");
			goto err_property;
		}

		/* check for buffer bound */
		if ((pos->x + pos->w > sz->hsize) ||
			(pos->y + pos->h > sz->vsize)) {
			DRM_ERROR("out of buf bound.\n");
			goto err_property;
		}

		/* check for crop: with 90/270 rotation, h/w limits swap */
		if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
			if (swap) {
				if ((pos->h < pp->crop_min.hsize) ||
					(sz->vsize > pp->crop_max.hsize) ||
					(pos->w < pp->crop_min.vsize) ||
					(sz->hsize > pp->crop_max.vsize)) {
					DRM_ERROR("out of crop size.\n");
					goto err_property;
				}
			} else {
				if ((pos->w < pp->crop_min.hsize) ||
					(sz->hsize > pp->crop_max.hsize) ||
					(pos->h < pp->crop_min.vsize) ||
					(sz->vsize > pp->crop_max.vsize)) {
					DRM_ERROR("out of crop size.\n");
					goto err_property;
				}
			}
		}

		/* check for scale: same swap handling on the output side */
		if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
			if (swap) {
				if ((pos->h < pp->scale_min.hsize) ||
					(sz->vsize > pp->scale_max.hsize) ||
					(pos->w < pp->scale_min.vsize) ||
					(sz->hsize > pp->scale_max.vsize)) {
					DRM_ERROR("out of scale size.\n");
					goto err_property;
				}
			} else {
				if ((pos->w < pp->scale_min.hsize) ||
					(sz->hsize > pp->scale_max.hsize) ||
					(pos->h < pp->scale_min.vsize) ||
					(sz->vsize > pp->scale_max.vsize)) {
					DRM_ERROR("out of scale size.\n");
					goto err_property;
				}
			}
		}
	}

	return 0;

err_property:
	/* dump every config to ease debugging the rejected request */
	for_each_ipp_ops(i) {
		if ((i == EXYNOS_DRM_OPS_SRC) &&
			(property->cmd == IPP_CMD_WB))
			continue;

		config = &property->config[i];
		pos = &config->pos;
		sz = &config->sz;

		DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
			i ? "dst" : "src", config->flip, config->degree,
			pos->x, pos->y, pos->w, pos->h,
			sz->hsize, sz->vsize);
	}

	return -EINVAL;
}
1522
1523static void fimc_clear_addr(struct fimc_context *ctx)
1524{
1525 int i;
1526
1527 DRM_DEBUG_KMS("%s:\n", __func__);
1528
1529 for (i = 0; i < FIMC_MAX_SRC; i++) {
1530 fimc_write(0, EXYNOS_CIIYSA(i));
1531 fimc_write(0, EXYNOS_CIICBSA(i));
1532 fimc_write(0, EXYNOS_CIICRSA(i));
1533 }
1534
1535 for (i = 0; i < FIMC_MAX_DST; i++) {
1536 fimc_write(0, EXYNOS_CIOYSA(i));
1537 fimc_write(0, EXYNOS_CIOCBSA(i));
1538 fimc_write(0, EXYNOS_CIOCRSA(i));
1539 }
1540}
1541
1542static int fimc_ippdrv_reset(struct device *dev)
1543{
1544 struct fimc_context *ctx = get_fimc_context(dev);
1545
1546 DRM_DEBUG_KMS("%s\n", __func__);
1547
1548 /* reset h/w block */
1549 fimc_sw_reset(ctx);
1550
1551 /* reset scaler capability */
1552 memset(&ctx->sc, 0x0, sizeof(ctx->sc));
1553
1554 fimc_clear_addr(ctx);
1555
1556 return 0;
1557}
1558
/*
 * fimc_ippdrv_start - configure and kick off an IPP operation.
 * @dev: FIMC platform device
 * @cmd: IPP_CMD_M2M (memory-to-memory) or IPP_CMD_WB (FIMD writeback);
 *       IPP_CMD_OUTPUT is not supported and returns -EINVAL
 *
 * Programs the pre/main scaler from the bound command node's source and
 * destination rectangles, selects the input path (memory vs writeback),
 * then enables scaling and image capture and unmasks the frame-end irq.
 */
static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
{
	struct fimc_context *ctx = get_fimc_context(dev);
	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
	struct drm_exynos_ipp_property *property;
	struct drm_exynos_ipp_config *config;
	struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX];
	struct drm_exynos_ipp_set_wb set_wb;
	int ret, i;
	u32 cfg0, cfg1;

	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);

	if (!c_node) {
		DRM_ERROR("failed to get c_node.\n");
		return -EINVAL;
	}

	property = &c_node->property;

	fimc_handle_irq(ctx, true, false, true);

	for_each_ipp_ops(i) {
		config = &property->config[i];
		img_pos[i] = config->pos;
	}

	ret = fimc_set_prescaler(ctx, &ctx->sc,
		&img_pos[EXYNOS_DRM_OPS_SRC],
		&img_pos[EXYNOS_DRM_OPS_DST]);
	if (ret) {
		/* NOTE(review): "precalser" typo in this message */
		dev_err(dev, "failed to set precalser.\n");
		return ret;
	}

	/* If set ture, we can save jpeg about screen */
	fimc_handle_jpeg(ctx, false);
	fimc_set_scaler(ctx, &ctx->sc);
	fimc_set_polarity(ctx, &ctx->pol);

	switch (cmd) {
	case IPP_CMD_M2M:
		fimc_set_type_ctrl(ctx, FIMC_WB_NONE);
		fimc_handle_lastend(ctx, false);

		/* setup dma: input comes from memory */
		cfg0 = fimc_read(EXYNOS_MSCTRL);
		cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK;
		cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY;
		fimc_write(cfg0, EXYNOS_MSCTRL);
		break;
	case IPP_CMD_WB:
		fimc_set_type_ctrl(ctx, FIMC_WB_A);
		fimc_handle_lastend(ctx, true);

		/* setup FIMD as writeback source and notify it */
		fimc_set_camblk_fimd0_wb(ctx);

		set_wb.enable = 1;
		set_wb.refresh = property->refresh_rate;
		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
		break;
	case IPP_CMD_OUTPUT:
	default:
		ret = -EINVAL;
		dev_err(dev, "invalid operations.\n");
		return ret;
	}

	/* Reset status */
	fimc_write(0x0, EXYNOS_CISTATUS);

	/* NOTE(review): clear-then-set of the same bit is redundant */
	cfg0 = fimc_read(EXYNOS_CIIMGCPT);
	cfg0 &= ~EXYNOS_CIIMGCPT_IMGCPTEN_SC;
	cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN_SC;

	/* Scaler */
	cfg1 = fimc_read(EXYNOS_CISCCTRL);
	cfg1 &= ~EXYNOS_CISCCTRL_SCAN_MASK;
	cfg1 |= (EXYNOS_CISCCTRL_PROGRESSIVE |
		EXYNOS_CISCCTRL_SCALERSTART);

	fimc_write(cfg1, EXYNOS_CISCCTRL);

	/* Enable image capture*/
	cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN;
	fimc_write(cfg0, EXYNOS_CIIMGCPT);

	/* Disable frame end irq */
	cfg0 = fimc_read(EXYNOS_CIGCTRL);
	cfg0 &= ~EXYNOS_CIGCTRL_IRQ_END_DISABLE;
	fimc_write(cfg0, EXYNOS_CIGCTRL);

	cfg0 = fimc_read(EXYNOS_CIOCTRL);
	cfg0 &= ~EXYNOS_CIOCTRL_WEAVE_MASK;
	fimc_write(cfg0, EXYNOS_CIOCTRL);

	if (cmd == IPP_CMD_M2M) {
		/*
		 * NOTE(review): ENVID is set and written twice; looks like a
		 * copy-paste duplicate unless it is a hardware quirk — confirm
		 * against the FIMC datasheet before removing.
		 */
		cfg0 = fimc_read(EXYNOS_MSCTRL);
		cfg0 |= EXYNOS_MSCTRL_ENVID;
		fimc_write(cfg0, EXYNOS_MSCTRL);

		cfg0 = fimc_read(EXYNOS_MSCTRL);
		cfg0 |= EXYNOS_MSCTRL_ENVID;
		fimc_write(cfg0, EXYNOS_MSCTRL);
	}

	return 0;
}
1669
/*
 * fimc_ippdrv_stop - stop a running IPP operation and quiesce the block.
 * @dev: FIMC platform device
 * @cmd: the command type that was started
 *
 * Clears the input path (M2M) or tells FIMD to stop writeback (WB),
 * masks the irq, clears the buffer sequence, stops the scaler and
 * image capture, and re-disables the frame-end interrupt source.
 */
static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
{
	struct fimc_context *ctx = get_fimc_context(dev);
	struct drm_exynos_ipp_set_wb set_wb = {0, 0};
	u32 cfg;

	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);

	switch (cmd) {
	case IPP_CMD_M2M:
		/* Source clear */
		cfg = fimc_read(EXYNOS_MSCTRL);
		cfg &= ~EXYNOS_MSCTRL_INPUT_MASK;
		cfg &= ~EXYNOS_MSCTRL_ENVID;
		fimc_write(cfg, EXYNOS_MSCTRL);
		break;
	case IPP_CMD_WB:
		/* set_wb is zeroed: tells FIMD to disable writeback */
		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
		break;
	case IPP_CMD_OUTPUT:
	default:
		dev_err(dev, "invalid operations.\n");
		break;
	}

	fimc_handle_irq(ctx, false, false, true);

	/* reset sequence */
	fimc_write(0x0, EXYNOS_CIFCNTSEQ);

	/* Scaler disable */
	cfg = fimc_read(EXYNOS_CISCCTRL);
	cfg &= ~EXYNOS_CISCCTRL_SCALERSTART;
	fimc_write(cfg, EXYNOS_CISCCTRL);

	/* Disable image capture */
	cfg = fimc_read(EXYNOS_CIIMGCPT);
	cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
	fimc_write(cfg, EXYNOS_CIIMGCPT);

	/* Enable frame end irq */
	cfg = fimc_read(EXYNOS_CIGCTRL);
	cfg |= EXYNOS_CIGCTRL_IRQ_END_DISABLE;
	fimc_write(cfg, EXYNOS_CIGCTRL);
}
1715
/*
 * fimc_probe - bind the FIMC device: acquire clocks, map registers,
 * request the irq and register the block with the IPP core.
 *
 * NOTE(review): on error paths after clk_enable(ctx->sclk_fimc_clk)
 * that return directly (regs mapping, irq resource/request failures),
 * the source clock is left enabled — confirm and add clk_disable()
 * there if intended.
 */
static int fimc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct fimc_context *ctx;
	struct clk *parent_clk;
	struct resource *res;
	struct exynos_drm_ippdrv *ippdrv;
	struct exynos_drm_fimc_pdata *pdata;
	struct fimc_driverdata *ddata;
	int ret;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(dev, "no platform data specified.\n");
		return -EINVAL;
	}

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	/* per-SoC data (parent clock name) selected via the device id table */
	ddata = (struct fimc_driverdata *)
		platform_get_device_id(pdev)->driver_data;

	/* clock control */
	ctx->sclk_fimc_clk = devm_clk_get(dev, "sclk_fimc");
	if (IS_ERR(ctx->sclk_fimc_clk)) {
		dev_err(dev, "failed to get src fimc clock.\n");
		return PTR_ERR(ctx->sclk_fimc_clk);
	}
	clk_enable(ctx->sclk_fimc_clk);

	ctx->fimc_clk = devm_clk_get(dev, "fimc");
	if (IS_ERR(ctx->fimc_clk)) {
		dev_err(dev, "failed to get fimc clock.\n");
		clk_disable(ctx->sclk_fimc_clk);
		return PTR_ERR(ctx->fimc_clk);
	}

	ctx->wb_clk = devm_clk_get(dev, "pxl_async0");
	if (IS_ERR(ctx->wb_clk)) {
		dev_err(dev, "failed to get writeback a clock.\n");
		clk_disable(ctx->sclk_fimc_clk);
		return PTR_ERR(ctx->wb_clk);
	}

	ctx->wb_b_clk = devm_clk_get(dev, "pxl_async1");
	if (IS_ERR(ctx->wb_b_clk)) {
		dev_err(dev, "failed to get writeback b clock.\n");
		clk_disable(ctx->sclk_fimc_clk);
		return PTR_ERR(ctx->wb_b_clk);
	}

	/* reparent sclk_fimc to the SoC-specific mux output */
	parent_clk = devm_clk_get(dev, ddata->parent_clk);

	if (IS_ERR(parent_clk)) {
		dev_err(dev, "failed to get parent clock.\n");
		clk_disable(ctx->sclk_fimc_clk);
		return PTR_ERR(parent_clk);
	}

	if (clk_set_parent(ctx->sclk_fimc_clk, parent_clk)) {
		dev_err(dev, "failed to set parent.\n");
		clk_disable(ctx->sclk_fimc_clk);
		return -EINVAL;
	}

	/* parent reference only needed for reparenting; release it now */
	devm_clk_put(dev, parent_clk);
	clk_set_rate(ctx->sclk_fimc_clk, pdata->clk_rate);

	/* resource memory */
	ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res);
	if (!ctx->regs) {
		dev_err(dev, "failed to map registers.\n");
		return -ENXIO;
	}

	/* resource irq */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(dev, "failed to request irq resource.\n");
		return -ENOENT;
	}

	ctx->irq = res->start;
	ret = request_threaded_irq(ctx->irq, NULL, fimc_irq_handler,
		IRQF_ONESHOT, "drm_fimc", ctx);
	if (ret < 0) {
		dev_err(dev, "failed to request irq.\n");
		return ret;
	}

	/* context initailization */
	ctx->id = pdev->id;
	ctx->pol = pdata->pol;
	ctx->ddata = ddata;

	/* wire up the IPP driver callbacks */
	ippdrv = &ctx->ippdrv;
	ippdrv->dev = dev;
	ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops;
	ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops;
	ippdrv->check_property = fimc_ippdrv_check_property;
	ippdrv->reset = fimc_ippdrv_reset;
	ippdrv->start = fimc_ippdrv_start;
	ippdrv->stop = fimc_ippdrv_stop;
	ret = fimc_init_prop_list(ippdrv);
	if (ret < 0) {
		dev_err(dev, "failed to init property list.\n");
		goto err_get_irq;
	}

	DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
		(int)ippdrv);

	mutex_init(&ctx->lock);
	platform_set_drvdata(pdev, ctx);

	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	ret = exynos_drm_ippdrv_register(ippdrv);
	if (ret < 0) {
		dev_err(dev, "failed to register drm fimc device.\n");
		goto err_ippdrv_register;
	}

	dev_info(&pdev->dev, "drm fimc registered successfully.\n");

	return 0;

	/* unwind in reverse order; falls through to also free the irq */
err_ippdrv_register:
	devm_kfree(dev, ippdrv->prop_list);
	pm_runtime_disable(dev);
err_get_irq:
	free_irq(ctx->irq, ctx);

	return ret;
}
1855
/*
 * fimc_remove - unbind: unregister from the IPP core, tear down PM and
 * release the irq.
 *
 * NOTE(review): prop_list is devm_kfree'd before
 * exynos_drm_ippdrv_unregister() runs — confirm the unregister path
 * never touches ippdrv->prop_list, otherwise reorder these two calls.
 */
static int fimc_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct fimc_context *ctx = get_fimc_context(dev);
	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;

	devm_kfree(dev, ippdrv->prop_list);
	exynos_drm_ippdrv_unregister(ippdrv);
	mutex_destroy(&ctx->lock);

	pm_runtime_set_suspended(dev);
	pm_runtime_disable(dev);

	free_irq(ctx->irq, ctx);

	return 0;
}
1873
1874#ifdef CONFIG_PM_SLEEP
1875static int fimc_suspend(struct device *dev)
1876{
1877 struct fimc_context *ctx = get_fimc_context(dev);
1878
1879 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1880
1881 if (pm_runtime_suspended(dev))
1882 return 0;
1883
1884 return fimc_clk_ctrl(ctx, false);
1885}
1886
1887static int fimc_resume(struct device *dev)
1888{
1889 struct fimc_context *ctx = get_fimc_context(dev);
1890
1891 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1892
1893 if (!pm_runtime_suspended(dev))
1894 return fimc_clk_ctrl(ctx, true);
1895
1896 return 0;
1897}
1898#endif
1899
1900#ifdef CONFIG_PM_RUNTIME
1901static int fimc_runtime_suspend(struct device *dev)
1902{
1903 struct fimc_context *ctx = get_fimc_context(dev);
1904
1905 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1906
1907 return fimc_clk_ctrl(ctx, false);
1908}
1909
1910static int fimc_runtime_resume(struct device *dev)
1911{
1912 struct fimc_context *ctx = get_fimc_context(dev);
1913
1914 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1915
1916 return fimc_clk_ctrl(ctx, true);
1917}
1918#endif
1919
1920static struct fimc_driverdata exynos4210_fimc_data = {
1921 .parent_clk = "mout_mpll",
1922};
1923
1924static struct fimc_driverdata exynos4410_fimc_data = {
1925 .parent_clk = "mout_mpll_user",
1926};
1927
1928static struct platform_device_id fimc_driver_ids[] = {
1929 {
1930 .name = "exynos4210-fimc",
1931 .driver_data = (unsigned long)&exynos4210_fimc_data,
1932 }, {
1933 .name = "exynos4412-fimc",
1934 .driver_data = (unsigned long)&exynos4410_fimc_data,
1935 },
1936 {},
1937};
1938MODULE_DEVICE_TABLE(platform, fimc_driver_ids);
1939
/*
 * PM callbacks: system sleep gates the clocks unless runtime PM already
 * did (fimc_suspend/fimc_resume); runtime PM gates them directly via
 * fimc_clk_ctrl() (fimc_runtime_suspend/fimc_runtime_resume).
 */
static const struct dev_pm_ops fimc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
	SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
};
1944
/* platform driver, registered by the exynos_drm core (non-static on purpose) */
struct platform_driver fimc_driver = {
	.probe		= fimc_probe,
	.remove		= fimc_remove,
	.id_table	= fimc_driver_ids,
	.driver		= {
		.name	= "exynos-drm-fimc",
		.owner	= THIS_MODULE,
		.pm	= &fimc_pm_ops,
	},
};
1955
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.h b/drivers/gpu/drm/exynos/exynos_drm_fimc.h
new file mode 100644
index 000000000000..127a424c5fdf
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.h
@@ -0,0 +1,23 @@
1/*
2 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
3 *
4 * Authors:
5 * Eunchul Kim <chulspro.kim@samsung.com>
6 * Jinyoung Jeon <jy0.jeon@samsung.com>
7 * Sangmin Lee <lsmin.lee@samsung.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 */
14
15#ifndef _EXYNOS_DRM_FIMC_H_
16#define _EXYNOS_DRM_FIMC_H_
17
18/*
19 * TODO
20 * FIMD output interface notifier callback.
21 */
22
23#endif /* _EXYNOS_DRM_FIMC_H_ */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 130a2b510d4a..9537761931ee 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -17,6 +17,7 @@
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/clk.h> 19#include <linux/clk.h>
20#include <linux/of_device.h>
20#include <linux/pm_runtime.h> 21#include <linux/pm_runtime.h>
21 22
22#include <video/samsung_fimd.h> 23#include <video/samsung_fimd.h>
@@ -25,6 +26,7 @@
25#include "exynos_drm_drv.h" 26#include "exynos_drm_drv.h"
26#include "exynos_drm_fbdev.h" 27#include "exynos_drm_fbdev.h"
27#include "exynos_drm_crtc.h" 28#include "exynos_drm_crtc.h"
29#include "exynos_drm_iommu.h"
28 30
29/* 31/*
30 * FIMD is stand for Fully Interactive Mobile Display and 32 * FIMD is stand for Fully Interactive Mobile Display and
@@ -61,11 +63,11 @@ struct fimd_driver_data {
61 unsigned int timing_base; 63 unsigned int timing_base;
62}; 64};
63 65
64struct fimd_driver_data exynos4_fimd_driver_data = { 66static struct fimd_driver_data exynos4_fimd_driver_data = {
65 .timing_base = 0x0, 67 .timing_base = 0x0,
66}; 68};
67 69
68struct fimd_driver_data exynos5_fimd_driver_data = { 70static struct fimd_driver_data exynos5_fimd_driver_data = {
69 .timing_base = 0x20000, 71 .timing_base = 0x20000,
70}; 72};
71 73
@@ -78,10 +80,10 @@ struct fimd_win_data {
78 unsigned int fb_height; 80 unsigned int fb_height;
79 unsigned int bpp; 81 unsigned int bpp;
80 dma_addr_t dma_addr; 82 dma_addr_t dma_addr;
81 void __iomem *vaddr;
82 unsigned int buf_offsize; 83 unsigned int buf_offsize;
83 unsigned int line_size; /* bytes */ 84 unsigned int line_size; /* bytes */
84 bool enabled; 85 bool enabled;
86 bool resume;
85}; 87};
86 88
87struct fimd_context { 89struct fimd_context {
@@ -99,13 +101,34 @@ struct fimd_context {
99 u32 vidcon1; 101 u32 vidcon1;
100 bool suspended; 102 bool suspended;
101 struct mutex lock; 103 struct mutex lock;
104 wait_queue_head_t wait_vsync_queue;
105 atomic_t wait_vsync_event;
102 106
103 struct exynos_drm_panel_info *panel; 107 struct exynos_drm_panel_info *panel;
104}; 108};
105 109
110#ifdef CONFIG_OF
111static const struct of_device_id fimd_driver_dt_match[] = {
112 { .compatible = "samsung,exynos4-fimd",
113 .data = &exynos4_fimd_driver_data },
114 { .compatible = "samsung,exynos5-fimd",
115 .data = &exynos5_fimd_driver_data },
116 {},
117};
118MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
119#endif
120
106static inline struct fimd_driver_data *drm_fimd_get_driver_data( 121static inline struct fimd_driver_data *drm_fimd_get_driver_data(
107 struct platform_device *pdev) 122 struct platform_device *pdev)
108{ 123{
124#ifdef CONFIG_OF
125 const struct of_device_id *of_id =
126 of_match_device(fimd_driver_dt_match, &pdev->dev);
127
128 if (of_id)
129 return (struct fimd_driver_data *)of_id->data;
130#endif
131
109 return (struct fimd_driver_data *) 132 return (struct fimd_driver_data *)
110 platform_get_device_id(pdev)->driver_data; 133 platform_get_device_id(pdev)->driver_data;
111} 134}
@@ -240,7 +263,9 @@ static void fimd_commit(struct device *dev)
240 263
241 /* setup horizontal and vertical display size. */ 264 /* setup horizontal and vertical display size. */
242 val = VIDTCON2_LINEVAL(timing->yres - 1) | 265 val = VIDTCON2_LINEVAL(timing->yres - 1) |
243 VIDTCON2_HOZVAL(timing->xres - 1); 266 VIDTCON2_HOZVAL(timing->xres - 1) |
267 VIDTCON2_LINEVAL_E(timing->yres - 1) |
268 VIDTCON2_HOZVAL_E(timing->xres - 1);
244 writel(val, ctx->regs + driver_data->timing_base + VIDTCON2); 269 writel(val, ctx->regs + driver_data->timing_base + VIDTCON2);
245 270
246 /* setup clock source, clock divider, enable dma. */ 271 /* setup clock source, clock divider, enable dma. */
@@ -307,12 +332,32 @@ static void fimd_disable_vblank(struct device *dev)
307 } 332 }
308} 333}
309 334
335static void fimd_wait_for_vblank(struct device *dev)
336{
337 struct fimd_context *ctx = get_fimd_context(dev);
338
339 if (ctx->suspended)
340 return;
341
342 atomic_set(&ctx->wait_vsync_event, 1);
343
344 /*
345 * wait for FIMD to signal VSYNC interrupt or return after
346 * timeout which is set to 50ms (refresh rate of 20).
347 */
348 if (!wait_event_timeout(ctx->wait_vsync_queue,
349 !atomic_read(&ctx->wait_vsync_event),
350 DRM_HZ/20))
351 DRM_DEBUG_KMS("vblank wait timed out.\n");
352}
353
310static struct exynos_drm_manager_ops fimd_manager_ops = { 354static struct exynos_drm_manager_ops fimd_manager_ops = {
311 .dpms = fimd_dpms, 355 .dpms = fimd_dpms,
312 .apply = fimd_apply, 356 .apply = fimd_apply,
313 .commit = fimd_commit, 357 .commit = fimd_commit,
314 .enable_vblank = fimd_enable_vblank, 358 .enable_vblank = fimd_enable_vblank,
315 .disable_vblank = fimd_disable_vblank, 359 .disable_vblank = fimd_disable_vblank,
360 .wait_for_vblank = fimd_wait_for_vblank,
316}; 361};
317 362
318static void fimd_win_mode_set(struct device *dev, 363static void fimd_win_mode_set(struct device *dev,
@@ -351,7 +396,6 @@ static void fimd_win_mode_set(struct device *dev,
351 win_data->fb_width = overlay->fb_width; 396 win_data->fb_width = overlay->fb_width;
352 win_data->fb_height = overlay->fb_height; 397 win_data->fb_height = overlay->fb_height;
353 win_data->dma_addr = overlay->dma_addr[0] + offset; 398 win_data->dma_addr = overlay->dma_addr[0] + offset;
354 win_data->vaddr = overlay->vaddr[0] + offset;
355 win_data->bpp = overlay->bpp; 399 win_data->bpp = overlay->bpp;
356 win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) * 400 win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
357 (overlay->bpp >> 3); 401 (overlay->bpp >> 3);
@@ -361,9 +405,7 @@ static void fimd_win_mode_set(struct device *dev,
361 win_data->offset_x, win_data->offset_y); 405 win_data->offset_x, win_data->offset_y);
362 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", 406 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
363 win_data->ovl_width, win_data->ovl_height); 407 win_data->ovl_width, win_data->ovl_height);
364 DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n", 408 DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
365 (unsigned long)win_data->dma_addr,
366 (unsigned long)win_data->vaddr);
367 DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n", 409 DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
368 overlay->fb_width, overlay->crtc_width); 410 overlay->fb_width, overlay->crtc_width);
369} 411}
@@ -451,6 +493,8 @@ static void fimd_win_commit(struct device *dev, int zpos)
451 struct fimd_win_data *win_data; 493 struct fimd_win_data *win_data;
452 int win = zpos; 494 int win = zpos;
453 unsigned long val, alpha, size; 495 unsigned long val, alpha, size;
496 unsigned int last_x;
497 unsigned int last_y;
454 498
455 DRM_DEBUG_KMS("%s\n", __FILE__); 499 DRM_DEBUG_KMS("%s\n", __FILE__);
456 500
@@ -496,24 +540,32 @@ static void fimd_win_commit(struct device *dev, int zpos)
496 540
497 /* buffer size */ 541 /* buffer size */
498 val = VIDW_BUF_SIZE_OFFSET(win_data->buf_offsize) | 542 val = VIDW_BUF_SIZE_OFFSET(win_data->buf_offsize) |
499 VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size); 543 VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size) |
544 VIDW_BUF_SIZE_OFFSET_E(win_data->buf_offsize) |
545 VIDW_BUF_SIZE_PAGEWIDTH_E(win_data->line_size);
500 writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0)); 546 writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0));
501 547
502 /* OSD position */ 548 /* OSD position */
503 val = VIDOSDxA_TOPLEFT_X(win_data->offset_x) | 549 val = VIDOSDxA_TOPLEFT_X(win_data->offset_x) |
504 VIDOSDxA_TOPLEFT_Y(win_data->offset_y); 550 VIDOSDxA_TOPLEFT_Y(win_data->offset_y) |
551 VIDOSDxA_TOPLEFT_X_E(win_data->offset_x) |
552 VIDOSDxA_TOPLEFT_Y_E(win_data->offset_y);
505 writel(val, ctx->regs + VIDOSD_A(win)); 553 writel(val, ctx->regs + VIDOSD_A(win));
506 554
507 val = VIDOSDxB_BOTRIGHT_X(win_data->offset_x + 555 last_x = win_data->offset_x + win_data->ovl_width;
508 win_data->ovl_width - 1) | 556 if (last_x)
509 VIDOSDxB_BOTRIGHT_Y(win_data->offset_y + 557 last_x--;
510 win_data->ovl_height - 1); 558 last_y = win_data->offset_y + win_data->ovl_height;
559 if (last_y)
560 last_y--;
561
562 val = VIDOSDxB_BOTRIGHT_X(last_x) | VIDOSDxB_BOTRIGHT_Y(last_y) |
563 VIDOSDxB_BOTRIGHT_X_E(last_x) | VIDOSDxB_BOTRIGHT_Y_E(last_y);
564
511 writel(val, ctx->regs + VIDOSD_B(win)); 565 writel(val, ctx->regs + VIDOSD_B(win));
512 566
513 DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n", 567 DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
514 win_data->offset_x, win_data->offset_y, 568 win_data->offset_x, win_data->offset_y, last_x, last_y);
515 win_data->offset_x + win_data->ovl_width - 1,
516 win_data->offset_y + win_data->ovl_height - 1);
517 569
518 /* hardware window 0 doesn't support alpha channel. */ 570 /* hardware window 0 doesn't support alpha channel. */
519 if (win != 0) { 571 if (win != 0) {
@@ -573,6 +625,12 @@ static void fimd_win_disable(struct device *dev, int zpos)
573 625
574 win_data = &ctx->win_data[win]; 626 win_data = &ctx->win_data[win];
575 627
628 if (ctx->suspended) {
629 /* do not resume this window*/
630 win_data->resume = false;
631 return;
632 }
633
576 /* protect windows */ 634 /* protect windows */
577 val = readl(ctx->regs + SHADOWCON); 635 val = readl(ctx->regs + SHADOWCON);
578 val |= SHADOWCON_WINx_PROTECT(win); 636 val |= SHADOWCON_WINx_PROTECT(win);
@@ -592,22 +650,10 @@ static void fimd_win_disable(struct device *dev, int zpos)
592 win_data->enabled = false; 650 win_data->enabled = false;
593} 651}
594 652
595static void fimd_wait_for_vblank(struct device *dev)
596{
597 struct fimd_context *ctx = get_fimd_context(dev);
598 int ret;
599
600 ret = wait_for((__raw_readl(ctx->regs + VIDCON1) &
601 VIDCON1_VSTATUS_VSYNC), 50);
602 if (ret < 0)
603 DRM_DEBUG_KMS("vblank wait timed out.\n");
604}
605
606static struct exynos_drm_overlay_ops fimd_overlay_ops = { 653static struct exynos_drm_overlay_ops fimd_overlay_ops = {
607 .mode_set = fimd_win_mode_set, 654 .mode_set = fimd_win_mode_set,
608 .commit = fimd_win_commit, 655 .commit = fimd_win_commit,
609 .disable = fimd_win_disable, 656 .disable = fimd_win_disable,
610 .wait_for_vblank = fimd_wait_for_vblank,
611}; 657};
612 658
613static struct exynos_drm_manager fimd_manager = { 659static struct exynos_drm_manager fimd_manager = {
@@ -617,52 +663,6 @@ static struct exynos_drm_manager fimd_manager = {
617 .display_ops = &fimd_display_ops, 663 .display_ops = &fimd_display_ops,
618}; 664};
619 665
620static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
621{
622 struct exynos_drm_private *dev_priv = drm_dev->dev_private;
623 struct drm_pending_vblank_event *e, *t;
624 struct timeval now;
625 unsigned long flags;
626 bool is_checked = false;
627
628 spin_lock_irqsave(&drm_dev->event_lock, flags);
629
630 list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
631 base.link) {
632 /* if event's pipe isn't same as crtc then ignore it. */
633 if (crtc != e->pipe)
634 continue;
635
636 is_checked = true;
637
638 do_gettimeofday(&now);
639 e->event.sequence = 0;
640 e->event.tv_sec = now.tv_sec;
641 e->event.tv_usec = now.tv_usec;
642
643 list_move_tail(&e->base.link, &e->base.file_priv->event_list);
644 wake_up_interruptible(&e->base.file_priv->event_wait);
645 }
646
647 if (is_checked) {
648 /*
649 * call drm_vblank_put only in case that drm_vblank_get was
650 * called.
651 */
652 if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
653 drm_vblank_put(drm_dev, crtc);
654
655 /*
656 * don't off vblank if vblank_disable_allowed is 1,
657 * because vblank would be off by timer handler.
658 */
659 if (!drm_dev->vblank_disable_allowed)
660 drm_vblank_off(drm_dev, crtc);
661 }
662
663 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
664}
665
666static irqreturn_t fimd_irq_handler(int irq, void *dev_id) 666static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
667{ 667{
668 struct fimd_context *ctx = (struct fimd_context *)dev_id; 668 struct fimd_context *ctx = (struct fimd_context *)dev_id;
@@ -682,8 +682,13 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
682 goto out; 682 goto out;
683 683
684 drm_handle_vblank(drm_dev, manager->pipe); 684 drm_handle_vblank(drm_dev, manager->pipe);
685 fimd_finish_pageflip(drm_dev, manager->pipe); 685 exynos_drm_crtc_finish_pageflip(drm_dev, manager->pipe);
686 686
687 /* set wait vsync event to zero and wake up queue. */
688 if (atomic_read(&ctx->wait_vsync_event)) {
689 atomic_set(&ctx->wait_vsync_event, 0);
690 DRM_WAKEUP(&ctx->wait_vsync_queue);
691 }
687out: 692out:
688 return IRQ_HANDLED; 693 return IRQ_HANDLED;
689} 694}
@@ -709,6 +714,10 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
709 */ 714 */
710 drm_dev->vblank_disable_allowed = 1; 715 drm_dev->vblank_disable_allowed = 1;
711 716
717 /* attach this sub driver to iommu mapping if supported. */
718 if (is_drm_iommu_supported(drm_dev))
719 drm_iommu_attach_device(drm_dev, dev);
720
712 return 0; 721 return 0;
713} 722}
714 723
@@ -716,7 +725,9 @@ static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
716{ 725{
717 DRM_DEBUG_KMS("%s\n", __FILE__); 726 DRM_DEBUG_KMS("%s\n", __FILE__);
718 727
719 /* TODO. */ 728 /* detach this sub driver from iommu mapping if supported. */
729 if (is_drm_iommu_supported(drm_dev))
730 drm_iommu_detach_device(drm_dev, dev);
720} 731}
721 732
722static int fimd_calc_clkdiv(struct fimd_context *ctx, 733static int fimd_calc_clkdiv(struct fimd_context *ctx,
@@ -805,11 +816,38 @@ static int fimd_clock(struct fimd_context *ctx, bool enable)
805 return 0; 816 return 0;
806} 817}
807 818
819static void fimd_window_suspend(struct device *dev)
820{
821 struct fimd_context *ctx = get_fimd_context(dev);
822 struct fimd_win_data *win_data;
823 int i;
824
825 for (i = 0; i < WINDOWS_NR; i++) {
826 win_data = &ctx->win_data[i];
827 win_data->resume = win_data->enabled;
828 fimd_win_disable(dev, i);
829 }
830 fimd_wait_for_vblank(dev);
831}
832
833static void fimd_window_resume(struct device *dev)
834{
835 struct fimd_context *ctx = get_fimd_context(dev);
836 struct fimd_win_data *win_data;
837 int i;
838
839 for (i = 0; i < WINDOWS_NR; i++) {
840 win_data = &ctx->win_data[i];
841 win_data->enabled = win_data->resume;
842 win_data->resume = false;
843 }
844}
845
808static int fimd_activate(struct fimd_context *ctx, bool enable) 846static int fimd_activate(struct fimd_context *ctx, bool enable)
809{ 847{
848 struct device *dev = ctx->subdrv.dev;
810 if (enable) { 849 if (enable) {
811 int ret; 850 int ret;
812 struct device *dev = ctx->subdrv.dev;
813 851
814 ret = fimd_clock(ctx, true); 852 ret = fimd_clock(ctx, true);
815 if (ret < 0) 853 if (ret < 0)
@@ -820,7 +858,11 @@ static int fimd_activate(struct fimd_context *ctx, bool enable)
820 /* if vblank was enabled status, enable it again. */ 858 /* if vblank was enabled status, enable it again. */
821 if (test_and_clear_bit(0, &ctx->irq_flags)) 859 if (test_and_clear_bit(0, &ctx->irq_flags))
822 fimd_enable_vblank(dev); 860 fimd_enable_vblank(dev);
861
862 fimd_window_resume(dev);
823 } else { 863 } else {
864 fimd_window_suspend(dev);
865
824 fimd_clock(ctx, false); 866 fimd_clock(ctx, false);
825 ctx->suspended = true; 867 ctx->suspended = true;
826 } 868 }
@@ -828,7 +870,7 @@ static int fimd_activate(struct fimd_context *ctx, bool enable)
828 return 0; 870 return 0;
829} 871}
830 872
831static int __devinit fimd_probe(struct platform_device *pdev) 873static int fimd_probe(struct platform_device *pdev)
832{ 874{
833 struct device *dev = &pdev->dev; 875 struct device *dev = &pdev->dev;
834 struct fimd_context *ctx; 876 struct fimd_context *ctx;
@@ -857,18 +899,16 @@ static int __devinit fimd_probe(struct platform_device *pdev)
857 if (!ctx) 899 if (!ctx)
858 return -ENOMEM; 900 return -ENOMEM;
859 901
860 ctx->bus_clk = clk_get(dev, "fimd"); 902 ctx->bus_clk = devm_clk_get(dev, "fimd");
861 if (IS_ERR(ctx->bus_clk)) { 903 if (IS_ERR(ctx->bus_clk)) {
862 dev_err(dev, "failed to get bus clock\n"); 904 dev_err(dev, "failed to get bus clock\n");
863 ret = PTR_ERR(ctx->bus_clk); 905 return PTR_ERR(ctx->bus_clk);
864 goto err_clk_get;
865 } 906 }
866 907
867 ctx->lcd_clk = clk_get(dev, "sclk_fimd"); 908 ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
868 if (IS_ERR(ctx->lcd_clk)) { 909 if (IS_ERR(ctx->lcd_clk)) {
869 dev_err(dev, "failed to get lcd clock\n"); 910 dev_err(dev, "failed to get lcd clock\n");
870 ret = PTR_ERR(ctx->lcd_clk); 911 return PTR_ERR(ctx->lcd_clk);
871 goto err_bus_clk;
872 } 912 }
873 913
874 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 914 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -876,14 +916,13 @@ static int __devinit fimd_probe(struct platform_device *pdev)
876 ctx->regs = devm_request_and_ioremap(&pdev->dev, res); 916 ctx->regs = devm_request_and_ioremap(&pdev->dev, res);
877 if (!ctx->regs) { 917 if (!ctx->regs) {
878 dev_err(dev, "failed to map registers\n"); 918 dev_err(dev, "failed to map registers\n");
879 ret = -ENXIO; 919 return -ENXIO;
880 goto err_clk;
881 } 920 }
882 921
883 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 922 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
884 if (!res) { 923 if (!res) {
885 dev_err(dev, "irq request failed.\n"); 924 dev_err(dev, "irq request failed.\n");
886 goto err_clk; 925 return -ENXIO;
887 } 926 }
888 927
889 ctx->irq = res->start; 928 ctx->irq = res->start;
@@ -892,13 +931,15 @@ static int __devinit fimd_probe(struct platform_device *pdev)
892 0, "drm_fimd", ctx); 931 0, "drm_fimd", ctx);
893 if (ret) { 932 if (ret) {
894 dev_err(dev, "irq request failed.\n"); 933 dev_err(dev, "irq request failed.\n");
895 goto err_clk; 934 return ret;
896 } 935 }
897 936
898 ctx->vidcon0 = pdata->vidcon0; 937 ctx->vidcon0 = pdata->vidcon0;
899 ctx->vidcon1 = pdata->vidcon1; 938 ctx->vidcon1 = pdata->vidcon1;
900 ctx->default_win = pdata->default_win; 939 ctx->default_win = pdata->default_win;
901 ctx->panel = panel; 940 ctx->panel = panel;
941 DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
942 atomic_set(&ctx->wait_vsync_event, 0);
902 943
903 subdrv = &ctx->subdrv; 944 subdrv = &ctx->subdrv;
904 945
@@ -926,20 +967,9 @@ static int __devinit fimd_probe(struct platform_device *pdev)
926 exynos_drm_subdrv_register(subdrv); 967 exynos_drm_subdrv_register(subdrv);
927 968
928 return 0; 969 return 0;
929
930err_clk:
931 clk_disable(ctx->lcd_clk);
932 clk_put(ctx->lcd_clk);
933
934err_bus_clk:
935 clk_disable(ctx->bus_clk);
936 clk_put(ctx->bus_clk);
937
938err_clk_get:
939 return ret;
940} 970}
941 971
942static int __devexit fimd_remove(struct platform_device *pdev) 972static int fimd_remove(struct platform_device *pdev)
943{ 973{
944 struct device *dev = &pdev->dev; 974 struct device *dev = &pdev->dev;
945 struct fimd_context *ctx = platform_get_drvdata(pdev); 975 struct fimd_context *ctx = platform_get_drvdata(pdev);
@@ -960,9 +990,6 @@ static int __devexit fimd_remove(struct platform_device *pdev)
960out: 990out:
961 pm_runtime_disable(dev); 991 pm_runtime_disable(dev);
962 992
963 clk_put(ctx->lcd_clk);
964 clk_put(ctx->bus_clk);
965
966 return 0; 993 return 0;
967} 994}
968 995
@@ -991,7 +1018,7 @@ static int fimd_resume(struct device *dev)
991 * of pm runtime would still be 1 so in this case, fimd driver 1018 * of pm runtime would still be 1 so in this case, fimd driver
992 * should be on directly not drawing on pm runtime interface. 1019 * should be on directly not drawing on pm runtime interface.
993 */ 1020 */
994 if (pm_runtime_suspended(dev)) { 1021 if (!pm_runtime_suspended(dev)) {
995 int ret; 1022 int ret;
996 1023
997 ret = fimd_activate(ctx, true); 1024 ret = fimd_activate(ctx, true);
@@ -1050,11 +1077,12 @@ static const struct dev_pm_ops fimd_pm_ops = {
1050 1077
1051struct platform_driver fimd_driver = { 1078struct platform_driver fimd_driver = {
1052 .probe = fimd_probe, 1079 .probe = fimd_probe,
1053 .remove = __devexit_p(fimd_remove), 1080 .remove = fimd_remove,
1054 .id_table = fimd_driver_ids, 1081 .id_table = fimd_driver_ids,
1055 .driver = { 1082 .driver = {
1056 .name = "exynos4-fb", 1083 .name = "exynos4-fb",
1057 .owner = THIS_MODULE, 1084 .owner = THIS_MODULE,
1058 .pm = &fimd_pm_ops, 1085 .pm = &fimd_pm_ops,
1086 .of_match_table = of_match_ptr(fimd_driver_dt_match),
1059 }, 1087 },
1060}; 1088};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index f7aab24ea46c..36c3905536a6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -17,11 +17,14 @@
17#include <linux/pm_runtime.h> 17#include <linux/pm_runtime.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/workqueue.h> 19#include <linux/workqueue.h>
20#include <linux/dma-mapping.h>
21#include <linux/dma-attrs.h>
20 22
21#include <drm/drmP.h> 23#include <drm/drmP.h>
22#include <drm/exynos_drm.h> 24#include <drm/exynos_drm.h>
23#include "exynos_drm_drv.h" 25#include "exynos_drm_drv.h"
24#include "exynos_drm_gem.h" 26#include "exynos_drm_gem.h"
27#include "exynos_drm_iommu.h"
25 28
26#define G2D_HW_MAJOR_VER 4 29#define G2D_HW_MAJOR_VER 4
27#define G2D_HW_MINOR_VER 1 30#define G2D_HW_MINOR_VER 1
@@ -92,11 +95,21 @@
92#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM) 95#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
93#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2) 96#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
94 97
98#define MAX_BUF_ADDR_NR 6
99
100/* maximum buffer pool size of userptr is 64MB as default */
101#define MAX_POOL (64 * 1024 * 1024)
102
103enum {
104 BUF_TYPE_GEM = 1,
105 BUF_TYPE_USERPTR,
106};
107
95/* cmdlist data structure */ 108/* cmdlist data structure */
96struct g2d_cmdlist { 109struct g2d_cmdlist {
97 u32 head; 110 u32 head;
98 u32 data[G2D_CMDLIST_DATA_NUM]; 111 unsigned long data[G2D_CMDLIST_DATA_NUM];
99 u32 last; /* last data offset */ 112 u32 last; /* last data offset */
100}; 113};
101 114
102struct drm_exynos_pending_g2d_event { 115struct drm_exynos_pending_g2d_event {
@@ -104,15 +117,26 @@ struct drm_exynos_pending_g2d_event {
104 struct drm_exynos_g2d_event event; 117 struct drm_exynos_g2d_event event;
105}; 118};
106 119
107struct g2d_gem_node { 120struct g2d_cmdlist_userptr {
108 struct list_head list; 121 struct list_head list;
109 unsigned int handle; 122 dma_addr_t dma_addr;
123 unsigned long userptr;
124 unsigned long size;
125 struct page **pages;
126 unsigned int npages;
127 struct sg_table *sgt;
128 struct vm_area_struct *vma;
129 atomic_t refcount;
130 bool in_pool;
131 bool out_of_list;
110}; 132};
111 133
112struct g2d_cmdlist_node { 134struct g2d_cmdlist_node {
113 struct list_head list; 135 struct list_head list;
114 struct g2d_cmdlist *cmdlist; 136 struct g2d_cmdlist *cmdlist;
115 unsigned int gem_nr; 137 unsigned int map_nr;
138 unsigned long handles[MAX_BUF_ADDR_NR];
139 unsigned int obj_type[MAX_BUF_ADDR_NR];
116 dma_addr_t dma_addr; 140 dma_addr_t dma_addr;
117 141
118 struct drm_exynos_pending_g2d_event *event; 142 struct drm_exynos_pending_g2d_event *event;
@@ -122,6 +146,7 @@ struct g2d_runqueue_node {
122 struct list_head list; 146 struct list_head list;
123 struct list_head run_cmdlist; 147 struct list_head run_cmdlist;
124 struct list_head event_list; 148 struct list_head event_list;
149 struct drm_file *filp;
125 pid_t pid; 150 pid_t pid;
126 struct completion complete; 151 struct completion complete;
127 int async; 152 int async;
@@ -143,23 +168,33 @@ struct g2d_data {
143 struct mutex cmdlist_mutex; 168 struct mutex cmdlist_mutex;
144 dma_addr_t cmdlist_pool; 169 dma_addr_t cmdlist_pool;
145 void *cmdlist_pool_virt; 170 void *cmdlist_pool_virt;
171 struct dma_attrs cmdlist_dma_attrs;
146 172
147 /* runqueue*/ 173 /* runqueue*/
148 struct g2d_runqueue_node *runqueue_node; 174 struct g2d_runqueue_node *runqueue_node;
149 struct list_head runqueue; 175 struct list_head runqueue;
150 struct mutex runqueue_mutex; 176 struct mutex runqueue_mutex;
151 struct kmem_cache *runqueue_slab; 177 struct kmem_cache *runqueue_slab;
178
179 unsigned long current_pool;
180 unsigned long max_pool;
152}; 181};
153 182
154static int g2d_init_cmdlist(struct g2d_data *g2d) 183static int g2d_init_cmdlist(struct g2d_data *g2d)
155{ 184{
156 struct device *dev = g2d->dev; 185 struct device *dev = g2d->dev;
157 struct g2d_cmdlist_node *node = g2d->cmdlist_node; 186 struct g2d_cmdlist_node *node = g2d->cmdlist_node;
187 struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
158 int nr; 188 int nr;
159 int ret; 189 int ret;
160 190
161 g2d->cmdlist_pool_virt = dma_alloc_coherent(dev, G2D_CMDLIST_POOL_SIZE, 191 init_dma_attrs(&g2d->cmdlist_dma_attrs);
162 &g2d->cmdlist_pool, GFP_KERNEL); 192 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
193
194 g2d->cmdlist_pool_virt = dma_alloc_attrs(subdrv->drm_dev->dev,
195 G2D_CMDLIST_POOL_SIZE,
196 &g2d->cmdlist_pool, GFP_KERNEL,
197 &g2d->cmdlist_dma_attrs);
163 if (!g2d->cmdlist_pool_virt) { 198 if (!g2d->cmdlist_pool_virt) {
164 dev_err(dev, "failed to allocate dma memory\n"); 199 dev_err(dev, "failed to allocate dma memory\n");
165 return -ENOMEM; 200 return -ENOMEM;
@@ -184,18 +219,20 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
184 return 0; 219 return 0;
185 220
186err: 221err:
187 dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt, 222 dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
188 g2d->cmdlist_pool); 223 g2d->cmdlist_pool_virt,
224 g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
189 return ret; 225 return ret;
190} 226}
191 227
192static void g2d_fini_cmdlist(struct g2d_data *g2d) 228static void g2d_fini_cmdlist(struct g2d_data *g2d)
193{ 229{
194 struct device *dev = g2d->dev; 230 struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
195 231
196 kfree(g2d->cmdlist_node); 232 kfree(g2d->cmdlist_node);
197 dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt, 233 dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
198 g2d->cmdlist_pool); 234 g2d->cmdlist_pool_virt,
235 g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
199} 236}
200 237
201static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d) 238static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
@@ -245,62 +282,300 @@ add_to_list:
245 list_add_tail(&node->event->base.link, &g2d_priv->event_list); 282 list_add_tail(&node->event->base.link, &g2d_priv->event_list);
246} 283}
247 284
248static int g2d_get_cmdlist_gem(struct drm_device *drm_dev, 285static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
249 struct drm_file *file, 286 unsigned long obj,
250 struct g2d_cmdlist_node *node) 287 bool force)
251{ 288{
252 struct drm_exynos_file_private *file_priv = file->driver_priv; 289 struct g2d_cmdlist_userptr *g2d_userptr =
290 (struct g2d_cmdlist_userptr *)obj;
291
292 if (!obj)
293 return;
294
295 if (force)
296 goto out;
297
298 atomic_dec(&g2d_userptr->refcount);
299
300 if (atomic_read(&g2d_userptr->refcount) > 0)
301 return;
302
303 if (g2d_userptr->in_pool)
304 return;
305
306out:
307 exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
308 DMA_BIDIRECTIONAL);
309
310 exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
311 g2d_userptr->npages,
312 g2d_userptr->vma);
313
314 if (!g2d_userptr->out_of_list)
315 list_del_init(&g2d_userptr->list);
316
317 sg_free_table(g2d_userptr->sgt);
318 kfree(g2d_userptr->sgt);
319 g2d_userptr->sgt = NULL;
320
321 kfree(g2d_userptr->pages);
322 g2d_userptr->pages = NULL;
323 kfree(g2d_userptr);
324 g2d_userptr = NULL;
325}
326
327dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
328 unsigned long userptr,
329 unsigned long size,
330 struct drm_file *filp,
331 unsigned long *obj)
332{
333 struct drm_exynos_file_private *file_priv = filp->driver_priv;
334 struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
335 struct g2d_cmdlist_userptr *g2d_userptr;
336 struct g2d_data *g2d;
337 struct page **pages;
338 struct sg_table *sgt;
339 struct vm_area_struct *vma;
340 unsigned long start, end;
341 unsigned int npages, offset;
342 int ret;
343
344 if (!size) {
345 DRM_ERROR("invalid userptr size.\n");
346 return ERR_PTR(-EINVAL);
347 }
348
349 g2d = dev_get_drvdata(g2d_priv->dev);
350
351 /* check if userptr already exists in userptr_list. */
352 list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) {
353 if (g2d_userptr->userptr == userptr) {
354 /*
355 * also check size because there could be same address
356 * and different size.
357 */
358 if (g2d_userptr->size == size) {
359 atomic_inc(&g2d_userptr->refcount);
360 *obj = (unsigned long)g2d_userptr;
361
362 return &g2d_userptr->dma_addr;
363 }
364
365 /*
366 * at this moment, maybe g2d dma is accessing this
367 * g2d_userptr memory region so just remove this
368 * g2d_userptr object from userptr_list not to be
369 * referred again and also except it the userptr
370 * pool to be released after the dma access completion.
371 */
372 g2d_userptr->out_of_list = true;
373 g2d_userptr->in_pool = false;
374 list_del_init(&g2d_userptr->list);
375
376 break;
377 }
378 }
379
380 g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
381 if (!g2d_userptr) {
382 DRM_ERROR("failed to allocate g2d_userptr.\n");
383 return ERR_PTR(-ENOMEM);
384 }
385
386 atomic_set(&g2d_userptr->refcount, 1);
387
388 start = userptr & PAGE_MASK;
389 offset = userptr & ~PAGE_MASK;
390 end = PAGE_ALIGN(userptr + size);
391 npages = (end - start) >> PAGE_SHIFT;
392 g2d_userptr->npages = npages;
393
394 pages = kzalloc(npages * sizeof(struct page *), GFP_KERNEL);
395 if (!pages) {
396 DRM_ERROR("failed to allocate pages.\n");
397 kfree(g2d_userptr);
398 return ERR_PTR(-ENOMEM);
399 }
400
401 vma = find_vma(current->mm, userptr);
402 if (!vma) {
403 DRM_ERROR("failed to get vm region.\n");
404 ret = -EFAULT;
405 goto err_free_pages;
406 }
407
408 if (vma->vm_end < userptr + size) {
409 DRM_ERROR("vma is too small.\n");
410 ret = -EFAULT;
411 goto err_free_pages;
412 }
413
414 g2d_userptr->vma = exynos_gem_get_vma(vma);
415 if (!g2d_userptr->vma) {
416 DRM_ERROR("failed to copy vma.\n");
417 ret = -ENOMEM;
418 goto err_free_pages;
419 }
420
421 g2d_userptr->size = size;
422
423 ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
424 npages, pages, vma);
425 if (ret < 0) {
426 DRM_ERROR("failed to get user pages from userptr.\n");
427 goto err_put_vma;
428 }
429
430 g2d_userptr->pages = pages;
431
432 sgt = kzalloc(sizeof *sgt, GFP_KERNEL);
433 if (!sgt) {
434 DRM_ERROR("failed to allocate sg table.\n");
435 ret = -ENOMEM;
436 goto err_free_userptr;
437 }
438
439 ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
440 size, GFP_KERNEL);
441 if (ret < 0) {
442 DRM_ERROR("failed to get sgt from pages.\n");
443 goto err_free_sgt;
444 }
445
446 g2d_userptr->sgt = sgt;
447
448 ret = exynos_gem_map_sgt_with_dma(drm_dev, g2d_userptr->sgt,
449 DMA_BIDIRECTIONAL);
450 if (ret < 0) {
451 DRM_ERROR("failed to map sgt with dma region.\n");
452 goto err_free_sgt;
453 }
454
455 g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
456 g2d_userptr->userptr = userptr;
457
458 list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list);
459
460 if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
461 g2d->current_pool += npages << PAGE_SHIFT;
462 g2d_userptr->in_pool = true;
463 }
464
465 *obj = (unsigned long)g2d_userptr;
466
467 return &g2d_userptr->dma_addr;
468
469err_free_sgt:
470 sg_free_table(sgt);
471 kfree(sgt);
472 sgt = NULL;
473
474err_free_userptr:
475 exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
476 g2d_userptr->npages,
477 g2d_userptr->vma);
478
479err_put_vma:
480 exynos_gem_put_vma(g2d_userptr->vma);
481
482err_free_pages:
483 kfree(pages);
484 kfree(g2d_userptr);
485 pages = NULL;
486 g2d_userptr = NULL;
487
488 return ERR_PTR(ret);
489}
490
491static void g2d_userptr_free_all(struct drm_device *drm_dev,
492 struct g2d_data *g2d,
493 struct drm_file *filp)
494{
495 struct drm_exynos_file_private *file_priv = filp->driver_priv;
253 struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv; 496 struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
497 struct g2d_cmdlist_userptr *g2d_userptr, *n;
498
499 list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list)
500 if (g2d_userptr->in_pool)
501 g2d_userptr_put_dma_addr(drm_dev,
502 (unsigned long)g2d_userptr,
503 true);
504
505 g2d->current_pool = 0;
506}
507
508static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
509 struct g2d_cmdlist_node *node,
510 struct drm_device *drm_dev,
511 struct drm_file *file)
512{
254 struct g2d_cmdlist *cmdlist = node->cmdlist; 513 struct g2d_cmdlist *cmdlist = node->cmdlist;
255 dma_addr_t *addr;
256 int offset; 514 int offset;
257 int i; 515 int i;
258 516
259 for (i = 0; i < node->gem_nr; i++) { 517 for (i = 0; i < node->map_nr; i++) {
260 struct g2d_gem_node *gem_node; 518 unsigned long handle;
261 519 dma_addr_t *addr;
262 gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
263 if (!gem_node) {
264 dev_err(g2d_priv->dev, "failed to allocate gem node\n");
265 return -ENOMEM;
266 }
267 520
268 offset = cmdlist->last - (i * 2 + 1); 521 offset = cmdlist->last - (i * 2 + 1);
269 gem_node->handle = cmdlist->data[offset]; 522 handle = cmdlist->data[offset];
270 523
271 addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_node->handle, 524 if (node->obj_type[i] == BUF_TYPE_GEM) {
272 file); 525 addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
273 if (IS_ERR(addr)) { 526 file);
274 node->gem_nr = i; 527 if (IS_ERR(addr)) {
275 kfree(gem_node); 528 node->map_nr = i;
276 return PTR_ERR(addr); 529 return -EFAULT;
530 }
531 } else {
532 struct drm_exynos_g2d_userptr g2d_userptr;
533
534 if (copy_from_user(&g2d_userptr, (void __user *)handle,
535 sizeof(struct drm_exynos_g2d_userptr))) {
536 node->map_nr = i;
537 return -EFAULT;
538 }
539
540 addr = g2d_userptr_get_dma_addr(drm_dev,
541 g2d_userptr.userptr,
542 g2d_userptr.size,
543 file,
544 &handle);
545 if (IS_ERR(addr)) {
546 node->map_nr = i;
547 return -EFAULT;
548 }
277 } 549 }
278 550
279 cmdlist->data[offset] = *addr; 551 cmdlist->data[offset] = *addr;
280 list_add_tail(&gem_node->list, &g2d_priv->gem_list); 552 node->handles[i] = handle;
281 g2d_priv->gem_nr++;
282 } 553 }
283 554
284 return 0; 555 return 0;
285} 556}
286 557
287static void g2d_put_cmdlist_gem(struct drm_device *drm_dev, 558static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
288 struct drm_file *file, 559 struct g2d_cmdlist_node *node,
289 unsigned int nr) 560 struct drm_file *filp)
290{ 561{
291 struct drm_exynos_file_private *file_priv = file->driver_priv; 562 struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
292 struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv; 563 int i;
293 struct g2d_gem_node *node, *n;
294 564
295 list_for_each_entry_safe_reverse(node, n, &g2d_priv->gem_list, list) { 565 for (i = 0; i < node->map_nr; i++) {
296 if (!nr) 566 unsigned long handle = node->handles[i];
297 break;
298 567
299 exynos_drm_gem_put_dma_addr(drm_dev, node->handle, file); 568 if (node->obj_type[i] == BUF_TYPE_GEM)
300 list_del_init(&node->list); 569 exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
301 kfree(node); 570 filp);
302 nr--; 571 else
572 g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
573 false);
574
575 node->handles[i] = 0;
303 } 576 }
577
578 node->map_nr = 0;
304} 579}
305 580
306static void g2d_dma_start(struct g2d_data *g2d, 581static void g2d_dma_start(struct g2d_data *g2d,
@@ -337,10 +612,18 @@ static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
337static void g2d_free_runqueue_node(struct g2d_data *g2d, 612static void g2d_free_runqueue_node(struct g2d_data *g2d,
338 struct g2d_runqueue_node *runqueue_node) 613 struct g2d_runqueue_node *runqueue_node)
339{ 614{
615 struct g2d_cmdlist_node *node;
616
340 if (!runqueue_node) 617 if (!runqueue_node)
341 return; 618 return;
342 619
343 mutex_lock(&g2d->cmdlist_mutex); 620 mutex_lock(&g2d->cmdlist_mutex);
621 /*
622 * commands in run_cmdlist have been completed so unmap all gem
623 * objects in each command node so that they are unreferenced.
624 */
625 list_for_each_entry(node, &runqueue_node->run_cmdlist, list)
626 g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp);
344 list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist); 627 list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
345 mutex_unlock(&g2d->cmdlist_mutex); 628 mutex_unlock(&g2d->cmdlist_mutex);
346 629
@@ -430,15 +713,28 @@ static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
430 return IRQ_HANDLED; 713 return IRQ_HANDLED;
431} 714}
432 715
433static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist, 716static int g2d_check_reg_offset(struct device *dev,
717 struct g2d_cmdlist_node *node,
434 int nr, bool for_addr) 718 int nr, bool for_addr)
435{ 719{
720 struct g2d_cmdlist *cmdlist = node->cmdlist;
436 int reg_offset; 721 int reg_offset;
437 int index; 722 int index;
438 int i; 723 int i;
439 724
440 for (i = 0; i < nr; i++) { 725 for (i = 0; i < nr; i++) {
441 index = cmdlist->last - 2 * (i + 1); 726 index = cmdlist->last - 2 * (i + 1);
727
728 if (for_addr) {
729 /* check userptr buffer type. */
730 reg_offset = (cmdlist->data[index] &
731 ~0x7fffffff) >> 31;
732 if (reg_offset) {
733 node->obj_type[i] = BUF_TYPE_USERPTR;
734 cmdlist->data[index] &= ~G2D_BUF_USERPTR;
735 }
736 }
737
442 reg_offset = cmdlist->data[index] & ~0xfffff000; 738 reg_offset = cmdlist->data[index] & ~0xfffff000;
443 739
444 if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END) 740 if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
@@ -455,6 +751,9 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
455 case G2D_MSK_BASE_ADDR: 751 case G2D_MSK_BASE_ADDR:
456 if (!for_addr) 752 if (!for_addr)
457 goto err; 753 goto err;
754
755 if (node->obj_type[i] != BUF_TYPE_USERPTR)
756 node->obj_type[i] = BUF_TYPE_GEM;
458 break; 757 break;
459 default: 758 default:
460 if (for_addr) 759 if (for_addr)
@@ -466,7 +765,7 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
466 return 0; 765 return 0;
467 766
468err: 767err:
469 dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]); 768 dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
470 return -EINVAL; 769 return -EINVAL;
471} 770}
472 771
@@ -566,7 +865,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
566 } 865 }
567 866
568 /* Check size of cmdlist: last 2 is about G2D_BITBLT_START */ 867 /* Check size of cmdlist: last 2 is about G2D_BITBLT_START */
569 size = cmdlist->last + req->cmd_nr * 2 + req->cmd_gem_nr * 2 + 2; 868 size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
570 if (size > G2D_CMDLIST_DATA_NUM) { 869 if (size > G2D_CMDLIST_DATA_NUM) {
571 dev_err(dev, "cmdlist size is too big\n"); 870 dev_err(dev, "cmdlist size is too big\n");
572 ret = -EINVAL; 871 ret = -EINVAL;
@@ -583,29 +882,29 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
583 } 882 }
584 cmdlist->last += req->cmd_nr * 2; 883 cmdlist->last += req->cmd_nr * 2;
585 884
586 ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_nr, false); 885 ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false);
587 if (ret < 0) 886 if (ret < 0)
588 goto err_free_event; 887 goto err_free_event;
589 888
590 node->gem_nr = req->cmd_gem_nr; 889 node->map_nr = req->cmd_buf_nr;
591 if (req->cmd_gem_nr) { 890 if (req->cmd_buf_nr) {
592 struct drm_exynos_g2d_cmd *cmd_gem; 891 struct drm_exynos_g2d_cmd *cmd_buf;
593 892
594 cmd_gem = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_gem; 893 cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf;
595 894
596 if (copy_from_user(cmdlist->data + cmdlist->last, 895 if (copy_from_user(cmdlist->data + cmdlist->last,
597 (void __user *)cmd_gem, 896 (void __user *)cmd_buf,
598 sizeof(*cmd_gem) * req->cmd_gem_nr)) { 897 sizeof(*cmd_buf) * req->cmd_buf_nr)) {
599 ret = -EFAULT; 898 ret = -EFAULT;
600 goto err_free_event; 899 goto err_free_event;
601 } 900 }
602 cmdlist->last += req->cmd_gem_nr * 2; 901 cmdlist->last += req->cmd_buf_nr * 2;
603 902
604 ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_gem_nr, true); 903 ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, true);
605 if (ret < 0) 904 if (ret < 0)
606 goto err_free_event; 905 goto err_free_event;
607 906
608 ret = g2d_get_cmdlist_gem(drm_dev, file, node); 907 ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file);
609 if (ret < 0) 908 if (ret < 0)
610 goto err_unmap; 909 goto err_unmap;
611 } 910 }
@@ -624,7 +923,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
624 return 0; 923 return 0;
625 924
626err_unmap: 925err_unmap:
627 g2d_put_cmdlist_gem(drm_dev, file, node->gem_nr); 926 g2d_unmap_cmdlist_gem(g2d, node, file);
628err_free_event: 927err_free_event:
629 if (node->event) { 928 if (node->event) {
630 spin_lock_irqsave(&drm_dev->event_lock, flags); 929 spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -680,6 +979,7 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
680 979
681 mutex_lock(&g2d->runqueue_mutex); 980 mutex_lock(&g2d->runqueue_mutex);
682 runqueue_node->pid = current->pid; 981 runqueue_node->pid = current->pid;
982 runqueue_node->filp = file;
683 list_add_tail(&runqueue_node->list, &g2d->runqueue); 983 list_add_tail(&runqueue_node->list, &g2d->runqueue);
684 if (!g2d->runqueue_node) 984 if (!g2d->runqueue_node)
685 g2d_exec_runqueue(g2d); 985 g2d_exec_runqueue(g2d);
@@ -696,6 +996,43 @@ out:
696} 996}
697EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl); 997EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
698 998
999static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1000{
1001 struct g2d_data *g2d;
1002 int ret;
1003
1004 g2d = dev_get_drvdata(dev);
1005 if (!g2d)
1006 return -EFAULT;
1007
1008 /* allocate dma-aware cmdlist buffer. */
1009 ret = g2d_init_cmdlist(g2d);
1010 if (ret < 0) {
1011 dev_err(dev, "cmdlist init failed\n");
1012 return ret;
1013 }
1014
1015 if (!is_drm_iommu_supported(drm_dev))
1016 return 0;
1017
1018 ret = drm_iommu_attach_device(drm_dev, dev);
1019 if (ret < 0) {
1020 dev_err(dev, "failed to enable iommu.\n");
1021 g2d_fini_cmdlist(g2d);
1022 }
1023
1024 return ret;
1025
1026}
1027
1028static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
1029{
1030 if (!is_drm_iommu_supported(drm_dev))
1031 return;
1032
1033 drm_iommu_detach_device(drm_dev, dev);
1034}
1035
699static int g2d_open(struct drm_device *drm_dev, struct device *dev, 1036static int g2d_open(struct drm_device *drm_dev, struct device *dev,
700 struct drm_file *file) 1037 struct drm_file *file)
701{ 1038{
@@ -713,7 +1050,7 @@ static int g2d_open(struct drm_device *drm_dev, struct device *dev,
713 1050
714 INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist); 1051 INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
715 INIT_LIST_HEAD(&g2d_priv->event_list); 1052 INIT_LIST_HEAD(&g2d_priv->event_list);
716 INIT_LIST_HEAD(&g2d_priv->gem_list); 1053 INIT_LIST_HEAD(&g2d_priv->userptr_list);
717 1054
718 return 0; 1055 return 0;
719} 1056}
@@ -734,16 +1071,26 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev,
734 return; 1071 return;
735 1072
736 mutex_lock(&g2d->cmdlist_mutex); 1073 mutex_lock(&g2d->cmdlist_mutex);
737 list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) 1074 list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) {
1075 /*
1076 * unmap all gem objects not completed.
1077 *
1078 * P.S. if current process was terminated forcely then
1079 * there may be some commands in inuse_cmdlist so unmap
1080 * them.
1081 */
1082 g2d_unmap_cmdlist_gem(g2d, node, file);
738 list_move_tail(&node->list, &g2d->free_cmdlist); 1083 list_move_tail(&node->list, &g2d->free_cmdlist);
1084 }
739 mutex_unlock(&g2d->cmdlist_mutex); 1085 mutex_unlock(&g2d->cmdlist_mutex);
740 1086
741 g2d_put_cmdlist_gem(drm_dev, file, g2d_priv->gem_nr); 1087 /* release all g2d_userptr in pool. */
1088 g2d_userptr_free_all(drm_dev, g2d, file);
742 1089
743 kfree(file_priv->g2d_priv); 1090 kfree(file_priv->g2d_priv);
744} 1091}
745 1092
746static int __devinit g2d_probe(struct platform_device *pdev) 1093static int g2d_probe(struct platform_device *pdev)
747{ 1094{
748 struct device *dev = &pdev->dev; 1095 struct device *dev = &pdev->dev;
749 struct resource *res; 1096 struct resource *res;
@@ -778,15 +1125,11 @@ static int __devinit g2d_probe(struct platform_device *pdev)
778 mutex_init(&g2d->cmdlist_mutex); 1125 mutex_init(&g2d->cmdlist_mutex);
779 mutex_init(&g2d->runqueue_mutex); 1126 mutex_init(&g2d->runqueue_mutex);
780 1127
781 ret = g2d_init_cmdlist(g2d); 1128 g2d->gate_clk = devm_clk_get(dev, "fimg2d");
782 if (ret < 0)
783 goto err_destroy_workqueue;
784
785 g2d->gate_clk = clk_get(dev, "fimg2d");
786 if (IS_ERR(g2d->gate_clk)) { 1129 if (IS_ERR(g2d->gate_clk)) {
787 dev_err(dev, "failed to get gate clock\n"); 1130 dev_err(dev, "failed to get gate clock\n");
788 ret = PTR_ERR(g2d->gate_clk); 1131 ret = PTR_ERR(g2d->gate_clk);
789 goto err_fini_cmdlist; 1132 goto err_destroy_workqueue;
790 } 1133 }
791 1134
792 pm_runtime_enable(dev); 1135 pm_runtime_enable(dev);
@@ -814,10 +1157,14 @@ static int __devinit g2d_probe(struct platform_device *pdev)
814 goto err_put_clk; 1157 goto err_put_clk;
815 } 1158 }
816 1159
1160 g2d->max_pool = MAX_POOL;
1161
817 platform_set_drvdata(pdev, g2d); 1162 platform_set_drvdata(pdev, g2d);
818 1163
819 subdrv = &g2d->subdrv; 1164 subdrv = &g2d->subdrv;
820 subdrv->dev = dev; 1165 subdrv->dev = dev;
1166 subdrv->probe = g2d_subdrv_probe;
1167 subdrv->remove = g2d_subdrv_remove;
821 subdrv->open = g2d_open; 1168 subdrv->open = g2d_open;
822 subdrv->close = g2d_close; 1169 subdrv->close = g2d_close;
823 1170
@@ -834,9 +1181,6 @@ static int __devinit g2d_probe(struct platform_device *pdev)
834 1181
835err_put_clk: 1182err_put_clk:
836 pm_runtime_disable(dev); 1183 pm_runtime_disable(dev);
837 clk_put(g2d->gate_clk);
838err_fini_cmdlist:
839 g2d_fini_cmdlist(g2d);
840err_destroy_workqueue: 1184err_destroy_workqueue:
841 destroy_workqueue(g2d->g2d_workq); 1185 destroy_workqueue(g2d->g2d_workq);
842err_destroy_slab: 1186err_destroy_slab:
@@ -844,7 +1188,7 @@ err_destroy_slab:
844 return ret; 1188 return ret;
845} 1189}
846 1190
847static int __devexit g2d_remove(struct platform_device *pdev) 1191static int g2d_remove(struct platform_device *pdev)
848{ 1192{
849 struct g2d_data *g2d = platform_get_drvdata(pdev); 1193 struct g2d_data *g2d = platform_get_drvdata(pdev);
850 1194
@@ -857,7 +1201,6 @@ static int __devexit g2d_remove(struct platform_device *pdev)
857 } 1201 }
858 1202
859 pm_runtime_disable(&pdev->dev); 1203 pm_runtime_disable(&pdev->dev);
860 clk_put(g2d->gate_clk);
861 1204
862 g2d_fini_cmdlist(g2d); 1205 g2d_fini_cmdlist(g2d);
863 destroy_workqueue(g2d->g2d_workq); 1206 destroy_workqueue(g2d->g2d_workq);
@@ -899,7 +1242,7 @@ static SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
899 1242
900struct platform_driver g2d_driver = { 1243struct platform_driver g2d_driver = {
901 .probe = g2d_probe, 1244 .probe = g2d_probe,
902 .remove = __devexit_p(g2d_remove), 1245 .remove = g2d_remove,
903 .driver = { 1246 .driver = {
904 .name = "s5p-g2d", 1247 .name = "s5p-g2d",
905 .owner = THIS_MODULE, 1248 .owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index d2545560664f..473180776528 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -3,24 +3,10 @@
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd. 3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * Author: Inki Dae <inki.dae@samsung.com> 4 * Author: Inki Dae <inki.dae@samsung.com>
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * This program is free software; you can redistribute it and/or modify it
7 * copy of this software and associated documentation files (the "Software"), 7 * under the terms of the GNU General Public License as published by the
8 * to deal in the Software without restriction, including without limitation 8 * Free Software Foundation; either version 2 of the License, or (at your
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * option) any later version.
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */ 10 */
25 11
26#include <drm/drmP.h> 12#include <drm/drmP.h>
@@ -83,157 +69,40 @@ static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
83 69
84static unsigned long roundup_gem_size(unsigned long size, unsigned int flags) 70static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
85{ 71{
86 if (!IS_NONCONTIG_BUFFER(flags)) { 72 /* TODO */
87 if (size >= SZ_1M)
88 return roundup(size, SECTION_SIZE);
89 else if (size >= SZ_64K)
90 return roundup(size, SZ_64K);
91 else
92 goto out;
93 }
94out:
95 return roundup(size, PAGE_SIZE);
96}
97
98struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
99 gfp_t gfpmask)
100{
101 struct page *p, **pages;
102 int i, npages;
103
104 npages = obj->size >> PAGE_SHIFT;
105
106 pages = drm_malloc_ab(npages, sizeof(struct page *));
107 if (pages == NULL)
108 return ERR_PTR(-ENOMEM);
109
110 for (i = 0; i < npages; i++) {
111 p = alloc_page(gfpmask);
112 if (IS_ERR(p))
113 goto fail;
114 pages[i] = p;
115 }
116
117 return pages;
118
119fail:
120 while (--i)
121 __free_page(pages[i]);
122
123 drm_free_large(pages);
124 return ERR_CAST(p);
125}
126
127static void exynos_gem_put_pages(struct drm_gem_object *obj,
128 struct page **pages)
129{
130 int npages;
131 73
132 npages = obj->size >> PAGE_SHIFT; 74 return roundup(size, PAGE_SIZE);
133
134 while (--npages >= 0)
135 __free_page(pages[npages]);
136
137 drm_free_large(pages);
138} 75}
139 76
140static int exynos_drm_gem_map_pages(struct drm_gem_object *obj, 77static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
141 struct vm_area_struct *vma, 78 struct vm_area_struct *vma,
142 unsigned long f_vaddr, 79 unsigned long f_vaddr,
143 pgoff_t page_offset) 80 pgoff_t page_offset)
144{ 81{
145 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); 82 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
146 struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer; 83 struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
84 struct scatterlist *sgl;
147 unsigned long pfn; 85 unsigned long pfn;
86 int i;
148 87
149 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { 88 if (!buf->sgt)
150 if (!buf->pages) 89 return -EINTR;
151 return -EINTR;
152
153 pfn = page_to_pfn(buf->pages[page_offset++]);
154 } else
155 pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
156
157 return vm_insert_mixed(vma, f_vaddr, pfn);
158}
159
160static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
161{
162 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
163 struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
164 struct scatterlist *sgl;
165 struct page **pages;
166 unsigned int npages, i = 0;
167 int ret;
168 90
169 if (buf->pages) { 91 if (page_offset >= (buf->size >> PAGE_SHIFT)) {
170 DRM_DEBUG_KMS("already allocated.\n"); 92 DRM_ERROR("invalid page offset\n");
171 return -EINVAL; 93 return -EINVAL;
172 } 94 }
173 95
174 pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
175 if (IS_ERR(pages)) {
176 DRM_ERROR("failed to get pages.\n");
177 return PTR_ERR(pages);
178 }
179
180 npages = obj->size >> PAGE_SHIFT;
181 buf->page_size = PAGE_SIZE;
182
183 buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
184 if (!buf->sgt) {
185 DRM_ERROR("failed to allocate sg table.\n");
186 ret = -ENOMEM;
187 goto err;
188 }
189
190 ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
191 if (ret < 0) {
192 DRM_ERROR("failed to initialize sg table.\n");
193 ret = -EFAULT;
194 goto err1;
195 }
196
197 sgl = buf->sgt->sgl; 96 sgl = buf->sgt->sgl;
198 97 for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
199 /* set all pages to sg list. */ 98 if (page_offset < (sgl->length >> PAGE_SHIFT))
200 while (i < npages) { 99 break;
201 sg_set_page(sgl, pages[i], PAGE_SIZE, 0); 100 page_offset -= (sgl->length >> PAGE_SHIFT);
202 sg_dma_address(sgl) = page_to_phys(pages[i]);
203 i++;
204 sgl = sg_next(sgl);
205 } 101 }
206 102
207 /* add some codes for UNCACHED type here. TODO */ 103 pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
208
209 buf->pages = pages;
210 return ret;
211err1:
212 kfree(buf->sgt);
213 buf->sgt = NULL;
214err:
215 exynos_gem_put_pages(obj, pages);
216 return ret;
217
218}
219
220static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
221{
222 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
223 struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
224
225 /*
226 * if buffer typs is EXYNOS_BO_NONCONTIG then release all pages
227 * allocated at gem fault handler.
228 */
229 sg_free_table(buf->sgt);
230 kfree(buf->sgt);
231 buf->sgt = NULL;
232
233 exynos_gem_put_pages(obj, buf->pages);
234 buf->pages = NULL;
235 104
236 /* add some codes for UNCACHED type here. TODO */ 105 return vm_insert_mixed(vma, f_vaddr, pfn);
237} 106}
238 107
239static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, 108static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -270,9 +139,6 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
270 139
271 DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count)); 140 DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
272 141
273 if (!buf->pages)
274 return;
275
276 /* 142 /*
277 * do not release memory region from exporter. 143 * do not release memory region from exporter.
278 * 144 *
@@ -282,10 +148,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
282 if (obj->import_attach) 148 if (obj->import_attach)
283 goto out; 149 goto out;
284 150
285 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) 151 exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
286 exynos_drm_gem_put_pages(obj);
287 else
288 exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
289 152
290out: 153out:
291 exynos_drm_fini_buf(obj->dev, buf); 154 exynos_drm_fini_buf(obj->dev, buf);
@@ -364,22 +227,10 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
364 /* set memory type and cache attribute from user side. */ 227 /* set memory type and cache attribute from user side. */
365 exynos_gem_obj->flags = flags; 228 exynos_gem_obj->flags = flags;
366 229
367 /* 230 ret = exynos_drm_alloc_buf(dev, buf, flags);
368 * allocate all pages as desired size if user wants to allocate 231 if (ret < 0) {
369 * physically non-continuous memory. 232 drm_gem_object_release(&exynos_gem_obj->base);
370 */ 233 goto err_fini_buf;
371 if (flags & EXYNOS_BO_NONCONTIG) {
372 ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
373 if (ret < 0) {
374 drm_gem_object_release(&exynos_gem_obj->base);
375 goto err_fini_buf;
376 }
377 } else {
378 ret = exynos_drm_alloc_buf(dev, buf, flags);
379 if (ret < 0) {
380 drm_gem_object_release(&exynos_gem_obj->base);
381 goto err_fini_buf;
382 }
383 } 234 }
384 235
385 return exynos_gem_obj; 236 return exynos_gem_obj;
@@ -412,14 +263,14 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
412 return 0; 263 return 0;
413} 264}
414 265
415void *exynos_drm_gem_get_dma_addr(struct drm_device *dev, 266dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
416 unsigned int gem_handle, 267 unsigned int gem_handle,
417 struct drm_file *file_priv) 268 struct drm_file *filp)
418{ 269{
419 struct exynos_drm_gem_obj *exynos_gem_obj; 270 struct exynos_drm_gem_obj *exynos_gem_obj;
420 struct drm_gem_object *obj; 271 struct drm_gem_object *obj;
421 272
422 obj = drm_gem_object_lookup(dev, file_priv, gem_handle); 273 obj = drm_gem_object_lookup(dev, filp, gem_handle);
423 if (!obj) { 274 if (!obj) {
424 DRM_ERROR("failed to lookup gem object.\n"); 275 DRM_ERROR("failed to lookup gem object.\n");
425 return ERR_PTR(-EINVAL); 276 return ERR_PTR(-EINVAL);
@@ -427,25 +278,17 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
427 278
428 exynos_gem_obj = to_exynos_gem_obj(obj); 279 exynos_gem_obj = to_exynos_gem_obj(obj);
429 280
430 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
431 DRM_DEBUG_KMS("not support NONCONTIG type.\n");
432 drm_gem_object_unreference_unlocked(obj);
433
434 /* TODO */
435 return ERR_PTR(-EINVAL);
436 }
437
438 return &exynos_gem_obj->buffer->dma_addr; 281 return &exynos_gem_obj->buffer->dma_addr;
439} 282}
440 283
441void exynos_drm_gem_put_dma_addr(struct drm_device *dev, 284void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
442 unsigned int gem_handle, 285 unsigned int gem_handle,
443 struct drm_file *file_priv) 286 struct drm_file *filp)
444{ 287{
445 struct exynos_drm_gem_obj *exynos_gem_obj; 288 struct exynos_drm_gem_obj *exynos_gem_obj;
446 struct drm_gem_object *obj; 289 struct drm_gem_object *obj;
447 290
448 obj = drm_gem_object_lookup(dev, file_priv, gem_handle); 291 obj = drm_gem_object_lookup(dev, filp, gem_handle);
449 if (!obj) { 292 if (!obj) {
450 DRM_ERROR("failed to lookup gem object.\n"); 293 DRM_ERROR("failed to lookup gem object.\n");
451 return; 294 return;
@@ -453,14 +296,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
453 296
454 exynos_gem_obj = to_exynos_gem_obj(obj); 297 exynos_gem_obj = to_exynos_gem_obj(obj);
455 298
456 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
457 DRM_DEBUG_KMS("not support NONCONTIG type.\n");
458 drm_gem_object_unreference_unlocked(obj);
459
460 /* TODO */
461 return;
462 }
463
464 drm_gem_object_unreference_unlocked(obj); 299 drm_gem_object_unreference_unlocked(obj);
465 300
466 /* 301 /*
@@ -489,22 +324,57 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
489 &args->offset); 324 &args->offset);
490} 325}
491 326
327static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
328 struct file *filp)
329{
330 struct drm_file *file_priv;
331
332 mutex_lock(&drm_dev->struct_mutex);
333
334 /* find current process's drm_file from filelist. */
335 list_for_each_entry(file_priv, &drm_dev->filelist, lhead) {
336 if (file_priv->filp == filp) {
337 mutex_unlock(&drm_dev->struct_mutex);
338 return file_priv;
339 }
340 }
341
342 mutex_unlock(&drm_dev->struct_mutex);
343 WARN_ON(1);
344
345 return ERR_PTR(-EFAULT);
346}
347
492static int exynos_drm_gem_mmap_buffer(struct file *filp, 348static int exynos_drm_gem_mmap_buffer(struct file *filp,
493 struct vm_area_struct *vma) 349 struct vm_area_struct *vma)
494{ 350{
495 struct drm_gem_object *obj = filp->private_data; 351 struct drm_gem_object *obj = filp->private_data;
496 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); 352 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
353 struct drm_device *drm_dev = obj->dev;
497 struct exynos_drm_gem_buf *buffer; 354 struct exynos_drm_gem_buf *buffer;
498 unsigned long pfn, vm_size, usize, uaddr = vma->vm_start; 355 struct drm_file *file_priv;
356 unsigned long vm_size;
499 int ret; 357 int ret;
500 358
501 DRM_DEBUG_KMS("%s\n", __FILE__); 359 DRM_DEBUG_KMS("%s\n", __FILE__);
502 360
503 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; 361 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
362 vma->vm_private_data = obj;
363 vma->vm_ops = drm_dev->driver->gem_vm_ops;
364
365 /* restore it to driver's fops. */
366 filp->f_op = fops_get(drm_dev->driver->fops);
367
368 file_priv = exynos_drm_find_drm_file(drm_dev, filp);
369 if (IS_ERR(file_priv))
370 return PTR_ERR(file_priv);
371
372 /* restore it to drm_file. */
373 filp->private_data = file_priv;
504 374
505 update_vm_cache_attr(exynos_gem_obj, vma); 375 update_vm_cache_attr(exynos_gem_obj, vma);
506 376
507 vm_size = usize = vma->vm_end - vma->vm_start; 377 vm_size = vma->vm_end - vma->vm_start;
508 378
509 /* 379 /*
510 * a buffer contains information to physically continuous memory 380 * a buffer contains information to physically continuous memory
@@ -516,40 +386,23 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
516 if (vm_size > buffer->size) 386 if (vm_size > buffer->size)
517 return -EINVAL; 387 return -EINVAL;
518 388
519 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { 389 ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
520 int i = 0; 390 buffer->dma_addr, buffer->size,
521 391 &buffer->dma_attrs);
522 if (!buffer->pages) 392 if (ret < 0) {
523 return -EINVAL; 393 DRM_ERROR("failed to mmap.\n");
394 return ret;
395 }
524 396
525 vma->vm_flags |= VM_MIXEDMAP; 397 /*
398 * take a reference to this mapping of the object. And this reference
399 * is unreferenced by the corresponding vm_close call.
400 */
401 drm_gem_object_reference(obj);
526 402
527 do { 403 mutex_lock(&drm_dev->struct_mutex);
528 ret = vm_insert_page(vma, uaddr, buffer->pages[i++]); 404 drm_vm_open_locked(drm_dev, vma);
529 if (ret) { 405 mutex_unlock(&drm_dev->struct_mutex);
530 DRM_ERROR("failed to remap user space.\n");
531 return ret;
532 }
533
534 uaddr += PAGE_SIZE;
535 usize -= PAGE_SIZE;
536 } while (usize > 0);
537 } else {
538 /*
539 * get page frame number to physical memory to be mapped
540 * to user space.
541 */
542 pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
543 PAGE_SHIFT;
544
545 DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
546
547 if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
548 vma->vm_page_prot)) {
549 DRM_ERROR("failed to remap pfn range.\n");
550 return -EAGAIN;
551 }
552 }
553 406
554 return 0; 407 return 0;
555} 408}
@@ -578,16 +431,29 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
578 return -EINVAL; 431 return -EINVAL;
579 } 432 }
580 433
581 obj->filp->f_op = &exynos_drm_gem_fops; 434 /*
582 obj->filp->private_data = obj; 435 * Set specific mmper's fops. And it will be restored by
436 * exynos_drm_gem_mmap_buffer to dev->driver->fops.
437 * This is used to call specific mapper temporarily.
438 */
439 file_priv->filp->f_op = &exynos_drm_gem_fops;
583 440
584 addr = vm_mmap(obj->filp, 0, args->size, 441 /*
442 * Set gem object to private_data so that specific mmaper
443 * can get the gem object. And it will be restored by
444 * exynos_drm_gem_mmap_buffer to drm_file.
445 */
446 file_priv->filp->private_data = obj;
447
448 addr = vm_mmap(file_priv->filp, 0, args->size,
585 PROT_READ | PROT_WRITE, MAP_SHARED, 0); 449 PROT_READ | PROT_WRITE, MAP_SHARED, 0);
586 450
587 drm_gem_object_unreference_unlocked(obj); 451 drm_gem_object_unreference_unlocked(obj);
588 452
589 if (IS_ERR((void *)addr)) 453 if (IS_ERR((void *)addr)) {
454 file_priv->filp->private_data = file_priv;
590 return PTR_ERR((void *)addr); 455 return PTR_ERR((void *)addr);
456 }
591 457
592 args->mapped = addr; 458 args->mapped = addr;
593 459
@@ -622,6 +488,129 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
622 return 0; 488 return 0;
623} 489}
624 490
491struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
492{
493 struct vm_area_struct *vma_copy;
494
495 vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
496 if (!vma_copy)
497 return NULL;
498
499 if (vma->vm_ops && vma->vm_ops->open)
500 vma->vm_ops->open(vma);
501
502 if (vma->vm_file)
503 get_file(vma->vm_file);
504
505 memcpy(vma_copy, vma, sizeof(*vma));
506
507 vma_copy->vm_mm = NULL;
508 vma_copy->vm_next = NULL;
509 vma_copy->vm_prev = NULL;
510
511 return vma_copy;
512}
513
514void exynos_gem_put_vma(struct vm_area_struct *vma)
515{
516 if (!vma)
517 return;
518
519 if (vma->vm_ops && vma->vm_ops->close)
520 vma->vm_ops->close(vma);
521
522 if (vma->vm_file)
523 fput(vma->vm_file);
524
525 kfree(vma);
526}
527
528int exynos_gem_get_pages_from_userptr(unsigned long start,
529 unsigned int npages,
530 struct page **pages,
531 struct vm_area_struct *vma)
532{
533 int get_npages;
534
535 /* the memory region mmaped with VM_PFNMAP. */
536 if (vma_is_io(vma)) {
537 unsigned int i;
538
539 for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
540 unsigned long pfn;
541 int ret = follow_pfn(vma, start, &pfn);
542 if (ret)
543 return ret;
544
545 pages[i] = pfn_to_page(pfn);
546 }
547
548 if (i != npages) {
549 DRM_ERROR("failed to get user_pages.\n");
550 return -EINVAL;
551 }
552
553 return 0;
554 }
555
556 get_npages = get_user_pages(current, current->mm, start,
557 npages, 1, 1, pages, NULL);
558 get_npages = max(get_npages, 0);
559 if (get_npages != npages) {
560 DRM_ERROR("failed to get user_pages.\n");
561 while (get_npages)
562 put_page(pages[--get_npages]);
563 return -EFAULT;
564 }
565
566 return 0;
567}
568
569void exynos_gem_put_pages_to_userptr(struct page **pages,
570 unsigned int npages,
571 struct vm_area_struct *vma)
572{
573 if (!vma_is_io(vma)) {
574 unsigned int i;
575
576 for (i = 0; i < npages; i++) {
577 set_page_dirty_lock(pages[i]);
578
579 /*
580 * undo the reference we took when populating
581 * the table.
582 */
583 put_page(pages[i]);
584 }
585 }
586}
587
588int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
589 struct sg_table *sgt,
590 enum dma_data_direction dir)
591{
592 int nents;
593
594 mutex_lock(&drm_dev->struct_mutex);
595
596 nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
597 if (!nents) {
598 DRM_ERROR("failed to map sgl with dma.\n");
599 mutex_unlock(&drm_dev->struct_mutex);
600 return nents;
601 }
602
603 mutex_unlock(&drm_dev->struct_mutex);
604 return 0;
605}
606
607void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
608 struct sg_table *sgt,
609 enum dma_data_direction dir)
610{
611 dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
612}
613
625int exynos_drm_gem_init_object(struct drm_gem_object *obj) 614int exynos_drm_gem_init_object(struct drm_gem_object *obj)
626{ 615{
627 DRM_DEBUG_KMS("%s\n", __FILE__); 616 DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -753,9 +742,9 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
753 742
754 mutex_lock(&dev->struct_mutex); 743 mutex_lock(&dev->struct_mutex);
755 744
756 ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset); 745 ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
757 if (ret < 0) 746 if (ret < 0)
758 DRM_ERROR("failed to map pages.\n"); 747 DRM_ERROR("failed to map a buffer with user.\n");
759 748
760 mutex_unlock(&dev->struct_mutex); 749 mutex_unlock(&dev->struct_mutex);
761 750
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 085b2a5d5f70..35ebac47dc2b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -3,24 +3,10 @@
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd. 3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * Authoer: Inki Dae <inki.dae@samsung.com> 4 * Authoer: Inki Dae <inki.dae@samsung.com>
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * This program is free software; you can redistribute it and/or modify it
7 * copy of this software and associated documentation files (the "Software"), 7 * under the terms of the GNU General Public License as published by the
8 * to deal in the Software without restriction, including without limitation 8 * Free Software Foundation; either version 2 of the License, or (at your
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * option) any later version.
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */ 10 */
25 11
26#ifndef _EXYNOS_DRM_GEM_H_ 12#ifndef _EXYNOS_DRM_GEM_H_
@@ -35,21 +21,27 @@
35 * exynos drm gem buffer structure. 21 * exynos drm gem buffer structure.
36 * 22 *
37 * @kvaddr: kernel virtual address to allocated memory region. 23 * @kvaddr: kernel virtual address to allocated memory region.
24 * *userptr: user space address.
38 * @dma_addr: bus address(accessed by dma) to allocated memory region. 25 * @dma_addr: bus address(accessed by dma) to allocated memory region.
39 * - this address could be physical address without IOMMU and 26 * - this address could be physical address without IOMMU and
40 * device address with IOMMU. 27 * device address with IOMMU.
28 * @write: whether pages will be written to by the caller.
29 * @pages: Array of backing pages.
41 * @sgt: sg table to transfer page data. 30 * @sgt: sg table to transfer page data.
42 * @pages: contain all pages to allocated memory region.
43 * @page_size: could be 4K, 64K or 1MB.
44 * @size: size of allocated memory region. 31 * @size: size of allocated memory region.
32 * @pfnmap: indicate whether memory region from userptr is mmaped with
33 * VM_PFNMAP or not.
45 */ 34 */
46struct exynos_drm_gem_buf { 35struct exynos_drm_gem_buf {
47 void __iomem *kvaddr; 36 void __iomem *kvaddr;
37 unsigned long userptr;
48 dma_addr_t dma_addr; 38 dma_addr_t dma_addr;
49 struct sg_table *sgt; 39 struct dma_attrs dma_attrs;
40 unsigned int write;
50 struct page **pages; 41 struct page **pages;
51 unsigned long page_size; 42 struct sg_table *sgt;
52 unsigned long size; 43 unsigned long size;
44 bool pfnmap;
53}; 45};
54 46
55/* 47/*
@@ -65,6 +57,7 @@ struct exynos_drm_gem_buf {
65 * or at framebuffer creation. 57 * or at framebuffer creation.
66 * @size: size requested from user, in bytes and this size is aligned 58 * @size: size requested from user, in bytes and this size is aligned
67 * in page unit. 59 * in page unit.
60 * @vma: a pointer to vm_area.
68 * @flags: indicate memory type to allocated buffer and cache attruibute. 61 * @flags: indicate memory type to allocated buffer and cache attruibute.
69 * 62 *
70 * P.S. this object would be transfered to user as kms_bo.handle so 63 * P.S. this object would be transfered to user as kms_bo.handle so
@@ -74,6 +67,7 @@ struct exynos_drm_gem_obj {
74 struct drm_gem_object base; 67 struct drm_gem_object base;
75 struct exynos_drm_gem_buf *buffer; 68 struct exynos_drm_gem_buf *buffer;
76 unsigned long size; 69 unsigned long size;
70 struct vm_area_struct *vma;
77 unsigned int flags; 71 unsigned int flags;
78}; 72};
79 73
@@ -104,9 +98,9 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
104 * other drivers such as 2d/3d acceleration drivers. 98 * other drivers such as 2d/3d acceleration drivers.
105 * with this function call, gem object reference count would be increased. 99 * with this function call, gem object reference count would be increased.
106 */ 100 */
107void *exynos_drm_gem_get_dma_addr(struct drm_device *dev, 101dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
108 unsigned int gem_handle, 102 unsigned int gem_handle,
109 struct drm_file *file_priv); 103 struct drm_file *filp);
110 104
111/* 105/*
112 * put dma address from gem handle and this function could be used for 106 * put dma address from gem handle and this function could be used for
@@ -115,7 +109,7 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
115 */ 109 */
116void exynos_drm_gem_put_dma_addr(struct drm_device *dev, 110void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
117 unsigned int gem_handle, 111 unsigned int gem_handle,
118 struct drm_file *file_priv); 112 struct drm_file *filp);
119 113
120/* get buffer offset to map to user space. */ 114/* get buffer offset to map to user space. */
121int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data, 115int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
@@ -128,6 +122,10 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
128int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data, 122int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
129 struct drm_file *file_priv); 123 struct drm_file *file_priv);
130 124
125/* map user space allocated by malloc to pages. */
126int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
127 struct drm_file *file_priv);
128
131/* get buffer information to memory region allocated by gem. */ 129/* get buffer information to memory region allocated by gem. */
132int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, 130int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
133 struct drm_file *file_priv); 131 struct drm_file *file_priv);
@@ -163,4 +161,36 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
163/* set vm_flags and we can change the vm attribute to other one at here. */ 161/* set vm_flags and we can change the vm attribute to other one at here. */
164int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); 162int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
165 163
164static inline int vma_is_io(struct vm_area_struct *vma)
165{
166 return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
167}
168
169/* get a copy of a virtual memory region. */
170struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);
171
172/* release a userspace virtual memory area. */
173void exynos_gem_put_vma(struct vm_area_struct *vma);
174
175/* get pages from user space. */
176int exynos_gem_get_pages_from_userptr(unsigned long start,
177 unsigned int npages,
178 struct page **pages,
179 struct vm_area_struct *vma);
180
181/* drop the reference to pages. */
182void exynos_gem_put_pages_to_userptr(struct page **pages,
183 unsigned int npages,
184 struct vm_area_struct *vma);
185
186/* map sgt with dma region. */
187int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
188 struct sg_table *sgt,
189 enum dma_data_direction dir);
190
191/* unmap sgt from dma region. */
192void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
193 struct sg_table *sgt,
194 enum dma_data_direction dir);
195
166#endif 196#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
new file mode 100644
index 000000000000..8140753ec9c8
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -0,0 +1,1838 @@
1/*
2 * Copyright (C) 2012 Samsung Electronics Co.Ltd
3 * Authors:
4 * Eunchul Kim <chulspro.kim@samsung.com>
5 * Jinyoung Jeon <jy0.jeon@samsung.com>
6 * Sangmin Lee <lsmin.lee@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 */
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/clk.h>
18#include <linux/pm_runtime.h>
19#include <plat/map-base.h>
20
21#include <drm/drmP.h>
22#include <drm/exynos_drm.h>
23#include "regs-gsc.h"
24#include "exynos_drm_ipp.h"
25#include "exynos_drm_gsc.h"
26
27/*
28 * GSC stands for General SCaler and
29 * supports image scaler/rotator and input/output DMA operations.
30 * input DMA reads image data from the memory.
31 * output DMA writes image data to memory.
32 * GSC supports image rotation and image effect functions.
33 *
34 * M2M operation : supports crop/scale/rotation/csc so on.
35 * Memory ----> GSC H/W ----> Memory.
36 * Writeback operation : supports cloned screen with FIMD.
37 * FIMD ----> GSC H/W ----> Memory.
38 * Output operation : supports direct display using local path.
39 * Memory ----> GSC H/W ----> FIMD, Mixer.
40 */
41
42/*
43 * TODO
44 * 1. check suspend/resume api if needed.
45 * 2. need to check use case platform_device_id.
46 * 3. check src/dst size with, height.
47 * 4. added check_prepare api for right register.
48 * 5. need to add supported list in prop_list.
49 * 6. check prescaler/scaler optimization.
50 */
51
52#define GSC_MAX_DEVS 4
53#define GSC_MAX_SRC 4
54#define GSC_MAX_DST 16
55#define GSC_RESET_TIMEOUT 50
56#define GSC_BUF_STOP 1
57#define GSC_BUF_START 2
58#define GSC_REG_SZ 16
59#define GSC_WIDTH_ITU_709 1280
60#define GSC_SC_UP_MAX_RATIO 65536
61#define GSC_SC_DOWN_RATIO_7_8 74898
62#define GSC_SC_DOWN_RATIO_6_8 87381
63#define GSC_SC_DOWN_RATIO_5_8 104857
64#define GSC_SC_DOWN_RATIO_4_8 131072
65#define GSC_SC_DOWN_RATIO_3_8 174762
66#define GSC_SC_DOWN_RATIO_2_8 262144
67#define GSC_REFRESH_MIN 12
68#define GSC_REFRESH_MAX 60
69#define GSC_CROP_MAX 8192
70#define GSC_CROP_MIN 32
71#define GSC_SCALE_MAX 4224
72#define GSC_SCALE_MIN 32
73#define GSC_COEF_RATIO 7
74#define GSC_COEF_PHASE 9
75#define GSC_COEF_ATTR 16
76#define GSC_COEF_H_8T 8
77#define GSC_COEF_V_4T 4
78#define GSC_COEF_DEPTH 3
79
/* helpers to reach the driver context and the memory-mapped registers */
#define get_gsc_context(dev)	platform_get_drvdata(to_platform_device(dev))
/*
 * The original definition carried a stray trailing semicolon, which
 * silently expanded to an extra empty statement at every use site and
 * would break any use in expression context; drop it.
 */
#define get_ctx_from_ippdrv(ippdrv)	container_of(ippdrv,\
					struct gsc_context, ippdrv)
#define gsc_read(offset)		readl(ctx->regs + (offset))
#define gsc_write(cfg, offset)	writel(cfg, ctx->regs + (offset))
85
/*
 * A structure of scaler.
 *
 * @range: narrow, wide.
 * @pre_shfactor: pre scaler shift factor.
 * @pre_hratio: horizontal ratio of the prescaler.
 * @pre_vratio: vertical ratio of the prescaler.
 * @main_hratio: the main scaler's horizontal ratio.
 * @main_vratio: the main scaler's vertical ratio.
 */
struct gsc_scaler {
	bool range;
	u32 pre_shfactor;
	u32 pre_hratio;
	u32 pre_vratio;
	unsigned long main_hratio;
	unsigned long main_vratio;
};
104
/*
 * A structure of scaler capability.
 *
 * find user manual 49.2 features.
 * @tile_w: tile mode or rotation width.
 * @tile_h: tile mode or rotation height.
 * @w: other cases width.
 * @h: other cases height.
 *
 * NOTE(review): not referenced by any code visible in this file so far.
 */
struct gsc_capability {
	/* tile or rotation */
	u32 tile_w;
	u32 tile_h;
	/* other cases */
	u32 w;
	u32 h;
};
122
/*
 * A structure of gsc context.
 *
 * @ippdrv: prepare initialization using ippdrv.
 * @regs_res: register resources.
 * @regs: memory mapped io registers.
 * @lock: locking of operations.
 * @gsc_clk: gsc gate clock.
 * @sc: scaler information.
 * @id: gsc id.
 * @irq: irq number.
 * @rotation: true when the input rotator swaps width/height (90/270).
 * @suspended: qos operations.
 */
struct gsc_context {
	struct exynos_drm_ippdrv	ippdrv;
	struct resource	*regs_res;
	void __iomem	*regs;
	struct mutex	lock;
	struct clk	*gsc_clk;
	struct gsc_scaler	sc;
	int	id;
	int	irq;
	bool	rotation;
	bool	suspended;
};
149
/*
 * 8-tap Filter Coefficient
 *
 * Horizontal polyphase filter taps, indexed as
 * [down-scale ratio band][phase (0..15)][tap (0..7)].  The seven ratio
 * bands match the GSC_SC_DOWN_RATIO_* thresholds used when programming
 * the main scaler.  Values are hardware-defined; do not edit.
 */
static const int h_coef_8t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_H_8T] = {
	{	/* Ratio <= 65536 (~8:8) */
		{  0, 0, 0, 128, 0, 0, 0, 0 },
		{ -1, 2, -6, 127, 7, -2, 1, 0 },
		{ -1, 4, -12, 125, 16, -5, 1, 0 },
		{ -1, 5, -15, 120, 25, -8, 2, 0 },
		{ -1, 6, -18, 114, 35, -10, 3, -1 },
		{ -1, 6, -20, 107, 46, -13, 4, -1 },
		{ -2, 7, -21, 99, 57, -16, 5, -1 },
		{ -1, 6, -20, 89, 68, -18, 5, -1 },
		{ -1, 6, -20, 79, 79, -20, 6, -1 },
		{ -1, 5, -18, 68, 89, -20, 6, -1 },
		{ -1, 5, -16, 57, 99, -21, 7, -2 },
		{ -1, 4, -13, 46, 107, -20, 6, -1 },
		{ -1, 3, -10, 35, 114, -18, 6, -1 },
		{ 0, 2, -8, 25, 120, -15, 5, -1 },
		{ 0, 1, -5, 16, 125, -12, 4, -1 },
		{ 0, 1, -2, 7, 127, -6, 2, -1 }
	}, {	/* 65536 < Ratio <= 74898 (~8:7) */
		{ 3, -8, 14, 111, 13, -8, 3, 0 },
		{ 2, -6, 7, 112, 21, -10, 3, -1 },
		{ 2, -4, 1, 110, 28, -12, 4, -1 },
		{ 1, -2, -3, 106, 36, -13, 4, -1 },
		{ 1, -1, -7, 103, 44, -15, 4, -1 },
		{ 1, 1, -11, 97, 53, -16, 4, -1 },
		{ 0, 2, -13, 91, 61, -16, 4, -1 },
		{ 0, 3, -15, 85, 69, -17, 4, -1 },
		{ 0, 3, -16, 77, 77, -16, 3, 0 },
		{ -1, 4, -17, 69, 85, -15, 3, 0 },
		{ -1, 4, -16, 61, 91, -13, 2, 0 },
		{ -1, 4, -16, 53, 97, -11, 1, 1 },
		{ -1, 4, -15, 44, 103, -7, -1, 1 },
		{ -1, 4, -13, 36, 106, -3, -2, 1 },
		{ -1, 4, -12, 28, 110, 1, -4, 2 },
		{ -1, 3, -10, 21, 112, 7, -6, 2 }
	}, {	/* 74898 < Ratio <= 87381 (~8:6) */
		{ 2, -11, 25, 96, 25, -11, 2, 0 },
		{ 2, -10, 19, 96, 31, -12, 2, 0 },
		{ 2, -9, 14, 94, 37, -12, 2, 0 },
		{ 2, -8, 10, 92, 43, -12, 1, 0 },
		{ 2, -7, 5, 90, 49, -12, 1, 0 },
		{ 2, -5, 1, 86, 55, -12, 0, 1 },
		{ 2, -4, -2, 82, 61, -11, -1, 1 },
		{ 1, -3, -5, 77, 67, -9, -1, 1 },
		{ 1, -2, -7, 72, 72, -7, -2, 1 },
		{ 1, -1, -9, 67, 77, -5, -3, 1 },
		{ 1, -1, -11, 61, 82, -2, -4, 2 },
		{ 1, 0, -12, 55, 86, 1, -5, 2 },
		{ 0, 1, -12, 49, 90, 5, -7, 2 },
		{ 0, 1, -12, 43, 92, 10, -8, 2 },
		{ 0, 2, -12, 37, 94, 14, -9, 2 },
		{ 0, 2, -12, 31, 96, 19, -10, 2 }
	}, {	/* 87381 < Ratio <= 104857 (~8:5) */
		{ -1, -8, 33, 80, 33, -8, -1, 0 },
		{ -1, -8, 28, 80, 37, -7, -2, 1 },
		{ 0, -8, 24, 79, 41, -7, -2, 1 },
		{ 0, -8, 20, 78, 46, -6, -3, 1 },
		{ 0, -8, 16, 76, 50, -4, -3, 1 },
		{ 0, -7, 13, 74, 54, -3, -4, 1 },
		{ 1, -7, 10, 71, 58, -1, -5, 1 },
		{ 1, -6, 6, 68, 62, 1, -5, 1 },
		{ 1, -6, 4, 65, 65, 4, -6, 1 },
		{ 1, -5, 1, 62, 68, 6, -6, 1 },
		{ 1, -5, -1, 58, 71, 10, -7, 1 },
		{ 1, -4, -3, 54, 74, 13, -7, 0 },
		{ 1, -3, -4, 50, 76, 16, -8, 0 },
		{ 1, -3, -6, 46, 78, 20, -8, 0 },
		{ 1, -2, -7, 41, 79, 24, -8, 0 },
		{ 1, -2, -7, 37, 80, 28, -8, -1 }
	}, {	/* 104857 < Ratio <= 131072 (~8:4) */
		{ -3, 0, 35, 64, 35, 0, -3, 0 },
		{ -3, -1, 32, 64, 38, 1, -3, 0 },
		{ -2, -2, 29, 63, 41, 2, -3, 0 },
		{ -2, -3, 27, 63, 43, 4, -4, 0 },
		{ -2, -3, 24, 61, 46, 6, -4, 0 },
		{ -2, -3, 21, 60, 49, 7, -4, 0 },
		{ -1, -4, 19, 59, 51, 9, -4, -1 },
		{ -1, -4, 16, 57, 53, 12, -4, -1 },
		{ -1, -4, 14, 55, 55, 14, -4, -1 },
		{ -1, -4, 12, 53, 57, 16, -4, -1 },
		{ -1, -4, 9, 51, 59, 19, -4, -1 },
		{ 0, -4, 7, 49, 60, 21, -3, -2 },
		{ 0, -4, 6, 46, 61, 24, -3, -2 },
		{ 0, -4, 4, 43, 63, 27, -3, -2 },
		{ 0, -3, 2, 41, 63, 29, -2, -2 },
		{ 0, -3, 1, 38, 64, 32, -1, -3 }
	}, {	/* 131072 < Ratio <= 174762 (~8:3) */
		{ -1, 8, 33, 48, 33, 8, -1, 0 },
		{ -1, 7, 31, 49, 35, 9, -1, -1 },
		{ -1, 6, 30, 49, 36, 10, -1, -1 },
		{ -1, 5, 28, 48, 38, 12, -1, -1 },
		{ -1, 4, 26, 48, 39, 13, 0, -1 },
		{ -1, 3, 24, 47, 41, 15, 0, -1 },
		{ -1, 2, 23, 47, 42, 16, 0, -1 },
		{ -1, 2, 21, 45, 43, 18, 1, -1 },
		{ -1, 1, 19, 45, 45, 19, 1, -1 },
		{ -1, 1, 18, 43, 45, 21, 2, -1 },
		{ -1, 0, 16, 42, 47, 23, 2, -1 },
		{ -1, 0, 15, 41, 47, 24, 3, -1 },
		{ -1, 0, 13, 39, 48, 26, 4, -1 },
		{ -1, -1, 12, 38, 48, 28, 5, -1 },
		{ -1, -1, 10, 36, 49, 30, 6, -1 },
		{ -1, -1, 9, 35, 49, 31, 7, -1 }
	}, {	/* 174762 < Ratio <= 262144 (~8:2) */
		{ 2, 13, 30, 38, 30, 13, 2, 0 },
		{ 2, 12, 29, 38, 30, 14, 3, 0 },
		{ 2, 11, 28, 38, 31, 15, 3, 0 },
		{ 2, 10, 26, 38, 32, 16, 4, 0 },
		{ 1, 10, 26, 37, 33, 17, 4, 0 },
		{ 1, 9, 24, 37, 34, 18, 5, 0 },
		{ 1, 8, 24, 37, 34, 19, 5, 0 },
		{ 1, 7, 22, 36, 35, 20, 6, 1 },
		{ 1, 6, 21, 36, 36, 21, 6, 1 },
		{ 1, 6, 20, 35, 36, 22, 7, 1 },
		{ 0, 5, 19, 34, 37, 24, 8, 1 },
		{ 0, 5, 18, 34, 37, 24, 9, 1 },
		{ 0, 4, 17, 33, 37, 26, 10, 1 },
		{ 0, 4, 16, 32, 38, 26, 10, 2 },
		{ 0, 3, 15, 31, 38, 28, 11, 2 },
		{ 0, 3, 14, 30, 38, 29, 12, 2 }
	}
};
273
/*
 * 4-tap Filter Coefficient
 *
 * Vertical polyphase filter taps, indexed as
 * [down-scale ratio band][phase (0..15)][tap (0..3)].  Bands mirror
 * those of h_coef_8t.  Values are hardware-defined; do not edit.
 */
static const int v_coef_4t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_V_4T] = {
	{	/* Ratio <= 65536 (~8:8) */
		{  0, 128, 0, 0 },
		{ -4, 127, 5, 0 },
		{ -6, 124, 11, -1 },
		{ -8, 118, 19, -1 },
		{ -8, 111, 27, -2 },
		{ -8, 102, 37, -3 },
		{ -8, 92, 48, -4 },
		{ -7, 81, 59, -5 },
		{ -6, 70, 70, -6 },
		{ -5, 59, 81, -7 },
		{ -4, 48, 92, -8 },
		{ -3, 37, 102, -8 },
		{ -2, 27, 111, -8 },
		{ -1, 19, 118, -8 },
		{ -1, 11, 124, -6 },
		{ 0, 5, 127, -4 }
	}, {	/* 65536 < Ratio <= 74898 (~8:7) */
		{ 8, 112, 8, 0 },
		{ 4, 111, 14, -1 },
		{ 1, 109, 20, -2 },
		{ -2, 105, 27, -2 },
		{ -3, 100, 34, -3 },
		{ -5, 93, 43, -3 },
		{ -5, 86, 51, -4 },
		{ -5, 77, 60, -4 },
		{ -5, 69, 69, -5 },
		{ -4, 60, 77, -5 },
		{ -4, 51, 86, -5 },
		{ -3, 43, 93, -5 },
		{ -3, 34, 100, -3 },
		{ -2, 27, 105, -2 },
		{ -2, 20, 109, 1 },
		{ -1, 14, 111, 4 }
	}, {	/* 74898 < Ratio <= 87381 (~8:6) */
		{ 16, 96, 16, 0 },
		{ 12, 97, 21, -2 },
		{ 8, 96, 26, -2 },
		{ 5, 93, 32, -2 },
		{ 2, 89, 39, -2 },
		{ 0, 84, 46, -2 },
		{ -1, 79, 53, -3 },
		{ -2, 73, 59, -2 },
		{ -2, 66, 66, -2 },
		{ -2, 59, 73, -2 },
		{ -3, 53, 79, -1 },
		{ -2, 46, 84, 0 },
		{ -2, 39, 89, 2 },
		{ -2, 32, 93, 5 },
		{ -2, 26, 96, 8 },
		{ -2, 21, 97, 12 }
	}, {	/* 87381 < Ratio <= 104857 (~8:5) */
		{ 22, 84, 22, 0 },
		{ 18, 85, 26, -1 },
		{ 14, 84, 31, -1 },
		{ 11, 82, 36, -1 },
		{ 8, 79, 42, -1 },
		{ 6, 76, 47, -1 },
		{ 4, 72, 52, 0 },
		{ 2, 68, 58, 0 },
		{ 1, 63, 63, 1 },
		{ 0, 58, 68, 2 },
		{ 0, 52, 72, 4 },
		{ -1, 47, 76, 6 },
		{ -1, 42, 79, 8 },
		{ -1, 36, 82, 11 },
		{ -1, 31, 84, 14 },
		{ -1, 26, 85, 18 }
	}, {	/* 104857 < Ratio <= 131072 (~8:4) */
		{ 26, 76, 26, 0 },
		{ 22, 76, 30, 0 },
		{ 19, 75, 34, 0 },
		{ 16, 73, 38, 1 },
		{ 13, 71, 43, 1 },
		{ 10, 69, 47, 2 },
		{ 8, 66, 51, 3 },
		{ 6, 63, 55, 4 },
		{ 5, 59, 59, 5 },
		{ 4, 55, 63, 6 },
		{ 3, 51, 66, 8 },
		{ 2, 47, 69, 10 },
		{ 1, 43, 71, 13 },
		{ 1, 38, 73, 16 },
		{ 0, 34, 75, 19 },
		{ 0, 30, 76, 22 }
	}, {	/* 131072 < Ratio <= 174762 (~8:3) */
		{ 29, 70, 29, 0 },
		{ 26, 68, 32, 2 },
		{ 23, 67, 36, 2 },
		{ 20, 66, 39, 3 },
		{ 17, 65, 43, 3 },
		{ 15, 63, 46, 4 },
		{ 12, 61, 50, 5 },
		{ 10, 58, 53, 7 },
		{ 8, 56, 56, 8 },
		{ 7, 53, 58, 10 },
		{ 5, 50, 61, 12 },
		{ 4, 46, 63, 15 },
		{ 3, 43, 65, 17 },
		{ 3, 39, 66, 20 },
		{ 2, 36, 67, 23 },
		{ 2, 32, 68, 26 }
	}, {	/* 174762 < Ratio <= 262144 (~8:2) */
		{ 32, 64, 32, 0 },
		{ 28, 63, 34, 3 },
		{ 25, 62, 37, 4 },
		{ 22, 62, 40, 4 },
		{ 19, 61, 43, 5 },
		{ 17, 59, 46, 6 },
		{ 15, 58, 48, 7 },
		{ 13, 55, 51, 9 },
		{ 11, 53, 53, 11 },
		{ 9, 51, 55, 13 },
		{ 7, 48, 58, 15 },
		{ 6, 46, 59, 17 },
		{ 5, 43, 61, 19 },
		{ 4, 40, 62, 22 },
		{ 4, 37, 62, 25 },
		{ 3, 34, 63, 28 }
	}
};
397
398static int gsc_sw_reset(struct gsc_context *ctx)
399{
400 u32 cfg;
401 int count = GSC_RESET_TIMEOUT;
402
403 DRM_DEBUG_KMS("%s\n", __func__);
404
405 /* s/w reset */
406 cfg = (GSC_SW_RESET_SRESET);
407 gsc_write(cfg, GSC_SW_RESET);
408
409 /* wait s/w reset complete */
410 while (count--) {
411 cfg = gsc_read(GSC_SW_RESET);
412 if (!cfg)
413 break;
414 usleep_range(1000, 2000);
415 }
416
417 if (cfg) {
418 DRM_ERROR("failed to reset gsc h/w.\n");
419 return -EBUSY;
420 }
421
422 /* reset sequence */
423 cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
424 cfg |= (GSC_IN_BASE_ADDR_MASK |
425 GSC_IN_BASE_ADDR_PINGPONG(0));
426 gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
427 gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
428 gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
429
430 cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
431 cfg |= (GSC_OUT_BASE_ADDR_MASK |
432 GSC_OUT_BASE_ADDR_PINGPONG(0));
433 gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
434 gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
435 gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
436
437 return 0;
438}
439
440static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable)
441{
442 u32 gscblk_cfg;
443
444 DRM_DEBUG_KMS("%s\n", __func__);
445
446 gscblk_cfg = readl(SYSREG_GSCBLK_CFG1);
447
448 if (enable)
449 gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) |
450 GSC_BLK_GSCL_WB_IN_SRC_SEL(ctx->id) |
451 GSC_BLK_SW_RESET_WB_DEST(ctx->id);
452 else
453 gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id);
454
455 writel(gscblk_cfg, SYSREG_GSCBLK_CFG1);
456}
457
458static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
459 bool overflow, bool done)
460{
461 u32 cfg;
462
463 DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__,
464 enable, overflow, done);
465
466 cfg = gsc_read(GSC_IRQ);
467 cfg |= (GSC_IRQ_OR_MASK | GSC_IRQ_FRMDONE_MASK);
468
469 if (enable)
470 cfg |= GSC_IRQ_ENABLE;
471 else
472 cfg &= ~GSC_IRQ_ENABLE;
473
474 if (overflow)
475 cfg &= ~GSC_IRQ_OR_MASK;
476 else
477 cfg |= GSC_IRQ_OR_MASK;
478
479 if (done)
480 cfg &= ~GSC_IRQ_FRMDONE_MASK;
481 else
482 cfg |= GSC_IRQ_FRMDONE_MASK;
483
484 gsc_write(cfg, GSC_IRQ);
485}
486
487
488static int gsc_src_set_fmt(struct device *dev, u32 fmt)
489{
490 struct gsc_context *ctx = get_gsc_context(dev);
491 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
492 u32 cfg;
493
494 DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
495
496 cfg = gsc_read(GSC_IN_CON);
497 cfg &= ~(GSC_IN_RGB_TYPE_MASK | GSC_IN_YUV422_1P_ORDER_MASK |
498 GSC_IN_CHROMA_ORDER_MASK | GSC_IN_FORMAT_MASK |
499 GSC_IN_TILE_TYPE_MASK | GSC_IN_TILE_MODE |
500 GSC_IN_CHROM_STRIDE_SEL_MASK | GSC_IN_RB_SWAP_MASK);
501
502 switch (fmt) {
503 case DRM_FORMAT_RGB565:
504 cfg |= GSC_IN_RGB565;
505 break;
506 case DRM_FORMAT_XRGB8888:
507 cfg |= GSC_IN_XRGB8888;
508 break;
509 case DRM_FORMAT_BGRX8888:
510 cfg |= (GSC_IN_XRGB8888 | GSC_IN_RB_SWAP);
511 break;
512 case DRM_FORMAT_YUYV:
513 cfg |= (GSC_IN_YUV422_1P |
514 GSC_IN_YUV422_1P_ORDER_LSB_Y |
515 GSC_IN_CHROMA_ORDER_CBCR);
516 break;
517 case DRM_FORMAT_YVYU:
518 cfg |= (GSC_IN_YUV422_1P |
519 GSC_IN_YUV422_1P_ORDER_LSB_Y |
520 GSC_IN_CHROMA_ORDER_CRCB);
521 break;
522 case DRM_FORMAT_UYVY:
523 cfg |= (GSC_IN_YUV422_1P |
524 GSC_IN_YUV422_1P_OEDER_LSB_C |
525 GSC_IN_CHROMA_ORDER_CBCR);
526 break;
527 case DRM_FORMAT_VYUY:
528 cfg |= (GSC_IN_YUV422_1P |
529 GSC_IN_YUV422_1P_OEDER_LSB_C |
530 GSC_IN_CHROMA_ORDER_CRCB);
531 break;
532 case DRM_FORMAT_NV21:
533 case DRM_FORMAT_NV61:
534 cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
535 GSC_IN_YUV420_2P);
536 break;
537 case DRM_FORMAT_YUV422:
538 cfg |= GSC_IN_YUV422_3P;
539 break;
540 case DRM_FORMAT_YUV420:
541 case DRM_FORMAT_YVU420:
542 cfg |= GSC_IN_YUV420_3P;
543 break;
544 case DRM_FORMAT_NV12:
545 case DRM_FORMAT_NV16:
546 cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
547 GSC_IN_YUV420_2P);
548 break;
549 case DRM_FORMAT_NV12MT:
550 cfg |= (GSC_IN_TILE_C_16x8 | GSC_IN_TILE_MODE);
551 break;
552 default:
553 dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
554 return -EINVAL;
555 }
556
557 gsc_write(cfg, GSC_IN_CON);
558
559 return 0;
560}
561
562static int gsc_src_set_transf(struct device *dev,
563 enum drm_exynos_degree degree,
564 enum drm_exynos_flip flip, bool *swap)
565{
566 struct gsc_context *ctx = get_gsc_context(dev);
567 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
568 u32 cfg;
569
570 DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
571 degree, flip);
572
573 cfg = gsc_read(GSC_IN_CON);
574 cfg &= ~GSC_IN_ROT_MASK;
575
576 switch (degree) {
577 case EXYNOS_DRM_DEGREE_0:
578 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
579 cfg |= GSC_IN_ROT_XFLIP;
580 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
581 cfg |= GSC_IN_ROT_YFLIP;
582 break;
583 case EXYNOS_DRM_DEGREE_90:
584 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
585 cfg |= GSC_IN_ROT_90_XFLIP;
586 else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
587 cfg |= GSC_IN_ROT_90_YFLIP;
588 else
589 cfg |= GSC_IN_ROT_90;
590 break;
591 case EXYNOS_DRM_DEGREE_180:
592 cfg |= GSC_IN_ROT_180;
593 break;
594 case EXYNOS_DRM_DEGREE_270:
595 cfg |= GSC_IN_ROT_270;
596 break;
597 default:
598 dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
599 return -EINVAL;
600 }
601
602 gsc_write(cfg, GSC_IN_CON);
603
604 ctx->rotation = cfg &
605 (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
606 *swap = ctx->rotation;
607
608 return 0;
609}
610
611static int gsc_src_set_size(struct device *dev, int swap,
612 struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
613{
614 struct gsc_context *ctx = get_gsc_context(dev);
615 struct drm_exynos_pos img_pos = *pos;
616 struct gsc_scaler *sc = &ctx->sc;
617 u32 cfg;
618
619 DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
620 __func__, swap, pos->x, pos->y, pos->w, pos->h);
621
622 if (swap) {
623 img_pos.w = pos->h;
624 img_pos.h = pos->w;
625 }
626
627 /* pixel offset */
628 cfg = (GSC_SRCIMG_OFFSET_X(img_pos.x) |
629 GSC_SRCIMG_OFFSET_Y(img_pos.y));
630 gsc_write(cfg, GSC_SRCIMG_OFFSET);
631
632 /* cropped size */
633 cfg = (GSC_CROPPED_WIDTH(img_pos.w) |
634 GSC_CROPPED_HEIGHT(img_pos.h));
635 gsc_write(cfg, GSC_CROPPED_SIZE);
636
637 DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
638 __func__, sz->hsize, sz->vsize);
639
640 /* original size */
641 cfg = gsc_read(GSC_SRCIMG_SIZE);
642 cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
643 GSC_SRCIMG_WIDTH_MASK);
644
645 cfg |= (GSC_SRCIMG_WIDTH(sz->hsize) |
646 GSC_SRCIMG_HEIGHT(sz->vsize));
647
648 gsc_write(cfg, GSC_SRCIMG_SIZE);
649
650 cfg = gsc_read(GSC_IN_CON);
651 cfg &= ~GSC_IN_RGB_TYPE_MASK;
652
653 DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
654 __func__, pos->w, sc->range);
655
656 if (pos->w >= GSC_WIDTH_ITU_709)
657 if (sc->range)
658 cfg |= GSC_IN_RGB_HD_WIDE;
659 else
660 cfg |= GSC_IN_RGB_HD_NARROW;
661 else
662 if (sc->range)
663 cfg |= GSC_IN_RGB_SD_WIDE;
664 else
665 cfg |= GSC_IN_RGB_SD_NARROW;
666
667 gsc_write(cfg, GSC_IN_CON);
668
669 return 0;
670}
671
672static int gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
673 enum drm_exynos_ipp_buf_type buf_type)
674{
675 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
676 bool masked;
677 u32 cfg;
678 u32 mask = 0x00000001 << buf_id;
679
680 DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
681 buf_id, buf_type);
682
683 /* mask register set */
684 cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
685
686 switch (buf_type) {
687 case IPP_BUF_ENQUEUE:
688 masked = false;
689 break;
690 case IPP_BUF_DEQUEUE:
691 masked = true;
692 break;
693 default:
694 dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
695 return -EINVAL;
696 }
697
698 /* sequence id */
699 cfg &= ~mask;
700 cfg |= masked << buf_id;
701 gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
702 gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
703 gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
704
705 return 0;
706}
707
708static int gsc_src_set_addr(struct device *dev,
709 struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
710 enum drm_exynos_ipp_buf_type buf_type)
711{
712 struct gsc_context *ctx = get_gsc_context(dev);
713 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
714 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
715 struct drm_exynos_ipp_property *property;
716
717 if (!c_node) {
718 DRM_ERROR("failed to get c_node.\n");
719 return -EFAULT;
720 }
721
722 property = &c_node->property;
723
724 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
725 property->prop_id, buf_id, buf_type);
726
727 if (buf_id > GSC_MAX_SRC) {
728 dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
729 return -EINVAL;
730 }
731
732 /* address register set */
733 switch (buf_type) {
734 case IPP_BUF_ENQUEUE:
735 gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
736 GSC_IN_BASE_ADDR_Y(buf_id));
737 gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
738 GSC_IN_BASE_ADDR_CB(buf_id));
739 gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
740 GSC_IN_BASE_ADDR_CR(buf_id));
741 break;
742 case IPP_BUF_DEQUEUE:
743 gsc_write(0x0, GSC_IN_BASE_ADDR_Y(buf_id));
744 gsc_write(0x0, GSC_IN_BASE_ADDR_CB(buf_id));
745 gsc_write(0x0, GSC_IN_BASE_ADDR_CR(buf_id));
746 break;
747 default:
748 /* bypass */
749 break;
750 }
751
752 return gsc_src_set_buf_seq(ctx, buf_id, buf_type);
753}
754
/* input DMA (source) operation callbacks registered with the IPP core */
static struct exynos_drm_ipp_ops gsc_src_ops = {
	.set_fmt = gsc_src_set_fmt,
	.set_transf = gsc_src_set_transf,
	.set_size = gsc_src_set_size,
	.set_addr = gsc_src_set_addr,
};
761
762static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
763{
764 struct gsc_context *ctx = get_gsc_context(dev);
765 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
766 u32 cfg;
767
768 DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
769
770 cfg = gsc_read(GSC_OUT_CON);
771 cfg &= ~(GSC_OUT_RGB_TYPE_MASK | GSC_OUT_YUV422_1P_ORDER_MASK |
772 GSC_OUT_CHROMA_ORDER_MASK | GSC_OUT_FORMAT_MASK |
773 GSC_OUT_CHROM_STRIDE_SEL_MASK | GSC_OUT_RB_SWAP_MASK |
774 GSC_OUT_GLOBAL_ALPHA_MASK);
775
776 switch (fmt) {
777 case DRM_FORMAT_RGB565:
778 cfg |= GSC_OUT_RGB565;
779 break;
780 case DRM_FORMAT_XRGB8888:
781 cfg |= GSC_OUT_XRGB8888;
782 break;
783 case DRM_FORMAT_BGRX8888:
784 cfg |= (GSC_OUT_XRGB8888 | GSC_OUT_RB_SWAP);
785 break;
786 case DRM_FORMAT_YUYV:
787 cfg |= (GSC_OUT_YUV422_1P |
788 GSC_OUT_YUV422_1P_ORDER_LSB_Y |
789 GSC_OUT_CHROMA_ORDER_CBCR);
790 break;
791 case DRM_FORMAT_YVYU:
792 cfg |= (GSC_OUT_YUV422_1P |
793 GSC_OUT_YUV422_1P_ORDER_LSB_Y |
794 GSC_OUT_CHROMA_ORDER_CRCB);
795 break;
796 case DRM_FORMAT_UYVY:
797 cfg |= (GSC_OUT_YUV422_1P |
798 GSC_OUT_YUV422_1P_OEDER_LSB_C |
799 GSC_OUT_CHROMA_ORDER_CBCR);
800 break;
801 case DRM_FORMAT_VYUY:
802 cfg |= (GSC_OUT_YUV422_1P |
803 GSC_OUT_YUV422_1P_OEDER_LSB_C |
804 GSC_OUT_CHROMA_ORDER_CRCB);
805 break;
806 case DRM_FORMAT_NV21:
807 case DRM_FORMAT_NV61:
808 cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
809 break;
810 case DRM_FORMAT_YUV422:
811 case DRM_FORMAT_YUV420:
812 case DRM_FORMAT_YVU420:
813 cfg |= GSC_OUT_YUV420_3P;
814 break;
815 case DRM_FORMAT_NV12:
816 case DRM_FORMAT_NV16:
817 cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
818 GSC_OUT_YUV420_2P);
819 break;
820 case DRM_FORMAT_NV12MT:
821 cfg |= (GSC_OUT_TILE_C_16x8 | GSC_OUT_TILE_MODE);
822 break;
823 default:
824 dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
825 return -EINVAL;
826 }
827
828 gsc_write(cfg, GSC_OUT_CON);
829
830 return 0;
831}
832
833static int gsc_dst_set_transf(struct device *dev,
834 enum drm_exynos_degree degree,
835 enum drm_exynos_flip flip, bool *swap)
836{
837 struct gsc_context *ctx = get_gsc_context(dev);
838 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
839 u32 cfg;
840
841 DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
842 degree, flip);
843
844 cfg = gsc_read(GSC_IN_CON);
845 cfg &= ~GSC_IN_ROT_MASK;
846
847 switch (degree) {
848 case EXYNOS_DRM_DEGREE_0:
849 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
850 cfg |= GSC_IN_ROT_XFLIP;
851 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
852 cfg |= GSC_IN_ROT_YFLIP;
853 break;
854 case EXYNOS_DRM_DEGREE_90:
855 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
856 cfg |= GSC_IN_ROT_90_XFLIP;
857 else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
858 cfg |= GSC_IN_ROT_90_YFLIP;
859 else
860 cfg |= GSC_IN_ROT_90;
861 break;
862 case EXYNOS_DRM_DEGREE_180:
863 cfg |= GSC_IN_ROT_180;
864 break;
865 case EXYNOS_DRM_DEGREE_270:
866 cfg |= GSC_IN_ROT_270;
867 break;
868 default:
869 dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
870 return -EINVAL;
871 }
872
873 gsc_write(cfg, GSC_IN_CON);
874
875 ctx->rotation = cfg &
876 (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
877 *swap = ctx->rotation;
878
879 return 0;
880}
881
882static int gsc_get_ratio_shift(u32 src, u32 dst, u32 *ratio)
883{
884 DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
885
886 if (src >= dst * 8) {
887 DRM_ERROR("failed to make ratio and shift.\n");
888 return -EINVAL;
889 } else if (src >= dst * 4)
890 *ratio = 4;
891 else if (src >= dst * 2)
892 *ratio = 2;
893 else
894 *ratio = 1;
895
896 return 0;
897}
898
899static void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *shfactor)
900{
901 if (hratio == 4 && vratio == 4)
902 *shfactor = 4;
903 else if ((hratio == 4 && vratio == 2) ||
904 (hratio == 2 && vratio == 4))
905 *shfactor = 3;
906 else if ((hratio == 4 && vratio == 1) ||
907 (hratio == 1 && vratio == 4) ||
908 (hratio == 2 && vratio == 2))
909 *shfactor = 2;
910 else if (hratio == 1 && vratio == 1)
911 *shfactor = 0;
912 else
913 *shfactor = 1;
914}
915
916static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc,
917 struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
918{
919 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
920 u32 cfg;
921 u32 src_w, src_h, dst_w, dst_h;
922 int ret = 0;
923
924 src_w = src->w;
925 src_h = src->h;
926
927 if (ctx->rotation) {
928 dst_w = dst->h;
929 dst_h = dst->w;
930 } else {
931 dst_w = dst->w;
932 dst_h = dst->h;
933 }
934
935 ret = gsc_get_ratio_shift(src_w, dst_w, &sc->pre_hratio);
936 if (ret) {
937 dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
938 return ret;
939 }
940
941 ret = gsc_get_ratio_shift(src_h, dst_h, &sc->pre_vratio);
942 if (ret) {
943 dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
944 return ret;
945 }
946
947 DRM_DEBUG_KMS("%s:pre_hratio[%d]pre_vratio[%d]\n",
948 __func__, sc->pre_hratio, sc->pre_vratio);
949
950 sc->main_hratio = (src_w << 16) / dst_w;
951 sc->main_vratio = (src_h << 16) / dst_h;
952
953 DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
954 __func__, sc->main_hratio, sc->main_vratio);
955
956 gsc_get_prescaler_shfactor(sc->pre_hratio, sc->pre_vratio,
957 &sc->pre_shfactor);
958
959 DRM_DEBUG_KMS("%s:pre_shfactor[%d]\n", __func__,
960 sc->pre_shfactor);
961
962 cfg = (GSC_PRESC_SHFACTOR(sc->pre_shfactor) |
963 GSC_PRESC_H_RATIO(sc->pre_hratio) |
964 GSC_PRESC_V_RATIO(sc->pre_vratio));
965 gsc_write(cfg, GSC_PRE_SCALE_RATIO);
966
967 return ret;
968}
969
970static void gsc_set_h_coef(struct gsc_context *ctx, unsigned long main_hratio)
971{
972 int i, j, k, sc_ratio;
973
974 if (main_hratio <= GSC_SC_UP_MAX_RATIO)
975 sc_ratio = 0;
976 else if (main_hratio <= GSC_SC_DOWN_RATIO_7_8)
977 sc_ratio = 1;
978 else if (main_hratio <= GSC_SC_DOWN_RATIO_6_8)
979 sc_ratio = 2;
980 else if (main_hratio <= GSC_SC_DOWN_RATIO_5_8)
981 sc_ratio = 3;
982 else if (main_hratio <= GSC_SC_DOWN_RATIO_4_8)
983 sc_ratio = 4;
984 else if (main_hratio <= GSC_SC_DOWN_RATIO_3_8)
985 sc_ratio = 5;
986 else
987 sc_ratio = 6;
988
989 for (i = 0; i < GSC_COEF_PHASE; i++)
990 for (j = 0; j < GSC_COEF_H_8T; j++)
991 for (k = 0; k < GSC_COEF_DEPTH; k++)
992 gsc_write(h_coef_8t[sc_ratio][i][j],
993 GSC_HCOEF(i, j, k));
994}
995
996static void gsc_set_v_coef(struct gsc_context *ctx, unsigned long main_vratio)
997{
998 int i, j, k, sc_ratio;
999
1000 if (main_vratio <= GSC_SC_UP_MAX_RATIO)
1001 sc_ratio = 0;
1002 else if (main_vratio <= GSC_SC_DOWN_RATIO_7_8)
1003 sc_ratio = 1;
1004 else if (main_vratio <= GSC_SC_DOWN_RATIO_6_8)
1005 sc_ratio = 2;
1006 else if (main_vratio <= GSC_SC_DOWN_RATIO_5_8)
1007 sc_ratio = 3;
1008 else if (main_vratio <= GSC_SC_DOWN_RATIO_4_8)
1009 sc_ratio = 4;
1010 else if (main_vratio <= GSC_SC_DOWN_RATIO_3_8)
1011 sc_ratio = 5;
1012 else
1013 sc_ratio = 6;
1014
1015 for (i = 0; i < GSC_COEF_PHASE; i++)
1016 for (j = 0; j < GSC_COEF_V_4T; j++)
1017 for (k = 0; k < GSC_COEF_DEPTH; k++)
1018 gsc_write(v_coef_4t[sc_ratio][i][j],
1019 GSC_VCOEF(i, j, k));
1020}
1021
1022static void gsc_set_scaler(struct gsc_context *ctx, struct gsc_scaler *sc)
1023{
1024 u32 cfg;
1025
1026 DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
1027 __func__, sc->main_hratio, sc->main_vratio);
1028
1029 gsc_set_h_coef(ctx, sc->main_hratio);
1030 cfg = GSC_MAIN_H_RATIO_VALUE(sc->main_hratio);
1031 gsc_write(cfg, GSC_MAIN_H_RATIO);
1032
1033 gsc_set_v_coef(ctx, sc->main_vratio);
1034 cfg = GSC_MAIN_V_RATIO_VALUE(sc->main_vratio);
1035 gsc_write(cfg, GSC_MAIN_V_RATIO);
1036}
1037
1038static int gsc_dst_set_size(struct device *dev, int swap,
1039 struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
1040{
1041 struct gsc_context *ctx = get_gsc_context(dev);
1042 struct drm_exynos_pos img_pos = *pos;
1043 struct gsc_scaler *sc = &ctx->sc;
1044 u32 cfg;
1045
1046 DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
1047 __func__, swap, pos->x, pos->y, pos->w, pos->h);
1048
1049 if (swap) {
1050 img_pos.w = pos->h;
1051 img_pos.h = pos->w;
1052 }
1053
1054 /* pixel offset */
1055 cfg = (GSC_DSTIMG_OFFSET_X(pos->x) |
1056 GSC_DSTIMG_OFFSET_Y(pos->y));
1057 gsc_write(cfg, GSC_DSTIMG_OFFSET);
1058
1059 /* scaled size */
1060 cfg = (GSC_SCALED_WIDTH(img_pos.w) | GSC_SCALED_HEIGHT(img_pos.h));
1061 gsc_write(cfg, GSC_SCALED_SIZE);
1062
1063 DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
1064 __func__, sz->hsize, sz->vsize);
1065
1066 /* original size */
1067 cfg = gsc_read(GSC_DSTIMG_SIZE);
1068 cfg &= ~(GSC_DSTIMG_HEIGHT_MASK |
1069 GSC_DSTIMG_WIDTH_MASK);
1070 cfg |= (GSC_DSTIMG_WIDTH(sz->hsize) |
1071 GSC_DSTIMG_HEIGHT(sz->vsize));
1072 gsc_write(cfg, GSC_DSTIMG_SIZE);
1073
1074 cfg = gsc_read(GSC_OUT_CON);
1075 cfg &= ~GSC_OUT_RGB_TYPE_MASK;
1076
1077 DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
1078 __func__, pos->w, sc->range);
1079
1080 if (pos->w >= GSC_WIDTH_ITU_709)
1081 if (sc->range)
1082 cfg |= GSC_OUT_RGB_HD_WIDE;
1083 else
1084 cfg |= GSC_OUT_RGB_HD_NARROW;
1085 else
1086 if (sc->range)
1087 cfg |= GSC_OUT_RGB_SD_WIDE;
1088 else
1089 cfg |= GSC_OUT_RGB_SD_NARROW;
1090
1091 gsc_write(cfg, GSC_OUT_CON);
1092
1093 return 0;
1094}
1095
1096static int gsc_dst_get_buf_seq(struct gsc_context *ctx)
1097{
1098 u32 cfg, i, buf_num = GSC_REG_SZ;
1099 u32 mask = 0x00000001;
1100
1101 cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
1102
1103 for (i = 0; i < GSC_REG_SZ; i++)
1104 if (cfg & (mask << i))
1105 buf_num--;
1106
1107 DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
1108
1109 return buf_num;
1110}
1111
/*
 * Arm or mask one destination buffer slot and keep the frame-done
 * interrupt in step with how many slots remain enqueued.
 *
 * @buf_id: slot index into the OUT base-address register bank
 * @buf_type: IPP_BUF_ENQUEUE clears the slot's mask bit (slot active),
 *            IPP_BUF_DEQUEUE sets it (slot masked out)
 *
 * The same mask value is mirrored to the Y, CB and CR mask registers.
 * Interrupts are enabled once enough buffers are queued (GSC_BUF_START)
 * and disabled again when the queue drains to GSC_BUF_STOP.
 *
 * Returns 0, or -EINVAL for an unknown buf_type.  Serialized against
 * other mask updates by ctx->lock.
 */
static int gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
		enum drm_exynos_ipp_buf_type buf_type)
{
	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
	bool masked;
	u32 cfg;
	u32 mask = 0x00000001 << buf_id;
	int ret = 0;

	DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
		buf_id, buf_type);

	mutex_lock(&ctx->lock);

	/* mask register set */
	cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);

	switch (buf_type) {
	case IPP_BUF_ENQUEUE:
		masked = false;
		break;
	case IPP_BUF_DEQUEUE:
		masked = true;
		break;
	default:
		dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
		ret = -EINVAL;
		goto err_unlock;
	}

	/* sequence id */
	cfg &= ~mask;
	cfg |= masked << buf_id;
	gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
	gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
	gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);

	/* interrupt enable */
	if (buf_type == IPP_BUF_ENQUEUE &&
	    gsc_dst_get_buf_seq(ctx) >= GSC_BUF_START)
		gsc_handle_irq(ctx, true, false, true);

	/* interrupt disable */
	if (buf_type == IPP_BUF_DEQUEUE &&
	    gsc_dst_get_buf_seq(ctx) <= GSC_BUF_STOP)
		gsc_handle_irq(ctx, false, false, true);

err_unlock:
	mutex_unlock(&ctx->lock);
	return ret;
}
1163
1164static int gsc_dst_set_addr(struct device *dev,
1165 struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
1166 enum drm_exynos_ipp_buf_type buf_type)
1167{
1168 struct gsc_context *ctx = get_gsc_context(dev);
1169 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1170 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
1171 struct drm_exynos_ipp_property *property;
1172
1173 if (!c_node) {
1174 DRM_ERROR("failed to get c_node.\n");
1175 return -EFAULT;
1176 }
1177
1178 property = &c_node->property;
1179
1180 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
1181 property->prop_id, buf_id, buf_type);
1182
1183 if (buf_id > GSC_MAX_DST) {
1184 dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
1185 return -EINVAL;
1186 }
1187
1188 /* address register set */
1189 switch (buf_type) {
1190 case IPP_BUF_ENQUEUE:
1191 gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
1192 GSC_OUT_BASE_ADDR_Y(buf_id));
1193 gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
1194 GSC_OUT_BASE_ADDR_CB(buf_id));
1195 gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
1196 GSC_OUT_BASE_ADDR_CR(buf_id));
1197 break;
1198 case IPP_BUF_DEQUEUE:
1199 gsc_write(0x0, GSC_OUT_BASE_ADDR_Y(buf_id));
1200 gsc_write(0x0, GSC_OUT_BASE_ADDR_CB(buf_id));
1201 gsc_write(0x0, GSC_OUT_BASE_ADDR_CR(buf_id));
1202 break;
1203 default:
1204 /* bypass */
1205 break;
1206 }
1207
1208 return gsc_dst_set_buf_seq(ctx, buf_id, buf_type);
1209}
1210
/* Destination (output DMA) side ops exposed to the exynos IPP core. */
static struct exynos_drm_ipp_ops gsc_dst_ops = {
	.set_fmt = gsc_dst_set_fmt,
	.set_transf = gsc_dst_set_transf,
	.set_size = gsc_dst_set_size,
	.set_addr = gsc_dst_set_addr,
};
1217
1218static int gsc_clk_ctrl(struct gsc_context *ctx, bool enable)
1219{
1220 DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
1221
1222 if (enable) {
1223 clk_enable(ctx->gsc_clk);
1224 ctx->suspended = false;
1225 } else {
1226 clk_disable(ctx->gsc_clk);
1227 ctx->suspended = true;
1228 }
1229
1230 return 0;
1231}
1232
1233static int gsc_get_src_buf_index(struct gsc_context *ctx)
1234{
1235 u32 cfg, curr_index, i;
1236 u32 buf_id = GSC_MAX_SRC;
1237 int ret;
1238
1239 DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
1240
1241 cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
1242 curr_index = GSC_IN_CURR_GET_INDEX(cfg);
1243
1244 for (i = curr_index; i < GSC_MAX_SRC; i++) {
1245 if (!((cfg >> i) & 0x1)) {
1246 buf_id = i;
1247 break;
1248 }
1249 }
1250
1251 if (buf_id == GSC_MAX_SRC) {
1252 DRM_ERROR("failed to get in buffer index.\n");
1253 return -EINVAL;
1254 }
1255
1256 ret = gsc_src_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
1257 if (ret < 0) {
1258 DRM_ERROR("failed to dequeue.\n");
1259 return ret;
1260 }
1261
1262 DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
1263 curr_index, buf_id);
1264
1265 return buf_id;
1266}
1267
1268static int gsc_get_dst_buf_index(struct gsc_context *ctx)
1269{
1270 u32 cfg, curr_index, i;
1271 u32 buf_id = GSC_MAX_DST;
1272 int ret;
1273
1274 DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
1275
1276 cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
1277 curr_index = GSC_OUT_CURR_GET_INDEX(cfg);
1278
1279 for (i = curr_index; i < GSC_MAX_DST; i++) {
1280 if (!((cfg >> i) & 0x1)) {
1281 buf_id = i;
1282 break;
1283 }
1284 }
1285
1286 if (buf_id == GSC_MAX_DST) {
1287 DRM_ERROR("failed to get out buffer index.\n");
1288 return -EINVAL;
1289 }
1290
1291 ret = gsc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
1292 if (ret < 0) {
1293 DRM_ERROR("failed to dequeue.\n");
1294 return ret;
1295 }
1296
1297 DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
1298 curr_index, buf_id);
1299
1300 return buf_id;
1301}
1302
/*
 * Threaded IRQ handler.  On frame-done, look up the just-consumed source
 * and destination buffer slots (dequeuing them as a side effect), stash
 * the ids in the command node's event work and queue it on the driver's
 * event workqueue.
 *
 * Returns IRQ_NONE for an overflow interrupt (logged only), IRQ_HANDLED
 * otherwise -- including when a buffer index lookup fails.
 *
 * NOTE(review): c_node and c_node->event_work are dereferenced without a
 * NULL check -- assumes interrupts only fire while a command node is
 * bound; confirm against the IPP core.
 */
static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
{
	struct gsc_context *ctx = dev_id;
	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
	struct drm_exynos_ipp_event_work *event_work =
		c_node->event_work;
	u32 status;
	int buf_id[EXYNOS_DRM_OPS_MAX];

	DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);

	status = gsc_read(GSC_IRQ);
	if (status & GSC_IRQ_STATUS_OR_IRQ) {
		dev_err(ippdrv->dev, "occured overflow at %d, status 0x%x.\n",
			ctx->id, status);
		return IRQ_NONE;
	}

	if (status & GSC_IRQ_STATUS_OR_FRM_DONE) {
		dev_dbg(ippdrv->dev, "occured frame done at %d, status 0x%x.\n",
			ctx->id, status);

		buf_id[EXYNOS_DRM_OPS_SRC] = gsc_get_src_buf_index(ctx);
		if (buf_id[EXYNOS_DRM_OPS_SRC] < 0)
			return IRQ_HANDLED;

		buf_id[EXYNOS_DRM_OPS_DST] = gsc_get_dst_buf_index(ctx);
		if (buf_id[EXYNOS_DRM_OPS_DST] < 0)
			return IRQ_HANDLED;

		DRM_DEBUG_KMS("%s:buf_id_src[%d]buf_id_dst[%d]\n", __func__,
			buf_id[EXYNOS_DRM_OPS_SRC], buf_id[EXYNOS_DRM_OPS_DST]);

		event_work->ippdrv = ippdrv;
		event_work->buf_id[EXYNOS_DRM_OPS_SRC] =
			buf_id[EXYNOS_DRM_OPS_SRC];
		event_work->buf_id[EXYNOS_DRM_OPS_DST] =
			buf_id[EXYNOS_DRM_OPS_DST];
		queue_work(ippdrv->event_workq,
			(struct work_struct *)event_work);
	}

	return IRQ_HANDLED;
}
1348
1349static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
1350{
1351 struct drm_exynos_ipp_prop_list *prop_list;
1352
1353 DRM_DEBUG_KMS("%s\n", __func__);
1354
1355 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
1356 if (!prop_list) {
1357 DRM_ERROR("failed to alloc property list.\n");
1358 return -ENOMEM;
1359 }
1360
1361 prop_list->version = 1;
1362 prop_list->writeback = 1;
1363 prop_list->refresh_min = GSC_REFRESH_MIN;
1364 prop_list->refresh_max = GSC_REFRESH_MAX;
1365 prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
1366 (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
1367 prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
1368 (1 << EXYNOS_DRM_DEGREE_90) |
1369 (1 << EXYNOS_DRM_DEGREE_180) |
1370 (1 << EXYNOS_DRM_DEGREE_270);
1371 prop_list->csc = 1;
1372 prop_list->crop = 1;
1373 prop_list->crop_max.hsize = GSC_CROP_MAX;
1374 prop_list->crop_max.vsize = GSC_CROP_MAX;
1375 prop_list->crop_min.hsize = GSC_CROP_MIN;
1376 prop_list->crop_min.vsize = GSC_CROP_MIN;
1377 prop_list->scale = 1;
1378 prop_list->scale_max.hsize = GSC_SCALE_MAX;
1379 prop_list->scale_max.vsize = GSC_SCALE_MAX;
1380 prop_list->scale_min.hsize = GSC_SCALE_MIN;
1381 prop_list->scale_min.vsize = GSC_SCALE_MIN;
1382
1383 ippdrv->prop_list = prop_list;
1384
1385 return 0;
1386}
1387
1388static inline bool gsc_check_drm_flip(enum drm_exynos_flip flip)
1389{
1390 switch (flip) {
1391 case EXYNOS_DRM_FLIP_NONE:
1392 case EXYNOS_DRM_FLIP_VERTICAL:
1393 case EXYNOS_DRM_FLIP_HORIZONTAL:
1394 case EXYNOS_DRM_FLIP_BOTH:
1395 return true;
1396 default:
1397 DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
1398 return false;
1399 }
1400}
1401
/*
 * Validate a requested IPP property against the capabilities advertised
 * by gsc_init_prop_list(): flip, rotation degree, buffer bounds, crop
 * limits (source side) and scale limits (destination side).
 *
 * A 90/270-degree rotation swaps width/height, so the h/v limits are
 * compared against the swapped axes in that case.  For IPP_CMD_WB the
 * source side is fed by the local writeback path and is skipped.
 *
 * Returns 0 when every configured op fits; otherwise dumps each checked
 * config and returns -EINVAL.
 */
static int gsc_ippdrv_check_property(struct device *dev,
		struct drm_exynos_ipp_property *property)
{
	struct gsc_context *ctx = get_gsc_context(dev);
	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
	struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
	struct drm_exynos_ipp_config *config;
	struct drm_exynos_pos *pos;
	struct drm_exynos_sz *sz;
	bool swap;
	int i;

	DRM_DEBUG_KMS("%s\n", __func__);

	for_each_ipp_ops(i) {
		/* writeback feeds the source internally; nothing to check */
		if ((i == EXYNOS_DRM_OPS_SRC) &&
			(property->cmd == IPP_CMD_WB))
			continue;

		config = &property->config[i];
		pos = &config->pos;
		sz = &config->sz;

		/* check for flip */
		if (!gsc_check_drm_flip(config->flip)) {
			DRM_ERROR("invalid flip.\n");
			goto err_property;
		}

		/* check for degree */
		switch (config->degree) {
		case EXYNOS_DRM_DEGREE_90:
		case EXYNOS_DRM_DEGREE_270:
			swap = true;
			break;
		case EXYNOS_DRM_DEGREE_0:
		case EXYNOS_DRM_DEGREE_180:
			swap = false;
			break;
		default:
			DRM_ERROR("invalid degree.\n");
			goto err_property;
		}

		/* check for buffer bound */
		if ((pos->x + pos->w > sz->hsize) ||
			(pos->y + pos->h > sz->vsize)) {
			DRM_ERROR("out of buf bound.\n");
			goto err_property;
		}

		/* check for crop */
		if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
			if (swap) {
				if ((pos->h < pp->crop_min.hsize) ||
					(sz->vsize > pp->crop_max.hsize) ||
					(pos->w < pp->crop_min.vsize) ||
					(sz->hsize > pp->crop_max.vsize)) {
					DRM_ERROR("out of crop size.\n");
					goto err_property;
				}
			} else {
				if ((pos->w < pp->crop_min.hsize) ||
					(sz->hsize > pp->crop_max.hsize) ||
					(pos->h < pp->crop_min.vsize) ||
					(sz->vsize > pp->crop_max.vsize)) {
					DRM_ERROR("out of crop size.\n");
					goto err_property;
				}
			}
		}

		/* check for scale */
		if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
			if (swap) {
				if ((pos->h < pp->scale_min.hsize) ||
					(sz->vsize > pp->scale_max.hsize) ||
					(pos->w < pp->scale_min.vsize) ||
					(sz->hsize > pp->scale_max.vsize)) {
					DRM_ERROR("out of scale size.\n");
					goto err_property;
				}
			} else {
				if ((pos->w < pp->scale_min.hsize) ||
					(sz->hsize > pp->scale_max.hsize) ||
					(pos->h < pp->scale_min.vsize) ||
					(sz->vsize > pp->scale_max.vsize)) {
					DRM_ERROR("out of scale size.\n");
					goto err_property;
				}
			}
		}
	}

	return 0;

err_property:
	/* dump every config involved in the rejected request */
	for_each_ipp_ops(i) {
		if ((i == EXYNOS_DRM_OPS_SRC) &&
			(property->cmd == IPP_CMD_WB))
			continue;

		config = &property->config[i];
		pos = &config->pos;
		sz = &config->sz;

		DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
			i ? "dst" : "src", config->flip, config->degree,
			pos->x, pos->y, pos->w, pos->h,
			sz->hsize, sz->vsize);
	}

	return -EINVAL;
}
1516
1517
1518static int gsc_ippdrv_reset(struct device *dev)
1519{
1520 struct gsc_context *ctx = get_gsc_context(dev);
1521 struct gsc_scaler *sc = &ctx->sc;
1522 int ret;
1523
1524 DRM_DEBUG_KMS("%s\n", __func__);
1525
1526 /* reset h/w block */
1527 ret = gsc_sw_reset(ctx);
1528 if (ret < 0) {
1529 dev_err(dev, "failed to reset hardware.\n");
1530 return ret;
1531 }
1532
1533 /* scaler setting */
1534 memset(&ctx->sc, 0x0, sizeof(ctx->sc));
1535 sc->range = true;
1536
1537 return 0;
1538}
1539
1540static int gsc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1541{
1542 struct gsc_context *ctx = get_gsc_context(dev);
1543 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1544 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
1545 struct drm_exynos_ipp_property *property;
1546 struct drm_exynos_ipp_config *config;
1547 struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX];
1548 struct drm_exynos_ipp_set_wb set_wb;
1549 u32 cfg;
1550 int ret, i;
1551
1552 DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
1553
1554 if (!c_node) {
1555 DRM_ERROR("failed to get c_node.\n");
1556 return -EINVAL;
1557 }
1558
1559 property = &c_node->property;
1560
1561 gsc_handle_irq(ctx, true, false, true);
1562
1563 for_each_ipp_ops(i) {
1564 config = &property->config[i];
1565 img_pos[i] = config->pos;
1566 }
1567
1568 switch (cmd) {
1569 case IPP_CMD_M2M:
1570 /* enable one shot */
1571 cfg = gsc_read(GSC_ENABLE);
1572 cfg &= ~(GSC_ENABLE_ON_CLEAR_MASK |
1573 GSC_ENABLE_CLK_GATE_MODE_MASK);
1574 cfg |= GSC_ENABLE_ON_CLEAR_ONESHOT;
1575 gsc_write(cfg, GSC_ENABLE);
1576
1577 /* src dma memory */
1578 cfg = gsc_read(GSC_IN_CON);
1579 cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
1580 cfg |= GSC_IN_PATH_MEMORY;
1581 gsc_write(cfg, GSC_IN_CON);
1582
1583 /* dst dma memory */
1584 cfg = gsc_read(GSC_OUT_CON);
1585 cfg |= GSC_OUT_PATH_MEMORY;
1586 gsc_write(cfg, GSC_OUT_CON);
1587 break;
1588 case IPP_CMD_WB:
1589 set_wb.enable = 1;
1590 set_wb.refresh = property->refresh_rate;
1591 gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
1592 exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
1593
1594 /* src local path */
1595 cfg = gsc_read(GSC_IN_CON);
1596 cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
1597 cfg |= (GSC_IN_PATH_LOCAL | GSC_IN_LOCAL_FIMD_WB);
1598 gsc_write(cfg, GSC_IN_CON);
1599
1600 /* dst dma memory */
1601 cfg = gsc_read(GSC_OUT_CON);
1602 cfg |= GSC_OUT_PATH_MEMORY;
1603 gsc_write(cfg, GSC_OUT_CON);
1604 break;
1605 case IPP_CMD_OUTPUT:
1606 /* src dma memory */
1607 cfg = gsc_read(GSC_IN_CON);
1608 cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
1609 cfg |= GSC_IN_PATH_MEMORY;
1610 gsc_write(cfg, GSC_IN_CON);
1611
1612 /* dst local path */
1613 cfg = gsc_read(GSC_OUT_CON);
1614 cfg |= GSC_OUT_PATH_MEMORY;
1615 gsc_write(cfg, GSC_OUT_CON);
1616 break;
1617 default:
1618 ret = -EINVAL;
1619 dev_err(dev, "invalid operations.\n");
1620 return ret;
1621 }
1622
1623 ret = gsc_set_prescaler(ctx, &ctx->sc,
1624 &img_pos[EXYNOS_DRM_OPS_SRC],
1625 &img_pos[EXYNOS_DRM_OPS_DST]);
1626 if (ret) {
1627 dev_err(dev, "failed to set precalser.\n");
1628 return ret;
1629 }
1630
1631 gsc_set_scaler(ctx, &ctx->sc);
1632
1633 cfg = gsc_read(GSC_ENABLE);
1634 cfg |= GSC_ENABLE_ON;
1635 gsc_write(cfg, GSC_ENABLE);
1636
1637 return 0;
1638}
1639
/*
 * Stop a running GSC operation: undo the writeback wiring for
 * IPP_CMD_WB, mask interrupts, re-mask every output buffer slot and
 * clear the enable bit.
 *
 * NOTE(review): IPP_CMD_OUTPUT falls into the "invalid operations"
 * branch even though gsc_ippdrv_start() accepts it -- confirm whether
 * that is intentional.
 */
static void gsc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
{
	struct gsc_context *ctx = get_gsc_context(dev);
	struct drm_exynos_ipp_set_wb set_wb = {0, 0};
	u32 cfg;

	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);

	switch (cmd) {
	case IPP_CMD_M2M:
		/* bypass */
		break;
	case IPP_CMD_WB:
		/* tell the display side writeback is off (set_wb is zeroed) */
		gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
		exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
		break;
	case IPP_CMD_OUTPUT:
	default:
		dev_err(dev, "invalid operations.\n");
		break;
	}

	gsc_handle_irq(ctx, false, false, true);

	/* reset sequence */
	gsc_write(0xff, GSC_OUT_BASE_ADDR_Y_MASK);
	gsc_write(0xff, GSC_OUT_BASE_ADDR_CB_MASK);
	gsc_write(0xff, GSC_OUT_BASE_ADDR_CR_MASK);

	cfg = gsc_read(GSC_ENABLE);
	cfg &= ~GSC_ENABLE_ON;
	gsc_write(cfg, GSC_ENABLE);
}
1673
/*
 * Platform probe: acquire the "gscl" clock, register space and IRQ, fill
 * in the src/dst ops tables and control callbacks, publish the property
 * list, then register with the exynos IPP core.
 *
 * The IRQ is requested with request_threaded_irq() (not devm), so every
 * failure path past that point -- and gsc_remove() -- must free_irq().
 * NOTE(review): ippdrv->prop_list is devm-allocated, so the explicit
 * devm_kfree on the error path is redundant but harmless.
 */
static int gsc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct gsc_context *ctx;
	struct resource *res;
	struct exynos_drm_ippdrv *ippdrv;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	/* clock control */
	ctx->gsc_clk = devm_clk_get(dev, "gscl");
	if (IS_ERR(ctx->gsc_clk)) {
		dev_err(dev, "failed to get gsc clock.\n");
		return PTR_ERR(ctx->gsc_clk);
	}

	/* resource memory */
	ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res);
	if (!ctx->regs) {
		dev_err(dev, "failed to map registers.\n");
		return -ENXIO;
	}

	/* resource irq */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(dev, "failed to request irq resource.\n");
		return -ENOENT;
	}

	ctx->irq = res->start;
	ret = request_threaded_irq(ctx->irq, NULL, gsc_irq_handler,
		IRQF_ONESHOT, "drm_gsc", ctx);
	if (ret < 0) {
		dev_err(dev, "failed to request irq.\n");
		return ret;
	}

	/* context initialization */
	ctx->id = pdev->id;

	ippdrv = &ctx->ippdrv;
	ippdrv->dev = dev;
	ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &gsc_src_ops;
	ippdrv->ops[EXYNOS_DRM_OPS_DST] = &gsc_dst_ops;
	ippdrv->check_property = gsc_ippdrv_check_property;
	ippdrv->reset = gsc_ippdrv_reset;
	ippdrv->start = gsc_ippdrv_start;
	ippdrv->stop = gsc_ippdrv_stop;
	ret = gsc_init_prop_list(ippdrv);
	if (ret < 0) {
		dev_err(dev, "failed to init property list.\n");
		goto err_get_irq;
	}

	DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
		(int)ippdrv);

	mutex_init(&ctx->lock);
	platform_set_drvdata(pdev, ctx);

	/* runtime PM must be up before the IPP core can start jobs */
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	ret = exynos_drm_ippdrv_register(ippdrv);
	if (ret < 0) {
		dev_err(dev, "failed to register drm gsc device.\n");
		goto err_ippdrv_register;
	}

	dev_info(&pdev->dev, "drm gsc registered successfully.\n");

	return 0;

err_ippdrv_register:
	devm_kfree(dev, ippdrv->prop_list);
	pm_runtime_disable(dev);
err_get_irq:
	free_irq(ctx->irq, ctx);
	return ret;
}
1759
/*
 * Platform remove: unregister from the IPP core, tear down the lock,
 * runtime PM state and the IRQ requested in gsc_probe().
 *
 * NOTE(review): prop_list is freed (devm_kfree) before the ippdrv is
 * unregistered -- confirm the IPP core cannot still dereference it at
 * that point.
 */
static int gsc_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct gsc_context *ctx = get_gsc_context(dev);
	struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;

	devm_kfree(dev, ippdrv->prop_list);
	exynos_drm_ippdrv_unregister(ippdrv);
	mutex_destroy(&ctx->lock);

	pm_runtime_set_suspended(dev);
	pm_runtime_disable(dev);

	free_irq(ctx->irq, ctx);

	return 0;
}
1777
1778#ifdef CONFIG_PM_SLEEP
1779static int gsc_suspend(struct device *dev)
1780{
1781 struct gsc_context *ctx = get_gsc_context(dev);
1782
1783 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1784
1785 if (pm_runtime_suspended(dev))
1786 return 0;
1787
1788 return gsc_clk_ctrl(ctx, false);
1789}
1790
1791static int gsc_resume(struct device *dev)
1792{
1793 struct gsc_context *ctx = get_gsc_context(dev);
1794
1795 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1796
1797 if (!pm_runtime_suspended(dev))
1798 return gsc_clk_ctrl(ctx, true);
1799
1800 return 0;
1801}
1802#endif
1803
1804#ifdef CONFIG_PM_RUNTIME
/* Runtime PM suspend: gate the GSC clock and mark the context suspended. */
static int gsc_runtime_suspend(struct device *dev)
{
	struct gsc_context *ctx = get_gsc_context(dev);

	DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);

	return gsc_clk_ctrl(ctx, false);
}
1813
1814static int gsc_runtime_resume(struct device *dev)
1815{
1816 struct gsc_context *ctx = get_gsc_context(dev);
1817
1818 DRM_DEBUG_KMS("%s:id[%d]\n", __FILE__, ctx->id);
1819
1820 return gsc_clk_ctrl(ctx, true);
1821}
1822#endif
1823
/*
 * System sleep is handled by gsc_suspend/gsc_resume; runtime PM gates
 * the clock via gsc_runtime_suspend/gsc_runtime_resume.
 */
static const struct dev_pm_ops gsc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(gsc_suspend, gsc_resume)
	SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
};
1828
/* Platform driver glue for the "exynos-drm-gsc" device. */
struct platform_driver gsc_driver = {
	.probe		= gsc_probe,
	.remove		= gsc_remove,
	.driver		= {
		.name	= "exynos-drm-gsc",
		.owner	= THIS_MODULE,
		.pm	= &gsc_pm_ops,
	},
};
1838
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.h b/drivers/gpu/drm/exynos/exynos_drm_gsc.h
new file mode 100644
index 000000000000..29ec1c5efcf2
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.h
@@ -0,0 +1,24 @@
1/*
2 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
3 *
4 * Authors:
5 * Eunchul Kim <chulspro.kim@samsung.com>
6 * Jinyoung Jeon <jy0.jeon@samsung.com>
7 * Sangmin Lee <lsmin.lee@samsung.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 */
14
15#ifndef _EXYNOS_DRM_GSC_H_
16#define _EXYNOS_DRM_GSC_H_
17
18/*
19 * TODO
20 * FIMD output interface notifier callback.
21 * Mixer output interface notifier callback.
22 */
23
24#endif /* _EXYNOS_DRM_GSC_H_ */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index c3b9e2b45185..850e9950b7da 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -29,6 +29,9 @@
29#define get_ctx_from_subdrv(subdrv) container_of(subdrv,\ 29#define get_ctx_from_subdrv(subdrv) container_of(subdrv,\
30 struct drm_hdmi_context, subdrv); 30 struct drm_hdmi_context, subdrv);
31 31
32/* platform device pointer for common drm hdmi device. */
33static struct platform_device *exynos_drm_hdmi_pdev;
34
32/* Common hdmi subdrv needs to access the hdmi and mixer though context. 35/* Common hdmi subdrv needs to access the hdmi and mixer though context.
33* These should be initialied by the repective drivers */ 36* These should be initialied by the repective drivers */
34static struct exynos_drm_hdmi_context *hdmi_ctx; 37static struct exynos_drm_hdmi_context *hdmi_ctx;
@@ -46,6 +49,25 @@ struct drm_hdmi_context {
46 bool enabled[MIXER_WIN_NR]; 49 bool enabled[MIXER_WIN_NR];
47}; 50};
48 51
52int exynos_platform_device_hdmi_register(void)
53{
54 if (exynos_drm_hdmi_pdev)
55 return -EEXIST;
56
57 exynos_drm_hdmi_pdev = platform_device_register_simple(
58 "exynos-drm-hdmi", -1, NULL, 0);
59 if (IS_ERR_OR_NULL(exynos_drm_hdmi_pdev))
60 return PTR_ERR(exynos_drm_hdmi_pdev);
61
62 return 0;
63}
64
65void exynos_platform_device_hdmi_unregister(void)
66{
67 if (exynos_drm_hdmi_pdev)
68 platform_device_unregister(exynos_drm_hdmi_pdev);
69}
70
49void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx) 71void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx)
50{ 72{
51 if (ctx) 73 if (ctx)
@@ -157,6 +179,16 @@ static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
157 return mixer_ops->disable_vblank(ctx->mixer_ctx->ctx); 179 return mixer_ops->disable_vblank(ctx->mixer_ctx->ctx);
158} 180}
159 181
182static void drm_hdmi_wait_for_vblank(struct device *subdrv_dev)
183{
184 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
185
186 DRM_DEBUG_KMS("%s\n", __FILE__);
187
188 if (mixer_ops && mixer_ops->wait_for_vblank)
189 mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
190}
191
160static void drm_hdmi_mode_fixup(struct device *subdrv_dev, 192static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
161 struct drm_connector *connector, 193 struct drm_connector *connector,
162 const struct drm_display_mode *mode, 194 const struct drm_display_mode *mode,
@@ -238,6 +270,7 @@ static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
238 .apply = drm_hdmi_apply, 270 .apply = drm_hdmi_apply,
239 .enable_vblank = drm_hdmi_enable_vblank, 271 .enable_vblank = drm_hdmi_enable_vblank,
240 .disable_vblank = drm_hdmi_disable_vblank, 272 .disable_vblank = drm_hdmi_disable_vblank,
273 .wait_for_vblank = drm_hdmi_wait_for_vblank,
241 .mode_fixup = drm_hdmi_mode_fixup, 274 .mode_fixup = drm_hdmi_mode_fixup,
242 .mode_set = drm_hdmi_mode_set, 275 .mode_set = drm_hdmi_mode_set,
243 .get_max_resol = drm_hdmi_get_max_resol, 276 .get_max_resol = drm_hdmi_get_max_resol,
@@ -291,21 +324,10 @@ static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
291 ctx->enabled[win] = false; 324 ctx->enabled[win] = false;
292} 325}
293 326
294static void drm_mixer_wait_for_vblank(struct device *subdrv_dev)
295{
296 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
297
298 DRM_DEBUG_KMS("%s\n", __FILE__);
299
300 if (mixer_ops && mixer_ops->wait_for_vblank)
301 mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
302}
303
304static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = { 327static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
305 .mode_set = drm_mixer_mode_set, 328 .mode_set = drm_mixer_mode_set,
306 .commit = drm_mixer_commit, 329 .commit = drm_mixer_commit,
307 .disable = drm_mixer_disable, 330 .disable = drm_mixer_disable,
308 .wait_for_vblank = drm_mixer_wait_for_vblank,
309}; 331};
310 332
311static struct exynos_drm_manager hdmi_manager = { 333static struct exynos_drm_manager hdmi_manager = {
@@ -346,10 +368,24 @@ static int hdmi_subdrv_probe(struct drm_device *drm_dev,
346 ctx->hdmi_ctx->drm_dev = drm_dev; 368 ctx->hdmi_ctx->drm_dev = drm_dev;
347 ctx->mixer_ctx->drm_dev = drm_dev; 369 ctx->mixer_ctx->drm_dev = drm_dev;
348 370
371 if (mixer_ops->iommu_on)
372 mixer_ops->iommu_on(ctx->mixer_ctx->ctx, true);
373
349 return 0; 374 return 0;
350} 375}
351 376
352static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev) 377static void hdmi_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
378{
379 struct drm_hdmi_context *ctx;
380 struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
381
382 ctx = get_ctx_from_subdrv(subdrv);
383
384 if (mixer_ops->iommu_on)
385 mixer_ops->iommu_on(ctx->mixer_ctx->ctx, false);
386}
387
388static int exynos_drm_hdmi_probe(struct platform_device *pdev)
353{ 389{
354 struct device *dev = &pdev->dev; 390 struct device *dev = &pdev->dev;
355 struct exynos_drm_subdrv *subdrv; 391 struct exynos_drm_subdrv *subdrv;
@@ -368,6 +404,7 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
368 subdrv->dev = dev; 404 subdrv->dev = dev;
369 subdrv->manager = &hdmi_manager; 405 subdrv->manager = &hdmi_manager;
370 subdrv->probe = hdmi_subdrv_probe; 406 subdrv->probe = hdmi_subdrv_probe;
407 subdrv->remove = hdmi_subdrv_remove;
371 408
372 platform_set_drvdata(pdev, subdrv); 409 platform_set_drvdata(pdev, subdrv);
373 410
@@ -376,7 +413,7 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
376 return 0; 413 return 0;
377} 414}
378 415
379static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev) 416static int exynos_drm_hdmi_remove(struct platform_device *pdev)
380{ 417{
381 struct drm_hdmi_context *ctx = platform_get_drvdata(pdev); 418 struct drm_hdmi_context *ctx = platform_get_drvdata(pdev);
382 419
@@ -389,7 +426,7 @@ static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev)
389 426
390struct platform_driver exynos_drm_common_hdmi_driver = { 427struct platform_driver exynos_drm_common_hdmi_driver = {
391 .probe = exynos_drm_hdmi_probe, 428 .probe = exynos_drm_hdmi_probe,
392 .remove = __devexit_p(exynos_drm_hdmi_remove), 429 .remove = exynos_drm_hdmi_remove,
393 .driver = { 430 .driver = {
394 .name = "exynos-drm-hdmi", 431 .name = "exynos-drm-hdmi",
395 .owner = THIS_MODULE, 432 .owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index 2da5ffd3a059..784a7e9a766c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -3,24 +3,10 @@
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd. 3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * Authoer: Inki Dae <inki.dae@samsung.com> 4 * Authoer: Inki Dae <inki.dae@samsung.com>
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * This program is free software; you can redistribute it and/or modify it
7 * copy of this software and associated documentation files (the "Software"), 7 * under the terms of the GNU General Public License as published by the
8 * to deal in the Software without restriction, including without limitation 8 * Free Software Foundation; either version 2 of the License, or (at your
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * option) any later version.
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */ 10 */
25 11
26#ifndef _EXYNOS_DRM_HDMI_H_ 12#ifndef _EXYNOS_DRM_HDMI_H_
@@ -62,12 +48,13 @@ struct exynos_hdmi_ops {
62 48
63struct exynos_mixer_ops { 49struct exynos_mixer_ops {
64 /* manager */ 50 /* manager */
51 int (*iommu_on)(void *ctx, bool enable);
65 int (*enable_vblank)(void *ctx, int pipe); 52 int (*enable_vblank)(void *ctx, int pipe);
66 void (*disable_vblank)(void *ctx); 53 void (*disable_vblank)(void *ctx);
54 void (*wait_for_vblank)(void *ctx);
67 void (*dpms)(void *ctx, int mode); 55 void (*dpms)(void *ctx, int mode);
68 56
69 /* overlay */ 57 /* overlay */
70 void (*wait_for_vblank)(void *ctx);
71 void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay); 58 void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
72 void (*win_commit)(void *ctx, int zpos); 59 void (*win_commit)(void *ctx, int zpos);
73 void (*win_disable)(void *ctx, int zpos); 60 void (*win_disable)(void *ctx, int zpos);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
new file mode 100644
index 000000000000..3799d5c2b5df
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -0,0 +1,136 @@
1/* exynos_drm_iommu.c
2 *
3 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
4 * Author: Inki Dae <inki.dae@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#include <drmP.h>
13#include <drm/exynos_drm.h>
14
15#include <linux/dma-mapping.h>
16#include <linux/iommu.h>
17#include <linux/kref.h>
18
19#include <asm/dma-iommu.h>
20
21#include "exynos_drm_drv.h"
22#include "exynos_drm_iommu.h"
23
24/*
25 * drm_create_iommu_mapping - create a mapping structure
26 *
27 * @drm_dev: DRM device
28 */
29int drm_create_iommu_mapping(struct drm_device *drm_dev)
30{
31 struct dma_iommu_mapping *mapping = NULL;
32 struct exynos_drm_private *priv = drm_dev->dev_private;
33 struct device *dev = drm_dev->dev;
34
35 if (!priv->da_start)
36 priv->da_start = EXYNOS_DEV_ADDR_START;
37 if (!priv->da_space_size)
38 priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
39 if (!priv->da_space_order)
40 priv->da_space_order = EXYNOS_DEV_ADDR_ORDER;
41
42 mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
43 priv->da_space_size,
44 priv->da_space_order);
45 if (IS_ERR(mapping))
46 return PTR_ERR(mapping);
47
48 dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
49 GFP_KERNEL);
50 dma_set_max_seg_size(dev, 0xffffffffu);
51 dev->archdata.mapping = mapping;
52
53 return 0;
54}
55
56/*
57 * drm_release_iommu_mapping - release iommu mapping structure
58 *
59 * @drm_dev: DRM device
60 *
61 * if mapping->kref becomes 0 then all things related to iommu mapping
62 * will be released
63 */
64void drm_release_iommu_mapping(struct drm_device *drm_dev)
65{
66 struct device *dev = drm_dev->dev;
67
68 arm_iommu_release_mapping(dev->archdata.mapping);
69}
70
71/*
72 * drm_iommu_attach_device- attach device to iommu mapping
73 *
74 * @drm_dev: DRM device
75 * @subdrv_dev: device to be attach
76 *
77 * This function should be called by sub drivers to attach it to iommu
78 * mapping.
79 */
80int drm_iommu_attach_device(struct drm_device *drm_dev,
81 struct device *subdrv_dev)
82{
83 struct device *dev = drm_dev->dev;
84 int ret;
85
86 if (!dev->archdata.mapping) {
87 DRM_ERROR("iommu_mapping is null.\n");
88 return -EFAULT;
89 }
90
91 subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
92 sizeof(*subdrv_dev->dma_parms),
93 GFP_KERNEL);
94 dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
95
96 ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);
97 if (ret < 0) {
98 DRM_DEBUG_KMS("failed iommu attach.\n");
99 return ret;
100 }
101
102 /*
103 * Set dma_ops to drm_device just one time.
104 *
105 * The dma mapping api needs device object and the api is used
106 * to allocate physial memory and map it with iommu table.
107 * If iommu attach succeeded, the sub driver would have dma_ops
108 * for iommu and also all sub drivers have same dma_ops.
109 */
110 if (!dev->archdata.dma_ops)
111 dev->archdata.dma_ops = subdrv_dev->archdata.dma_ops;
112
113 return 0;
114}
115
116/*
117 * drm_iommu_detach_device -detach device address space mapping from device
118 *
119 * @drm_dev: DRM device
120 * @subdrv_dev: device to be detached
121 *
122 * This function should be called by sub drivers to detach it from iommu
123 * mapping
124 */
125void drm_iommu_detach_device(struct drm_device *drm_dev,
126 struct device *subdrv_dev)
127{
128 struct device *dev = drm_dev->dev;
129 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
130
131 if (!mapping || !mapping->domain)
132 return;
133
134 iommu_detach_device(mapping->domain, subdrv_dev);
135 drm_release_iommu_mapping(drm_dev);
136}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
new file mode 100644
index 000000000000..53b7deea8ab7
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -0,0 +1,71 @@
1/* exynos_drm_iommu.h
2 *
3 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
4 * Authoer: Inki Dae <inki.dae@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#ifndef _EXYNOS_DRM_IOMMU_H_
13#define _EXYNOS_DRM_IOMMU_H_
14
15#define EXYNOS_DEV_ADDR_START 0x20000000
16#define EXYNOS_DEV_ADDR_SIZE 0x40000000
17#define EXYNOS_DEV_ADDR_ORDER 0x4
18
19#ifdef CONFIG_DRM_EXYNOS_IOMMU
20
21int drm_create_iommu_mapping(struct drm_device *drm_dev);
22
23void drm_release_iommu_mapping(struct drm_device *drm_dev);
24
25int drm_iommu_attach_device(struct drm_device *drm_dev,
26 struct device *subdrv_dev);
27
28void drm_iommu_detach_device(struct drm_device *dev_dev,
29 struct device *subdrv_dev);
30
31static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
32{
33#ifdef CONFIG_ARM_DMA_USE_IOMMU
34 struct device *dev = drm_dev->dev;
35
36 return dev->archdata.mapping ? true : false;
37#else
38 return false;
39#endif
40}
41
42#else
43
44struct dma_iommu_mapping;
45static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
46{
47 return 0;
48}
49
50static inline void drm_release_iommu_mapping(struct drm_device *drm_dev)
51{
52}
53
54static inline int drm_iommu_attach_device(struct drm_device *drm_dev,
55 struct device *subdrv_dev)
56{
57 return 0;
58}
59
60static inline void drm_iommu_detach_device(struct drm_device *drm_dev,
61 struct device *subdrv_dev)
62{
63}
64
65static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
66{
67 return false;
68}
69
70#endif
71#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
new file mode 100644
index 000000000000..0bda96454a02
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -0,0 +1,2050 @@
1/*
2 * Copyright (C) 2012 Samsung Electronics Co.Ltd
3 * Authors:
4 * Eunchul Kim <chulspro.kim@samsung.com>
5 * Jinyoung Jeon <jy0.jeon@samsung.com>
6 * Sangmin Lee <lsmin.lee@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 */
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/types.h>
18#include <linux/clk.h>
19#include <linux/pm_runtime.h>
20#include <plat/map-base.h>
21
22#include <drm/drmP.h>
23#include <drm/exynos_drm.h>
24#include "exynos_drm_drv.h"
25#include "exynos_drm_gem.h"
26#include "exynos_drm_ipp.h"
27#include "exynos_drm_iommu.h"
28
29/*
30 * IPP stands for Image Post Processing and
31 * supports image scaler/rotator and input/output DMA operations.
32 * using FIMC, GSC, Rotator, so on.
33 * IPP is integration device driver of same attribute h/w
34 */
35
36/*
37 * TODO
38 * 1. expand command control id.
39 * 2. integrate property and config.
40 * 3. removed send_event id check routine.
41 * 4. compare send_event id if needed.
42 * 5. free subdrv_remove notifier callback list if needed.
43 * 6. need to check subdrv_open about multi-open.
44 * 7. need to power_on implement power and sysmmu ctrl.
45 */
46
47#define get_ipp_context(dev) platform_get_drvdata(to_platform_device(dev))
48#define ipp_is_m2m_cmd(c) (c == IPP_CMD_M2M)
49
50/*
51 * A structure of event.
52 *
53 * @base: base of event.
54 * @event: ipp event.
55 */
56struct drm_exynos_ipp_send_event {
57 struct drm_pending_event base;
58 struct drm_exynos_ipp_event event;
59};
60
61/*
62 * A structure of memory node.
63 *
64 * @list: list head to memory queue information.
65 * @ops_id: id of operations.
66 * @prop_id: id of property.
67 * @buf_id: id of buffer.
68 * @buf_info: gem objects and dma address, size.
69 * @filp: a pointer to drm_file.
70 */
71struct drm_exynos_ipp_mem_node {
72 struct list_head list;
73 enum drm_exynos_ops_id ops_id;
74 u32 prop_id;
75 u32 buf_id;
76 struct drm_exynos_ipp_buf_info buf_info;
77 struct drm_file *filp;
78};
79
80/*
81 * A structure of ipp context.
82 *
83 * @subdrv: prepare initialization using subdrv.
84 * @ipp_lock: lock for synchronization of access to ipp_idr.
85 * @prop_lock: lock for synchronization of access to prop_idr.
86 * @ipp_idr: ipp driver idr.
87 * @prop_idr: property idr.
88 * @event_workq: event work queue.
89 * @cmd_workq: command work queue.
90 */
91struct ipp_context {
92 struct exynos_drm_subdrv subdrv;
93 struct mutex ipp_lock;
94 struct mutex prop_lock;
95 struct idr ipp_idr;
96 struct idr prop_idr;
97 struct workqueue_struct *event_workq;
98 struct workqueue_struct *cmd_workq;
99};
100
101static LIST_HEAD(exynos_drm_ippdrv_list);
102static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
103static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
104
105int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
106{
107 DRM_DEBUG_KMS("%s\n", __func__);
108
109 if (!ippdrv)
110 return -EINVAL;
111
112 mutex_lock(&exynos_drm_ippdrv_lock);
113 list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
114 mutex_unlock(&exynos_drm_ippdrv_lock);
115
116 return 0;
117}
118
119int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
120{
121 DRM_DEBUG_KMS("%s\n", __func__);
122
123 if (!ippdrv)
124 return -EINVAL;
125
126 mutex_lock(&exynos_drm_ippdrv_lock);
127 list_del(&ippdrv->drv_list);
128 mutex_unlock(&exynos_drm_ippdrv_lock);
129
130 return 0;
131}
132
133static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
134 u32 *idp)
135{
136 int ret;
137
138 DRM_DEBUG_KMS("%s\n", __func__);
139
140again:
141 /* ensure there is space available to allocate a handle */
142 if (idr_pre_get(id_idr, GFP_KERNEL) == 0) {
143 DRM_ERROR("failed to get idr.\n");
144 return -ENOMEM;
145 }
146
147 /* do the allocation under our mutexlock */
148 mutex_lock(lock);
149 ret = idr_get_new_above(id_idr, obj, 1, (int *)idp);
150 mutex_unlock(lock);
151 if (ret == -EAGAIN)
152 goto again;
153
154 return ret;
155}
156
157static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
158{
159 void *obj;
160
161 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, id);
162
163 mutex_lock(lock);
164
165 /* find object using handle */
166 obj = idr_find(id_idr, id);
167 if (!obj) {
168 DRM_ERROR("failed to find object.\n");
169 mutex_unlock(lock);
170 return ERR_PTR(-ENODEV);
171 }
172
173 mutex_unlock(lock);
174
175 return obj;
176}
177
178static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
179 enum drm_exynos_ipp_cmd cmd)
180{
181 /*
182 * check dedicated flag and WB, OUTPUT operation with
183 * power on state.
184 */
185 if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
186 !pm_runtime_suspended(ippdrv->dev)))
187 return true;
188
189 return false;
190}
191
192static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
193 struct drm_exynos_ipp_property *property)
194{
195 struct exynos_drm_ippdrv *ippdrv;
196 u32 ipp_id = property->ipp_id;
197
198 DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, ipp_id);
199
200 if (ipp_id) {
201 /* find ipp driver using idr */
202 ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
203 ipp_id);
204 if (IS_ERR_OR_NULL(ippdrv)) {
205 DRM_ERROR("not found ipp%d driver.\n", ipp_id);
206 return ippdrv;
207 }
208
209 /*
210 * WB, OUTPUT opertion not supported multi-operation.
211 * so, make dedicated state at set property ioctl.
212 * when ipp driver finished operations, clear dedicated flags.
213 */
214 if (ipp_check_dedicated(ippdrv, property->cmd)) {
215 DRM_ERROR("already used choose device.\n");
216 return ERR_PTR(-EBUSY);
217 }
218
219 /*
220 * This is necessary to find correct device in ipp drivers.
221 * ipp drivers have different abilities,
222 * so need to check property.
223 */
224 if (ippdrv->check_property &&
225 ippdrv->check_property(ippdrv->dev, property)) {
226 DRM_ERROR("not support property.\n");
227 return ERR_PTR(-EINVAL);
228 }
229
230 return ippdrv;
231 } else {
232 /*
233 * This case is search all ipp driver for finding.
234 * user application don't set ipp_id in this case,
235 * so ipp subsystem search correct driver in driver list.
236 */
237 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
238 if (ipp_check_dedicated(ippdrv, property->cmd)) {
239 DRM_DEBUG_KMS("%s:used device.\n", __func__);
240 continue;
241 }
242
243 if (ippdrv->check_property &&
244 ippdrv->check_property(ippdrv->dev, property)) {
245 DRM_DEBUG_KMS("%s:not support property.\n",
246 __func__);
247 continue;
248 }
249
250 return ippdrv;
251 }
252
253 DRM_ERROR("not support ipp driver operations.\n");
254 }
255
256 return ERR_PTR(-ENODEV);
257}
258
259static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
260{
261 struct exynos_drm_ippdrv *ippdrv;
262 struct drm_exynos_ipp_cmd_node *c_node;
263 int count = 0;
264
265 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
266
267 if (list_empty(&exynos_drm_ippdrv_list)) {
268 DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
269 return ERR_PTR(-ENODEV);
270 }
271
272 /*
273 * This case is search ipp driver by prop_id handle.
274 * sometimes, ipp subsystem find driver by prop_id.
275 * e.g PAUSE state, queue buf, command contro.
276 */
277 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
278 DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n", __func__,
279 count++, (int)ippdrv);
280
281 if (!list_empty(&ippdrv->cmd_list)) {
282 list_for_each_entry(c_node, &ippdrv->cmd_list, list)
283 if (c_node->property.prop_id == prop_id)
284 return ippdrv;
285 }
286 }
287
288 return ERR_PTR(-ENODEV);
289}
290
291int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
292 struct drm_file *file)
293{
294 struct drm_exynos_file_private *file_priv = file->driver_priv;
295 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
296 struct device *dev = priv->dev;
297 struct ipp_context *ctx = get_ipp_context(dev);
298 struct drm_exynos_ipp_prop_list *prop_list = data;
299 struct exynos_drm_ippdrv *ippdrv;
300 int count = 0;
301
302 DRM_DEBUG_KMS("%s\n", __func__);
303
304 if (!ctx) {
305 DRM_ERROR("invalid context.\n");
306 return -EINVAL;
307 }
308
309 if (!prop_list) {
310 DRM_ERROR("invalid property parameter.\n");
311 return -EINVAL;
312 }
313
314 DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, prop_list->ipp_id);
315
316 if (!prop_list->ipp_id) {
317 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
318 count++;
319 /*
320 * Supports ippdrv list count for user application.
321 * First step user application getting ippdrv count.
322 * and second step getting ippdrv capability using ipp_id.
323 */
324 prop_list->count = count;
325 } else {
326 /*
327 * Getting ippdrv capability by ipp_id.
328 * some deivce not supported wb, output interface.
329 * so, user application detect correct ipp driver
330 * using this ioctl.
331 */
332 ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
333 prop_list->ipp_id);
334 if (!ippdrv) {
335 DRM_ERROR("not found ipp%d driver.\n",
336 prop_list->ipp_id);
337 return -EINVAL;
338 }
339
340 prop_list = ippdrv->prop_list;
341 }
342
343 return 0;
344}
345
346static void ipp_print_property(struct drm_exynos_ipp_property *property,
347 int idx)
348{
349 struct drm_exynos_ipp_config *config = &property->config[idx];
350 struct drm_exynos_pos *pos = &config->pos;
351 struct drm_exynos_sz *sz = &config->sz;
352
353 DRM_DEBUG_KMS("%s:prop_id[%d]ops[%s]fmt[0x%x]\n",
354 __func__, property->prop_id, idx ? "dst" : "src", config->fmt);
355
356 DRM_DEBUG_KMS("%s:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
357 __func__, pos->x, pos->y, pos->w, pos->h,
358 sz->hsize, sz->vsize, config->flip, config->degree);
359}
360
361static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
362{
363 struct exynos_drm_ippdrv *ippdrv;
364 struct drm_exynos_ipp_cmd_node *c_node;
365 u32 prop_id = property->prop_id;
366
367 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
368
369 ippdrv = ipp_find_drv_by_handle(prop_id);
370 if (IS_ERR_OR_NULL(ippdrv)) {
371 DRM_ERROR("failed to get ipp driver.\n");
372 return -EINVAL;
373 }
374
375 /*
376 * Find command node using command list in ippdrv.
377 * when we find this command no using prop_id.
378 * return property information set in this command node.
379 */
380 list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
381 if ((c_node->property.prop_id == prop_id) &&
382 (c_node->state == IPP_STATE_STOP)) {
383 DRM_DEBUG_KMS("%s:found cmd[%d]ippdrv[0x%x]\n",
384 __func__, property->cmd, (int)ippdrv);
385
386 c_node->property = *property;
387 return 0;
388 }
389 }
390
391 DRM_ERROR("failed to search property.\n");
392
393 return -EINVAL;
394}
395
396static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
397{
398 struct drm_exynos_ipp_cmd_work *cmd_work;
399
400 DRM_DEBUG_KMS("%s\n", __func__);
401
402 cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
403 if (!cmd_work) {
404 DRM_ERROR("failed to alloc cmd_work.\n");
405 return ERR_PTR(-ENOMEM);
406 }
407
408 INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
409
410 return cmd_work;
411}
412
413static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
414{
415 struct drm_exynos_ipp_event_work *event_work;
416
417 DRM_DEBUG_KMS("%s\n", __func__);
418
419 event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
420 if (!event_work) {
421 DRM_ERROR("failed to alloc event_work.\n");
422 return ERR_PTR(-ENOMEM);
423 }
424
425 INIT_WORK((struct work_struct *)event_work, ipp_sched_event);
426
427 return event_work;
428}
429
430int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
431 struct drm_file *file)
432{
433 struct drm_exynos_file_private *file_priv = file->driver_priv;
434 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
435 struct device *dev = priv->dev;
436 struct ipp_context *ctx = get_ipp_context(dev);
437 struct drm_exynos_ipp_property *property = data;
438 struct exynos_drm_ippdrv *ippdrv;
439 struct drm_exynos_ipp_cmd_node *c_node;
440 int ret, i;
441
442 DRM_DEBUG_KMS("%s\n", __func__);
443
444 if (!ctx) {
445 DRM_ERROR("invalid context.\n");
446 return -EINVAL;
447 }
448
449 if (!property) {
450 DRM_ERROR("invalid property parameter.\n");
451 return -EINVAL;
452 }
453
454 /*
455 * This is log print for user application property.
456 * user application set various property.
457 */
458 for_each_ipp_ops(i)
459 ipp_print_property(property, i);
460
461 /*
462 * set property ioctl generated new prop_id.
463 * but in this case already asigned prop_id using old set property.
464 * e.g PAUSE state. this case supports find current prop_id and use it
465 * instead of allocation.
466 */
467 if (property->prop_id) {
468 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
469 return ipp_find_and_set_property(property);
470 }
471
472 /* find ipp driver using ipp id */
473 ippdrv = ipp_find_driver(ctx, property);
474 if (IS_ERR_OR_NULL(ippdrv)) {
475 DRM_ERROR("failed to get ipp driver.\n");
476 return -EINVAL;
477 }
478
479 /* allocate command node */
480 c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
481 if (!c_node) {
482 DRM_ERROR("failed to allocate map node.\n");
483 return -ENOMEM;
484 }
485
486 /* create property id */
487 ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
488 &property->prop_id);
489 if (ret) {
490 DRM_ERROR("failed to create id.\n");
491 goto err_clear;
492 }
493
494 DRM_DEBUG_KMS("%s:created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
495 __func__, property->prop_id, property->cmd, (int)ippdrv);
496
497 /* stored property information and ippdrv in private data */
498 c_node->priv = priv;
499 c_node->property = *property;
500 c_node->state = IPP_STATE_IDLE;
501
502 c_node->start_work = ipp_create_cmd_work();
503 if (IS_ERR_OR_NULL(c_node->start_work)) {
504 DRM_ERROR("failed to create start work.\n");
505 goto err_clear;
506 }
507
508 c_node->stop_work = ipp_create_cmd_work();
509 if (IS_ERR_OR_NULL(c_node->stop_work)) {
510 DRM_ERROR("failed to create stop work.\n");
511 goto err_free_start;
512 }
513
514 c_node->event_work = ipp_create_event_work();
515 if (IS_ERR_OR_NULL(c_node->event_work)) {
516 DRM_ERROR("failed to create event work.\n");
517 goto err_free_stop;
518 }
519
520 mutex_init(&c_node->cmd_lock);
521 mutex_init(&c_node->mem_lock);
522 mutex_init(&c_node->event_lock);
523
524 init_completion(&c_node->start_complete);
525 init_completion(&c_node->stop_complete);
526
527 for_each_ipp_ops(i)
528 INIT_LIST_HEAD(&c_node->mem_list[i]);
529
530 INIT_LIST_HEAD(&c_node->event_list);
531 list_splice_init(&priv->event_list, &c_node->event_list);
532 list_add_tail(&c_node->list, &ippdrv->cmd_list);
533
534 /* make dedicated state without m2m */
535 if (!ipp_is_m2m_cmd(property->cmd))
536 ippdrv->dedicated = true;
537
538 return 0;
539
540err_free_stop:
541 kfree(c_node->stop_work);
542err_free_start:
543 kfree(c_node->start_work);
544err_clear:
545 kfree(c_node);
546 return ret;
547}
548
549static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
550{
551 DRM_DEBUG_KMS("%s\n", __func__);
552
553 /* delete list */
554 list_del(&c_node->list);
555
556 /* destroy mutex */
557 mutex_destroy(&c_node->cmd_lock);
558 mutex_destroy(&c_node->mem_lock);
559 mutex_destroy(&c_node->event_lock);
560
561 /* free command node */
562 kfree(c_node->start_work);
563 kfree(c_node->stop_work);
564 kfree(c_node->event_work);
565 kfree(c_node);
566}
567
568static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
569{
570 struct drm_exynos_ipp_property *property = &c_node->property;
571 struct drm_exynos_ipp_mem_node *m_node;
572 struct list_head *head;
573 int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };
574
575 DRM_DEBUG_KMS("%s\n", __func__);
576
577 mutex_lock(&c_node->mem_lock);
578
579 for_each_ipp_ops(i) {
580 /* source/destination memory list */
581 head = &c_node->mem_list[i];
582
583 if (list_empty(head)) {
584 DRM_DEBUG_KMS("%s:%s memory empty.\n", __func__,
585 i ? "dst" : "src");
586 continue;
587 }
588
589 /* find memory node entry */
590 list_for_each_entry(m_node, head, list) {
591 DRM_DEBUG_KMS("%s:%s,count[%d]m_node[0x%x]\n", __func__,
592 i ? "dst" : "src", count[i], (int)m_node);
593 count[i]++;
594 }
595 }
596
597 DRM_DEBUG_KMS("%s:min[%d]max[%d]\n", __func__,
598 min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
599 max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));
600
601 /*
602 * M2M operations should be need paired memory address.
603 * so, need to check minimum count about src, dst.
604 * other case not use paired memory, so use maximum count
605 */
606 if (ipp_is_m2m_cmd(property->cmd))
607 ret = min(count[EXYNOS_DRM_OPS_SRC],
608 count[EXYNOS_DRM_OPS_DST]);
609 else
610 ret = max(count[EXYNOS_DRM_OPS_SRC],
611 count[EXYNOS_DRM_OPS_DST]);
612
613 mutex_unlock(&c_node->mem_lock);
614
615 return ret;
616}
617
618static struct drm_exynos_ipp_mem_node
619 *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
620 struct drm_exynos_ipp_queue_buf *qbuf)
621{
622 struct drm_exynos_ipp_mem_node *m_node;
623 struct list_head *head;
624 int count = 0;
625
626 DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, qbuf->buf_id);
627
628 /* source/destination memory list */
629 head = &c_node->mem_list[qbuf->ops_id];
630
631 /* find memory node from memory list */
632 list_for_each_entry(m_node, head, list) {
633 DRM_DEBUG_KMS("%s:count[%d]m_node[0x%x]\n",
634 __func__, count++, (int)m_node);
635
636 /* compare buffer id */
637 if (m_node->buf_id == qbuf->buf_id)
638 return m_node;
639 }
640
641 return NULL;
642}
643
644static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
645 struct drm_exynos_ipp_cmd_node *c_node,
646 struct drm_exynos_ipp_mem_node *m_node)
647{
648 struct exynos_drm_ipp_ops *ops = NULL;
649 int ret = 0;
650
651 DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
652
653 if (!m_node) {
654 DRM_ERROR("invalid queue node.\n");
655 return -EFAULT;
656 }
657
658 mutex_lock(&c_node->mem_lock);
659
660 DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
661
662 /* get operations callback */
663 ops = ippdrv->ops[m_node->ops_id];
664 if (!ops) {
665 DRM_ERROR("not support ops.\n");
666 ret = -EFAULT;
667 goto err_unlock;
668 }
669
670 /* set address and enable irq */
671 if (ops->set_addr) {
672 ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
673 m_node->buf_id, IPP_BUF_ENQUEUE);
674 if (ret) {
675 DRM_ERROR("failed to set addr.\n");
676 goto err_unlock;
677 }
678 }
679
680err_unlock:
681 mutex_unlock(&c_node->mem_lock);
682 return ret;
683}
684
685static struct drm_exynos_ipp_mem_node
686 *ipp_get_mem_node(struct drm_device *drm_dev,
687 struct drm_file *file,
688 struct drm_exynos_ipp_cmd_node *c_node,
689 struct drm_exynos_ipp_queue_buf *qbuf)
690{
691 struct drm_exynos_ipp_mem_node *m_node;
692 struct drm_exynos_ipp_buf_info buf_info;
693 void *addr;
694 int i;
695
696 DRM_DEBUG_KMS("%s\n", __func__);
697
698 mutex_lock(&c_node->mem_lock);
699
700 m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
701 if (!m_node) {
702 DRM_ERROR("failed to allocate queue node.\n");
703 goto err_unlock;
704 }
705
706 /* clear base address for error handling */
707 memset(&buf_info, 0x0, sizeof(buf_info));
708
709 /* operations, buffer id */
710 m_node->ops_id = qbuf->ops_id;
711 m_node->prop_id = qbuf->prop_id;
712 m_node->buf_id = qbuf->buf_id;
713
714 DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__,
715 (int)m_node, qbuf->ops_id);
716 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__,
717 qbuf->prop_id, m_node->buf_id);
718
719 for_each_ipp_planar(i) {
720 DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__,
721 i, qbuf->handle[i]);
722
723 /* get dma address by handle */
724 if (qbuf->handle[i]) {
725 addr = exynos_drm_gem_get_dma_addr(drm_dev,
726 qbuf->handle[i], file);
727 if (IS_ERR(addr)) {
728 DRM_ERROR("failed to get addr.\n");
729 goto err_clear;
730 }
731
732 buf_info.handles[i] = qbuf->handle[i];
733 buf_info.base[i] = *(dma_addr_t *) addr;
734 DRM_DEBUG_KMS("%s:i[%d]base[0x%x]hd[0x%x]\n",
735 __func__, i, buf_info.base[i],
736 (int)buf_info.handles[i]);
737 }
738 }
739
740 m_node->filp = file;
741 m_node->buf_info = buf_info;
742 list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
743
744 mutex_unlock(&c_node->mem_lock);
745 return m_node;
746
747err_clear:
748 kfree(m_node);
749err_unlock:
750 mutex_unlock(&c_node->mem_lock);
751 return ERR_PTR(-EFAULT);
752}
753
754static int ipp_put_mem_node(struct drm_device *drm_dev,
755 struct drm_exynos_ipp_cmd_node *c_node,
756 struct drm_exynos_ipp_mem_node *m_node)
757{
758 int i;
759
760 DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
761
762 if (!m_node) {
763 DRM_ERROR("invalid dequeue node.\n");
764 return -EFAULT;
765 }
766
767 if (list_empty(&m_node->list)) {
768 DRM_ERROR("empty memory node.\n");
769 return -ENOMEM;
770 }
771
772 mutex_lock(&c_node->mem_lock);
773
774 DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
775
776 /* put gem buffer */
777 for_each_ipp_planar(i) {
778 unsigned long handle = m_node->buf_info.handles[i];
779 if (handle)
780 exynos_drm_gem_put_dma_addr(drm_dev, handle,
781 m_node->filp);
782 }
783
784 /* delete list in queue */
785 list_del(&m_node->list);
786 kfree(m_node);
787
788 mutex_unlock(&c_node->mem_lock);
789
790 return 0;
791}
792
/*
 * drm_pending_event destroy callback: events are plain kzalloc'd nodes
 * (see ipp_get_event()), so freeing is all that is needed.
 */
static void ipp_free_event(struct drm_pending_event *event)
{
	kfree(event);
}
797
/*
 * Allocate a completion event for a destination buffer and queue it on
 * the command node's event list.  ipp_send_event() later fills in the
 * timestamp/buffer ids and moves it to the file's event list.
 *
 * Returns 0 on success, -ENOMEM if the event cannot be allocated.
 */
static int ipp_get_event(struct drm_device *drm_dev,
		struct drm_file *file,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e;
	unsigned long flags;

	DRM_DEBUG_KMS("%s:ops_id[%d]buf_id[%d]\n", __func__,
		qbuf->ops_id, qbuf->buf_id);

	e = kzalloc(sizeof(*e), GFP_KERNEL);

	if (!e) {
		DRM_ERROR("failed to allocate event.\n");
		/*
		 * NOTE(review): event_space is credited back here, but no
		 * matching debit (reservation) is visible in this file.
		 * This looks unbalanced -- verify against the DRM event
		 * accounting convention (reserve space, then allocate,
		 * refund only on failure).
		 */
		spin_lock_irqsave(&drm_dev->event_lock, flags);
		file->event_space += sizeof(e->event);
		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
		return -ENOMEM;
	}

	/* make event */
	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = qbuf->user_data;
	e->event.prop_id = qbuf->prop_id;
	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
	e->base.event = &e->event.base;
	e->base.file_priv = file;
	e->base.destroy = ipp_free_event;
	list_add_tail(&e->base.link, &c_node->event_list);

	return 0;
}
832
833static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
834 struct drm_exynos_ipp_queue_buf *qbuf)
835{
836 struct drm_exynos_ipp_send_event *e, *te;
837 int count = 0;
838
839 DRM_DEBUG_KMS("%s\n", __func__);
840
841 if (list_empty(&c_node->event_list)) {
842 DRM_DEBUG_KMS("%s:event_list is empty.\n", __func__);
843 return;
844 }
845
846 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
847 DRM_DEBUG_KMS("%s:count[%d]e[0x%x]\n",
848 __func__, count++, (int)e);
849
850 /*
851 * quf == NULL condition means all event deletion.
852 * stop operations want to delete all event list.
853 * another case delete only same buf id.
854 */
855 if (!qbuf) {
856 /* delete list */
857 list_del(&e->base.link);
858 kfree(e);
859 }
860
861 /* compare buffer id */
862 if (qbuf && (qbuf->buf_id ==
863 e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
864 /* delete list */
865 list_del(&e->base.link);
866 kfree(e);
867 return;
868 }
869 }
870}
871
872void ipp_handle_cmd_work(struct device *dev,
873 struct exynos_drm_ippdrv *ippdrv,
874 struct drm_exynos_ipp_cmd_work *cmd_work,
875 struct drm_exynos_ipp_cmd_node *c_node)
876{
877 struct ipp_context *ctx = get_ipp_context(dev);
878
879 cmd_work->ippdrv = ippdrv;
880 cmd_work->c_node = c_node;
881 queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
882}
883
/*
 * Handle an enqueued buffer while a command may already be running.
 *
 * For M2M commands a PLAY work item is queued so the command thread
 * starts a transfer with the new buffer; for all other commands the
 * memory node is handed straight to the driver via ipp_set_mem_node().
 * When the command node is not in IPP_STATE_START, or no memory is
 * queued yet, the buffer is simply left on the list for later.
 *
 * Returns 0 on success (including the bypass cases) or a negative
 * error code.
 */
static int ipp_queue_buf_with_run(struct device *dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_property *property;
	struct exynos_drm_ipp_ops *ops;
	int ret;

	DRM_DEBUG_KMS("%s\n", __func__);

	ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
	if (IS_ERR_OR_NULL(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EFAULT;
	}

	ops = ippdrv->ops[qbuf->ops_id];
	if (!ops) {
		DRM_ERROR("failed to get ops.\n");
		return -EFAULT;
	}

	property = &c_node->property;

	/* not started yet: keep the buffer queued and return success */
	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("%s:bypass for invalid state.\n" , __func__);
		return 0;
	}

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
		return 0;
	}

	/*
	 * If set destination buffer and enabled clock,
	 * then m2m operations need start operations at queue_buf
	 */
	if (ipp_is_m2m_cmd(property->cmd)) {
		struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;

		cmd_work->ctrl = IPP_CTRL_PLAY;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
	} else {
		ret = ipp_set_mem_node(ippdrv, c_node, m_node);
		if (ret) {
			DRM_ERROR("failed to set m node.\n");
			return ret;
		}
	}

	return 0;
}
939
940static void ipp_clean_queue_buf(struct drm_device *drm_dev,
941 struct drm_exynos_ipp_cmd_node *c_node,
942 struct drm_exynos_ipp_queue_buf *qbuf)
943{
944 struct drm_exynos_ipp_mem_node *m_node, *tm_node;
945
946 DRM_DEBUG_KMS("%s\n", __func__);
947
948 if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
949 /* delete list */
950 list_for_each_entry_safe(m_node, tm_node,
951 &c_node->mem_list[qbuf->ops_id], list) {
952 if (m_node->buf_id == qbuf->buf_id &&
953 m_node->ops_id == qbuf->ops_id)
954 ipp_put_mem_node(drm_dev, c_node, m_node);
955 }
956 }
957}
958
/*
 * DRM_EXYNOS_IPP_QUEUE_BUF ioctl handler: enqueue or dequeue one
 * source/destination buffer on a command node.
 *
 * Enqueue resolves the buffer's GEM handles to dma addresses via
 * ipp_get_mem_node(); for a destination buffer it additionally arms a
 * completion event and may kick an M2M run.  Dequeue drops any pending
 * event for the buffer and puts the matching memory node(s).
 *
 * Returns 0 on success or a negative error code.
 */
int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct device *dev = priv->dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_queue_buf *qbuf = data;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_mem_node *m_node;
	int ret;

	DRM_DEBUG_KMS("%s\n", __func__);

	if (!qbuf) {
		DRM_ERROR("invalid buf parameter.\n");
		return -EINVAL;
	}

	/* ops_id indexes mem_list[]; reject out-of-range values early */
	if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
		DRM_ERROR("invalid ops parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
		__func__, qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
		qbuf->buf_id, qbuf->buf_type);

	/* find command node */
	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		qbuf->prop_id);
	if (!c_node) {
		DRM_ERROR("failed to get command node.\n");
		return -EFAULT;
	}

	/* buffer control */
	switch (qbuf->buf_type) {
	case IPP_BUF_ENQUEUE:
		/* get memory node */
		m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
		if (IS_ERR(m_node)) {
			DRM_ERROR("failed to get m_node.\n");
			return PTR_ERR(m_node);
		}

		/*
		 * first step get event for destination buffer.
		 * and second step when M2M case run with destination buffer
		 * if needed.
		 */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
			/* get event for destination buffer */
			ret = ipp_get_event(drm_dev, file, c_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to get event.\n");
				goto err_clean_node;
			}

			/*
			 * M2M case run play control for streaming feature.
			 * other case set address and waiting.
			 */
			ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to run command.\n");
				goto err_clean_node;
			}
		}
		break;
	case IPP_BUF_DEQUEUE:
		mutex_lock(&c_node->cmd_lock);

		/* put event for destination buffer */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
			ipp_put_event(c_node, qbuf);

		ipp_clean_queue_buf(drm_dev, c_node, qbuf);

		mutex_unlock(&c_node->cmd_lock);
		break;
	default:
		DRM_ERROR("invalid buffer control.\n");
		return -EINVAL;
	}

	return 0;

err_clean_node:
	/* undo the enqueue: the node just added matches qbuf and is put */
	DRM_ERROR("clean memory nodes.\n");

	ipp_clean_queue_buf(drm_dev, c_node, qbuf);
	return ret;
}
1053
1054static bool exynos_drm_ipp_check_valid(struct device *dev,
1055 enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
1056{
1057 DRM_DEBUG_KMS("%s\n", __func__);
1058
1059 if (ctrl != IPP_CTRL_PLAY) {
1060 if (pm_runtime_suspended(dev)) {
1061 DRM_ERROR("pm:runtime_suspended.\n");
1062 goto err_status;
1063 }
1064 }
1065
1066 switch (ctrl) {
1067 case IPP_CTRL_PLAY:
1068 if (state != IPP_STATE_IDLE)
1069 goto err_status;
1070 break;
1071 case IPP_CTRL_STOP:
1072 if (state == IPP_STATE_STOP)
1073 goto err_status;
1074 break;
1075 case IPP_CTRL_PAUSE:
1076 if (state != IPP_STATE_START)
1077 goto err_status;
1078 break;
1079 case IPP_CTRL_RESUME:
1080 if (state != IPP_STATE_STOP)
1081 goto err_status;
1082 break;
1083 default:
1084 DRM_ERROR("invalid state.\n");
1085 goto err_status;
1086 break;
1087 }
1088
1089 return true;
1090
1091err_status:
1092 DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
1093 return false;
1094}
1095
1096int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
1097 struct drm_file *file)
1098{
1099 struct drm_exynos_file_private *file_priv = file->driver_priv;
1100 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
1101 struct exynos_drm_ippdrv *ippdrv = NULL;
1102 struct device *dev = priv->dev;
1103 struct ipp_context *ctx = get_ipp_context(dev);
1104 struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
1105 struct drm_exynos_ipp_cmd_work *cmd_work;
1106 struct drm_exynos_ipp_cmd_node *c_node;
1107
1108 DRM_DEBUG_KMS("%s\n", __func__);
1109
1110 if (!ctx) {
1111 DRM_ERROR("invalid context.\n");
1112 return -EINVAL;
1113 }
1114
1115 if (!cmd_ctrl) {
1116 DRM_ERROR("invalid control parameter.\n");
1117 return -EINVAL;
1118 }
1119
1120 DRM_DEBUG_KMS("%s:ctrl[%d]prop_id[%d]\n", __func__,
1121 cmd_ctrl->ctrl, cmd_ctrl->prop_id);
1122
1123 ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
1124 if (IS_ERR(ippdrv)) {
1125 DRM_ERROR("failed to get ipp driver.\n");
1126 return PTR_ERR(ippdrv);
1127 }
1128
1129 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
1130 cmd_ctrl->prop_id);
1131 if (!c_node) {
1132 DRM_ERROR("invalid command node list.\n");
1133 return -EINVAL;
1134 }
1135
1136 if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
1137 c_node->state)) {
1138 DRM_ERROR("invalid state.\n");
1139 return -EINVAL;
1140 }
1141
1142 switch (cmd_ctrl->ctrl) {
1143 case IPP_CTRL_PLAY:
1144 if (pm_runtime_suspended(ippdrv->dev))
1145 pm_runtime_get_sync(ippdrv->dev);
1146 c_node->state = IPP_STATE_START;
1147
1148 cmd_work = c_node->start_work;
1149 cmd_work->ctrl = cmd_ctrl->ctrl;
1150 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1151 c_node->state = IPP_STATE_START;
1152 break;
1153 case IPP_CTRL_STOP:
1154 cmd_work = c_node->stop_work;
1155 cmd_work->ctrl = cmd_ctrl->ctrl;
1156 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1157
1158 if (!wait_for_completion_timeout(&c_node->stop_complete,
1159 msecs_to_jiffies(300))) {
1160 DRM_ERROR("timeout stop:prop_id[%d]\n",
1161 c_node->property.prop_id);
1162 }
1163
1164 c_node->state = IPP_STATE_STOP;
1165 ippdrv->dedicated = false;
1166 ipp_clean_cmd_node(c_node);
1167
1168 if (list_empty(&ippdrv->cmd_list))
1169 pm_runtime_put_sync(ippdrv->dev);
1170 break;
1171 case IPP_CTRL_PAUSE:
1172 cmd_work = c_node->stop_work;
1173 cmd_work->ctrl = cmd_ctrl->ctrl;
1174 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1175
1176 if (!wait_for_completion_timeout(&c_node->stop_complete,
1177 msecs_to_jiffies(200))) {
1178 DRM_ERROR("timeout stop:prop_id[%d]\n",
1179 c_node->property.prop_id);
1180 }
1181
1182 c_node->state = IPP_STATE_STOP;
1183 break;
1184 case IPP_CTRL_RESUME:
1185 c_node->state = IPP_STATE_START;
1186 cmd_work = c_node->start_work;
1187 cmd_work->ctrl = cmd_ctrl->ctrl;
1188 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1189 break;
1190 default:
1191 DRM_ERROR("could not support this state currently.\n");
1192 return -EINVAL;
1193 }
1194
1195 DRM_DEBUG_KMS("%s:done ctrl[%d]prop_id[%d]\n", __func__,
1196 cmd_ctrl->ctrl, cmd_ctrl->prop_id);
1197
1198 return 0;
1199}
1200
1201int exynos_drm_ippnb_register(struct notifier_block *nb)
1202{
1203 return blocking_notifier_chain_register(
1204 &exynos_drm_ippnb_list, nb);
1205}
1206
1207int exynos_drm_ippnb_unregister(struct notifier_block *nb)
1208{
1209 return blocking_notifier_chain_unregister(
1210 &exynos_drm_ippnb_list, nb);
1211}
1212
1213int exynos_drm_ippnb_send_event(unsigned long val, void *v)
1214{
1215 return blocking_notifier_call_chain(
1216 &exynos_drm_ippnb_list, val, v);
1217}
1218
1219static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
1220 struct drm_exynos_ipp_property *property)
1221{
1222 struct exynos_drm_ipp_ops *ops = NULL;
1223 bool swap = false;
1224 int ret, i;
1225
1226 if (!property) {
1227 DRM_ERROR("invalid property parameter.\n");
1228 return -EINVAL;
1229 }
1230
1231 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
1232
1233 /* reset h/w block */
1234 if (ippdrv->reset &&
1235 ippdrv->reset(ippdrv->dev)) {
1236 DRM_ERROR("failed to reset.\n");
1237 return -EINVAL;
1238 }
1239
1240 /* set source,destination operations */
1241 for_each_ipp_ops(i) {
1242 struct drm_exynos_ipp_config *config =
1243 &property->config[i];
1244
1245 ops = ippdrv->ops[i];
1246 if (!ops || !config) {
1247 DRM_ERROR("not support ops and config.\n");
1248 return -EINVAL;
1249 }
1250
1251 /* set format */
1252 if (ops->set_fmt) {
1253 ret = ops->set_fmt(ippdrv->dev, config->fmt);
1254 if (ret) {
1255 DRM_ERROR("not support format.\n");
1256 return ret;
1257 }
1258 }
1259
1260 /* set transform for rotation, flip */
1261 if (ops->set_transf) {
1262 ret = ops->set_transf(ippdrv->dev, config->degree,
1263 config->flip, &swap);
1264 if (ret) {
1265 DRM_ERROR("not support tranf.\n");
1266 return -EINVAL;
1267 }
1268 }
1269
1270 /* set size */
1271 if (ops->set_size) {
1272 ret = ops->set_size(ippdrv->dev, swap, &config->pos,
1273 &config->sz);
1274 if (ret) {
1275 DRM_ERROR("not support size.\n");
1276 return ret;
1277 }
1278 }
1279 }
1280
1281 return 0;
1282}
1283
/*
 * Start a command on the hardware: bind the node to the driver,
 * program the property, hand the queued memory node(s) to the driver
 * per command type, then call the driver's start op.
 *
 * Returns 0 on success or a negative error code.
 */
static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret, i;

	DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);

	/* store command info in ippdrv */
	ippdrv->c_node = c_node;

	/* refuse to start until buffers have been queued */
	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
		return -ENOMEM;
	}

	/* set current property in ippdrv */
	ret = ipp_set_property(ippdrv, property);
	if (ret) {
		DRM_ERROR("failed to set property.\n");
		ippdrv->c_node = NULL;
		return ret;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		/* M2M consumes the head node of both src and dst lists */
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);
			/*
			 * NOTE(review): list_first_entry() never returns
			 * NULL, so this check cannot fire; the real guard
			 * is the ipp_check_mem_list() call above.
			 */
			if (!m_node) {
				DRM_ERROR("failed to get node.\n");
				ret = -EFAULT;
				return ret;
			}

			DRM_DEBUG_KMS("%s:m_node[0x%x]\n",
				__func__, (int)m_node);

			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				return ret;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, property->cmd);

	/* start operations */
	if (ippdrv->start) {
		ret = ippdrv->start(ippdrv->dev, property->cmd);
		if (ret) {
			DRM_ERROR("failed to start ops.\n");
			return ret;
		}
	}

	return 0;
}
1377
1378static int ipp_stop_property(struct drm_device *drm_dev,
1379 struct exynos_drm_ippdrv *ippdrv,
1380 struct drm_exynos_ipp_cmd_node *c_node)
1381{
1382 struct drm_exynos_ipp_mem_node *m_node, *tm_node;
1383 struct drm_exynos_ipp_property *property = &c_node->property;
1384 struct list_head *head;
1385 int ret = 0, i;
1386
1387 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
1388
1389 /* put event */
1390 ipp_put_event(c_node, NULL);
1391
1392 /* check command */
1393 switch (property->cmd) {
1394 case IPP_CMD_M2M:
1395 for_each_ipp_ops(i) {
1396 /* source/destination memory list */
1397 head = &c_node->mem_list[i];
1398
1399 if (list_empty(head)) {
1400 DRM_DEBUG_KMS("%s:mem_list is empty.\n",
1401 __func__);
1402 break;
1403 }
1404
1405 list_for_each_entry_safe(m_node, tm_node,
1406 head, list) {
1407 ret = ipp_put_mem_node(drm_dev, c_node,
1408 m_node);
1409 if (ret) {
1410 DRM_ERROR("failed to put m_node.\n");
1411 goto err_clear;
1412 }
1413 }
1414 }
1415 break;
1416 case IPP_CMD_WB:
1417 /* destination memory list */
1418 head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
1419
1420 if (list_empty(head)) {
1421 DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
1422 break;
1423 }
1424
1425 list_for_each_entry_safe(m_node, tm_node, head, list) {
1426 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1427 if (ret) {
1428 DRM_ERROR("failed to put m_node.\n");
1429 goto err_clear;
1430 }
1431 }
1432 break;
1433 case IPP_CMD_OUTPUT:
1434 /* source memory list */
1435 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1436
1437 if (list_empty(head)) {
1438 DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
1439 break;
1440 }
1441
1442 list_for_each_entry_safe(m_node, tm_node, head, list) {
1443 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1444 if (ret) {
1445 DRM_ERROR("failed to put m_node.\n");
1446 goto err_clear;
1447 }
1448 }
1449 break;
1450 default:
1451 DRM_ERROR("invalid operations.\n");
1452 ret = -EINVAL;
1453 goto err_clear;
1454 }
1455
1456err_clear:
1457 /* stop operations */
1458 if (ippdrv->stop)
1459 ippdrv->stop(ippdrv->dev, property->cmd);
1460
1461 return ret;
1462}
1463
/*
 * Command workqueue handler: execute a queued play/resume or
 * stop/pause request for a command node, serialized by cmd_lock.
 *
 * For PLAY/RESUME on an M2M command it also waits (200 ms timeout) for
 * the event thread to signal start_complete, i.e. for the transfer to
 * finish.  For STOP/PAUSE it signals stop_complete so the ioctl side
 * (exynos_drm_ipp_cmd_ctrl()) can proceed.
 */
void ipp_sched_cmd(struct work_struct *work)
{
	/*
	 * NOTE(review): this cast assumes 'work' is the first member of
	 * struct drm_exynos_ipp_cmd_work (it is, per the header) --
	 * container_of() would express the intent more robustly.
	 */
	struct drm_exynos_ipp_cmd_work *cmd_work =
		(struct drm_exynos_ipp_cmd_work *)work;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_property *property;
	int ret;

	DRM_DEBUG_KMS("%s\n", __func__);

	ippdrv = cmd_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("invalid ippdrv list.\n");
		return;
	}

	c_node = cmd_work->c_node;
	if (!c_node) {
		DRM_ERROR("invalid command node list.\n");
		return;
	}

	mutex_lock(&c_node->cmd_lock);

	property = &c_node->property;

	switch (cmd_work->ctrl) {
	case IPP_CTRL_PLAY:
	case IPP_CTRL_RESUME:
		ret = ipp_start_property(ippdrv, c_node);
		if (ret) {
			DRM_ERROR("failed to start property:prop_id[%d]\n",
				c_node->property.prop_id);
			goto err_unlock;
		}

		/*
		 * M2M case supports wait_completion of transfer.
		 * because M2M case supports single unit operation
		 * with multiple queue.
		 * M2M need to wait completion of data transfer.
		 */
		if (ipp_is_m2m_cmd(property->cmd)) {
			if (!wait_for_completion_timeout
			    (&c_node->start_complete, msecs_to_jiffies(200))) {
				DRM_ERROR("timeout event:prop_id[%d]\n",
					c_node->property.prop_id);
				goto err_unlock;
			}
		}
		break;
	case IPP_CTRL_STOP:
	case IPP_CTRL_PAUSE:
		ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
			c_node);
		if (ret) {
			DRM_ERROR("failed to stop property.\n");
			goto err_unlock;
		}

		/* unblock the waiting ioctl handler */
		complete(&c_node->stop_complete);
		break;
	default:
		DRM_ERROR("unknown control type\n");
		break;
	}

	DRM_DEBUG_KMS("%s:ctrl[%d] done.\n", __func__, cmd_work->ctrl);

err_unlock:
	mutex_unlock(&c_node->cmd_lock);
}
1537
/*
 * Deliver a buffer-done event to userspace: put the finished memory
 * node(s) for the command type, stamp the oldest pending event with
 * time/prop_id/buffer ids and move it onto the file's event list,
 * waking any poll/read waiter.
 *
 * @buf_id: per-ops buffer ids reported by the driver interrupt.
 *
 * Returns 0 on success (including the "nothing pending" bypass cases)
 * or a negative error code.
 */
static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
{
	struct drm_device *drm_dev = ippdrv->drm_dev;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_queue_buf qbuf;
	struct drm_exynos_ipp_send_event *e;
	struct list_head *head;
	struct timeval now;
	unsigned long flags;
	u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
	int ret, i;

	for_each_ipp_ops(i)
		DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
			i ? "dst" : "src", buf_id[i]);

	if (!drm_dev) {
		DRM_ERROR("failed to get drm_dev.\n");
		return -EINVAL;
	}

	/*
	 * NOTE(review): 'property' is the address of an embedded struct
	 * and can never be NULL -- this check is dead code.
	 */
	if (!property) {
		DRM_ERROR("failed to get property.\n");
		return -EINVAL;
	}

	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("%s:event list is empty.\n", __func__);
		return 0;
	}

	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
		return 0;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		/* M2M: retire the head node of both src and dst lists */
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);
			/*
			 * NOTE(review): list_first_entry() never returns
			 * NULL; the ipp_check_mem_list() call above is the
			 * actual guard against an empty list.
			 */
			if (!m_node) {
				DRM_ERROR("empty memory node.\n");
				return -ENOMEM;
			}

			tbuf_id[i] = m_node->buf_id;
			DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
				i ? "dst" : "src", tbuf_id[i]);

			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret)
				DRM_ERROR("failed to put m_node.\n");
		}
		break;
	case IPP_CMD_WB:
		/* clear buf for finding */
		memset(&qbuf, 0x0, sizeof(qbuf));
		qbuf.ops_id = EXYNOS_DRM_OPS_DST;
		qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];

		/* get memory node entry */
		m_node = ipp_find_mem_node(c_node, &qbuf);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			return -ENOMEM;
		}

		tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		m_node = list_first_entry(head,
			struct drm_exynos_ipp_mem_node, list);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			return -ENOMEM;
		}

		tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		return -EINVAL;
	}

	/* mismatch is logged but delivery continues with the queued id */
	if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
		DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
			tbuf_id[1], buf_id[1], property->prop_id);

	/*
	 * command node have event list of destination buffer
	 * If destination buffer enqueue to mem list,
	 * then we make event and link to event list tail.
	 * so, we get first event for first enqueued buffer.
	 */
	e = list_first_entry(&c_node->event_list,
		struct drm_exynos_ipp_send_event, base.link);

	if (!e) {
		DRM_ERROR("empty event.\n");
		return -EINVAL;
	}

	do_gettimeofday(&now);
	DRM_DEBUG_KMS("%s:tv_sec[%ld]tv_usec[%ld]\n"
		, __func__, now.tv_sec, now.tv_usec);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;
	e->event.prop_id = property->prop_id;

	/* set buffer id about source destination */
	for_each_ipp_ops(i)
		e->event.buf_id[i] = tbuf_id[i];

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
	wake_up_interruptible(&e->base.file_priv->event_wait);
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);

	DRM_DEBUG_KMS("%s:done cmd[%d]prop_id[%d]buf_id[%d]\n", __func__,
		property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);

	return 0;
}
1679
1680void ipp_sched_event(struct work_struct *work)
1681{
1682 struct drm_exynos_ipp_event_work *event_work =
1683 (struct drm_exynos_ipp_event_work *)work;
1684 struct exynos_drm_ippdrv *ippdrv;
1685 struct drm_exynos_ipp_cmd_node *c_node;
1686 int ret;
1687
1688 if (!event_work) {
1689 DRM_ERROR("failed to get event_work.\n");
1690 return;
1691 }
1692
1693 DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__,
1694 event_work->buf_id[EXYNOS_DRM_OPS_DST]);
1695
1696 ippdrv = event_work->ippdrv;
1697 if (!ippdrv) {
1698 DRM_ERROR("failed to get ipp driver.\n");
1699 return;
1700 }
1701
1702 c_node = ippdrv->c_node;
1703 if (!c_node) {
1704 DRM_ERROR("failed to get command node.\n");
1705 return;
1706 }
1707
1708 /*
1709 * IPP supports command thread, event thread synchronization.
1710 * If IPP close immediately from user land, then IPP make
1711 * synchronization with command thread, so make complete event.
1712 * or going out operations.
1713 */
1714 if (c_node->state != IPP_STATE_START) {
1715 DRM_DEBUG_KMS("%s:bypass state[%d]prop_id[%d]\n",
1716 __func__, c_node->state, c_node->property.prop_id);
1717 goto err_completion;
1718 }
1719
1720 mutex_lock(&c_node->event_lock);
1721
1722 ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
1723 if (ret) {
1724 DRM_ERROR("failed to send event.\n");
1725 goto err_completion;
1726 }
1727
1728err_completion:
1729 if (ipp_is_m2m_cmd(c_node->property.cmd))
1730 complete(&c_node->start_complete);
1731
1732 mutex_unlock(&c_node->event_lock);
1733}
1734
1735static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1736{
1737 struct ipp_context *ctx = get_ipp_context(dev);
1738 struct exynos_drm_ippdrv *ippdrv;
1739 int ret, count = 0;
1740
1741 DRM_DEBUG_KMS("%s\n", __func__);
1742
1743 /* get ipp driver entry */
1744 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1745 ippdrv->drm_dev = drm_dev;
1746
1747 ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
1748 &ippdrv->ipp_id);
1749 if (ret) {
1750 DRM_ERROR("failed to create id.\n");
1751 goto err_idr;
1752 }
1753
1754 DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]ipp_id[%d]\n", __func__,
1755 count++, (int)ippdrv, ippdrv->ipp_id);
1756
1757 if (ippdrv->ipp_id == 0) {
1758 DRM_ERROR("failed to get ipp_id[%d]\n",
1759 ippdrv->ipp_id);
1760 goto err_idr;
1761 }
1762
1763 /* store parent device for node */
1764 ippdrv->parent_dev = dev;
1765
1766 /* store event work queue and handler */
1767 ippdrv->event_workq = ctx->event_workq;
1768 ippdrv->sched_event = ipp_sched_event;
1769 INIT_LIST_HEAD(&ippdrv->cmd_list);
1770
1771 if (is_drm_iommu_supported(drm_dev)) {
1772 ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
1773 if (ret) {
1774 DRM_ERROR("failed to activate iommu\n");
1775 goto err_iommu;
1776 }
1777 }
1778 }
1779
1780 return 0;
1781
1782err_iommu:
1783 /* get ipp driver entry */
1784 list_for_each_entry_reverse(ippdrv, &exynos_drm_ippdrv_list, drv_list)
1785 if (is_drm_iommu_supported(drm_dev))
1786 drm_iommu_detach_device(drm_dev, ippdrv->dev);
1787
1788err_idr:
1789 idr_remove_all(&ctx->ipp_idr);
1790 idr_remove_all(&ctx->prop_idr);
1791 idr_destroy(&ctx->ipp_idr);
1792 idr_destroy(&ctx->prop_idr);
1793 return ret;
1794}
1795
/*
 * exynos_drm subdrv .remove callback: detach iommu mappings and
 * unregister every ipp driver from the drm device.
 */
static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
	struct exynos_drm_ippdrv *ippdrv;

	DRM_DEBUG_KMS("%s\n", __func__);

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ippdrv->drm_dev = NULL;
		/*
		 * NOTE(review): if exynos_drm_ippdrv_unregister() deletes
		 * the entry from exynos_drm_ippdrv_list, this loop must be
		 * list_for_each_entry_safe() -- verify against its
		 * definition (not visible in this file).
		 */
		exynos_drm_ippdrv_unregister(ippdrv);
	}
}
1811
1812static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1813 struct drm_file *file)
1814{
1815 struct drm_exynos_file_private *file_priv = file->driver_priv;
1816 struct exynos_drm_ipp_private *priv;
1817
1818 DRM_DEBUG_KMS("%s\n", __func__);
1819
1820 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1821 if (!priv) {
1822 DRM_ERROR("failed to allocate priv.\n");
1823 return -ENOMEM;
1824 }
1825 priv->dev = dev;
1826 file_priv->ipp_priv = priv;
1827
1828 INIT_LIST_HEAD(&priv->event_list);
1829
1830 DRM_DEBUG_KMS("%s:done priv[0x%x]\n", __func__, (int)priv);
1831
1832 return 0;
1833}
1834
/*
 * Per-open-file teardown: stop and clean every command node that this
 * file created, then free the IPP private data.
 */
static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
	int count = 0;

	DRM_DEBUG_KMS("%s:for priv[0x%x]\n", __func__, (int)priv);

	if (list_empty(&exynos_drm_ippdrv_list)) {
		DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
		goto err_clear;
	}

	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		if (list_empty(&ippdrv->cmd_list))
			continue;

		list_for_each_entry_safe(c_node, tc_node,
			&ippdrv->cmd_list, list) {
			DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n",
				__func__, count++, (int)ippdrv);

			if (c_node->priv == priv) {
				/*
				 * The process may have been killed and the
				 * file closed without issuing a stop
				 * command, so stop any command still
				 * running here before cleaning up.
				 */
				if (c_node->state == IPP_STATE_START) {
					ipp_stop_property(drm_dev, ippdrv,
						c_node);
					c_node->state = IPP_STATE_STOP;
				}

				ippdrv->dedicated = false;
				ipp_clean_cmd_node(c_node);
				if (list_empty(&ippdrv->cmd_list))
					pm_runtime_put_sync(ippdrv->dev);
			}
		}
	}

err_clear:
	kfree(priv);
	return;
}
1885
1886static int ipp_probe(struct platform_device *pdev)
1887{
1888 struct device *dev = &pdev->dev;
1889 struct ipp_context *ctx;
1890 struct exynos_drm_subdrv *subdrv;
1891 int ret;
1892
1893 ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
1894 if (!ctx)
1895 return -ENOMEM;
1896
1897 DRM_DEBUG_KMS("%s\n", __func__);
1898
1899 mutex_init(&ctx->ipp_lock);
1900 mutex_init(&ctx->prop_lock);
1901
1902 idr_init(&ctx->ipp_idr);
1903 idr_init(&ctx->prop_idr);
1904
1905 /*
1906 * create single thread for ipp event
1907 * IPP supports event thread for IPP drivers.
1908 * IPP driver send event_work to this thread.
1909 * and IPP event thread send event to user process.
1910 */
1911 ctx->event_workq = create_singlethread_workqueue("ipp_event");
1912 if (!ctx->event_workq) {
1913 dev_err(dev, "failed to create event workqueue\n");
1914 return -EINVAL;
1915 }
1916
1917 /*
1918 * create single thread for ipp command
1919 * IPP supports command thread for user process.
1920 * user process make command node using set property ioctl.
1921 * and make start_work and send this work to command thread.
1922 * and then this command thread start property.
1923 */
1924 ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
1925 if (!ctx->cmd_workq) {
1926 dev_err(dev, "failed to create cmd workqueue\n");
1927 ret = -EINVAL;
1928 goto err_event_workq;
1929 }
1930
1931 /* set sub driver informations */
1932 subdrv = &ctx->subdrv;
1933 subdrv->dev = dev;
1934 subdrv->probe = ipp_subdrv_probe;
1935 subdrv->remove = ipp_subdrv_remove;
1936 subdrv->open = ipp_subdrv_open;
1937 subdrv->close = ipp_subdrv_close;
1938
1939 platform_set_drvdata(pdev, ctx);
1940
1941 ret = exynos_drm_subdrv_register(subdrv);
1942 if (ret < 0) {
1943 DRM_ERROR("failed to register drm ipp device.\n");
1944 goto err_cmd_workq;
1945 }
1946
1947 dev_info(&pdev->dev, "drm ipp registered successfully.\n");
1948
1949 return 0;
1950
1951err_cmd_workq:
1952 destroy_workqueue(ctx->cmd_workq);
1953err_event_workq:
1954 destroy_workqueue(ctx->event_workq);
1955 return ret;
1956}
1957
/*
 * Platform driver remove: tear down in reverse order of ipp_probe() --
 * unregister the sub-driver first so no new work arrives, then drop
 * the idrs and locks, and finally destroy both workqueues.
 */
static int ipp_remove(struct platform_device *pdev)
{
	struct ipp_context *ctx = platform_get_drvdata(pdev);

	DRM_DEBUG_KMS("%s\n", __func__);

	/* unregister sub driver */
	exynos_drm_subdrv_unregister(&ctx->subdrv);

	/* remove,destroy ipp idr */
	idr_remove_all(&ctx->ipp_idr);
	idr_remove_all(&ctx->prop_idr);
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);

	mutex_destroy(&ctx->ipp_lock);
	mutex_destroy(&ctx->prop_lock);

	/* destroy command, event work queue */
	destroy_workqueue(ctx->cmd_workq);
	destroy_workqueue(ctx->event_workq);

	return 0;
}
1982
/*
 * Placeholder for IPP block power control; currently it only logs the
 * request.  @enable is true on the resume paths and false on the
 * suspend paths (see the PM callbacks below).
 */
static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
{
	DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);

	return 0;
}
1989
1990#ifdef CONFIG_PM_SLEEP
1991static int ipp_suspend(struct device *dev)
1992{
1993 struct ipp_context *ctx = get_ipp_context(dev);
1994
1995 DRM_DEBUG_KMS("%s\n", __func__);
1996
1997 if (pm_runtime_suspended(dev))
1998 return 0;
1999
2000 return ipp_power_ctrl(ctx, false);
2001}
2002
2003static int ipp_resume(struct device *dev)
2004{
2005 struct ipp_context *ctx = get_ipp_context(dev);
2006
2007 DRM_DEBUG_KMS("%s\n", __func__);
2008
2009 if (!pm_runtime_suspended(dev))
2010 return ipp_power_ctrl(ctx, true);
2011
2012 return 0;
2013}
2014#endif
2015
2016#ifdef CONFIG_PM_RUNTIME
2017static int ipp_runtime_suspend(struct device *dev)
2018{
2019 struct ipp_context *ctx = get_ipp_context(dev);
2020
2021 DRM_DEBUG_KMS("%s\n", __func__);
2022
2023 return ipp_power_ctrl(ctx, false);
2024}
2025
2026static int ipp_runtime_resume(struct device *dev)
2027{
2028 struct ipp_context *ctx = get_ipp_context(dev);
2029
2030 DRM_DEBUG_KMS("%s\n", __func__);
2031
2032 return ipp_power_ctrl(ctx, true);
2033}
2034#endif
2035
/* System-sleep and runtime PM hooks; both funnel into ipp_power_ctrl(). */
static const struct dev_pm_ops ipp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
	SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
};
2040
/* Platform driver glue for the exynos-drm-ipp device node. */
struct platform_driver ipp_driver = {
	.probe		= ipp_probe,
	.remove		= ipp_remove,
	.driver		= {
		.name	= "exynos-drm-ipp",
		.owner	= THIS_MODULE,
		.pm	= &ipp_pm_ops,
	},
};
2050
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
new file mode 100644
index 000000000000..4cadbea7dbde
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -0,0 +1,252 @@
1/*
2 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
3 *
4 * Authors:
5 * Eunchul Kim <chulspro.kim@samsung.com>
6 * Jinyoung Jeon <jy0.jeon@samsung.com>
7 * Sangmin Lee <lsmin.lee@samsung.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 */
14
15#ifndef _EXYNOS_DRM_IPP_H_
16#define _EXYNOS_DRM_IPP_H_
17
18#define for_each_ipp_ops(pos) \
19 for (pos = 0; pos < EXYNOS_DRM_OPS_MAX; pos++)
20#define for_each_ipp_planar(pos) \
21 for (pos = 0; pos < EXYNOS_DRM_PLANAR_MAX; pos++)
22
23#define IPP_GET_LCD_WIDTH _IOR('F', 302, int)
24#define IPP_GET_LCD_HEIGHT _IOR('F', 303, int)
25#define IPP_SET_WRITEBACK _IOW('F', 304, u32)
26
27/* definition of state */
28enum drm_exynos_ipp_state {
29 IPP_STATE_IDLE,
30 IPP_STATE_START,
31 IPP_STATE_STOP,
32};
33
34/*
35 * A structure of command work information.
36 * @work: work structure.
37 * @ippdrv: current work ippdrv.
38 * @c_node: command node information.
39 * @ctrl: command control.
40 */
41struct drm_exynos_ipp_cmd_work {
42 struct work_struct work;
43 struct exynos_drm_ippdrv *ippdrv;
44 struct drm_exynos_ipp_cmd_node *c_node;
45 enum drm_exynos_ipp_ctrl ctrl;
46};
47
48/*
49 * A structure of command node.
50 *
51 * @priv: IPP private information.
52 * @list: list head to command queue information.
53 * @event_list: list head of event.
54 * @mem_list: list head to source,destination memory queue information.
55 * @cmd_lock: lock for synchronization of access to ioctl.
56 * @mem_lock: lock for synchronization of access to memory nodes.
57 * @event_lock: lock for synchronization of access to scheduled event.
58 * @start_complete: completion of start of command.
59 * @stop_complete: completion of stop of command.
60 * @property: property information.
61 * @start_work: start command work structure.
62 * @stop_work: stop command work structure.
63 * @event_work: event work structure.
64 * @state: state of command node.
65 */
66struct drm_exynos_ipp_cmd_node {
67 struct exynos_drm_ipp_private *priv;
68 struct list_head list;
69 struct list_head event_list;
70 struct list_head mem_list[EXYNOS_DRM_OPS_MAX];
71 struct mutex cmd_lock;
72 struct mutex mem_lock;
73 struct mutex event_lock;
74 struct completion start_complete;
75 struct completion stop_complete;
76 struct drm_exynos_ipp_property property;
77 struct drm_exynos_ipp_cmd_work *start_work;
78 struct drm_exynos_ipp_cmd_work *stop_work;
79 struct drm_exynos_ipp_event_work *event_work;
80 enum drm_exynos_ipp_state state;
81};
82
83/*
84 * A structure of buffer information.
85 *
86 * @handles: Y, Cb, Cr each gem object handle.
87 * @base: Y, Cb, Cr each planar address.
88 */
89struct drm_exynos_ipp_buf_info {
90 unsigned long handles[EXYNOS_DRM_PLANAR_MAX];
91 dma_addr_t base[EXYNOS_DRM_PLANAR_MAX];
92};
93
94/*
95 * A structure of wb setting information.
96 *
97 * @enable: enable flag for wb.
98 * @refresh: HZ of the refresh rate.
99 */
100struct drm_exynos_ipp_set_wb {
101 __u32 enable;
102 __u32 refresh;
103};
104
105/*
106 * A structure of event work information.
107 *
108 * @work: work structure.
109 * @ippdrv: current work ippdrv.
110 * @buf_id: id of src, dst buffer.
111 */
112struct drm_exynos_ipp_event_work {
113 struct work_struct work;
114 struct exynos_drm_ippdrv *ippdrv;
115 u32 buf_id[EXYNOS_DRM_OPS_MAX];
116};
117
118/*
119 * A structure of source,destination operations.
120 *
121 * @set_fmt: set format of image.
122 * @set_transf: set transform(rotations, flip).
123 * @set_size: set size of region.
124 * @set_addr: set address for dma.
125 */
126struct exynos_drm_ipp_ops {
127 int (*set_fmt)(struct device *dev, u32 fmt);
128 int (*set_transf)(struct device *dev,
129 enum drm_exynos_degree degree,
130 enum drm_exynos_flip flip, bool *swap);
131 int (*set_size)(struct device *dev, int swap,
132 struct drm_exynos_pos *pos, struct drm_exynos_sz *sz);
133 int (*set_addr)(struct device *dev,
134 struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
135 enum drm_exynos_ipp_buf_type buf_type);
136};
137
138/*
139 * A structure of ipp driver.
140 *
141 * @drv_list: list head for registered sub driver information.
142 * @parent_dev: parent device information.
143 * @dev: platform device.
144 * @drm_dev: drm device.
145 * @ipp_id: id of ipp driver.
146 * @dedicated: dedicated ipp device.
147 * @ops: source, destination operations.
148 * @event_workq: event work queue.
149 * @c_node: current command information.
150 * @cmd_list: list head for command information.
151 * @prop_list: property informations of current ipp driver.
152 * @check_property: check property about format, size, buffer.
153 * @reset: reset ipp block.
154 * @start: ipp each device start.
155 * @stop: ipp each device stop.
156 * @sched_event: work schedule handler.
157 */
158struct exynos_drm_ippdrv {
159 struct list_head drv_list;
160 struct device *parent_dev;
161 struct device *dev;
162 struct drm_device *drm_dev;
163 u32 ipp_id;
164 bool dedicated;
165 struct exynos_drm_ipp_ops *ops[EXYNOS_DRM_OPS_MAX];
166 struct workqueue_struct *event_workq;
167 struct drm_exynos_ipp_cmd_node *c_node;
168 struct list_head cmd_list;
169 struct drm_exynos_ipp_prop_list *prop_list;
170
171 int (*check_property)(struct device *dev,
172 struct drm_exynos_ipp_property *property);
173 int (*reset)(struct device *dev);
174 int (*start)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
175 void (*stop)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
176 void (*sched_event)(struct work_struct *work);
177};
178
179#ifdef CONFIG_DRM_EXYNOS_IPP
180extern int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv);
181extern int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv);
182extern int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
183 struct drm_file *file);
184extern int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
185 struct drm_file *file);
186extern int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
187 struct drm_file *file);
188extern int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
189 struct drm_file *file);
190extern int exynos_drm_ippnb_register(struct notifier_block *nb);
191extern int exynos_drm_ippnb_unregister(struct notifier_block *nb);
192extern int exynos_drm_ippnb_send_event(unsigned long val, void *v);
193extern void ipp_sched_cmd(struct work_struct *work);
194extern void ipp_sched_event(struct work_struct *work);
195
196#else
197static inline int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
198{
199 return -ENODEV;
200}
201
202static inline int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
203{
204 return -ENODEV;
205}
206
207static inline int exynos_drm_ipp_get_property(struct drm_device *drm_dev,
208 void *data,
209 struct drm_file *file_priv)
210{
211 return -ENOTTY;
212}
213
214static inline int exynos_drm_ipp_set_property(struct drm_device *drm_dev,
215 void *data,
216 struct drm_file *file_priv)
217{
218 return -ENOTTY;
219}
220
221static inline int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev,
222 void *data,
223 struct drm_file *file)
224{
225 return -ENOTTY;
226}
227
228static inline int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev,
229 void *data,
230 struct drm_file *file)
231{
232 return -ENOTTY;
233}
234
235static inline int exynos_drm_ippnb_register(struct notifier_block *nb)
236{
237 return -ENODEV;
238}
239
240static inline int exynos_drm_ippnb_unregister(struct notifier_block *nb)
241{
242 return -ENODEV;
243}
244
245static inline int exynos_drm_ippnb_send_event(unsigned long val, void *v)
246{
247 return -ENOTTY;
248}
249#endif
250
251#endif /* _EXYNOS_DRM_IPP_H_ */
252
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 60b877a388c2..83efc662d65a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -40,7 +40,7 @@ static const uint32_t formats[] = {
40 * CRTC ---------------- 40 * CRTC ----------------
41 * ^ start ^ end 41 * ^ start ^ end
42 * 42 *
43 * There are six cases from a to b. 43 * There are six cases from a to f.
44 * 44 *
45 * <----- SCREEN -----> 45 * <----- SCREEN ----->
46 * 0 last 46 * 0 last
@@ -93,11 +93,9 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
93 } 93 }
94 94
95 overlay->dma_addr[i] = buffer->dma_addr; 95 overlay->dma_addr[i] = buffer->dma_addr;
96 overlay->vaddr[i] = buffer->kvaddr;
97 96
98 DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n", 97 DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
99 i, (unsigned long)overlay->vaddr[i], 98 i, (unsigned long)overlay->dma_addr[i]);
100 (unsigned long)overlay->dma_addr[i]);
101 } 99 }
102 100
103 actual_w = exynos_plane_get_size(crtc_x, crtc_w, crtc->mode.hdisplay); 101 actual_w = exynos_plane_get_size(crtc_x, crtc_w, crtc->mode.hdisplay);
@@ -106,16 +104,12 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
106 if (crtc_x < 0) { 104 if (crtc_x < 0) {
107 if (actual_w) 105 if (actual_w)
108 src_x -= crtc_x; 106 src_x -= crtc_x;
109 else
110 src_x += crtc_w;
111 crtc_x = 0; 107 crtc_x = 0;
112 } 108 }
113 109
114 if (crtc_y < 0) { 110 if (crtc_y < 0) {
115 if (actual_h) 111 if (actual_h)
116 src_y -= crtc_y; 112 src_y -= crtc_y;
117 else
118 src_y += crtc_h;
119 crtc_y = 0; 113 crtc_y = 0;
120 } 114 }
121 115
@@ -204,7 +198,6 @@ exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
204 return ret; 198 return ret;
205 199
206 plane->crtc = crtc; 200 plane->crtc = crtc;
207 plane->fb = crtc->fb;
208 201
209 exynos_plane_commit(plane); 202 exynos_plane_commit(plane);
210 exynos_plane_dpms(plane, DRM_MODE_DPMS_ON); 203 exynos_plane_dpms(plane, DRM_MODE_DPMS_ON);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
new file mode 100644
index 000000000000..e9e83ef688f0
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -0,0 +1,839 @@
1/*
2 * Copyright (C) 2012 Samsung Electronics Co.Ltd
3 * Authors:
4 * YoungJun Cho <yj44.cho@samsung.com>
5 * Eunchul Kim <chulspro.kim@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/err.h>
15#include <linux/interrupt.h>
16#include <linux/io.h>
17#include <linux/platform_device.h>
18#include <linux/clk.h>
19#include <linux/pm_runtime.h>
20
21#include <drm/drmP.h>
22#include <drm/exynos_drm.h>
23#include "regs-rotator.h"
24#include "exynos_drm.h"
25#include "exynos_drm_ipp.h"
26
27/*
28 * Rotator supports image crop/rotation and input/output DMA operations.
29 * input DMA reads image data from the memory.
30 * output DMA writes image data to memory.
31 *
32 * M2M operation: supports crop/scale/rotation/csc and so on.
33 * Memory ----> Rotator H/W ----> Memory.
34 */
35
36/*
37 * TODO
38 * 1. check suspend/resume api if needed.
39 * 2. need to check use case platform_device_id.
40 * 3. check src/dst size width, height.
41 * 4. need to add supported list in prop_list.
42 */
43
44#define get_rot_context(dev) platform_get_drvdata(to_platform_device(dev))
45#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
46 struct rot_context, ippdrv);
47#define rot_read(offset) readl(rot->regs + (offset))
48#define rot_write(cfg, offset) writel(cfg, rot->regs + (offset))
49
50enum rot_irq_status {
51 ROT_IRQ_STATUS_COMPLETE = 8,
52 ROT_IRQ_STATUS_ILLEGAL = 9,
53};
54
55/*
56 * A structure of limitation.
57 *
58 * @min_w: minimum width.
59 * @min_h: minimum height.
60 * @max_w: maximum width.
61 * @max_h: maximum height.
62 * @align: align size.
63 */
64struct rot_limit {
65 u32 min_w;
66 u32 min_h;
67 u32 max_w;
68 u32 max_h;
69 u32 align;
70};
71
72/*
73 * A structure of limitation table.
74 *
75 * @ycbcr420_2p: case of YUV.
76 * @rgb888: case of RGB.
77 */
78struct rot_limit_table {
79 struct rot_limit ycbcr420_2p;
80 struct rot_limit rgb888;
81};
82
83/*
84 * A structure of rotator context.
85 * @ippdrv: prepare initialization using ippdrv.
86 * @regs_res: register resources.
87 * @regs: memory mapped io registers.
88 * @clock: rotator gate clock.
89 * @limit_tbl: limitation of rotator.
90 * @irq: irq number.
91 * @cur_buf_id: current operation buffer id.
92 * @suspended: suspended state.
93 */
94struct rot_context {
95 struct exynos_drm_ippdrv ippdrv;
96 struct resource *regs_res;
97 void __iomem *regs;
98 struct clk *clock;
99 struct rot_limit_table *limit_tbl;
100 int irq;
101 int cur_buf_id[EXYNOS_DRM_OPS_MAX];
102 bool suspended;
103};
104
105static void rotator_reg_set_irq(struct rot_context *rot, bool enable)
106{
107 u32 val = rot_read(ROT_CONFIG);
108
109 if (enable == true)
110 val |= ROT_CONFIG_IRQ;
111 else
112 val &= ~ROT_CONFIG_IRQ;
113
114 rot_write(val, ROT_CONFIG);
115}
116
117static u32 rotator_reg_get_fmt(struct rot_context *rot)
118{
119 u32 val = rot_read(ROT_CONTROL);
120
121 val &= ROT_CONTROL_FMT_MASK;
122
123 return val;
124}
125
126static enum rot_irq_status rotator_reg_get_irq_status(struct rot_context *rot)
127{
128 u32 val = rot_read(ROT_STATUS);
129
130 val = ROT_STATUS_IRQ(val);
131
132 if (val == ROT_STATUS_IRQ_VAL_COMPLETE)
133 return ROT_IRQ_STATUS_COMPLETE;
134
135 return ROT_IRQ_STATUS_ILLEGAL;
136}
137
138static irqreturn_t rotator_irq_handler(int irq, void *arg)
139{
140 struct rot_context *rot = arg;
141 struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
142 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node;
143 struct drm_exynos_ipp_event_work *event_work = c_node->event_work;
144 enum rot_irq_status irq_status;
145 u32 val;
146
147 /* Get execution result */
148 irq_status = rotator_reg_get_irq_status(rot);
149
150 /* clear status */
151 val = rot_read(ROT_STATUS);
152 val |= ROT_STATUS_IRQ_PENDING((u32)irq_status);
153 rot_write(val, ROT_STATUS);
154
155 if (irq_status == ROT_IRQ_STATUS_COMPLETE) {
156 event_work->ippdrv = ippdrv;
157 event_work->buf_id[EXYNOS_DRM_OPS_DST] =
158 rot->cur_buf_id[EXYNOS_DRM_OPS_DST];
159 queue_work(ippdrv->event_workq,
160 (struct work_struct *)event_work);
161 } else
162 DRM_ERROR("the SFR is set illegally\n");
163
164 return IRQ_HANDLED;
165}
166
167static void rotator_align_size(struct rot_context *rot, u32 fmt, u32 *hsize,
168 u32 *vsize)
169{
170 struct rot_limit_table *limit_tbl = rot->limit_tbl;
171 struct rot_limit *limit;
172 u32 mask, val;
173
174 /* Get size limit */
175 if (fmt == ROT_CONTROL_FMT_RGB888)
176 limit = &limit_tbl->rgb888;
177 else
178 limit = &limit_tbl->ycbcr420_2p;
179
180 /* Get mask for rounding to nearest aligned val */
181 mask = ~((1 << limit->align) - 1);
182
183 /* Set aligned width */
184 val = ROT_ALIGN(*hsize, limit->align, mask);
185 if (val < limit->min_w)
186 *hsize = ROT_MIN(limit->min_w, mask);
187 else if (val > limit->max_w)
188 *hsize = ROT_MAX(limit->max_w, mask);
189 else
190 *hsize = val;
191
192 /* Set aligned height */
193 val = ROT_ALIGN(*vsize, limit->align, mask);
194 if (val < limit->min_h)
195 *vsize = ROT_MIN(limit->min_h, mask);
196 else if (val > limit->max_h)
197 *vsize = ROT_MAX(limit->max_h, mask);
198 else
199 *vsize = val;
200}
201
202static int rotator_src_set_fmt(struct device *dev, u32 fmt)
203{
204 struct rot_context *rot = dev_get_drvdata(dev);
205 u32 val;
206
207 val = rot_read(ROT_CONTROL);
208 val &= ~ROT_CONTROL_FMT_MASK;
209
210 switch (fmt) {
211 case DRM_FORMAT_NV12:
212 val |= ROT_CONTROL_FMT_YCBCR420_2P;
213 break;
214 case DRM_FORMAT_XRGB8888:
215 val |= ROT_CONTROL_FMT_RGB888;
216 break;
217 default:
218 DRM_ERROR("invalid image format\n");
219 return -EINVAL;
220 }
221
222 rot_write(val, ROT_CONTROL);
223
224 return 0;
225}
226
227static inline bool rotator_check_reg_fmt(u32 fmt)
228{
229 if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) ||
230 (fmt == ROT_CONTROL_FMT_RGB888))
231 return true;
232
233 return false;
234}
235
236static int rotator_src_set_size(struct device *dev, int swap,
237 struct drm_exynos_pos *pos,
238 struct drm_exynos_sz *sz)
239{
240 struct rot_context *rot = dev_get_drvdata(dev);
241 u32 fmt, hsize, vsize;
242 u32 val;
243
244 /* Get format */
245 fmt = rotator_reg_get_fmt(rot);
246 if (!rotator_check_reg_fmt(fmt)) {
247 DRM_ERROR("%s:invalid format.\n", __func__);
248 return -EINVAL;
249 }
250
251 /* Align buffer size */
252 hsize = sz->hsize;
253 vsize = sz->vsize;
254 rotator_align_size(rot, fmt, &hsize, &vsize);
255
256 /* Set buffer size configuration */
257 val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
258 rot_write(val, ROT_SRC_BUF_SIZE);
259
260 /* Set crop image position configuration */
261 val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
262 rot_write(val, ROT_SRC_CROP_POS);
263 val = ROT_SRC_CROP_SIZE_H(pos->h) | ROT_SRC_CROP_SIZE_W(pos->w);
264 rot_write(val, ROT_SRC_CROP_SIZE);
265
266 return 0;
267}
268
269static int rotator_src_set_addr(struct device *dev,
270 struct drm_exynos_ipp_buf_info *buf_info,
271 u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
272{
273 struct rot_context *rot = dev_get_drvdata(dev);
274 dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
275 u32 val, fmt, hsize, vsize;
276 int i;
277
278 /* Set current buf_id */
279 rot->cur_buf_id[EXYNOS_DRM_OPS_SRC] = buf_id;
280
281 switch (buf_type) {
282 case IPP_BUF_ENQUEUE:
283 /* Set address configuration */
284 for_each_ipp_planar(i)
285 addr[i] = buf_info->base[i];
286
287 /* Get format */
288 fmt = rotator_reg_get_fmt(rot);
289 if (!rotator_check_reg_fmt(fmt)) {
290 DRM_ERROR("%s:invalid format.\n", __func__);
291 return -EINVAL;
292 }
293
294 /* Re-set cb planar for NV12 format */
295 if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
296 !addr[EXYNOS_DRM_PLANAR_CB]) {
297
298 val = rot_read(ROT_SRC_BUF_SIZE);
299 hsize = ROT_GET_BUF_SIZE_W(val);
300 vsize = ROT_GET_BUF_SIZE_H(val);
301
302 /* Set cb planar */
303 addr[EXYNOS_DRM_PLANAR_CB] =
304 addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
305 }
306
307 for_each_ipp_planar(i)
308 rot_write(addr[i], ROT_SRC_BUF_ADDR(i));
309 break;
310 case IPP_BUF_DEQUEUE:
311 for_each_ipp_planar(i)
312 rot_write(0x0, ROT_SRC_BUF_ADDR(i));
313 break;
314 default:
315 /* Nothing to do */
316 break;
317 }
318
319 return 0;
320}
321
322static int rotator_dst_set_transf(struct device *dev,
323 enum drm_exynos_degree degree,
324 enum drm_exynos_flip flip, bool *swap)
325{
326 struct rot_context *rot = dev_get_drvdata(dev);
327 u32 val;
328
329 /* Set transform configuration */
330 val = rot_read(ROT_CONTROL);
331 val &= ~ROT_CONTROL_FLIP_MASK;
332
333 switch (flip) {
334 case EXYNOS_DRM_FLIP_VERTICAL:
335 val |= ROT_CONTROL_FLIP_VERTICAL;
336 break;
337 case EXYNOS_DRM_FLIP_HORIZONTAL:
338 val |= ROT_CONTROL_FLIP_HORIZONTAL;
339 break;
340 default:
341 /* Flip None */
342 break;
343 }
344
345 val &= ~ROT_CONTROL_ROT_MASK;
346
347 switch (degree) {
348 case EXYNOS_DRM_DEGREE_90:
349 val |= ROT_CONTROL_ROT_90;
350 break;
351 case EXYNOS_DRM_DEGREE_180:
352 val |= ROT_CONTROL_ROT_180;
353 break;
354 case EXYNOS_DRM_DEGREE_270:
355 val |= ROT_CONTROL_ROT_270;
356 break;
357 default:
358 /* Rotation 0 Degree */
359 break;
360 }
361
362 rot_write(val, ROT_CONTROL);
363
364 /* Check degree for setting buffer size swap */
365 if ((degree == EXYNOS_DRM_DEGREE_90) ||
366 (degree == EXYNOS_DRM_DEGREE_270))
367 *swap = true;
368 else
369 *swap = false;
370
371 return 0;
372}
373
374static int rotator_dst_set_size(struct device *dev, int swap,
375 struct drm_exynos_pos *pos,
376 struct drm_exynos_sz *sz)
377{
378 struct rot_context *rot = dev_get_drvdata(dev);
379 u32 val, fmt, hsize, vsize;
380
381 /* Get format */
382 fmt = rotator_reg_get_fmt(rot);
383 if (!rotator_check_reg_fmt(fmt)) {
384 DRM_ERROR("%s:invalid format.\n", __func__);
385 return -EINVAL;
386 }
387
388 /* Align buffer size */
389 hsize = sz->hsize;
390 vsize = sz->vsize;
391 rotator_align_size(rot, fmt, &hsize, &vsize);
392
393 /* Set buffer size configuration */
394 val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
395 rot_write(val, ROT_DST_BUF_SIZE);
396
397 /* Set crop image position configuration */
398 val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
399 rot_write(val, ROT_DST_CROP_POS);
400
401 return 0;
402}
403
404static int rotator_dst_set_addr(struct device *dev,
405 struct drm_exynos_ipp_buf_info *buf_info,
406 u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
407{
408 struct rot_context *rot = dev_get_drvdata(dev);
409 dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
410 u32 val, fmt, hsize, vsize;
411 int i;
412
413 /* Set current buf_id */
414 rot->cur_buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
415
416 switch (buf_type) {
417 case IPP_BUF_ENQUEUE:
418 /* Set address configuration */
419 for_each_ipp_planar(i)
420 addr[i] = buf_info->base[i];
421
422 /* Get format */
423 fmt = rotator_reg_get_fmt(rot);
424 if (!rotator_check_reg_fmt(fmt)) {
425 DRM_ERROR("%s:invalid format.\n", __func__);
426 return -EINVAL;
427 }
428
429 /* Re-set cb planar for NV12 format */
430 if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
431 !addr[EXYNOS_DRM_PLANAR_CB]) {
432 /* Get buf size */
433 val = rot_read(ROT_DST_BUF_SIZE);
434
435 hsize = ROT_GET_BUF_SIZE_W(val);
436 vsize = ROT_GET_BUF_SIZE_H(val);
437
438 /* Set cb planar */
439 addr[EXYNOS_DRM_PLANAR_CB] =
440 addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
441 }
442
443 for_each_ipp_planar(i)
444 rot_write(addr[i], ROT_DST_BUF_ADDR(i));
445 break;
446 case IPP_BUF_DEQUEUE:
447 for_each_ipp_planar(i)
448 rot_write(0x0, ROT_DST_BUF_ADDR(i));
449 break;
450 default:
451 /* Nothing to do */
452 break;
453 }
454
455 return 0;
456}
457
458static struct exynos_drm_ipp_ops rot_src_ops = {
459 .set_fmt = rotator_src_set_fmt,
460 .set_size = rotator_src_set_size,
461 .set_addr = rotator_src_set_addr,
462};
463
464static struct exynos_drm_ipp_ops rot_dst_ops = {
465 .set_transf = rotator_dst_set_transf,
466 .set_size = rotator_dst_set_size,
467 .set_addr = rotator_dst_set_addr,
468};
469
470static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
471{
472 struct drm_exynos_ipp_prop_list *prop_list;
473
474 DRM_DEBUG_KMS("%s\n", __func__);
475
476 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
477 if (!prop_list) {
478 DRM_ERROR("failed to alloc property list.\n");
479 return -ENOMEM;
480 }
481
482 prop_list->version = 1;
483 prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
484 (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
485 prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
486 (1 << EXYNOS_DRM_DEGREE_90) |
487 (1 << EXYNOS_DRM_DEGREE_180) |
488 (1 << EXYNOS_DRM_DEGREE_270);
489 prop_list->csc = 0;
490 prop_list->crop = 0;
491 prop_list->scale = 0;
492
493 ippdrv->prop_list = prop_list;
494
495 return 0;
496}
497
498static inline bool rotator_check_drm_fmt(u32 fmt)
499{
500 switch (fmt) {
501 case DRM_FORMAT_XRGB8888:
502 case DRM_FORMAT_NV12:
503 return true;
504 default:
505 DRM_DEBUG_KMS("%s:not support format\n", __func__);
506 return false;
507 }
508}
509
510static inline bool rotator_check_drm_flip(enum drm_exynos_flip flip)
511{
512 switch (flip) {
513 case EXYNOS_DRM_FLIP_NONE:
514 case EXYNOS_DRM_FLIP_VERTICAL:
515 case EXYNOS_DRM_FLIP_HORIZONTAL:
516 case EXYNOS_DRM_FLIP_BOTH:
517 return true;
518 default:
519 DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
520 return false;
521 }
522}
523
524static int rotator_ippdrv_check_property(struct device *dev,
525 struct drm_exynos_ipp_property *property)
526{
527 struct drm_exynos_ipp_config *src_config =
528 &property->config[EXYNOS_DRM_OPS_SRC];
529 struct drm_exynos_ipp_config *dst_config =
530 &property->config[EXYNOS_DRM_OPS_DST];
531 struct drm_exynos_pos *src_pos = &src_config->pos;
532 struct drm_exynos_pos *dst_pos = &dst_config->pos;
533 struct drm_exynos_sz *src_sz = &src_config->sz;
534 struct drm_exynos_sz *dst_sz = &dst_config->sz;
535 bool swap = false;
536
537 /* Check format configuration */
538 if (src_config->fmt != dst_config->fmt) {
539 DRM_DEBUG_KMS("%s:not support csc feature\n", __func__);
540 return -EINVAL;
541 }
542
543 if (!rotator_check_drm_fmt(dst_config->fmt)) {
544 DRM_DEBUG_KMS("%s:invalid format\n", __func__);
545 return -EINVAL;
546 }
547
548 /* Check transform configuration */
549 if (src_config->degree != EXYNOS_DRM_DEGREE_0) {
550 DRM_DEBUG_KMS("%s:not support source-side rotation\n",
551 __func__);
552 return -EINVAL;
553 }
554
555 switch (dst_config->degree) {
556 case EXYNOS_DRM_DEGREE_90:
557 case EXYNOS_DRM_DEGREE_270:
558 swap = true;
559 case EXYNOS_DRM_DEGREE_0:
560 case EXYNOS_DRM_DEGREE_180:
561 /* No problem */
562 break;
563 default:
564 DRM_DEBUG_KMS("%s:invalid degree\n", __func__);
565 return -EINVAL;
566 }
567
568 if (src_config->flip != EXYNOS_DRM_FLIP_NONE) {
569 DRM_DEBUG_KMS("%s:not support source-side flip\n", __func__);
570 return -EINVAL;
571 }
572
573 if (!rotator_check_drm_flip(dst_config->flip)) {
574 DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
575 return -EINVAL;
576 }
577
578 /* Check size configuration */
579 if ((src_pos->x + src_pos->w > src_sz->hsize) ||
580 (src_pos->y + src_pos->h > src_sz->vsize)) {
581 DRM_DEBUG_KMS("%s:out of source buffer bound\n", __func__);
582 return -EINVAL;
583 }
584
585 if (swap) {
586 if ((dst_pos->x + dst_pos->h > dst_sz->vsize) ||
587 (dst_pos->y + dst_pos->w > dst_sz->hsize)) {
588 DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
589 __func__);
590 return -EINVAL;
591 }
592
593 if ((src_pos->w != dst_pos->h) || (src_pos->h != dst_pos->w)) {
594 DRM_DEBUG_KMS("%s:not support scale feature\n",
595 __func__);
596 return -EINVAL;
597 }
598 } else {
599 if ((dst_pos->x + dst_pos->w > dst_sz->hsize) ||
600 (dst_pos->y + dst_pos->h > dst_sz->vsize)) {
601 DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
602 __func__);
603 return -EINVAL;
604 }
605
606 if ((src_pos->w != dst_pos->w) || (src_pos->h != dst_pos->h)) {
607 DRM_DEBUG_KMS("%s:not support scale feature\n",
608 __func__);
609 return -EINVAL;
610 }
611 }
612
613 return 0;
614}
615
616static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
617{
618 struct rot_context *rot = dev_get_drvdata(dev);
619 u32 val;
620
621 if (rot->suspended) {
622 DRM_ERROR("suspended state\n");
623 return -EPERM;
624 }
625
626 if (cmd != IPP_CMD_M2M) {
627 DRM_ERROR("not support cmd: %d\n", cmd);
628 return -EINVAL;
629 }
630
631 /* Set interrupt enable */
632 rotator_reg_set_irq(rot, true);
633
634 val = rot_read(ROT_CONTROL);
635 val |= ROT_CONTROL_START;
636
637 rot_write(val, ROT_CONTROL);
638
639 return 0;
640}
641
642static int rotator_probe(struct platform_device *pdev)
643{
644 struct device *dev = &pdev->dev;
645 struct rot_context *rot;
646 struct exynos_drm_ippdrv *ippdrv;
647 int ret;
648
649 rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL);
650 if (!rot) {
651 dev_err(dev, "failed to allocate rot\n");
652 return -ENOMEM;
653 }
654
655 rot->limit_tbl = (struct rot_limit_table *)
656 platform_get_device_id(pdev)->driver_data;
657
658 rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
659 rot->regs = devm_request_and_ioremap(dev, rot->regs_res);
660 if (!rot->regs) {
661 dev_err(dev, "failed to map register\n");
662 return -ENXIO;
663 }
664
665 rot->irq = platform_get_irq(pdev, 0);
666 if (rot->irq < 0) {
667 dev_err(dev, "failed to get irq\n");
668 return rot->irq;
669 }
670
671 ret = request_threaded_irq(rot->irq, NULL, rotator_irq_handler,
672 IRQF_ONESHOT, "drm_rotator", rot);
673 if (ret < 0) {
674 dev_err(dev, "failed to request irq\n");
675 return ret;
676 }
677
678 rot->clock = devm_clk_get(dev, "rotator");
679 if (IS_ERR_OR_NULL(rot->clock)) {
680 dev_err(dev, "failed to get clock\n");
681 ret = PTR_ERR(rot->clock);
682 goto err_clk_get;
683 }
684
685 pm_runtime_enable(dev);
686
687 ippdrv = &rot->ippdrv;
688 ippdrv->dev = dev;
689 ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &rot_src_ops;
690 ippdrv->ops[EXYNOS_DRM_OPS_DST] = &rot_dst_ops;
691 ippdrv->check_property = rotator_ippdrv_check_property;
692 ippdrv->start = rotator_ippdrv_start;
693 ret = rotator_init_prop_list(ippdrv);
694 if (ret < 0) {
695 dev_err(dev, "failed to init property list.\n");
696 goto err_ippdrv_register;
697 }
698
699 DRM_DEBUG_KMS("%s:ippdrv[0x%x]\n", __func__, (int)ippdrv);
700
701 platform_set_drvdata(pdev, rot);
702
703 ret = exynos_drm_ippdrv_register(ippdrv);
704 if (ret < 0) {
705 dev_err(dev, "failed to register drm rotator device\n");
706 goto err_ippdrv_register;
707 }
708
709 dev_info(dev, "The exynos rotator is probed successfully\n");
710
711 return 0;
712
713err_ippdrv_register:
714 devm_kfree(dev, ippdrv->prop_list);
715 pm_runtime_disable(dev);
716err_clk_get:
717 free_irq(rot->irq, rot);
718 return ret;
719}
720
721static int rotator_remove(struct platform_device *pdev)
722{
723 struct device *dev = &pdev->dev;
724 struct rot_context *rot = dev_get_drvdata(dev);
725 struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
726
727 devm_kfree(dev, ippdrv->prop_list);
728 exynos_drm_ippdrv_unregister(ippdrv);
729
730 pm_runtime_disable(dev);
731
732 free_irq(rot->irq, rot);
733
734 return 0;
735}
736
737struct rot_limit_table rot_limit_tbl = {
738 .ycbcr420_2p = {
739 .min_w = 32,
740 .min_h = 32,
741 .max_w = SZ_32K,
742 .max_h = SZ_32K,
743 .align = 3,
744 },
745 .rgb888 = {
746 .min_w = 8,
747 .min_h = 8,
748 .max_w = SZ_8K,
749 .max_h = SZ_8K,
750 .align = 2,
751 },
752};
753
754struct platform_device_id rotator_driver_ids[] = {
755 {
756 .name = "exynos-rot",
757 .driver_data = (unsigned long)&rot_limit_tbl,
758 },
759 {},
760};
761
762static int rotator_clk_crtl(struct rot_context *rot, bool enable)
763{
764 DRM_DEBUG_KMS("%s\n", __func__);
765
766 if (enable) {
767 clk_enable(rot->clock);
768 rot->suspended = false;
769 } else {
770 clk_disable(rot->clock);
771 rot->suspended = true;
772 }
773
774 return 0;
775}
776
777
778#ifdef CONFIG_PM_SLEEP
779static int rotator_suspend(struct device *dev)
780{
781 struct rot_context *rot = dev_get_drvdata(dev);
782
783 DRM_DEBUG_KMS("%s\n", __func__);
784
785 if (pm_runtime_suspended(dev))
786 return 0;
787
788 return rotator_clk_crtl(rot, false);
789}
790
791static int rotator_resume(struct device *dev)
792{
793 struct rot_context *rot = dev_get_drvdata(dev);
794
795 DRM_DEBUG_KMS("%s\n", __func__);
796
797 if (!pm_runtime_suspended(dev))
798 return rotator_clk_crtl(rot, true);
799
800 return 0;
801}
802#endif
803
804#ifdef CONFIG_PM_RUNTIME
805static int rotator_runtime_suspend(struct device *dev)
806{
807 struct rot_context *rot = dev_get_drvdata(dev);
808
809 DRM_DEBUG_KMS("%s\n", __func__);
810
811 return rotator_clk_crtl(rot, false);
812}
813
814static int rotator_runtime_resume(struct device *dev)
815{
816 struct rot_context *rot = dev_get_drvdata(dev);
817
818 DRM_DEBUG_KMS("%s\n", __func__);
819
820 return rotator_clk_crtl(rot, true);
821}
822#endif
823
824static const struct dev_pm_ops rotator_pm_ops = {
825 SET_SYSTEM_SLEEP_PM_OPS(rotator_suspend, rotator_resume)
826 SET_RUNTIME_PM_OPS(rotator_runtime_suspend, rotator_runtime_resume,
827 NULL)
828};
829
830struct platform_driver rotator_driver = {
831 .probe = rotator_probe,
832 .remove = rotator_remove,
833 .id_table = rotator_driver_ids,
834 .driver = {
835 .name = "exynos-rot",
836 .owner = THIS_MODULE,
837 .pm = &rotator_pm_ops,
838 },
839};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.h b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
new file mode 100644
index 000000000000..71a0b4c0c1e8
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
@@ -0,0 +1,19 @@
1/*
2 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
3 *
4 * Authors:
5 * YoungJun Cho <yj44.cho@samsung.com>
6 * Eunchul Kim <chulspro.kim@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#ifndef _EXYNOS_DRM_ROTATOR_H_
15#define _EXYNOS_DRM_ROTATOR_H_
16
17/* TODO */
18
19#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index e4b8a8f741f7..d0ca3c4e06c6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -39,7 +39,6 @@ struct vidi_win_data {
39 unsigned int fb_height; 39 unsigned int fb_height;
40 unsigned int bpp; 40 unsigned int bpp;
41 dma_addr_t dma_addr; 41 dma_addr_t dma_addr;
42 void __iomem *vaddr;
43 unsigned int buf_offsize; 42 unsigned int buf_offsize;
44 unsigned int line_size; /* bytes */ 43 unsigned int line_size; /* bytes */
45 bool enabled; 44 bool enabled;
@@ -294,7 +293,6 @@ static void vidi_win_mode_set(struct device *dev,
294 win_data->fb_width = overlay->fb_width; 293 win_data->fb_width = overlay->fb_width;
295 win_data->fb_height = overlay->fb_height; 294 win_data->fb_height = overlay->fb_height;
296 win_data->dma_addr = overlay->dma_addr[0] + offset; 295 win_data->dma_addr = overlay->dma_addr[0] + offset;
297 win_data->vaddr = overlay->vaddr[0] + offset;
298 win_data->bpp = overlay->bpp; 296 win_data->bpp = overlay->bpp;
299 win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) * 297 win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
300 (overlay->bpp >> 3); 298 (overlay->bpp >> 3);
@@ -309,9 +307,7 @@ static void vidi_win_mode_set(struct device *dev,
309 win_data->offset_x, win_data->offset_y); 307 win_data->offset_x, win_data->offset_y);
310 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", 308 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
311 win_data->ovl_width, win_data->ovl_height); 309 win_data->ovl_width, win_data->ovl_height);
312 DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n", 310 DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
313 (unsigned long)win_data->dma_addr,
314 (unsigned long)win_data->vaddr);
315 DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n", 311 DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
316 overlay->fb_width, overlay->crtc_width); 312 overlay->fb_width, overlay->crtc_width);
317} 313}
@@ -376,52 +372,6 @@ static struct exynos_drm_manager vidi_manager = {
376 .display_ops = &vidi_display_ops, 372 .display_ops = &vidi_display_ops,
377}; 373};
378 374
379static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
380{
381 struct exynos_drm_private *dev_priv = drm_dev->dev_private;
382 struct drm_pending_vblank_event *e, *t;
383 struct timeval now;
384 unsigned long flags;
385 bool is_checked = false;
386
387 spin_lock_irqsave(&drm_dev->event_lock, flags);
388
389 list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
390 base.link) {
391 /* if event's pipe isn't same as crtc then ignore it. */
392 if (crtc != e->pipe)
393 continue;
394
395 is_checked = true;
396
397 do_gettimeofday(&now);
398 e->event.sequence = 0;
399 e->event.tv_sec = now.tv_sec;
400 e->event.tv_usec = now.tv_usec;
401
402 list_move_tail(&e->base.link, &e->base.file_priv->event_list);
403 wake_up_interruptible(&e->base.file_priv->event_wait);
404 }
405
406 if (is_checked) {
407 /*
408 * call drm_vblank_put only in case that drm_vblank_get was
409 * called.
410 */
411 if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
412 drm_vblank_put(drm_dev, crtc);
413
414 /*
415 * don't off vblank if vblank_disable_allowed is 1,
416 * because vblank would be off by timer handler.
417 */
418 if (!drm_dev->vblank_disable_allowed)
419 drm_vblank_off(drm_dev, crtc);
420 }
421
422 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
423}
424
425static void vidi_fake_vblank_handler(struct work_struct *work) 375static void vidi_fake_vblank_handler(struct work_struct *work)
426{ 376{
427 struct vidi_context *ctx = container_of(work, struct vidi_context, 377 struct vidi_context *ctx = container_of(work, struct vidi_context,
@@ -446,7 +396,7 @@ static void vidi_fake_vblank_handler(struct work_struct *work)
446 396
447 mutex_unlock(&ctx->lock); 397 mutex_unlock(&ctx->lock);
448 398
449 vidi_finish_pageflip(subdrv->drm_dev, manager->pipe); 399 exynos_drm_crtc_finish_pageflip(subdrv->drm_dev, manager->pipe);
450} 400}
451 401
452static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev) 402static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
@@ -631,7 +581,7 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
631 return 0; 581 return 0;
632} 582}
633 583
634static int __devinit vidi_probe(struct platform_device *pdev) 584static int vidi_probe(struct platform_device *pdev)
635{ 585{
636 struct device *dev = &pdev->dev; 586 struct device *dev = &pdev->dev;
637 struct vidi_context *ctx; 587 struct vidi_context *ctx;
@@ -667,7 +617,7 @@ static int __devinit vidi_probe(struct platform_device *pdev)
667 return 0; 617 return 0;
668} 618}
669 619
670static int __devexit vidi_remove(struct platform_device *pdev) 620static int vidi_remove(struct platform_device *pdev)
671{ 621{
672 struct vidi_context *ctx = platform_get_drvdata(pdev); 622 struct vidi_context *ctx = platform_get_drvdata(pdev);
673 623
@@ -705,7 +655,7 @@ static const struct dev_pm_ops vidi_pm_ops = {
705 655
706struct platform_driver vidi_driver = { 656struct platform_driver vidi_driver = {
707 .probe = vidi_probe, 657 .probe = vidi_probe,
708 .remove = __devexit_p(vidi_remove), 658 .remove = vidi_remove,
709 .driver = { 659 .driver = {
710 .name = "exynos-drm-vidi", 660 .name = "exynos-drm-vidi",
711 .owner = THIS_MODULE, 661 .owner = THIS_MODULE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.h b/drivers/gpu/drm/exynos/exynos_drm_vidi.h
index a4babe4e65d7..1e5fdaa36ccc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.h
@@ -3,24 +3,10 @@
3 * Copyright (c) 2012 Samsung Electronics Co., Ltd. 3 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
4 * Author: Inki Dae <inki.dae@samsung.com> 4 * Author: Inki Dae <inki.dae@samsung.com>
5 * 5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a 6 * This program is free software; you can redistribute it and/or modify it
7 * copy of this software and associated documentation files (the "Software"), 7 * under the terms of the GNU General Public License as published by the
8 * to deal in the Software without restriction, including without limitation 8 * Free Software Foundation; either version 2 of the License, or (at your
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 * option) any later version.
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */ 10 */
25 11
26#ifndef _EXYNOS_DRM_VIDI_H_ 12#ifndef _EXYNOS_DRM_VIDI_H_
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 2c115f8a62a3..41ff79d8ac8e 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -50,6 +50,29 @@
50#define MAX_HEIGHT 1080 50#define MAX_HEIGHT 1080
51#define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev)) 51#define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev))
52 52
53/* AVI header and aspect ratio */
54#define HDMI_AVI_VERSION 0x02
55#define HDMI_AVI_LENGTH 0x0D
56#define AVI_PIC_ASPECT_RATIO_16_9 (2 << 4)
57#define AVI_SAME_AS_PIC_ASPECT_RATIO 8
58
59/* AUI header info */
60#define HDMI_AUI_VERSION 0x01
61#define HDMI_AUI_LENGTH 0x0A
62
63/* HDMI infoframe to configure HDMI out packet header, AUI and AVI */
64enum HDMI_PACKET_TYPE {
65 /* refer to Table 5-8 Packet Type in HDMI specification v1.4a */
66 /* InfoFrame packet type */
67 HDMI_PACKET_TYPE_INFOFRAME = 0x80,
68 /* Vendor-Specific InfoFrame */
69 HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1,
70 /* Auxiliary Video information InfoFrame */
71 HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2,
72 /* Audio information InfoFrame */
73 HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4
74};
75
53enum hdmi_type { 76enum hdmi_type {
54 HDMI_TYPE13, 77 HDMI_TYPE13,
55 HDMI_TYPE14, 78 HDMI_TYPE14,
@@ -74,6 +97,7 @@ struct hdmi_context {
74 struct mutex hdmi_mutex; 97 struct mutex hdmi_mutex;
75 98
76 void __iomem *regs; 99 void __iomem *regs;
100 void *parent_ctx;
77 int external_irq; 101 int external_irq;
78 int internal_irq; 102 int internal_irq;
79 103
@@ -84,7 +108,6 @@ struct hdmi_context {
84 int cur_conf; 108 int cur_conf;
85 109
86 struct hdmi_resources res; 110 struct hdmi_resources res;
87 void *parent_ctx;
88 111
89 int hpd_gpio; 112 int hpd_gpio;
90 113
@@ -182,6 +205,7 @@ struct hdmi_v13_conf {
182 int height; 205 int height;
183 int vrefresh; 206 int vrefresh;
184 bool interlace; 207 bool interlace;
208 int cea_video_id;
185 const u8 *hdmiphy_data; 209 const u8 *hdmiphy_data;
186 const struct hdmi_v13_preset_conf *conf; 210 const struct hdmi_v13_preset_conf *conf;
187}; 211};
@@ -353,15 +377,20 @@ static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p60 = {
353}; 377};
354 378
355static const struct hdmi_v13_conf hdmi_v13_confs[] = { 379static const struct hdmi_v13_conf hdmi_v13_confs[] = {
356 { 1280, 720, 60, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 }, 380 { 1280, 720, 60, false, 4, hdmiphy_v13_conf74_25,
357 { 1280, 720, 50, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 }, 381 &hdmi_v13_conf_720p60 },
358 { 720, 480, 60, false, hdmiphy_v13_conf27_027, &hdmi_v13_conf_480p }, 382 { 1280, 720, 50, false, 19, hdmiphy_v13_conf74_25,
359 { 1920, 1080, 50, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i50 }, 383 &hdmi_v13_conf_720p60 },
360 { 1920, 1080, 50, false, hdmiphy_v13_conf148_5, 384 { 720, 480, 60, false, 3, hdmiphy_v13_conf27_027,
361 &hdmi_v13_conf_1080p50 }, 385 &hdmi_v13_conf_480p },
362 { 1920, 1080, 60, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i60 }, 386 { 1920, 1080, 50, true, 20, hdmiphy_v13_conf74_25,
363 { 1920, 1080, 60, false, hdmiphy_v13_conf148_5, 387 &hdmi_v13_conf_1080i50 },
364 &hdmi_v13_conf_1080p60 }, 388 { 1920, 1080, 50, false, 31, hdmiphy_v13_conf148_5,
389 &hdmi_v13_conf_1080p50 },
390 { 1920, 1080, 60, true, 5, hdmiphy_v13_conf74_25,
391 &hdmi_v13_conf_1080i60 },
392 { 1920, 1080, 60, false, 16, hdmiphy_v13_conf148_5,
393 &hdmi_v13_conf_1080p60 },
365}; 394};
366 395
367/* HDMI Version 1.4 */ 396/* HDMI Version 1.4 */
@@ -479,6 +508,7 @@ struct hdmi_conf {
479 int height; 508 int height;
480 int vrefresh; 509 int vrefresh;
481 bool interlace; 510 bool interlace;
511 int cea_video_id;
482 const u8 *hdmiphy_data; 512 const u8 *hdmiphy_data;
483 const struct hdmi_preset_conf *conf; 513 const struct hdmi_preset_conf *conf;
484}; 514};
@@ -934,16 +964,21 @@ static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
934}; 964};
935 965
936static const struct hdmi_conf hdmi_confs[] = { 966static const struct hdmi_conf hdmi_confs[] = {
937 { 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p60 }, 967 { 720, 480, 60, false, 3, hdmiphy_conf27_027, &hdmi_conf_480p60 },
938 { 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p50 }, 968 { 1280, 720, 50, false, 19, hdmiphy_conf74_25, &hdmi_conf_720p50 },
939 { 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 }, 969 { 1280, 720, 60, false, 4, hdmiphy_conf74_25, &hdmi_conf_720p60 },
940 { 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 }, 970 { 1920, 1080, 50, true, 20, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
941 { 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 }, 971 { 1920, 1080, 60, true, 5, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
942 { 1920, 1080, 30, false, hdmiphy_conf74_176, &hdmi_conf_1080p30 }, 972 { 1920, 1080, 30, false, 34, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
943 { 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 }, 973 { 1920, 1080, 50, false, 31, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
944 { 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 }, 974 { 1920, 1080, 60, false, 16, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
945}; 975};
946 976
977struct hdmi_infoframe {
978 enum HDMI_PACKET_TYPE type;
979 u8 ver;
980 u8 len;
981};
947 982
948static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id) 983static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
949{ 984{
@@ -1267,6 +1302,88 @@ static int hdmi_conf_index(struct hdmi_context *hdata,
1267 return hdmi_v14_conf_index(mode); 1302 return hdmi_v14_conf_index(mode);
1268} 1303}
1269 1304
1305static u8 hdmi_chksum(struct hdmi_context *hdata,
1306 u32 start, u8 len, u32 hdr_sum)
1307{
1308 int i;
1309
1310 /* hdr_sum : header0 + header1 + header2
1311 * start : start address of packet byte1
1312 * len : packet bytes - 1 */
1313 for (i = 0; i < len; ++i)
1314 hdr_sum += 0xff & hdmi_reg_read(hdata, start + i * 4);
1315
1316 /* return 2's complement of 8 bit hdr_sum */
1317 return (u8)(~(hdr_sum & 0xff) + 1);
1318}
1319
1320static void hdmi_reg_infoframe(struct hdmi_context *hdata,
1321 struct hdmi_infoframe *infoframe)
1322{
1323 u32 hdr_sum;
1324 u8 chksum;
1325 u32 aspect_ratio;
1326 u32 mod;
1327 u32 vic;
1328
1329 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1330
1331 mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
1332 if (hdata->dvi_mode) {
1333 hdmi_reg_writeb(hdata, HDMI_VSI_CON,
1334 HDMI_VSI_CON_DO_NOT_TRANSMIT);
1335 hdmi_reg_writeb(hdata, HDMI_AVI_CON,
1336 HDMI_AVI_CON_DO_NOT_TRANSMIT);
1337 hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_NO_TRAN);
1338 return;
1339 }
1340
1341 switch (infoframe->type) {
1342 case HDMI_PACKET_TYPE_AVI:
1343 hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
1344 hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type);
1345 hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver);
1346 hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len);
1347 hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
1348
1349 /* Output format zero hardcoded ,RGB YBCR selection */
1350 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
1351 AVI_ACTIVE_FORMAT_VALID |
1352 AVI_UNDERSCANNED_DISPLAY_VALID);
1353
1354 aspect_ratio = AVI_PIC_ASPECT_RATIO_16_9;
1355
1356 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), aspect_ratio |
1357 AVI_SAME_AS_PIC_ASPECT_RATIO);
1358
1359 if (hdata->type == HDMI_TYPE13)
1360 vic = hdmi_v13_confs[hdata->cur_conf].cea_video_id;
1361 else
1362 vic = hdmi_confs[hdata->cur_conf].cea_video_id;
1363
1364 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
1365
1366 chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
1367 infoframe->len, hdr_sum);
1368 DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
1369 hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
1370 break;
1371 case HDMI_PACKET_TYPE_AUI:
1372 hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
1373 hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type);
1374 hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver);
1375 hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len);
1376 hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
1377 chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
1378 infoframe->len, hdr_sum);
1379 DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
1380 hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
1381 break;
1382 default:
1383 break;
1384 }
1385}
1386
1270static bool hdmi_is_connected(void *ctx) 1387static bool hdmi_is_connected(void *ctx)
1271{ 1388{
1272 struct hdmi_context *hdata = ctx; 1389 struct hdmi_context *hdata = ctx;
@@ -1293,6 +1410,7 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
1293 DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n", 1410 DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
1294 (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"), 1411 (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
1295 raw_edid->width_cm, raw_edid->height_cm); 1412 raw_edid->width_cm, raw_edid->height_cm);
1413 kfree(raw_edid);
1296 } else { 1414 } else {
1297 return -ENODEV; 1415 return -ENODEV;
1298 } 1416 }
@@ -1541,6 +1659,8 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
1541 1659
1542static void hdmi_conf_init(struct hdmi_context *hdata) 1660static void hdmi_conf_init(struct hdmi_context *hdata)
1543{ 1661{
1662 struct hdmi_infoframe infoframe;
1663
1544 /* disable HPD interrupts */ 1664 /* disable HPD interrupts */
1545 hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL | 1665 hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
1546 HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG); 1666 HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
@@ -1575,9 +1695,17 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
1575 hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02); 1695 hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
1576 hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04); 1696 hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
1577 } else { 1697 } else {
1698 infoframe.type = HDMI_PACKET_TYPE_AVI;
1699 infoframe.ver = HDMI_AVI_VERSION;
1700 infoframe.len = HDMI_AVI_LENGTH;
1701 hdmi_reg_infoframe(hdata, &infoframe);
1702
1703 infoframe.type = HDMI_PACKET_TYPE_AUI;
1704 infoframe.ver = HDMI_AUI_VERSION;
1705 infoframe.len = HDMI_AUI_LENGTH;
1706 hdmi_reg_infoframe(hdata, &infoframe);
1707
1578 /* enable AVI packet every vsync, fixes purple line problem */ 1708 /* enable AVI packet every vsync, fixes purple line problem */
1579 hdmi_reg_writeb(hdata, HDMI_AVI_CON, 0x02);
1580 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 2 << 5);
1581 hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5); 1709 hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5);
1582 } 1710 }
1583} 1711}
@@ -1875,6 +2003,24 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
1875 mdelay(10); 2003 mdelay(10);
1876} 2004}
1877 2005
2006static void hdmiphy_poweron(struct hdmi_context *hdata)
2007{
2008 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2009
2010 if (hdata->type == HDMI_TYPE14)
2011 hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, 0,
2012 HDMI_PHY_POWER_OFF_EN);
2013}
2014
2015static void hdmiphy_poweroff(struct hdmi_context *hdata)
2016{
2017 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2018
2019 if (hdata->type == HDMI_TYPE14)
2020 hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, ~0,
2021 HDMI_PHY_POWER_OFF_EN);
2022}
2023
1878static void hdmiphy_conf_apply(struct hdmi_context *hdata) 2024static void hdmiphy_conf_apply(struct hdmi_context *hdata)
1879{ 2025{
1880 const u8 *hdmiphy_data; 2026 const u8 *hdmiphy_data;
@@ -1978,9 +2124,18 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
1978 index = hdmi_v14_conf_index(m); 2124 index = hdmi_v14_conf_index(m);
1979 2125
1980 if (index >= 0) { 2126 if (index >= 0) {
2127 struct drm_mode_object base;
2128 struct list_head head;
2129
1981 DRM_INFO("desired mode doesn't exist so\n"); 2130 DRM_INFO("desired mode doesn't exist so\n");
1982 DRM_INFO("use the most suitable mode among modes.\n"); 2131 DRM_INFO("use the most suitable mode among modes.\n");
2132
2133 /* preserve display mode header while copying. */
2134 head = adjusted_mode->head;
2135 base = adjusted_mode->base;
1983 memcpy(adjusted_mode, m, sizeof(*m)); 2136 memcpy(adjusted_mode, m, sizeof(*m));
2137 adjusted_mode->head = head;
2138 adjusted_mode->base = base;
1984 break; 2139 break;
1985 } 2140 }
1986 } 2141 }
@@ -2034,12 +2189,12 @@ static void hdmi_poweron(struct hdmi_context *hdata)
2034 2189
2035 mutex_unlock(&hdata->hdmi_mutex); 2190 mutex_unlock(&hdata->hdmi_mutex);
2036 2191
2037 pm_runtime_get_sync(hdata->dev);
2038
2039 regulator_bulk_enable(res->regul_count, res->regul_bulk); 2192 regulator_bulk_enable(res->regul_count, res->regul_bulk);
2040 clk_enable(res->hdmiphy); 2193 clk_enable(res->hdmiphy);
2041 clk_enable(res->hdmi); 2194 clk_enable(res->hdmi);
2042 clk_enable(res->sclk_hdmi); 2195 clk_enable(res->sclk_hdmi);
2196
2197 hdmiphy_poweron(hdata);
2043} 2198}
2044 2199
2045static void hdmi_poweroff(struct hdmi_context *hdata) 2200static void hdmi_poweroff(struct hdmi_context *hdata)
@@ -2058,14 +2213,13 @@ static void hdmi_poweroff(struct hdmi_context *hdata)
2058 * its reset state seems to meet the condition. 2213 * its reset state seems to meet the condition.
2059 */ 2214 */
2060 hdmiphy_conf_reset(hdata); 2215 hdmiphy_conf_reset(hdata);
2216 hdmiphy_poweroff(hdata);
2061 2217
2062 clk_disable(res->sclk_hdmi); 2218 clk_disable(res->sclk_hdmi);
2063 clk_disable(res->hdmi); 2219 clk_disable(res->hdmi);
2064 clk_disable(res->hdmiphy); 2220 clk_disable(res->hdmiphy);
2065 regulator_bulk_disable(res->regul_count, res->regul_bulk); 2221 regulator_bulk_disable(res->regul_count, res->regul_bulk);
2066 2222
2067 pm_runtime_put_sync(hdata->dev);
2068
2069 mutex_lock(&hdata->hdmi_mutex); 2223 mutex_lock(&hdata->hdmi_mutex);
2070 2224
2071 hdata->powered = false; 2225 hdata->powered = false;
@@ -2078,16 +2232,18 @@ static void hdmi_dpms(void *ctx, int mode)
2078{ 2232{
2079 struct hdmi_context *hdata = ctx; 2233 struct hdmi_context *hdata = ctx;
2080 2234
2081 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 2235 DRM_DEBUG_KMS("[%d] %s mode %d\n", __LINE__, __func__, mode);
2082 2236
2083 switch (mode) { 2237 switch (mode) {
2084 case DRM_MODE_DPMS_ON: 2238 case DRM_MODE_DPMS_ON:
2085 hdmi_poweron(hdata); 2239 if (pm_runtime_suspended(hdata->dev))
2240 pm_runtime_get_sync(hdata->dev);
2086 break; 2241 break;
2087 case DRM_MODE_DPMS_STANDBY: 2242 case DRM_MODE_DPMS_STANDBY:
2088 case DRM_MODE_DPMS_SUSPEND: 2243 case DRM_MODE_DPMS_SUSPEND:
2089 case DRM_MODE_DPMS_OFF: 2244 case DRM_MODE_DPMS_OFF:
2090 hdmi_poweroff(hdata); 2245 if (!pm_runtime_suspended(hdata->dev))
2246 pm_runtime_put_sync(hdata->dev);
2091 break; 2247 break;
2092 default: 2248 default:
2093 DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode); 2249 DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
@@ -2149,7 +2305,7 @@ static irqreturn_t hdmi_internal_irq_thread(int irq, void *arg)
2149 return IRQ_HANDLED; 2305 return IRQ_HANDLED;
2150} 2306}
2151 2307
2152static int __devinit hdmi_resources_init(struct hdmi_context *hdata) 2308static int hdmi_resources_init(struct hdmi_context *hdata)
2153{ 2309{
2154 struct device *dev = hdata->dev; 2310 struct device *dev = hdata->dev;
2155 struct hdmi_resources *res = &hdata->res; 2311 struct hdmi_resources *res = &hdata->res;
@@ -2166,27 +2322,27 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
2166 memset(res, 0, sizeof(*res)); 2322 memset(res, 0, sizeof(*res));
2167 2323
2168 /* get clocks, power */ 2324 /* get clocks, power */
2169 res->hdmi = clk_get(dev, "hdmi"); 2325 res->hdmi = devm_clk_get(dev, "hdmi");
2170 if (IS_ERR_OR_NULL(res->hdmi)) { 2326 if (IS_ERR_OR_NULL(res->hdmi)) {
2171 DRM_ERROR("failed to get clock 'hdmi'\n"); 2327 DRM_ERROR("failed to get clock 'hdmi'\n");
2172 goto fail; 2328 goto fail;
2173 } 2329 }
2174 res->sclk_hdmi = clk_get(dev, "sclk_hdmi"); 2330 res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
2175 if (IS_ERR_OR_NULL(res->sclk_hdmi)) { 2331 if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
2176 DRM_ERROR("failed to get clock 'sclk_hdmi'\n"); 2332 DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
2177 goto fail; 2333 goto fail;
2178 } 2334 }
2179 res->sclk_pixel = clk_get(dev, "sclk_pixel"); 2335 res->sclk_pixel = devm_clk_get(dev, "sclk_pixel");
2180 if (IS_ERR_OR_NULL(res->sclk_pixel)) { 2336 if (IS_ERR_OR_NULL(res->sclk_pixel)) {
2181 DRM_ERROR("failed to get clock 'sclk_pixel'\n"); 2337 DRM_ERROR("failed to get clock 'sclk_pixel'\n");
2182 goto fail; 2338 goto fail;
2183 } 2339 }
2184 res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy"); 2340 res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy");
2185 if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) { 2341 if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
2186 DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n"); 2342 DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
2187 goto fail; 2343 goto fail;
2188 } 2344 }
2189 res->hdmiphy = clk_get(dev, "hdmiphy"); 2345 res->hdmiphy = devm_clk_get(dev, "hdmiphy");
2190 if (IS_ERR_OR_NULL(res->hdmiphy)) { 2346 if (IS_ERR_OR_NULL(res->hdmiphy)) {
2191 DRM_ERROR("failed to get clock 'hdmiphy'\n"); 2347 DRM_ERROR("failed to get clock 'hdmiphy'\n");
2192 goto fail; 2348 goto fail;
@@ -2194,7 +2350,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
2194 2350
2195 clk_set_parent(res->sclk_hdmi, res->sclk_pixel); 2351 clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
2196 2352
2197 res->regul_bulk = kzalloc(ARRAY_SIZE(supply) * 2353 res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) *
2198 sizeof(res->regul_bulk[0]), GFP_KERNEL); 2354 sizeof(res->regul_bulk[0]), GFP_KERNEL);
2199 if (!res->regul_bulk) { 2355 if (!res->regul_bulk) {
2200 DRM_ERROR("failed to get memory for regulators\n"); 2356 DRM_ERROR("failed to get memory for regulators\n");
@@ -2204,7 +2360,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
2204 res->regul_bulk[i].supply = supply[i]; 2360 res->regul_bulk[i].supply = supply[i];
2205 res->regul_bulk[i].consumer = NULL; 2361 res->regul_bulk[i].consumer = NULL;
2206 } 2362 }
2207 ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk); 2363 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
2208 if (ret) { 2364 if (ret) {
2209 DRM_ERROR("failed to get regulators\n"); 2365 DRM_ERROR("failed to get regulators\n");
2210 goto fail; 2366 goto fail;
@@ -2217,28 +2373,6 @@ fail:
2217 return -ENODEV; 2373 return -ENODEV;
2218} 2374}
2219 2375
2220static int hdmi_resources_cleanup(struct hdmi_context *hdata)
2221{
2222 struct hdmi_resources *res = &hdata->res;
2223
2224 regulator_bulk_free(res->regul_count, res->regul_bulk);
2225 /* kfree is NULL-safe */
2226 kfree(res->regul_bulk);
2227 if (!IS_ERR_OR_NULL(res->hdmiphy))
2228 clk_put(res->hdmiphy);
2229 if (!IS_ERR_OR_NULL(res->sclk_hdmiphy))
2230 clk_put(res->sclk_hdmiphy);
2231 if (!IS_ERR_OR_NULL(res->sclk_pixel))
2232 clk_put(res->sclk_pixel);
2233 if (!IS_ERR_OR_NULL(res->sclk_hdmi))
2234 clk_put(res->sclk_hdmi);
2235 if (!IS_ERR_OR_NULL(res->hdmi))
2236 clk_put(res->hdmi);
2237 memset(res, 0, sizeof(*res));
2238
2239 return 0;
2240}
2241
2242static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy; 2376static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;
2243 2377
2244void hdmi_attach_ddc_client(struct i2c_client *ddc) 2378void hdmi_attach_ddc_client(struct i2c_client *ddc)
@@ -2306,6 +2440,7 @@ static struct platform_device_id hdmi_driver_types[] = {
2306 } 2440 }
2307}; 2441};
2308 2442
2443#ifdef CONFIG_OF
2309static struct of_device_id hdmi_match_types[] = { 2444static struct of_device_id hdmi_match_types[] = {
2310 { 2445 {
2311 .compatible = "samsung,exynos5-hdmi", 2446 .compatible = "samsung,exynos5-hdmi",
@@ -2314,8 +2449,9 @@ static struct of_device_id hdmi_match_types[] = {
2314 /* end node */ 2449 /* end node */
2315 } 2450 }
2316}; 2451};
2452#endif
2317 2453
2318static int __devinit hdmi_probe(struct platform_device *pdev) 2454static int hdmi_probe(struct platform_device *pdev)
2319{ 2455{
2320 struct device *dev = &pdev->dev; 2456 struct device *dev = &pdev->dev;
2321 struct exynos_drm_hdmi_context *drm_hdmi_ctx; 2457 struct exynos_drm_hdmi_context *drm_hdmi_ctx;
@@ -2366,6 +2502,8 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
2366 const struct of_device_id *match; 2502 const struct of_device_id *match;
2367 match = of_match_node(of_match_ptr(hdmi_match_types), 2503 match = of_match_node(of_match_ptr(hdmi_match_types),
2368 pdev->dev.of_node); 2504 pdev->dev.of_node);
2505 if (match == NULL)
2506 return -ENODEV;
2369 hdata->type = (enum hdmi_type)match->data; 2507 hdata->type = (enum hdmi_type)match->data;
2370 } else { 2508 } else {
2371 hdata->type = (enum hdmi_type)platform_get_device_id 2509 hdata->type = (enum hdmi_type)platform_get_device_id
@@ -2378,36 +2516,32 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
2378 ret = hdmi_resources_init(hdata); 2516 ret = hdmi_resources_init(hdata);
2379 2517
2380 if (ret) { 2518 if (ret) {
2381 ret = -EINVAL;
2382 DRM_ERROR("hdmi_resources_init failed\n"); 2519 DRM_ERROR("hdmi_resources_init failed\n");
2383 goto err_data; 2520 return -EINVAL;
2384 } 2521 }
2385 2522
2386 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2523 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2387 if (!res) { 2524 if (!res) {
2388 DRM_ERROR("failed to find registers\n"); 2525 DRM_ERROR("failed to find registers\n");
2389 ret = -ENOENT; 2526 return -ENOENT;
2390 goto err_resource;
2391 } 2527 }
2392 2528
2393 hdata->regs = devm_request_and_ioremap(&pdev->dev, res); 2529 hdata->regs = devm_request_and_ioremap(&pdev->dev, res);
2394 if (!hdata->regs) { 2530 if (!hdata->regs) {
2395 DRM_ERROR("failed to map registers\n"); 2531 DRM_ERROR("failed to map registers\n");
2396 ret = -ENXIO; 2532 return -ENXIO;
2397 goto err_resource;
2398 } 2533 }
2399 2534
2400 ret = gpio_request(hdata->hpd_gpio, "HPD"); 2535 ret = devm_gpio_request(&pdev->dev, hdata->hpd_gpio, "HPD");
2401 if (ret) { 2536 if (ret) {
2402 DRM_ERROR("failed to request HPD gpio\n"); 2537 DRM_ERROR("failed to request HPD gpio\n");
2403 goto err_resource; 2538 return ret;
2404 } 2539 }
2405 2540
2406 /* DDC i2c driver */ 2541 /* DDC i2c driver */
2407 if (i2c_add_driver(&ddc_driver)) { 2542 if (i2c_add_driver(&ddc_driver)) {
2408 DRM_ERROR("failed to register ddc i2c driver\n"); 2543 DRM_ERROR("failed to register ddc i2c driver\n");
2409 ret = -ENOENT; 2544 return -ENOENT;
2410 goto err_gpio;
2411 } 2545 }
2412 2546
2413 hdata->ddc_port = hdmi_ddc; 2547 hdata->ddc_port = hdmi_ddc;
@@ -2470,15 +2604,10 @@ err_hdmiphy:
2470 i2c_del_driver(&hdmiphy_driver); 2604 i2c_del_driver(&hdmiphy_driver);
2471err_ddc: 2605err_ddc:
2472 i2c_del_driver(&ddc_driver); 2606 i2c_del_driver(&ddc_driver);
2473err_gpio:
2474 gpio_free(hdata->hpd_gpio);
2475err_resource:
2476 hdmi_resources_cleanup(hdata);
2477err_data:
2478 return ret; 2607 return ret;
2479} 2608}
2480 2609
2481static int __devexit hdmi_remove(struct platform_device *pdev) 2610static int hdmi_remove(struct platform_device *pdev)
2482{ 2611{
2483 struct device *dev = &pdev->dev; 2612 struct device *dev = &pdev->dev;
2484 struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev); 2613 struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev);
@@ -2491,9 +2620,6 @@ static int __devexit hdmi_remove(struct platform_device *pdev)
2491 free_irq(hdata->internal_irq, hdata); 2620 free_irq(hdata->internal_irq, hdata);
2492 free_irq(hdata->external_irq, hdata); 2621 free_irq(hdata->external_irq, hdata);
2493 2622
2494 gpio_free(hdata->hpd_gpio);
2495
2496 hdmi_resources_cleanup(hdata);
2497 2623
2498 /* hdmiphy i2c driver */ 2624 /* hdmiphy i2c driver */
2499 i2c_del_driver(&hdmiphy_driver); 2625 i2c_del_driver(&hdmiphy_driver);
@@ -2509,6 +2635,8 @@ static int hdmi_suspend(struct device *dev)
2509 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev); 2635 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
2510 struct hdmi_context *hdata = ctx->ctx; 2636 struct hdmi_context *hdata = ctx->ctx;
2511 2637
2638 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2639
2512 disable_irq(hdata->internal_irq); 2640 disable_irq(hdata->internal_irq);
2513 disable_irq(hdata->external_irq); 2641 disable_irq(hdata->external_irq);
2514 2642
@@ -2516,6 +2644,11 @@ static int hdmi_suspend(struct device *dev)
2516 if (ctx->drm_dev) 2644 if (ctx->drm_dev)
2517 drm_helper_hpd_irq_event(ctx->drm_dev); 2645 drm_helper_hpd_irq_event(ctx->drm_dev);
2518 2646
2647 if (pm_runtime_suspended(dev)) {
2648 DRM_DEBUG_KMS("%s : Already suspended\n", __func__);
2649 return 0;
2650 }
2651
2519 hdmi_poweroff(hdata); 2652 hdmi_poweroff(hdata);
2520 2653
2521 return 0; 2654 return 0;
@@ -2526,22 +2659,61 @@ static int hdmi_resume(struct device *dev)
2526 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev); 2659 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
2527 struct hdmi_context *hdata = ctx->ctx; 2660 struct hdmi_context *hdata = ctx->ctx;
2528 2661
2662 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2663
2664 hdata->hpd = gpio_get_value(hdata->hpd_gpio);
2665
2529 enable_irq(hdata->external_irq); 2666 enable_irq(hdata->external_irq);
2530 enable_irq(hdata->internal_irq); 2667 enable_irq(hdata->internal_irq);
2668
2669 if (!pm_runtime_suspended(dev)) {
2670 DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
2671 return 0;
2672 }
2673
2674 hdmi_poweron(hdata);
2675
2531 return 0; 2676 return 0;
2532} 2677}
2533#endif 2678#endif
2534 2679
2535static SIMPLE_DEV_PM_OPS(hdmi_pm_ops, hdmi_suspend, hdmi_resume); 2680#ifdef CONFIG_PM_RUNTIME
2681static int hdmi_runtime_suspend(struct device *dev)
2682{
2683 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
2684 struct hdmi_context *hdata = ctx->ctx;
2685 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2686
2687 hdmi_poweroff(hdata);
2688
2689 return 0;
2690}
2691
2692static int hdmi_runtime_resume(struct device *dev)
2693{
2694 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
2695 struct hdmi_context *hdata = ctx->ctx;
2696 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2697
2698 hdmi_poweron(hdata);
2699
2700 return 0;
2701}
2702#endif
2703
2704static const struct dev_pm_ops hdmi_pm_ops = {
2705 SET_SYSTEM_SLEEP_PM_OPS(hdmi_suspend, hdmi_resume)
2706 SET_RUNTIME_PM_OPS(hdmi_runtime_suspend, hdmi_runtime_resume, NULL)
2707};
2536 2708
2537struct platform_driver hdmi_driver = { 2709struct platform_driver hdmi_driver = {
2538 .probe = hdmi_probe, 2710 .probe = hdmi_probe,
2539 .remove = __devexit_p(hdmi_remove), 2711 .remove = hdmi_remove,
2540 .id_table = hdmi_driver_types, 2712 .id_table = hdmi_driver_types,
2541 .driver = { 2713 .driver = {
2542 .name = "exynos-hdmi", 2714 .name = "exynos-hdmi",
2543 .owner = THIS_MODULE, 2715 .owner = THIS_MODULE,
2544 .pm = &hdmi_pm_ops, 2716 .pm = &hdmi_pm_ops,
2545 .of_match_table = hdmi_match_types, 2717 .of_match_table = of_match_ptr(hdmi_match_types),
2546 }, 2718 },
2547}; 2719};
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.h b/drivers/gpu/drm/exynos/exynos_hdmi.h
index 1c3b6d8f1fe7..0ddf3957de15 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.h
@@ -5,24 +5,10 @@
5 * Inki Dae <inki.dae@samsung.com> 5 * Inki Dae <inki.dae@samsung.com>
6 * Seung-Woo Kim <sw0312.kim@samsung.com> 6 * Seung-Woo Kim <sw0312.kim@samsung.com>
7 * 7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a 8 * This program is free software; you can redistribute it and/or modify it
9 * copy of this software and associated documentation files (the "Software"), 9 * under the terms of the GNU General Public License as published by the
10 * to deal in the Software without restriction, including without limitation 10 * Free Software Foundation; either version 2 of the License, or (at your
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 11 * option) any later version.
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 */ 12 */
27 13
28#ifndef _EXYNOS_HDMI_H_ 14#ifndef _EXYNOS_HDMI_H_
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
index 27d1720f1bbd..ea49d132ecf6 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -46,6 +46,7 @@ static const struct i2c_device_id hdmiphy_id[] = {
46 { }, 46 { },
47}; 47};
48 48
49#ifdef CONFIG_OF
49static struct of_device_id hdmiphy_match_types[] = { 50static struct of_device_id hdmiphy_match_types[] = {
50 { 51 {
51 .compatible = "samsung,exynos5-hdmiphy", 52 .compatible = "samsung,exynos5-hdmiphy",
@@ -53,16 +54,17 @@ static struct of_device_id hdmiphy_match_types[] = {
53 /* end node */ 54 /* end node */
54 } 55 }
55}; 56};
57#endif
56 58
57struct i2c_driver hdmiphy_driver = { 59struct i2c_driver hdmiphy_driver = {
58 .driver = { 60 .driver = {
59 .name = "exynos-hdmiphy", 61 .name = "exynos-hdmiphy",
60 .owner = THIS_MODULE, 62 .owner = THIS_MODULE,
61 .of_match_table = hdmiphy_match_types, 63 .of_match_table = of_match_ptr(hdmiphy_match_types),
62 }, 64 },
63 .id_table = hdmiphy_id, 65 .id_table = hdmiphy_id,
64 .probe = hdmiphy_probe, 66 .probe = hdmiphy_probe,
65 .remove = __devexit_p(hdmiphy_remove), 67 .remove = hdmiphy_remove,
66 .command = NULL, 68 .command = NULL,
67}; 69};
68EXPORT_SYMBOL(hdmiphy_driver); 70EXPORT_SYMBOL(hdmiphy_driver);
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 614b2e9ac462..c187ea33b748 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -35,15 +35,15 @@
35#include <drm/exynos_drm.h> 35#include <drm/exynos_drm.h>
36 36
37#include "exynos_drm_drv.h" 37#include "exynos_drm_drv.h"
38#include "exynos_drm_crtc.h"
38#include "exynos_drm_hdmi.h" 39#include "exynos_drm_hdmi.h"
40#include "exynos_drm_iommu.h"
39 41
40#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev)) 42#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev))
41 43
42struct hdmi_win_data { 44struct hdmi_win_data {
43 dma_addr_t dma_addr; 45 dma_addr_t dma_addr;
44 void __iomem *vaddr;
45 dma_addr_t chroma_dma_addr; 46 dma_addr_t chroma_dma_addr;
46 void __iomem *chroma_vaddr;
47 uint32_t pixel_format; 47 uint32_t pixel_format;
48 unsigned int bpp; 48 unsigned int bpp;
49 unsigned int crtc_x; 49 unsigned int crtc_x;
@@ -59,6 +59,8 @@ struct hdmi_win_data {
59 unsigned int mode_width; 59 unsigned int mode_width;
60 unsigned int mode_height; 60 unsigned int mode_height;
61 unsigned int scan_flags; 61 unsigned int scan_flags;
62 bool enabled;
63 bool resume;
62}; 64};
63 65
64struct mixer_resources { 66struct mixer_resources {
@@ -80,6 +82,7 @@ enum mixer_version_id {
80 82
81struct mixer_context { 83struct mixer_context {
82 struct device *dev; 84 struct device *dev;
85 struct drm_device *drm_dev;
83 int pipe; 86 int pipe;
84 bool interlace; 87 bool interlace;
85 bool powered; 88 bool powered;
@@ -90,6 +93,9 @@ struct mixer_context {
90 struct mixer_resources mixer_res; 93 struct mixer_resources mixer_res;
91 struct hdmi_win_data win_data[MIXER_WIN_NR]; 94 struct hdmi_win_data win_data[MIXER_WIN_NR];
92 enum mixer_version_id mxr_ver; 95 enum mixer_version_id mxr_ver;
96 void *parent_ctx;
97 wait_queue_head_t wait_vsync_queue;
98 atomic_t wait_vsync_event;
93}; 99};
94 100
95struct mixer_drv_data { 101struct mixer_drv_data {
@@ -665,58 +671,22 @@ static void mixer_win_reset(struct mixer_context *ctx)
665 spin_unlock_irqrestore(&res->reg_slock, flags); 671 spin_unlock_irqrestore(&res->reg_slock, flags);
666} 672}
667 673
668static void mixer_poweron(struct mixer_context *ctx) 674static int mixer_iommu_on(void *ctx, bool enable)
669{ 675{
670 struct mixer_resources *res = &ctx->mixer_res; 676 struct exynos_drm_hdmi_context *drm_hdmi_ctx;
671 677 struct mixer_context *mdata = ctx;
672 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 678 struct drm_device *drm_dev;
673
674 mutex_lock(&ctx->mixer_mutex);
675 if (ctx->powered) {
676 mutex_unlock(&ctx->mixer_mutex);
677 return;
678 }
679 ctx->powered = true;
680 mutex_unlock(&ctx->mixer_mutex);
681
682 pm_runtime_get_sync(ctx->dev);
683
684 clk_enable(res->mixer);
685 if (ctx->vp_enabled) {
686 clk_enable(res->vp);
687 clk_enable(res->sclk_mixer);
688 }
689
690 mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
691 mixer_win_reset(ctx);
692}
693
694static void mixer_poweroff(struct mixer_context *ctx)
695{
696 struct mixer_resources *res = &ctx->mixer_res;
697
698 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
699 679
700 mutex_lock(&ctx->mixer_mutex); 680 drm_hdmi_ctx = mdata->parent_ctx;
701 if (!ctx->powered) 681 drm_dev = drm_hdmi_ctx->drm_dev;
702 goto out;
703 mutex_unlock(&ctx->mixer_mutex);
704 682
705 ctx->int_en = mixer_reg_read(res, MXR_INT_EN); 683 if (is_drm_iommu_supported(drm_dev)) {
684 if (enable)
685 return drm_iommu_attach_device(drm_dev, mdata->dev);
706 686
707 clk_disable(res->mixer); 687 drm_iommu_detach_device(drm_dev, mdata->dev);
708 if (ctx->vp_enabled) {
709 clk_disable(res->vp);
710 clk_disable(res->sclk_mixer);
711 } 688 }
712 689 return 0;
713 pm_runtime_put_sync(ctx->dev);
714
715 mutex_lock(&ctx->mixer_mutex);
716 ctx->powered = false;
717
718out:
719 mutex_unlock(&ctx->mixer_mutex);
720} 690}
721 691
722static int mixer_enable_vblank(void *ctx, int pipe) 692static int mixer_enable_vblank(void *ctx, int pipe)
@@ -746,39 +716,6 @@ static void mixer_disable_vblank(void *ctx)
746 mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC); 716 mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
747} 717}
748 718
749static void mixer_dpms(void *ctx, int mode)
750{
751 struct mixer_context *mixer_ctx = ctx;
752
753 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
754
755 switch (mode) {
756 case DRM_MODE_DPMS_ON:
757 mixer_poweron(mixer_ctx);
758 break;
759 case DRM_MODE_DPMS_STANDBY:
760 case DRM_MODE_DPMS_SUSPEND:
761 case DRM_MODE_DPMS_OFF:
762 mixer_poweroff(mixer_ctx);
763 break;
764 default:
765 DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
766 break;
767 }
768}
769
770static void mixer_wait_for_vblank(void *ctx)
771{
772 struct mixer_context *mixer_ctx = ctx;
773 struct mixer_resources *res = &mixer_ctx->mixer_res;
774 int ret;
775
776 ret = wait_for((mixer_reg_read(res, MXR_INT_STATUS) &
777 MXR_INT_STATUS_VSYNC), 50);
778 if (ret < 0)
779 DRM_DEBUG_KMS("vblank wait timed out.\n");
780}
781
782static void mixer_win_mode_set(void *ctx, 719static void mixer_win_mode_set(void *ctx,
783 struct exynos_drm_overlay *overlay) 720 struct exynos_drm_overlay *overlay)
784{ 721{
@@ -811,9 +748,7 @@ static void mixer_win_mode_set(void *ctx,
811 win_data = &mixer_ctx->win_data[win]; 748 win_data = &mixer_ctx->win_data[win];
812 749
813 win_data->dma_addr = overlay->dma_addr[0]; 750 win_data->dma_addr = overlay->dma_addr[0];
814 win_data->vaddr = overlay->vaddr[0];
815 win_data->chroma_dma_addr = overlay->dma_addr[1]; 751 win_data->chroma_dma_addr = overlay->dma_addr[1];
816 win_data->chroma_vaddr = overlay->vaddr[1];
817 win_data->pixel_format = overlay->pixel_format; 752 win_data->pixel_format = overlay->pixel_format;
818 win_data->bpp = overlay->bpp; 753 win_data->bpp = overlay->bpp;
819 754
@@ -845,6 +780,8 @@ static void mixer_win_commit(void *ctx, int win)
845 vp_video_buffer(mixer_ctx, win); 780 vp_video_buffer(mixer_ctx, win);
846 else 781 else
847 mixer_graph_buffer(mixer_ctx, win); 782 mixer_graph_buffer(mixer_ctx, win);
783
784 mixer_ctx->win_data[win].enabled = true;
848} 785}
849 786
850static void mixer_win_disable(void *ctx, int win) 787static void mixer_win_disable(void *ctx, int win)
@@ -855,6 +792,14 @@ static void mixer_win_disable(void *ctx, int win)
855 792
856 DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win); 793 DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
857 794
795 mutex_lock(&mixer_ctx->mixer_mutex);
796 if (!mixer_ctx->powered) {
797 mutex_unlock(&mixer_ctx->mixer_mutex);
798 mixer_ctx->win_data[win].resume = false;
799 return;
800 }
801 mutex_unlock(&mixer_ctx->mixer_mutex);
802
858 spin_lock_irqsave(&res->reg_slock, flags); 803 spin_lock_irqsave(&res->reg_slock, flags);
859 mixer_vsync_set_update(mixer_ctx, false); 804 mixer_vsync_set_update(mixer_ctx, false);
860 805
@@ -862,59 +807,149 @@ static void mixer_win_disable(void *ctx, int win)
862 807
863 mixer_vsync_set_update(mixer_ctx, true); 808 mixer_vsync_set_update(mixer_ctx, true);
864 spin_unlock_irqrestore(&res->reg_slock, flags); 809 spin_unlock_irqrestore(&res->reg_slock, flags);
810
811 mixer_ctx->win_data[win].enabled = false;
865} 812}
866 813
867static struct exynos_mixer_ops mixer_ops = { 814static void mixer_wait_for_vblank(void *ctx)
868 /* manager */ 815{
869 .enable_vblank = mixer_enable_vblank, 816 struct mixer_context *mixer_ctx = ctx;
870 .disable_vblank = mixer_disable_vblank,
871 .dpms = mixer_dpms,
872 817
873 /* overlay */ 818 mutex_lock(&mixer_ctx->mixer_mutex);
874 .wait_for_vblank = mixer_wait_for_vblank, 819 if (!mixer_ctx->powered) {
875 .win_mode_set = mixer_win_mode_set, 820 mutex_unlock(&mixer_ctx->mixer_mutex);
876 .win_commit = mixer_win_commit, 821 return;
877 .win_disable = mixer_win_disable, 822 }
878}; 823 mutex_unlock(&mixer_ctx->mixer_mutex);
824
825 atomic_set(&mixer_ctx->wait_vsync_event, 1);
826
827 /*
828 * wait for MIXER to signal VSYNC interrupt or return after
829 * timeout which is set to 50ms (refresh rate of 20).
830 */
831 if (!wait_event_timeout(mixer_ctx->wait_vsync_queue,
832 !atomic_read(&mixer_ctx->wait_vsync_event),
833 DRM_HZ/20))
834 DRM_DEBUG_KMS("vblank wait timed out.\n");
835}
879 836
880/* for pageflip event */ 837static void mixer_window_suspend(struct mixer_context *ctx)
881static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
882{ 838{
883 struct exynos_drm_private *dev_priv = drm_dev->dev_private; 839 struct hdmi_win_data *win_data;
884 struct drm_pending_vblank_event *e, *t; 840 int i;
885 struct timeval now;
886 unsigned long flags;
887 bool is_checked = false;
888 841
889 spin_lock_irqsave(&drm_dev->event_lock, flags); 842 for (i = 0; i < MIXER_WIN_NR; i++) {
843 win_data = &ctx->win_data[i];
844 win_data->resume = win_data->enabled;
845 mixer_win_disable(ctx, i);
846 }
847 mixer_wait_for_vblank(ctx);
848}
890 849
891 list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list, 850static void mixer_window_resume(struct mixer_context *ctx)
892 base.link) { 851{
893 /* if event's pipe isn't same as crtc then ignore it. */ 852 struct hdmi_win_data *win_data;
894 if (crtc != e->pipe) 853 int i;
895 continue;
896 854
897 is_checked = true; 855 for (i = 0; i < MIXER_WIN_NR; i++) {
898 do_gettimeofday(&now); 856 win_data = &ctx->win_data[i];
899 e->event.sequence = 0; 857 win_data->enabled = win_data->resume;
900 e->event.tv_sec = now.tv_sec; 858 win_data->resume = false;
901 e->event.tv_usec = now.tv_usec; 859 }
860}
902 861
903 list_move_tail(&e->base.link, &e->base.file_priv->event_list); 862static void mixer_poweron(struct mixer_context *ctx)
904 wake_up_interruptible(&e->base.file_priv->event_wait); 863{
864 struct mixer_resources *res = &ctx->mixer_res;
865
866 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
867
868 mutex_lock(&ctx->mixer_mutex);
869 if (ctx->powered) {
870 mutex_unlock(&ctx->mixer_mutex);
871 return;
905 } 872 }
873 ctx->powered = true;
874 mutex_unlock(&ctx->mixer_mutex);
906 875
907 if (is_checked) 876 clk_enable(res->mixer);
908 /* 877 if (ctx->vp_enabled) {
909 * call drm_vblank_put only in case that drm_vblank_get was 878 clk_enable(res->vp);
910 * called. 879 clk_enable(res->sclk_mixer);
911 */ 880 }
912 if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0) 881
913 drm_vblank_put(drm_dev, crtc); 882 mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
883 mixer_win_reset(ctx);
884
885 mixer_window_resume(ctx);
886}
887
888static void mixer_poweroff(struct mixer_context *ctx)
889{
890 struct mixer_resources *res = &ctx->mixer_res;
891
892 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
893
894 mutex_lock(&ctx->mixer_mutex);
895 if (!ctx->powered)
896 goto out;
897 mutex_unlock(&ctx->mixer_mutex);
898
899 mixer_window_suspend(ctx);
900
901 ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
902
903 clk_disable(res->mixer);
904 if (ctx->vp_enabled) {
905 clk_disable(res->vp);
906 clk_disable(res->sclk_mixer);
907 }
908
909 mutex_lock(&ctx->mixer_mutex);
910 ctx->powered = false;
911
912out:
913 mutex_unlock(&ctx->mixer_mutex);
914}
915
916static void mixer_dpms(void *ctx, int mode)
917{
918 struct mixer_context *mixer_ctx = ctx;
919
920 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
914 921
915 spin_unlock_irqrestore(&drm_dev->event_lock, flags); 922 switch (mode) {
923 case DRM_MODE_DPMS_ON:
924 if (pm_runtime_suspended(mixer_ctx->dev))
925 pm_runtime_get_sync(mixer_ctx->dev);
926 break;
927 case DRM_MODE_DPMS_STANDBY:
928 case DRM_MODE_DPMS_SUSPEND:
929 case DRM_MODE_DPMS_OFF:
930 if (!pm_runtime_suspended(mixer_ctx->dev))
931 pm_runtime_put_sync(mixer_ctx->dev);
932 break;
933 default:
934 DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
935 break;
936 }
916} 937}
917 938
939static struct exynos_mixer_ops mixer_ops = {
940 /* manager */
941 .iommu_on = mixer_iommu_on,
942 .enable_vblank = mixer_enable_vblank,
943 .disable_vblank = mixer_disable_vblank,
944 .wait_for_vblank = mixer_wait_for_vblank,
945 .dpms = mixer_dpms,
946
947 /* overlay */
948 .win_mode_set = mixer_win_mode_set,
949 .win_commit = mixer_win_commit,
950 .win_disable = mixer_win_disable,
951};
952
918static irqreturn_t mixer_irq_handler(int irq, void *arg) 953static irqreturn_t mixer_irq_handler(int irq, void *arg)
919{ 954{
920 struct exynos_drm_hdmi_context *drm_hdmi_ctx = arg; 955 struct exynos_drm_hdmi_context *drm_hdmi_ctx = arg;
@@ -943,7 +978,14 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
943 } 978 }
944 979
945 drm_handle_vblank(drm_hdmi_ctx->drm_dev, ctx->pipe); 980 drm_handle_vblank(drm_hdmi_ctx->drm_dev, ctx->pipe);
946 mixer_finish_pageflip(drm_hdmi_ctx->drm_dev, ctx->pipe); 981 exynos_drm_crtc_finish_pageflip(drm_hdmi_ctx->drm_dev,
982 ctx->pipe);
983
984 /* set wait vsync event to zero and wake up queue. */
985 if (atomic_read(&ctx->wait_vsync_event)) {
986 atomic_set(&ctx->wait_vsync_event, 0);
987 DRM_WAKEUP(&ctx->wait_vsync_queue);
988 }
947 } 989 }
948 990
949out: 991out:
@@ -960,8 +1002,8 @@ out:
960 return IRQ_HANDLED; 1002 return IRQ_HANDLED;
961} 1003}
962 1004
963static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx, 1005static int mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
964 struct platform_device *pdev) 1006 struct platform_device *pdev)
965{ 1007{
966 struct mixer_context *mixer_ctx = ctx->ctx; 1008 struct mixer_context *mixer_ctx = ctx->ctx;
967 struct device *dev = &pdev->dev; 1009 struct device *dev = &pdev->dev;
@@ -971,85 +1013,69 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
971 1013
972 spin_lock_init(&mixer_res->reg_slock); 1014 spin_lock_init(&mixer_res->reg_slock);
973 1015
974 mixer_res->mixer = clk_get(dev, "mixer"); 1016 mixer_res->mixer = devm_clk_get(dev, "mixer");
975 if (IS_ERR_OR_NULL(mixer_res->mixer)) { 1017 if (IS_ERR_OR_NULL(mixer_res->mixer)) {
976 dev_err(dev, "failed to get clock 'mixer'\n"); 1018 dev_err(dev, "failed to get clock 'mixer'\n");
977 ret = -ENODEV; 1019 return -ENODEV;
978 goto fail;
979 } 1020 }
980 1021
981 mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi"); 1022 mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
982 if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) { 1023 if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) {
983 dev_err(dev, "failed to get clock 'sclk_hdmi'\n"); 1024 dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
984 ret = -ENODEV; 1025 return -ENODEV;
985 goto fail;
986 } 1026 }
987 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1027 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
988 if (res == NULL) { 1028 if (res == NULL) {
989 dev_err(dev, "get memory resource failed.\n"); 1029 dev_err(dev, "get memory resource failed.\n");
990 ret = -ENXIO; 1030 return -ENXIO;
991 goto fail;
992 } 1031 }
993 1032
994 mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start, 1033 mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start,
995 resource_size(res)); 1034 resource_size(res));
996 if (mixer_res->mixer_regs == NULL) { 1035 if (mixer_res->mixer_regs == NULL) {
997 dev_err(dev, "register mapping failed.\n"); 1036 dev_err(dev, "register mapping failed.\n");
998 ret = -ENXIO; 1037 return -ENXIO;
999 goto fail;
1000 } 1038 }
1001 1039
1002 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1040 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1003 if (res == NULL) { 1041 if (res == NULL) {
1004 dev_err(dev, "get interrupt resource failed.\n"); 1042 dev_err(dev, "get interrupt resource failed.\n");
1005 ret = -ENXIO; 1043 return -ENXIO;
1006 goto fail;
1007 } 1044 }
1008 1045
1009 ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler, 1046 ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler,
1010 0, "drm_mixer", ctx); 1047 0, "drm_mixer", ctx);
1011 if (ret) { 1048 if (ret) {
1012 dev_err(dev, "request interrupt failed.\n"); 1049 dev_err(dev, "request interrupt failed.\n");
1013 goto fail; 1050 return ret;
1014 } 1051 }
1015 mixer_res->irq = res->start; 1052 mixer_res->irq = res->start;
1016 1053
1017 return 0; 1054 return 0;
1018
1019fail:
1020 if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi))
1021 clk_put(mixer_res->sclk_hdmi);
1022 if (!IS_ERR_OR_NULL(mixer_res->mixer))
1023 clk_put(mixer_res->mixer);
1024 return ret;
1025} 1055}
1026 1056
1027static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx, 1057static int vp_resources_init(struct exynos_drm_hdmi_context *ctx,
1028 struct platform_device *pdev) 1058 struct platform_device *pdev)
1029{ 1059{
1030 struct mixer_context *mixer_ctx = ctx->ctx; 1060 struct mixer_context *mixer_ctx = ctx->ctx;
1031 struct device *dev = &pdev->dev; 1061 struct device *dev = &pdev->dev;
1032 struct mixer_resources *mixer_res = &mixer_ctx->mixer_res; 1062 struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
1033 struct resource *res; 1063 struct resource *res;
1034 int ret;
1035 1064
1036 mixer_res->vp = clk_get(dev, "vp"); 1065 mixer_res->vp = devm_clk_get(dev, "vp");
1037 if (IS_ERR_OR_NULL(mixer_res->vp)) { 1066 if (IS_ERR_OR_NULL(mixer_res->vp)) {
1038 dev_err(dev, "failed to get clock 'vp'\n"); 1067 dev_err(dev, "failed to get clock 'vp'\n");
1039 ret = -ENODEV; 1068 return -ENODEV;
1040 goto fail;
1041 } 1069 }
1042 mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer"); 1070 mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
1043 if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) { 1071 if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) {
1044 dev_err(dev, "failed to get clock 'sclk_mixer'\n"); 1072 dev_err(dev, "failed to get clock 'sclk_mixer'\n");
1045 ret = -ENODEV; 1073 return -ENODEV;
1046 goto fail;
1047 } 1074 }
1048 mixer_res->sclk_dac = clk_get(dev, "sclk_dac"); 1075 mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
1049 if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) { 1076 if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) {
1050 dev_err(dev, "failed to get clock 'sclk_dac'\n"); 1077 dev_err(dev, "failed to get clock 'sclk_dac'\n");
1051 ret = -ENODEV; 1078 return -ENODEV;
1052 goto fail;
1053 } 1079 }
1054 1080
1055 if (mixer_res->sclk_hdmi) 1081 if (mixer_res->sclk_hdmi)
@@ -1058,28 +1084,17 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
1058 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1084 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1059 if (res == NULL) { 1085 if (res == NULL) {
1060 dev_err(dev, "get memory resource failed.\n"); 1086 dev_err(dev, "get memory resource failed.\n");
1061 ret = -ENXIO; 1087 return -ENXIO;
1062 goto fail;
1063 } 1088 }
1064 1089
1065 mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start, 1090 mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start,
1066 resource_size(res)); 1091 resource_size(res));
1067 if (mixer_res->vp_regs == NULL) { 1092 if (mixer_res->vp_regs == NULL) {
1068 dev_err(dev, "register mapping failed.\n"); 1093 dev_err(dev, "register mapping failed.\n");
1069 ret = -ENXIO; 1094 return -ENXIO;
1070 goto fail;
1071 } 1095 }
1072 1096
1073 return 0; 1097 return 0;
1074
1075fail:
1076 if (!IS_ERR_OR_NULL(mixer_res->sclk_dac))
1077 clk_put(mixer_res->sclk_dac);
1078 if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer))
1079 clk_put(mixer_res->sclk_mixer);
1080 if (!IS_ERR_OR_NULL(mixer_res->vp))
1081 clk_put(mixer_res->vp);
1082 return ret;
1083} 1098}
1084 1099
1085static struct mixer_drv_data exynos5_mxr_drv_data = { 1100static struct mixer_drv_data exynos5_mxr_drv_data = {
@@ -1113,7 +1128,7 @@ static struct of_device_id mixer_match_types[] = {
1113 } 1128 }
1114}; 1129};
1115 1130
1116static int __devinit mixer_probe(struct platform_device *pdev) 1131static int mixer_probe(struct platform_device *pdev)
1117{ 1132{
1118 struct device *dev = &pdev->dev; 1133 struct device *dev = &pdev->dev;
1119 struct exynos_drm_hdmi_context *drm_hdmi_ctx; 1134 struct exynos_drm_hdmi_context *drm_hdmi_ctx;
@@ -1142,16 +1157,19 @@ static int __devinit mixer_probe(struct platform_device *pdev)
1142 const struct of_device_id *match; 1157 const struct of_device_id *match;
1143 match = of_match_node(of_match_ptr(mixer_match_types), 1158 match = of_match_node(of_match_ptr(mixer_match_types),
1144 pdev->dev.of_node); 1159 pdev->dev.of_node);
1145 drv = match->data; 1160 drv = (struct mixer_drv_data *)match->data;
1146 } else { 1161 } else {
1147 drv = (struct mixer_drv_data *) 1162 drv = (struct mixer_drv_data *)
1148 platform_get_device_id(pdev)->driver_data; 1163 platform_get_device_id(pdev)->driver_data;
1149 } 1164 }
1150 1165
1151 ctx->dev = &pdev->dev; 1166 ctx->dev = &pdev->dev;
1167 ctx->parent_ctx = (void *)drm_hdmi_ctx;
1152 drm_hdmi_ctx->ctx = (void *)ctx; 1168 drm_hdmi_ctx->ctx = (void *)ctx;
1153 ctx->vp_enabled = drv->is_vp_enabled; 1169 ctx->vp_enabled = drv->is_vp_enabled;
1154 ctx->mxr_ver = drv->version; 1170 ctx->mxr_ver = drv->version;
1171 DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
1172 atomic_set(&ctx->wait_vsync_event, 0);
1155 1173
1156 platform_set_drvdata(pdev, drm_hdmi_ctx); 1174 platform_set_drvdata(pdev, drm_hdmi_ctx);
1157 1175
@@ -1202,13 +1220,66 @@ static int mixer_suspend(struct device *dev)
1202 struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev); 1220 struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
1203 struct mixer_context *ctx = drm_hdmi_ctx->ctx; 1221 struct mixer_context *ctx = drm_hdmi_ctx->ctx;
1204 1222
1223 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1224
1225 if (pm_runtime_suspended(dev)) {
1226 DRM_DEBUG_KMS("%s : Already suspended\n", __func__);
1227 return 0;
1228 }
1229
1205 mixer_poweroff(ctx); 1230 mixer_poweroff(ctx);
1206 1231
1207 return 0; 1232 return 0;
1208} 1233}
1234
1235static int mixer_resume(struct device *dev)
1236{
1237 struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
1238 struct mixer_context *ctx = drm_hdmi_ctx->ctx;
1239
1240 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1241
1242 if (!pm_runtime_suspended(dev)) {
1243 DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
1244 return 0;
1245 }
1246
1247 mixer_poweron(ctx);
1248
1249 return 0;
1250}
1251#endif
1252
1253#ifdef CONFIG_PM_RUNTIME
1254static int mixer_runtime_suspend(struct device *dev)
1255{
1256 struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
1257 struct mixer_context *ctx = drm_hdmi_ctx->ctx;
1258
1259 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1260
1261 mixer_poweroff(ctx);
1262
1263 return 0;
1264}
1265
1266static int mixer_runtime_resume(struct device *dev)
1267{
1268 struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
1269 struct mixer_context *ctx = drm_hdmi_ctx->ctx;
1270
1271 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1272
1273 mixer_poweron(ctx);
1274
1275 return 0;
1276}
1209#endif 1277#endif
1210 1278
1211static SIMPLE_DEV_PM_OPS(mixer_pm_ops, mixer_suspend, NULL); 1279static const struct dev_pm_ops mixer_pm_ops = {
1280 SET_SYSTEM_SLEEP_PM_OPS(mixer_suspend, mixer_resume)
1281 SET_RUNTIME_PM_OPS(mixer_runtime_suspend, mixer_runtime_resume, NULL)
1282};
1212 1283
1213struct platform_driver mixer_driver = { 1284struct platform_driver mixer_driver = {
1214 .driver = { 1285 .driver = {
@@ -1218,6 +1289,6 @@ struct platform_driver mixer_driver = {
1218 .of_match_table = mixer_match_types, 1289 .of_match_table = mixer_match_types,
1219 }, 1290 },
1220 .probe = mixer_probe, 1291 .probe = mixer_probe,
1221 .remove = __devexit_p(mixer_remove), 1292 .remove = mixer_remove,
1222 .id_table = mixer_driver_types, 1293 .id_table = mixer_driver_types,
1223}; 1294};
diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h
new file mode 100644
index 000000000000..b4f9ca1fd851
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-fimc.h
@@ -0,0 +1,669 @@
1/* drivers/gpu/drm/exynos/regs-fimc.h
2 *
3 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
6 * Register definition file for Samsung Camera Interface (FIMC) driver
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#ifndef EXYNOS_REGS_FIMC_H
14#define EXYNOS_REGS_FIMC_H
15
16/*
17 * Register part
18*/
19/* Input source format */
20#define EXYNOS_CISRCFMT (0x00)
21/* Window offset */
22#define EXYNOS_CIWDOFST (0x04)
23/* Global control */
24#define EXYNOS_CIGCTRL (0x08)
25/* Window offset 2 */
26#define EXYNOS_CIWDOFST2 (0x14)
27/* Y 1st frame start address for output DMA */
28#define EXYNOS_CIOYSA1 (0x18)
29/* Y 2nd frame start address for output DMA */
30#define EXYNOS_CIOYSA2 (0x1c)
31/* Y 3rd frame start address for output DMA */
32#define EXYNOS_CIOYSA3 (0x20)
33/* Y 4th frame start address for output DMA */
34#define EXYNOS_CIOYSA4 (0x24)
35/* Cb 1st frame start address for output DMA */
36#define EXYNOS_CIOCBSA1 (0x28)
37/* Cb 2nd frame start address for output DMA */
38#define EXYNOS_CIOCBSA2 (0x2c)
39/* Cb 3rd frame start address for output DMA */
40#define EXYNOS_CIOCBSA3 (0x30)
41/* Cb 4th frame start address for output DMA */
42#define EXYNOS_CIOCBSA4 (0x34)
43/* Cr 1st frame start address for output DMA */
44#define EXYNOS_CIOCRSA1 (0x38)
45/* Cr 2nd frame start address for output DMA */
46#define EXYNOS_CIOCRSA2 (0x3c)
47/* Cr 3rd frame start address for output DMA */
48#define EXYNOS_CIOCRSA3 (0x40)
49/* Cr 4th frame start address for output DMA */
50#define EXYNOS_CIOCRSA4 (0x44)
51/* Target image format */
52#define EXYNOS_CITRGFMT (0x48)
53/* Output DMA control */
54#define EXYNOS_CIOCTRL (0x4c)
55/* Pre-scaler control 1 */
56#define EXYNOS_CISCPRERATIO (0x50)
57/* Pre-scaler control 2 */
58#define EXYNOS_CISCPREDST (0x54)
59/* Main scaler control */
60#define EXYNOS_CISCCTRL (0x58)
61/* Target area */
62#define EXYNOS_CITAREA (0x5c)
63/* Status */
64#define EXYNOS_CISTATUS (0x64)
65/* Status2 */
66#define EXYNOS_CISTATUS2 (0x68)
67/* Image capture enable command */
68#define EXYNOS_CIIMGCPT (0xc0)
69/* Capture sequence */
70#define EXYNOS_CICPTSEQ (0xc4)
71/* Image effects */
72#define EXYNOS_CIIMGEFF (0xd0)
73/* Y frame start address for input DMA */
74#define EXYNOS_CIIYSA0 (0xd4)
75/* Cb frame start address for input DMA */
76#define EXYNOS_CIICBSA0 (0xd8)
77/* Cr frame start address for input DMA */
78#define EXYNOS_CIICRSA0 (0xdc)
79/* Input DMA Y Line Skip */
80#define EXYNOS_CIILINESKIP_Y (0xec)
81/* Input DMA Cb Line Skip */
82#define EXYNOS_CIILINESKIP_CB (0xf0)
83/* Input DMA Cr Line Skip */
84#define EXYNOS_CIILINESKIP_CR (0xf4)
85/* Real input DMA image size */
86#define EXYNOS_CIREAL_ISIZE (0xf8)
87/* Input DMA control */
88#define EXYNOS_MSCTRL (0xfc)
89/* Y frame start address for input DMA */
90#define EXYNOS_CIIYSA1 (0x144)
91/* Cb frame start address for input DMA */
92#define EXYNOS_CIICBSA1 (0x148)
93/* Cr frame start address for input DMA */
94#define EXYNOS_CIICRSA1 (0x14c)
95/* Output DMA Y offset */
96#define EXYNOS_CIOYOFF (0x168)
97/* Output DMA CB offset */
98#define EXYNOS_CIOCBOFF (0x16c)
99/* Output DMA CR offset */
100#define EXYNOS_CIOCROFF (0x170)
101/* Input DMA Y offset */
102#define EXYNOS_CIIYOFF (0x174)
103/* Input DMA CB offset */
104#define EXYNOS_CIICBOFF (0x178)
105/* Input DMA CR offset */
106#define EXYNOS_CIICROFF (0x17c)
107/* Input DMA original image size */
108#define EXYNOS_ORGISIZE (0x180)
109/* Output DMA original image size */
110#define EXYNOS_ORGOSIZE (0x184)
111/* Real output DMA image size */
112#define EXYNOS_CIEXTEN (0x188)
113/* DMA parameter */
114#define EXYNOS_CIDMAPARAM (0x18c)
115/* MIPI CSI image format */
116#define EXYNOS_CSIIMGFMT (0x194)
117/* FIMC Clock Source Select */
118#define EXYNOS_MISC_FIMC (0x198)
119
120/* Add for FIMC v5.1 */
121/* Output Frame Buffer Sequence */
122#define EXYNOS_CIFCNTSEQ (0x1fc)
123/* Y 5th frame start address for output DMA */
124#define EXYNOS_CIOYSA5 (0x200)
125/* Y 6th frame start address for output DMA */
126#define EXYNOS_CIOYSA6 (0x204)
127/* Y 7th frame start address for output DMA */
128#define EXYNOS_CIOYSA7 (0x208)
129/* Y 8th frame start address for output DMA */
130#define EXYNOS_CIOYSA8 (0x20c)
131/* Y 9th frame start address for output DMA */
132#define EXYNOS_CIOYSA9 (0x210)
133/* Y 10th frame start address for output DMA */
134#define EXYNOS_CIOYSA10 (0x214)
135/* Y 11th frame start address for output DMA */
136#define EXYNOS_CIOYSA11 (0x218)
137/* Y 12th frame start address for output DMA */
138#define EXYNOS_CIOYSA12 (0x21c)
139/* Y 13th frame start address for output DMA */
140#define EXYNOS_CIOYSA13 (0x220)
141/* Y 14th frame start address for output DMA */
142#define EXYNOS_CIOYSA14 (0x224)
143/* Y 15th frame start address for output DMA */
144#define EXYNOS_CIOYSA15 (0x228)
145/* Y 16th frame start address for output DMA */
146#define EXYNOS_CIOYSA16 (0x22c)
147/* Y 17th frame start address for output DMA */
148#define EXYNOS_CIOYSA17 (0x230)
149/* Y 18th frame start address for output DMA */
150#define EXYNOS_CIOYSA18 (0x234)
151/* Y 19th frame start address for output DMA */
152#define EXYNOS_CIOYSA19 (0x238)
153/* Y 20th frame start address for output DMA */
154#define EXYNOS_CIOYSA20 (0x23c)
155/* Y 21th frame start address for output DMA */
156#define EXYNOS_CIOYSA21 (0x240)
157/* Y 22th frame start address for output DMA */
158#define EXYNOS_CIOYSA22 (0x244)
159/* Y 23th frame start address for output DMA */
160#define EXYNOS_CIOYSA23 (0x248)
161/* Y 24th frame start address for output DMA */
162#define EXYNOS_CIOYSA24 (0x24c)
163/* Y 25th frame start address for output DMA */
164#define EXYNOS_CIOYSA25 (0x250)
165/* Y 26th frame start address for output DMA */
166#define EXYNOS_CIOYSA26 (0x254)
167/* Y 27th frame start address for output DMA */
168#define EXYNOS_CIOYSA27 (0x258)
169/* Y 28th frame start address for output DMA */
170#define EXYNOS_CIOYSA28 (0x25c)
171/* Y 29th frame start address for output DMA */
172#define EXYNOS_CIOYSA29 (0x260)
173/* Y 30th frame start address for output DMA */
174#define EXYNOS_CIOYSA30 (0x264)
175/* Y 31th frame start address for output DMA */
176#define EXYNOS_CIOYSA31 (0x268)
177/* Y 32th frame start address for output DMA */
178#define EXYNOS_CIOYSA32 (0x26c)
179
180/* CB 5th frame start address for output DMA */
181#define EXYNOS_CIOCBSA5 (0x270)
182/* CB 6th frame start address for output DMA */
183#define EXYNOS_CIOCBSA6 (0x274)
184/* CB 7th frame start address for output DMA */
185#define EXYNOS_CIOCBSA7 (0x278)
186/* CB 8th frame start address for output DMA */
187#define EXYNOS_CIOCBSA8 (0x27c)
188/* CB 9th frame start address for output DMA */
189#define EXYNOS_CIOCBSA9 (0x280)
190/* CB 10th frame start address for output DMA */
191#define EXYNOS_CIOCBSA10 (0x284)
192/* CB 11th frame start address for output DMA */
193#define EXYNOS_CIOCBSA11 (0x288)
194/* CB 12th frame start address for output DMA */
195#define EXYNOS_CIOCBSA12 (0x28c)
196/* CB 13th frame start address for output DMA */
197#define EXYNOS_CIOCBSA13 (0x290)
198/* CB 14th frame start address for output DMA */
199#define EXYNOS_CIOCBSA14 (0x294)
200/* CB 15th frame start address for output DMA */
201#define EXYNOS_CIOCBSA15 (0x298)
202/* CB 16th frame start address for output DMA */
203#define EXYNOS_CIOCBSA16 (0x29c)
204/* CB 17th frame start address for output DMA */
205#define EXYNOS_CIOCBSA17 (0x2a0)
206/* CB 18th frame start address for output DMA */
207#define EXYNOS_CIOCBSA18 (0x2a4)
208/* CB 19th frame start address for output DMA */
209#define EXYNOS_CIOCBSA19 (0x2a8)
210/* CB 20th frame start address for output DMA */
211#define EXYNOS_CIOCBSA20 (0x2ac)
212/* CB 21th frame start address for output DMA */
213#define EXYNOS_CIOCBSA21 (0x2b0)
214/* CB 22th frame start address for output DMA */
215#define EXYNOS_CIOCBSA22 (0x2b4)
216/* CB 23th frame start address for output DMA */
217#define EXYNOS_CIOCBSA23 (0x2b8)
218/* CB 24th frame start address for output DMA */
219#define EXYNOS_CIOCBSA24 (0x2bc)
220/* CB 25th frame start address for output DMA */
221#define EXYNOS_CIOCBSA25 (0x2c0)
222/* CB 26th frame start address for output DMA */
223#define EXYNOS_CIOCBSA26 (0x2c4)
224/* CB 27th frame start address for output DMA */
225#define EXYNOS_CIOCBSA27 (0x2c8)
226/* CB 28th frame start address for output DMA */
227#define EXYNOS_CIOCBSA28 (0x2cc)
228/* CB 29th frame start address for output DMA */
229#define EXYNOS_CIOCBSA29 (0x2d0)
230/* CB 30th frame start address for output DMA */
231#define EXYNOS_CIOCBSA30 (0x2d4)
232/* CB 31th frame start address for output DMA */
233#define EXYNOS_CIOCBSA31 (0x2d8)
234/* CB 32th frame start address for output DMA */
235#define EXYNOS_CIOCBSA32 (0x2dc)
236
237/* CR 5th frame start address for output DMA */
238#define EXYNOS_CIOCRSA5 (0x2e0)
239/* CR 6th frame start address for output DMA */
240#define EXYNOS_CIOCRSA6 (0x2e4)
241/* CR 7th frame start address for output DMA */
242#define EXYNOS_CIOCRSA7 (0x2e8)
243/* CR 8th frame start address for output DMA */
244#define EXYNOS_CIOCRSA8 (0x2ec)
245/* CR 9th frame start address for output DMA */
246#define EXYNOS_CIOCRSA9 (0x2f0)
247/* CR 10th frame start address for output DMA */
248#define EXYNOS_CIOCRSA10 (0x2f4)
249/* CR 11th frame start address for output DMA */
250#define EXYNOS_CIOCRSA11 (0x2f8)
251/* CR 12th frame start address for output DMA */
252#define EXYNOS_CIOCRSA12 (0x2fc)
253/* CR 13th frame start address for output DMA */
254#define EXYNOS_CIOCRSA13 (0x300)
255/* CR 14th frame start address for output DMA */
256#define EXYNOS_CIOCRSA14 (0x304)
257/* CR 15th frame start address for output DMA */
258#define EXYNOS_CIOCRSA15 (0x308)
259/* CR 16th frame start address for output DMA */
260#define EXYNOS_CIOCRSA16 (0x30c)
261/* CR 17th frame start address for output DMA */
262#define EXYNOS_CIOCRSA17 (0x310)
263/* CR 18th frame start address for output DMA */
264#define EXYNOS_CIOCRSA18 (0x314)
265/* CR 19th frame start address for output DMA */
266#define EXYNOS_CIOCRSA19 (0x318)
267/* CR 20th frame start address for output DMA */
268#define EXYNOS_CIOCRSA20 (0x31c)
269/* CR 21th frame start address for output DMA */
270#define EXYNOS_CIOCRSA21 (0x320)
271/* CR 22th frame start address for output DMA */
272#define EXYNOS_CIOCRSA22 (0x324)
273/* CR 23th frame start address for output DMA */
274#define EXYNOS_CIOCRSA23 (0x328)
275/* CR 24th frame start address for output DMA */
276#define EXYNOS_CIOCRSA24 (0x32c)
277/* CR 25th frame start address for output DMA */
278#define EXYNOS_CIOCRSA25 (0x330)
279/* CR 26th frame start address for output DMA */
280#define EXYNOS_CIOCRSA26 (0x334)
281/* CR 27th frame start address for output DMA */
282#define EXYNOS_CIOCRSA27 (0x338)
283/* CR 28th frame start address for output DMA */
284#define EXYNOS_CIOCRSA28 (0x33c)
285/* CR 29th frame start address for output DMA */
286#define EXYNOS_CIOCRSA29 (0x340)
287/* CR 30th frame start address for output DMA */
288#define EXYNOS_CIOCRSA30 (0x344)
289/* CR 31th frame start address for output DMA */
290#define EXYNOS_CIOCRSA31 (0x348)
291/* CR 32th frame start address for output DMA */
292#define EXYNOS_CIOCRSA32 (0x34c)
293
294/*
295 * Macro part
296*/
297/* frame start address 1 ~ 4, 5 ~ 32 */
298/* Number of Default PingPong Memory */
299#define DEF_PP 4
300#define EXYNOS_CIOYSA(__x) \
301 (((__x) < DEF_PP) ? \
302 (EXYNOS_CIOYSA1 + (__x) * 4) : \
303 (EXYNOS_CIOYSA5 + ((__x) - DEF_PP) * 4))
304#define EXYNOS_CIOCBSA(__x) \
305 (((__x) < DEF_PP) ? \
306 (EXYNOS_CIOCBSA1 + (__x) * 4) : \
307 (EXYNOS_CIOCBSA5 + ((__x) - DEF_PP) * 4))
308#define EXYNOS_CIOCRSA(__x) \
309 (((__x) < DEF_PP) ? \
310 (EXYNOS_CIOCRSA1 + (__x) * 4) : \
311 (EXYNOS_CIOCRSA5 + ((__x) - DEF_PP) * 4))
312/* Number of Default PingPong Memory */
313#define DEF_IPP 1
314#define EXYNOS_CIIYSA(__x) \
315 (((__x) < DEF_IPP) ? \
316 (EXYNOS_CIIYSA0) : (EXYNOS_CIIYSA1))
317#define EXYNOS_CIICBSA(__x) \
318 (((__x) < DEF_IPP) ? \
319 (EXYNOS_CIICBSA0) : (EXYNOS_CIICBSA1))
320#define EXYNOS_CIICRSA(__x) \
321 (((__x) < DEF_IPP) ? \
322 (EXYNOS_CIICRSA0) : (EXYNOS_CIICRSA1))
323
324#define EXYNOS_CISRCFMT_SOURCEHSIZE(x) ((x) << 16)
325#define EXYNOS_CISRCFMT_SOURCEVSIZE(x) ((x) << 0)
326
327#define EXYNOS_CIWDOFST_WINHOROFST(x) ((x) << 16)
328#define EXYNOS_CIWDOFST_WINVEROFST(x) ((x) << 0)
329
330#define EXYNOS_CIWDOFST2_WINHOROFST2(x) ((x) << 16)
331#define EXYNOS_CIWDOFST2_WINVEROFST2(x) ((x) << 0)
332
333#define EXYNOS_CITRGFMT_TARGETHSIZE(x) (((x) & 0x1fff) << 16)
334#define EXYNOS_CITRGFMT_TARGETVSIZE(x) (((x) & 0x1fff) << 0)
335
336#define EXYNOS_CISCPRERATIO_SHFACTOR(x) ((x) << 28)
337#define EXYNOS_CISCPRERATIO_PREHORRATIO(x) ((x) << 16)
338#define EXYNOS_CISCPRERATIO_PREVERRATIO(x) ((x) << 0)
339
340#define EXYNOS_CISCPREDST_PREDSTWIDTH(x) ((x) << 16)
341#define EXYNOS_CISCPREDST_PREDSTHEIGHT(x) ((x) << 0)
342
343#define EXYNOS_CISCCTRL_MAINHORRATIO(x) ((x) << 16)
344#define EXYNOS_CISCCTRL_MAINVERRATIO(x) ((x) << 0)
345
346#define EXYNOS_CITAREA_TARGET_AREA(x) ((x) << 0)
347
348#define EXYNOS_CISTATUS_GET_FRAME_COUNT(x) (((x) >> 26) & 0x3)
349#define EXYNOS_CISTATUS_GET_FRAME_END(x) (((x) >> 17) & 0x1)
350#define EXYNOS_CISTATUS_GET_LAST_CAPTURE_END(x) (((x) >> 16) & 0x1)
351#define EXYNOS_CISTATUS_GET_LCD_STATUS(x) (((x) >> 9) & 0x1)
352#define EXYNOS_CISTATUS_GET_ENVID_STATUS(x) (((x) >> 8) & 0x1)
353
354#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(x) (((x) >> 7) & 0x3f)
355#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(x) ((x) & 0x3f)
356
 357#define EXYNOS_CIIMGEFF_FIN(x) (((x) & 0x7) << 26)
358#define EXYNOS_CIIMGEFF_PAT_CB(x) ((x) << 13)
359#define EXYNOS_CIIMGEFF_PAT_CR(x) ((x) << 0)
360
361#define EXYNOS_CIILINESKIP(x) (((x) & 0xf) << 24)
362
363#define EXYNOS_CIREAL_ISIZE_HEIGHT(x) ((x) << 16)
364#define EXYNOS_CIREAL_ISIZE_WIDTH(x) ((x) << 0)
365
366#define EXYNOS_MSCTRL_SUCCESSIVE_COUNT(x) ((x) << 24)
367#define EXYNOS_MSCTRL_GET_INDMA_STATUS(x) ((x) & 0x1)
368
369#define EXYNOS_CIOYOFF_VERTICAL(x) ((x) << 16)
370#define EXYNOS_CIOYOFF_HORIZONTAL(x) ((x) << 0)
371
372#define EXYNOS_CIOCBOFF_VERTICAL(x) ((x) << 16)
373#define EXYNOS_CIOCBOFF_HORIZONTAL(x) ((x) << 0)
374
375#define EXYNOS_CIOCROFF_VERTICAL(x) ((x) << 16)
376#define EXYNOS_CIOCROFF_HORIZONTAL(x) ((x) << 0)
377
378#define EXYNOS_CIIYOFF_VERTICAL(x) ((x) << 16)
379#define EXYNOS_CIIYOFF_HORIZONTAL(x) ((x) << 0)
380
381#define EXYNOS_CIICBOFF_VERTICAL(x) ((x) << 16)
382#define EXYNOS_CIICBOFF_HORIZONTAL(x) ((x) << 0)
383
384#define EXYNOS_CIICROFF_VERTICAL(x) ((x) << 16)
385#define EXYNOS_CIICROFF_HORIZONTAL(x) ((x) << 0)
386
387#define EXYNOS_ORGISIZE_VERTICAL(x) ((x) << 16)
388#define EXYNOS_ORGISIZE_HORIZONTAL(x) ((x) << 0)
389
390#define EXYNOS_ORGOSIZE_VERTICAL(x) ((x) << 16)
391#define EXYNOS_ORGOSIZE_HORIZONTAL(x) ((x) << 0)
392
393#define EXYNOS_CIEXTEN_TARGETH_EXT(x) ((((x) & 0x2000) >> 13) << 26)
394#define EXYNOS_CIEXTEN_TARGETV_EXT(x) ((((x) & 0x2000) >> 13) << 24)
395#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT(x) (((x) & 0x3F) << 10)
396#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT(x) ((x) & 0x3F)
397
398/*
399 * Bit definition part
400*/
401/* Source format register */
402#define EXYNOS_CISRCFMT_ITU601_8BIT (1 << 31)
403#define EXYNOS_CISRCFMT_ITU656_8BIT (0 << 31)
404#define EXYNOS_CISRCFMT_ITU601_16BIT (1 << 29)
405#define EXYNOS_CISRCFMT_ORDER422_YCBYCR (0 << 14)
406#define EXYNOS_CISRCFMT_ORDER422_YCRYCB (1 << 14)
407#define EXYNOS_CISRCFMT_ORDER422_CBYCRY (2 << 14)
408#define EXYNOS_CISRCFMT_ORDER422_CRYCBY (3 << 14)
409/* ITU601 16bit only */
410#define EXYNOS_CISRCFMT_ORDER422_Y4CBCRCBCR (0 << 14)
411/* ITU601 16bit only */
412#define EXYNOS_CISRCFMT_ORDER422_Y4CRCBCRCB (1 << 14)
413
414/* Window offset register */
415#define EXYNOS_CIWDOFST_WINOFSEN (1 << 31)
416#define EXYNOS_CIWDOFST_CLROVFIY (1 << 30)
417#define EXYNOS_CIWDOFST_CLROVRLB (1 << 29)
418#define EXYNOS_CIWDOFST_WINHOROFST_MASK (0x7ff << 16)
419#define EXYNOS_CIWDOFST_CLROVFICB (1 << 15)
420#define EXYNOS_CIWDOFST_CLROVFICR (1 << 14)
421#define EXYNOS_CIWDOFST_WINVEROFST_MASK (0xfff << 0)
422
423/* Global control register */
424#define EXYNOS_CIGCTRL_SWRST (1 << 31)
425#define EXYNOS_CIGCTRL_CAMRST_A (1 << 30)
426#define EXYNOS_CIGCTRL_SELCAM_ITU_B (0 << 29)
427#define EXYNOS_CIGCTRL_SELCAM_ITU_A (1 << 29)
428#define EXYNOS_CIGCTRL_SELCAM_ITU_MASK (1 << 29)
429#define EXYNOS_CIGCTRL_TESTPATTERN_NORMAL (0 << 27)
430#define EXYNOS_CIGCTRL_TESTPATTERN_COLOR_BAR (1 << 27)
431#define EXYNOS_CIGCTRL_TESTPATTERN_HOR_INC (2 << 27)
432#define EXYNOS_CIGCTRL_TESTPATTERN_VER_INC (3 << 27)
433#define EXYNOS_CIGCTRL_TESTPATTERN_MASK (3 << 27)
434#define EXYNOS_CIGCTRL_TESTPATTERN_SHIFT (27)
435#define EXYNOS_CIGCTRL_INVPOLPCLK (1 << 26)
436#define EXYNOS_CIGCTRL_INVPOLVSYNC (1 << 25)
437#define EXYNOS_CIGCTRL_INVPOLHREF (1 << 24)
438#define EXYNOS_CIGCTRL_IRQ_OVFEN (1 << 22)
439#define EXYNOS_CIGCTRL_HREF_MASK (1 << 21)
440#define EXYNOS_CIGCTRL_IRQ_EDGE (0 << 20)
441#define EXYNOS_CIGCTRL_IRQ_LEVEL (1 << 20)
442#define EXYNOS_CIGCTRL_IRQ_CLR (1 << 19)
443#define EXYNOS_CIGCTRL_IRQ_END_DISABLE (1 << 18)
444#define EXYNOS_CIGCTRL_IRQ_DISABLE (0 << 16)
445#define EXYNOS_CIGCTRL_IRQ_ENABLE (1 << 16)
446#define EXYNOS_CIGCTRL_SHADOW_DISABLE (1 << 12)
447#define EXYNOS_CIGCTRL_CAM_JPEG (1 << 8)
448#define EXYNOS_CIGCTRL_SELCAM_MIPI_B (0 << 7)
449#define EXYNOS_CIGCTRL_SELCAM_MIPI_A (1 << 7)
450#define EXYNOS_CIGCTRL_SELCAM_MIPI_MASK (1 << 7)
451#define EXYNOS_CIGCTRL_SELWB_CAMIF_CAMERA (0 << 6)
452#define EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK (1 << 6)
453#define EXYNOS_CIGCTRL_SELWRITEBACK_MASK (1 << 10)
454#define EXYNOS_CIGCTRL_SELWRITEBACK_A (1 << 10)
455#define EXYNOS_CIGCTRL_SELWRITEBACK_B (0 << 10)
456#define EXYNOS_CIGCTRL_SELWB_CAMIF_MASK (1 << 6)
457#define EXYNOS_CIGCTRL_CSC_ITU601 (0 << 5)
458#define EXYNOS_CIGCTRL_CSC_ITU709 (1 << 5)
459#define EXYNOS_CIGCTRL_CSC_MASK (1 << 5)
460#define EXYNOS_CIGCTRL_INVPOLHSYNC (1 << 4)
461#define EXYNOS_CIGCTRL_SELCAM_FIMC_ITU (0 << 3)
462#define EXYNOS_CIGCTRL_SELCAM_FIMC_MIPI (1 << 3)
463#define EXYNOS_CIGCTRL_SELCAM_FIMC_MASK (1 << 3)
464#define EXYNOS_CIGCTRL_PROGRESSIVE (0 << 0)
465#define EXYNOS_CIGCTRL_INTERLACE (1 << 0)
466
467/* Window offset2 register */
468#define EXYNOS_CIWDOFST_WINHOROFST2_MASK (0xfff << 16)
 469#define EXYNOS_CIWDOFST_WINVEROFST2_MASK (0xfff << 0)
470
471/* Target format register */
472#define EXYNOS_CITRGFMT_INROT90_CLOCKWISE (1 << 31)
473#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420 (0 << 29)
474#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422 (1 << 29)
475#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE (2 << 29)
476#define EXYNOS_CITRGFMT_OUTFORMAT_RGB (3 << 29)
477#define EXYNOS_CITRGFMT_OUTFORMAT_MASK (3 << 29)
478#define EXYNOS_CITRGFMT_FLIP_SHIFT (14)
479#define EXYNOS_CITRGFMT_FLIP_NORMAL (0 << 14)
480#define EXYNOS_CITRGFMT_FLIP_X_MIRROR (1 << 14)
481#define EXYNOS_CITRGFMT_FLIP_Y_MIRROR (2 << 14)
482#define EXYNOS_CITRGFMT_FLIP_180 (3 << 14)
483#define EXYNOS_CITRGFMT_FLIP_MASK (3 << 14)
484#define EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE (1 << 13)
485#define EXYNOS_CITRGFMT_TARGETV_MASK (0x1fff << 0)
486#define EXYNOS_CITRGFMT_TARGETH_MASK (0x1fff << 16)
487
488/* Output DMA control register */
489#define EXYNOS_CIOCTRL_WEAVE_OUT (1 << 31)
490#define EXYNOS_CIOCTRL_WEAVE_MASK (1 << 31)
491#define EXYNOS_CIOCTRL_LASTENDEN (1 << 30)
492#define EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR (0 << 24)
493#define EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB (1 << 24)
494#define EXYNOS_CIOCTRL_ORDER2P_MSB_CRCB (2 << 24)
495#define EXYNOS_CIOCTRL_ORDER2P_MSB_CBCR (3 << 24)
496#define EXYNOS_CIOCTRL_ORDER2P_SHIFT (24)
497#define EXYNOS_CIOCTRL_ORDER2P_MASK (3 << 24)
498#define EXYNOS_CIOCTRL_YCBCR_3PLANE (0 << 3)
499#define EXYNOS_CIOCTRL_YCBCR_2PLANE (1 << 3)
500#define EXYNOS_CIOCTRL_YCBCR_PLANE_MASK (1 << 3)
501#define EXYNOS_CIOCTRL_LASTIRQ_ENABLE (1 << 2)
502#define EXYNOS_CIOCTRL_ALPHA_OUT (0xff << 4)
503#define EXYNOS_CIOCTRL_ORDER422_YCBYCR (0 << 0)
504#define EXYNOS_CIOCTRL_ORDER422_YCRYCB (1 << 0)
505#define EXYNOS_CIOCTRL_ORDER422_CBYCRY (2 << 0)
506#define EXYNOS_CIOCTRL_ORDER422_CRYCBY (3 << 0)
507#define EXYNOS_CIOCTRL_ORDER422_MASK (3 << 0)
508
509/* Main scaler control register */
510#define EXYNOS_CISCCTRL_SCALERBYPASS (1 << 31)
511#define EXYNOS_CISCCTRL_SCALEUP_H (1 << 30)
512#define EXYNOS_CISCCTRL_SCALEUP_V (1 << 29)
513#define EXYNOS_CISCCTRL_CSCR2Y_NARROW (0 << 28)
514#define EXYNOS_CISCCTRL_CSCR2Y_WIDE (1 << 28)
515#define EXYNOS_CISCCTRL_CSCY2R_NARROW (0 << 27)
516#define EXYNOS_CISCCTRL_CSCY2R_WIDE (1 << 27)
517#define EXYNOS_CISCCTRL_LCDPATHEN_FIFO (1 << 26)
518#define EXYNOS_CISCCTRL_PROGRESSIVE (0 << 25)
519#define EXYNOS_CISCCTRL_INTERLACE (1 << 25)
520#define EXYNOS_CISCCTRL_SCAN_MASK (1 << 25)
521#define EXYNOS_CISCCTRL_SCALERSTART (1 << 15)
522#define EXYNOS_CISCCTRL_INRGB_FMT_RGB565 (0 << 13)
523#define EXYNOS_CISCCTRL_INRGB_FMT_RGB666 (1 << 13)
524#define EXYNOS_CISCCTRL_INRGB_FMT_RGB888 (2 << 13)
525#define EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK (3 << 13)
526#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565 (0 << 11)
527#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB666 (1 << 11)
528#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 (2 << 11)
529#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK (3 << 11)
530#define EXYNOS_CISCCTRL_EXTRGB_NORMAL (0 << 10)
531#define EXYNOS_CISCCTRL_EXTRGB_EXTENSION (1 << 10)
532#define EXYNOS_CISCCTRL_ONE2ONE (1 << 9)
533#define EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK (0x1ff << 0)
534#define EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK (0x1ff << 16)
535
536/* Status register */
537#define EXYNOS_CISTATUS_OVFIY (1 << 31)
538#define EXYNOS_CISTATUS_OVFICB (1 << 30)
539#define EXYNOS_CISTATUS_OVFICR (1 << 29)
540#define EXYNOS_CISTATUS_VSYNC (1 << 28)
541#define EXYNOS_CISTATUS_SCALERSTART (1 << 26)
542#define EXYNOS_CISTATUS_WINOFSTEN (1 << 25)
543#define EXYNOS_CISTATUS_IMGCPTEN (1 << 22)
544#define EXYNOS_CISTATUS_IMGCPTENSC (1 << 21)
545#define EXYNOS_CISTATUS_VSYNC_A (1 << 20)
546#define EXYNOS_CISTATUS_VSYNC_B (1 << 19)
547#define EXYNOS_CISTATUS_OVRLB (1 << 18)
548#define EXYNOS_CISTATUS_FRAMEEND (1 << 17)
549#define EXYNOS_CISTATUS_LASTCAPTUREEND (1 << 16)
550#define EXYNOS_CISTATUS_VVALID_A (1 << 15)
551#define EXYNOS_CISTATUS_VVALID_B (1 << 14)
552
553/* Image capture enable register */
554#define EXYNOS_CIIMGCPT_IMGCPTEN (1 << 31)
555#define EXYNOS_CIIMGCPT_IMGCPTEN_SC (1 << 30)
556#define EXYNOS_CIIMGCPT_CPT_FREN_ENABLE (1 << 25)
557#define EXYNOS_CIIMGCPT_CPT_FRMOD_EN (0 << 18)
558#define EXYNOS_CIIMGCPT_CPT_FRMOD_CNT (1 << 18)
559
560/* Image effects register */
561#define EXYNOS_CIIMGEFF_IE_DISABLE (0 << 30)
562#define EXYNOS_CIIMGEFF_IE_ENABLE (1 << 30)
563#define EXYNOS_CIIMGEFF_IE_SC_BEFORE (0 << 29)
564#define EXYNOS_CIIMGEFF_IE_SC_AFTER (1 << 29)
565#define EXYNOS_CIIMGEFF_FIN_BYPASS (0 << 26)
566#define EXYNOS_CIIMGEFF_FIN_ARBITRARY (1 << 26)
567#define EXYNOS_CIIMGEFF_FIN_NEGATIVE (2 << 26)
568#define EXYNOS_CIIMGEFF_FIN_ARTFREEZE (3 << 26)
569#define EXYNOS_CIIMGEFF_FIN_EMBOSSING (4 << 26)
570#define EXYNOS_CIIMGEFF_FIN_SILHOUETTE (5 << 26)
571#define EXYNOS_CIIMGEFF_FIN_MASK (7 << 26)
 572#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff << 13) | (0xff << 0))
573
574/* Real input DMA size register */
575#define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE (1 << 31)
576#define EXYNOS_CIREAL_ISIZE_ADDR_CH_DISABLE (1 << 30)
577#define EXYNOS_CIREAL_ISIZE_HEIGHT_MASK (0x3FFF << 16)
578#define EXYNOS_CIREAL_ISIZE_WIDTH_MASK (0x3FFF << 0)
579
580/* Input DMA control register */
581#define EXYNOS_MSCTRL_FIELD_MASK (1 << 31)
582#define EXYNOS_MSCTRL_FIELD_WEAVE (1 << 31)
583#define EXYNOS_MSCTRL_FIELD_NORMAL (0 << 31)
584#define EXYNOS_MSCTRL_BURST_CNT (24)
585#define EXYNOS_MSCTRL_BURST_CNT_MASK (0xf << 24)
586#define EXYNOS_MSCTRL_ORDER2P_LSB_CBCR (0 << 16)
587#define EXYNOS_MSCTRL_ORDER2P_LSB_CRCB (1 << 16)
588#define EXYNOS_MSCTRL_ORDER2P_MSB_CRCB (2 << 16)
589#define EXYNOS_MSCTRL_ORDER2P_MSB_CBCR (3 << 16)
590#define EXYNOS_MSCTRL_ORDER2P_SHIFT (16)
591#define EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK (0x3 << 16)
592#define EXYNOS_MSCTRL_C_INT_IN_3PLANE (0 << 15)
593#define EXYNOS_MSCTRL_C_INT_IN_2PLANE (1 << 15)
594#define EXYNOS_MSCTRL_FLIP_SHIFT (13)
595#define EXYNOS_MSCTRL_FLIP_NORMAL (0 << 13)
596#define EXYNOS_MSCTRL_FLIP_X_MIRROR (1 << 13)
597#define EXYNOS_MSCTRL_FLIP_Y_MIRROR (2 << 13)
598#define EXYNOS_MSCTRL_FLIP_180 (3 << 13)
599#define EXYNOS_MSCTRL_FLIP_MASK (3 << 13)
600#define EXYNOS_MSCTRL_ORDER422_CRYCBY (0 << 4)
601#define EXYNOS_MSCTRL_ORDER422_YCRYCB (1 << 4)
602#define EXYNOS_MSCTRL_ORDER422_CBYCRY (2 << 4)
603#define EXYNOS_MSCTRL_ORDER422_YCBYCR (3 << 4)
604#define EXYNOS_MSCTRL_INPUT_EXTCAM (0 << 3)
605#define EXYNOS_MSCTRL_INPUT_MEMORY (1 << 3)
606#define EXYNOS_MSCTRL_INPUT_MASK (1 << 3)
607#define EXYNOS_MSCTRL_INFORMAT_YCBCR420 (0 << 1)
608#define EXYNOS_MSCTRL_INFORMAT_YCBCR422 (1 << 1)
609#define EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE (2 << 1)
610#define EXYNOS_MSCTRL_INFORMAT_RGB (3 << 1)
611#define EXYNOS_MSCTRL_ENVID (1 << 0)
612
613/* DMA parameter register */
614#define EXYNOS_CIDMAPARAM_R_MODE_LINEAR (0 << 29)
615#define EXYNOS_CIDMAPARAM_R_MODE_CONFTILE (1 << 29)
616#define EXYNOS_CIDMAPARAM_R_MODE_16X16 (2 << 29)
617#define EXYNOS_CIDMAPARAM_R_MODE_64X32 (3 << 29)
618#define EXYNOS_CIDMAPARAM_R_MODE_MASK (3 << 29)
619#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_64 (0 << 24)
620#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_128 (1 << 24)
621#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_256 (2 << 24)
622#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_512 (3 << 24)
623#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_1024 (4 << 24)
624#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_2048 (5 << 24)
625#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_4096 (6 << 24)
626#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_1 (0 << 20)
627#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_2 (1 << 20)
628#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_4 (2 << 20)
629#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_8 (3 << 20)
630#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_16 (4 << 20)
631#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_32 (5 << 20)
632#define EXYNOS_CIDMAPARAM_W_MODE_LINEAR (0 << 13)
633#define EXYNOS_CIDMAPARAM_W_MODE_CONFTILE (1 << 13)
634#define EXYNOS_CIDMAPARAM_W_MODE_16X16 (2 << 13)
635#define EXYNOS_CIDMAPARAM_W_MODE_64X32 (3 << 13)
636#define EXYNOS_CIDMAPARAM_W_MODE_MASK (3 << 13)
637#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_64 (0 << 8)
638#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_128 (1 << 8)
639#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_256 (2 << 8)
640#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_512 (3 << 8)
641#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_1024 (4 << 8)
642#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_2048 (5 << 8)
643#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_4096 (6 << 8)
644#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_1 (0 << 4)
645#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_2 (1 << 4)
646#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_4 (2 << 4)
647#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_8 (3 << 4)
648#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_16 (4 << 4)
649#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_32 (5 << 4)
650
651/* Gathering Extension register */
652#define EXYNOS_CIEXTEN_TARGETH_EXT_MASK (1 << 26)
653#define EXYNOS_CIEXTEN_TARGETV_EXT_MASK (1 << 24)
654#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK (0x3F << 10)
655#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK (0x3F)
656#define EXYNOS_CIEXTEN_YUV444_OUT (1 << 22)
657
658/* FIMC Clock Source Select register */
659#define EXYNOS_CLKSRC_HCLK (0 << 1)
660#define EXYNOS_CLKSRC_HCLK_MASK (1 << 1)
661#define EXYNOS_CLKSRC_SCLK (1 << 1)
662
663/* SYSREG for FIMC writeback */
664#define SYSREG_CAMERA_BLK (S3C_VA_SYS + 0x0218)
665#define SYSREG_ISP_BLK (S3C_VA_SYS + 0x020c)
666#define SYSREG_FIMD0WB_DEST_MASK (0x3 << 23)
667#define SYSREG_FIMD0WB_DEST_SHIFT 23
668
669#endif /* EXYNOS_REGS_FIMC_H */
diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h
new file mode 100644
index 000000000000..9ad592707aaf
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-gsc.h
@@ -0,0 +1,284 @@
1/* linux/drivers/gpu/drm/exynos/regs-gsc.h
2 *
3 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Register definition file for Samsung G-Scaler driver
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#ifndef EXYNOS_REGS_GSC_H_
14#define EXYNOS_REGS_GSC_H_
15
16/* G-Scaler enable */
17#define GSC_ENABLE 0x00
18#define GSC_ENABLE_PP_UPDATE_TIME_MASK (1 << 9)
19#define GSC_ENABLE_PP_UPDATE_TIME_CURR (0 << 9)
20#define GSC_ENABLE_PP_UPDATE_TIME_EOPAS (1 << 9)
21#define GSC_ENABLE_CLK_GATE_MODE_MASK (1 << 8)
22#define GSC_ENABLE_CLK_GATE_MODE_FREE (1 << 8)
23#define GSC_ENABLE_IPC_MODE_MASK (1 << 7)
24#define GSC_ENABLE_NORM_MODE (0 << 7)
25#define GSC_ENABLE_IPC_MODE (1 << 7)
26#define GSC_ENABLE_PP_UPDATE_MODE_MASK (1 << 6)
27#define GSC_ENABLE_PP_UPDATE_FIRE_MODE (1 << 6)
28#define GSC_ENABLE_IN_PP_UPDATE (1 << 5)
29#define GSC_ENABLE_ON_CLEAR_MASK (1 << 4)
30#define GSC_ENABLE_ON_CLEAR_ONESHOT (1 << 4)
31#define GSC_ENABLE_QOS_ENABLE (1 << 3)
32#define GSC_ENABLE_OP_STATUS (1 << 2)
33#define GSC_ENABLE_SFR_UPDATE (1 << 1)
34#define GSC_ENABLE_ON (1 << 0)
35
36/* G-Scaler S/W reset */
37#define GSC_SW_RESET 0x04
38#define GSC_SW_RESET_SRESET (1 << 0)
39
40/* G-Scaler IRQ */
41#define GSC_IRQ 0x08
42#define GSC_IRQ_STATUS_OR_IRQ (1 << 17)
43#define GSC_IRQ_STATUS_OR_FRM_DONE (1 << 16)
44#define GSC_IRQ_OR_MASK (1 << 2)
45#define GSC_IRQ_FRMDONE_MASK (1 << 1)
46#define GSC_IRQ_ENABLE (1 << 0)
47
48/* G-Scaler input control */
49#define GSC_IN_CON 0x10
50#define GSC_IN_CHROM_STRIDE_SEL_MASK (1 << 20)
51#define GSC_IN_CHROM_STRIDE_SEPAR (1 << 20)
52#define GSC_IN_RB_SWAP_MASK (1 << 19)
53#define GSC_IN_RB_SWAP (1 << 19)
54#define GSC_IN_ROT_MASK (7 << 16)
55#define GSC_IN_ROT_270 (7 << 16)
56#define GSC_IN_ROT_90_YFLIP (6 << 16)
57#define GSC_IN_ROT_90_XFLIP (5 << 16)
58#define GSC_IN_ROT_90 (4 << 16)
59#define GSC_IN_ROT_180 (3 << 16)
60#define GSC_IN_ROT_YFLIP (2 << 16)
61#define GSC_IN_ROT_XFLIP (1 << 16)
62#define GSC_IN_RGB_TYPE_MASK (3 << 14)
63#define GSC_IN_RGB_HD_WIDE (3 << 14)
64#define GSC_IN_RGB_HD_NARROW (2 << 14)
65#define GSC_IN_RGB_SD_WIDE (1 << 14)
66#define GSC_IN_RGB_SD_NARROW (0 << 14)
67#define GSC_IN_YUV422_1P_ORDER_MASK (1 << 13)
68#define GSC_IN_YUV422_1P_ORDER_LSB_Y (0 << 13)
69#define GSC_IN_YUV422_1P_OEDER_LSB_C (1 << 13)
70#define GSC_IN_CHROMA_ORDER_MASK (1 << 12)
71#define GSC_IN_CHROMA_ORDER_CBCR (0 << 12)
72#define GSC_IN_CHROMA_ORDER_CRCB (1 << 12)
73#define GSC_IN_FORMAT_MASK (7 << 8)
74#define GSC_IN_XRGB8888 (0 << 8)
75#define GSC_IN_RGB565 (1 << 8)
76#define GSC_IN_YUV420_2P (2 << 8)
77#define GSC_IN_YUV420_3P (3 << 8)
78#define GSC_IN_YUV422_1P (4 << 8)
79#define GSC_IN_YUV422_2P (5 << 8)
80#define GSC_IN_YUV422_3P (6 << 8)
81#define GSC_IN_TILE_TYPE_MASK (1 << 4)
82#define GSC_IN_TILE_C_16x8 (0 << 4)
83#define GSC_IN_TILE_C_16x16 (1 << 4)
84#define GSC_IN_TILE_MODE (1 << 3)
85#define GSC_IN_LOCAL_SEL_MASK (3 << 1)
86#define GSC_IN_LOCAL_CAM3 (3 << 1)
87#define GSC_IN_LOCAL_FIMD_WB (2 << 1)
88#define GSC_IN_LOCAL_CAM1 (1 << 1)
89#define GSC_IN_LOCAL_CAM0 (0 << 1)
90#define GSC_IN_PATH_MASK (1 << 0)
91#define GSC_IN_PATH_LOCAL (1 << 0)
92#define GSC_IN_PATH_MEMORY (0 << 0)
93
94/* G-Scaler source image size */
95#define GSC_SRCIMG_SIZE 0x14
96#define GSC_SRCIMG_HEIGHT_MASK (0x1fff << 16)
97#define GSC_SRCIMG_HEIGHT(x) ((x) << 16)
98#define GSC_SRCIMG_WIDTH_MASK (0x3fff << 0)
99#define GSC_SRCIMG_WIDTH(x) ((x) << 0)
100
101/* G-Scaler source image offset */
102#define GSC_SRCIMG_OFFSET 0x18
103#define GSC_SRCIMG_OFFSET_Y_MASK (0x1fff << 16)
104#define GSC_SRCIMG_OFFSET_Y(x) ((x) << 16)
105#define GSC_SRCIMG_OFFSET_X_MASK (0x1fff << 0)
106#define GSC_SRCIMG_OFFSET_X(x) ((x) << 0)
107
108/* G-Scaler cropped source image size */
109#define GSC_CROPPED_SIZE 0x1C
110#define GSC_CROPPED_HEIGHT_MASK (0x1fff << 16)
111#define GSC_CROPPED_HEIGHT(x) ((x) << 16)
112#define GSC_CROPPED_WIDTH_MASK (0x1fff << 0)
113#define GSC_CROPPED_WIDTH(x) ((x) << 0)
114
115/* G-Scaler output control */
116#define GSC_OUT_CON 0x20
117#define GSC_OUT_GLOBAL_ALPHA_MASK (0xff << 24)
118#define GSC_OUT_GLOBAL_ALPHA(x) ((x) << 24)
119#define GSC_OUT_CHROM_STRIDE_SEL_MASK (1 << 13)
120#define GSC_OUT_CHROM_STRIDE_SEPAR (1 << 13)
121#define GSC_OUT_RB_SWAP_MASK (1 << 12)
122#define GSC_OUT_RB_SWAP (1 << 12)
123#define GSC_OUT_RGB_TYPE_MASK (3 << 10)
124#define GSC_OUT_RGB_HD_NARROW (3 << 10)
125#define GSC_OUT_RGB_HD_WIDE (2 << 10)
126#define GSC_OUT_RGB_SD_NARROW (1 << 10)
127#define GSC_OUT_RGB_SD_WIDE (0 << 10)
128#define GSC_OUT_YUV422_1P_ORDER_MASK (1 << 9)
129#define GSC_OUT_YUV422_1P_ORDER_LSB_Y (0 << 9)
130#define GSC_OUT_YUV422_1P_OEDER_LSB_C (1 << 9)
131#define GSC_OUT_CHROMA_ORDER_MASK (1 << 8)
132#define GSC_OUT_CHROMA_ORDER_CBCR (0 << 8)
133#define GSC_OUT_CHROMA_ORDER_CRCB (1 << 8)
134#define GSC_OUT_FORMAT_MASK (7 << 4)
135#define GSC_OUT_XRGB8888 (0 << 4)
136#define GSC_OUT_RGB565 (1 << 4)
137#define GSC_OUT_YUV420_2P (2 << 4)
138#define GSC_OUT_YUV420_3P (3 << 4)
139#define GSC_OUT_YUV422_1P (4 << 4)
140#define GSC_OUT_YUV422_2P (5 << 4)
141#define GSC_OUT_YUV444 (7 << 4)
142#define GSC_OUT_TILE_TYPE_MASK (1 << 2)
143#define GSC_OUT_TILE_C_16x8 (0 << 2)
144#define GSC_OUT_TILE_C_16x16 (1 << 2)
145#define GSC_OUT_TILE_MODE (1 << 1)
146#define GSC_OUT_PATH_MASK (1 << 0)
147#define GSC_OUT_PATH_LOCAL (1 << 0)
148#define GSC_OUT_PATH_MEMORY (0 << 0)
149
150/* G-Scaler scaled destination image size */
151#define GSC_SCALED_SIZE 0x24
152#define GSC_SCALED_HEIGHT_MASK (0x1fff << 16)
153#define GSC_SCALED_HEIGHT(x) ((x) << 16)
154#define GSC_SCALED_WIDTH_MASK (0x1fff << 0)
155#define GSC_SCALED_WIDTH(x) ((x) << 0)
156
157/* G-Scaler pre scale ratio */
158#define GSC_PRE_SCALE_RATIO 0x28
159#define GSC_PRESC_SHFACTOR_MASK (7 << 28)
160#define GSC_PRESC_SHFACTOR(x) ((x) << 28)
161#define GSC_PRESC_V_RATIO_MASK (7 << 16)
162#define GSC_PRESC_V_RATIO(x) ((x) << 16)
163#define GSC_PRESC_H_RATIO_MASK (7 << 0)
164#define GSC_PRESC_H_RATIO(x) ((x) << 0)
165
166/* G-Scaler main scale horizontal ratio */
167#define GSC_MAIN_H_RATIO 0x2C
168#define GSC_MAIN_H_RATIO_MASK (0xfffff << 0)
169#define GSC_MAIN_H_RATIO_VALUE(x) ((x) << 0)
170
171/* G-Scaler main scale vertical ratio */
172#define GSC_MAIN_V_RATIO 0x30
173#define GSC_MAIN_V_RATIO_MASK (0xfffff << 0)
174#define GSC_MAIN_V_RATIO_VALUE(x) ((x) << 0)
175
176/* G-Scaler input chrominance stride */
177#define GSC_IN_CHROM_STRIDE 0x3C
178#define GSC_IN_CHROM_STRIDE_MASK (0x3fff << 0)
179#define GSC_IN_CHROM_STRIDE_VALUE(x) ((x) << 0)
180
181/* G-Scaler destination image size */
182#define GSC_DSTIMG_SIZE 0x40
183#define GSC_DSTIMG_HEIGHT_MASK (0x1fff << 16)
184#define GSC_DSTIMG_HEIGHT(x) ((x) << 16)
185#define GSC_DSTIMG_WIDTH_MASK (0x1fff << 0)
186#define GSC_DSTIMG_WIDTH(x) ((x) << 0)
187
188/* G-Scaler destination image offset */
189#define GSC_DSTIMG_OFFSET 0x44
190#define GSC_DSTIMG_OFFSET_Y_MASK (0x1fff << 16)
191#define GSC_DSTIMG_OFFSET_Y(x) ((x) << 16)
192#define GSC_DSTIMG_OFFSET_X_MASK (0x1fff << 0)
193#define GSC_DSTIMG_OFFSET_X(x) ((x) << 0)
194
195/* G-Scaler output chrominance stride */
196#define GSC_OUT_CHROM_STRIDE 0x48
197#define GSC_OUT_CHROM_STRIDE_MASK (0x3fff << 0)
198#define GSC_OUT_CHROM_STRIDE_VALUE(x) ((x) << 0)
199
200/* G-Scaler input y address mask */
201#define GSC_IN_BASE_ADDR_Y_MASK 0x4C
202/* G-Scaler input y base address */
203#define GSC_IN_BASE_ADDR_Y(n) (0x50 + (n) * 0x4)
204/* G-Scaler input y base current address */
205#define GSC_IN_BASE_ADDR_Y_CUR(n) (0x60 + (n) * 0x4)
206
207/* G-Scaler input cb address mask */
208#define GSC_IN_BASE_ADDR_CB_MASK 0x7C
209/* G-Scaler input cb base address */
210#define GSC_IN_BASE_ADDR_CB(n) (0x80 + (n) * 0x4)
211/* G-Scaler input cb base current address */
212#define GSC_IN_BASE_ADDR_CB_CUR(n) (0x90 + (n) * 0x4)
213
214/* G-Scaler input cr address mask */
215#define GSC_IN_BASE_ADDR_CR_MASK 0xAC
216/* G-Scaler input cr base address */
217#define GSC_IN_BASE_ADDR_CR(n) (0xB0 + (n) * 0x4)
218/* G-Scaler input cr base current address */
219#define GSC_IN_BASE_ADDR_CR_CUR(n) (0xC0 + (n) * 0x4)
220
221/* G-Scaler input address mask */
222#define GSC_IN_CURR_ADDR_INDEX (0xf << 24)
223#define GSC_IN_CURR_GET_INDEX(x) ((x) >> 24)
224#define GSC_IN_BASE_ADDR_PINGPONG(x) ((x) << 16)
225#define GSC_IN_BASE_ADDR_MASK (0xff << 0)
226
227/* G-Scaler output y address mask */
228#define GSC_OUT_BASE_ADDR_Y_MASK 0x10C
229/* G-Scaler output y base address */
230#define GSC_OUT_BASE_ADDR_Y(n) (0x110 + (n) * 0x4)
231
232/* G-Scaler output cb address mask */
233#define GSC_OUT_BASE_ADDR_CB_MASK 0x15C
234/* G-Scaler output cb base address */
235#define GSC_OUT_BASE_ADDR_CB(n) (0x160 + (n) * 0x4)
236
237/* G-Scaler output cr address mask */
238#define GSC_OUT_BASE_ADDR_CR_MASK 0x1AC
239/* G-Scaler output cr base address */
240#define GSC_OUT_BASE_ADDR_CR(n) (0x1B0 + (n) * 0x4)
241
242/* G-Scaler output address mask */
243#define GSC_OUT_CURR_ADDR_INDEX (0xf << 24)
244#define GSC_OUT_CURR_GET_INDEX(x) ((x) >> 24)
245#define GSC_OUT_BASE_ADDR_PINGPONG(x) ((x) << 16)
246#define GSC_OUT_BASE_ADDR_MASK (0xffff << 0)
247
248/* G-Scaler horizontal scaling filter */
249#define GSC_HCOEF(n, s, x) (0x300 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
250
251/* G-Scaler vertical scaling filter */
252#define GSC_VCOEF(n, s, x) (0x200 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
253
254/* G-Scaler BUS control */
255#define GSC_BUSCON 0xA78
256#define GSC_BUSCON_INT_TIME_MASK (1 << 8)
257#define GSC_BUSCON_INT_DATA_TRANS (0 << 8)
258#define GSC_BUSCON_INT_AXI_RESPONSE (1 << 8)
259#define GSC_BUSCON_AWCACHE(x) ((x) << 4)
260#define GSC_BUSCON_ARCACHE(x) ((x) << 0)
261
262/* G-Scaler V position */
263#define GSC_VPOSITION 0xA7C
264#define GSC_VPOS_F(x) ((x) << 0)
265
266
267/* G-Scaler clock initial count */
268#define GSC_CLK_INIT_COUNT 0xC00
269#define GSC_CLK_GATE_MODE_INIT_CNT(x) ((x) << 0)
270
271/* G-Scaler clock snoop count */
272#define GSC_CLK_SNOOP_COUNT 0xC04
273#define GSC_CLK_GATE_MODE_SNOOP_CNT(x) ((x) << 0)
274
275/* SYSCON. GSCBLK_CFG */
276#define SYSREG_GSCBLK_CFG1 (S3C_VA_SYS + 0x0224)
277#define GSC_BLK_DISP1WB_DEST(x) (x << 10)
278#define GSC_BLK_SW_RESET_WB_DEST(x) (1 << (18 + x))
279#define GSC_BLK_PXLASYNC_LO_MASK_WB(x) (0 << (14 + x))
280#define GSC_BLK_GSCL_WB_IN_SRC_SEL(x) (1 << (2 * x))
281#define SYSREG_GSCBLK_CFG2 (S3C_VA_SYS + 0x2000)
282#define PXLASYNC_LO_MASK_CAMIF_GSCL(x) (1 << (x))
283
284#endif /* EXYNOS_REGS_GSC_H_ */
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
index 9cc7c5e9718c..ef1b3eb3ba6e 100644
--- a/drivers/gpu/drm/exynos/regs-hdmi.h
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -176,6 +176,11 @@
176#define HDMI_PHY_CMU HDMI_CTRL_BASE(0x007C) 176#define HDMI_PHY_CMU HDMI_CTRL_BASE(0x007C)
177#define HDMI_CORE_RSTOUT HDMI_CTRL_BASE(0x0080) 177#define HDMI_CORE_RSTOUT HDMI_CTRL_BASE(0x0080)
178 178
179/* PHY Control bit definition */
180
181/* HDMI_PHY_CON_0 */
182#define HDMI_PHY_POWER_OFF_EN (1 << 0)
183
179/* Video related registers */ 184/* Video related registers */
180#define HDMI_YMAX HDMI_CORE_BASE(0x0060) 185#define HDMI_YMAX HDMI_CORE_BASE(0x0060)
181#define HDMI_YMIN HDMI_CORE_BASE(0x0064) 186#define HDMI_YMIN HDMI_CORE_BASE(0x0064)
@@ -298,14 +303,14 @@
298#define HDMI_AVI_HEADER1 HDMI_CORE_BASE(0x0714) 303#define HDMI_AVI_HEADER1 HDMI_CORE_BASE(0x0714)
299#define HDMI_AVI_HEADER2 HDMI_CORE_BASE(0x0718) 304#define HDMI_AVI_HEADER2 HDMI_CORE_BASE(0x0718)
300#define HDMI_AVI_CHECK_SUM HDMI_CORE_BASE(0x071C) 305#define HDMI_AVI_CHECK_SUM HDMI_CORE_BASE(0x071C)
301#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0720 + 4 * (n)) 306#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0720 + 4 * (n-1))
302 307
303#define HDMI_AUI_CON HDMI_CORE_BASE(0x0800) 308#define HDMI_AUI_CON HDMI_CORE_BASE(0x0800)
304#define HDMI_AUI_HEADER0 HDMI_CORE_BASE(0x0810) 309#define HDMI_AUI_HEADER0 HDMI_CORE_BASE(0x0810)
305#define HDMI_AUI_HEADER1 HDMI_CORE_BASE(0x0814) 310#define HDMI_AUI_HEADER1 HDMI_CORE_BASE(0x0814)
306#define HDMI_AUI_HEADER2 HDMI_CORE_BASE(0x0818) 311#define HDMI_AUI_HEADER2 HDMI_CORE_BASE(0x0818)
307#define HDMI_AUI_CHECK_SUM HDMI_CORE_BASE(0x081C) 312#define HDMI_AUI_CHECK_SUM HDMI_CORE_BASE(0x081C)
308#define HDMI_AUI_BYTE(n) HDMI_CORE_BASE(0x0820 + 4 * (n)) 313#define HDMI_AUI_BYTE(n) HDMI_CORE_BASE(0x0820 + 4 * (n-1))
309 314
310#define HDMI_MPG_CON HDMI_CORE_BASE(0x0900) 315#define HDMI_MPG_CON HDMI_CORE_BASE(0x0900)
311#define HDMI_MPG_CHECK_SUM HDMI_CORE_BASE(0x091C) 316#define HDMI_MPG_CHECK_SUM HDMI_CORE_BASE(0x091C)
@@ -338,6 +343,19 @@
338#define HDMI_AN_SEED_2 HDMI_CORE_BASE(0x0E60) 343#define HDMI_AN_SEED_2 HDMI_CORE_BASE(0x0E60)
339#define HDMI_AN_SEED_3 HDMI_CORE_BASE(0x0E64) 344#define HDMI_AN_SEED_3 HDMI_CORE_BASE(0x0E64)
340 345
346/* AVI bit definition */
347#define HDMI_AVI_CON_DO_NOT_TRANSMIT (0 << 1)
348#define HDMI_AVI_CON_EVERY_VSYNC (1 << 1)
349
350#define AVI_ACTIVE_FORMAT_VALID (1 << 4)
351#define AVI_UNDERSCANNED_DISPLAY_VALID (1 << 1)
352
353/* AUI bit definition */
354#define HDMI_AUI_CON_NO_TRAN (0 << 0)
355
356/* VSI bit definition */
357#define HDMI_VSI_CON_DO_NOT_TRANSMIT (0 << 0)
358
341/* HDCP related registers */ 359/* HDCP related registers */
342#define HDMI_HDCP_SHA1(n) HDMI_CORE_BASE(0x7000 + 4 * (n)) 360#define HDMI_HDCP_SHA1(n) HDMI_CORE_BASE(0x7000 + 4 * (n))
343#define HDMI_HDCP_KSV_LIST(n) HDMI_CORE_BASE(0x7050 + 4 * (n)) 361#define HDMI_HDCP_KSV_LIST(n) HDMI_CORE_BASE(0x7050 + 4 * (n))
diff --git a/drivers/gpu/drm/exynos/regs-rotator.h b/drivers/gpu/drm/exynos/regs-rotator.h
new file mode 100644
index 000000000000..a09ac6e180da
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-rotator.h
@@ -0,0 +1,73 @@
1/* drivers/gpu/drm/exynos/regs-rotator.h
2 *
3 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
6 * Register definition file for Samsung Rotator Interface (Rotator) driver
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#ifndef EXYNOS_REGS_ROTATOR_H
14#define EXYNOS_REGS_ROTATOR_H
15
16/* Configuration */
17#define ROT_CONFIG 0x00
18#define ROT_CONFIG_IRQ (3 << 8)
19
20/* Image Control */
21#define ROT_CONTROL 0x10
22#define ROT_CONTROL_PATTERN_WRITE (1 << 16)
23#define ROT_CONTROL_FMT_YCBCR420_2P (1 << 8)
24#define ROT_CONTROL_FMT_RGB888 (6 << 8)
25#define ROT_CONTROL_FMT_MASK (7 << 8)
26#define ROT_CONTROL_FLIP_VERTICAL (2 << 6)
27#define ROT_CONTROL_FLIP_HORIZONTAL (3 << 6)
28#define ROT_CONTROL_FLIP_MASK (3 << 6)
29#define ROT_CONTROL_ROT_90 (1 << 4)
30#define ROT_CONTROL_ROT_180 (2 << 4)
31#define ROT_CONTROL_ROT_270 (3 << 4)
32#define ROT_CONTROL_ROT_MASK (3 << 4)
33#define ROT_CONTROL_START (1 << 0)
34
35/* Status */
36#define ROT_STATUS 0x20
37#define ROT_STATUS_IRQ_PENDING(x) (1 << (x))
38#define ROT_STATUS_IRQ(x) (((x) >> 8) & 0x3)
39#define ROT_STATUS_IRQ_VAL_COMPLETE 1
40#define ROT_STATUS_IRQ_VAL_ILLEGAL 2
41
42/* Buffer Address */
43#define ROT_SRC_BUF_ADDR(n) (0x30 + ((n) << 2))
44#define ROT_DST_BUF_ADDR(n) (0x50 + ((n) << 2))
45
46/* Buffer Size */
47#define ROT_SRC_BUF_SIZE 0x3c
48#define ROT_DST_BUF_SIZE 0x5c
49#define ROT_SET_BUF_SIZE_H(x) ((x) << 16)
50#define ROT_SET_BUF_SIZE_W(x) ((x) << 0)
51#define ROT_GET_BUF_SIZE_H(x) ((x) >> 16)
52#define ROT_GET_BUF_SIZE_W(x) ((x) & 0xffff)
53
54/* Crop Position */
55#define ROT_SRC_CROP_POS 0x40
56#define ROT_DST_CROP_POS 0x60
57#define ROT_CROP_POS_Y(x) ((x) << 16)
58#define ROT_CROP_POS_X(x) ((x) << 0)
59
60/* Source Crop Size */
61#define ROT_SRC_CROP_SIZE 0x44
62#define ROT_SRC_CROP_SIZE_H(x) ((x) << 16)
63#define ROT_SRC_CROP_SIZE_W(x) ((x) << 0)
64
65/* Round to nearest aligned value */
66#define ROT_ALIGN(x, align, mask) (((x) + (1 << ((align) - 1))) & (mask))
67/* Minimum limit value */
68#define ROT_MIN(min, mask) (((min) + ~(mask)) & (mask))
69/* Maximum limit value */
70#define ROT_MAX(max, mask) ((max) & (mask))
71
72#endif /* EXYNOS_REGS_ROTATOR_H */
73
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 1ceca3d13b65..23e14e93991f 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -523,7 +523,7 @@ void cdv_intel_attach_force_audio_property(struct drm_connector *connector)
523 523
524 dev_priv->force_audio_property = prop; 524 dev_priv->force_audio_property = prop;
525 } 525 }
526 drm_connector_attach_property(connector, prop, 0); 526 drm_object_attach_property(&connector->base, prop, 0);
527} 527}
528 528
529 529
@@ -553,7 +553,7 @@ void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector)
553 dev_priv->broadcast_rgb_property = prop; 553 dev_priv->broadcast_rgb_property = prop;
554 } 554 }
555 555
556 drm_connector_attach_property(connector, prop, 0); 556 drm_object_attach_property(&connector->base, prop, 0);
557} 557}
558 558
559/* Cedarview */ 559/* Cedarview */
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index e3a3978cf320..51044cc55cf2 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -1650,7 +1650,7 @@ cdv_intel_dp_set_property(struct drm_connector *connector,
1650 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1650 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1651 int ret; 1651 int ret;
1652 1652
1653 ret = drm_connector_property_set_value(connector, property, val); 1653 ret = drm_object_property_set_value(&connector->base, property, val);
1654 if (ret) 1654 if (ret)
1655 return ret; 1655 return ret;
1656 1656
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 7272a461edfe..e223b500022e 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -185,14 +185,14 @@ static int cdv_hdmi_set_property(struct drm_connector *connector,
185 return -1; 185 return -1;
186 } 186 }
187 187
188 if (drm_connector_property_get_value(connector, 188 if (drm_object_property_get_value(&connector->base,
189 property, &curValue)) 189 property, &curValue))
190 return -1; 190 return -1;
191 191
192 if (curValue == value) 192 if (curValue == value)
193 return 0; 193 return 0;
194 194
195 if (drm_connector_property_set_value(connector, 195 if (drm_object_property_set_value(&connector->base,
196 property, value)) 196 property, value))
197 return -1; 197 return -1;
198 198
@@ -341,7 +341,7 @@ void cdv_hdmi_init(struct drm_device *dev,
341 connector->interlace_allowed = false; 341 connector->interlace_allowed = false;
342 connector->doublescan_allowed = false; 342 connector->doublescan_allowed = false;
343 343
344 drm_connector_attach_property(connector, 344 drm_object_attach_property(&connector->base,
345 dev->mode_config.scaling_mode_property, 345 dev->mode_config.scaling_mode_property,
346 DRM_MODE_SCALE_FULLSCREEN); 346 DRM_MODE_SCALE_FULLSCREEN);
347 347
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index b362dd39bf5a..d81dbc3368f0 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -479,7 +479,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
479 return -1; 479 return -1;
480 } 480 }
481 481
482 if (drm_connector_property_get_value(connector, 482 if (drm_object_property_get_value(&connector->base,
483 property, 483 property,
484 &curValue)) 484 &curValue))
485 return -1; 485 return -1;
@@ -487,7 +487,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
487 if (curValue == value) 487 if (curValue == value)
488 return 0; 488 return 0;
489 489
490 if (drm_connector_property_set_value(connector, 490 if (drm_object_property_set_value(&connector->base,
491 property, 491 property,
492 value)) 492 value))
493 return -1; 493 return -1;
@@ -502,7 +502,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
502 return -1; 502 return -1;
503 } 503 }
504 } else if (!strcmp(property->name, "backlight") && encoder) { 504 } else if (!strcmp(property->name, "backlight") && encoder) {
505 if (drm_connector_property_set_value(connector, 505 if (drm_object_property_set_value(&connector->base,
506 property, 506 property,
507 value)) 507 value))
508 return -1; 508 return -1;
@@ -671,10 +671,10 @@ void cdv_intel_lvds_init(struct drm_device *dev,
671 connector->doublescan_allowed = false; 671 connector->doublescan_allowed = false;
672 672
673 /*Attach connector properties*/ 673 /*Attach connector properties*/
674 drm_connector_attach_property(connector, 674 drm_object_attach_property(&connector->base,
675 dev->mode_config.scaling_mode_property, 675 dev->mode_config.scaling_mode_property,
676 DRM_MODE_SCALE_FULLSCREEN); 676 DRM_MODE_SCALE_FULLSCREEN);
677 drm_connector_attach_property(connector, 677 drm_object_attach_property(&connector->base,
678 dev_priv->backlight_property, 678 dev_priv->backlight_property,
679 BRIGHTNESS_MAX_LEVEL); 679 BRIGHTNESS_MAX_LEVEL);
680 680
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 32dba2ab53e1..2d4ab48f07a2 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -265,13 +265,13 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
265 goto set_prop_error; 265 goto set_prop_error;
266 } 266 }
267 267
268 if (drm_connector_property_get_value(connector, property, &val)) 268 if (drm_object_property_get_value(&connector->base, property, &val))
269 goto set_prop_error; 269 goto set_prop_error;
270 270
271 if (val == value) 271 if (val == value)
272 goto set_prop_done; 272 goto set_prop_done;
273 273
274 if (drm_connector_property_set_value(connector, 274 if (drm_object_property_set_value(&connector->base,
275 property, value)) 275 property, value))
276 goto set_prop_error; 276 goto set_prop_error;
277 277
@@ -296,7 +296,7 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
296 } 296 }
297 } 297 }
298 } else if (!strcmp(property->name, "backlight") && encoder) { 298 } else if (!strcmp(property->name, "backlight") && encoder) {
299 if (drm_connector_property_set_value(connector, property, 299 if (drm_object_property_set_value(&connector->base, property,
300 value)) 300 value))
301 goto set_prop_error; 301 goto set_prop_error;
302 else 302 else
@@ -506,7 +506,7 @@ void mdfld_dsi_output_init(struct drm_device *dev,
506 506
507 dev_dbg(dev->dev, "init DSI output on pipe %d\n", pipe); 507 dev_dbg(dev->dev, "init DSI output on pipe %d\n", pipe);
508 508
509 if (!dev || ((pipe != 0) && (pipe != 2))) { 509 if (pipe != 0 && pipe != 2) {
510 DRM_ERROR("Invalid parameter\n"); 510 DRM_ERROR("Invalid parameter\n");
511 return; 511 return;
512 } 512 }
@@ -572,10 +572,10 @@ void mdfld_dsi_output_init(struct drm_device *dev,
572 connector->doublescan_allowed = false; 572 connector->doublescan_allowed = false;
573 573
574 /*attach properties*/ 574 /*attach properties*/
575 drm_connector_attach_property(connector, 575 drm_object_attach_property(&connector->base,
576 dev->mode_config.scaling_mode_property, 576 dev->mode_config.scaling_mode_property,
577 DRM_MODE_SCALE_FULLSCREEN); 577 DRM_MODE_SCALE_FULLSCREEN);
578 drm_connector_attach_property(connector, 578 drm_object_attach_property(&connector->base,
579 dev_priv->backlight_property, 579 dev_priv->backlight_property,
580 MDFLD_DSI_BRIGHTNESS_MAX_LEVEL); 580 MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
581 581
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index dec6a9aea3c6..74485dc43945 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -820,7 +820,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
820 REG_WRITE(map->pos, 0); 820 REG_WRITE(map->pos, 0);
821 821
822 if (psb_intel_encoder) 822 if (psb_intel_encoder)
823 drm_connector_property_get_value(connector, 823 drm_object_property_get_value(&connector->base,
824 dev->mode_config.scaling_mode_property, &scalingType); 824 dev->mode_config.scaling_mode_property, &scalingType);
825 825
826 if (scalingType == DRM_MODE_SCALE_NO_SCALE) { 826 if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
diff --git a/drivers/gpu/drm/gma500/oaktrail.h b/drivers/gpu/drm/gma500/oaktrail.h
index f2f9f38a5362..30adbbe23024 100644
--- a/drivers/gpu/drm/gma500/oaktrail.h
+++ b/drivers/gpu/drm/gma500/oaktrail.h
@@ -249,3 +249,9 @@ extern void oaktrail_hdmi_i2c_exit(struct pci_dev *dev);
249extern void oaktrail_hdmi_save(struct drm_device *dev); 249extern void oaktrail_hdmi_save(struct drm_device *dev);
250extern void oaktrail_hdmi_restore(struct drm_device *dev); 250extern void oaktrail_hdmi_restore(struct drm_device *dev);
251extern void oaktrail_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev); 251extern void oaktrail_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev);
252extern int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
253 struct drm_display_mode *adjusted_mode, int x, int y,
254 struct drm_framebuffer *old_fb);
255extern void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode);
256
257
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index cdafd2acc72f..3071526bc3c1 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -168,6 +168,11 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
168 const struct psb_offset *map = &dev_priv->regmap[pipe]; 168 const struct psb_offset *map = &dev_priv->regmap[pipe];
169 u32 temp; 169 u32 temp;
170 170
171 if (pipe == 1) {
172 oaktrail_crtc_hdmi_dpms(crtc, mode);
173 return;
174 }
175
171 if (!gma_power_begin(dev, true)) 176 if (!gma_power_begin(dev, true))
172 return; 177 return;
173 178
@@ -302,6 +307,9 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
302 uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN; 307 uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
303 struct drm_connector *connector; 308 struct drm_connector *connector;
304 309
310 if (pipe == 1)
311 return oaktrail_crtc_hdmi_mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
312
305 if (!gma_power_begin(dev, true)) 313 if (!gma_power_begin(dev, true))
306 return 0; 314 return 0;
307 315
@@ -343,7 +351,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
343 (mode->crtc_vdisplay - 1)); 351 (mode->crtc_vdisplay - 1));
344 352
345 if (psb_intel_encoder) 353 if (psb_intel_encoder)
346 drm_connector_property_get_value(connector, 354 drm_object_property_get_value(&connector->base,
347 dev->mode_config.scaling_mode_property, &scalingType); 355 dev->mode_config.scaling_mode_property, &scalingType);
348 356
349 if (scalingType == DRM_MODE_SCALE_NO_SCALE) { 357 if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 010b820744a5..08747fd7105c 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -544,7 +544,7 @@ const struct psb_ops oaktrail_chip_ops = {
544 .accel_2d = 1, 544 .accel_2d = 1,
545 .pipes = 2, 545 .pipes = 2,
546 .crtcs = 2, 546 .crtcs = 2,
547 .hdmi_mask = (1 << 0), 547 .hdmi_mask = (1 << 1),
548 .lvds_mask = (1 << 0), 548 .lvds_mask = (1 << 0),
549 .cursor_needs_phys = 0, 549 .cursor_needs_phys = 0,
550 .sgx_offset = MRST_SGX_OFFSET, 550 .sgx_offset = MRST_SGX_OFFSET,
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 69e51e903f35..f036f1fc161e 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -155,6 +155,345 @@ static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
155 HDMI_READ(HDMI_HCR); 155 HDMI_READ(HDMI_HCR);
156} 156}
157 157
158static void wait_for_vblank(struct drm_device *dev)
159{
160 /* Wait for 20ms, i.e. one cycle at 50hz. */
161 mdelay(20);
162}
163
164static unsigned int htotal_calculate(struct drm_display_mode *mode)
165{
166 u32 htotal, new_crtc_htotal;
167
168 htotal = (mode->crtc_hdisplay - 1) | ((mode->crtc_htotal - 1) << 16);
169
170 /*
171 * 1024 x 768 new_crtc_htotal = 0x1024;
172 * 1280 x 1024 new_crtc_htotal = 0x0c34;
173 */
174 new_crtc_htotal = (mode->crtc_htotal - 1) * 200 * 1000 / mode->clock;
175
176 DRM_DEBUG_KMS("new crtc htotal 0x%4x\n", new_crtc_htotal);
177 return (mode->crtc_hdisplay - 1) | (new_crtc_htotal << 16);
178}
179
180static void oaktrail_hdmi_find_dpll(struct drm_crtc *crtc, int target,
181 int refclk, struct oaktrail_hdmi_clock *best_clock)
182{
183 int np_min, np_max, nr_min, nr_max;
184 int np, nr, nf;
185
186 np_min = DIV_ROUND_UP(oaktrail_hdmi_limit.vco.min, target * 10);
187 np_max = oaktrail_hdmi_limit.vco.max / (target * 10);
188 if (np_min < oaktrail_hdmi_limit.np.min)
189 np_min = oaktrail_hdmi_limit.np.min;
190 if (np_max > oaktrail_hdmi_limit.np.max)
191 np_max = oaktrail_hdmi_limit.np.max;
192
193 nr_min = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_max));
194 nr_max = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_min));
195 if (nr_min < oaktrail_hdmi_limit.nr.min)
196 nr_min = oaktrail_hdmi_limit.nr.min;
197 if (nr_max > oaktrail_hdmi_limit.nr.max)
198 nr_max = oaktrail_hdmi_limit.nr.max;
199
200 np = DIV_ROUND_UP((refclk * 1000), (target * 10 * nr_max));
201 nr = DIV_ROUND_UP((refclk * 1000), (target * 10 * np));
202 nf = DIV_ROUND_CLOSEST((target * 10 * np * nr), refclk);
203 DRM_DEBUG_KMS("np, nr, nf %d %d %d\n", np, nr, nf);
204
205 /*
206 * 1024 x 768 np = 1; nr = 0x26; nf = 0x0fd8000;
207 * 1280 x 1024 np = 1; nr = 0x17; nf = 0x1034000;
208 */
209 best_clock->np = np;
210 best_clock->nr = nr - 1;
211 best_clock->nf = (nf << 14);
212}
213
214static void scu_busy_loop(void __iomem *scu_base)
215{
216 u32 status = 0;
217 u32 loop_count = 0;
218
219 status = readl(scu_base + 0x04);
220 while (status & 1) {
221 udelay(1); /* scu processing time is in few u secods */
222 status = readl(scu_base + 0x04);
223 loop_count++;
224 /* break if scu doesn't reset busy bit after huge retry */
225 if (loop_count > 1000) {
226 DRM_DEBUG_KMS("SCU IPC timed out");
227 return;
228 }
229 }
230}
231
232/*
233 * You don't want to know, you really really don't want to know....
234 *
235 * This is magic. However it's safe magic because of the way the platform
236 * works and it is necessary magic.
237 */
238static void oaktrail_hdmi_reset(struct drm_device *dev)
239{
240 void __iomem *base;
241 unsigned long scu_ipc_mmio = 0xff11c000UL;
242 int scu_len = 1024;
243
244 base = ioremap((resource_size_t)scu_ipc_mmio, scu_len);
245 if (base == NULL) {
246 DRM_ERROR("failed to map scu mmio\n");
247 return;
248 }
249
250 /* scu ipc: assert hdmi controller reset */
251 writel(0xff11d118, base + 0x0c);
252 writel(0x7fffffdf, base + 0x80);
253 writel(0x42005, base + 0x0);
254 scu_busy_loop(base);
255
256 /* scu ipc: de-assert hdmi controller reset */
257 writel(0xff11d118, base + 0x0c);
258 writel(0x7fffffff, base + 0x80);
259 writel(0x42005, base + 0x0);
260 scu_busy_loop(base);
261
262 iounmap(base);
263}
264
265int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
266 struct drm_display_mode *mode,
267 struct drm_display_mode *adjusted_mode,
268 int x, int y,
269 struct drm_framebuffer *old_fb)
270{
271 struct drm_device *dev = crtc->dev;
272 struct drm_psb_private *dev_priv = dev->dev_private;
273 struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
274 int pipe = 1;
275 int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
276 int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
277 int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
278 int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
279 int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
280 int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
281 int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
282 int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
283 int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
284 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
285 int refclk;
286 struct oaktrail_hdmi_clock clock;
287 u32 dspcntr, pipeconf, dpll, temp;
288 int dspcntr_reg = DSPBCNTR;
289
290 if (!gma_power_begin(dev, true))
291 return 0;
292
293 /* Disable the VGA plane that we never use */
294 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
295
296 /* Disable dpll if necessary */
297 dpll = REG_READ(DPLL_CTRL);
298 if ((dpll & DPLL_PWRDN) == 0) {
299 REG_WRITE(DPLL_CTRL, dpll | (DPLL_PWRDN | DPLL_RESET));
300 REG_WRITE(DPLL_DIV_CTRL, 0x00000000);
301 REG_WRITE(DPLL_STATUS, 0x1);
302 }
303 udelay(150);
304
305 /* Reset controller */
306 oaktrail_hdmi_reset(dev);
307
308 /* program and enable dpll */
309 refclk = 25000;
310 oaktrail_hdmi_find_dpll(crtc, adjusted_mode->clock, refclk, &clock);
311
312 /* Set the DPLL */
313 dpll = REG_READ(DPLL_CTRL);
314 dpll &= ~DPLL_PDIV_MASK;
315 dpll &= ~(DPLL_PWRDN | DPLL_RESET);
316 REG_WRITE(DPLL_CTRL, 0x00000008);
317 REG_WRITE(DPLL_DIV_CTRL, ((clock.nf << 6) | clock.nr));
318 REG_WRITE(DPLL_ADJUST, ((clock.nf >> 14) - 1));
319 REG_WRITE(DPLL_CTRL, (dpll | (clock.np << DPLL_PDIV_SHIFT) | DPLL_ENSTAT | DPLL_DITHEN));
320 REG_WRITE(DPLL_UPDATE, 0x80000000);
321 REG_WRITE(DPLL_CLK_ENABLE, 0x80050102);
322 udelay(150);
323
324 /* configure HDMI */
325 HDMI_WRITE(0x1004, 0x1fd);
326 HDMI_WRITE(0x2000, 0x1);
327 HDMI_WRITE(0x2008, 0x0);
328 HDMI_WRITE(0x3130, 0x8);
329 HDMI_WRITE(0x101c, 0x1800810);
330
331 temp = htotal_calculate(adjusted_mode);
332 REG_WRITE(htot_reg, temp);
333 REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
334 REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
335 REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
336 REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
337 REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
338 REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
339
340 REG_WRITE(PCH_HTOTAL_B, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
341 REG_WRITE(PCH_HBLANK_B, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
342 REG_WRITE(PCH_HSYNC_B, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
343 REG_WRITE(PCH_VTOTAL_B, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
344 REG_WRITE(PCH_VBLANK_B, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
345 REG_WRITE(PCH_VSYNC_B, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
346 REG_WRITE(PCH_PIPEBSRC, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
347
348 temp = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
349 HDMI_WRITE(HDMI_HBLANK_A, ((adjusted_mode->crtc_hdisplay - 1) << 16) | temp);
350
351 REG_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
352 REG_WRITE(dsppos_reg, 0);
353
354 /* Flush the plane changes */
355 {
356 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
357 crtc_funcs->mode_set_base(crtc, x, y, old_fb);
358 }
359
360 /* Set up the display plane register */
361 dspcntr = REG_READ(dspcntr_reg);
362 dspcntr |= DISPPLANE_GAMMA_ENABLE;
363 dspcntr |= DISPPLANE_SEL_PIPE_B;
364 dspcntr |= DISPLAY_PLANE_ENABLE;
365
366 /* setup pipeconf */
367 pipeconf = REG_READ(pipeconf_reg);
368 pipeconf |= PIPEACONF_ENABLE;
369
370 REG_WRITE(pipeconf_reg, pipeconf);
371 REG_READ(pipeconf_reg);
372
373 REG_WRITE(PCH_PIPEBCONF, pipeconf);
374 REG_READ(PCH_PIPEBCONF);
375 wait_for_vblank(dev);
376
377 REG_WRITE(dspcntr_reg, dspcntr);
378 wait_for_vblank(dev);
379
380 gma_power_end(dev);
381
382 return 0;
383}
384
385void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
386{
387 struct drm_device *dev = crtc->dev;
388 u32 temp;
389
390 DRM_DEBUG_KMS("%s %d\n", __func__, mode);
391
392 switch (mode) {
393 case DRM_MODE_DPMS_OFF:
394 REG_WRITE(VGACNTRL, 0x80000000);
395
396 /* Disable plane */
397 temp = REG_READ(DSPBCNTR);
398 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
399 REG_WRITE(DSPBCNTR, temp & ~DISPLAY_PLANE_ENABLE);
400 REG_READ(DSPBCNTR);
401 /* Flush the plane changes */
402 REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
403 REG_READ(DSPBSURF);
404 }
405
406 /* Disable pipe B */
407 temp = REG_READ(PIPEBCONF);
408 if ((temp & PIPEACONF_ENABLE) != 0) {
409 REG_WRITE(PIPEBCONF, temp & ~PIPEACONF_ENABLE);
410 REG_READ(PIPEBCONF);
411 }
412
413 /* Disable LNW Pipes, etc */
414 temp = REG_READ(PCH_PIPEBCONF);
415 if ((temp & PIPEACONF_ENABLE) != 0) {
416 REG_WRITE(PCH_PIPEBCONF, temp & ~PIPEACONF_ENABLE);
417 REG_READ(PCH_PIPEBCONF);
418 }
419
420 /* wait for pipe off */
421 udelay(150);
422
423 /* Disable dpll */
424 temp = REG_READ(DPLL_CTRL);
425 if ((temp & DPLL_PWRDN) == 0) {
426 REG_WRITE(DPLL_CTRL, temp | (DPLL_PWRDN | DPLL_RESET));
427 REG_WRITE(DPLL_STATUS, 0x1);
428 }
429
430 /* wait for dpll off */
431 udelay(150);
432
433 break;
434 case DRM_MODE_DPMS_ON:
435 case DRM_MODE_DPMS_STANDBY:
436 case DRM_MODE_DPMS_SUSPEND:
437 /* Enable dpll */
438 temp = REG_READ(DPLL_CTRL);
439 if ((temp & DPLL_PWRDN) != 0) {
440 REG_WRITE(DPLL_CTRL, temp & ~(DPLL_PWRDN | DPLL_RESET));
441 temp = REG_READ(DPLL_CLK_ENABLE);
442 REG_WRITE(DPLL_CLK_ENABLE, temp | DPLL_EN_DISP | DPLL_SEL_HDMI | DPLL_EN_HDMI);
443 REG_READ(DPLL_CLK_ENABLE);
444 }
445 /* wait for dpll warm up */
446 udelay(150);
447
448 /* Enable pipe B */
449 temp = REG_READ(PIPEBCONF);
450 if ((temp & PIPEACONF_ENABLE) == 0) {
451 REG_WRITE(PIPEBCONF, temp | PIPEACONF_ENABLE);
452 REG_READ(PIPEBCONF);
453 }
454
455 /* Enable LNW Pipe B */
456 temp = REG_READ(PCH_PIPEBCONF);
457 if ((temp & PIPEACONF_ENABLE) == 0) {
458 REG_WRITE(PCH_PIPEBCONF, temp | PIPEACONF_ENABLE);
459 REG_READ(PCH_PIPEBCONF);
460 }
461
462 wait_for_vblank(dev);
463
464 /* Enable plane */
465 temp = REG_READ(DSPBCNTR);
466 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
467 REG_WRITE(DSPBCNTR, temp | DISPLAY_PLANE_ENABLE);
468 /* Flush the plane changes */
469 REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
470 REG_READ(DSPBSURF);
471 }
472
473 psb_intel_crtc_load_lut(crtc);
474 }
475
476 /* DSPARB */
477 REG_WRITE(DSPARB, 0x00003fbf);
478
479 /* FW1 */
480 REG_WRITE(0x70034, 0x3f880a0a);
481
482 /* FW2 */
483 REG_WRITE(0x70038, 0x0b060808);
484
485 /* FW4 */
486 REG_WRITE(0x70050, 0x08030404);
487
488 /* FW5 */
489 REG_WRITE(0x70054, 0x04040404);
490
491 /* LNC Chicken Bits - Squawk! */
492 REG_WRITE(0x70400, 0x4000);
493
494 return;
495}
496
158static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode) 497static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
159{ 498{
160 static int dpms_mode = -1; 499 static int dpms_mode = -1;
@@ -233,13 +572,15 @@ static const unsigned char raw_edid[] = {
233 572
234static int oaktrail_hdmi_get_modes(struct drm_connector *connector) 573static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
235{ 574{
236 struct drm_device *dev = connector->dev;
237 struct drm_psb_private *dev_priv = dev->dev_private;
238 struct i2c_adapter *i2c_adap; 575 struct i2c_adapter *i2c_adap;
239 struct edid *edid; 576 struct edid *edid;
240 struct drm_display_mode *mode, *t; 577 int ret = 0;
241 int i = 0, ret = 0;
242 578
579 /*
580 * FIXME: We need to figure this lot out. In theory we can
581 * read the EDID somehow but I've yet to find working reference
582 * code.
583 */
243 i2c_adap = i2c_get_adapter(3); 584 i2c_adap = i2c_get_adapter(3);
244 if (i2c_adap == NULL) { 585 if (i2c_adap == NULL) {
245 DRM_ERROR("No ddc adapter available!\n"); 586 DRM_ERROR("No ddc adapter available!\n");
@@ -253,17 +594,7 @@ static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
253 drm_mode_connector_update_edid_property(connector, edid); 594 drm_mode_connector_update_edid_property(connector, edid);
254 ret = drm_add_edid_modes(connector, edid); 595 ret = drm_add_edid_modes(connector, edid);
255 } 596 }
256 597 return ret;
257 /*
258 * prune modes that require frame buffer bigger than stolen mem
259 */
260 list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
261 if ((mode->hdisplay * mode->vdisplay * 4) >= dev_priv->vram_stolen_size) {
262 i++;
263 drm_mode_remove(connector, mode);
264 }
265 }
266 return ret - i;
267} 598}
268 599
269static void oaktrail_hdmi_mode_set(struct drm_encoder *encoder, 600static void oaktrail_hdmi_mode_set(struct drm_encoder *encoder,
@@ -349,6 +680,7 @@ void oaktrail_hdmi_init(struct drm_device *dev,
349 connector->interlace_allowed = false; 680 connector->interlace_allowed = false;
350 connector->doublescan_allowed = false; 681 connector->doublescan_allowed = false;
351 drm_sysfs_connector_add(connector); 682 drm_sysfs_connector_add(connector);
683 dev_info(dev->dev, "HDMI initialised.\n");
352 684
353 return; 685 return;
354 686
@@ -403,6 +735,9 @@ void oaktrail_hdmi_setup(struct drm_device *dev)
403 735
404 dev_priv->hdmi_priv = hdmi_dev; 736 dev_priv->hdmi_priv = hdmi_dev;
405 oaktrail_hdmi_audio_disable(dev); 737 oaktrail_hdmi_audio_disable(dev);
738
739 dev_info(dev->dev, "HDMI hardware present.\n");
740
406 return; 741 return;
407 742
408free: 743free:
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 558c77fb55ec..325013a9c48c 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -133,8 +133,8 @@ static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
133 return; 133 return;
134 } 134 }
135 135
136 drm_connector_property_get_value( 136 drm_object_property_get_value(
137 connector, 137 &connector->base,
138 dev->mode_config.scaling_mode_property, 138 dev->mode_config.scaling_mode_property,
139 &v); 139 &v);
140 140
@@ -363,10 +363,10 @@ void oaktrail_lvds_init(struct drm_device *dev,
363 connector->interlace_allowed = false; 363 connector->interlace_allowed = false;
364 connector->doublescan_allowed = false; 364 connector->doublescan_allowed = false;
365 365
366 drm_connector_attach_property(connector, 366 drm_object_attach_property(&connector->base,
367 dev->mode_config.scaling_mode_property, 367 dev->mode_config.scaling_mode_property,
368 DRM_MODE_SCALE_FULLSCREEN); 368 DRM_MODE_SCALE_FULLSCREEN);
369 drm_connector_attach_property(connector, 369 drm_object_attach_property(&connector->base,
370 dev_priv->backlight_property, 370 dev_priv->backlight_property,
371 BRIGHTNESS_MAX_LEVEL); 371 BRIGHTNESS_MAX_LEVEL);
372 372
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 2a4c3a9e33e3..9fa5fa2e6192 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -603,7 +603,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
603 goto set_prop_error; 603 goto set_prop_error;
604 } 604 }
605 605
606 if (drm_connector_property_get_value(connector, 606 if (drm_object_property_get_value(&connector->base,
607 property, 607 property,
608 &curval)) 608 &curval))
609 goto set_prop_error; 609 goto set_prop_error;
@@ -611,7 +611,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
611 if (curval == value) 611 if (curval == value)
612 goto set_prop_done; 612 goto set_prop_done;
613 613
614 if (drm_connector_property_set_value(connector, 614 if (drm_object_property_set_value(&connector->base,
615 property, 615 property,
616 value)) 616 value))
617 goto set_prop_error; 617 goto set_prop_error;
@@ -626,7 +626,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
626 goto set_prop_error; 626 goto set_prop_error;
627 } 627 }
628 } else if (!strcmp(property->name, "backlight")) { 628 } else if (!strcmp(property->name, "backlight")) {
629 if (drm_connector_property_set_value(connector, 629 if (drm_object_property_set_value(&connector->base,
630 property, 630 property,
631 value)) 631 value))
632 goto set_prop_error; 632 goto set_prop_error;
@@ -746,10 +746,10 @@ void psb_intel_lvds_init(struct drm_device *dev,
746 connector->doublescan_allowed = false; 746 connector->doublescan_allowed = false;
747 747
748 /*Attach connector properties*/ 748 /*Attach connector properties*/
749 drm_connector_attach_property(connector, 749 drm_object_attach_property(&connector->base,
750 dev->mode_config.scaling_mode_property, 750 dev->mode_config.scaling_mode_property,
751 DRM_MODE_SCALE_FULLSCREEN); 751 DRM_MODE_SCALE_FULLSCREEN);
752 drm_connector_attach_property(connector, 752 drm_object_attach_property(&connector->base,
753 dev_priv->backlight_property, 753 dev_priv->backlight_property,
754 BRIGHTNESS_MAX_LEVEL); 754 BRIGHTNESS_MAX_LEVEL);
755 755
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index fc9292705dbf..a4cc777ab7a6 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1694,7 +1694,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
1694 uint8_t cmd; 1694 uint8_t cmd;
1695 int ret; 1695 int ret;
1696 1696
1697 ret = drm_connector_property_set_value(connector, property, val); 1697 ret = drm_object_property_set_value(&connector->base, property, val);
1698 if (ret) 1698 if (ret)
1699 return ret; 1699 return ret;
1700 1700
@@ -1749,7 +1749,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
1749 } else if (IS_TV_OR_LVDS(psb_intel_sdvo_connector)) { 1749 } else if (IS_TV_OR_LVDS(psb_intel_sdvo_connector)) {
1750 temp_value = val; 1750 temp_value = val;
1751 if (psb_intel_sdvo_connector->left == property) { 1751 if (psb_intel_sdvo_connector->left == property) {
1752 drm_connector_property_set_value(connector, 1752 drm_object_property_set_value(&connector->base,
1753 psb_intel_sdvo_connector->right, val); 1753 psb_intel_sdvo_connector->right, val);
1754 if (psb_intel_sdvo_connector->left_margin == temp_value) 1754 if (psb_intel_sdvo_connector->left_margin == temp_value)
1755 return 0; 1755 return 0;
@@ -1761,7 +1761,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
1761 cmd = SDVO_CMD_SET_OVERSCAN_H; 1761 cmd = SDVO_CMD_SET_OVERSCAN_H;
1762 goto set_value; 1762 goto set_value;
1763 } else if (psb_intel_sdvo_connector->right == property) { 1763 } else if (psb_intel_sdvo_connector->right == property) {
1764 drm_connector_property_set_value(connector, 1764 drm_object_property_set_value(&connector->base,
1765 psb_intel_sdvo_connector->left, val); 1765 psb_intel_sdvo_connector->left, val);
1766 if (psb_intel_sdvo_connector->right_margin == temp_value) 1766 if (psb_intel_sdvo_connector->right_margin == temp_value)
1767 return 0; 1767 return 0;
@@ -1773,7 +1773,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
1773 cmd = SDVO_CMD_SET_OVERSCAN_H; 1773 cmd = SDVO_CMD_SET_OVERSCAN_H;
1774 goto set_value; 1774 goto set_value;
1775 } else if (psb_intel_sdvo_connector->top == property) { 1775 } else if (psb_intel_sdvo_connector->top == property) {
1776 drm_connector_property_set_value(connector, 1776 drm_object_property_set_value(&connector->base,
1777 psb_intel_sdvo_connector->bottom, val); 1777 psb_intel_sdvo_connector->bottom, val);
1778 if (psb_intel_sdvo_connector->top_margin == temp_value) 1778 if (psb_intel_sdvo_connector->top_margin == temp_value)
1779 return 0; 1779 return 0;
@@ -1785,7 +1785,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
1785 cmd = SDVO_CMD_SET_OVERSCAN_V; 1785 cmd = SDVO_CMD_SET_OVERSCAN_V;
1786 goto set_value; 1786 goto set_value;
1787 } else if (psb_intel_sdvo_connector->bottom == property) { 1787 } else if (psb_intel_sdvo_connector->bottom == property) {
1788 drm_connector_property_set_value(connector, 1788 drm_object_property_set_value(&connector->base,
1789 psb_intel_sdvo_connector->top, val); 1789 psb_intel_sdvo_connector->top, val);
1790 if (psb_intel_sdvo_connector->bottom_margin == temp_value) 1790 if (psb_intel_sdvo_connector->bottom_margin == temp_value)
1791 return 0; 1791 return 0;
@@ -2286,7 +2286,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s
2286 i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]); 2286 i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]);
2287 2287
2288 psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0]; 2288 psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0];
2289 drm_connector_attach_property(&psb_intel_sdvo_connector->base.base, 2289 drm_object_attach_property(&psb_intel_sdvo_connector->base.base.base,
2290 psb_intel_sdvo_connector->tv_format, 0); 2290 psb_intel_sdvo_connector->tv_format, 0);
2291 return true; 2291 return true;
2292 2292
@@ -2302,7 +2302,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s
2302 psb_intel_sdvo_connector->name = \ 2302 psb_intel_sdvo_connector->name = \
2303 drm_property_create_range(dev, 0, #name, 0, data_value[0]); \ 2303 drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
2304 if (!psb_intel_sdvo_connector->name) return false; \ 2304 if (!psb_intel_sdvo_connector->name) return false; \
2305 drm_connector_attach_property(connector, \ 2305 drm_object_attach_property(&connector->base, \
2306 psb_intel_sdvo_connector->name, \ 2306 psb_intel_sdvo_connector->name, \
2307 psb_intel_sdvo_connector->cur_##name); \ 2307 psb_intel_sdvo_connector->cur_##name); \
2308 DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \ 2308 DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
@@ -2339,7 +2339,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
2339 if (!psb_intel_sdvo_connector->left) 2339 if (!psb_intel_sdvo_connector->left)
2340 return false; 2340 return false;
2341 2341
2342 drm_connector_attach_property(connector, 2342 drm_object_attach_property(&connector->base,
2343 psb_intel_sdvo_connector->left, 2343 psb_intel_sdvo_connector->left,
2344 psb_intel_sdvo_connector->left_margin); 2344 psb_intel_sdvo_connector->left_margin);
2345 2345
@@ -2348,7 +2348,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
2348 if (!psb_intel_sdvo_connector->right) 2348 if (!psb_intel_sdvo_connector->right)
2349 return false; 2349 return false;
2350 2350
2351 drm_connector_attach_property(connector, 2351 drm_object_attach_property(&connector->base,
2352 psb_intel_sdvo_connector->right, 2352 psb_intel_sdvo_connector->right,
2353 psb_intel_sdvo_connector->right_margin); 2353 psb_intel_sdvo_connector->right_margin);
2354 DRM_DEBUG_KMS("h_overscan: max %d, " 2354 DRM_DEBUG_KMS("h_overscan: max %d, "
@@ -2375,7 +2375,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
2375 if (!psb_intel_sdvo_connector->top) 2375 if (!psb_intel_sdvo_connector->top)
2376 return false; 2376 return false;
2377 2377
2378 drm_connector_attach_property(connector, 2378 drm_object_attach_property(&connector->base,
2379 psb_intel_sdvo_connector->top, 2379 psb_intel_sdvo_connector->top,
2380 psb_intel_sdvo_connector->top_margin); 2380 psb_intel_sdvo_connector->top_margin);
2381 2381
@@ -2384,7 +2384,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
2384 if (!psb_intel_sdvo_connector->bottom) 2384 if (!psb_intel_sdvo_connector->bottom)
2385 return false; 2385 return false;
2386 2386
2387 drm_connector_attach_property(connector, 2387 drm_object_attach_property(&connector->base,
2388 psb_intel_sdvo_connector->bottom, 2388 psb_intel_sdvo_connector->bottom,
2389 psb_intel_sdvo_connector->bottom_margin); 2389 psb_intel_sdvo_connector->bottom_margin);
2390 DRM_DEBUG_KMS("v_overscan: max %d, " 2390 DRM_DEBUG_KMS("v_overscan: max %d, "
@@ -2416,7 +2416,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
2416 if (!psb_intel_sdvo_connector->dot_crawl) 2416 if (!psb_intel_sdvo_connector->dot_crawl)
2417 return false; 2417 return false;
2418 2418
2419 drm_connector_attach_property(connector, 2419 drm_object_attach_property(&connector->base,
2420 psb_intel_sdvo_connector->dot_crawl, 2420 psb_intel_sdvo_connector->dot_crawl,
2421 psb_intel_sdvo_connector->cur_dot_crawl); 2421 psb_intel_sdvo_connector->cur_dot_crawl);
2422 DRM_DEBUG_KMS("dot crawl: current %d\n", response); 2422 DRM_DEBUG_KMS("dot crawl: current %d\n", response);
diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
index 4a07ab596174..771ff66711af 100644
--- a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
+++ b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
@@ -700,7 +700,7 @@ static struct i2c_driver tc35876x_bridge_i2c_driver = {
700 }, 700 },
701 .id_table = tc35876x_bridge_id, 701 .id_table = tc35876x_bridge_id,
702 .probe = tc35876x_bridge_probe, 702 .probe = tc35876x_bridge_probe,
703 .remove = __devexit_p(tc35876x_bridge_remove), 703 .remove = tc35876x_bridge_remove,
704}; 704};
705 705
706/* LCD panel I2C */ 706/* LCD panel I2C */
@@ -741,7 +741,7 @@ static struct i2c_driver cmi_lcd_i2c_driver = {
741 }, 741 },
742 .id_table = cmi_lcd_i2c_id, 742 .id_table = cmi_lcd_i2c_id,
743 .probe = cmi_lcd_i2c_probe, 743 .probe = cmi_lcd_i2c_probe,
744 .remove = __devexit_p(cmi_lcd_i2c_remove), 744 .remove = cmi_lcd_i2c_remove,
745}; 745};
746 746
747/* HACK to create I2C device while it's not created by platform code */ 747/* HACK to create I2C device while it's not created by platform code */
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 599099fe76e3..b865d0728e28 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -214,7 +214,7 @@ static enum drm_connector_status ch7006_encoder_detect(struct drm_encoder *encod
214 else 214 else
215 priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown; 215 priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
216 216
217 drm_connector_property_set_value(connector, 217 drm_object_property_set_value(&connector->base,
218 encoder->dev->mode_config.tv_subconnector_property, 218 encoder->dev->mode_config.tv_subconnector_property,
219 priv->subconnector); 219 priv->subconnector);
220 220
@@ -254,23 +254,23 @@ static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
254 254
255 priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2); 255 priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2);
256 256
257 drm_connector_attach_property(connector, conf->tv_select_subconnector_property, 257 drm_object_attach_property(&connector->base, conf->tv_select_subconnector_property,
258 priv->select_subconnector); 258 priv->select_subconnector);
259 drm_connector_attach_property(connector, conf->tv_subconnector_property, 259 drm_object_attach_property(&connector->base, conf->tv_subconnector_property,
260 priv->subconnector); 260 priv->subconnector);
261 drm_connector_attach_property(connector, conf->tv_left_margin_property, 261 drm_object_attach_property(&connector->base, conf->tv_left_margin_property,
262 priv->hmargin); 262 priv->hmargin);
263 drm_connector_attach_property(connector, conf->tv_bottom_margin_property, 263 drm_object_attach_property(&connector->base, conf->tv_bottom_margin_property,
264 priv->vmargin); 264 priv->vmargin);
265 drm_connector_attach_property(connector, conf->tv_mode_property, 265 drm_object_attach_property(&connector->base, conf->tv_mode_property,
266 priv->norm); 266 priv->norm);
267 drm_connector_attach_property(connector, conf->tv_brightness_property, 267 drm_object_attach_property(&connector->base, conf->tv_brightness_property,
268 priv->brightness); 268 priv->brightness);
269 drm_connector_attach_property(connector, conf->tv_contrast_property, 269 drm_object_attach_property(&connector->base, conf->tv_contrast_property,
270 priv->contrast); 270 priv->contrast);
271 drm_connector_attach_property(connector, conf->tv_flicker_reduction_property, 271 drm_object_attach_property(&connector->base, conf->tv_flicker_reduction_property,
272 priv->flicker); 272 priv->flicker);
273 drm_connector_attach_property(connector, priv->scale_property, 273 drm_object_attach_property(&connector->base, priv->scale_property,
274 priv->scale); 274 priv->scale);
275 275
276 return 0; 276 return 0;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index dde8b505bf7f..e6a11ca85eaf 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -317,7 +317,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
317 seq_printf(m, "No flip due on pipe %c (plane %c)\n", 317 seq_printf(m, "No flip due on pipe %c (plane %c)\n",
318 pipe, plane); 318 pipe, plane);
319 } else { 319 } else {
320 if (!work->pending) { 320 if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
321 seq_printf(m, "Flip queued on pipe %c (plane %c)\n", 321 seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
322 pipe, plane); 322 pipe, plane);
323 } else { 323 } else {
@@ -328,7 +328,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
328 seq_printf(m, "Stall check enabled, "); 328 seq_printf(m, "Stall check enabled, ");
329 else 329 else
330 seq_printf(m, "Stall check waiting for page flip ioctl, "); 330 seq_printf(m, "Stall check waiting for page flip ioctl, ");
331 seq_printf(m, "%d prepares\n", work->pending); 331 seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
332 332
333 if (work->old_fb_obj) { 333 if (work->old_fb_obj) {
334 struct drm_i915_gem_object *obj = work->old_fb_obj; 334 struct drm_i915_gem_object *obj = work->old_fb_obj;
@@ -655,10 +655,12 @@ static void i915_ring_error_state(struct seq_file *m,
655 if (INTEL_INFO(dev)->gen >= 6) { 655 if (INTEL_INFO(dev)->gen >= 6) {
656 seq_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]); 656 seq_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
657 seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]); 657 seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
658 seq_printf(m, " SYNC_0: 0x%08x\n", 658 seq_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
659 error->semaphore_mboxes[ring][0]); 659 error->semaphore_mboxes[ring][0],
660 seq_printf(m, " SYNC_1: 0x%08x\n", 660 error->semaphore_seqno[ring][0]);
661 error->semaphore_mboxes[ring][1]); 661 seq_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
662 error->semaphore_mboxes[ring][1],
663 error->semaphore_seqno[ring][1]);
662 } 664 }
663 seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]); 665 seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
664 seq_printf(m, " waiting: %s\n", yesno(error->waiting[ring])); 666 seq_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
@@ -1068,7 +1070,7 @@ static int gen6_drpc_info(struct seq_file *m)
1068 struct drm_info_node *node = (struct drm_info_node *) m->private; 1070 struct drm_info_node *node = (struct drm_info_node *) m->private;
1069 struct drm_device *dev = node->minor->dev; 1071 struct drm_device *dev = node->minor->dev;
1070 struct drm_i915_private *dev_priv = dev->dev_private; 1072 struct drm_i915_private *dev_priv = dev->dev_private;
1071 u32 rpmodectl1, gt_core_status, rcctl1; 1073 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
1072 unsigned forcewake_count; 1074 unsigned forcewake_count;
1073 int count=0, ret; 1075 int count=0, ret;
1074 1076
@@ -1097,6 +1099,9 @@ static int gen6_drpc_info(struct seq_file *m)
1097 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1099 rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1098 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1100 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1099 mutex_unlock(&dev->struct_mutex); 1101 mutex_unlock(&dev->struct_mutex);
1102 mutex_lock(&dev_priv->rps.hw_lock);
1103 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
1104 mutex_unlock(&dev_priv->rps.hw_lock);
1100 1105
1101 seq_printf(m, "Video Turbo Mode: %s\n", 1106 seq_printf(m, "Video Turbo Mode: %s\n",
1102 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1107 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
@@ -1148,6 +1153,12 @@ static int gen6_drpc_info(struct seq_file *m)
1148 seq_printf(m, "RC6++ residency since boot: %u\n", 1153 seq_printf(m, "RC6++ residency since boot: %u\n",
1149 I915_READ(GEN6_GT_GFX_RC6pp)); 1154 I915_READ(GEN6_GT_GFX_RC6pp));
1150 1155
1156 seq_printf(m, "RC6 voltage: %dmV\n",
1157 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1158 seq_printf(m, "RC6+ voltage: %dmV\n",
1159 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1160 seq_printf(m, "RC6++ voltage: %dmV\n",
1161 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1151 return 0; 1162 return 0;
1152} 1163}
1153 1164
@@ -1273,7 +1284,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
1273 return 0; 1284 return 0;
1274 } 1285 }
1275 1286
1276 ret = mutex_lock_interruptible(&dev->struct_mutex); 1287 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1277 if (ret) 1288 if (ret)
1278 return ret; 1289 return ret;
1279 1290
@@ -1282,19 +1293,14 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
1282 for (gpu_freq = dev_priv->rps.min_delay; 1293 for (gpu_freq = dev_priv->rps.min_delay;
1283 gpu_freq <= dev_priv->rps.max_delay; 1294 gpu_freq <= dev_priv->rps.max_delay;
1284 gpu_freq++) { 1295 gpu_freq++) {
1285 I915_WRITE(GEN6_PCODE_DATA, gpu_freq); 1296 ia_freq = gpu_freq;
1286 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | 1297 sandybridge_pcode_read(dev_priv,
1287 GEN6_PCODE_READ_MIN_FREQ_TABLE); 1298 GEN6_PCODE_READ_MIN_FREQ_TABLE,
1288 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & 1299 &ia_freq);
1289 GEN6_PCODE_READY) == 0, 10)) {
1290 DRM_ERROR("pcode read of freq table timed out\n");
1291 continue;
1292 }
1293 ia_freq = I915_READ(GEN6_PCODE_DATA);
1294 seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100); 1300 seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100);
1295 } 1301 }
1296 1302
1297 mutex_unlock(&dev->struct_mutex); 1303 mutex_unlock(&dev_priv->rps.hw_lock);
1298 1304
1299 return 0; 1305 return 0;
1300} 1306}
@@ -1398,15 +1404,15 @@ static int i915_context_status(struct seq_file *m, void *unused)
1398 if (ret) 1404 if (ret)
1399 return ret; 1405 return ret;
1400 1406
1401 if (dev_priv->pwrctx) { 1407 if (dev_priv->ips.pwrctx) {
1402 seq_printf(m, "power context "); 1408 seq_printf(m, "power context ");
1403 describe_obj(m, dev_priv->pwrctx); 1409 describe_obj(m, dev_priv->ips.pwrctx);
1404 seq_printf(m, "\n"); 1410 seq_printf(m, "\n");
1405 } 1411 }
1406 1412
1407 if (dev_priv->renderctx) { 1413 if (dev_priv->ips.renderctx) {
1408 seq_printf(m, "render context "); 1414 seq_printf(m, "render context ");
1409 describe_obj(m, dev_priv->renderctx); 1415 describe_obj(m, dev_priv->ips.renderctx);
1410 seq_printf(m, "\n"); 1416 seq_printf(m, "\n");
1411 } 1417 }
1412 1418
@@ -1711,13 +1717,13 @@ i915_max_freq_read(struct file *filp,
1711 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1717 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1712 return -ENODEV; 1718 return -ENODEV;
1713 1719
1714 ret = mutex_lock_interruptible(&dev->struct_mutex); 1720 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1715 if (ret) 1721 if (ret)
1716 return ret; 1722 return ret;
1717 1723
1718 len = snprintf(buf, sizeof(buf), 1724 len = snprintf(buf, sizeof(buf),
1719 "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER); 1725 "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER);
1720 mutex_unlock(&dev->struct_mutex); 1726 mutex_unlock(&dev_priv->rps.hw_lock);
1721 1727
1722 if (len > sizeof(buf)) 1728 if (len > sizeof(buf))
1723 len = sizeof(buf); 1729 len = sizeof(buf);
@@ -1752,7 +1758,7 @@ i915_max_freq_write(struct file *filp,
1752 1758
1753 DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val); 1759 DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
1754 1760
1755 ret = mutex_lock_interruptible(&dev->struct_mutex); 1761 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1756 if (ret) 1762 if (ret)
1757 return ret; 1763 return ret;
1758 1764
@@ -1762,7 +1768,7 @@ i915_max_freq_write(struct file *filp,
1762 dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER; 1768 dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER;
1763 1769
1764 gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER); 1770 gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
1765 mutex_unlock(&dev->struct_mutex); 1771 mutex_unlock(&dev_priv->rps.hw_lock);
1766 1772
1767 return cnt; 1773 return cnt;
1768} 1774}
@@ -1787,13 +1793,13 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
1787 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1793 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1788 return -ENODEV; 1794 return -ENODEV;
1789 1795
1790 ret = mutex_lock_interruptible(&dev->struct_mutex); 1796 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1791 if (ret) 1797 if (ret)
1792 return ret; 1798 return ret;
1793 1799
1794 len = snprintf(buf, sizeof(buf), 1800 len = snprintf(buf, sizeof(buf),
1795 "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER); 1801 "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER);
1796 mutex_unlock(&dev->struct_mutex); 1802 mutex_unlock(&dev_priv->rps.hw_lock);
1797 1803
1798 if (len > sizeof(buf)) 1804 if (len > sizeof(buf))
1799 len = sizeof(buf); 1805 len = sizeof(buf);
@@ -1826,7 +1832,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
1826 1832
1827 DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val); 1833 DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
1828 1834
1829 ret = mutex_lock_interruptible(&dev->struct_mutex); 1835 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1830 if (ret) 1836 if (ret)
1831 return ret; 1837 return ret;
1832 1838
@@ -1836,7 +1842,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
1836 dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER; 1842 dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER;
1837 1843
1838 gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER); 1844 gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
1839 mutex_unlock(&dev->struct_mutex); 1845 mutex_unlock(&dev_priv->rps.hw_lock);
1840 1846
1841 return cnt; 1847 return cnt;
1842} 1848}
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index c9bfd83dde64..99daa896105d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -104,32 +104,6 @@ static void i915_write_hws_pga(struct drm_device *dev)
104} 104}
105 105
106/** 106/**
107 * Sets up the hardware status page for devices that need a physical address
108 * in the register.
109 */
110static int i915_init_phys_hws(struct drm_device *dev)
111{
112 drm_i915_private_t *dev_priv = dev->dev_private;
113
114 /* Program Hardware Status Page */
115 dev_priv->status_page_dmah =
116 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
117
118 if (!dev_priv->status_page_dmah) {
119 DRM_ERROR("Can not allocate hardware status page\n");
120 return -ENOMEM;
121 }
122
123 memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr,
124 0, PAGE_SIZE);
125
126 i915_write_hws_pga(dev);
127
128 DRM_DEBUG_DRIVER("Enabled hardware status page\n");
129 return 0;
130}
131
132/**
133 * Frees the hardware status page, whether it's a physical address or a virtual 107 * Frees the hardware status page, whether it's a physical address or a virtual
134 * address set up by the X Server. 108 * address set up by the X Server.
135 */ 109 */
@@ -167,7 +141,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
167 141
168 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; 142 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
169 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; 143 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
170 ring->space = ring->head - (ring->tail + 8); 144 ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
171 if (ring->space < 0) 145 if (ring->space < 0)
172 ring->space += ring->size; 146 ring->space += ring->size;
173 147
@@ -451,16 +425,16 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
451 drm_i915_private_t *dev_priv = dev->dev_private; 425 drm_i915_private_t *dev_priv = dev->dev_private;
452 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 426 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
453 427
454 dev_priv->counter++; 428 dev_priv->dri1.counter++;
455 if (dev_priv->counter > 0x7FFFFFFFUL) 429 if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
456 dev_priv->counter = 0; 430 dev_priv->dri1.counter = 0;
457 if (master_priv->sarea_priv) 431 if (master_priv->sarea_priv)
458 master_priv->sarea_priv->last_enqueue = dev_priv->counter; 432 master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
459 433
460 if (BEGIN_LP_RING(4) == 0) { 434 if (BEGIN_LP_RING(4) == 0) {
461 OUT_RING(MI_STORE_DWORD_INDEX); 435 OUT_RING(MI_STORE_DWORD_INDEX);
462 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 436 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
463 OUT_RING(dev_priv->counter); 437 OUT_RING(dev_priv->dri1.counter);
464 OUT_RING(0); 438 OUT_RING(0);
465 ADVANCE_LP_RING(); 439 ADVANCE_LP_RING();
466 } 440 }
@@ -602,12 +576,12 @@ static int i915_dispatch_flip(struct drm_device * dev)
602 576
603 ADVANCE_LP_RING(); 577 ADVANCE_LP_RING();
604 578
605 master_priv->sarea_priv->last_enqueue = dev_priv->counter++; 579 master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;
606 580
607 if (BEGIN_LP_RING(4) == 0) { 581 if (BEGIN_LP_RING(4) == 0) {
608 OUT_RING(MI_STORE_DWORD_INDEX); 582 OUT_RING(MI_STORE_DWORD_INDEX);
609 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 583 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
610 OUT_RING(dev_priv->counter); 584 OUT_RING(dev_priv->dri1.counter);
611 OUT_RING(0); 585 OUT_RING(0);
612 ADVANCE_LP_RING(); 586 ADVANCE_LP_RING();
613 } 587 }
@@ -618,10 +592,8 @@ static int i915_dispatch_flip(struct drm_device * dev)
618 592
619static int i915_quiescent(struct drm_device *dev) 593static int i915_quiescent(struct drm_device *dev)
620{ 594{
621 struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
622
623 i915_kernel_lost_context(dev); 595 i915_kernel_lost_context(dev);
624 return intel_wait_ring_idle(ring); 596 return intel_ring_idle(LP_RING(dev->dev_private));
625} 597}
626 598
627static int i915_flush_ioctl(struct drm_device *dev, void *data, 599static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -775,21 +747,21 @@ static int i915_emit_irq(struct drm_device * dev)
775 747
776 DRM_DEBUG_DRIVER("\n"); 748 DRM_DEBUG_DRIVER("\n");
777 749
778 dev_priv->counter++; 750 dev_priv->dri1.counter++;
779 if (dev_priv->counter > 0x7FFFFFFFUL) 751 if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
780 dev_priv->counter = 1; 752 dev_priv->dri1.counter = 1;
781 if (master_priv->sarea_priv) 753 if (master_priv->sarea_priv)
782 master_priv->sarea_priv->last_enqueue = dev_priv->counter; 754 master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
783 755
784 if (BEGIN_LP_RING(4) == 0) { 756 if (BEGIN_LP_RING(4) == 0) {
785 OUT_RING(MI_STORE_DWORD_INDEX); 757 OUT_RING(MI_STORE_DWORD_INDEX);
786 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 758 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
787 OUT_RING(dev_priv->counter); 759 OUT_RING(dev_priv->dri1.counter);
788 OUT_RING(MI_USER_INTERRUPT); 760 OUT_RING(MI_USER_INTERRUPT);
789 ADVANCE_LP_RING(); 761 ADVANCE_LP_RING();
790 } 762 }
791 763
792 return dev_priv->counter; 764 return dev_priv->dri1.counter;
793} 765}
794 766
795static int i915_wait_irq(struct drm_device * dev, int irq_nr) 767static int i915_wait_irq(struct drm_device * dev, int irq_nr)
@@ -820,7 +792,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
820 792
821 if (ret == -EBUSY) { 793 if (ret == -EBUSY) {
822 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", 794 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
823 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); 795 READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
824 } 796 }
825 797
826 return ret; 798 return ret;
@@ -1014,6 +986,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
1014 case I915_PARAM_HAS_PRIME_VMAP_FLUSH: 986 case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
1015 value = 1; 987 value = 1;
1016 break; 988 break;
989 case I915_PARAM_HAS_SECURE_BATCHES:
990 value = capable(CAP_SYS_ADMIN);
991 break;
992 case I915_PARAM_HAS_PINNED_BATCHES:
993 value = 1;
994 break;
1017 default: 995 default:
1018 DRM_DEBUG_DRIVER("Unknown parameter %d\n", 996 DRM_DEBUG_DRIVER("Unknown parameter %d\n",
1019 param->param); 997 param->param);
@@ -1068,7 +1046,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
1068{ 1046{
1069 drm_i915_private_t *dev_priv = dev->dev_private; 1047 drm_i915_private_t *dev_priv = dev->dev_private;
1070 drm_i915_hws_addr_t *hws = data; 1048 drm_i915_hws_addr_t *hws = data;
1071 struct intel_ring_buffer *ring = LP_RING(dev_priv); 1049 struct intel_ring_buffer *ring;
1072 1050
1073 if (drm_core_check_feature(dev, DRIVER_MODESET)) 1051 if (drm_core_check_feature(dev, DRIVER_MODESET))
1074 return -ENODEV; 1052 return -ENODEV;
@@ -1088,6 +1066,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
1088 1066
1089 DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr); 1067 DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
1090 1068
1069 ring = LP_RING(dev_priv);
1091 ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12); 1070 ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
1092 1071
1093 dev_priv->dri1.gfx_hws_cpu_addr = 1072 dev_priv->dri1.gfx_hws_cpu_addr =
@@ -1326,6 +1305,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
1326 1305
1327 intel_modeset_gem_init(dev); 1306 intel_modeset_gem_init(dev);
1328 1307
1308 INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
1309
1329 ret = drm_irq_install(dev); 1310 ret = drm_irq_install(dev);
1330 if (ret) 1311 if (ret)
1331 goto cleanup_gem; 1312 goto cleanup_gem;
@@ -1491,21 +1472,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1491 goto free_priv; 1472 goto free_priv;
1492 } 1473 }
1493 1474
1494 ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL); 1475 ret = i915_gem_gtt_init(dev);
1495 if (!ret) { 1476 if (ret)
1496 DRM_ERROR("failed to set up gmch\n");
1497 ret = -EIO;
1498 goto put_bridge; 1477 goto put_bridge;
1499 }
1500 1478
1501 dev_priv->mm.gtt = intel_gtt_get(); 1479 if (drm_core_check_feature(dev, DRIVER_MODESET))
1502 if (!dev_priv->mm.gtt) { 1480 i915_kick_out_firmware_fb(dev_priv);
1503 DRM_ERROR("Failed to initialize GTT\n");
1504 ret = -ENODEV;
1505 goto put_gmch;
1506 }
1507
1508 i915_kick_out_firmware_fb(dev_priv);
1509 1481
1510 pci_set_master(dev->pdev); 1482 pci_set_master(dev->pdev);
1511 1483
@@ -1589,18 +1561,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1589 intel_setup_gmbus(dev); 1561 intel_setup_gmbus(dev);
1590 intel_opregion_setup(dev); 1562 intel_opregion_setup(dev);
1591 1563
1592 /* Make sure the bios did its job and set up vital registers */
1593 intel_setup_bios(dev); 1564 intel_setup_bios(dev);
1594 1565
1595 i915_gem_load(dev); 1566 i915_gem_load(dev);
1596 1567
1597 /* Init HWS */
1598 if (!I915_NEED_GFX_HWS(dev)) {
1599 ret = i915_init_phys_hws(dev);
1600 if (ret)
1601 goto out_gem_unload;
1602 }
1603
1604 /* On the 945G/GM, the chipset reports the MSI capability on the 1568 /* On the 945G/GM, the chipset reports the MSI capability on the
1605 * integrated graphics even though the support isn't actually there 1569 * integrated graphics even though the support isn't actually there
1606 * according to the published specs. It doesn't appear to function 1570 * according to the published specs. It doesn't appear to function
@@ -1620,6 +1584,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1620 spin_lock_init(&dev_priv->rps.lock); 1584 spin_lock_init(&dev_priv->rps.lock);
1621 spin_lock_init(&dev_priv->dpio_lock); 1585 spin_lock_init(&dev_priv->dpio_lock);
1622 1586
1587 mutex_init(&dev_priv->rps.hw_lock);
1588
1623 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 1589 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
1624 dev_priv->num_pipe = 3; 1590 dev_priv->num_pipe = 3;
1625 else if (IS_MOBILE(dev) || !IS_GEN2(dev)) 1591 else if (IS_MOBILE(dev) || !IS_GEN2(dev))
@@ -1677,7 +1643,7 @@ out_mtrrfree:
1677out_rmmap: 1643out_rmmap:
1678 pci_iounmap(dev->pdev, dev_priv->regs); 1644 pci_iounmap(dev->pdev, dev_priv->regs);
1679put_gmch: 1645put_gmch:
1680 intel_gmch_remove(); 1646 i915_gem_gtt_fini(dev);
1681put_bridge: 1647put_bridge:
1682 pci_dev_put(dev_priv->bridge_dev); 1648 pci_dev_put(dev_priv->bridge_dev);
1683free_priv: 1649free_priv:
@@ -1720,6 +1686,7 @@ int i915_driver_unload(struct drm_device *dev)
1720 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1686 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1721 intel_fbdev_fini(dev); 1687 intel_fbdev_fini(dev);
1722 intel_modeset_cleanup(dev); 1688 intel_modeset_cleanup(dev);
1689 cancel_work_sync(&dev_priv->console_resume_work);
1723 1690
1724 /* 1691 /*
1725 * free the memory space allocated for the child device 1692 * free the memory space allocated for the child device
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 6770ee6084b4..117265840b1f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -47,11 +47,11 @@ MODULE_PARM_DESC(modeset,
47unsigned int i915_fbpercrtc __always_unused = 0; 47unsigned int i915_fbpercrtc __always_unused = 0;
48module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); 48module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
49 49
50int i915_panel_ignore_lid __read_mostly = 0; 50int i915_panel_ignore_lid __read_mostly = 1;
51module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600); 51module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
52MODULE_PARM_DESC(panel_ignore_lid, 52MODULE_PARM_DESC(panel_ignore_lid,
53 "Override lid status (0=autodetect [default], 1=lid open, " 53 "Override lid status (0=autodetect, 1=autodetect disabled [default], "
54 "-1=lid closed)"); 54 "-1=force lid closed, -2=force lid open)");
55 55
56unsigned int i915_powersave __read_mostly = 1; 56unsigned int i915_powersave __read_mostly = 1;
57module_param_named(powersave, i915_powersave, int, 0600); 57module_param_named(powersave, i915_powersave, int, 0600);
@@ -396,12 +396,6 @@ static const struct pci_device_id pciidlist[] = { /* aka */
396MODULE_DEVICE_TABLE(pci, pciidlist); 396MODULE_DEVICE_TABLE(pci, pciidlist);
397#endif 397#endif
398 398
399#define INTEL_PCH_DEVICE_ID_MASK 0xff00
400#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
401#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
402#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
403#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
404
405void intel_detect_pch(struct drm_device *dev) 399void intel_detect_pch(struct drm_device *dev)
406{ 400{
407 struct drm_i915_private *dev_priv = dev->dev_private; 401 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -416,26 +410,36 @@ void intel_detect_pch(struct drm_device *dev)
416 pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); 410 pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
417 if (pch) { 411 if (pch) {
418 if (pch->vendor == PCI_VENDOR_ID_INTEL) { 412 if (pch->vendor == PCI_VENDOR_ID_INTEL) {
419 int id; 413 unsigned short id;
420 id = pch->device & INTEL_PCH_DEVICE_ID_MASK; 414 id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
415 dev_priv->pch_id = id;
421 416
422 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { 417 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
423 dev_priv->pch_type = PCH_IBX; 418 dev_priv->pch_type = PCH_IBX;
424 dev_priv->num_pch_pll = 2; 419 dev_priv->num_pch_pll = 2;
425 DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); 420 DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
421 WARN_ON(!IS_GEN5(dev));
426 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { 422 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
427 dev_priv->pch_type = PCH_CPT; 423 dev_priv->pch_type = PCH_CPT;
428 dev_priv->num_pch_pll = 2; 424 dev_priv->num_pch_pll = 2;
429 DRM_DEBUG_KMS("Found CougarPoint PCH\n"); 425 DRM_DEBUG_KMS("Found CougarPoint PCH\n");
426 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
430 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { 427 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
431 /* PantherPoint is CPT compatible */ 428 /* PantherPoint is CPT compatible */
432 dev_priv->pch_type = PCH_CPT; 429 dev_priv->pch_type = PCH_CPT;
433 dev_priv->num_pch_pll = 2; 430 dev_priv->num_pch_pll = 2;
434 DRM_DEBUG_KMS("Found PatherPoint PCH\n"); 431 DRM_DEBUG_KMS("Found PatherPoint PCH\n");
432 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
435 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { 433 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
436 dev_priv->pch_type = PCH_LPT; 434 dev_priv->pch_type = PCH_LPT;
437 dev_priv->num_pch_pll = 0; 435 dev_priv->num_pch_pll = 0;
438 DRM_DEBUG_KMS("Found LynxPoint PCH\n"); 436 DRM_DEBUG_KMS("Found LynxPoint PCH\n");
437 WARN_ON(!IS_HASWELL(dev));
438 } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
439 dev_priv->pch_type = PCH_LPT;
440 dev_priv->num_pch_pll = 0;
441 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
442 WARN_ON(!IS_HASWELL(dev));
439 } 443 }
440 BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS); 444 BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
441 } 445 }
@@ -477,6 +481,8 @@ static int i915_drm_freeze(struct drm_device *dev)
477 return error; 481 return error;
478 } 482 }
479 483
484 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
485
480 intel_modeset_disable(dev); 486 intel_modeset_disable(dev);
481 487
482 drm_irq_uninstall(dev); 488 drm_irq_uninstall(dev);
@@ -526,24 +532,29 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
526 return 0; 532 return 0;
527} 533}
528 534
529static int i915_drm_thaw(struct drm_device *dev) 535void intel_console_resume(struct work_struct *work)
536{
537 struct drm_i915_private *dev_priv =
538 container_of(work, struct drm_i915_private,
539 console_resume_work);
540 struct drm_device *dev = dev_priv->dev;
541
542 console_lock();
543 intel_fbdev_set_suspend(dev, 0);
544 console_unlock();
545}
546
547static int __i915_drm_thaw(struct drm_device *dev)
530{ 548{
531 struct drm_i915_private *dev_priv = dev->dev_private; 549 struct drm_i915_private *dev_priv = dev->dev_private;
532 int error = 0; 550 int error = 0;
533 551
534 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
535 mutex_lock(&dev->struct_mutex);
536 i915_gem_restore_gtt_mappings(dev);
537 mutex_unlock(&dev->struct_mutex);
538 }
539
540 i915_restore_state(dev); 552 i915_restore_state(dev);
541 intel_opregion_setup(dev); 553 intel_opregion_setup(dev);
542 554
543 /* KMS EnterVT equivalent */ 555 /* KMS EnterVT equivalent */
544 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 556 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
545 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 557 intel_init_pch_refclk(dev);
546 ironlake_init_pch_refclk(dev);
547 558
548 mutex_lock(&dev->struct_mutex); 559 mutex_lock(&dev->struct_mutex);
549 dev_priv->mm.suspended = 0; 560 dev_priv->mm.suspended = 0;
@@ -552,8 +563,7 @@ static int i915_drm_thaw(struct drm_device *dev)
552 mutex_unlock(&dev->struct_mutex); 563 mutex_unlock(&dev->struct_mutex);
553 564
554 intel_modeset_init_hw(dev); 565 intel_modeset_init_hw(dev);
555 intel_modeset_setup_hw_state(dev); 566 intel_modeset_setup_hw_state(dev, false);
556 drm_mode_config_reset(dev);
557 drm_irq_install(dev); 567 drm_irq_install(dev);
558 } 568 }
559 569
@@ -561,14 +571,41 @@ static int i915_drm_thaw(struct drm_device *dev)
561 571
562 dev_priv->modeset_on_lid = 0; 572 dev_priv->modeset_on_lid = 0;
563 573
564 console_lock(); 574 /*
565 intel_fbdev_set_suspend(dev, 0); 575 * The console lock can be pretty contented on resume due
566 console_unlock(); 576 * to all the printk activity. Try to keep it out of the hot
577 * path of resume if possible.
578 */
579 if (console_trylock()) {
580 intel_fbdev_set_suspend(dev, 0);
581 console_unlock();
582 } else {
583 schedule_work(&dev_priv->console_resume_work);
584 }
585
586 return error;
587}
588
589static int i915_drm_thaw(struct drm_device *dev)
590{
591 int error = 0;
592
593 intel_gt_reset(dev);
594
595 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
596 mutex_lock(&dev->struct_mutex);
597 i915_gem_restore_gtt_mappings(dev);
598 mutex_unlock(&dev->struct_mutex);
599 }
600
601 __i915_drm_thaw(dev);
602
567 return error; 603 return error;
568} 604}
569 605
570int i915_resume(struct drm_device *dev) 606int i915_resume(struct drm_device *dev)
571{ 607{
608 struct drm_i915_private *dev_priv = dev->dev_private;
572 int ret; 609 int ret;
573 610
574 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 611 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -579,7 +616,20 @@ int i915_resume(struct drm_device *dev)
579 616
580 pci_set_master(dev->pdev); 617 pci_set_master(dev->pdev);
581 618
582 ret = i915_drm_thaw(dev); 619 intel_gt_reset(dev);
620
621 /*
622 * Platforms with opregion should have sane BIOS, older ones (gen3 and
623 * earlier) need this since the BIOS might clear all our scratch PTEs.
624 */
625 if (drm_core_check_feature(dev, DRIVER_MODESET) &&
626 !dev_priv->opregion.header) {
627 mutex_lock(&dev->struct_mutex);
628 i915_gem_restore_gtt_mappings(dev);
629 mutex_unlock(&dev->struct_mutex);
630 }
631
632 ret = __i915_drm_thaw(dev);
583 if (ret) 633 if (ret)
584 return ret; 634 return ret;
585 635
@@ -827,13 +877,12 @@ int i915_reset(struct drm_device *dev)
827 return 0; 877 return 0;
828} 878}
829 879
830static int __devinit 880static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
831i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
832{ 881{
833 struct intel_device_info *intel_info = 882 struct intel_device_info *intel_info =
834 (struct intel_device_info *) ent->driver_data; 883 (struct intel_device_info *) ent->driver_data;
835 884
836 if (intel_info->is_haswell || intel_info->is_valleyview) 885 if (intel_info->is_valleyview)
837 if(!i915_preliminary_hw_support) { 886 if(!i915_preliminary_hw_support) {
838 DRM_ERROR("Preliminary hardware support disabled\n"); 887 DRM_ERROR("Preliminary hardware support disabled\n");
839 return -ENODEV; 888 return -ENODEV;
@@ -1140,12 +1189,40 @@ static bool IS_DISPLAYREG(u32 reg)
1140 if (reg == GEN6_GDRST) 1189 if (reg == GEN6_GDRST)
1141 return false; 1190 return false;
1142 1191
1192 switch (reg) {
1193 case _3D_CHICKEN3:
1194 case IVB_CHICKEN3:
1195 case GEN7_COMMON_SLICE_CHICKEN1:
1196 case GEN7_L3CNTLREG1:
1197 case GEN7_L3_CHICKEN_MODE_REGISTER:
1198 case GEN7_ROW_CHICKEN2:
1199 case GEN7_L3SQCREG4:
1200 case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG:
1201 case GEN7_HALF_SLICE_CHICKEN1:
1202 case GEN6_MBCTL:
1203 case GEN6_UCGCTL2:
1204 return false;
1205 default:
1206 break;
1207 }
1208
1143 return true; 1209 return true;
1144} 1210}
1145 1211
1212static void
1213ilk_dummy_write(struct drm_i915_private *dev_priv)
1214{
1215 /* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the
1216 * chip from rc6 before touching it for real. MI_MODE is masked, hence
1217 * harmless to write 0 into. */
1218 I915_WRITE_NOTRACE(MI_MODE, 0);
1219}
1220
1146#define __i915_read(x, y) \ 1221#define __i915_read(x, y) \
1147u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ 1222u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
1148 u##x val = 0; \ 1223 u##x val = 0; \
1224 if (IS_GEN5(dev_priv->dev)) \
1225 ilk_dummy_write(dev_priv); \
1149 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ 1226 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
1150 unsigned long irqflags; \ 1227 unsigned long irqflags; \
1151 spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ 1228 spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
@@ -1177,6 +1254,12 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
1177 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ 1254 if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
1178 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ 1255 __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
1179 } \ 1256 } \
1257 if (IS_GEN5(dev_priv->dev)) \
1258 ilk_dummy_write(dev_priv); \
1259 if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
1260 DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
1261 I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
1262 } \
1180 if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \ 1263 if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
1181 write##y(val, dev_priv->regs + reg + 0x180000); \ 1264 write##y(val, dev_priv->regs + reg + 0x180000); \
1182 } else { \ 1265 } else { \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f511fa2f4168..ed3059575576 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -58,6 +58,14 @@ enum pipe {
58}; 58};
59#define pipe_name(p) ((p) + 'A') 59#define pipe_name(p) ((p) + 'A')
60 60
61enum transcoder {
62 TRANSCODER_A = 0,
63 TRANSCODER_B,
64 TRANSCODER_C,
65 TRANSCODER_EDP = 0xF,
66};
67#define transcoder_name(t) ((t) + 'A')
68
61enum plane { 69enum plane {
62 PLANE_A = 0, 70 PLANE_A = 0,
63 PLANE_B, 71 PLANE_B,
@@ -93,6 +101,12 @@ struct intel_pch_pll {
93}; 101};
94#define I915_NUM_PLLS 2 102#define I915_NUM_PLLS 2
95 103
104struct intel_ddi_plls {
105 int spll_refcount;
106 int wrpll1_refcount;
107 int wrpll2_refcount;
108};
109
96/* Interface history: 110/* Interface history:
97 * 111 *
98 * 1.1: Original. 112 * 1.1: Original.
@@ -123,14 +137,6 @@ struct drm_i915_gem_phys_object {
123 struct drm_i915_gem_object *cur_obj; 137 struct drm_i915_gem_object *cur_obj;
124}; 138};
125 139
126struct mem_block {
127 struct mem_block *next;
128 struct mem_block *prev;
129 int start;
130 int size;
131 struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
132};
133
134struct opregion_header; 140struct opregion_header;
135struct opregion_acpi; 141struct opregion_acpi;
136struct opregion_swsci; 142struct opregion_swsci;
@@ -191,6 +197,7 @@ struct drm_i915_error_state {
191 u32 instdone[I915_NUM_RINGS]; 197 u32 instdone[I915_NUM_RINGS];
192 u32 acthd[I915_NUM_RINGS]; 198 u32 acthd[I915_NUM_RINGS];
193 u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1]; 199 u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
200 u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
194 u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */ 201 u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
195 /* our own tracking of ring head and tail */ 202 /* our own tracking of ring head and tail */
196 u32 cpu_ring_head[I915_NUM_RINGS]; 203 u32 cpu_ring_head[I915_NUM_RINGS];
@@ -251,6 +258,7 @@ struct drm_i915_display_funcs {
251 uint32_t sprite_width, int pixel_size); 258 uint32_t sprite_width, int pixel_size);
252 void (*update_linetime_wm)(struct drm_device *dev, int pipe, 259 void (*update_linetime_wm)(struct drm_device *dev, int pipe,
253 struct drm_display_mode *mode); 260 struct drm_display_mode *mode);
261 void (*modeset_global_resources)(struct drm_device *dev);
254 int (*crtc_mode_set)(struct drm_crtc *crtc, 262 int (*crtc_mode_set)(struct drm_crtc *crtc,
255 struct drm_display_mode *mode, 263 struct drm_display_mode *mode,
256 struct drm_display_mode *adjusted_mode, 264 struct drm_display_mode *adjusted_mode,
@@ -263,7 +271,6 @@ struct drm_i915_display_funcs {
263 struct drm_crtc *crtc); 271 struct drm_crtc *crtc);
264 void (*fdi_link_train)(struct drm_crtc *crtc); 272 void (*fdi_link_train)(struct drm_crtc *crtc);
265 void (*init_clock_gating)(struct drm_device *dev); 273 void (*init_clock_gating)(struct drm_device *dev);
266 void (*init_pch_clock_gating)(struct drm_device *dev);
267 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, 274 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
268 struct drm_framebuffer *fb, 275 struct drm_framebuffer *fb,
269 struct drm_i915_gem_object *obj); 276 struct drm_i915_gem_object *obj);
@@ -338,6 +345,7 @@ struct intel_device_info {
338#define I915_PPGTT_PD_ENTRIES 512 345#define I915_PPGTT_PD_ENTRIES 512
339#define I915_PPGTT_PT_ENTRIES 1024 346#define I915_PPGTT_PT_ENTRIES 1024
340struct i915_hw_ppgtt { 347struct i915_hw_ppgtt {
348 struct drm_device *dev;
341 unsigned num_pd_entries; 349 unsigned num_pd_entries;
342 struct page **pt_pages; 350 struct page **pt_pages;
343 uint32_t pd_offset; 351 uint32_t pd_offset;
@@ -374,6 +382,11 @@ enum intel_pch {
374 PCH_LPT, /* Lynxpoint PCH */ 382 PCH_LPT, /* Lynxpoint PCH */
375}; 383};
376 384
385enum intel_sbi_destination {
386 SBI_ICLK,
387 SBI_MPHY,
388};
389
377#define QUIRK_PIPEA_FORCE (1<<0) 390#define QUIRK_PIPEA_FORCE (1<<0)
378#define QUIRK_LVDS_SSC_DISABLE (1<<1) 391#define QUIRK_LVDS_SSC_DISABLE (1<<1)
379#define QUIRK_INVERT_BRIGHTNESS (1<<2) 392#define QUIRK_INVERT_BRIGHTNESS (1<<2)
@@ -383,154 +396,18 @@ struct intel_fbc_work;
383 396
384struct intel_gmbus { 397struct intel_gmbus {
385 struct i2c_adapter adapter; 398 struct i2c_adapter adapter;
386 bool force_bit; 399 u32 force_bit;
387 u32 reg0; 400 u32 reg0;
388 u32 gpio_reg; 401 u32 gpio_reg;
389 struct i2c_algo_bit_data bit_algo; 402 struct i2c_algo_bit_data bit_algo;
390 struct drm_i915_private *dev_priv; 403 struct drm_i915_private *dev_priv;
391}; 404};
392 405
393typedef struct drm_i915_private { 406struct i915_suspend_saved_registers {
394 struct drm_device *dev;
395
396 const struct intel_device_info *info;
397
398 int relative_constants_mode;
399
400 void __iomem *regs;
401
402 struct drm_i915_gt_funcs gt;
403 /** gt_fifo_count and the subsequent register write are synchronized
404 * with dev->struct_mutex. */
405 unsigned gt_fifo_count;
406 /** forcewake_count is protected by gt_lock */
407 unsigned forcewake_count;
408 /** gt_lock is also taken in irq contexts. */
409 struct spinlock gt_lock;
410
411 struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
412
413 /** gmbus_mutex protects against concurrent usage of the single hw gmbus
414 * controller on different i2c buses. */
415 struct mutex gmbus_mutex;
416
417 /**
418 * Base address of the gmbus and gpio block.
419 */
420 uint32_t gpio_mmio_base;
421
422 struct pci_dev *bridge_dev;
423 struct intel_ring_buffer ring[I915_NUM_RINGS];
424 uint32_t next_seqno;
425
426 drm_dma_handle_t *status_page_dmah;
427 uint32_t counter;
428 struct drm_i915_gem_object *pwrctx;
429 struct drm_i915_gem_object *renderctx;
430
431 struct resource mch_res;
432
433 atomic_t irq_received;
434
435 /* protects the irq masks */
436 spinlock_t irq_lock;
437
438 /* DPIO indirect register protection */
439 spinlock_t dpio_lock;
440
441 /** Cached value of IMR to avoid reads in updating the bitfield */
442 u32 pipestat[2];
443 u32 irq_mask;
444 u32 gt_irq_mask;
445 u32 pch_irq_mask;
446
447 u32 hotplug_supported_mask;
448 struct work_struct hotplug_work;
449
450 int num_pipe;
451 int num_pch_pll;
452
453 /* For hangcheck timer */
454#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
455 struct timer_list hangcheck_timer;
456 int hangcheck_count;
457 uint32_t last_acthd[I915_NUM_RINGS];
458 uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
459
460 unsigned int stop_rings;
461
462 unsigned long cfb_size;
463 unsigned int cfb_fb;
464 enum plane cfb_plane;
465 int cfb_y;
466 struct intel_fbc_work *fbc_work;
467
468 struct intel_opregion opregion;
469
470 /* overlay */
471 struct intel_overlay *overlay;
472 bool sprite_scaling_enabled;
473
474 /* LVDS info */
475 int backlight_level; /* restore backlight to this value */
476 bool backlight_enabled;
477 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
478 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
479
480 /* Feature bits from the VBIOS */
481 unsigned int int_tv_support:1;
482 unsigned int lvds_dither:1;
483 unsigned int lvds_vbt:1;
484 unsigned int int_crt_support:1;
485 unsigned int lvds_use_ssc:1;
486 unsigned int display_clock_mode:1;
487 int lvds_ssc_freq;
488 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
489 unsigned int lvds_val; /* used for checking LVDS channel mode */
490 struct {
491 int rate;
492 int lanes;
493 int preemphasis;
494 int vswing;
495
496 bool initialized;
497 bool support;
498 int bpp;
499 struct edp_power_seq pps;
500 } edp;
501 bool no_aux_handshake;
502
503 struct notifier_block lid_notifier;
504
505 int crt_ddc_pin;
506 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
507 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
508 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
509
510 unsigned int fsb_freq, mem_freq, is_ddr3;
511
512 spinlock_t error_lock;
513 /* Protected by dev->error_lock. */
514 struct drm_i915_error_state *first_error;
515 struct work_struct error_work;
516 struct completion error_completion;
517 struct workqueue_struct *wq;
518
519 /* Display functions */
520 struct drm_i915_display_funcs display;
521
522 /* PCH chipset type */
523 enum intel_pch pch_type;
524
525 unsigned long quirks;
526
527 /* Register state */
528 bool modeset_on_lid;
529 u8 saveLBB; 407 u8 saveLBB;
530 u32 saveDSPACNTR; 408 u32 saveDSPACNTR;
531 u32 saveDSPBCNTR; 409 u32 saveDSPBCNTR;
532 u32 saveDSPARB; 410 u32 saveDSPARB;
533 u32 saveHWS;
534 u32 savePIPEACONF; 411 u32 savePIPEACONF;
535 u32 savePIPEBCONF; 412 u32 savePIPEBCONF;
536 u32 savePIPEASRC; 413 u32 savePIPEASRC;
@@ -676,10 +553,206 @@ typedef struct drm_i915_private {
676 u32 savePIPEB_LINK_N1; 553 u32 savePIPEB_LINK_N1;
677 u32 saveMCHBAR_RENDER_STANDBY; 554 u32 saveMCHBAR_RENDER_STANDBY;
678 u32 savePCH_PORT_HOTPLUG; 555 u32 savePCH_PORT_HOTPLUG;
556};
557
558struct intel_gen6_power_mgmt {
559 struct work_struct work;
560 u32 pm_iir;
561 /* lock - irqsave spinlock that protectects the work_struct and
562 * pm_iir. */
563 spinlock_t lock;
564
565 /* The below variables an all the rps hw state are protected by
566 * dev->struct mutext. */
567 u8 cur_delay;
568 u8 min_delay;
569 u8 max_delay;
570
571 struct delayed_work delayed_resume_work;
572
573 /*
574 * Protects RPS/RC6 register access and PCU communication.
575 * Must be taken after struct_mutex if nested.
576 */
577 struct mutex hw_lock;
578};
579
580struct intel_ilk_power_mgmt {
581 u8 cur_delay;
582 u8 min_delay;
583 u8 max_delay;
584 u8 fmax;
585 u8 fstart;
586
587 u64 last_count1;
588 unsigned long last_time1;
589 unsigned long chipset_power;
590 u64 last_count2;
591 struct timespec last_time2;
592 unsigned long gfx_power;
593 u8 corr;
594
595 int c_m;
596 int r_t;
597
598 struct drm_i915_gem_object *pwrctx;
599 struct drm_i915_gem_object *renderctx;
600};
601
602struct i915_dri1_state {
603 unsigned allow_batchbuffer : 1;
604 u32 __iomem *gfx_hws_cpu_addr;
605
606 unsigned int cpp;
607 int back_offset;
608 int front_offset;
609 int current_page;
610 int page_flipping;
611
612 uint32_t counter;
613};
614
615struct intel_l3_parity {
616 u32 *remap_info;
617 struct work_struct error_work;
618};
619
620typedef struct drm_i915_private {
621 struct drm_device *dev;
622
623 const struct intel_device_info *info;
624
625 int relative_constants_mode;
626
627 void __iomem *regs;
628
629 struct drm_i915_gt_funcs gt;
630 /** gt_fifo_count and the subsequent register write are synchronized
631 * with dev->struct_mutex. */
632 unsigned gt_fifo_count;
633 /** forcewake_count is protected by gt_lock */
634 unsigned forcewake_count;
635 /** gt_lock is also taken in irq contexts. */
636 struct spinlock gt_lock;
637
638 struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
639
640 /** gmbus_mutex protects against concurrent usage of the single hw gmbus
641 * controller on different i2c buses. */
642 struct mutex gmbus_mutex;
643
644 /**
645 * Base address of the gmbus and gpio block.
646 */
647 uint32_t gpio_mmio_base;
648
649 struct pci_dev *bridge_dev;
650 struct intel_ring_buffer ring[I915_NUM_RINGS];
651 uint32_t next_seqno;
652
653 drm_dma_handle_t *status_page_dmah;
654 struct resource mch_res;
655
656 atomic_t irq_received;
657
658 /* protects the irq masks */
659 spinlock_t irq_lock;
660
661 /* DPIO indirect register protection */
662 spinlock_t dpio_lock;
663
664 /** Cached value of IMR to avoid reads in updating the bitfield */
665 u32 pipestat[2];
666 u32 irq_mask;
667 u32 gt_irq_mask;
668 u32 pch_irq_mask;
669
670 u32 hotplug_supported_mask;
671 struct work_struct hotplug_work;
672
673 int num_pipe;
674 int num_pch_pll;
675
676 /* For hangcheck timer */
677#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
678#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
679 struct timer_list hangcheck_timer;
680 int hangcheck_count;
681 uint32_t last_acthd[I915_NUM_RINGS];
682 uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
683
684 unsigned int stop_rings;
685
686 unsigned long cfb_size;
687 unsigned int cfb_fb;
688 enum plane cfb_plane;
689 int cfb_y;
690 struct intel_fbc_work *fbc_work;
691
692 struct intel_opregion opregion;
693
694 /* overlay */
695 struct intel_overlay *overlay;
696 bool sprite_scaling_enabled;
697
698 /* LVDS info */
699 int backlight_level; /* restore backlight to this value */
700 bool backlight_enabled;
701 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
702 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
703
704 /* Feature bits from the VBIOS */
705 unsigned int int_tv_support:1;
706 unsigned int lvds_dither:1;
707 unsigned int lvds_vbt:1;
708 unsigned int int_crt_support:1;
709 unsigned int lvds_use_ssc:1;
710 unsigned int display_clock_mode:1;
711 int lvds_ssc_freq;
712 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
713 unsigned int lvds_val; /* used for checking LVDS channel mode */
714 struct {
715 int rate;
716 int lanes;
717 int preemphasis;
718 int vswing;
719
720 bool initialized;
721 bool support;
722 int bpp;
723 struct edp_power_seq pps;
724 } edp;
725 bool no_aux_handshake;
726
727 int crt_ddc_pin;
728 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
729 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
730 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
731
732 unsigned int fsb_freq, mem_freq, is_ddr3;
733
734 spinlock_t error_lock;
735 /* Protected by dev->error_lock. */
736 struct drm_i915_error_state *first_error;
737 struct work_struct error_work;
738 struct completion error_completion;
739 struct workqueue_struct *wq;
740
741 /* Display functions */
742 struct drm_i915_display_funcs display;
743
744 /* PCH chipset type */
745 enum intel_pch pch_type;
746 unsigned short pch_id;
747
748 unsigned long quirks;
749
750 /* Register state */
751 bool modeset_on_lid;
679 752
680 struct { 753 struct {
681 /** Bridge to intel-gtt-ko */ 754 /** Bridge to intel-gtt-ko */
682 const struct intel_gtt *gtt; 755 struct intel_gtt *gtt;
683 /** Memory allocator for GTT stolen memory */ 756 /** Memory allocator for GTT stolen memory */
684 struct drm_mm stolen; 757 struct drm_mm stolen;
685 /** Memory allocator for GTT */ 758 /** Memory allocator for GTT */
@@ -706,9 +779,8 @@ typedef struct drm_i915_private {
706 /** PPGTT used for aliasing the PPGTT with the GTT */ 779 /** PPGTT used for aliasing the PPGTT with the GTT */
707 struct i915_hw_ppgtt *aliasing_ppgtt; 780 struct i915_hw_ppgtt *aliasing_ppgtt;
708 781
709 u32 *l3_remap_info;
710
711 struct shrinker inactive_shrinker; 782 struct shrinker inactive_shrinker;
783 bool shrinker_no_lock_stealing;
712 784
713 /** 785 /**
714 * List of objects currently involved in rendering. 786 * List of objects currently involved in rendering.
@@ -785,19 +857,6 @@ typedef struct drm_i915_private {
785 u32 object_count; 857 u32 object_count;
786 } mm; 858 } mm;
787 859
788 /* Old dri1 support infrastructure, beware the dragons ya fools entering
789 * here! */
790 struct {
791 unsigned allow_batchbuffer : 1;
792 u32 __iomem *gfx_hws_cpu_addr;
793
794 unsigned int cpp;
795 int back_offset;
796 int front_offset;
797 int current_page;
798 int page_flipping;
799 } dri1;
800
801 /* Kernel Modesetting */ 860 /* Kernel Modesetting */
802 861
803 struct sdvo_device_mapping sdvo_mappings[2]; 862 struct sdvo_device_mapping sdvo_mappings[2];
@@ -811,6 +870,7 @@ typedef struct drm_i915_private {
811 wait_queue_head_t pending_flip_queue; 870 wait_queue_head_t pending_flip_queue;
812 871
813 struct intel_pch_pll pch_plls[I915_NUM_PLLS]; 872 struct intel_pch_pll pch_plls[I915_NUM_PLLS];
873 struct intel_ddi_plls ddi_plls;
814 874
815 /* Reclocking support */ 875 /* Reclocking support */
816 bool render_reclock_avail; 876 bool render_reclock_avail;
@@ -820,46 +880,17 @@ typedef struct drm_i915_private {
820 u16 orig_clock; 880 u16 orig_clock;
821 int child_dev_num; 881 int child_dev_num;
822 struct child_device_config *child_dev; 882 struct child_device_config *child_dev;
823 struct drm_connector *int_lvds_connector;
824 struct drm_connector *int_edp_connector;
825 883
826 bool mchbar_need_disable; 884 bool mchbar_need_disable;
827 885
886 struct intel_l3_parity l3_parity;
887
828 /* gen6+ rps state */ 888 /* gen6+ rps state */
829 struct { 889 struct intel_gen6_power_mgmt rps;
830 struct work_struct work;
831 u32 pm_iir;
832 /* lock - irqsave spinlock that protectects the work_struct and
833 * pm_iir. */
834 spinlock_t lock;
835
836 /* The below variables an all the rps hw state are protected by
837 * dev->struct mutext. */
838 u8 cur_delay;
839 u8 min_delay;
840 u8 max_delay;
841 } rps;
842 890
843 /* ilk-only ips/rps state. Everything in here is protected by the global 891 /* ilk-only ips/rps state. Everything in here is protected by the global
844 * mchdev_lock in intel_pm.c */ 892 * mchdev_lock in intel_pm.c */
845 struct { 893 struct intel_ilk_power_mgmt ips;
846 u8 cur_delay;
847 u8 min_delay;
848 u8 max_delay;
849 u8 fmax;
850 u8 fstart;
851
852 u64 last_count1;
853 unsigned long last_time1;
854 unsigned long chipset_power;
855 u64 last_count2;
856 struct timespec last_time2;
857 unsigned long gfx_power;
858 u8 corr;
859
860 int c_m;
861 int r_t;
862 } ips;
863 894
864 enum no_fbc_reason no_fbc_reason; 895 enum no_fbc_reason no_fbc_reason;
865 896
@@ -871,14 +902,27 @@ typedef struct drm_i915_private {
871 /* list of fbdev register on this device */ 902 /* list of fbdev register on this device */
872 struct intel_fbdev *fbdev; 903 struct intel_fbdev *fbdev;
873 904
905 /*
906 * The console may be contended at resume, but we don't
907 * want it to block on it.
908 */
909 struct work_struct console_resume_work;
910
874 struct backlight_device *backlight; 911 struct backlight_device *backlight;
875 912
876 struct drm_property *broadcast_rgb_property; 913 struct drm_property *broadcast_rgb_property;
877 struct drm_property *force_audio_property; 914 struct drm_property *force_audio_property;
878 915
879 struct work_struct parity_error_work;
880 bool hw_contexts_disabled; 916 bool hw_contexts_disabled;
881 uint32_t hw_context_size; 917 uint32_t hw_context_size;
918
919 bool fdi_rx_polarity_reversed;
920
921 struct i915_suspend_saved_registers regfile;
922
923 /* Old dri1 support infrastructure, beware the dragons ya fools entering
924 * here! */
925 struct i915_dri1_state dri1;
882} drm_i915_private_t; 926} drm_i915_private_t;
883 927
884/* Iterate over initialised rings */ 928/* Iterate over initialised rings */
@@ -1057,6 +1101,7 @@ struct drm_i915_gem_object {
1057 */ 1101 */
1058 atomic_t pending_flip; 1102 atomic_t pending_flip;
1059}; 1103};
1104#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
1060 1105
1061#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) 1106#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
1062 1107
@@ -1120,9 +1165,17 @@ struct drm_i915_file_private {
1120#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042) 1165#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
1121#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046) 1166#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
1122#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge) 1167#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
1168#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
1169 (dev)->pci_device == 0x0152 || \
1170 (dev)->pci_device == 0x015a)
1171#define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \
1172 (dev)->pci_device == 0x0106 || \
1173 (dev)->pci_device == 0x010A)
1123#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) 1174#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
1124#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) 1175#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
1125#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 1176#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
1177#define IS_ULT(dev) (IS_HASWELL(dev) && \
1178 ((dev)->pci_device & 0xFF00) == 0x0A00)
1126 1179
1127/* 1180/*
1128 * The genX designation typically refers to the render engine, so render 1181 * The genX designation typically refers to the render engine, so render
@@ -1148,6 +1201,9 @@ struct drm_i915_file_private {
1148#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay) 1201#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
1149#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical) 1202#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
1150 1203
1204/* Early gen2 have a totally busted CS tlb and require pinned batches. */
1205#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
1206
1151/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte 1207/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
1152 * rows, which changed the alignment requirements and fence programming. 1208 * rows, which changed the alignment requirements and fence programming.
1153 */ 1209 */
@@ -1168,6 +1224,13 @@ struct drm_i915_file_private {
1168 1224
1169#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) 1225#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
1170 1226
1227#define INTEL_PCH_DEVICE_ID_MASK 0xff00
1228#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
1229#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
1230#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
1231#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
1232#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
1233
1171#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) 1234#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
1172#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) 1235#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
1173#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) 1236#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
@@ -1250,6 +1313,7 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
1250extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); 1313extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
1251extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); 1314extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
1252 1315
1316extern void intel_console_resume(struct work_struct *work);
1253 1317
1254/* i915_irq.c */ 1318/* i915_irq.c */
1255void i915_hangcheck_elapsed(unsigned long data); 1319void i915_hangcheck_elapsed(unsigned long data);
@@ -1257,6 +1321,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged);
1257 1321
1258extern void intel_irq_init(struct drm_device *dev); 1322extern void intel_irq_init(struct drm_device *dev);
1259extern void intel_gt_init(struct drm_device *dev); 1323extern void intel_gt_init(struct drm_device *dev);
1324extern void intel_gt_reset(struct drm_device *dev);
1260 1325
1261void i915_error_state_free(struct kref *error_ref); 1326void i915_error_state_free(struct kref *error_ref);
1262 1327
@@ -1368,8 +1433,7 @@ int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
1368int i915_gem_object_sync(struct drm_i915_gem_object *obj, 1433int i915_gem_object_sync(struct drm_i915_gem_object *obj,
1369 struct intel_ring_buffer *to); 1434 struct intel_ring_buffer *to);
1370void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, 1435void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1371 struct intel_ring_buffer *ring, 1436 struct intel_ring_buffer *ring);
1372 u32 seqno);
1373 1437
1374int i915_gem_dumb_create(struct drm_file *file_priv, 1438int i915_gem_dumb_create(struct drm_file *file_priv,
1375 struct drm_device *dev, 1439 struct drm_device *dev,
@@ -1387,7 +1451,7 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1387 return (int32_t)(seq1 - seq2) >= 0; 1451 return (int32_t)(seq1 - seq2) >= 0;
1388} 1452}
1389 1453
1390u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring); 1454extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
1391 1455
1392int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj); 1456int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
1393int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); 1457int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
@@ -1499,6 +1563,14 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
1499 unsigned long start, 1563 unsigned long start,
1500 unsigned long mappable_end, 1564 unsigned long mappable_end,
1501 unsigned long end); 1565 unsigned long end);
1566int i915_gem_gtt_init(struct drm_device *dev);
1567void i915_gem_gtt_fini(struct drm_device *dev);
1568static inline void i915_gem_chipset_flush(struct drm_device *dev)
1569{
1570 if (INTEL_INFO(dev)->gen < 6)
1571 intel_gtt_chipset_flush();
1572}
1573
1502 1574
1503/* i915_gem_evict.c */ 1575/* i915_gem_evict.c */
1504int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, 1576int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
@@ -1595,11 +1667,12 @@ extern void intel_modeset_init(struct drm_device *dev);
1595extern void intel_modeset_gem_init(struct drm_device *dev); 1667extern void intel_modeset_gem_init(struct drm_device *dev);
1596extern void intel_modeset_cleanup(struct drm_device *dev); 1668extern void intel_modeset_cleanup(struct drm_device *dev);
1597extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); 1669extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
1598extern void intel_modeset_setup_hw_state(struct drm_device *dev); 1670extern void intel_modeset_setup_hw_state(struct drm_device *dev,
1671 bool force_restore);
1599extern bool intel_fbc_enabled(struct drm_device *dev); 1672extern bool intel_fbc_enabled(struct drm_device *dev);
1600extern void intel_disable_fbc(struct drm_device *dev); 1673extern void intel_disable_fbc(struct drm_device *dev);
1601extern bool ironlake_set_drps(struct drm_device *dev, u8 val); 1674extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
1602extern void ironlake_init_pch_refclk(struct drm_device *dev); 1675extern void intel_init_pch_refclk(struct drm_device *dev);
1603extern void gen6_set_rps(struct drm_device *dev, u8 val); 1676extern void gen6_set_rps(struct drm_device *dev, u8 val);
1604extern void intel_detect_pch(struct drm_device *dev); 1677extern void intel_detect_pch(struct drm_device *dev);
1605extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); 1678extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
@@ -1628,6 +1701,9 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
1628void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); 1701void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
1629int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); 1702int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
1630 1703
1704int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
1705int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
1706
1631#define __i915_read(x, y) \ 1707#define __i915_read(x, y) \
1632 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg); 1708 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
1633 1709
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 107f09befe92..8febea6daa08 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -845,12 +845,12 @@ out:
845 * domain anymore. */ 845 * domain anymore. */
846 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) { 846 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
847 i915_gem_clflush_object(obj); 847 i915_gem_clflush_object(obj);
848 intel_gtt_chipset_flush(); 848 i915_gem_chipset_flush(dev);
849 } 849 }
850 } 850 }
851 851
852 if (needs_clflush_after) 852 if (needs_clflush_after)
853 intel_gtt_chipset_flush(); 853 i915_gem_chipset_flush(dev);
854 854
855 return ret; 855 return ret;
856} 856}
@@ -1345,30 +1345,17 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1345 trace_i915_gem_object_fault(obj, page_offset, true, write); 1345 trace_i915_gem_object_fault(obj, page_offset, true, write);
1346 1346
1347 /* Now bind it into the GTT if needed */ 1347 /* Now bind it into the GTT if needed */
1348 if (!obj->map_and_fenceable) { 1348 ret = i915_gem_object_pin(obj, 0, true, false);
1349 ret = i915_gem_object_unbind(obj); 1349 if (ret)
1350 if (ret) 1350 goto unlock;
1351 goto unlock;
1352 }
1353 if (!obj->gtt_space) {
1354 ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);
1355 if (ret)
1356 goto unlock;
1357
1358 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1359 if (ret)
1360 goto unlock;
1361 }
1362 1351
1363 if (!obj->has_global_gtt_mapping) 1352 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1364 i915_gem_gtt_bind_object(obj, obj->cache_level); 1353 if (ret)
1354 goto unpin;
1365 1355
1366 ret = i915_gem_object_get_fence(obj); 1356 ret = i915_gem_object_get_fence(obj);
1367 if (ret) 1357 if (ret)
1368 goto unlock; 1358 goto unpin;
1369
1370 if (i915_gem_object_is_inactive(obj))
1371 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1372 1359
1373 obj->fault_mappable = true; 1360 obj->fault_mappable = true;
1374 1361
@@ -1377,6 +1364,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1377 1364
1378 /* Finally, remap it using the new GTT offset */ 1365 /* Finally, remap it using the new GTT offset */
1379 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn); 1366 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
1367unpin:
1368 i915_gem_object_unpin(obj);
1380unlock: 1369unlock:
1381 mutex_unlock(&dev->struct_mutex); 1370 mutex_unlock(&dev->struct_mutex);
1382out: 1371out:
@@ -1528,9 +1517,11 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1528 if (obj->base.map_list.map) 1517 if (obj->base.map_list.map)
1529 return 0; 1518 return 0;
1530 1519
1520 dev_priv->mm.shrinker_no_lock_stealing = true;
1521
1531 ret = drm_gem_create_mmap_offset(&obj->base); 1522 ret = drm_gem_create_mmap_offset(&obj->base);
1532 if (ret != -ENOSPC) 1523 if (ret != -ENOSPC)
1533 return ret; 1524 goto out;
1534 1525
1535 /* Badly fragmented mmap space? The only way we can recover 1526 /* Badly fragmented mmap space? The only way we can recover
1536 * space is by destroying unwanted objects. We can't randomly release 1527 * space is by destroying unwanted objects. We can't randomly release
@@ -1542,10 +1533,14 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1542 i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT); 1533 i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
1543 ret = drm_gem_create_mmap_offset(&obj->base); 1534 ret = drm_gem_create_mmap_offset(&obj->base);
1544 if (ret != -ENOSPC) 1535 if (ret != -ENOSPC)
1545 return ret; 1536 goto out;
1546 1537
1547 i915_gem_shrink_all(dev_priv); 1538 i915_gem_shrink_all(dev_priv);
1548 return drm_gem_create_mmap_offset(&obj->base); 1539 ret = drm_gem_create_mmap_offset(&obj->base);
1540out:
1541 dev_priv->mm.shrinker_no_lock_stealing = false;
1542
1543 return ret;
1549} 1544}
1550 1545
1551static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj) 1546static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
@@ -1707,10 +1702,14 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1707 if (obj->pages_pin_count) 1702 if (obj->pages_pin_count)
1708 return -EBUSY; 1703 return -EBUSY;
1709 1704
1705 /* ->put_pages might need to allocate memory for the bit17 swizzle
1706 * array, hence protect them from being reaped by removing them from gtt
1707 * lists early. */
1708 list_del(&obj->gtt_list);
1709
1710 ops->put_pages(obj); 1710 ops->put_pages(obj);
1711 obj->pages = NULL; 1711 obj->pages = NULL;
1712 1712
1713 list_del(&obj->gtt_list);
1714 if (i915_gem_object_is_purgeable(obj)) 1713 if (i915_gem_object_is_purgeable(obj))
1715 i915_gem_object_truncate(obj); 1714 i915_gem_object_truncate(obj);
1716 1715
@@ -1718,7 +1717,8 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
1718} 1717}
1719 1718
1720static long 1719static long
1721i915_gem_purge(struct drm_i915_private *dev_priv, long target) 1720__i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
1721 bool purgeable_only)
1722{ 1722{
1723 struct drm_i915_gem_object *obj, *next; 1723 struct drm_i915_gem_object *obj, *next;
1724 long count = 0; 1724 long count = 0;
@@ -1726,7 +1726,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1726 list_for_each_entry_safe(obj, next, 1726 list_for_each_entry_safe(obj, next,
1727 &dev_priv->mm.unbound_list, 1727 &dev_priv->mm.unbound_list,
1728 gtt_list) { 1728 gtt_list) {
1729 if (i915_gem_object_is_purgeable(obj) && 1729 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
1730 i915_gem_object_put_pages(obj) == 0) { 1730 i915_gem_object_put_pages(obj) == 0) {
1731 count += obj->base.size >> PAGE_SHIFT; 1731 count += obj->base.size >> PAGE_SHIFT;
1732 if (count >= target) 1732 if (count >= target)
@@ -1737,7 +1737,7 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1737 list_for_each_entry_safe(obj, next, 1737 list_for_each_entry_safe(obj, next,
1738 &dev_priv->mm.inactive_list, 1738 &dev_priv->mm.inactive_list,
1739 mm_list) { 1739 mm_list) {
1740 if (i915_gem_object_is_purgeable(obj) && 1740 if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
1741 i915_gem_object_unbind(obj) == 0 && 1741 i915_gem_object_unbind(obj) == 0 &&
1742 i915_gem_object_put_pages(obj) == 0) { 1742 i915_gem_object_put_pages(obj) == 0) {
1743 count += obj->base.size >> PAGE_SHIFT; 1743 count += obj->base.size >> PAGE_SHIFT;
@@ -1749,6 +1749,12 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1749 return count; 1749 return count;
1750} 1750}
1751 1751
1752static long
1753i915_gem_purge(struct drm_i915_private *dev_priv, long target)
1754{
1755 return __i915_gem_shrink(dev_priv, target, true);
1756}
1757
1752static void 1758static void
1753i915_gem_shrink_all(struct drm_i915_private *dev_priv) 1759i915_gem_shrink_all(struct drm_i915_private *dev_priv)
1754{ 1760{
@@ -1796,7 +1802,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1796 */ 1802 */
1797 mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping; 1803 mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
1798 gfp = mapping_gfp_mask(mapping); 1804 gfp = mapping_gfp_mask(mapping);
1799 gfp |= __GFP_NORETRY | __GFP_NOWARN; 1805 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
1800 gfp &= ~(__GFP_IO | __GFP_WAIT); 1806 gfp &= ~(__GFP_IO | __GFP_WAIT);
1801 for_each_sg(st->sgl, sg, page_count, i) { 1807 for_each_sg(st->sgl, sg, page_count, i) {
1802 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 1808 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
@@ -1809,7 +1815,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1809 * our own buffer, now let the real VM do its job and 1815 * our own buffer, now let the real VM do its job and
1810 * go down in flames if truly OOM. 1816 * go down in flames if truly OOM.
1811 */ 1817 */
1812 gfp &= ~(__GFP_NORETRY | __GFP_NOWARN); 1818 gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
1813 gfp |= __GFP_IO | __GFP_WAIT; 1819 gfp |= __GFP_IO | __GFP_WAIT;
1814 1820
1815 i915_gem_shrink_all(dev_priv); 1821 i915_gem_shrink_all(dev_priv);
@@ -1817,7 +1823,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
1817 if (IS_ERR(page)) 1823 if (IS_ERR(page))
1818 goto err_pages; 1824 goto err_pages;
1819 1825
1820 gfp |= __GFP_NORETRY | __GFP_NOWARN; 1826 gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
1821 gfp &= ~(__GFP_IO | __GFP_WAIT); 1827 gfp &= ~(__GFP_IO | __GFP_WAIT);
1822 } 1828 }
1823 1829
@@ -1868,11 +1874,11 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
1868 1874
1869void 1875void
1870i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, 1876i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1871 struct intel_ring_buffer *ring, 1877 struct intel_ring_buffer *ring)
1872 u32 seqno)
1873{ 1878{
1874 struct drm_device *dev = obj->base.dev; 1879 struct drm_device *dev = obj->base.dev;
1875 struct drm_i915_private *dev_priv = dev->dev_private; 1880 struct drm_i915_private *dev_priv = dev->dev_private;
1881 u32 seqno = intel_ring_get_seqno(ring);
1876 1882
1877 BUG_ON(ring == NULL); 1883 BUG_ON(ring == NULL);
1878 obj->ring = ring; 1884 obj->ring = ring;
@@ -1933,26 +1939,54 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1933 WARN_ON(i915_verify_lists(dev)); 1939 WARN_ON(i915_verify_lists(dev));
1934} 1940}
1935 1941
1936static u32 1942static int
1937i915_gem_get_seqno(struct drm_device *dev) 1943i915_gem_handle_seqno_wrap(struct drm_device *dev)
1938{ 1944{
1939 drm_i915_private_t *dev_priv = dev->dev_private; 1945 struct drm_i915_private *dev_priv = dev->dev_private;
1940 u32 seqno = dev_priv->next_seqno; 1946 struct intel_ring_buffer *ring;
1947 int ret, i, j;
1941 1948
1942 /* reserve 0 for non-seqno */ 1949 /* The hardware uses various monotonic 32-bit counters, if we
1943 if (++dev_priv->next_seqno == 0) 1950 * detect that they will wraparound we need to idle the GPU
1944 dev_priv->next_seqno = 1; 1951 * and reset those counters.
1952 */
1953 ret = 0;
1954 for_each_ring(ring, dev_priv, i) {
1955 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
1956 ret |= ring->sync_seqno[j] != 0;
1957 }
1958 if (ret == 0)
1959 return ret;
1960
1961 ret = i915_gpu_idle(dev);
1962 if (ret)
1963 return ret;
1964
1965 i915_gem_retire_requests(dev);
1966 for_each_ring(ring, dev_priv, i) {
1967 for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
1968 ring->sync_seqno[j] = 0;
1969 }
1945 1970
1946 return seqno; 1971 return 0;
1947} 1972}
1948 1973
1949u32 1974int
1950i915_gem_next_request_seqno(struct intel_ring_buffer *ring) 1975i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
1951{ 1976{
1952 if (ring->outstanding_lazy_request == 0) 1977 struct drm_i915_private *dev_priv = dev->dev_private;
1953 ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev); 1978
1979 /* reserve 0 for non-seqno */
1980 if (dev_priv->next_seqno == 0) {
1981 int ret = i915_gem_handle_seqno_wrap(dev);
1982 if (ret)
1983 return ret;
1984
1985 dev_priv->next_seqno = 1;
1986 }
1954 1987
1955 return ring->outstanding_lazy_request; 1988 *seqno = dev_priv->next_seqno++;
1989 return 0;
1956} 1990}
1957 1991
1958int 1992int
@@ -1963,7 +1997,6 @@ i915_add_request(struct intel_ring_buffer *ring,
1963 drm_i915_private_t *dev_priv = ring->dev->dev_private; 1997 drm_i915_private_t *dev_priv = ring->dev->dev_private;
1964 struct drm_i915_gem_request *request; 1998 struct drm_i915_gem_request *request;
1965 u32 request_ring_position; 1999 u32 request_ring_position;
1966 u32 seqno;
1967 int was_empty; 2000 int was_empty;
1968 int ret; 2001 int ret;
1969 2002
@@ -1982,7 +2015,6 @@ i915_add_request(struct intel_ring_buffer *ring,
1982 if (request == NULL) 2015 if (request == NULL)
1983 return -ENOMEM; 2016 return -ENOMEM;
1984 2017
1985 seqno = i915_gem_next_request_seqno(ring);
1986 2018
1987 /* Record the position of the start of the request so that 2019 /* Record the position of the start of the request so that
1988 * should we detect the updated seqno part-way through the 2020 * should we detect the updated seqno part-way through the
@@ -1991,15 +2023,13 @@ i915_add_request(struct intel_ring_buffer *ring,
1991 */ 2023 */
1992 request_ring_position = intel_ring_get_tail(ring); 2024 request_ring_position = intel_ring_get_tail(ring);
1993 2025
1994 ret = ring->add_request(ring, &seqno); 2026 ret = ring->add_request(ring);
1995 if (ret) { 2027 if (ret) {
1996 kfree(request); 2028 kfree(request);
1997 return ret; 2029 return ret;
1998 } 2030 }
1999 2031
2000 trace_i915_gem_request_add(ring, seqno); 2032 request->seqno = intel_ring_get_seqno(ring);
2001
2002 request->seqno = seqno;
2003 request->ring = ring; 2033 request->ring = ring;
2004 request->tail = request_ring_position; 2034 request->tail = request_ring_position;
2005 request->emitted_jiffies = jiffies; 2035 request->emitted_jiffies = jiffies;
@@ -2017,23 +2047,24 @@ i915_add_request(struct intel_ring_buffer *ring,
2017 spin_unlock(&file_priv->mm.lock); 2047 spin_unlock(&file_priv->mm.lock);
2018 } 2048 }
2019 2049
2050 trace_i915_gem_request_add(ring, request->seqno);
2020 ring->outstanding_lazy_request = 0; 2051 ring->outstanding_lazy_request = 0;
2021 2052
2022 if (!dev_priv->mm.suspended) { 2053 if (!dev_priv->mm.suspended) {
2023 if (i915_enable_hangcheck) { 2054 if (i915_enable_hangcheck) {
2024 mod_timer(&dev_priv->hangcheck_timer, 2055 mod_timer(&dev_priv->hangcheck_timer,
2025 jiffies + 2056 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
2026 msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
2027 } 2057 }
2028 if (was_empty) { 2058 if (was_empty) {
2029 queue_delayed_work(dev_priv->wq, 2059 queue_delayed_work(dev_priv->wq,
2030 &dev_priv->mm.retire_work, HZ); 2060 &dev_priv->mm.retire_work,
2061 round_jiffies_up_relative(HZ));
2031 intel_mark_busy(dev_priv->dev); 2062 intel_mark_busy(dev_priv->dev);
2032 } 2063 }
2033 } 2064 }
2034 2065
2035 if (out_seqno) 2066 if (out_seqno)
2036 *out_seqno = seqno; 2067 *out_seqno = request->seqno;
2037 return 0; 2068 return 0;
2038} 2069}
2039 2070
@@ -2131,7 +2162,6 @@ void
2131i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) 2162i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2132{ 2163{
2133 uint32_t seqno; 2164 uint32_t seqno;
2134 int i;
2135 2165
2136 if (list_empty(&ring->request_list)) 2166 if (list_empty(&ring->request_list))
2137 return; 2167 return;
@@ -2140,10 +2170,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2140 2170
2141 seqno = ring->get_seqno(ring, true); 2171 seqno = ring->get_seqno(ring, true);
2142 2172
2143 for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
2144 if (seqno >= ring->sync_seqno[i])
2145 ring->sync_seqno[i] = 0;
2146
2147 while (!list_empty(&ring->request_list)) { 2173 while (!list_empty(&ring->request_list)) {
2148 struct drm_i915_gem_request *request; 2174 struct drm_i915_gem_request *request;
2149 2175
@@ -2218,7 +2244,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
2218 2244
2219 /* Come back later if the device is busy... */ 2245 /* Come back later if the device is busy... */
2220 if (!mutex_trylock(&dev->struct_mutex)) { 2246 if (!mutex_trylock(&dev->struct_mutex)) {
2221 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); 2247 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2248 round_jiffies_up_relative(HZ));
2222 return; 2249 return;
2223 } 2250 }
2224 2251
@@ -2236,7 +2263,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
2236 } 2263 }
2237 2264
2238 if (!dev_priv->mm.suspended && !idle) 2265 if (!dev_priv->mm.suspended && !idle)
2239 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); 2266 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2267 round_jiffies_up_relative(HZ));
2240 if (idle) 2268 if (idle)
2241 intel_mark_idle(dev); 2269 intel_mark_idle(dev);
2242 2270
@@ -2386,7 +2414,11 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
2386 2414
2387 ret = to->sync_to(to, from, seqno); 2415 ret = to->sync_to(to, from, seqno);
2388 if (!ret) 2416 if (!ret)
2389 from->sync_seqno[idx] = seqno; 2417 /* We use last_read_seqno because sync_to()
2418 * might have just caused seqno wrap under
2419 * the radar.
2420 */
2421 from->sync_seqno[idx] = obj->last_read_seqno;
2390 2422
2391 return ret; 2423 return ret;
2392} 2424}
@@ -2469,14 +2501,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2469 return 0; 2501 return 0;
2470} 2502}
2471 2503
2472static int i915_ring_idle(struct intel_ring_buffer *ring)
2473{
2474 if (list_empty(&ring->active_list))
2475 return 0;
2476
2477 return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
2478}
2479
2480int i915_gpu_idle(struct drm_device *dev) 2504int i915_gpu_idle(struct drm_device *dev)
2481{ 2505{
2482 drm_i915_private_t *dev_priv = dev->dev_private; 2506 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2489,7 +2513,7 @@ int i915_gpu_idle(struct drm_device *dev)
2489 if (ret) 2513 if (ret)
2490 return ret; 2514 return ret;
2491 2515
2492 ret = i915_ring_idle(ring); 2516 ret = intel_ring_idle(ring);
2493 if (ret) 2517 if (ret)
2494 return ret; 2518 return ret;
2495 } 2519 }
@@ -2879,7 +2903,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2879{ 2903{
2880 struct drm_device *dev = obj->base.dev; 2904 struct drm_device *dev = obj->base.dev;
2881 drm_i915_private_t *dev_priv = dev->dev_private; 2905 drm_i915_private_t *dev_priv = dev->dev_private;
2882 struct drm_mm_node *free_space; 2906 struct drm_mm_node *node;
2883 u32 size, fence_size, fence_alignment, unfenced_alignment; 2907 u32 size, fence_size, fence_alignment, unfenced_alignment;
2884 bool mappable, fenceable; 2908 bool mappable, fenceable;
2885 int ret; 2909 int ret;
@@ -2923,74 +2947,63 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2923 if (ret) 2947 if (ret)
2924 return ret; 2948 return ret;
2925 2949
2950 i915_gem_object_pin_pages(obj);
2951
2952 node = kzalloc(sizeof(*node), GFP_KERNEL);
2953 if (node == NULL) {
2954 i915_gem_object_unpin_pages(obj);
2955 return -ENOMEM;
2956 }
2957
2926 search_free: 2958 search_free:
2927 if (map_and_fenceable) 2959 if (map_and_fenceable)
2928 free_space = 2960 ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
2929 drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
2930 size, alignment, obj->cache_level, 2961 size, alignment, obj->cache_level,
2931 0, dev_priv->mm.gtt_mappable_end, 2962 0, dev_priv->mm.gtt_mappable_end);
2932 false);
2933 else 2963 else
2934 free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space, 2964 ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
2935 size, alignment, obj->cache_level, 2965 size, alignment, obj->cache_level);
2936 false); 2966 if (ret) {
2937
2938 if (free_space != NULL) {
2939 if (map_and_fenceable)
2940 obj->gtt_space =
2941 drm_mm_get_block_range_generic(free_space,
2942 size, alignment, obj->cache_level,
2943 0, dev_priv->mm.gtt_mappable_end,
2944 false);
2945 else
2946 obj->gtt_space =
2947 drm_mm_get_block_generic(free_space,
2948 size, alignment, obj->cache_level,
2949 false);
2950 }
2951 if (obj->gtt_space == NULL) {
2952 ret = i915_gem_evict_something(dev, size, alignment, 2967 ret = i915_gem_evict_something(dev, size, alignment,
2953 obj->cache_level, 2968 obj->cache_level,
2954 map_and_fenceable, 2969 map_and_fenceable,
2955 nonblocking); 2970 nonblocking);
2956 if (ret) 2971 if (ret == 0)
2957 return ret; 2972 goto search_free;
2958 2973
2959 goto search_free; 2974 i915_gem_object_unpin_pages(obj);
2975 kfree(node);
2976 return ret;
2960 } 2977 }
2961 if (WARN_ON(!i915_gem_valid_gtt_space(dev, 2978 if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
2962 obj->gtt_space, 2979 i915_gem_object_unpin_pages(obj);
2963 obj->cache_level))) { 2980 drm_mm_put_block(node);
2964 drm_mm_put_block(obj->gtt_space);
2965 obj->gtt_space = NULL;
2966 return -EINVAL; 2981 return -EINVAL;
2967 } 2982 }
2968 2983
2969
2970 ret = i915_gem_gtt_prepare_object(obj); 2984 ret = i915_gem_gtt_prepare_object(obj);
2971 if (ret) { 2985 if (ret) {
2972 drm_mm_put_block(obj->gtt_space); 2986 i915_gem_object_unpin_pages(obj);
2973 obj->gtt_space = NULL; 2987 drm_mm_put_block(node);
2974 return ret; 2988 return ret;
2975 } 2989 }
2976 2990
2977 if (!dev_priv->mm.aliasing_ppgtt)
2978 i915_gem_gtt_bind_object(obj, obj->cache_level);
2979
2980 list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list); 2991 list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
2981 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); 2992 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2982 2993
2983 obj->gtt_offset = obj->gtt_space->start; 2994 obj->gtt_space = node;
2995 obj->gtt_offset = node->start;
2984 2996
2985 fenceable = 2997 fenceable =
2986 obj->gtt_space->size == fence_size && 2998 node->size == fence_size &&
2987 (obj->gtt_space->start & (fence_alignment - 1)) == 0; 2999 (node->start & (fence_alignment - 1)) == 0;
2988 3000
2989 mappable = 3001 mappable =
2990 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end; 3002 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
2991 3003
2992 obj->map_and_fenceable = mappable && fenceable; 3004 obj->map_and_fenceable = mappable && fenceable;
2993 3005
3006 i915_gem_object_unpin_pages(obj);
2994 trace_i915_gem_object_bind(obj, map_and_fenceable); 3007 trace_i915_gem_object_bind(obj, map_and_fenceable);
2995 i915_gem_verify_gtt(dev); 3008 i915_gem_verify_gtt(dev);
2996 return 0; 3009 return 0;
@@ -3059,7 +3072,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3059 return; 3072 return;
3060 3073
3061 i915_gem_clflush_object(obj); 3074 i915_gem_clflush_object(obj);
3062 intel_gtt_chipset_flush(); 3075 i915_gem_chipset_flush(obj->base.dev);
3063 old_write_domain = obj->base.write_domain; 3076 old_write_domain = obj->base.write_domain;
3064 obj->base.write_domain = 0; 3077 obj->base.write_domain = 0;
3065 3078
@@ -3454,11 +3467,16 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
3454 } 3467 }
3455 3468
3456 if (obj->gtt_space == NULL) { 3469 if (obj->gtt_space == NULL) {
3470 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3471
3457 ret = i915_gem_object_bind_to_gtt(obj, alignment, 3472 ret = i915_gem_object_bind_to_gtt(obj, alignment,
3458 map_and_fenceable, 3473 map_and_fenceable,
3459 nonblocking); 3474 nonblocking);
3460 if (ret) 3475 if (ret)
3461 return ret; 3476 return ret;
3477
3478 if (!dev_priv->mm.aliasing_ppgtt)
3479 i915_gem_gtt_bind_object(obj, obj->cache_level);
3462 } 3480 }
3463 3481
3464 if (!obj->has_global_gtt_mapping && map_and_fenceable) 3482 if (!obj->has_global_gtt_mapping && map_and_fenceable)
@@ -3511,14 +3529,15 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
3511 goto out; 3529 goto out;
3512 } 3530 }
3513 3531
3514 obj->user_pin_count++; 3532 if (obj->user_pin_count == 0) {
3515 obj->pin_filp = file;
3516 if (obj->user_pin_count == 1) {
3517 ret = i915_gem_object_pin(obj, args->alignment, true, false); 3533 ret = i915_gem_object_pin(obj, args->alignment, true, false);
3518 if (ret) 3534 if (ret)
3519 goto out; 3535 goto out;
3520 } 3536 }
3521 3537
3538 obj->user_pin_count++;
3539 obj->pin_filp = file;
3540
3522 /* XXX - flush the CPU caches for pinned objects 3541 /* XXX - flush the CPU caches for pinned objects
3523 * as the X server doesn't manage domains yet 3542 * as the X server doesn't manage domains yet
3524 */ 3543 */
@@ -3832,7 +3851,7 @@ void i915_gem_l3_remap(struct drm_device *dev)
3832 if (!IS_IVYBRIDGE(dev)) 3851 if (!IS_IVYBRIDGE(dev))
3833 return; 3852 return;
3834 3853
3835 if (!dev_priv->mm.l3_remap_info) 3854 if (!dev_priv->l3_parity.remap_info)
3836 return; 3855 return;
3837 3856
3838 misccpctl = I915_READ(GEN7_MISCCPCTL); 3857 misccpctl = I915_READ(GEN7_MISCCPCTL);
@@ -3841,12 +3860,12 @@ void i915_gem_l3_remap(struct drm_device *dev)
3841 3860
3842 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) { 3861 for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
3843 u32 remap = I915_READ(GEN7_L3LOG_BASE + i); 3862 u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
3844 if (remap && remap != dev_priv->mm.l3_remap_info[i/4]) 3863 if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
3845 DRM_DEBUG("0x%x was already programmed to %x\n", 3864 DRM_DEBUG("0x%x was already programmed to %x\n",
3846 GEN7_L3LOG_BASE + i, remap); 3865 GEN7_L3LOG_BASE + i, remap);
3847 if (remap && !dev_priv->mm.l3_remap_info[i/4]) 3866 if (remap && !dev_priv->l3_parity.remap_info[i/4])
3848 DRM_DEBUG_DRIVER("Clearing remapped register\n"); 3867 DRM_DEBUG_DRIVER("Clearing remapped register\n");
3849 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]); 3868 I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
3850 } 3869 }
3851 3870
3852 /* Make sure all the writes land before disabling dop clock gating */ 3871 /* Make sure all the writes land before disabling dop clock gating */
@@ -3876,68 +3895,6 @@ void i915_gem_init_swizzling(struct drm_device *dev)
3876 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); 3895 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
3877} 3896}
3878 3897
3879void i915_gem_init_ppgtt(struct drm_device *dev)
3880{
3881 drm_i915_private_t *dev_priv = dev->dev_private;
3882 uint32_t pd_offset;
3883 struct intel_ring_buffer *ring;
3884 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
3885 uint32_t __iomem *pd_addr;
3886 uint32_t pd_entry;
3887 int i;
3888
3889 if (!dev_priv->mm.aliasing_ppgtt)
3890 return;
3891
3892
3893 pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
3894 for (i = 0; i < ppgtt->num_pd_entries; i++) {
3895 dma_addr_t pt_addr;
3896
3897 if (dev_priv->mm.gtt->needs_dmar)
3898 pt_addr = ppgtt->pt_dma_addr[i];
3899 else
3900 pt_addr = page_to_phys(ppgtt->pt_pages[i]);
3901
3902 pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
3903 pd_entry |= GEN6_PDE_VALID;
3904
3905 writel(pd_entry, pd_addr + i);
3906 }
3907 readl(pd_addr);
3908
3909 pd_offset = ppgtt->pd_offset;
3910 pd_offset /= 64; /* in cachelines, */
3911 pd_offset <<= 16;
3912
3913 if (INTEL_INFO(dev)->gen == 6) {
3914 uint32_t ecochk, gab_ctl, ecobits;
3915
3916 ecobits = I915_READ(GAC_ECO_BITS);
3917 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
3918
3919 gab_ctl = I915_READ(GAB_CTL);
3920 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
3921
3922 ecochk = I915_READ(GAM_ECOCHK);
3923 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
3924 ECOCHK_PPGTT_CACHE64B);
3925 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
3926 } else if (INTEL_INFO(dev)->gen >= 7) {
3927 I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
3928 /* GFX_MODE is per-ring on gen7+ */
3929 }
3930
3931 for_each_ring(ring, dev_priv, i) {
3932 if (INTEL_INFO(dev)->gen >= 7)
3933 I915_WRITE(RING_MODE_GEN7(ring),
3934 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
3935
3936 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
3937 I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
3938 }
3939}
3940
3941static bool 3898static bool
3942intel_enable_blt(struct drm_device *dev) 3899intel_enable_blt(struct drm_device *dev)
3943{ 3900{
@@ -3960,7 +3917,7 @@ i915_gem_init_hw(struct drm_device *dev)
3960 drm_i915_private_t *dev_priv = dev->dev_private; 3917 drm_i915_private_t *dev_priv = dev->dev_private;
3961 int ret; 3918 int ret;
3962 3919
3963 if (!intel_enable_gtt()) 3920 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
3964 return -EIO; 3921 return -EIO;
3965 3922
3966 if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1)) 3923 if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
@@ -4295,7 +4252,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
4295 page_cache_release(page); 4252 page_cache_release(page);
4296 } 4253 }
4297 } 4254 }
4298 intel_gtt_chipset_flush(); 4255 i915_gem_chipset_flush(dev);
4299 4256
4300 obj->phys_obj->cur_obj = NULL; 4257 obj->phys_obj->cur_obj = NULL;
4301 obj->phys_obj = NULL; 4258 obj->phys_obj = NULL;
@@ -4382,7 +4339,7 @@ i915_gem_phys_pwrite(struct drm_device *dev,
4382 return -EFAULT; 4339 return -EFAULT;
4383 } 4340 }
4384 4341
4385 intel_gtt_chipset_flush(); 4342 i915_gem_chipset_flush(dev);
4386 return 0; 4343 return 0;
4387} 4344}
4388 4345
@@ -4407,6 +4364,19 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4407 spin_unlock(&file_priv->mm.lock); 4364 spin_unlock(&file_priv->mm.lock);
4408} 4365}
4409 4366
4367static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4368{
4369 if (!mutex_is_locked(mutex))
4370 return false;
4371
4372#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
4373 return mutex->owner == task;
4374#else
4375 /* Since UP may be pre-empted, we cannot assume that we own the lock */
4376 return false;
4377#endif
4378}
4379
4410static int 4380static int
4411i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) 4381i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4412{ 4382{
@@ -4417,14 +4387,25 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4417 struct drm_device *dev = dev_priv->dev; 4387 struct drm_device *dev = dev_priv->dev;
4418 struct drm_i915_gem_object *obj; 4388 struct drm_i915_gem_object *obj;
4419 int nr_to_scan = sc->nr_to_scan; 4389 int nr_to_scan = sc->nr_to_scan;
4390 bool unlock = true;
4420 int cnt; 4391 int cnt;
4421 4392
4422 if (!mutex_trylock(&dev->struct_mutex)) 4393 if (!mutex_trylock(&dev->struct_mutex)) {
4423 return 0; 4394 if (!mutex_is_locked_by(&dev->struct_mutex, current))
4395 return 0;
4396
4397 if (dev_priv->mm.shrinker_no_lock_stealing)
4398 return 0;
4399
4400 unlock = false;
4401 }
4424 4402
4425 if (nr_to_scan) { 4403 if (nr_to_scan) {
4426 nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan); 4404 nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
4427 if (nr_to_scan > 0) 4405 if (nr_to_scan > 0)
4406 nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
4407 false);
4408 if (nr_to_scan > 0)
4428 i915_gem_shrink_all(dev_priv); 4409 i915_gem_shrink_all(dev_priv);
4429 } 4410 }
4430 4411
@@ -4432,10 +4413,11 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4432 list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) 4413 list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
4433 if (obj->pages_pin_count == 0) 4414 if (obj->pages_pin_count == 0)
4434 cnt += obj->base.size >> PAGE_SHIFT; 4415 cnt += obj->base.size >> PAGE_SHIFT;
4435 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) 4416 list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list)
4436 if (obj->pin_count == 0 && obj->pages_pin_count == 0) 4417 if (obj->pin_count == 0 && obj->pages_pin_count == 0)
4437 cnt += obj->base.size >> PAGE_SHIFT; 4418 cnt += obj->base.size >> PAGE_SHIFT;
4438 4419
4439 mutex_unlock(&dev->struct_mutex); 4420 if (unlock)
4421 mutex_unlock(&dev->struct_mutex);
4440 return cnt; 4422 return cnt;
4441} 4423}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 05ed42f203d7..a3f06bcad551 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -146,7 +146,7 @@ create_hw_context(struct drm_device *dev,
146 struct i915_hw_context *ctx; 146 struct i915_hw_context *ctx;
147 int ret, id; 147 int ret, id;
148 148
149 ctx = kzalloc(sizeof(struct drm_i915_file_private), GFP_KERNEL); 149 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
150 if (ctx == NULL) 150 if (ctx == NULL)
151 return ERR_PTR(-ENOMEM); 151 return ERR_PTR(-ENOMEM);
152 152
@@ -410,9 +410,8 @@ static int do_switch(struct i915_hw_context *to)
410 * MI_SET_CONTEXT instead of when the next seqno has completed. 410 * MI_SET_CONTEXT instead of when the next seqno has completed.
411 */ 411 */
412 if (from_obj != NULL) { 412 if (from_obj != NULL) {
413 u32 seqno = i915_gem_next_request_seqno(ring);
414 from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; 413 from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
415 i915_gem_object_move_to_active(from_obj, ring, seqno); 414 i915_gem_object_move_to_active(from_obj, ring);
416 /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the 415 /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
417 * whole damn pipeline, we don't need to explicitly mark the 416 * whole damn pipeline, we don't need to explicitly mark the
418 * object dirty. The only exception is that the context must be 417 * object dirty. The only exception is that the context must be
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 773ef77b6c22..abeaafef6d7e 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -226,7 +226,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
226{ 226{
227 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); 227 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
228 228
229 return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, 0600); 229 return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
230} 230}
231 231
232static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) 232static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
@@ -266,7 +266,12 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
266 obj = dma_buf->priv; 266 obj = dma_buf->priv;
267 /* is it from our device? */ 267 /* is it from our device? */
268 if (obj->base.dev == dev) { 268 if (obj->base.dev == dev) {
269 /*
270 * Importing dmabuf exported from out own gem increases
271 * refcount on gem itself instead of f_count of dmabuf.
272 */
269 drm_gem_object_reference(&obj->base); 273 drm_gem_object_reference(&obj->base);
274 dma_buf_put(dma_buf);
270 return &obj->base; 275 return &obj->base;
271 } 276 }
272 } 277 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3eea143749f6..d6a994a07393 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -128,15 +128,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
128 target_i915_obj->cache_level); 128 target_i915_obj->cache_level);
129 } 129 }
130 130
131 /* The target buffer should have appeared before us in the
132 * exec_object list, so it should have a GTT space bound by now.
133 */
134 if (unlikely(target_offset == 0)) {
135 DRM_DEBUG("No GTT space found for object %d\n",
136 reloc->target_handle);
137 return ret;
138 }
139
140 /* Validate that the target is in a valid r/w GPU domain */ 131 /* Validate that the target is in a valid r/w GPU domain */
141 if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) { 132 if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
142 DRM_DEBUG("reloc with multiple write domains: " 133 DRM_DEBUG("reloc with multiple write domains: "
@@ -672,7 +663,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
672 } 663 }
673 664
674 if (flush_domains & I915_GEM_DOMAIN_CPU) 665 if (flush_domains & I915_GEM_DOMAIN_CPU)
675 intel_gtt_chipset_flush(); 666 i915_gem_chipset_flush(ring->dev);
676 667
677 if (flush_domains & I915_GEM_DOMAIN_GTT) 668 if (flush_domains & I915_GEM_DOMAIN_GTT)
678 wmb(); 669 wmb();
@@ -722,8 +713,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
722 713
723static void 714static void
724i915_gem_execbuffer_move_to_active(struct list_head *objects, 715i915_gem_execbuffer_move_to_active(struct list_head *objects,
725 struct intel_ring_buffer *ring, 716 struct intel_ring_buffer *ring)
726 u32 seqno)
727{ 717{
728 struct drm_i915_gem_object *obj; 718 struct drm_i915_gem_object *obj;
729 719
@@ -735,10 +725,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
735 obj->base.write_domain = obj->base.pending_write_domain; 725 obj->base.write_domain = obj->base.pending_write_domain;
736 obj->fenced_gpu_access = obj->pending_fenced_gpu_access; 726 obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
737 727
738 i915_gem_object_move_to_active(obj, ring, seqno); 728 i915_gem_object_move_to_active(obj, ring);
739 if (obj->base.write_domain) { 729 if (obj->base.write_domain) {
740 obj->dirty = 1; 730 obj->dirty = 1;
741 obj->last_write_seqno = seqno; 731 obj->last_write_seqno = intel_ring_get_seqno(ring);
742 if (obj->pin_count) /* check for potential scanout */ 732 if (obj->pin_count) /* check for potential scanout */
743 intel_mark_fb_busy(obj); 733 intel_mark_fb_busy(obj);
744 } 734 }
@@ -798,8 +788,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
798 struct intel_ring_buffer *ring; 788 struct intel_ring_buffer *ring;
799 u32 ctx_id = i915_execbuffer2_get_context_id(*args); 789 u32 ctx_id = i915_execbuffer2_get_context_id(*args);
800 u32 exec_start, exec_len; 790 u32 exec_start, exec_len;
801 u32 seqno;
802 u32 mask; 791 u32 mask;
792 u32 flags;
803 int ret, mode, i; 793 int ret, mode, i;
804 794
805 if (!i915_gem_check_execbuffer(args)) { 795 if (!i915_gem_check_execbuffer(args)) {
@@ -811,6 +801,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
811 if (ret) 801 if (ret)
812 return ret; 802 return ret;
813 803
804 flags = 0;
805 if (args->flags & I915_EXEC_SECURE) {
806 if (!file->is_master || !capable(CAP_SYS_ADMIN))
807 return -EPERM;
808
809 flags |= I915_DISPATCH_SECURE;
810 }
811 if (args->flags & I915_EXEC_IS_PINNED)
812 flags |= I915_DISPATCH_PINNED;
813
814 switch (args->flags & I915_EXEC_RING_MASK) { 814 switch (args->flags & I915_EXEC_RING_MASK) {
815 case I915_EXEC_DEFAULT: 815 case I915_EXEC_DEFAULT:
816 case I915_EXEC_RENDER: 816 case I915_EXEC_RENDER:
@@ -983,26 +983,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
983 } 983 }
984 batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; 984 batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
985 985
986 /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
987 * batch" bit. Hence we need to pin secure batches into the global gtt.
988 * hsw should have this fixed, but let's be paranoid and do it
989 * unconditionally for now. */
990 if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
991 i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
992
986 ret = i915_gem_execbuffer_move_to_gpu(ring, &objects); 993 ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
987 if (ret) 994 if (ret)
988 goto err; 995 goto err;
989 996
990 seqno = i915_gem_next_request_seqno(ring);
991 for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
992 if (seqno < ring->sync_seqno[i]) {
993 /* The GPU can not handle its semaphore value wrapping,
994 * so every billion or so execbuffers, we need to stall
995 * the GPU in order to reset the counters.
996 */
997 ret = i915_gpu_idle(dev);
998 if (ret)
999 goto err;
1000 i915_gem_retire_requests(dev);
1001
1002 BUG_ON(ring->sync_seqno[i]);
1003 }
1004 }
1005
1006 ret = i915_switch_context(ring, file, ctx_id); 997 ret = i915_switch_context(ring, file, ctx_id);
1007 if (ret) 998 if (ret)
1008 goto err; 999 goto err;
@@ -1028,8 +1019,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1028 goto err; 1019 goto err;
1029 } 1020 }
1030 1021
1031 trace_i915_gem_ring_dispatch(ring, seqno);
1032
1033 exec_start = batch_obj->gtt_offset + args->batch_start_offset; 1022 exec_start = batch_obj->gtt_offset + args->batch_start_offset;
1034 exec_len = args->batch_len; 1023 exec_len = args->batch_len;
1035 if (cliprects) { 1024 if (cliprects) {
@@ -1040,17 +1029,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1040 goto err; 1029 goto err;
1041 1030
1042 ret = ring->dispatch_execbuffer(ring, 1031 ret = ring->dispatch_execbuffer(ring,
1043 exec_start, exec_len); 1032 exec_start, exec_len,
1033 flags);
1044 if (ret) 1034 if (ret)
1045 goto err; 1035 goto err;
1046 } 1036 }
1047 } else { 1037 } else {
1048 ret = ring->dispatch_execbuffer(ring, exec_start, exec_len); 1038 ret = ring->dispatch_execbuffer(ring,
1039 exec_start, exec_len,
1040 flags);
1049 if (ret) 1041 if (ret)
1050 goto err; 1042 goto err;
1051 } 1043 }
1052 1044
1053 i915_gem_execbuffer_move_to_active(&objects, ring, seqno); 1045 trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
1046
1047 i915_gem_execbuffer_move_to_active(&objects, ring);
1054 i915_gem_execbuffer_retire_commands(dev, file, ring); 1048 i915_gem_execbuffer_retire_commands(dev, file, ring);
1055 1049
1056err: 1050err:
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index df470b5e8d36..2c150dee78a7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -28,19 +28,67 @@
28#include "i915_trace.h" 28#include "i915_trace.h"
29#include "intel_drv.h" 29#include "intel_drv.h"
30 30
31typedef uint32_t gtt_pte_t;
32
33/* PPGTT stuff */
34#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
35
36#define GEN6_PDE_VALID (1 << 0)
37/* gen6+ has bit 11-4 for physical addr bit 39-32 */
38#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
39
40#define GEN6_PTE_VALID (1 << 0)
41#define GEN6_PTE_UNCACHED (1 << 1)
42#define HSW_PTE_UNCACHED (0)
43#define GEN6_PTE_CACHE_LLC (2 << 1)
44#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
45#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
46
47static inline gtt_pte_t pte_encode(struct drm_device *dev,
48 dma_addr_t addr,
49 enum i915_cache_level level)
50{
51 gtt_pte_t pte = GEN6_PTE_VALID;
52 pte |= GEN6_PTE_ADDR_ENCODE(addr);
53
54 switch (level) {
55 case I915_CACHE_LLC_MLC:
56 /* Haswell doesn't set L3 this way */
57 if (IS_HASWELL(dev))
58 pte |= GEN6_PTE_CACHE_LLC;
59 else
60 pte |= GEN6_PTE_CACHE_LLC_MLC;
61 break;
62 case I915_CACHE_LLC:
63 pte |= GEN6_PTE_CACHE_LLC;
64 break;
65 case I915_CACHE_NONE:
66 if (IS_HASWELL(dev))
67 pte |= HSW_PTE_UNCACHED;
68 else
69 pte |= GEN6_PTE_UNCACHED;
70 break;
71 default:
72 BUG();
73 }
74
75
76 return pte;
77}
78
31/* PPGTT support for Sandybdrige/Gen6 and later */ 79/* PPGTT support for Sandybdrige/Gen6 and later */
32static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, 80static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
33 unsigned first_entry, 81 unsigned first_entry,
34 unsigned num_entries) 82 unsigned num_entries)
35{ 83{
36 uint32_t *pt_vaddr; 84 gtt_pte_t *pt_vaddr;
37 uint32_t scratch_pte; 85 gtt_pte_t scratch_pte;
38 unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; 86 unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
39 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; 87 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
40 unsigned last_pte, i; 88 unsigned last_pte, i;
41 89
42 scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr); 90 scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr,
43 scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC; 91 I915_CACHE_LLC);
44 92
45 while (num_entries) { 93 while (num_entries) {
46 last_pte = first_pte + num_entries; 94 last_pte = first_pte + num_entries;
@@ -77,6 +125,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
77 if (!ppgtt) 125 if (!ppgtt)
78 return ret; 126 return ret;
79 127
128 ppgtt->dev = dev;
80 ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES; 129 ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
81 ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries, 130 ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
82 GFP_KERNEL); 131 GFP_KERNEL);
@@ -118,7 +167,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
118 i915_ppgtt_clear_range(ppgtt, 0, 167 i915_ppgtt_clear_range(ppgtt, 0,
119 ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES); 168 ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
120 169
121 ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t); 170 ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);
122 171
123 dev_priv->mm.aliasing_ppgtt = ppgtt; 172 dev_priv->mm.aliasing_ppgtt = ppgtt;
124 173
@@ -168,9 +217,9 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
168static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt, 217static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
169 const struct sg_table *pages, 218 const struct sg_table *pages,
170 unsigned first_entry, 219 unsigned first_entry,
171 uint32_t pte_flags) 220 enum i915_cache_level cache_level)
172{ 221{
173 uint32_t *pt_vaddr, pte; 222 gtt_pte_t *pt_vaddr;
174 unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; 223 unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
175 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; 224 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
176 unsigned i, j, m, segment_len; 225 unsigned i, j, m, segment_len;
@@ -188,8 +237,8 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
188 237
189 for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) { 238 for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
190 page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT); 239 page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
191 pte = GEN6_PTE_ADDR_ENCODE(page_addr); 240 pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr,
192 pt_vaddr[j] = pte | pte_flags; 241 cache_level);
193 242
194 /* grab the next page */ 243 /* grab the next page */
195 if (++m == segment_len) { 244 if (++m == segment_len) {
@@ -213,29 +262,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
213 struct drm_i915_gem_object *obj, 262 struct drm_i915_gem_object *obj,
214 enum i915_cache_level cache_level) 263 enum i915_cache_level cache_level)
215{ 264{
216 uint32_t pte_flags = GEN6_PTE_VALID;
217
218 switch (cache_level) {
219 case I915_CACHE_LLC_MLC:
220 pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
221 break;
222 case I915_CACHE_LLC:
223 pte_flags |= GEN6_PTE_CACHE_LLC;
224 break;
225 case I915_CACHE_NONE:
226 if (IS_HASWELL(obj->base.dev))
227 pte_flags |= HSW_PTE_UNCACHED;
228 else
229 pte_flags |= GEN6_PTE_UNCACHED;
230 break;
231 default:
232 BUG();
233 }
234
235 i915_ppgtt_insert_sg_entries(ppgtt, 265 i915_ppgtt_insert_sg_entries(ppgtt,
236 obj->pages, 266 obj->pages,
237 obj->gtt_space->start >> PAGE_SHIFT, 267 obj->gtt_space->start >> PAGE_SHIFT,
238 pte_flags); 268 cache_level);
239} 269}
240 270
241void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, 271void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
@@ -246,23 +276,65 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
246 obj->base.size >> PAGE_SHIFT); 276 obj->base.size >> PAGE_SHIFT);
247} 277}
248 278
249/* XXX kill agp_type! */ 279void i915_gem_init_ppgtt(struct drm_device *dev)
250static unsigned int cache_level_to_agp_type(struct drm_device *dev,
251 enum i915_cache_level cache_level)
252{ 280{
253 switch (cache_level) { 281 drm_i915_private_t *dev_priv = dev->dev_private;
254 case I915_CACHE_LLC_MLC: 282 uint32_t pd_offset;
255 if (INTEL_INFO(dev)->gen >= 6) 283 struct intel_ring_buffer *ring;
256 return AGP_USER_CACHED_MEMORY_LLC_MLC; 284 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
257 /* Older chipsets do not have this extra level of CPU 285 uint32_t __iomem *pd_addr;
258 * cacheing, so fallthrough and request the PTE simply 286 uint32_t pd_entry;
259 * as cached. 287 int i;
260 */ 288
261 case I915_CACHE_LLC: 289 if (!dev_priv->mm.aliasing_ppgtt)
262 return AGP_USER_CACHED_MEMORY; 290 return;
263 default: 291
264 case I915_CACHE_NONE: 292
265 return AGP_USER_MEMORY; 293 pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
294 for (i = 0; i < ppgtt->num_pd_entries; i++) {
295 dma_addr_t pt_addr;
296
297 if (dev_priv->mm.gtt->needs_dmar)
298 pt_addr = ppgtt->pt_dma_addr[i];
299 else
300 pt_addr = page_to_phys(ppgtt->pt_pages[i]);
301
302 pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
303 pd_entry |= GEN6_PDE_VALID;
304
305 writel(pd_entry, pd_addr + i);
306 }
307 readl(pd_addr);
308
309 pd_offset = ppgtt->pd_offset;
310 pd_offset /= 64; /* in cachelines, */
311 pd_offset <<= 16;
312
313 if (INTEL_INFO(dev)->gen == 6) {
314 uint32_t ecochk, gab_ctl, ecobits;
315
316 ecobits = I915_READ(GAC_ECO_BITS);
317 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
318
319 gab_ctl = I915_READ(GAB_CTL);
320 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
321
322 ecochk = I915_READ(GAM_ECOCHK);
323 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
324 ECOCHK_PPGTT_CACHE64B);
325 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
326 } else if (INTEL_INFO(dev)->gen >= 7) {
327 I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
328 /* GFX_MODE is per-ring on gen7+ */
329 }
330
331 for_each_ring(ring, dev_priv, i) {
332 if (INTEL_INFO(dev)->gen >= 7)
333 I915_WRITE(RING_MODE_GEN7(ring),
334 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
335
336 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
337 I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
266 } 338 }
267} 339}
268 340
@@ -288,13 +360,40 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
288 dev_priv->mm.interruptible = interruptible; 360 dev_priv->mm.interruptible = interruptible;
289} 361}
290 362
363
364static void i915_ggtt_clear_range(struct drm_device *dev,
365 unsigned first_entry,
366 unsigned num_entries)
367{
368 struct drm_i915_private *dev_priv = dev->dev_private;
369 gtt_pte_t scratch_pte;
370 gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
371 const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
372 int i;
373
374 if (INTEL_INFO(dev)->gen < 6) {
375 intel_gtt_clear_range(first_entry, num_entries);
376 return;
377 }
378
379 if (WARN(num_entries > max_entries,
380 "First entry = %d; Num entries = %d (max=%d)\n",
381 first_entry, num_entries, max_entries))
382 num_entries = max_entries;
383
384 scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
385 for (i = 0; i < num_entries; i++)
386 iowrite32(scratch_pte, &gtt_base[i]);
387 readl(gtt_base);
388}
389
291void i915_gem_restore_gtt_mappings(struct drm_device *dev) 390void i915_gem_restore_gtt_mappings(struct drm_device *dev)
292{ 391{
293 struct drm_i915_private *dev_priv = dev->dev_private; 392 struct drm_i915_private *dev_priv = dev->dev_private;
294 struct drm_i915_gem_object *obj; 393 struct drm_i915_gem_object *obj;
295 394
296 /* First fill our portion of the GTT with scratch pages */ 395 /* First fill our portion of the GTT with scratch pages */
297 intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE, 396 i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE,
298 (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); 397 (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
299 398
300 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { 399 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
@@ -302,7 +401,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
302 i915_gem_gtt_bind_object(obj, obj->cache_level); 401 i915_gem_gtt_bind_object(obj, obj->cache_level);
303 } 402 }
304 403
305 intel_gtt_chipset_flush(); 404 i915_gem_chipset_flush(dev);
306} 405}
307 406
308int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) 407int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
@@ -318,21 +417,76 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
318 return 0; 417 return 0;
319} 418}
320 419
420/*
421 * Binds an object into the global gtt with the specified cache level. The object
422 * will be accessible to the GPU via commands whose operands reference offsets
423 * within the global GTT as well as accessible by the GPU through the GMADR
424 * mapped BAR (dev_priv->mm.gtt->gtt).
425 */
426static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
427 enum i915_cache_level level)
428{
429 struct drm_device *dev = obj->base.dev;
430 struct drm_i915_private *dev_priv = dev->dev_private;
431 struct sg_table *st = obj->pages;
432 struct scatterlist *sg = st->sgl;
433 const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
434 const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
435 gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
436 int unused, i = 0;
437 unsigned int len, m = 0;
438 dma_addr_t addr;
439
440 for_each_sg(st->sgl, sg, st->nents, unused) {
441 len = sg_dma_len(sg) >> PAGE_SHIFT;
442 for (m = 0; m < len; m++) {
443 addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
444 iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]);
445 i++;
446 }
447 }
448
449 BUG_ON(i > max_entries);
450 BUG_ON(i != obj->base.size / PAGE_SIZE);
451
452 /* XXX: This serves as a posting read to make sure that the PTE has
453 * actually been updated. There is some concern that even though
454 * registers and PTEs are within the same BAR that they are potentially
455 * of NUMA access patterns. Therefore, even with the way we assume
456 * hardware should work, we must keep this posting read for paranoia.
457 */
458 if (i != 0)
459 WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level));
460
461 /* This next bit makes the above posting read even more important. We
462 * want to flush the TLBs only after we're certain all the PTE updates
463 * have finished.
464 */
465 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
466 POSTING_READ(GFX_FLSH_CNTL_GEN6);
467}
468
321void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, 469void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
322 enum i915_cache_level cache_level) 470 enum i915_cache_level cache_level)
323{ 471{
324 struct drm_device *dev = obj->base.dev; 472 struct drm_device *dev = obj->base.dev;
325 unsigned int agp_type = cache_level_to_agp_type(dev, cache_level); 473 if (INTEL_INFO(dev)->gen < 6) {
474 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
475 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
476 intel_gtt_insert_sg_entries(obj->pages,
477 obj->gtt_space->start >> PAGE_SHIFT,
478 flags);
479 } else {
480 gen6_ggtt_bind_object(obj, cache_level);
481 }
326 482
327 intel_gtt_insert_sg_entries(obj->pages,
328 obj->gtt_space->start >> PAGE_SHIFT,
329 agp_type);
330 obj->has_global_gtt_mapping = 1; 483 obj->has_global_gtt_mapping = 1;
331} 484}
332 485
333void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) 486void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
334{ 487{
335 intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, 488 i915_ggtt_clear_range(obj->base.dev,
489 obj->gtt_space->start >> PAGE_SHIFT,
336 obj->base.size >> PAGE_SHIFT); 490 obj->base.size >> PAGE_SHIFT);
337 491
338 obj->has_global_gtt_mapping = 0; 492 obj->has_global_gtt_mapping = 0;
@@ -390,5 +544,165 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
390 dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start; 544 dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
391 545
392 /* ... but ensure that we clear the entire range. */ 546 /* ... but ensure that we clear the entire range. */
393 intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE); 547 i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
548}
549
550static int setup_scratch_page(struct drm_device *dev)
551{
552 struct drm_i915_private *dev_priv = dev->dev_private;
553 struct page *page;
554 dma_addr_t dma_addr;
555
556 page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
557 if (page == NULL)
558 return -ENOMEM;
559 get_page(page);
560 set_pages_uc(page, 1);
561
562#ifdef CONFIG_INTEL_IOMMU
563 dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
564 PCI_DMA_BIDIRECTIONAL);
565 if (pci_dma_mapping_error(dev->pdev, dma_addr))
566 return -EINVAL;
567#else
568 dma_addr = page_to_phys(page);
569#endif
570 dev_priv->mm.gtt->scratch_page = page;
571 dev_priv->mm.gtt->scratch_page_dma = dma_addr;
572
573 return 0;
574}
575
576static void teardown_scratch_page(struct drm_device *dev)
577{
578 struct drm_i915_private *dev_priv = dev->dev_private;
579 set_pages_wb(dev_priv->mm.gtt->scratch_page, 1);
580 pci_unmap_page(dev->pdev, dev_priv->mm.gtt->scratch_page_dma,
581 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
582 put_page(dev_priv->mm.gtt->scratch_page);
583 __free_page(dev_priv->mm.gtt->scratch_page);
584}
585
586static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
587{
588 snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
589 snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
590 return snb_gmch_ctl << 20;
591}
592
593static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl)
594{
595 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
596 snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
597 return snb_gmch_ctl << 25; /* 32 MB units */
598}
599
600static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
601{
602 static const int stolen_decoder[] = {
603 0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
604 snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
605 snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
606 return stolen_decoder[snb_gmch_ctl] << 20;
607}
608
609int i915_gem_gtt_init(struct drm_device *dev)
610{
611 struct drm_i915_private *dev_priv = dev->dev_private;
612 phys_addr_t gtt_bus_addr;
613 u16 snb_gmch_ctl;
614 int ret;
615
616 /* On modern platforms we need not worry ourself with the legacy
617 * hostbridge query stuff. Skip it entirely
618 */
619 if (INTEL_INFO(dev)->gen < 6) {
620 ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
621 if (!ret) {
622 DRM_ERROR("failed to set up gmch\n");
623 return -EIO;
624 }
625
626 dev_priv->mm.gtt = intel_gtt_get();
627 if (!dev_priv->mm.gtt) {
628 DRM_ERROR("Failed to initialize GTT\n");
629 intel_gmch_remove();
630 return -ENODEV;
631 }
632 return 0;
633 }
634
635 dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
636 if (!dev_priv->mm.gtt)
637 return -ENOMEM;
638
639 if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
640 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
641
642#ifdef CONFIG_INTEL_IOMMU
643 dev_priv->mm.gtt->needs_dmar = 1;
644#endif
645
646 /* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
647 gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
648 dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2);
649
650 /* i9xx_setup */
651 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
652 dev_priv->mm.gtt->gtt_total_entries =
653 gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
654 if (INTEL_INFO(dev)->gen < 7)
655 dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
656 else
657 dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl);
658
659 dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT;
660 /* 64/512MB is the current min/max we actually know of, but this is just a
661 * coarse sanity check.
662 */
663 if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 ||
664 dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) {
665 DRM_ERROR("Unknown GMADR entries (%d)\n",
666 dev_priv->mm.gtt->gtt_mappable_entries);
667 ret = -ENXIO;
668 goto err_out;
669 }
670
671 ret = setup_scratch_page(dev);
672 if (ret) {
673 DRM_ERROR("Scratch setup failed\n");
674 goto err_out;
675 }
676
677 dev_priv->mm.gtt->gtt = ioremap_wc(gtt_bus_addr,
678 dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
679 if (!dev_priv->mm.gtt->gtt) {
680 DRM_ERROR("Failed to map the gtt page table\n");
681 teardown_scratch_page(dev);
682 ret = -ENOMEM;
683 goto err_out;
684 }
685
686 /* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
687 DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8);
688 DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8);
689 DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);
690
691 return 0;
692
693err_out:
694 kfree(dev_priv->mm.gtt);
695 if (INTEL_INFO(dev)->gen < 6)
696 intel_gmch_remove();
697 return ret;
698}
699
700void i915_gem_gtt_fini(struct drm_device *dev)
701{
702 struct drm_i915_private *dev_priv = dev->dev_private;
703 iounmap(dev_priv->mm.gtt->gtt);
704 teardown_scratch_page(dev);
705 if (INTEL_INFO(dev)->gen < 6)
706 intel_gmch_remove();
707 kfree(dev_priv->mm.gtt);
394} 708}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 32e1bda865b8..2220dec3e5d9 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -122,7 +122,10 @@ static int
122i915_pipe_enabled(struct drm_device *dev, int pipe) 122i915_pipe_enabled(struct drm_device *dev, int pipe)
123{ 123{
124 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 124 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
125 return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE; 125 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
126 pipe);
127
128 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
126} 129}
127 130
128/* Called from drm generic code, passed a 'crtc', which 131/* Called from drm generic code, passed a 'crtc', which
@@ -182,6 +185,8 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
182 int vbl_start, vbl_end, htotal, vtotal; 185 int vbl_start, vbl_end, htotal, vtotal;
183 bool in_vbl = true; 186 bool in_vbl = true;
184 int ret = 0; 187 int ret = 0;
188 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
189 pipe);
185 190
186 if (!i915_pipe_enabled(dev, pipe)) { 191 if (!i915_pipe_enabled(dev, pipe)) {
187 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 192 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
@@ -190,7 +195,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
190 } 195 }
191 196
192 /* Get vtotal. */ 197 /* Get vtotal. */
193 vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff); 198 vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
194 199
195 if (INTEL_INFO(dev)->gen >= 4) { 200 if (INTEL_INFO(dev)->gen >= 4) {
196 /* No obvious pixelcount register. Only query vertical 201 /* No obvious pixelcount register. Only query vertical
@@ -210,13 +215,13 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
210 */ 215 */
211 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 216 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
212 217
213 htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff); 218 htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
214 *vpos = position / htotal; 219 *vpos = position / htotal;
215 *hpos = position - (*vpos * htotal); 220 *hpos = position - (*vpos * htotal);
216 } 221 }
217 222
218 /* Query vblank area. */ 223 /* Query vblank area. */
219 vbl = I915_READ(VBLANK(pipe)); 224 vbl = I915_READ(VBLANK(cpu_transcoder));
220 225
221 /* Test position against vblank region. */ 226 /* Test position against vblank region. */
222 vbl_start = vbl & 0x1fff; 227 vbl_start = vbl & 0x1fff;
@@ -352,8 +357,7 @@ static void notify_ring(struct drm_device *dev,
352 if (i915_enable_hangcheck) { 357 if (i915_enable_hangcheck) {
353 dev_priv->hangcheck_count = 0; 358 dev_priv->hangcheck_count = 0;
354 mod_timer(&dev_priv->hangcheck_timer, 359 mod_timer(&dev_priv->hangcheck_timer,
355 jiffies + 360 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
356 msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
357 } 361 }
358} 362}
359 363
@@ -374,7 +378,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
374 if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) 378 if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
375 return; 379 return;
376 380
377 mutex_lock(&dev_priv->dev->struct_mutex); 381 mutex_lock(&dev_priv->rps.hw_lock);
378 382
379 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) 383 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
380 new_delay = dev_priv->rps.cur_delay + 1; 384 new_delay = dev_priv->rps.cur_delay + 1;
@@ -389,7 +393,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
389 gen6_set_rps(dev_priv->dev, new_delay); 393 gen6_set_rps(dev_priv->dev, new_delay);
390 } 394 }
391 395
392 mutex_unlock(&dev_priv->dev->struct_mutex); 396 mutex_unlock(&dev_priv->rps.hw_lock);
393} 397}
394 398
395 399
@@ -405,7 +409,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
405static void ivybridge_parity_work(struct work_struct *work) 409static void ivybridge_parity_work(struct work_struct *work)
406{ 410{
407 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 411 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
408 parity_error_work); 412 l3_parity.error_work);
409 u32 error_status, row, bank, subbank; 413 u32 error_status, row, bank, subbank;
410 char *parity_event[5]; 414 char *parity_event[5];
411 uint32_t misccpctl; 415 uint32_t misccpctl;
@@ -469,7 +473,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev)
469 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 473 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
470 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 474 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
471 475
472 queue_work(dev_priv->wq, &dev_priv->parity_error_work); 476 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
473} 477}
474 478
475static void snb_gt_irq_handler(struct drm_device *dev, 479static void snb_gt_irq_handler(struct drm_device *dev,
@@ -520,7 +524,7 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
520 queue_work(dev_priv->wq, &dev_priv->rps.work); 524 queue_work(dev_priv->wq, &dev_priv->rps.work);
521} 525}
522 526
523static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) 527static irqreturn_t valleyview_irq_handler(int irq, void *arg)
524{ 528{
525 struct drm_device *dev = (struct drm_device *) arg; 529 struct drm_device *dev = (struct drm_device *) arg;
526 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 530 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -606,6 +610,9 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
606 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 610 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
607 int pipe; 611 int pipe;
608 612
613 if (pch_iir & SDE_HOTPLUG_MASK)
614 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
615
609 if (pch_iir & SDE_AUDIO_POWER_MASK) 616 if (pch_iir & SDE_AUDIO_POWER_MASK)
610 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 617 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
611 (pch_iir & SDE_AUDIO_POWER_MASK) >> 618 (pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -646,6 +653,9 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
646 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 653 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
647 int pipe; 654 int pipe;
648 655
656 if (pch_iir & SDE_HOTPLUG_MASK_CPT)
657 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
658
649 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) 659 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
650 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 660 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
651 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 661 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -670,7 +680,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
670 I915_READ(FDI_RX_IIR(pipe))); 680 I915_READ(FDI_RX_IIR(pipe)));
671} 681}
672 682
673static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) 683static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
674{ 684{
675 struct drm_device *dev = (struct drm_device *) arg; 685 struct drm_device *dev = (struct drm_device *) arg;
676 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 686 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -709,8 +719,6 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
709 if (de_iir & DE_PCH_EVENT_IVB) { 719 if (de_iir & DE_PCH_EVENT_IVB) {
710 u32 pch_iir = I915_READ(SDEIIR); 720 u32 pch_iir = I915_READ(SDEIIR);
711 721
712 if (pch_iir & SDE_HOTPLUG_MASK_CPT)
713 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
714 cpt_irq_handler(dev, pch_iir); 722 cpt_irq_handler(dev, pch_iir);
715 723
716 /* clear PCH hotplug event before clear CPU irq */ 724 /* clear PCH hotplug event before clear CPU irq */
@@ -745,13 +753,12 @@ static void ilk_gt_irq_handler(struct drm_device *dev,
745 notify_ring(dev, &dev_priv->ring[VCS]); 753 notify_ring(dev, &dev_priv->ring[VCS]);
746} 754}
747 755
748static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) 756static irqreturn_t ironlake_irq_handler(int irq, void *arg)
749{ 757{
750 struct drm_device *dev = (struct drm_device *) arg; 758 struct drm_device *dev = (struct drm_device *) arg;
751 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 759 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
752 int ret = IRQ_NONE; 760 int ret = IRQ_NONE;
753 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; 761 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
754 u32 hotplug_mask;
755 762
756 atomic_inc(&dev_priv->irq_received); 763 atomic_inc(&dev_priv->irq_received);
757 764
@@ -769,11 +776,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
769 (!IS_GEN6(dev) || pm_iir == 0)) 776 (!IS_GEN6(dev) || pm_iir == 0))
770 goto done; 777 goto done;
771 778
772 if (HAS_PCH_CPT(dev))
773 hotplug_mask = SDE_HOTPLUG_MASK_CPT;
774 else
775 hotplug_mask = SDE_HOTPLUG_MASK;
776
777 ret = IRQ_HANDLED; 779 ret = IRQ_HANDLED;
778 780
779 if (IS_GEN5(dev)) 781 if (IS_GEN5(dev))
@@ -802,8 +804,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
802 804
803 /* check event from PCH */ 805 /* check event from PCH */
804 if (de_iir & DE_PCH_EVENT) { 806 if (de_iir & DE_PCH_EVENT) {
805 if (pch_iir & hotplug_mask)
806 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
807 if (HAS_PCH_CPT(dev)) 807 if (HAS_PCH_CPT(dev))
808 cpt_irq_handler(dev, pch_iir); 808 cpt_irq_handler(dev, pch_iir);
809 else 809 else
@@ -1087,6 +1087,18 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
1087 if (!ring->get_seqno) 1087 if (!ring->get_seqno)
1088 return NULL; 1088 return NULL;
1089 1089
1090 if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
1091 u32 acthd = I915_READ(ACTHD);
1092
1093 if (WARN_ON(ring->id != RCS))
1094 return NULL;
1095
1096 obj = ring->private;
1097 if (acthd >= obj->gtt_offset &&
1098 acthd < obj->gtt_offset + obj->base.size)
1099 return i915_error_object_create(dev_priv, obj);
1100 }
1101
1090 seqno = ring->get_seqno(ring, false); 1102 seqno = ring->get_seqno(ring, false);
1091 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 1103 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
1092 if (obj->ring != ring) 1104 if (obj->ring != ring)
@@ -1120,6 +1132,8 @@ static void i915_record_ring_state(struct drm_device *dev,
1120 = I915_READ(RING_SYNC_0(ring->mmio_base)); 1132 = I915_READ(RING_SYNC_0(ring->mmio_base));
1121 error->semaphore_mboxes[ring->id][1] 1133 error->semaphore_mboxes[ring->id][1]
1122 = I915_READ(RING_SYNC_1(ring->mmio_base)); 1134 = I915_READ(RING_SYNC_1(ring->mmio_base));
1135 error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
1136 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
1123 } 1137 }
1124 1138
1125 if (INTEL_INFO(dev)->gen >= 4) { 1139 if (INTEL_INFO(dev)->gen >= 4) {
@@ -1464,7 +1478,9 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
1464 spin_lock_irqsave(&dev->event_lock, flags); 1478 spin_lock_irqsave(&dev->event_lock, flags);
1465 work = intel_crtc->unpin_work; 1479 work = intel_crtc->unpin_work;
1466 1480
1467 if (work == NULL || work->pending || !work->enable_stall_check) { 1481 if (work == NULL ||
1482 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
1483 !work->enable_stall_check) {
1468 /* Either the pending flip IRQ arrived, or we're too early. Don't check */ 1484 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
1469 spin_unlock_irqrestore(&dev->event_lock, flags); 1485 spin_unlock_irqrestore(&dev->event_lock, flags);
1470 return; 1486 return;
@@ -1751,7 +1767,7 @@ void i915_hangcheck_elapsed(unsigned long data)
1751repeat: 1767repeat:
1752 /* Reset timer case chip hangs without another request being added */ 1768 /* Reset timer case chip hangs without another request being added */
1753 mod_timer(&dev_priv->hangcheck_timer, 1769 mod_timer(&dev_priv->hangcheck_timer,
1754 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); 1770 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
1755} 1771}
1756 1772
1757/* drm_dma.h hooks 1773/* drm_dma.h hooks
@@ -1956,6 +1972,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
1956 u32 enable_mask; 1972 u32 enable_mask;
1957 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 1973 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1958 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; 1974 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
1975 u32 render_irqs;
1959 u16 msid; 1976 u16 msid;
1960 1977
1961 enable_mask = I915_DISPLAY_PORT_INTERRUPT; 1978 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
@@ -1995,21 +2012,12 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
1995 I915_WRITE(VLV_IIR, 0xffffffff); 2012 I915_WRITE(VLV_IIR, 0xffffffff);
1996 I915_WRITE(VLV_IIR, 0xffffffff); 2013 I915_WRITE(VLV_IIR, 0xffffffff);
1997 2014
1998 dev_priv->gt_irq_mask = ~0;
1999
2000 I915_WRITE(GTIIR, I915_READ(GTIIR));
2001 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2015 I915_WRITE(GTIIR, I915_READ(GTIIR));
2002 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2016 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2003 I915_WRITE(GTIER, GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT | 2017
2004 GT_GEN6_BLT_CS_ERROR_INTERRUPT | 2018 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
2005 GT_GEN6_BLT_USER_INTERRUPT | 2019 GEN6_BLITTER_USER_INTERRUPT;
2006 GT_GEN6_BSD_USER_INTERRUPT | 2020 I915_WRITE(GTIER, render_irqs);
2007 GT_GEN6_BSD_CS_ERROR_INTERRUPT |
2008 GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
2009 GT_PIPE_NOTIFY |
2010 GT_RENDER_CS_ERROR_INTERRUPT |
2011 GT_SYNC_STATUS |
2012 GT_USER_INTERRUPT);
2013 POSTING_READ(GTIER); 2021 POSTING_READ(GTIER);
2014 2022
2015 /* ack & enable invalid PTE error interrupts */ 2023 /* ack & enable invalid PTE error interrupts */
@@ -2019,7 +2027,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
2019#endif 2027#endif
2020 2028
2021 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 2029 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2022#if 0 /* FIXME: check register definitions; some have moved */
2023 /* Note HDMI and DP share bits */ 2030 /* Note HDMI and DP share bits */
2024 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) 2031 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2025 hotplug_en |= HDMIB_HOTPLUG_INT_EN; 2032 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
@@ -2027,15 +2034,14 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
2027 hotplug_en |= HDMIC_HOTPLUG_INT_EN; 2034 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2028 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) 2035 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2029 hotplug_en |= HDMID_HOTPLUG_INT_EN; 2036 hotplug_en |= HDMID_HOTPLUG_INT_EN;
2030 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) 2037 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
2031 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 2038 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2032 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) 2039 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
2033 hotplug_en |= SDVOB_HOTPLUG_INT_EN; 2040 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2034 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { 2041 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2035 hotplug_en |= CRT_HOTPLUG_INT_EN; 2042 hotplug_en |= CRT_HOTPLUG_INT_EN;
2036 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 2043 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2037 } 2044 }
2038#endif
2039 2045
2040 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2046 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2041 2047
@@ -2129,7 +2135,7 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
2129 return 0; 2135 return 0;
2130} 2136}
2131 2137
2132static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS) 2138static irqreturn_t i8xx_irq_handler(int irq, void *arg)
2133{ 2139{
2134 struct drm_device *dev = (struct drm_device *) arg; 2140 struct drm_device *dev = (struct drm_device *) arg;
2135 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2141 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2307,7 +2313,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
2307 return 0; 2313 return 0;
2308} 2314}
2309 2315
2310static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS) 2316static irqreturn_t i915_irq_handler(int irq, void *arg)
2311{ 2317{
2312 struct drm_device *dev = (struct drm_device *) arg; 2318 struct drm_device *dev = (struct drm_device *) arg;
2313 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2319 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2545,7 +2551,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
2545 return 0; 2551 return 0;
2546} 2552}
2547 2553
2548static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS) 2554static irqreturn_t i965_irq_handler(int irq, void *arg)
2549{ 2555{
2550 struct drm_device *dev = (struct drm_device *) arg; 2556 struct drm_device *dev = (struct drm_device *) arg;
2551 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2557 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2691,7 +2697,7 @@ void intel_irq_init(struct drm_device *dev)
2691 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 2697 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
2692 INIT_WORK(&dev_priv->error_work, i915_error_work_func); 2698 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
2693 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 2699 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
2694 INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work); 2700 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
2695 2701
2696 dev->driver->get_vblank_counter = i915_get_vblank_counter; 2702 dev->driver->get_vblank_counter = i915_get_vblank_counter;
2697 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 2703 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index a4162ddff6c5..186ee5c85b51 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -26,6 +26,7 @@
26#define _I915_REG_H_ 26#define _I915_REG_H_
27 27
28#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) 28#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
29#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
29 30
30#define _PORT(port, a, b) ((a) + (port)*((b)-(a))) 31#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
31 32
@@ -40,6 +41,14 @@
40 */ 41 */
41#define INTEL_GMCH_CTRL 0x52 42#define INTEL_GMCH_CTRL 0x52
42#define INTEL_GMCH_VGA_DISABLE (1 << 1) 43#define INTEL_GMCH_VGA_DISABLE (1 << 1)
44#define SNB_GMCH_CTRL 0x50
45#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
46#define SNB_GMCH_GGMS_MASK 0x3
47#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
48#define SNB_GMCH_GMS_MASK 0x1f
49#define IVB_GMCH_GMS_SHIFT 4
50#define IVB_GMCH_GMS_MASK 0xf
51
43 52
44/* PCI config space */ 53/* PCI config space */
45 54
@@ -105,23 +114,6 @@
105#define GEN6_GRDOM_MEDIA (1 << 2) 114#define GEN6_GRDOM_MEDIA (1 << 2)
106#define GEN6_GRDOM_BLT (1 << 3) 115#define GEN6_GRDOM_BLT (1 << 3)
107 116
108/* PPGTT stuff */
109#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
110
111#define GEN6_PDE_VALID (1 << 0)
112#define GEN6_PDE_LARGE_PAGE (2 << 0) /* use 32kb pages */
113/* gen6+ has bit 11-4 for physical addr bit 39-32 */
114#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
115
116#define GEN6_PTE_VALID (1 << 0)
117#define GEN6_PTE_UNCACHED (1 << 1)
118#define HSW_PTE_UNCACHED (0)
119#define GEN6_PTE_CACHE_LLC (2 << 1)
120#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
121#define GEN6_PTE_CACHE_BITS (3 << 1)
122#define GEN6_PTE_GFDT (1 << 3)
123#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
124
125#define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228) 117#define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228)
126#define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518) 118#define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518)
127#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220) 119#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220)
@@ -241,11 +233,18 @@
241 */ 233 */
242#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) 234#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
243#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ 235#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
244#define MI_INVALIDATE_TLB (1<<18) 236#define MI_FLUSH_DW_STORE_INDEX (1<<21)
245#define MI_INVALIDATE_BSD (1<<7) 237#define MI_INVALIDATE_TLB (1<<18)
238#define MI_FLUSH_DW_OP_STOREDW (1<<14)
239#define MI_INVALIDATE_BSD (1<<7)
240#define MI_FLUSH_DW_USE_GTT (1<<2)
241#define MI_FLUSH_DW_USE_PPGTT (0<<2)
246#define MI_BATCH_BUFFER MI_INSTR(0x30, 1) 242#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
247#define MI_BATCH_NON_SECURE (1) 243#define MI_BATCH_NON_SECURE (1)
248#define MI_BATCH_NON_SECURE_I965 (1<<8) 244/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
245#define MI_BATCH_NON_SECURE_I965 (1<<8)
246#define MI_BATCH_PPGTT_HSW (1<<8)
247#define MI_BATCH_NON_SECURE_HSW (1<<13)
249#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) 248#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
250#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */ 249#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
251#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ 250#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
@@ -369,6 +368,7 @@
369#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */ 368#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */
370#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */ 369#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */
371#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */ 370#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */
371#define DPIO_PLL_REFCLK_SEL_MASK 3
372#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */ 372#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */
373#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */ 373#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */
374#define _DPIO_REFSFR_B 0x8034 374#define _DPIO_REFSFR_B 0x8034
@@ -384,6 +384,9 @@
384 384
385#define DPIO_FASTCLK_DISABLE 0x8100 385#define DPIO_FASTCLK_DISABLE 0x8100
386 386
387#define DPIO_DATA_CHANNEL1 0x8220
388#define DPIO_DATA_CHANNEL2 0x8420
389
387/* 390/*
388 * Fence registers 391 * Fence registers
389 */ 392 */
@@ -514,6 +517,7 @@
514 * the enables for writing to the corresponding low bit. 517 * the enables for writing to the corresponding low bit.
515 */ 518 */
516#define _3D_CHICKEN 0x02084 519#define _3D_CHICKEN 0x02084
520#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
517#define _3D_CHICKEN2 0x0208c 521#define _3D_CHICKEN2 0x0208c
518/* Disables pipelining of read flushes past the SF-WIZ interface. 522/* Disables pipelining of read flushes past the SF-WIZ interface.
519 * Required on all Ironlake steppings according to the B-Spec, but the 523 * Required on all Ironlake steppings according to the B-Spec, but the
@@ -521,6 +525,7 @@
521 */ 525 */
522# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) 526# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
523#define _3D_CHICKEN3 0x02090 527#define _3D_CHICKEN3 0x02090
528#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
524#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5) 529#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
525 530
526#define MI_MODE 0x0209c 531#define MI_MODE 0x0209c
@@ -528,7 +533,8 @@
528# define MI_FLUSH_ENABLE (1 << 12) 533# define MI_FLUSH_ENABLE (1 << 12)
529 534
530#define GEN6_GT_MODE 0x20d0 535#define GEN6_GT_MODE 0x20d0
531#define GEN6_GT_MODE_HI (1 << 9) 536#define GEN6_GT_MODE_HI (1 << 9)
537#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
532 538
533#define GFX_MODE 0x02520 539#define GFX_MODE 0x02520
534#define GFX_MODE_GEN7 0x0229c 540#define GFX_MODE_GEN7 0x0229c
@@ -547,6 +553,8 @@
547#define IIR 0x020a4 553#define IIR 0x020a4
548#define IMR 0x020a8 554#define IMR 0x020a8
549#define ISR 0x020ac 555#define ISR 0x020ac
556#define VLV_GUNIT_CLOCK_GATE 0x182060
557#define GCFG_DIS (1<<8)
550#define VLV_IIR_RW 0x182084 558#define VLV_IIR_RW 0x182084
551#define VLV_IER 0x1820a0 559#define VLV_IER 0x1820a0
552#define VLV_IIR 0x1820a4 560#define VLV_IIR 0x1820a4
@@ -661,6 +669,7 @@
661#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */ 669#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
662 670
663#define CACHE_MODE_0 0x02120 /* 915+ only */ 671#define CACHE_MODE_0 0x02120 /* 915+ only */
672#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
664#define CM0_IZ_OPT_DISABLE (1<<6) 673#define CM0_IZ_OPT_DISABLE (1<<6)
665#define CM0_ZR_OPT_DISABLE (1<<5) 674#define CM0_ZR_OPT_DISABLE (1<<5)
666#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5) 675#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
@@ -670,6 +679,8 @@
670#define CM0_RC_OP_FLUSH_DISABLE (1<<0) 679#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
671#define BB_ADDR 0x02140 /* 8 bytes */ 680#define BB_ADDR 0x02140 /* 8 bytes */
672#define GFX_FLSH_CNTL 0x02170 /* 915+ only */ 681#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
682#define GFX_FLSH_CNTL_GEN6 0x101008
683#define GFX_FLSH_CNTL_EN (1<<0)
673#define ECOSKPD 0x021d0 684#define ECOSKPD 0x021d0
674#define ECO_GATING_CX_ONLY (1<<3) 685#define ECO_GATING_CX_ONLY (1<<3)
675#define ECO_FLIP_DONE (1<<0) 686#define ECO_FLIP_DONE (1<<0)
@@ -1559,14 +1570,14 @@
1559#define _VSYNCSHIFT_B 0x61028 1570#define _VSYNCSHIFT_B 0x61028
1560 1571
1561 1572
1562#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B) 1573#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
1563#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B) 1574#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B)
1564#define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B) 1575#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B)
1565#define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B) 1576#define VTOTAL(trans) _TRANSCODER(trans, _VTOTAL_A, _VTOTAL_B)
1566#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B) 1577#define VBLANK(trans) _TRANSCODER(trans, _VBLANK_A, _VBLANK_B)
1567#define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B) 1578#define VSYNC(trans) _TRANSCODER(trans, _VSYNC_A, _VSYNC_B)
1568#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B) 1579#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
1569#define VSYNCSHIFT(pipe) _PIPE(pipe, _VSYNCSHIFT_A, _VSYNCSHIFT_B) 1580#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
1570 1581
1571/* VGA port control */ 1582/* VGA port control */
1572#define ADPA 0x61100 1583#define ADPA 0x61100
@@ -2641,6 +2652,7 @@
2641#define PIPECONF_GAMMA (1<<24) 2652#define PIPECONF_GAMMA (1<<24)
2642#define PIPECONF_FORCE_BORDER (1<<25) 2653#define PIPECONF_FORCE_BORDER (1<<25)
2643#define PIPECONF_INTERLACE_MASK (7 << 21) 2654#define PIPECONF_INTERLACE_MASK (7 << 21)
2655#define PIPECONF_INTERLACE_MASK_HSW (3 << 21)
2644/* Note that pre-gen3 does not support interlaced display directly. Panel 2656/* Note that pre-gen3 does not support interlaced display directly. Panel
2645 * fitting must be disabled on pre-ilk for interlaced. */ 2657 * fitting must be disabled on pre-ilk for interlaced. */
2646#define PIPECONF_PROGRESSIVE (0 << 21) 2658#define PIPECONF_PROGRESSIVE (0 << 21)
@@ -2711,7 +2723,7 @@
2711#define PIPE_12BPC (3 << 5) 2723#define PIPE_12BPC (3 << 5)
2712 2724
2713#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC) 2725#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
2714#define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF) 2726#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF)
2715#define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL) 2727#define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL)
2716#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH) 2728#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH)
2717#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL) 2729#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
@@ -2998,12 +3010,19 @@
2998#define DISPPLANE_GAMMA_ENABLE (1<<30) 3010#define DISPPLANE_GAMMA_ENABLE (1<<30)
2999#define DISPPLANE_GAMMA_DISABLE 0 3011#define DISPPLANE_GAMMA_DISABLE 0
3000#define DISPPLANE_PIXFORMAT_MASK (0xf<<26) 3012#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
3013#define DISPPLANE_YUV422 (0x0<<26)
3001#define DISPPLANE_8BPP (0x2<<26) 3014#define DISPPLANE_8BPP (0x2<<26)
3002#define DISPPLANE_15_16BPP (0x4<<26) 3015#define DISPPLANE_BGRA555 (0x3<<26)
3003#define DISPPLANE_16BPP (0x5<<26) 3016#define DISPPLANE_BGRX555 (0x4<<26)
3004#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26) 3017#define DISPPLANE_BGRX565 (0x5<<26)
3005#define DISPPLANE_32BPP (0x7<<26) 3018#define DISPPLANE_BGRX888 (0x6<<26)
3006#define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26) 3019#define DISPPLANE_BGRA888 (0x7<<26)
3020#define DISPPLANE_RGBX101010 (0x8<<26)
3021#define DISPPLANE_RGBA101010 (0x9<<26)
3022#define DISPPLANE_BGRX101010 (0xa<<26)
3023#define DISPPLANE_RGBX161616 (0xc<<26)
3024#define DISPPLANE_RGBX888 (0xe<<26)
3025#define DISPPLANE_RGBA888 (0xf<<26)
3007#define DISPPLANE_STEREO_ENABLE (1<<25) 3026#define DISPPLANE_STEREO_ENABLE (1<<25)
3008#define DISPPLANE_STEREO_DISABLE 0 3027#define DISPPLANE_STEREO_DISABLE 0
3009#define DISPPLANE_SEL_PIPE_SHIFT 24 3028#define DISPPLANE_SEL_PIPE_SHIFT 24
@@ -3024,6 +3043,8 @@
3024#define _DSPASIZE 0x70190 3043#define _DSPASIZE 0x70190
3025#define _DSPASURF 0x7019C /* 965+ only */ 3044#define _DSPASURF 0x7019C /* 965+ only */
3026#define _DSPATILEOFF 0x701A4 /* 965+ only */ 3045#define _DSPATILEOFF 0x701A4 /* 965+ only */
3046#define _DSPAOFFSET 0x701A4 /* HSW */
3047#define _DSPASURFLIVE 0x701AC
3027 3048
3028#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR) 3049#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
3029#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR) 3050#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
@@ -3033,6 +3054,8 @@
3033#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF) 3054#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
3034#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF) 3055#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
3035#define DSPLINOFF(plane) DSPADDR(plane) 3056#define DSPLINOFF(plane) DSPADDR(plane)
3057#define DSPOFFSET(plane) _PIPE(plane, _DSPAOFFSET, _DSPBOFFSET)
3058#define DSPSURFLIVE(plane) _PIPE(plane, _DSPASURFLIVE, _DSPBSURFLIVE)
3036 3059
3037/* Display/Sprite base address macros */ 3060/* Display/Sprite base address macros */
3038#define DISP_BASEADDR_MASK (0xfffff000) 3061#define DISP_BASEADDR_MASK (0xfffff000)
@@ -3078,6 +3101,8 @@
3078#define _DSPBSIZE 0x71190 3101#define _DSPBSIZE 0x71190
3079#define _DSPBSURF 0x7119C 3102#define _DSPBSURF 0x7119C
3080#define _DSPBTILEOFF 0x711A4 3103#define _DSPBTILEOFF 0x711A4
3104#define _DSPBOFFSET 0x711A4
3105#define _DSPBSURFLIVE 0x711AC
3081 3106
3082/* Sprite A control */ 3107/* Sprite A control */
3083#define _DVSACNTR 0x72180 3108#define _DVSACNTR 0x72180
@@ -3143,6 +3168,7 @@
3143#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF) 3168#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
3144#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL) 3169#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
3145#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK) 3170#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
3171#define DVSSURFLIVE(pipe) _PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
3146 3172
3147#define _SPRA_CTL 0x70280 3173#define _SPRA_CTL 0x70280
3148#define SPRITE_ENABLE (1<<31) 3174#define SPRITE_ENABLE (1<<31)
@@ -3177,6 +3203,8 @@
3177#define _SPRA_SURF 0x7029c 3203#define _SPRA_SURF 0x7029c
3178#define _SPRA_KEYMAX 0x702a0 3204#define _SPRA_KEYMAX 0x702a0
3179#define _SPRA_TILEOFF 0x702a4 3205#define _SPRA_TILEOFF 0x702a4
3206#define _SPRA_OFFSET 0x702a4
3207#define _SPRA_SURFLIVE 0x702ac
3180#define _SPRA_SCALE 0x70304 3208#define _SPRA_SCALE 0x70304
3181#define SPRITE_SCALE_ENABLE (1<<31) 3209#define SPRITE_SCALE_ENABLE (1<<31)
3182#define SPRITE_FILTER_MASK (3<<29) 3210#define SPRITE_FILTER_MASK (3<<29)
@@ -3197,6 +3225,8 @@
3197#define _SPRB_SURF 0x7129c 3225#define _SPRB_SURF 0x7129c
3198#define _SPRB_KEYMAX 0x712a0 3226#define _SPRB_KEYMAX 0x712a0
3199#define _SPRB_TILEOFF 0x712a4 3227#define _SPRB_TILEOFF 0x712a4
3228#define _SPRB_OFFSET 0x712a4
3229#define _SPRB_SURFLIVE 0x712ac
3200#define _SPRB_SCALE 0x71304 3230#define _SPRB_SCALE 0x71304
3201#define _SPRB_GAMC 0x71400 3231#define _SPRB_GAMC 0x71400
3202 3232
@@ -3210,8 +3240,10 @@
3210#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF) 3240#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
3211#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX) 3241#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
3212#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF) 3242#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
3243#define SPROFFSET(pipe) _PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
3213#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE) 3244#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
3214#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) 3245#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
3246#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
3215 3247
3216/* VBIOS regs */ 3248/* VBIOS regs */
3217#define VGACNTRL 0x71400 3249#define VGACNTRL 0x71400
@@ -3246,12 +3278,6 @@
3246#define DISPLAY_PORT_PLL_BIOS_1 0x46010 3278#define DISPLAY_PORT_PLL_BIOS_1 0x46010
3247#define DISPLAY_PORT_PLL_BIOS_2 0x46014 3279#define DISPLAY_PORT_PLL_BIOS_2 0x46014
3248 3280
3249#define PCH_DSPCLK_GATE_D 0x42020
3250# define DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
3251# define DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
3252# define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7)
3253# define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5)
3254
3255#define PCH_3DCGDIS0 0x46020 3281#define PCH_3DCGDIS0 0x46020
3256# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18) 3282# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18)
3257# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1) 3283# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1)
@@ -3301,20 +3327,22 @@
3301#define _PIPEB_LINK_M2 0x61048 3327#define _PIPEB_LINK_M2 0x61048
3302#define _PIPEB_LINK_N2 0x6104c 3328#define _PIPEB_LINK_N2 0x6104c
3303 3329
3304#define PIPE_DATA_M1(pipe) _PIPE(pipe, _PIPEA_DATA_M1, _PIPEB_DATA_M1) 3330#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
3305#define PIPE_DATA_N1(pipe) _PIPE(pipe, _PIPEA_DATA_N1, _PIPEB_DATA_N1) 3331#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
3306#define PIPE_DATA_M2(pipe) _PIPE(pipe, _PIPEA_DATA_M2, _PIPEB_DATA_M2) 3332#define PIPE_DATA_M2(tran) _TRANSCODER(tran, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
3307#define PIPE_DATA_N2(pipe) _PIPE(pipe, _PIPEA_DATA_N2, _PIPEB_DATA_N2) 3333#define PIPE_DATA_N2(tran) _TRANSCODER(tran, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
3308#define PIPE_LINK_M1(pipe) _PIPE(pipe, _PIPEA_LINK_M1, _PIPEB_LINK_M1) 3334#define PIPE_LINK_M1(tran) _TRANSCODER(tran, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
3309#define PIPE_LINK_N1(pipe) _PIPE(pipe, _PIPEA_LINK_N1, _PIPEB_LINK_N1) 3335#define PIPE_LINK_N1(tran) _TRANSCODER(tran, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
3310#define PIPE_LINK_M2(pipe) _PIPE(pipe, _PIPEA_LINK_M2, _PIPEB_LINK_M2) 3336#define PIPE_LINK_M2(tran) _TRANSCODER(tran, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
3311#define PIPE_LINK_N2(pipe) _PIPE(pipe, _PIPEA_LINK_N2, _PIPEB_LINK_N2) 3337#define PIPE_LINK_N2(tran) _TRANSCODER(tran, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
3312 3338
3313/* CPU panel fitter */ 3339/* CPU panel fitter */
3314/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */ 3340/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
3315#define _PFA_CTL_1 0x68080 3341#define _PFA_CTL_1 0x68080
3316#define _PFB_CTL_1 0x68880 3342#define _PFB_CTL_1 0x68880
3317#define PF_ENABLE (1<<31) 3343#define PF_ENABLE (1<<31)
3344#define PF_PIPE_SEL_MASK_IVB (3<<29)
3345#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29)
3318#define PF_FILTER_MASK (3<<23) 3346#define PF_FILTER_MASK (3<<23)
3319#define PF_FILTER_PROGRAMMED (0<<23) 3347#define PF_FILTER_PROGRAMMED (0<<23)
3320#define PF_FILTER_MED_3x3 (1<<23) 3348#define PF_FILTER_MED_3x3 (1<<23)
@@ -3423,15 +3451,13 @@
3423#define ILK_HDCP_DISABLE (1<<25) 3451#define ILK_HDCP_DISABLE (1<<25)
3424#define ILK_eDP_A_DISABLE (1<<24) 3452#define ILK_eDP_A_DISABLE (1<<24)
3425#define ILK_DESKTOP (1<<23) 3453#define ILK_DESKTOP (1<<23)
3426#define ILK_DSPCLK_GATE 0x42020
3427#define IVB_VRHUNIT_CLK_GATE (1<<28)
3428#define ILK_DPARB_CLK_GATE (1<<5)
3429#define ILK_DPFD_CLK_GATE (1<<7)
3430 3454
3431/* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */ 3455#define ILK_DSPCLK_GATE_D 0x42020
3432#define ILK_CLK_FBC (1<<7) 3456#define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28)
3433#define ILK_DPFC_DIS1 (1<<8) 3457#define ILK_DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
3434#define ILK_DPFC_DIS2 (1<<9) 3458#define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
3459#define ILK_DPFDUNIT_CLOCK_GATE_ENABLE (1 << 7)
3460#define ILK_DPARBUNIT_CLOCK_GATE_ENABLE (1 << 5)
3435 3461
3436#define IVB_CHICKEN3 0x4200c 3462#define IVB_CHICKEN3 0x4200c
3437# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5) 3463# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5)
@@ -3447,14 +3473,21 @@
3447 3473
3448#define GEN7_L3CNTLREG1 0xB01C 3474#define GEN7_L3CNTLREG1 0xB01C
3449#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C 3475#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C
3476#define GEN7_L3AGDIS (1<<19)
3450 3477
3451#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030 3478#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
3452#define GEN7_WA_L3_CHICKEN_MODE 0x20000000 3479#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
3453 3480
3481#define GEN7_L3SQCREG4 0xb034
3482#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
3483
3454/* WaCatErrorRejectionIssue */ 3484/* WaCatErrorRejectionIssue */
3455#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030 3485#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
3456#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) 3486#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
3457 3487
3488#define HSW_FUSE_STRAP 0x42014
3489#define HSW_CDCLK_LIMIT (1 << 24)
3490
3458/* PCH */ 3491/* PCH */
3459 3492
3460/* south display engine interrupt: IBX */ 3493/* south display engine interrupt: IBX */
@@ -3686,7 +3719,7 @@
3686#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) 3719#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
3687#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) 3720#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
3688 3721
3689#define VLV_VIDEO_DIP_CTL_A 0x60220 3722#define VLV_VIDEO_DIP_CTL_A 0x60200
3690#define VLV_VIDEO_DIP_DATA_A 0x60208 3723#define VLV_VIDEO_DIP_DATA_A 0x60208
3691#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210 3724#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
3692 3725
@@ -3795,18 +3828,26 @@
3795#define TRANS_6BPC (2<<5) 3828#define TRANS_6BPC (2<<5)
3796#define TRANS_12BPC (3<<5) 3829#define TRANS_12BPC (3<<5)
3797 3830
3831#define _TRANSA_CHICKEN1 0xf0060
3832#define _TRANSB_CHICKEN1 0xf1060
3833#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
3834#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4)
3798#define _TRANSA_CHICKEN2 0xf0064 3835#define _TRANSA_CHICKEN2 0xf0064
3799#define _TRANSB_CHICKEN2 0xf1064 3836#define _TRANSB_CHICKEN2 0xf1064
3800#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2) 3837#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
3801#define TRANS_AUTOTRAIN_GEN_STALL_DIS (1<<31) 3838#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31)
3839
3802 3840
3803#define SOUTH_CHICKEN1 0xc2000 3841#define SOUTH_CHICKEN1 0xc2000
3804#define FDIA_PHASE_SYNC_SHIFT_OVR 19 3842#define FDIA_PHASE_SYNC_SHIFT_OVR 19
3805#define FDIA_PHASE_SYNC_SHIFT_EN 18 3843#define FDIA_PHASE_SYNC_SHIFT_EN 18
3806#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2))) 3844#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
3807#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2))) 3845#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
3846#define FDI_BC_BIFURCATION_SELECT (1 << 12)
3808#define SOUTH_CHICKEN2 0xc2004 3847#define SOUTH_CHICKEN2 0xc2004
3809#define DPLS_EDP_PPS_FIX_DIS (1<<0) 3848#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13)
3849#define FDI_MPHY_IOSFSB_RESET_CTL (1<<12)
3850#define DPLS_EDP_PPS_FIX_DIS (1<<0)
3810 3851
3811#define _FDI_RXA_CHICKEN 0xc200c 3852#define _FDI_RXA_CHICKEN 0xc200c
3812#define _FDI_RXB_CHICKEN 0xc2010 3853#define _FDI_RXB_CHICKEN 0xc2010
@@ -3816,6 +3857,7 @@
3816 3857
3817#define SOUTH_DSPCLK_GATE_D 0xc2020 3858#define SOUTH_DSPCLK_GATE_D 0xc2020
3818#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29) 3859#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
3860#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)
3819 3861
3820/* CPU: FDI_TX */ 3862/* CPU: FDI_TX */
3821#define _FDI_TXA_CTL 0x60100 3863#define _FDI_TXA_CTL 0x60100
@@ -3877,6 +3919,7 @@
3877#define FDI_FS_ERRC_ENABLE (1<<27) 3919#define FDI_FS_ERRC_ENABLE (1<<27)
3878#define FDI_FE_ERRC_ENABLE (1<<26) 3920#define FDI_FE_ERRC_ENABLE (1<<26)
3879#define FDI_DP_PORT_WIDTH_X8 (7<<19) 3921#define FDI_DP_PORT_WIDTH_X8 (7<<19)
3922#define FDI_RX_POLARITY_REVERSED_LPT (1<<16)
3880#define FDI_8BPC (0<<16) 3923#define FDI_8BPC (0<<16)
3881#define FDI_10BPC (1<<16) 3924#define FDI_10BPC (1<<16)
3882#define FDI_6BPC (2<<16) 3925#define FDI_6BPC (2<<16)
@@ -3901,16 +3944,21 @@
3901#define FDI_PORT_WIDTH_2X_LPT (1<<19) 3944#define FDI_PORT_WIDTH_2X_LPT (1<<19)
3902#define FDI_PORT_WIDTH_1X_LPT (0<<19) 3945#define FDI_PORT_WIDTH_1X_LPT (0<<19)
3903 3946
3904#define _FDI_RXA_MISC 0xf0010 3947#define _FDI_RXA_MISC 0xf0010
3905#define _FDI_RXB_MISC 0xf1010 3948#define _FDI_RXB_MISC 0xf1010
3949#define FDI_RX_PWRDN_LANE1_MASK (3<<26)
3950#define FDI_RX_PWRDN_LANE1_VAL(x) ((x)<<26)
3951#define FDI_RX_PWRDN_LANE0_MASK (3<<24)
3952#define FDI_RX_PWRDN_LANE0_VAL(x) ((x)<<24)
3953#define FDI_RX_TP1_TO_TP2_48 (2<<20)
3954#define FDI_RX_TP1_TO_TP2_64 (3<<20)
3955#define FDI_RX_FDI_DELAY_90 (0x90<<0)
3956#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
3957
3906#define _FDI_RXA_TUSIZE1 0xf0030 3958#define _FDI_RXA_TUSIZE1 0xf0030
3907#define _FDI_RXA_TUSIZE2 0xf0038 3959#define _FDI_RXA_TUSIZE2 0xf0038
3908#define _FDI_RXB_TUSIZE1 0xf1030 3960#define _FDI_RXB_TUSIZE1 0xf1030
3909#define _FDI_RXB_TUSIZE2 0xf1038 3961#define _FDI_RXB_TUSIZE2 0xf1038
3910#define FDI_RX_TP1_TO_TP2_48 (2<<20)
3911#define FDI_RX_TP1_TO_TP2_64 (3<<20)
3912#define FDI_RX_FDI_DELAY_90 (0x90<<0)
3913#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
3914#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1) 3962#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
3915#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2) 3963#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
3916 3964
@@ -4003,6 +4051,11 @@
4003#define PANEL_LIGHT_ON_DELAY_SHIFT 0 4051#define PANEL_LIGHT_ON_DELAY_SHIFT 0
4004 4052
4005#define PCH_PP_OFF_DELAYS 0xc720c 4053#define PCH_PP_OFF_DELAYS 0xc720c
4054#define PANEL_POWER_PORT_SELECT_MASK (0x3 << 30)
4055#define PANEL_POWER_PORT_LVDS (0 << 30)
4056#define PANEL_POWER_PORT_DP_A (1 << 30)
4057#define PANEL_POWER_PORT_DP_C (2 << 30)
4058#define PANEL_POWER_PORT_DP_D (3 << 30)
4006#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000) 4059#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
4007#define PANEL_POWER_DOWN_DELAY_SHIFT 16 4060#define PANEL_POWER_DOWN_DELAY_SHIFT 16
4008#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff) 4061#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
@@ -4050,7 +4103,7 @@
4050#define TRANS_DP_CTL_A 0xe0300 4103#define TRANS_DP_CTL_A 0xe0300
4051#define TRANS_DP_CTL_B 0xe1300 4104#define TRANS_DP_CTL_B 0xe1300
4052#define TRANS_DP_CTL_C 0xe2300 4105#define TRANS_DP_CTL_C 0xe2300
4053#define TRANS_DP_CTL(pipe) (TRANS_DP_CTL_A + (pipe) * 0x01000) 4106#define TRANS_DP_CTL(pipe) _PIPE(pipe, TRANS_DP_CTL_A, TRANS_DP_CTL_B)
4054#define TRANS_DP_OUTPUT_ENABLE (1<<31) 4107#define TRANS_DP_OUTPUT_ENABLE (1<<31)
4055#define TRANS_DP_PORT_SEL_B (0<<29) 4108#define TRANS_DP_PORT_SEL_B (0<<29)
4056#define TRANS_DP_PORT_SEL_C (1<<29) 4109#define TRANS_DP_PORT_SEL_C (1<<29)
@@ -4108,6 +4161,8 @@
4108#define FORCEWAKE_ACK_HSW 0x130044 4161#define FORCEWAKE_ACK_HSW 0x130044
4109#define FORCEWAKE_ACK 0x130090 4162#define FORCEWAKE_ACK 0x130090
4110#define FORCEWAKE_MT 0xa188 /* multi-threaded */ 4163#define FORCEWAKE_MT 0xa188 /* multi-threaded */
4164#define FORCEWAKE_KERNEL 0x1
4165#define FORCEWAKE_USER 0x2
4111#define FORCEWAKE_MT_ACK 0x130040 4166#define FORCEWAKE_MT_ACK 0x130040
4112#define ECOBUS 0xa180 4167#define ECOBUS 0xa180
4113#define FORCEWAKE_MT_ENABLE (1<<5) 4168#define FORCEWAKE_MT_ENABLE (1<<5)
@@ -4220,6 +4275,10 @@
4220#define GEN6_READ_OC_PARAMS 0xc 4275#define GEN6_READ_OC_PARAMS 0xc
4221#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8 4276#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
4222#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9 4277#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
4278#define GEN6_PCODE_WRITE_RC6VIDS 0x4
4279#define GEN6_PCODE_READ_RC6VIDS 0x5
4280#define GEN6_ENCODE_RC6_VID(mv) (((mv) / 5) - 245) < 0 ?: 0
4281#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) > 0 ? ((vids) * 5) + 245 : 0)
4223#define GEN6_PCODE_DATA 0x138128 4282#define GEN6_PCODE_DATA 0x138128
4224#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 4283#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
4225 4284
@@ -4251,6 +4310,15 @@
4251#define GEN7_L3LOG_BASE 0xB070 4310#define GEN7_L3LOG_BASE 0xB070
4252#define GEN7_L3LOG_SIZE 0x80 4311#define GEN7_L3LOG_SIZE 0x80
4253 4312
4313#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */
4314#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100
4315#define GEN7_MAX_PS_THREAD_DEP (8<<12)
4316#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
4317
4318#define GEN7_ROW_CHICKEN2 0xe4f4
4319#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
4320#define DOP_CLOCK_GATING_DISABLE (1<<0)
4321
4254#define G4X_AUD_VID_DID 0x62020 4322#define G4X_AUD_VID_DID 0x62020
4255#define INTEL_AUDIO_DEVCL 0x808629FB 4323#define INTEL_AUDIO_DEVCL 0x808629FB
4256#define INTEL_AUDIO_DEVBLC 0x80862801 4324#define INTEL_AUDIO_DEVBLC 0x80862801
@@ -4380,33 +4448,39 @@
4380#define HSW_PWR_WELL_CTL6 0x45414 4448#define HSW_PWR_WELL_CTL6 0x45414
4381 4449
4382/* Per-pipe DDI Function Control */ 4450/* Per-pipe DDI Function Control */
4383#define PIPE_DDI_FUNC_CTL_A 0x60400 4451#define TRANS_DDI_FUNC_CTL_A 0x60400
4384#define PIPE_DDI_FUNC_CTL_B 0x61400 4452#define TRANS_DDI_FUNC_CTL_B 0x61400
4385#define PIPE_DDI_FUNC_CTL_C 0x62400 4453#define TRANS_DDI_FUNC_CTL_C 0x62400
4386#define PIPE_DDI_FUNC_CTL_EDP 0x6F400 4454#define TRANS_DDI_FUNC_CTL_EDP 0x6F400
4387#define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \ 4455#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER(tran, TRANS_DDI_FUNC_CTL_A, \
4388 PIPE_DDI_FUNC_CTL_B) 4456 TRANS_DDI_FUNC_CTL_B)
4389#define PIPE_DDI_FUNC_ENABLE (1<<31) 4457#define TRANS_DDI_FUNC_ENABLE (1<<31)
4390/* Those bits are ignored by pipe EDP since it can only connect to DDI A */ 4458/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
4391#define PIPE_DDI_PORT_MASK (7<<28) 4459#define TRANS_DDI_PORT_MASK (7<<28)
4392#define PIPE_DDI_SELECT_PORT(x) ((x)<<28) 4460#define TRANS_DDI_SELECT_PORT(x) ((x)<<28)
4393#define PIPE_DDI_MODE_SELECT_MASK (7<<24) 4461#define TRANS_DDI_PORT_NONE (0<<28)
4394#define PIPE_DDI_MODE_SELECT_HDMI (0<<24) 4462#define TRANS_DDI_MODE_SELECT_MASK (7<<24)
4395#define PIPE_DDI_MODE_SELECT_DVI (1<<24) 4463#define TRANS_DDI_MODE_SELECT_HDMI (0<<24)
4396#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24) 4464#define TRANS_DDI_MODE_SELECT_DVI (1<<24)
4397#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24) 4465#define TRANS_DDI_MODE_SELECT_DP_SST (2<<24)
4398#define PIPE_DDI_MODE_SELECT_FDI (4<<24) 4466#define TRANS_DDI_MODE_SELECT_DP_MST (3<<24)
4399#define PIPE_DDI_BPC_MASK (7<<20) 4467#define TRANS_DDI_MODE_SELECT_FDI (4<<24)
4400#define PIPE_DDI_BPC_8 (0<<20) 4468#define TRANS_DDI_BPC_MASK (7<<20)
4401#define PIPE_DDI_BPC_10 (1<<20) 4469#define TRANS_DDI_BPC_8 (0<<20)
4402#define PIPE_DDI_BPC_6 (2<<20) 4470#define TRANS_DDI_BPC_10 (1<<20)
4403#define PIPE_DDI_BPC_12 (3<<20) 4471#define TRANS_DDI_BPC_6 (2<<20)
4404#define PIPE_DDI_PVSYNC (1<<17) 4472#define TRANS_DDI_BPC_12 (3<<20)
4405#define PIPE_DDI_PHSYNC (1<<16) 4473#define TRANS_DDI_PVSYNC (1<<17)
4406#define PIPE_DDI_BFI_ENABLE (1<<4) 4474#define TRANS_DDI_PHSYNC (1<<16)
4407#define PIPE_DDI_PORT_WIDTH_X1 (0<<1) 4475#define TRANS_DDI_EDP_INPUT_MASK (7<<12)
4408#define PIPE_DDI_PORT_WIDTH_X2 (1<<1) 4476#define TRANS_DDI_EDP_INPUT_A_ON (0<<12)
4409#define PIPE_DDI_PORT_WIDTH_X4 (3<<1) 4477#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12)
4478#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
4479#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
4480#define TRANS_DDI_BFI_ENABLE (1<<4)
4481#define TRANS_DDI_PORT_WIDTH_X1 (0<<1)
4482#define TRANS_DDI_PORT_WIDTH_X2 (1<<1)
4483#define TRANS_DDI_PORT_WIDTH_X4 (3<<1)
4410 4484
4411/* DisplayPort Transport Control */ 4485/* DisplayPort Transport Control */
4412#define DP_TP_CTL_A 0x64040 4486#define DP_TP_CTL_A 0x64040
@@ -4420,12 +4494,16 @@
4420#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8) 4494#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
4421#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8) 4495#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
4422#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8) 4496#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
4497#define DP_TP_CTL_LINK_TRAIN_PAT3 (4<<8)
4498#define DP_TP_CTL_LINK_TRAIN_IDLE (2<<8)
4423#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8) 4499#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
4500#define DP_TP_CTL_SCRAMBLE_DISABLE (1<<7)
4424 4501
4425/* DisplayPort Transport Status */ 4502/* DisplayPort Transport Status */
4426#define DP_TP_STATUS_A 0x64044 4503#define DP_TP_STATUS_A 0x64044
4427#define DP_TP_STATUS_B 0x64144 4504#define DP_TP_STATUS_B 0x64144
4428#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B) 4505#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
4506#define DP_TP_STATUS_IDLE_DONE (1<<25)
4429#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12) 4507#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
4430 4508
4431/* DDI Buffer Control */ 4509/* DDI Buffer Control */
@@ -4444,6 +4522,7 @@
4444#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */ 4522#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
4445#define DDI_BUF_EMP_MASK (0xf<<24) 4523#define DDI_BUF_EMP_MASK (0xf<<24)
4446#define DDI_BUF_IS_IDLE (1<<7) 4524#define DDI_BUF_IS_IDLE (1<<7)
4525#define DDI_A_4_LANES (1<<4)
4447#define DDI_PORT_WIDTH_X1 (0<<1) 4526#define DDI_PORT_WIDTH_X1 (0<<1)
4448#define DDI_PORT_WIDTH_X2 (1<<1) 4527#define DDI_PORT_WIDTH_X2 (1<<1)
4449#define DDI_PORT_WIDTH_X4 (3<<1) 4528#define DDI_PORT_WIDTH_X4 (3<<1)
@@ -4460,6 +4539,10 @@
4460#define SBI_ADDR 0xC6000 4539#define SBI_ADDR 0xC6000
4461#define SBI_DATA 0xC6004 4540#define SBI_DATA 0xC6004
4462#define SBI_CTL_STAT 0xC6008 4541#define SBI_CTL_STAT 0xC6008
4542#define SBI_CTL_DEST_ICLK (0x0<<16)
4543#define SBI_CTL_DEST_MPHY (0x1<<16)
4544#define SBI_CTL_OP_IORD (0x2<<8)
4545#define SBI_CTL_OP_IOWR (0x3<<8)
4463#define SBI_CTL_OP_CRRD (0x6<<8) 4546#define SBI_CTL_OP_CRRD (0x6<<8)
4464#define SBI_CTL_OP_CRWR (0x7<<8) 4547#define SBI_CTL_OP_CRWR (0x7<<8)
4465#define SBI_RESPONSE_FAIL (0x1<<1) 4548#define SBI_RESPONSE_FAIL (0x1<<1)
@@ -4477,10 +4560,12 @@
4477#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0) 4560#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
4478#define SBI_SSCCTL 0x020c 4561#define SBI_SSCCTL 0x020c
4479#define SBI_SSCCTL6 0x060C 4562#define SBI_SSCCTL6 0x060C
4563#define SBI_SSCCTL_PATHALT (1<<3)
4480#define SBI_SSCCTL_DISABLE (1<<0) 4564#define SBI_SSCCTL_DISABLE (1<<0)
4481#define SBI_SSCAUXDIV6 0x0610 4565#define SBI_SSCAUXDIV6 0x0610
4482#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4) 4566#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
4483#define SBI_DBUFF0 0x2a00 4567#define SBI_DBUFF0 0x2a00
4568#define SBI_DBUFF0_ENABLE (1<<0)
4484 4569
4485/* LPT PIXCLK_GATE */ 4570/* LPT PIXCLK_GATE */
4486#define PIXCLK_GATE 0xC6020 4571#define PIXCLK_GATE 0xC6020
@@ -4490,8 +4575,8 @@
4490/* SPLL */ 4575/* SPLL */
4491#define SPLL_CTL 0x46020 4576#define SPLL_CTL 0x46020
4492#define SPLL_PLL_ENABLE (1<<31) 4577#define SPLL_PLL_ENABLE (1<<31)
4493#define SPLL_PLL_SCC (1<<28) 4578#define SPLL_PLL_SSC (1<<28)
4494#define SPLL_PLL_NON_SCC (2<<28) 4579#define SPLL_PLL_NON_SSC (2<<28)
4495#define SPLL_PLL_FREQ_810MHz (0<<26) 4580#define SPLL_PLL_FREQ_810MHz (0<<26)
4496#define SPLL_PLL_FREQ_1350MHz (1<<26) 4581#define SPLL_PLL_FREQ_1350MHz (1<<26)
4497 4582
@@ -4500,7 +4585,7 @@
4500#define WRPLL_CTL2 0x46060 4585#define WRPLL_CTL2 0x46060
4501#define WRPLL_PLL_ENABLE (1<<31) 4586#define WRPLL_PLL_ENABLE (1<<31)
4502#define WRPLL_PLL_SELECT_SSC (0x01<<28) 4587#define WRPLL_PLL_SELECT_SSC (0x01<<28)
4503#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28) 4588#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28)
4504#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28) 4589#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
4505/* WRPLL divider programming */ 4590/* WRPLL divider programming */
4506#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0) 4591#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
@@ -4517,21 +4602,36 @@
4517#define PORT_CLK_SEL_SPLL (3<<29) 4602#define PORT_CLK_SEL_SPLL (3<<29)
4518#define PORT_CLK_SEL_WRPLL1 (4<<29) 4603#define PORT_CLK_SEL_WRPLL1 (4<<29)
4519#define PORT_CLK_SEL_WRPLL2 (5<<29) 4604#define PORT_CLK_SEL_WRPLL2 (5<<29)
4520 4605#define PORT_CLK_SEL_NONE (7<<29)
4521/* Pipe clock selection */ 4606
4522#define PIPE_CLK_SEL_A 0x46140 4607/* Transcoder clock selection */
4523#define PIPE_CLK_SEL_B 0x46144 4608#define TRANS_CLK_SEL_A 0x46140
4524#define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B) 4609#define TRANS_CLK_SEL_B 0x46144
4525/* For each pipe, we need to select the corresponding port clock */ 4610#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B)
4526#define PIPE_CLK_SEL_DISABLED (0x0<<29) 4611/* For each transcoder, we need to select the corresponding port clock */
4527#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29) 4612#define TRANS_CLK_SEL_DISABLED (0x0<<29)
4613#define TRANS_CLK_SEL_PORT(x) ((x+1)<<29)
4614
4615#define _TRANSA_MSA_MISC 0x60410
4616#define _TRANSB_MSA_MISC 0x61410
4617#define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \
4618 _TRANSB_MSA_MISC)
4619#define TRANS_MSA_SYNC_CLK (1<<0)
4620#define TRANS_MSA_6_BPC (0<<5)
4621#define TRANS_MSA_8_BPC (1<<5)
4622#define TRANS_MSA_10_BPC (2<<5)
4623#define TRANS_MSA_12_BPC (3<<5)
4624#define TRANS_MSA_16_BPC (4<<5)
4528 4625
4529/* LCPLL Control */ 4626/* LCPLL Control */
4530#define LCPLL_CTL 0x130040 4627#define LCPLL_CTL 0x130040
4531#define LCPLL_PLL_DISABLE (1<<31) 4628#define LCPLL_PLL_DISABLE (1<<31)
4532#define LCPLL_PLL_LOCK (1<<30) 4629#define LCPLL_PLL_LOCK (1<<30)
4630#define LCPLL_CLK_FREQ_MASK (3<<26)
4631#define LCPLL_CLK_FREQ_450 (0<<26)
4533#define LCPLL_CD_CLOCK_DISABLE (1<<25) 4632#define LCPLL_CD_CLOCK_DISABLE (1<<25)
4534#define LCPLL_CD2X_CLOCK_DISABLE (1<<23) 4633#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
4634#define LCPLL_CD_SOURCE_FCLK (1<<21)
4535 4635
4536/* Pipe WM_LINETIME - watermark line time */ 4636/* Pipe WM_LINETIME - watermark line time */
4537#define PIPE_WM_LINETIME_A 0x45270 4637#define PIPE_WM_LINETIME_A 0x45270
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 5854bddb1e9f..63d4d30c39de 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -60,9 +60,9 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
60 reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B; 60 reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
61 61
62 if (pipe == PIPE_A) 62 if (pipe == PIPE_A)
63 array = dev_priv->save_palette_a; 63 array = dev_priv->regfile.save_palette_a;
64 else 64 else
65 array = dev_priv->save_palette_b; 65 array = dev_priv->regfile.save_palette_b;
66 66
67 for (i = 0; i < 256; i++) 67 for (i = 0; i < 256; i++)
68 array[i] = I915_READ(reg + (i << 2)); 68 array[i] = I915_READ(reg + (i << 2));
@@ -82,9 +82,9 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
82 reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B; 82 reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
83 83
84 if (pipe == PIPE_A) 84 if (pipe == PIPE_A)
85 array = dev_priv->save_palette_a; 85 array = dev_priv->regfile.save_palette_a;
86 else 86 else
87 array = dev_priv->save_palette_b; 87 array = dev_priv->regfile.save_palette_b;
88 88
89 for (i = 0; i < 256; i++) 89 for (i = 0; i < 256; i++)
90 I915_WRITE(reg + (i << 2), array[i]); 90 I915_WRITE(reg + (i << 2), array[i]);
@@ -131,11 +131,11 @@ static void i915_save_vga(struct drm_device *dev)
131 u16 cr_index, cr_data, st01; 131 u16 cr_index, cr_data, st01;
132 132
133 /* VGA color palette registers */ 133 /* VGA color palette registers */
134 dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK); 134 dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK);
135 135
136 /* MSR bits */ 136 /* MSR bits */
137 dev_priv->saveMSR = I915_READ8(VGA_MSR_READ); 137 dev_priv->regfile.saveMSR = I915_READ8(VGA_MSR_READ);
138 if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { 138 if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
139 cr_index = VGA_CR_INDEX_CGA; 139 cr_index = VGA_CR_INDEX_CGA;
140 cr_data = VGA_CR_DATA_CGA; 140 cr_data = VGA_CR_DATA_CGA;
141 st01 = VGA_ST01_CGA; 141 st01 = VGA_ST01_CGA;
@@ -150,35 +150,35 @@ static void i915_save_vga(struct drm_device *dev)
150 i915_read_indexed(dev, cr_index, cr_data, 0x11) & 150 i915_read_indexed(dev, cr_index, cr_data, 0x11) &
151 (~0x80)); 151 (~0x80));
152 for (i = 0; i <= 0x24; i++) 152 for (i = 0; i <= 0x24; i++)
153 dev_priv->saveCR[i] = 153 dev_priv->regfile.saveCR[i] =
154 i915_read_indexed(dev, cr_index, cr_data, i); 154 i915_read_indexed(dev, cr_index, cr_data, i);
155 /* Make sure we don't turn off CR group 0 writes */ 155 /* Make sure we don't turn off CR group 0 writes */
156 dev_priv->saveCR[0x11] &= ~0x80; 156 dev_priv->regfile.saveCR[0x11] &= ~0x80;
157 157
158 /* Attribute controller registers */ 158 /* Attribute controller registers */
159 I915_READ8(st01); 159 I915_READ8(st01);
160 dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX); 160 dev_priv->regfile.saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
161 for (i = 0; i <= 0x14; i++) 161 for (i = 0; i <= 0x14; i++)
162 dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0); 162 dev_priv->regfile.saveAR[i] = i915_read_ar(dev, st01, i, 0);
163 I915_READ8(st01); 163 I915_READ8(st01);
164 I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX); 164 I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX);
165 I915_READ8(st01); 165 I915_READ8(st01);
166 166
167 /* Graphics controller registers */ 167 /* Graphics controller registers */
168 for (i = 0; i < 9; i++) 168 for (i = 0; i < 9; i++)
169 dev_priv->saveGR[i] = 169 dev_priv->regfile.saveGR[i] =
170 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i); 170 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
171 171
172 dev_priv->saveGR[0x10] = 172 dev_priv->regfile.saveGR[0x10] =
173 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10); 173 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
174 dev_priv->saveGR[0x11] = 174 dev_priv->regfile.saveGR[0x11] =
175 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11); 175 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
176 dev_priv->saveGR[0x18] = 176 dev_priv->regfile.saveGR[0x18] =
177 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18); 177 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
178 178
179 /* Sequencer registers */ 179 /* Sequencer registers */
180 for (i = 0; i < 8; i++) 180 for (i = 0; i < 8; i++)
181 dev_priv->saveSR[i] = 181 dev_priv->regfile.saveSR[i] =
182 i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i); 182 i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
183} 183}
184 184
@@ -189,8 +189,8 @@ static void i915_restore_vga(struct drm_device *dev)
189 u16 cr_index, cr_data, st01; 189 u16 cr_index, cr_data, st01;
190 190
191 /* MSR bits */ 191 /* MSR bits */
192 I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR); 192 I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR);
193 if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { 193 if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
194 cr_index = VGA_CR_INDEX_CGA; 194 cr_index = VGA_CR_INDEX_CGA;
195 cr_data = VGA_CR_DATA_CGA; 195 cr_data = VGA_CR_DATA_CGA;
196 st01 = VGA_ST01_CGA; 196 st01 = VGA_ST01_CGA;
@@ -203,36 +203,36 @@ static void i915_restore_vga(struct drm_device *dev)
203 /* Sequencer registers, don't write SR07 */ 203 /* Sequencer registers, don't write SR07 */
204 for (i = 0; i < 7; i++) 204 for (i = 0; i < 7; i++)
205 i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i, 205 i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
206 dev_priv->saveSR[i]); 206 dev_priv->regfile.saveSR[i]);
207 207
208 /* CRT controller regs */ 208 /* CRT controller regs */
209 /* Enable CR group 0 writes */ 209 /* Enable CR group 0 writes */
210 i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]); 210 i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->regfile.saveCR[0x11]);
211 for (i = 0; i <= 0x24; i++) 211 for (i = 0; i <= 0x24; i++)
212 i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]); 212 i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->regfile.saveCR[i]);
213 213
214 /* Graphics controller regs */ 214 /* Graphics controller regs */
215 for (i = 0; i < 9; i++) 215 for (i = 0; i < 9; i++)
216 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i, 216 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
217 dev_priv->saveGR[i]); 217 dev_priv->regfile.saveGR[i]);
218 218
219 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10, 219 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
220 dev_priv->saveGR[0x10]); 220 dev_priv->regfile.saveGR[0x10]);
221 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11, 221 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
222 dev_priv->saveGR[0x11]); 222 dev_priv->regfile.saveGR[0x11]);
223 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18, 223 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
224 dev_priv->saveGR[0x18]); 224 dev_priv->regfile.saveGR[0x18]);
225 225
226 /* Attribute controller registers */ 226 /* Attribute controller registers */
227 I915_READ8(st01); /* switch back to index mode */ 227 I915_READ8(st01); /* switch back to index mode */
228 for (i = 0; i <= 0x14; i++) 228 for (i = 0; i <= 0x14; i++)
229 i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0); 229 i915_write_ar(dev, st01, i, dev_priv->regfile.saveAR[i], 0);
230 I915_READ8(st01); /* switch back to index mode */ 230 I915_READ8(st01); /* switch back to index mode */
231 I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20); 231 I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX | 0x20);
232 I915_READ8(st01); 232 I915_READ8(st01);
233 233
234 /* VGA color palette registers */ 234 /* VGA color palette registers */
235 I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK); 235 I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK);
236} 236}
237 237
238static void i915_save_modeset_reg(struct drm_device *dev) 238static void i915_save_modeset_reg(struct drm_device *dev)
@@ -244,156 +244,162 @@ static void i915_save_modeset_reg(struct drm_device *dev)
244 return; 244 return;
245 245
246 /* Cursor state */ 246 /* Cursor state */
247 dev_priv->saveCURACNTR = I915_READ(_CURACNTR); 247 dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
248 dev_priv->saveCURAPOS = I915_READ(_CURAPOS); 248 dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
249 dev_priv->saveCURABASE = I915_READ(_CURABASE); 249 dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
250 dev_priv->saveCURBCNTR = I915_READ(_CURBCNTR); 250 dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
251 dev_priv->saveCURBPOS = I915_READ(_CURBPOS); 251 dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
252 dev_priv->saveCURBBASE = I915_READ(_CURBBASE); 252 dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
253 if (IS_GEN2(dev)) 253 if (IS_GEN2(dev))
254 dev_priv->saveCURSIZE = I915_READ(CURSIZE); 254 dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
255 255
256 if (HAS_PCH_SPLIT(dev)) { 256 if (HAS_PCH_SPLIT(dev)) {
257 dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); 257 dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
258 dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); 258 dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
259 } 259 }
260 260
261 /* Pipe & plane A info */ 261 /* Pipe & plane A info */
262 dev_priv->savePIPEACONF = I915_READ(_PIPEACONF); 262 dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
263 dev_priv->savePIPEASRC = I915_READ(_PIPEASRC); 263 dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
264 if (HAS_PCH_SPLIT(dev)) { 264 if (HAS_PCH_SPLIT(dev)) {
265 dev_priv->saveFPA0 = I915_READ(_PCH_FPA0); 265 dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
266 dev_priv->saveFPA1 = I915_READ(_PCH_FPA1); 266 dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
267 dev_priv->saveDPLL_A = I915_READ(_PCH_DPLL_A); 267 dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
268 } else { 268 } else {
269 dev_priv->saveFPA0 = I915_READ(_FPA0); 269 dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
270 dev_priv->saveFPA1 = I915_READ(_FPA1); 270 dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
271 dev_priv->saveDPLL_A = I915_READ(_DPLL_A); 271 dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
272 } 272 }
273 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) 273 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
274 dev_priv->saveDPLL_A_MD = I915_READ(_DPLL_A_MD); 274 dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
275 dev_priv->saveHTOTAL_A = I915_READ(_HTOTAL_A); 275 dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
276 dev_priv->saveHBLANK_A = I915_READ(_HBLANK_A); 276 dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
277 dev_priv->saveHSYNC_A = I915_READ(_HSYNC_A); 277 dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
278 dev_priv->saveVTOTAL_A = I915_READ(_VTOTAL_A); 278 dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
279 dev_priv->saveVBLANK_A = I915_READ(_VBLANK_A); 279 dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
280 dev_priv->saveVSYNC_A = I915_READ(_VSYNC_A); 280 dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
281 if (!HAS_PCH_SPLIT(dev)) 281 if (!HAS_PCH_SPLIT(dev))
282 dev_priv->saveBCLRPAT_A = I915_READ(_BCLRPAT_A); 282 dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
283 283
284 if (HAS_PCH_SPLIT(dev)) { 284 if (HAS_PCH_SPLIT(dev)) {
285 dev_priv->savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1); 285 dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
286 dev_priv->savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1); 286 dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
287 dev_priv->savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1); 287 dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
288 dev_priv->savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1); 288 dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
289 289
290 dev_priv->saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL); 290 dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
291 dev_priv->saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL); 291 dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
292 292
293 dev_priv->savePFA_CTL_1 = I915_READ(_PFA_CTL_1); 293 dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
294 dev_priv->savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ); 294 dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
295 dev_priv->savePFA_WIN_POS = I915_READ(_PFA_WIN_POS); 295 dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
296 296
297 dev_priv->saveTRANSACONF = I915_READ(_TRANSACONF); 297 dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF);
298 dev_priv->saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A); 298 dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
299 dev_priv->saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A); 299 dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
300 dev_priv->saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A); 300 dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
301 dev_priv->saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A); 301 dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
302 dev_priv->saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A); 302 dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
303 dev_priv->saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A); 303 dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
304 } 304 }
305 305
306 dev_priv->saveDSPACNTR = I915_READ(_DSPACNTR); 306 dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
307 dev_priv->saveDSPASTRIDE = I915_READ(_DSPASTRIDE); 307 dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
308 dev_priv->saveDSPASIZE = I915_READ(_DSPASIZE); 308 dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
309 dev_priv->saveDSPAPOS = I915_READ(_DSPAPOS); 309 dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
310 dev_priv->saveDSPAADDR = I915_READ(_DSPAADDR); 310 dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
311 if (INTEL_INFO(dev)->gen >= 4) { 311 if (INTEL_INFO(dev)->gen >= 4) {
312 dev_priv->saveDSPASURF = I915_READ(_DSPASURF); 312 dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
313 dev_priv->saveDSPATILEOFF = I915_READ(_DSPATILEOFF); 313 dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
314 } 314 }
315 i915_save_palette(dev, PIPE_A); 315 i915_save_palette(dev, PIPE_A);
316 dev_priv->savePIPEASTAT = I915_READ(_PIPEASTAT); 316 dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
317 317
318 /* Pipe & plane B info */ 318 /* Pipe & plane B info */
319 dev_priv->savePIPEBCONF = I915_READ(_PIPEBCONF); 319 dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
320 dev_priv->savePIPEBSRC = I915_READ(_PIPEBSRC); 320 dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
321 if (HAS_PCH_SPLIT(dev)) { 321 if (HAS_PCH_SPLIT(dev)) {
322 dev_priv->saveFPB0 = I915_READ(_PCH_FPB0); 322 dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
323 dev_priv->saveFPB1 = I915_READ(_PCH_FPB1); 323 dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
324 dev_priv->saveDPLL_B = I915_READ(_PCH_DPLL_B); 324 dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
325 } else { 325 } else {
326 dev_priv->saveFPB0 = I915_READ(_FPB0); 326 dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
327 dev_priv->saveFPB1 = I915_READ(_FPB1); 327 dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
328 dev_priv->saveDPLL_B = I915_READ(_DPLL_B); 328 dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
329 } 329 }
330 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) 330 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
331 dev_priv->saveDPLL_B_MD = I915_READ(_DPLL_B_MD); 331 dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
332 dev_priv->saveHTOTAL_B = I915_READ(_HTOTAL_B); 332 dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
333 dev_priv->saveHBLANK_B = I915_READ(_HBLANK_B); 333 dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
334 dev_priv->saveHSYNC_B = I915_READ(_HSYNC_B); 334 dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
335 dev_priv->saveVTOTAL_B = I915_READ(_VTOTAL_B); 335 dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
336 dev_priv->saveVBLANK_B = I915_READ(_VBLANK_B); 336 dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
337 dev_priv->saveVSYNC_B = I915_READ(_VSYNC_B); 337 dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
338 if (!HAS_PCH_SPLIT(dev)) 338 if (!HAS_PCH_SPLIT(dev))
339 dev_priv->saveBCLRPAT_B = I915_READ(_BCLRPAT_B); 339 dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
340 340
341 if (HAS_PCH_SPLIT(dev)) { 341 if (HAS_PCH_SPLIT(dev)) {
342 dev_priv->savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1); 342 dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
343 dev_priv->savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1); 343 dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
344 dev_priv->savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1); 344 dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
345 dev_priv->savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1); 345 dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
346 346
347 dev_priv->saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL); 347 dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
348 dev_priv->saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL); 348 dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
349 349
350 dev_priv->savePFB_CTL_1 = I915_READ(_PFB_CTL_1); 350 dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
351 dev_priv->savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ); 351 dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
352 dev_priv->savePFB_WIN_POS = I915_READ(_PFB_WIN_POS); 352 dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
353 353
354 dev_priv->saveTRANSBCONF = I915_READ(_TRANSBCONF); 354 dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF);
355 dev_priv->saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B); 355 dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
356 dev_priv->saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B); 356 dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
357 dev_priv->saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B); 357 dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
358 dev_priv->saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B); 358 dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
359 dev_priv->saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B); 359 dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
360 dev_priv->saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B); 360 dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
361 } 361 }
362 362
363 dev_priv->saveDSPBCNTR = I915_READ(_DSPBCNTR); 363 dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
364 dev_priv->saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE); 364 dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
365 dev_priv->saveDSPBSIZE = I915_READ(_DSPBSIZE); 365 dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
366 dev_priv->saveDSPBPOS = I915_READ(_DSPBPOS); 366 dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
367 dev_priv->saveDSPBADDR = I915_READ(_DSPBADDR); 367 dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
368 if (INTEL_INFO(dev)->gen >= 4) { 368 if (INTEL_INFO(dev)->gen >= 4) {
369 dev_priv->saveDSPBSURF = I915_READ(_DSPBSURF); 369 dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
370 dev_priv->saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF); 370 dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
371 } 371 }
372 i915_save_palette(dev, PIPE_B); 372 i915_save_palette(dev, PIPE_B);
373 dev_priv->savePIPEBSTAT = I915_READ(_PIPEBSTAT); 373 dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
374 374
375 /* Fences */ 375 /* Fences */
376 switch (INTEL_INFO(dev)->gen) { 376 switch (INTEL_INFO(dev)->gen) {
377 case 7: 377 case 7:
378 case 6: 378 case 6:
379 for (i = 0; i < 16; i++) 379 for (i = 0; i < 16; i++)
380 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); 380 dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
381 break; 381 break;
382 case 5: 382 case 5:
383 case 4: 383 case 4:
384 for (i = 0; i < 16; i++) 384 for (i = 0; i < 16; i++)
385 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); 385 dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
386 break; 386 break;
387 case 3: 387 case 3:
388 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 388 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
389 for (i = 0; i < 8; i++) 389 for (i = 0; i < 8; i++)
390 dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); 390 dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
391 case 2: 391 case 2:
392 for (i = 0; i < 8; i++) 392 for (i = 0; i < 8; i++)
393 dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); 393 dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
394 break; 394 break;
395 } 395 }
396 396
397 /* CRT state */
398 if (HAS_PCH_SPLIT(dev))
399 dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
400 else
401 dev_priv->regfile.saveADPA = I915_READ(ADPA);
402
397 return; 403 return;
398} 404}
399 405
@@ -412,20 +418,20 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
412 case 7: 418 case 7:
413 case 6: 419 case 6:
414 for (i = 0; i < 16; i++) 420 for (i = 0; i < 16; i++)
415 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]); 421 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
416 break; 422 break;
417 case 5: 423 case 5:
418 case 4: 424 case 4:
419 for (i = 0; i < 16; i++) 425 for (i = 0; i < 16; i++)
420 I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); 426 I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
421 break; 427 break;
422 case 3: 428 case 3:
423 case 2: 429 case 2:
424 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 430 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
425 for (i = 0; i < 8; i++) 431 for (i = 0; i < 8; i++)
426 I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); 432 I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
427 for (i = 0; i < 8; i++) 433 for (i = 0; i < 8; i++)
428 I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); 434 I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
429 break; 435 break;
430 } 436 }
431 437
@@ -447,158 +453,164 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
447 } 453 }
448 454
449 if (HAS_PCH_SPLIT(dev)) { 455 if (HAS_PCH_SPLIT(dev)) {
450 I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL); 456 I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
451 I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL); 457 I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
452 } 458 }
453 459
454 /* Pipe & plane A info */ 460 /* Pipe & plane A info */
455 /* Prime the clock */ 461 /* Prime the clock */
456 if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { 462 if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
457 I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A & 463 I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
458 ~DPLL_VCO_ENABLE); 464 ~DPLL_VCO_ENABLE);
459 POSTING_READ(dpll_a_reg); 465 POSTING_READ(dpll_a_reg);
460 udelay(150); 466 udelay(150);
461 } 467 }
462 I915_WRITE(fpa0_reg, dev_priv->saveFPA0); 468 I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
463 I915_WRITE(fpa1_reg, dev_priv->saveFPA1); 469 I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
464 /* Actually enable it */ 470 /* Actually enable it */
465 I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A); 471 I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
466 POSTING_READ(dpll_a_reg); 472 POSTING_READ(dpll_a_reg);
467 udelay(150); 473 udelay(150);
468 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { 474 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
469 I915_WRITE(_DPLL_A_MD, dev_priv->saveDPLL_A_MD); 475 I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
470 POSTING_READ(_DPLL_A_MD); 476 POSTING_READ(_DPLL_A_MD);
471 } 477 }
472 udelay(150); 478 udelay(150);
473 479
474 /* Restore mode */ 480 /* Restore mode */
475 I915_WRITE(_HTOTAL_A, dev_priv->saveHTOTAL_A); 481 I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
476 I915_WRITE(_HBLANK_A, dev_priv->saveHBLANK_A); 482 I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
477 I915_WRITE(_HSYNC_A, dev_priv->saveHSYNC_A); 483 I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
478 I915_WRITE(_VTOTAL_A, dev_priv->saveVTOTAL_A); 484 I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
479 I915_WRITE(_VBLANK_A, dev_priv->saveVBLANK_A); 485 I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
480 I915_WRITE(_VSYNC_A, dev_priv->saveVSYNC_A); 486 I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
481 if (!HAS_PCH_SPLIT(dev)) 487 if (!HAS_PCH_SPLIT(dev))
482 I915_WRITE(_BCLRPAT_A, dev_priv->saveBCLRPAT_A); 488 I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
483 489
484 if (HAS_PCH_SPLIT(dev)) { 490 if (HAS_PCH_SPLIT(dev)) {
485 I915_WRITE(_PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1); 491 I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
486 I915_WRITE(_PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1); 492 I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
487 I915_WRITE(_PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1); 493 I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
488 I915_WRITE(_PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1); 494 I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
489 495
490 I915_WRITE(_FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL); 496 I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
491 I915_WRITE(_FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL); 497 I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
492 498
493 I915_WRITE(_PFA_CTL_1, dev_priv->savePFA_CTL_1); 499 I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
494 I915_WRITE(_PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ); 500 I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
495 I915_WRITE(_PFA_WIN_POS, dev_priv->savePFA_WIN_POS); 501 I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
496 502
497 I915_WRITE(_TRANSACONF, dev_priv->saveTRANSACONF); 503 I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
498 I915_WRITE(_TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A); 504 I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
499 I915_WRITE(_TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A); 505 I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
500 I915_WRITE(_TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A); 506 I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
501 I915_WRITE(_TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A); 507 I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
502 I915_WRITE(_TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A); 508 I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
503 I915_WRITE(_TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A); 509 I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
504 } 510 }
505 511
506 /* Restore plane info */ 512 /* Restore plane info */
507 I915_WRITE(_DSPASIZE, dev_priv->saveDSPASIZE); 513 I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
508 I915_WRITE(_DSPAPOS, dev_priv->saveDSPAPOS); 514 I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
509 I915_WRITE(_PIPEASRC, dev_priv->savePIPEASRC); 515 I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
510 I915_WRITE(_DSPAADDR, dev_priv->saveDSPAADDR); 516 I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
511 I915_WRITE(_DSPASTRIDE, dev_priv->saveDSPASTRIDE); 517 I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
512 if (INTEL_INFO(dev)->gen >= 4) { 518 if (INTEL_INFO(dev)->gen >= 4) {
513 I915_WRITE(_DSPASURF, dev_priv->saveDSPASURF); 519 I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
514 I915_WRITE(_DSPATILEOFF, dev_priv->saveDSPATILEOFF); 520 I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
515 } 521 }
516 522
517 I915_WRITE(_PIPEACONF, dev_priv->savePIPEACONF); 523 I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
518 524
519 i915_restore_palette(dev, PIPE_A); 525 i915_restore_palette(dev, PIPE_A);
520 /* Enable the plane */ 526 /* Enable the plane */
521 I915_WRITE(_DSPACNTR, dev_priv->saveDSPACNTR); 527 I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
522 I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR)); 528 I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
523 529
524 /* Pipe & plane B info */ 530 /* Pipe & plane B info */
525 if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { 531 if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
526 I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B & 532 I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
527 ~DPLL_VCO_ENABLE); 533 ~DPLL_VCO_ENABLE);
528 POSTING_READ(dpll_b_reg); 534 POSTING_READ(dpll_b_reg);
529 udelay(150); 535 udelay(150);
530 } 536 }
531 I915_WRITE(fpb0_reg, dev_priv->saveFPB0); 537 I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
532 I915_WRITE(fpb1_reg, dev_priv->saveFPB1); 538 I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
533 /* Actually enable it */ 539 /* Actually enable it */
534 I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B); 540 I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
535 POSTING_READ(dpll_b_reg); 541 POSTING_READ(dpll_b_reg);
536 udelay(150); 542 udelay(150);
537 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { 543 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
538 I915_WRITE(_DPLL_B_MD, dev_priv->saveDPLL_B_MD); 544 I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
539 POSTING_READ(_DPLL_B_MD); 545 POSTING_READ(_DPLL_B_MD);
540 } 546 }
541 udelay(150); 547 udelay(150);
542 548
543 /* Restore mode */ 549 /* Restore mode */
544 I915_WRITE(_HTOTAL_B, dev_priv->saveHTOTAL_B); 550 I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
545 I915_WRITE(_HBLANK_B, dev_priv->saveHBLANK_B); 551 I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
546 I915_WRITE(_HSYNC_B, dev_priv->saveHSYNC_B); 552 I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
547 I915_WRITE(_VTOTAL_B, dev_priv->saveVTOTAL_B); 553 I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
548 I915_WRITE(_VBLANK_B, dev_priv->saveVBLANK_B); 554 I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
549 I915_WRITE(_VSYNC_B, dev_priv->saveVSYNC_B); 555 I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
550 if (!HAS_PCH_SPLIT(dev)) 556 if (!HAS_PCH_SPLIT(dev))
551 I915_WRITE(_BCLRPAT_B, dev_priv->saveBCLRPAT_B); 557 I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
552 558
553 if (HAS_PCH_SPLIT(dev)) { 559 if (HAS_PCH_SPLIT(dev)) {
554 I915_WRITE(_PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1); 560 I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
555 I915_WRITE(_PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1); 561 I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
556 I915_WRITE(_PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1); 562 I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
557 I915_WRITE(_PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1); 563 I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
558 564
559 I915_WRITE(_FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL); 565 I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
560 I915_WRITE(_FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL); 566 I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
561 567
562 I915_WRITE(_PFB_CTL_1, dev_priv->savePFB_CTL_1); 568 I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
563 I915_WRITE(_PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ); 569 I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
564 I915_WRITE(_PFB_WIN_POS, dev_priv->savePFB_WIN_POS); 570 I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
565 571
566 I915_WRITE(_TRANSBCONF, dev_priv->saveTRANSBCONF); 572 I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
567 I915_WRITE(_TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B); 573 I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
568 I915_WRITE(_TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B); 574 I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
569 I915_WRITE(_TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B); 575 I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
570 I915_WRITE(_TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B); 576 I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
571 I915_WRITE(_TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B); 577 I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
572 I915_WRITE(_TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B); 578 I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
573 } 579 }
574 580
575 /* Restore plane info */ 581 /* Restore plane info */
576 I915_WRITE(_DSPBSIZE, dev_priv->saveDSPBSIZE); 582 I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
577 I915_WRITE(_DSPBPOS, dev_priv->saveDSPBPOS); 583 I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
578 I915_WRITE(_PIPEBSRC, dev_priv->savePIPEBSRC); 584 I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
579 I915_WRITE(_DSPBADDR, dev_priv->saveDSPBADDR); 585 I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
580 I915_WRITE(_DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); 586 I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
581 if (INTEL_INFO(dev)->gen >= 4) { 587 if (INTEL_INFO(dev)->gen >= 4) {
582 I915_WRITE(_DSPBSURF, dev_priv->saveDSPBSURF); 588 I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
583 I915_WRITE(_DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); 589 I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
584 } 590 }
585 591
586 I915_WRITE(_PIPEBCONF, dev_priv->savePIPEBCONF); 592 I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
587 593
588 i915_restore_palette(dev, PIPE_B); 594 i915_restore_palette(dev, PIPE_B);
589 /* Enable the plane */ 595 /* Enable the plane */
590 I915_WRITE(_DSPBCNTR, dev_priv->saveDSPBCNTR); 596 I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
591 I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR)); 597 I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
592 598
593 /* Cursor state */ 599 /* Cursor state */
594 I915_WRITE(_CURAPOS, dev_priv->saveCURAPOS); 600 I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
595 I915_WRITE(_CURACNTR, dev_priv->saveCURACNTR); 601 I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
596 I915_WRITE(_CURABASE, dev_priv->saveCURABASE); 602 I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
597 I915_WRITE(_CURBPOS, dev_priv->saveCURBPOS); 603 I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
598 I915_WRITE(_CURBCNTR, dev_priv->saveCURBCNTR); 604 I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
599 I915_WRITE(_CURBBASE, dev_priv->saveCURBBASE); 605 I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
600 if (IS_GEN2(dev)) 606 if (IS_GEN2(dev))
601 I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); 607 I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
608
609 /* CRT state */
610 if (HAS_PCH_SPLIT(dev))
611 I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
612 else
613 I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
602 614
603 return; 615 return;
604} 616}
@@ -608,89 +620,84 @@ static void i915_save_display(struct drm_device *dev)
608 struct drm_i915_private *dev_priv = dev->dev_private; 620 struct drm_i915_private *dev_priv = dev->dev_private;
609 621
610 /* Display arbitration control */ 622 /* Display arbitration control */
611 dev_priv->saveDSPARB = I915_READ(DSPARB); 623 dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
612 624
613 /* This is only meaningful in non-KMS mode */ 625 /* This is only meaningful in non-KMS mode */
614 /* Don't save them in KMS mode */ 626 /* Don't regfile.save them in KMS mode */
615 i915_save_modeset_reg(dev); 627 i915_save_modeset_reg(dev);
616 628
617 /* CRT state */
618 if (HAS_PCH_SPLIT(dev)) {
619 dev_priv->saveADPA = I915_READ(PCH_ADPA);
620 } else {
621 dev_priv->saveADPA = I915_READ(ADPA);
622 }
623
624 /* LVDS state */ 629 /* LVDS state */
625 if (HAS_PCH_SPLIT(dev)) { 630 if (HAS_PCH_SPLIT(dev)) {
626 dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL); 631 dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
627 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1); 632 dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
628 dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2); 633 dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
629 dev_priv->saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL); 634 dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
630 dev_priv->saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2); 635 dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
631 dev_priv->saveLVDS = I915_READ(PCH_LVDS); 636 dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
632 } else { 637 } else {
633 dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL); 638 dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
634 dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); 639 dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
635 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); 640 dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
636 dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL); 641 dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
637 if (INTEL_INFO(dev)->gen >= 4) 642 if (INTEL_INFO(dev)->gen >= 4)
638 dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); 643 dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
639 if (IS_MOBILE(dev) && !IS_I830(dev)) 644 if (IS_MOBILE(dev) && !IS_I830(dev))
640 dev_priv->saveLVDS = I915_READ(LVDS); 645 dev_priv->regfile.saveLVDS = I915_READ(LVDS);
641 } 646 }
642 647
643 if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) 648 if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
644 dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL); 649 dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
645 650
646 if (HAS_PCH_SPLIT(dev)) { 651 if (HAS_PCH_SPLIT(dev)) {
647 dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); 652 dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
648 dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); 653 dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
649 dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); 654 dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
650 } else { 655 } else {
651 dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); 656 dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
652 dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); 657 dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
653 dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); 658 dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
654 } 659 }
655 660
656 /* Display Port state */ 661 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
657 if (SUPPORTS_INTEGRATED_DP(dev)) { 662 /* Display Port state */
658 dev_priv->saveDP_B = I915_READ(DP_B); 663 if (SUPPORTS_INTEGRATED_DP(dev)) {
659 dev_priv->saveDP_C = I915_READ(DP_C); 664 dev_priv->regfile.saveDP_B = I915_READ(DP_B);
660 dev_priv->saveDP_D = I915_READ(DP_D); 665 dev_priv->regfile.saveDP_C = I915_READ(DP_C);
661 dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M); 666 dev_priv->regfile.saveDP_D = I915_READ(DP_D);
662 dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M); 667 dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
663 dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N); 668 dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
664 dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N); 669 dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
665 dev_priv->savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M); 670 dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
666 dev_priv->savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M); 671 dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
667 dev_priv->savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N); 672 dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
668 dev_priv->savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N); 673 dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
669 } 674 dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
670 /* FIXME: save TV & SDVO state */ 675 }
671 676 /* FIXME: regfile.save TV & SDVO state */
672 /* Only save FBC state on the platform that supports FBC */ 677 }
678
679 /* Only regfile.save FBC state on the platform that supports FBC */
673 if (I915_HAS_FBC(dev)) { 680 if (I915_HAS_FBC(dev)) {
674 if (HAS_PCH_SPLIT(dev)) { 681 if (HAS_PCH_SPLIT(dev)) {
675 dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE); 682 dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
676 } else if (IS_GM45(dev)) { 683 } else if (IS_GM45(dev)) {
677 dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); 684 dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
678 } else { 685 } else {
679 dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); 686 dev_priv->regfile.saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
680 dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); 687 dev_priv->regfile.saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
681 dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); 688 dev_priv->regfile.saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
682 dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); 689 dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
683 } 690 }
684 } 691 }
685 692
686 /* VGA state */ 693 /* VGA state */
687 dev_priv->saveVGA0 = I915_READ(VGA0); 694 dev_priv->regfile.saveVGA0 = I915_READ(VGA0);
688 dev_priv->saveVGA1 = I915_READ(VGA1); 695 dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
689 dev_priv->saveVGA_PD = I915_READ(VGA_PD); 696 dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
690 if (HAS_PCH_SPLIT(dev)) 697 if (HAS_PCH_SPLIT(dev))
691 dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL); 698 dev_priv->regfile.saveVGACNTRL = I915_READ(CPU_VGACNTRL);
692 else 699 else
693 dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); 700 dev_priv->regfile.saveVGACNTRL = I915_READ(VGACNTRL);
694 701
695 i915_save_vga(dev); 702 i915_save_vga(dev);
696} 703}
@@ -700,97 +707,95 @@ static void i915_restore_display(struct drm_device *dev)
700 struct drm_i915_private *dev_priv = dev->dev_private; 707 struct drm_i915_private *dev_priv = dev->dev_private;
701 708
702 /* Display arbitration */ 709 /* Display arbitration */
703 I915_WRITE(DSPARB, dev_priv->saveDSPARB); 710 I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
704 711
705 /* Display port ratios (must be done before clock is set) */ 712 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
706 if (SUPPORTS_INTEGRATED_DP(dev)) { 713 /* Display port ratios (must be done before clock is set) */
707 I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M); 714 if (SUPPORTS_INTEGRATED_DP(dev)) {
708 I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M); 715 I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
709 I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N); 716 I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
710 I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N); 717 I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
711 I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M); 718 I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
712 I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M); 719 I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M);
713 I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N); 720 I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M);
714 I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N); 721 I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N);
722 I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N);
723 }
715 } 724 }
716 725
717 /* This is only meaningful in non-KMS mode */ 726 /* This is only meaningful in non-KMS mode */
718 /* Don't restore them in KMS mode */ 727 /* Don't restore them in KMS mode */
719 i915_restore_modeset_reg(dev); 728 i915_restore_modeset_reg(dev);
720 729
721 /* CRT state */
722 if (HAS_PCH_SPLIT(dev))
723 I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
724 else
725 I915_WRITE(ADPA, dev_priv->saveADPA);
726
727 /* LVDS state */ 730 /* LVDS state */
728 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) 731 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
729 I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); 732 I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
730 733
731 if (HAS_PCH_SPLIT(dev)) { 734 if (HAS_PCH_SPLIT(dev)) {
732 I915_WRITE(PCH_LVDS, dev_priv->saveLVDS); 735 I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS);
733 } else if (IS_MOBILE(dev) && !IS_I830(dev)) 736 } else if (IS_MOBILE(dev) && !IS_I830(dev))
734 I915_WRITE(LVDS, dev_priv->saveLVDS); 737 I915_WRITE(LVDS, dev_priv->regfile.saveLVDS);
735 738
736 if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) 739 if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
737 I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL); 740 I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
738 741
739 if (HAS_PCH_SPLIT(dev)) { 742 if (HAS_PCH_SPLIT(dev)) {
740 I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL); 743 I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
741 I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2); 744 I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
742 /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2; 745 /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
743 * otherwise we get blank eDP screen after S3 on some machines 746 * otherwise we get blank eDP screen after S3 on some machines
744 */ 747 */
745 I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2); 748 I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
746 I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL); 749 I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
747 I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS); 750 I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
748 I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); 751 I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
749 I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR); 752 I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
750 I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL); 753 I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
751 I915_WRITE(RSTDBYCTL, 754 I915_WRITE(RSTDBYCTL,
752 dev_priv->saveMCHBAR_RENDER_STANDBY); 755 dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
753 } else { 756 } else {
754 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); 757 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
755 I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); 758 I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
756 I915_WRITE(BLC_HIST_CTL, dev_priv->saveBLC_HIST_CTL); 759 I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
757 I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS); 760 I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
758 I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); 761 I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
759 I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); 762 I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
760 I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); 763 I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
761 } 764 }
762 765
763 /* Display Port state */ 766 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
764 if (SUPPORTS_INTEGRATED_DP(dev)) { 767 /* Display Port state */
765 I915_WRITE(DP_B, dev_priv->saveDP_B); 768 if (SUPPORTS_INTEGRATED_DP(dev)) {
766 I915_WRITE(DP_C, dev_priv->saveDP_C); 769 I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
767 I915_WRITE(DP_D, dev_priv->saveDP_D); 770 I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
771 I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
772 }
773 /* FIXME: restore TV & SDVO state */
768 } 774 }
769 /* FIXME: restore TV & SDVO state */
770 775
771 /* only restore FBC info on the platform that supports FBC*/ 776 /* only restore FBC info on the platform that supports FBC*/
772 intel_disable_fbc(dev); 777 intel_disable_fbc(dev);
773 if (I915_HAS_FBC(dev)) { 778 if (I915_HAS_FBC(dev)) {
774 if (HAS_PCH_SPLIT(dev)) { 779 if (HAS_PCH_SPLIT(dev)) {
775 I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); 780 I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
776 } else if (IS_GM45(dev)) { 781 } else if (IS_GM45(dev)) {
777 I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); 782 I915_WRITE(DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
778 } else { 783 } else {
779 I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); 784 I915_WRITE(FBC_CFB_BASE, dev_priv->regfile.saveFBC_CFB_BASE);
780 I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); 785 I915_WRITE(FBC_LL_BASE, dev_priv->regfile.saveFBC_LL_BASE);
781 I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); 786 I915_WRITE(FBC_CONTROL2, dev_priv->regfile.saveFBC_CONTROL2);
782 I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); 787 I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
783 } 788 }
784 } 789 }
785 /* VGA state */ 790 /* VGA state */
786 if (HAS_PCH_SPLIT(dev)) 791 if (HAS_PCH_SPLIT(dev))
787 I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL); 792 I915_WRITE(CPU_VGACNTRL, dev_priv->regfile.saveVGACNTRL);
788 else 793 else
789 I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); 794 I915_WRITE(VGACNTRL, dev_priv->regfile.saveVGACNTRL);
790 795
791 I915_WRITE(VGA0, dev_priv->saveVGA0); 796 I915_WRITE(VGA0, dev_priv->regfile.saveVGA0);
792 I915_WRITE(VGA1, dev_priv->saveVGA1); 797 I915_WRITE(VGA1, dev_priv->regfile.saveVGA1);
793 I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); 798 I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
794 POSTING_READ(VGA_PD); 799 POSTING_READ(VGA_PD);
795 udelay(150); 800 udelay(150);
796 801
@@ -802,46 +807,45 @@ int i915_save_state(struct drm_device *dev)
802 struct drm_i915_private *dev_priv = dev->dev_private; 807 struct drm_i915_private *dev_priv = dev->dev_private;
803 int i; 808 int i;
804 809
805 pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); 810 pci_read_config_byte(dev->pdev, LBB, &dev_priv->regfile.saveLBB);
806 811
807 mutex_lock(&dev->struct_mutex); 812 mutex_lock(&dev->struct_mutex);
808 813
809 /* Hardware status page */
810 dev_priv->saveHWS = I915_READ(HWS_PGA);
811
812 i915_save_display(dev); 814 i915_save_display(dev);
813 815
814 /* Interrupt state */ 816 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
815 if (HAS_PCH_SPLIT(dev)) { 817 /* Interrupt state */
816 dev_priv->saveDEIER = I915_READ(DEIER); 818 if (HAS_PCH_SPLIT(dev)) {
817 dev_priv->saveDEIMR = I915_READ(DEIMR); 819 dev_priv->regfile.saveDEIER = I915_READ(DEIER);
818 dev_priv->saveGTIER = I915_READ(GTIER); 820 dev_priv->regfile.saveDEIMR = I915_READ(DEIMR);
819 dev_priv->saveGTIMR = I915_READ(GTIMR); 821 dev_priv->regfile.saveGTIER = I915_READ(GTIER);
820 dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR); 822 dev_priv->regfile.saveGTIMR = I915_READ(GTIMR);
821 dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR); 823 dev_priv->regfile.saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
822 dev_priv->saveMCHBAR_RENDER_STANDBY = 824 dev_priv->regfile.saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
823 I915_READ(RSTDBYCTL); 825 dev_priv->regfile.saveMCHBAR_RENDER_STANDBY =
824 dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG); 826 I915_READ(RSTDBYCTL);
825 } else { 827 dev_priv->regfile.savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
826 dev_priv->saveIER = I915_READ(IER); 828 } else {
827 dev_priv->saveIMR = I915_READ(IMR); 829 dev_priv->regfile.saveIER = I915_READ(IER);
830 dev_priv->regfile.saveIMR = I915_READ(IMR);
831 }
828 } 832 }
829 833
830 intel_disable_gt_powersave(dev); 834 intel_disable_gt_powersave(dev);
831 835
832 /* Cache mode state */ 836 /* Cache mode state */
833 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); 837 dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
834 838
835 /* Memory Arbitration state */ 839 /* Memory Arbitration state */
836 dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); 840 dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
837 841
838 /* Scratch space */ 842 /* Scratch space */
839 for (i = 0; i < 16; i++) { 843 for (i = 0; i < 16; i++) {
840 dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2)); 844 dev_priv->regfile.saveSWF0[i] = I915_READ(SWF00 + (i << 2));
841 dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2)); 845 dev_priv->regfile.saveSWF1[i] = I915_READ(SWF10 + (i << 2));
842 } 846 }
843 for (i = 0; i < 3; i++) 847 for (i = 0; i < 3; i++)
844 dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); 848 dev_priv->regfile.saveSWF2[i] = I915_READ(SWF30 + (i << 2));
845 849
846 mutex_unlock(&dev->struct_mutex); 850 mutex_unlock(&dev->struct_mutex);
847 851
@@ -853,41 +857,40 @@ int i915_restore_state(struct drm_device *dev)
853 struct drm_i915_private *dev_priv = dev->dev_private; 857 struct drm_i915_private *dev_priv = dev->dev_private;
854 int i; 858 int i;
855 859
856 pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); 860 pci_write_config_byte(dev->pdev, LBB, dev_priv->regfile.saveLBB);
857 861
858 mutex_lock(&dev->struct_mutex); 862 mutex_lock(&dev->struct_mutex);
859 863
860 /* Hardware status page */
861 I915_WRITE(HWS_PGA, dev_priv->saveHWS);
862
863 i915_restore_display(dev); 864 i915_restore_display(dev);
864 865
865 /* Interrupt state */ 866 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
866 if (HAS_PCH_SPLIT(dev)) { 867 /* Interrupt state */
867 I915_WRITE(DEIER, dev_priv->saveDEIER); 868 if (HAS_PCH_SPLIT(dev)) {
868 I915_WRITE(DEIMR, dev_priv->saveDEIMR); 869 I915_WRITE(DEIER, dev_priv->regfile.saveDEIER);
869 I915_WRITE(GTIER, dev_priv->saveGTIER); 870 I915_WRITE(DEIMR, dev_priv->regfile.saveDEIMR);
870 I915_WRITE(GTIMR, dev_priv->saveGTIMR); 871 I915_WRITE(GTIER, dev_priv->regfile.saveGTIER);
871 I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR); 872 I915_WRITE(GTIMR, dev_priv->regfile.saveGTIMR);
872 I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR); 873 I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR);
873 I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG); 874 I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR);
874 } else { 875 I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG);
875 I915_WRITE(IER, dev_priv->saveIER); 876 } else {
876 I915_WRITE(IMR, dev_priv->saveIMR); 877 I915_WRITE(IER, dev_priv->regfile.saveIER);
878 I915_WRITE(IMR, dev_priv->regfile.saveIMR);
879 }
877 } 880 }
878 881
879 /* Cache mode state */ 882 /* Cache mode state */
880 I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); 883 I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000);
881 884
882 /* Memory arbitration state */ 885 /* Memory arbitration state */
883 I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000); 886 I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
884 887
885 for (i = 0; i < 16; i++) { 888 for (i = 0; i < 16; i++) {
886 I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]); 889 I915_WRITE(SWF00 + (i << 2), dev_priv->regfile.saveSWF0[i]);
887 I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]); 890 I915_WRITE(SWF10 + (i << 2), dev_priv->regfile.saveSWF1[i]);
888 } 891 }
889 for (i = 0; i < 3; i++) 892 for (i = 0; i < 3; i++)
890 I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); 893 I915_WRITE(SWF30 + (i << 2), dev_priv->regfile.saveSWF2[i]);
891 894
892 mutex_unlock(&dev->struct_mutex); 895 mutex_unlock(&dev->struct_mutex);
893 896
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 903eebd2117a..9462081b1e60 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -97,7 +97,7 @@ static struct attribute_group rc6_attr_group = {
97 97
98static int l3_access_valid(struct drm_device *dev, loff_t offset) 98static int l3_access_valid(struct drm_device *dev, loff_t offset)
99{ 99{
100 if (!IS_IVYBRIDGE(dev)) 100 if (!HAS_L3_GPU_CACHE(dev))
101 return -EPERM; 101 return -EPERM;
102 102
103 if (offset % 4 != 0) 103 if (offset % 4 != 0)
@@ -162,7 +162,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
162 if (ret) 162 if (ret)
163 return ret; 163 return ret;
164 164
165 if (!dev_priv->mm.l3_remap_info) { 165 if (!dev_priv->l3_parity.remap_info) {
166 temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL); 166 temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
167 if (!temp) { 167 if (!temp) {
168 mutex_unlock(&drm_dev->struct_mutex); 168 mutex_unlock(&drm_dev->struct_mutex);
@@ -182,9 +182,9 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
182 * at this point it is left as a TODO. 182 * at this point it is left as a TODO.
183 */ 183 */
184 if (temp) 184 if (temp)
185 dev_priv->mm.l3_remap_info = temp; 185 dev_priv->l3_parity.remap_info = temp;
186 186
187 memcpy(dev_priv->mm.l3_remap_info + (offset/4), 187 memcpy(dev_priv->l3_parity.remap_info + (offset/4),
188 buf + (offset/4), 188 buf + (offset/4),
189 count); 189 count);
190 190
@@ -211,12 +211,9 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
211 struct drm_i915_private *dev_priv = dev->dev_private; 211 struct drm_i915_private *dev_priv = dev->dev_private;
212 int ret; 212 int ret;
213 213
214 ret = i915_mutex_lock_interruptible(dev); 214 mutex_lock(&dev_priv->rps.hw_lock);
215 if (ret)
216 return ret;
217
218 ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER; 215 ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
219 mutex_unlock(&dev->struct_mutex); 216 mutex_unlock(&dev_priv->rps.hw_lock);
220 217
221 return snprintf(buf, PAGE_SIZE, "%d", ret); 218 return snprintf(buf, PAGE_SIZE, "%d", ret);
222} 219}
@@ -228,12 +225,9 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
228 struct drm_i915_private *dev_priv = dev->dev_private; 225 struct drm_i915_private *dev_priv = dev->dev_private;
229 int ret; 226 int ret;
230 227
231 ret = i915_mutex_lock_interruptible(dev); 228 mutex_lock(&dev_priv->rps.hw_lock);
232 if (ret)
233 return ret;
234
235 ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; 229 ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
236 mutex_unlock(&dev->struct_mutex); 230 mutex_unlock(&dev_priv->rps.hw_lock);
237 231
238 return snprintf(buf, PAGE_SIZE, "%d", ret); 232 return snprintf(buf, PAGE_SIZE, "%d", ret);
239} 233}
@@ -254,16 +248,14 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
254 248
255 val /= GT_FREQUENCY_MULTIPLIER; 249 val /= GT_FREQUENCY_MULTIPLIER;
256 250
257 ret = mutex_lock_interruptible(&dev->struct_mutex); 251 mutex_lock(&dev_priv->rps.hw_lock);
258 if (ret)
259 return ret;
260 252
261 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 253 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
262 hw_max = (rp_state_cap & 0xff); 254 hw_max = (rp_state_cap & 0xff);
263 hw_min = ((rp_state_cap & 0xff0000) >> 16); 255 hw_min = ((rp_state_cap & 0xff0000) >> 16);
264 256
265 if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) { 257 if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
266 mutex_unlock(&dev->struct_mutex); 258 mutex_unlock(&dev_priv->rps.hw_lock);
267 return -EINVAL; 259 return -EINVAL;
268 } 260 }
269 261
@@ -272,7 +264,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
272 264
273 dev_priv->rps.max_delay = val; 265 dev_priv->rps.max_delay = val;
274 266
275 mutex_unlock(&dev->struct_mutex); 267 mutex_unlock(&dev_priv->rps.hw_lock);
276 268
277 return count; 269 return count;
278} 270}
@@ -284,12 +276,9 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
284 struct drm_i915_private *dev_priv = dev->dev_private; 276 struct drm_i915_private *dev_priv = dev->dev_private;
285 int ret; 277 int ret;
286 278
287 ret = i915_mutex_lock_interruptible(dev); 279 mutex_lock(&dev_priv->rps.hw_lock);
288 if (ret)
289 return ret;
290
291 ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; 280 ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
292 mutex_unlock(&dev->struct_mutex); 281 mutex_unlock(&dev_priv->rps.hw_lock);
293 282
294 return snprintf(buf, PAGE_SIZE, "%d", ret); 283 return snprintf(buf, PAGE_SIZE, "%d", ret);
295} 284}
@@ -310,16 +299,14 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
310 299
311 val /= GT_FREQUENCY_MULTIPLIER; 300 val /= GT_FREQUENCY_MULTIPLIER;
312 301
313 ret = mutex_lock_interruptible(&dev->struct_mutex); 302 mutex_lock(&dev_priv->rps.hw_lock);
314 if (ret)
315 return ret;
316 303
317 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 304 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
318 hw_max = (rp_state_cap & 0xff); 305 hw_max = (rp_state_cap & 0xff);
319 hw_min = ((rp_state_cap & 0xff0000) >> 16); 306 hw_min = ((rp_state_cap & 0xff0000) >> 16);
320 307
321 if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) { 308 if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
322 mutex_unlock(&dev->struct_mutex); 309 mutex_unlock(&dev_priv->rps.hw_lock);
323 return -EINVAL; 310 return -EINVAL;
324 } 311 }
325 312
@@ -328,7 +315,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
328 315
329 dev_priv->rps.min_delay = val; 316 dev_priv->rps.min_delay = val;
330 317
331 mutex_unlock(&dev->struct_mutex); 318 mutex_unlock(&dev_priv->rps.hw_lock);
332 319
333 return count; 320 return count;
334 321
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 8134421b89a6..3db4a6817713 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -229,24 +229,26 @@ TRACE_EVENT(i915_gem_evict_everything,
229); 229);
230 230
231TRACE_EVENT(i915_gem_ring_dispatch, 231TRACE_EVENT(i915_gem_ring_dispatch,
232 TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), 232 TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
233 TP_ARGS(ring, seqno), 233 TP_ARGS(ring, seqno, flags),
234 234
235 TP_STRUCT__entry( 235 TP_STRUCT__entry(
236 __field(u32, dev) 236 __field(u32, dev)
237 __field(u32, ring) 237 __field(u32, ring)
238 __field(u32, seqno) 238 __field(u32, seqno)
239 __field(u32, flags)
239 ), 240 ),
240 241
241 TP_fast_assign( 242 TP_fast_assign(
242 __entry->dev = ring->dev->primary->index; 243 __entry->dev = ring->dev->primary->index;
243 __entry->ring = ring->id; 244 __entry->ring = ring->id;
244 __entry->seqno = seqno; 245 __entry->seqno = seqno;
246 __entry->flags = flags;
245 i915_trace_irq_get(ring, seqno); 247 i915_trace_irq_get(ring, seqno);
246 ), 248 ),
247 249
248 TP_printk("dev=%u, ring=%u, seqno=%u", 250 TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
249 __entry->dev, __entry->ring, __entry->seqno) 251 __entry->dev, __entry->ring, __entry->seqno, __entry->flags)
250); 252);
251 253
252TRACE_EVENT(i915_gem_ring_flush, 254TRACE_EVENT(i915_gem_ring_flush,
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 0ed6baff4b0c..55ffba1f5818 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -499,12 +499,8 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
499 499
500 edp = find_section(bdb, BDB_EDP); 500 edp = find_section(bdb, BDB_EDP);
501 if (!edp) { 501 if (!edp) {
502 if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) { 502 if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support)
503 DRM_DEBUG_KMS("No eDP BDB found but eDP panel " 503 DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n");
504 "supported, assume %dbpp panel color "
505 "depth.\n",
506 dev_priv->edp.bpp);
507 }
508 return; 504 return;
509 } 505 }
510 506
@@ -657,9 +653,6 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
657 dev_priv->lvds_use_ssc = 1; 653 dev_priv->lvds_use_ssc = 1;
658 dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1); 654 dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
659 DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq); 655 DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
660
661 /* eDP data */
662 dev_priv->edp.bpp = 18;
663} 656}
664 657
665static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id) 658static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
@@ -762,7 +755,8 @@ void intel_setup_bios(struct drm_device *dev)
762 struct drm_i915_private *dev_priv = dev->dev_private; 755 struct drm_i915_private *dev_priv = dev->dev_private;
763 756
764 /* Set the Panel Power On/Off timings if uninitialized. */ 757 /* Set the Panel Power On/Off timings if uninitialized. */
765 if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) { 758 if (!HAS_PCH_SPLIT(dev) &&
759 I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
766 /* Set T2 to 40ms and T5 to 200ms */ 760 /* Set T2 to 40ms and T5 to 200ms */
767 I915_WRITE(PP_ON_DELAYS, 0x019007d0); 761 I915_WRITE(PP_ON_DELAYS, 0x019007d0);
768 762
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index f78061af7045..9293878ec7eb 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -143,7 +143,7 @@ static void intel_crt_dpms(struct drm_connector *connector, int mode)
143 int old_dpms; 143 int old_dpms;
144 144
145 /* PCH platforms and VLV only support on/off. */ 145 /* PCH platforms and VLV only support on/off. */
146 if (INTEL_INFO(dev)->gen < 5 && mode != DRM_MODE_DPMS_ON) 146 if (INTEL_INFO(dev)->gen >= 5 && mode != DRM_MODE_DPMS_ON)
147 mode = DRM_MODE_DPMS_OFF; 147 mode = DRM_MODE_DPMS_OFF;
148 148
149 if (mode == connector->dpms) 149 if (mode == connector->dpms)
@@ -198,6 +198,11 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
198 if (mode->clock > max_clock) 198 if (mode->clock > max_clock)
199 return MODE_CLOCK_HIGH; 199 return MODE_CLOCK_HIGH;
200 200
201 /* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
202 if (HAS_PCH_LPT(dev) &&
203 (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
204 return MODE_CLOCK_HIGH;
205
201 return MODE_OK; 206 return MODE_OK;
202} 207}
203 208
@@ -221,14 +226,20 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
221 struct drm_i915_private *dev_priv = dev->dev_private; 226 struct drm_i915_private *dev_priv = dev->dev_private;
222 u32 adpa; 227 u32 adpa;
223 228
224 adpa = ADPA_HOTPLUG_BITS; 229 if (HAS_PCH_SPLIT(dev))
230 adpa = ADPA_HOTPLUG_BITS;
231 else
232 adpa = 0;
233
225 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 234 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
226 adpa |= ADPA_HSYNC_ACTIVE_HIGH; 235 adpa |= ADPA_HSYNC_ACTIVE_HIGH;
227 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 236 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
228 adpa |= ADPA_VSYNC_ACTIVE_HIGH; 237 adpa |= ADPA_VSYNC_ACTIVE_HIGH;
229 238
230 /* For CPT allow 3 pipe config, for others just use A or B */ 239 /* For CPT allow 3 pipe config, for others just use A or B */
231 if (HAS_PCH_CPT(dev)) 240 if (HAS_PCH_LPT(dev))
241 ; /* Those bits don't exist here */
242 else if (HAS_PCH_CPT(dev))
232 adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe); 243 adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
233 else if (intel_crtc->pipe == 0) 244 else if (intel_crtc->pipe == 0)
234 adpa |= ADPA_PIPE_A_SELECT; 245 adpa |= ADPA_PIPE_A_SELECT;
@@ -401,12 +412,16 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector,
401 struct i2c_adapter *adapter) 412 struct i2c_adapter *adapter)
402{ 413{
403 struct edid *edid; 414 struct edid *edid;
415 int ret;
404 416
405 edid = intel_crt_get_edid(connector, adapter); 417 edid = intel_crt_get_edid(connector, adapter);
406 if (!edid) 418 if (!edid)
407 return 0; 419 return 0;
408 420
409 return intel_connector_update_modes(connector, edid); 421 ret = intel_connector_update_modes(connector, edid);
422 kfree(edid);
423
424 return ret;
410} 425}
411 426
412static bool intel_crt_detect_ddc(struct drm_connector *connector) 427static bool intel_crt_detect_ddc(struct drm_connector *connector)
@@ -644,10 +659,22 @@ static int intel_crt_set_property(struct drm_connector *connector,
644static void intel_crt_reset(struct drm_connector *connector) 659static void intel_crt_reset(struct drm_connector *connector)
645{ 660{
646 struct drm_device *dev = connector->dev; 661 struct drm_device *dev = connector->dev;
662 struct drm_i915_private *dev_priv = dev->dev_private;
647 struct intel_crt *crt = intel_attached_crt(connector); 663 struct intel_crt *crt = intel_attached_crt(connector);
648 664
649 if (HAS_PCH_SPLIT(dev)) 665 if (HAS_PCH_SPLIT(dev)) {
666 u32 adpa;
667
668 adpa = I915_READ(PCH_ADPA);
669 adpa &= ~ADPA_CRT_HOTPLUG_MASK;
670 adpa |= ADPA_HOTPLUG_BITS;
671 I915_WRITE(PCH_ADPA, adpa);
672 POSTING_READ(PCH_ADPA);
673
674 DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
650 crt->force_hotplug_required = 1; 675 crt->force_hotplug_required = 1;
676 }
677
651} 678}
652 679
653/* 680/*
@@ -729,7 +756,7 @@ void intel_crt_init(struct drm_device *dev)
729 756
730 crt->base.type = INTEL_OUTPUT_ANALOG; 757 crt->base.type = INTEL_OUTPUT_ANALOG;
731 crt->base.cloneable = true; 758 crt->base.cloneable = true;
732 if (IS_HASWELL(dev)) 759 if (IS_I830(dev))
733 crt->base.crtc_mask = (1 << 0); 760 crt->base.crtc_mask = (1 << 0);
734 else 761 else
735 crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 762 crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
@@ -749,7 +776,10 @@ void intel_crt_init(struct drm_device *dev)
749 776
750 crt->base.disable = intel_disable_crt; 777 crt->base.disable = intel_disable_crt;
751 crt->base.enable = intel_enable_crt; 778 crt->base.enable = intel_enable_crt;
752 crt->base.get_hw_state = intel_crt_get_hw_state; 779 if (IS_HASWELL(dev))
780 crt->base.get_hw_state = intel_ddi_get_hw_state;
781 else
782 crt->base.get_hw_state = intel_crt_get_hw_state;
753 intel_connector->get_hw_state = intel_connector_get_hw_state; 783 intel_connector->get_hw_state = intel_connector_get_hw_state;
754 784
755 drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs); 785 drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
@@ -766,18 +796,14 @@ void intel_crt_init(struct drm_device *dev)
766 * Configure the automatic hotplug detection stuff 796 * Configure the automatic hotplug detection stuff
767 */ 797 */
768 crt->force_hotplug_required = 0; 798 crt->force_hotplug_required = 0;
769 if (HAS_PCH_SPLIT(dev)) {
770 u32 adpa;
771
772 adpa = I915_READ(PCH_ADPA);
773 adpa &= ~ADPA_CRT_HOTPLUG_MASK;
774 adpa |= ADPA_HOTPLUG_BITS;
775 I915_WRITE(PCH_ADPA, adpa);
776 POSTING_READ(PCH_ADPA);
777
778 DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
779 crt->force_hotplug_required = 1;
780 }
781 799
782 dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; 800 dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
801
802 /*
803 * TODO: find a proper way to discover whether we need to set the
804 * polarity reversal bit or not, instead of relying on the BIOS.
805 */
806 if (HAS_PCH_LPT(dev))
807 dev_priv->fdi_rx_polarity_reversed =
808 !!(I915_READ(_FDI_RXA_CTL) & FDI_RX_POLARITY_REVERSED_LPT);
783} 809}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index bfe375466a0e..4bad0f724019 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -58,6 +58,26 @@ static const u32 hsw_ddi_translations_fdi[] = {
58 0x00FFFFFF, 0x00040006 /* HDMI parameters */ 58 0x00FFFFFF, 0x00040006 /* HDMI parameters */
59}; 59};
60 60
61static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
62{
63 struct drm_encoder *encoder = &intel_encoder->base;
64 int type = intel_encoder->type;
65
66 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
67 type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) {
68 struct intel_digital_port *intel_dig_port =
69 enc_to_dig_port(encoder);
70 return intel_dig_port->port;
71
72 } else if (type == INTEL_OUTPUT_ANALOG) {
73 return PORT_E;
74
75 } else {
76 DRM_ERROR("Invalid DDI encoder type %d\n", type);
77 BUG();
78 }
79}
80
61/* On Haswell, DDI port buffers must be programmed with correct values 81/* On Haswell, DDI port buffers must be programmed with correct values
62 * in advance. The buffer values are different for FDI and DP modes, 82 * in advance. The buffer values are different for FDI and DP modes,
63 * but the HDMI/DVI fields are shared among those. So we program the DDI 83 * but the HDMI/DVI fields are shared among those. So we program the DDI
@@ -118,6 +138,19 @@ static const long hsw_ddi_buf_ctl_values[] = {
118 DDI_BUF_EMP_800MV_3_5DB_HSW 138 DDI_BUF_EMP_800MV_3_5DB_HSW
119}; 139};
120 140
141static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
142 enum port port)
143{
144 uint32_t reg = DDI_BUF_CTL(port);
145 int i;
146
147 for (i = 0; i < 8; i++) {
148 udelay(1);
149 if (I915_READ(reg) & DDI_BUF_IS_IDLE)
150 return;
151 }
152 DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
153}
121 154
122/* Starting with Haswell, different DDI ports can work in FDI mode for 155/* Starting with Haswell, different DDI ports can work in FDI mode for
123 * connection to the PCH-located connectors. For this, it is necessary to train 156 * connection to the PCH-located connectors. For this, it is necessary to train
@@ -133,25 +166,36 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
133 struct drm_device *dev = crtc->dev; 166 struct drm_device *dev = crtc->dev;
134 struct drm_i915_private *dev_priv = dev->dev_private; 167 struct drm_i915_private *dev_priv = dev->dev_private;
135 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 168 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
136 int pipe = intel_crtc->pipe; 169 u32 temp, i, rx_ctl_val;
137 u32 reg, temp, i;
138
139 /* Configure CPU PLL, wait for warmup */
140 I915_WRITE(SPLL_CTL,
141 SPLL_PLL_ENABLE |
142 SPLL_PLL_FREQ_1350MHz |
143 SPLL_PLL_SCC);
144 170
145 /* Use SPLL to drive the output when in FDI mode */ 171 /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
146 I915_WRITE(PORT_CLK_SEL(PORT_E), 172 * mode set "sequence for CRT port" document:
147 PORT_CLK_SEL_SPLL); 173 * - TP1 to TP2 time with the default value
148 I915_WRITE(PIPE_CLK_SEL(pipe), 174 * - FDI delay to 90h
149 PIPE_CLK_SEL_PORT(PORT_E)); 175 */
150 176 I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) |
151 udelay(20); 177 FDI_RX_PWRDN_LANE0_VAL(2) |
152 178 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
153 /* Start the training iterating through available voltages and emphasis */ 179
154 for (i=0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values); i++) { 180 /* Enable the PCH Receiver FDI PLL */
181 rx_ctl_val = FDI_RX_PLL_ENABLE | FDI_RX_ENHANCE_FRAME_ENABLE |
182 ((intel_crtc->fdi_lanes - 1) << 19);
183 if (dev_priv->fdi_rx_polarity_reversed)
184 rx_ctl_val |= FDI_RX_POLARITY_REVERSED_LPT;
185 I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
186 POSTING_READ(_FDI_RXA_CTL);
187 udelay(220);
188
189 /* Switch from Rawclk to PCDclk */
190 rx_ctl_val |= FDI_PCDCLK;
191 I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
192
193 /* Configure Port Clock Select */
194 I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel);
195
196 /* Start the training iterating through available voltages and emphasis,
197 * testing each value twice. */
198 for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values) * 2; i++) {
155 /* Configure DP_TP_CTL with auto-training */ 199 /* Configure DP_TP_CTL with auto-training */
156 I915_WRITE(DP_TP_CTL(PORT_E), 200 I915_WRITE(DP_TP_CTL(PORT_E),
157 DP_TP_CTL_FDI_AUTOTRAIN | 201 DP_TP_CTL_FDI_AUTOTRAIN |
@@ -160,103 +204,75 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
160 DP_TP_CTL_ENABLE); 204 DP_TP_CTL_ENABLE);
161 205
162 /* Configure and enable DDI_BUF_CTL for DDI E with next voltage */ 206 /* Configure and enable DDI_BUF_CTL for DDI E with next voltage */
163 temp = I915_READ(DDI_BUF_CTL(PORT_E));
164 temp = (temp & ~DDI_BUF_EMP_MASK);
165 I915_WRITE(DDI_BUF_CTL(PORT_E), 207 I915_WRITE(DDI_BUF_CTL(PORT_E),
166 temp | 208 DDI_BUF_CTL_ENABLE |
167 DDI_BUF_CTL_ENABLE | 209 ((intel_crtc->fdi_lanes - 1) << 1) |
168 DDI_PORT_WIDTH_X2 | 210 hsw_ddi_buf_ctl_values[i / 2]);
169 hsw_ddi_buf_ctl_values[i]); 211 POSTING_READ(DDI_BUF_CTL(PORT_E));
170 212
171 udelay(600); 213 udelay(600);
172 214
173 /* We need to program FDI_RX_MISC with the default TP1 to TP2 215 /* Program PCH FDI Receiver TU */
174 * values before enabling the receiver, and configure the delay 216 I915_WRITE(_FDI_RXA_TUSIZE1, TU_SIZE(64));
175 * for the FDI timing generator to 90h. Luckily, all the other 217
176 * bits are supposed to be zeroed, so we can write those values 218 /* Enable PCH FDI Receiver with auto-training */
177 * directly. 219 rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
178 */ 220 I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
179 I915_WRITE(FDI_RX_MISC(pipe), FDI_RX_TP1_TO_TP2_48 | 221 POSTING_READ(_FDI_RXA_CTL);
180 FDI_RX_FDI_DELAY_90); 222
181 223 /* Wait for FDI receiver lane calibration */
182 /* Enable CPU FDI Receiver with auto-training */ 224 udelay(30);
183 reg = FDI_RX_CTL(pipe); 225
184 I915_WRITE(reg, 226 /* Unset FDI_RX_MISC pwrdn lanes */
185 I915_READ(reg) | 227 temp = I915_READ(_FDI_RXA_MISC);
186 FDI_LINK_TRAIN_AUTO | 228 temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
187 FDI_RX_ENABLE | 229 I915_WRITE(_FDI_RXA_MISC, temp);
188 FDI_LINK_TRAIN_PATTERN_1_CPT | 230 POSTING_READ(_FDI_RXA_MISC);
189 FDI_RX_ENHANCE_FRAME_ENABLE | 231
190 FDI_PORT_WIDTH_2X_LPT | 232 /* Wait for FDI auto training time */
191 FDI_RX_PLL_ENABLE); 233 udelay(5);
192 POSTING_READ(reg);
193 udelay(100);
194 234
195 temp = I915_READ(DP_TP_STATUS(PORT_E)); 235 temp = I915_READ(DP_TP_STATUS(PORT_E));
196 if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) { 236 if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
197 DRM_DEBUG_DRIVER("BUF_CTL training done on %d step\n", i); 237 DRM_DEBUG_KMS("FDI link training done on step %d\n", i);
198 238
199 /* Enable normal pixel sending for FDI */ 239 /* Enable normal pixel sending for FDI */
200 I915_WRITE(DP_TP_CTL(PORT_E), 240 I915_WRITE(DP_TP_CTL(PORT_E),
201 DP_TP_CTL_FDI_AUTOTRAIN | 241 DP_TP_CTL_FDI_AUTOTRAIN |
202 DP_TP_CTL_LINK_TRAIN_NORMAL | 242 DP_TP_CTL_LINK_TRAIN_NORMAL |
203 DP_TP_CTL_ENHANCED_FRAME_ENABLE | 243 DP_TP_CTL_ENHANCED_FRAME_ENABLE |
204 DP_TP_CTL_ENABLE); 244 DP_TP_CTL_ENABLE);
205
206 /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in FDI mode */
207 temp = I915_READ(DDI_FUNC_CTL(pipe));
208 temp &= ~PIPE_DDI_PORT_MASK;
209 temp |= PIPE_DDI_SELECT_PORT(PORT_E) |
210 PIPE_DDI_MODE_SELECT_FDI |
211 PIPE_DDI_FUNC_ENABLE |
212 PIPE_DDI_PORT_WIDTH_X2;
213 I915_WRITE(DDI_FUNC_CTL(pipe),
214 temp);
215 break;
216 } else {
217 DRM_ERROR("Error training BUF_CTL %d\n", i);
218 245
219 /* Disable DP_TP_CTL and FDI_RX_CTL) and retry */ 246 return;
220 I915_WRITE(DP_TP_CTL(PORT_E),
221 I915_READ(DP_TP_CTL(PORT_E)) &
222 ~DP_TP_CTL_ENABLE);
223 I915_WRITE(FDI_RX_CTL(pipe),
224 I915_READ(FDI_RX_CTL(pipe)) &
225 ~FDI_RX_PLL_ENABLE);
226 continue;
227 } 247 }
228 }
229 248
230 DRM_DEBUG_KMS("FDI train done.\n"); 249 temp = I915_READ(DDI_BUF_CTL(PORT_E));
231} 250 temp &= ~DDI_BUF_CTL_ENABLE;
232 251 I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
233/* For DDI connections, it is possible to support different outputs over the 252 POSTING_READ(DDI_BUF_CTL(PORT_E));
234 * same DDI port, such as HDMI or DP or even VGA via FDI. So we don't know by 253
235 * the time the output is detected what exactly is on the other end of it. This 254 /* Disable DP_TP_CTL and FDI_RX_CTL and retry */
236 * function aims at providing support for this detection and proper output 255 temp = I915_READ(DP_TP_CTL(PORT_E));
237 * configuration. 256 temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
238 */ 257 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
239void intel_ddi_init(struct drm_device *dev, enum port port) 258 I915_WRITE(DP_TP_CTL(PORT_E), temp);
240{ 259 POSTING_READ(DP_TP_CTL(PORT_E));
241 /* For now, we don't do any proper output detection and assume that we 260
242 * handle HDMI only */ 261 intel_wait_ddi_buf_idle(dev_priv, PORT_E);
243 262
244 switch(port){ 263 rx_ctl_val &= ~FDI_RX_ENABLE;
245 case PORT_A: 264 I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
246 /* We don't handle eDP and DP yet */ 265 POSTING_READ(_FDI_RXA_CTL);
247 DRM_DEBUG_DRIVER("Found digital output on DDI port A\n"); 266
248 break; 267 /* Reset FDI_RX_MISC pwrdn lanes */
249 /* Assume that the ports B, C and D are working in HDMI mode for now */ 268 temp = I915_READ(_FDI_RXA_MISC);
250 case PORT_B: 269 temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
251 case PORT_C: 270 temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
252 case PORT_D: 271 I915_WRITE(_FDI_RXA_MISC, temp);
253 intel_hdmi_init(dev, DDI_BUF_CTL(port), port); 272 POSTING_READ(_FDI_RXA_MISC);
254 break;
255 default:
256 DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
257 port);
258 break;
259 } 273 }
274
275 DRM_ERROR("FDI link training failed!\n");
260} 276}
261 277
262/* WRPLL clock dividers */ 278/* WRPLL clock dividers */
@@ -645,116 +661,435 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
645 {298000, 2, 21, 19}, 661 {298000, 2, 21, 19},
646}; 662};
647 663
648void intel_ddi_mode_set(struct drm_encoder *encoder, 664static void intel_ddi_mode_set(struct drm_encoder *encoder,
649 struct drm_display_mode *mode, 665 struct drm_display_mode *mode,
650 struct drm_display_mode *adjusted_mode) 666 struct drm_display_mode *adjusted_mode)
651{ 667{
652 struct drm_device *dev = encoder->dev;
653 struct drm_i915_private *dev_priv = dev->dev_private;
654 struct drm_crtc *crtc = encoder->crtc; 668 struct drm_crtc *crtc = encoder->crtc;
655 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 669 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
656 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 670 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
657 int port = intel_hdmi->ddi_port; 671 int port = intel_ddi_get_encoder_port(intel_encoder);
658 int pipe = intel_crtc->pipe; 672 int pipe = intel_crtc->pipe;
659 int p, n2, r2; 673 int type = intel_encoder->type;
660 u32 temp, i;
661 674
662 /* On Haswell, we need to enable the clocks and prepare DDI function to 675 DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n",
663 * work in HDMI mode for this pipe. 676 port_name(port), pipe_name(pipe));
664 */ 677
665 DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe)); 678 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
679 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
680
681 intel_dp->DP = DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
682 switch (intel_dp->lane_count) {
683 case 1:
684 intel_dp->DP |= DDI_PORT_WIDTH_X1;
685 break;
686 case 2:
687 intel_dp->DP |= DDI_PORT_WIDTH_X2;
688 break;
689 case 4:
690 intel_dp->DP |= DDI_PORT_WIDTH_X4;
691 break;
692 default:
693 intel_dp->DP |= DDI_PORT_WIDTH_X4;
694 WARN(1, "Unexpected DP lane count %d\n",
695 intel_dp->lane_count);
696 break;
697 }
698
699 if (intel_dp->has_audio) {
700 DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
701 pipe_name(intel_crtc->pipe));
702
703 /* write eld */
704 DRM_DEBUG_DRIVER("DP audio: write eld information\n");
705 intel_write_eld(encoder, adjusted_mode);
706 }
707
708 intel_dp_init_link_config(intel_dp);
709
710 } else if (type == INTEL_OUTPUT_HDMI) {
711 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
712
713 if (intel_hdmi->has_audio) {
714 /* Proper support for digital audio needs a new logic
715 * and a new set of registers, so we leave it for future
716 * patch bombing.
717 */
718 DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
719 pipe_name(intel_crtc->pipe));
720
721 /* write eld */
722 DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
723 intel_write_eld(encoder, adjusted_mode);
724 }
725
726 intel_hdmi->set_infoframes(encoder, adjusted_mode);
727 }
728}
729
730static struct intel_encoder *
731intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
732{
733 struct drm_device *dev = crtc->dev;
734 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
735 struct intel_encoder *intel_encoder, *ret = NULL;
736 int num_encoders = 0;
737
738 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
739 ret = intel_encoder;
740 num_encoders++;
741 }
742
743 if (num_encoders != 1)
744 WARN(1, "%d encoders on crtc for pipe %d\n", num_encoders,
745 intel_crtc->pipe);
746
747 BUG_ON(ret == NULL);
748 return ret;
749}
750
751void intel_ddi_put_crtc_pll(struct drm_crtc *crtc)
752{
753 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
754 struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
755 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
756 uint32_t val;
757
758 switch (intel_crtc->ddi_pll_sel) {
759 case PORT_CLK_SEL_SPLL:
760 plls->spll_refcount--;
761 if (plls->spll_refcount == 0) {
762 DRM_DEBUG_KMS("Disabling SPLL\n");
763 val = I915_READ(SPLL_CTL);
764 WARN_ON(!(val & SPLL_PLL_ENABLE));
765 I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
766 POSTING_READ(SPLL_CTL);
767 }
768 break;
769 case PORT_CLK_SEL_WRPLL1:
770 plls->wrpll1_refcount--;
771 if (plls->wrpll1_refcount == 0) {
772 DRM_DEBUG_KMS("Disabling WRPLL 1\n");
773 val = I915_READ(WRPLL_CTL1);
774 WARN_ON(!(val & WRPLL_PLL_ENABLE));
775 I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE);
776 POSTING_READ(WRPLL_CTL1);
777 }
778 break;
779 case PORT_CLK_SEL_WRPLL2:
780 plls->wrpll2_refcount--;
781 if (plls->wrpll2_refcount == 0) {
782 DRM_DEBUG_KMS("Disabling WRPLL 2\n");
783 val = I915_READ(WRPLL_CTL2);
784 WARN_ON(!(val & WRPLL_PLL_ENABLE));
785 I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE);
786 POSTING_READ(WRPLL_CTL2);
787 }
788 break;
789 }
790
791 WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n");
792 WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n");
793 WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n");
794
795 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
796}
797
798static void intel_ddi_calculate_wrpll(int clock, int *p, int *n2, int *r2)
799{
800 u32 i;
666 801
667 for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++) 802 for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++)
668 if (crtc->mode.clock <= wrpll_tmds_clock_table[i].clock) 803 if (clock <= wrpll_tmds_clock_table[i].clock)
669 break; 804 break;
670 805
671 if (i == ARRAY_SIZE(wrpll_tmds_clock_table)) 806 if (i == ARRAY_SIZE(wrpll_tmds_clock_table))
672 i--; 807 i--;
673 808
674 p = wrpll_tmds_clock_table[i].p; 809 *p = wrpll_tmds_clock_table[i].p;
675 n2 = wrpll_tmds_clock_table[i].n2; 810 *n2 = wrpll_tmds_clock_table[i].n2;
676 r2 = wrpll_tmds_clock_table[i].r2; 811 *r2 = wrpll_tmds_clock_table[i].r2;
677 812
678 if (wrpll_tmds_clock_table[i].clock != crtc->mode.clock) 813 if (wrpll_tmds_clock_table[i].clock != clock)
679 DRM_INFO("WR PLL: using settings for %dKHz on %dKHz mode\n", 814 DRM_INFO("WRPLL: using settings for %dKHz on %dKHz mode\n",
680 wrpll_tmds_clock_table[i].clock, crtc->mode.clock); 815 wrpll_tmds_clock_table[i].clock, clock);
681 816
682 DRM_DEBUG_KMS("WR PLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n", 817 DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
683 crtc->mode.clock, p, n2, r2); 818 clock, *p, *n2, *r2);
819}
684 820
685 /* Enable LCPLL if disabled */ 821bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock)
686 temp = I915_READ(LCPLL_CTL); 822{
687 if (temp & LCPLL_PLL_DISABLE) 823 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
688 I915_WRITE(LCPLL_CTL, 824 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
689 temp & ~LCPLL_PLL_DISABLE); 825 struct drm_encoder *encoder = &intel_encoder->base;
826 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
827 struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
828 int type = intel_encoder->type;
829 enum pipe pipe = intel_crtc->pipe;
830 uint32_t reg, val;
690 831
691 /* Configure WR PLL 1, program the correct divider values for 832 /* TODO: reuse PLLs when possible (compare values) */
692 * the desired frequency and wait for warmup */
693 I915_WRITE(WRPLL_CTL1,
694 WRPLL_PLL_ENABLE |
695 WRPLL_PLL_SELECT_LCPLL_2700 |
696 WRPLL_DIVIDER_REFERENCE(r2) |
697 WRPLL_DIVIDER_FEEDBACK(n2) |
698 WRPLL_DIVIDER_POST(p));
699 833
700 udelay(20); 834 intel_ddi_put_crtc_pll(crtc);
701 835
702 /* Use WRPLL1 clock to drive the output to the port, and tell the pipe to use 836 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
703 * this port for connection. 837 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
704 */ 838
705 I915_WRITE(PORT_CLK_SEL(port), 839 switch (intel_dp->link_bw) {
706 PORT_CLK_SEL_WRPLL1); 840 case DP_LINK_BW_1_62:
707 I915_WRITE(PIPE_CLK_SEL(pipe), 841 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
708 PIPE_CLK_SEL_PORT(port)); 842 break;
843 case DP_LINK_BW_2_7:
844 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
845 break;
846 case DP_LINK_BW_5_4:
847 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
848 break;
849 default:
850 DRM_ERROR("Link bandwidth %d unsupported\n",
851 intel_dp->link_bw);
852 return false;
853 }
854
855 /* We don't need to turn any PLL on because we'll use LCPLL. */
856 return true;
857
858 } else if (type == INTEL_OUTPUT_HDMI) {
859 int p, n2, r2;
860
861 if (plls->wrpll1_refcount == 0) {
862 DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
863 pipe_name(pipe));
864 plls->wrpll1_refcount++;
865 reg = WRPLL_CTL1;
866 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
867 } else if (plls->wrpll2_refcount == 0) {
868 DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
869 pipe_name(pipe));
870 plls->wrpll2_refcount++;
871 reg = WRPLL_CTL2;
872 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
873 } else {
874 DRM_ERROR("No WRPLLs available!\n");
875 return false;
876 }
709 877
878 WARN(I915_READ(reg) & WRPLL_PLL_ENABLE,
879 "WRPLL already enabled\n");
880
881 intel_ddi_calculate_wrpll(clock, &p, &n2, &r2);
882
883 val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
884 WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
885 WRPLL_DIVIDER_POST(p);
886
887 } else if (type == INTEL_OUTPUT_ANALOG) {
888 if (plls->spll_refcount == 0) {
889 DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
890 pipe_name(pipe));
891 plls->spll_refcount++;
892 reg = SPLL_CTL;
893 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
894 }
895
896 WARN(I915_READ(reg) & SPLL_PLL_ENABLE,
897 "SPLL already enabled\n");
898
899 val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
900
901 } else {
902 WARN(1, "Invalid DDI encoder type %d\n", type);
903 return false;
904 }
905
906 I915_WRITE(reg, val);
710 udelay(20); 907 udelay(20);
711 908
712 if (intel_hdmi->has_audio) { 909 return true;
713 /* Proper support for digital audio needs a new logic and a new set 910}
714 * of registers, so we leave it for future patch bombing.
715 */
716 DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
717 pipe_name(intel_crtc->pipe));
718 911
719 /* write eld */ 912void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
720 DRM_DEBUG_DRIVER("HDMI audio: write eld information\n"); 913{
721 intel_write_eld(encoder, adjusted_mode); 914 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
915 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
916 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
917 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
918 int type = intel_encoder->type;
919 uint32_t temp;
920
921 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
922
923 temp = TRANS_MSA_SYNC_CLK;
924 switch (intel_crtc->bpp) {
925 case 18:
926 temp |= TRANS_MSA_6_BPC;
927 break;
928 case 24:
929 temp |= TRANS_MSA_8_BPC;
930 break;
931 case 30:
932 temp |= TRANS_MSA_10_BPC;
933 break;
934 case 36:
935 temp |= TRANS_MSA_12_BPC;
936 break;
937 default:
938 temp |= TRANS_MSA_8_BPC;
939 WARN(1, "%d bpp unsupported by DDI function\n",
940 intel_crtc->bpp);
941 }
942 I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
722 } 943 }
944}
723 945
724 /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */ 946void intel_ddi_enable_pipe_func(struct drm_crtc *crtc)
725 temp = PIPE_DDI_FUNC_ENABLE | PIPE_DDI_SELECT_PORT(port); 947{
948 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
949 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
950 struct drm_encoder *encoder = &intel_encoder->base;
951 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
952 enum pipe pipe = intel_crtc->pipe;
953 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
954 enum port port = intel_ddi_get_encoder_port(intel_encoder);
955 int type = intel_encoder->type;
956 uint32_t temp;
957
958 /* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
959 temp = TRANS_DDI_FUNC_ENABLE;
960 temp |= TRANS_DDI_SELECT_PORT(port);
726 961
727 switch (intel_crtc->bpp) { 962 switch (intel_crtc->bpp) {
728 case 18: 963 case 18:
729 temp |= PIPE_DDI_BPC_6; 964 temp |= TRANS_DDI_BPC_6;
730 break; 965 break;
731 case 24: 966 case 24:
732 temp |= PIPE_DDI_BPC_8; 967 temp |= TRANS_DDI_BPC_8;
733 break; 968 break;
734 case 30: 969 case 30:
735 temp |= PIPE_DDI_BPC_10; 970 temp |= TRANS_DDI_BPC_10;
736 break; 971 break;
737 case 36: 972 case 36:
738 temp |= PIPE_DDI_BPC_12; 973 temp |= TRANS_DDI_BPC_12;
739 break; 974 break;
740 default: 975 default:
741 WARN(1, "%d bpp unsupported by pipe DDI function\n", 976 WARN(1, "%d bpp unsupported by transcoder DDI function\n",
742 intel_crtc->bpp); 977 intel_crtc->bpp);
743 } 978 }
744 979
745 if (intel_hdmi->has_hdmi_sink) 980 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
746 temp |= PIPE_DDI_MODE_SELECT_HDMI; 981 temp |= TRANS_DDI_PVSYNC;
982 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
983 temp |= TRANS_DDI_PHSYNC;
984
985 if (cpu_transcoder == TRANSCODER_EDP) {
986 switch (pipe) {
987 case PIPE_A:
988 temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
989 break;
990 case PIPE_B:
991 temp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
992 break;
993 case PIPE_C:
994 temp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
995 break;
996 default:
997 BUG();
998 break;
999 }
1000 }
1001
1002 if (type == INTEL_OUTPUT_HDMI) {
1003 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
1004
1005 if (intel_hdmi->has_hdmi_sink)
1006 temp |= TRANS_DDI_MODE_SELECT_HDMI;
1007 else
1008 temp |= TRANS_DDI_MODE_SELECT_DVI;
1009
1010 } else if (type == INTEL_OUTPUT_ANALOG) {
1011 temp |= TRANS_DDI_MODE_SELECT_FDI;
1012 temp |= (intel_crtc->fdi_lanes - 1) << 1;
1013
1014 } else if (type == INTEL_OUTPUT_DISPLAYPORT ||
1015 type == INTEL_OUTPUT_EDP) {
1016 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1017
1018 temp |= TRANS_DDI_MODE_SELECT_DP_SST;
1019
1020 switch (intel_dp->lane_count) {
1021 case 1:
1022 temp |= TRANS_DDI_PORT_WIDTH_X1;
1023 break;
1024 case 2:
1025 temp |= TRANS_DDI_PORT_WIDTH_X2;
1026 break;
1027 case 4:
1028 temp |= TRANS_DDI_PORT_WIDTH_X4;
1029 break;
1030 default:
1031 temp |= TRANS_DDI_PORT_WIDTH_X4;
1032 WARN(1, "Unsupported lane count %d\n",
1033 intel_dp->lane_count);
1034 }
1035
1036 } else {
1037 WARN(1, "Invalid encoder type %d for pipe %d\n",
1038 intel_encoder->type, pipe);
1039 }
1040
1041 I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
1042}
1043
1044void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
1045 enum transcoder cpu_transcoder)
1046{
1047 uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
1048 uint32_t val = I915_READ(reg);
1049
1050 val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK);
1051 val |= TRANS_DDI_PORT_NONE;
1052 I915_WRITE(reg, val);
1053}
1054
1055bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
1056{
1057 struct drm_device *dev = intel_connector->base.dev;
1058 struct drm_i915_private *dev_priv = dev->dev_private;
1059 struct intel_encoder *intel_encoder = intel_connector->encoder;
1060 int type = intel_connector->base.connector_type;
1061 enum port port = intel_ddi_get_encoder_port(intel_encoder);
1062 enum pipe pipe = 0;
1063 enum transcoder cpu_transcoder;
1064 uint32_t tmp;
1065
1066 if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
1067 return false;
1068
1069 if (port == PORT_A)
1070 cpu_transcoder = TRANSCODER_EDP;
747 else 1071 else
748 temp |= PIPE_DDI_MODE_SELECT_DVI; 1072 cpu_transcoder = pipe;
1073
1074 tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
749 1075
750 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 1076 switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
751 temp |= PIPE_DDI_PVSYNC; 1077 case TRANS_DDI_MODE_SELECT_HDMI:
752 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 1078 case TRANS_DDI_MODE_SELECT_DVI:
753 temp |= PIPE_DDI_PHSYNC; 1079 return (type == DRM_MODE_CONNECTOR_HDMIA);
1080
1081 case TRANS_DDI_MODE_SELECT_DP_SST:
1082 if (type == DRM_MODE_CONNECTOR_eDP)
1083 return true;
1084 case TRANS_DDI_MODE_SELECT_DP_MST:
1085 return (type == DRM_MODE_CONNECTOR_DisplayPort);
754 1086
755 I915_WRITE(DDI_FUNC_CTL(pipe), temp); 1087 case TRANS_DDI_MODE_SELECT_FDI:
1088 return (type == DRM_MODE_CONNECTOR_VGA);
756 1089
757 intel_hdmi->set_infoframes(encoder, adjusted_mode); 1090 default:
1091 return false;
1092 }
758} 1093}
759 1094
760bool intel_ddi_get_hw_state(struct intel_encoder *encoder, 1095bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
@@ -762,58 +1097,418 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
762{ 1097{
763 struct drm_device *dev = encoder->base.dev; 1098 struct drm_device *dev = encoder->base.dev;
764 struct drm_i915_private *dev_priv = dev->dev_private; 1099 struct drm_i915_private *dev_priv = dev->dev_private;
765 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 1100 enum port port = intel_ddi_get_encoder_port(encoder);
766 u32 tmp; 1101 u32 tmp;
767 int i; 1102 int i;
768 1103
769 tmp = I915_READ(DDI_BUF_CTL(intel_hdmi->ddi_port)); 1104 tmp = I915_READ(DDI_BUF_CTL(port));
770 1105
771 if (!(tmp & DDI_BUF_CTL_ENABLE)) 1106 if (!(tmp & DDI_BUF_CTL_ENABLE))
772 return false; 1107 return false;
773 1108
774 for_each_pipe(i) { 1109 if (port == PORT_A) {
775 tmp = I915_READ(DDI_FUNC_CTL(i)); 1110 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
776 1111
777 if ((tmp & PIPE_DDI_PORT_MASK) 1112 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
778 == PIPE_DDI_SELECT_PORT(intel_hdmi->ddi_port)) { 1113 case TRANS_DDI_EDP_INPUT_A_ON:
779 *pipe = i; 1114 case TRANS_DDI_EDP_INPUT_A_ONOFF:
780 return true; 1115 *pipe = PIPE_A;
1116 break;
1117 case TRANS_DDI_EDP_INPUT_B_ONOFF:
1118 *pipe = PIPE_B;
1119 break;
1120 case TRANS_DDI_EDP_INPUT_C_ONOFF:
1121 *pipe = PIPE_C;
1122 break;
1123 }
1124
1125 return true;
1126 } else {
1127 for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
1128 tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
1129
1130 if ((tmp & TRANS_DDI_PORT_MASK)
1131 == TRANS_DDI_SELECT_PORT(port)) {
1132 *pipe = i;
1133 return true;
1134 }
781 } 1135 }
782 } 1136 }
783 1137
784 DRM_DEBUG_KMS("No pipe for ddi port %i found\n", intel_hdmi->ddi_port); 1138 DRM_DEBUG_KMS("No pipe for ddi port %i found\n", port);
785 1139
786 return true; 1140 return true;
787} 1141}
788 1142
789void intel_enable_ddi(struct intel_encoder *encoder) 1143static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
1144 enum pipe pipe)
1145{
1146 uint32_t temp, ret;
1147 enum port port;
1148 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1149 pipe);
1150 int i;
1151
1152 if (cpu_transcoder == TRANSCODER_EDP) {
1153 port = PORT_A;
1154 } else {
1155 temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
1156 temp &= TRANS_DDI_PORT_MASK;
1157
1158 for (i = PORT_B; i <= PORT_E; i++)
1159 if (temp == TRANS_DDI_SELECT_PORT(i))
1160 port = i;
1161 }
1162
1163 ret = I915_READ(PORT_CLK_SEL(port));
1164
1165 DRM_DEBUG_KMS("Pipe %c connected to port %c using clock 0x%08x\n",
1166 pipe_name(pipe), port_name(port), ret);
1167
1168 return ret;
1169}
1170
1171void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
790{ 1172{
791 struct drm_device *dev = encoder->base.dev;
792 struct drm_i915_private *dev_priv = dev->dev_private; 1173 struct drm_i915_private *dev_priv = dev->dev_private;
793 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 1174 enum pipe pipe;
794 int port = intel_hdmi->ddi_port; 1175 struct intel_crtc *intel_crtc;
795 u32 temp;
796 1176
797 temp = I915_READ(DDI_BUF_CTL(port)); 1177 for_each_pipe(pipe) {
798 temp |= DDI_BUF_CTL_ENABLE; 1178 intel_crtc =
1179 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
799 1180
800 /* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width, 1181 if (!intel_crtc->active)
801 * and swing/emphasis values are ignored so nothing special needs 1182 continue;
802 * to be done besides enabling the port. 1183
803 */ 1184 intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
804 I915_WRITE(DDI_BUF_CTL(port), temp); 1185 pipe);
1186
1187 switch (intel_crtc->ddi_pll_sel) {
1188 case PORT_CLK_SEL_SPLL:
1189 dev_priv->ddi_plls.spll_refcount++;
1190 break;
1191 case PORT_CLK_SEL_WRPLL1:
1192 dev_priv->ddi_plls.wrpll1_refcount++;
1193 break;
1194 case PORT_CLK_SEL_WRPLL2:
1195 dev_priv->ddi_plls.wrpll2_refcount++;
1196 break;
1197 }
1198 }
805} 1199}
806 1200
807void intel_disable_ddi(struct intel_encoder *encoder) 1201void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
808{ 1202{
809 struct drm_device *dev = encoder->base.dev; 1203 struct drm_crtc *crtc = &intel_crtc->base;
1204 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
1205 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
1206 enum port port = intel_ddi_get_encoder_port(intel_encoder);
1207 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
1208
1209 if (cpu_transcoder != TRANSCODER_EDP)
1210 I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
1211 TRANS_CLK_SEL_PORT(port));
1212}
1213
1214void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
1215{
1216 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
1217 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
1218
1219 if (cpu_transcoder != TRANSCODER_EDP)
1220 I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
1221 TRANS_CLK_SEL_DISABLED);
1222}
1223
1224static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
1225{
1226 struct drm_encoder *encoder = &intel_encoder->base;
1227 struct drm_crtc *crtc = encoder->crtc;
1228 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
1229 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1230 enum port port = intel_ddi_get_encoder_port(intel_encoder);
1231 int type = intel_encoder->type;
1232
1233 if (type == INTEL_OUTPUT_EDP) {
1234 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1235 ironlake_edp_panel_vdd_on(intel_dp);
1236 ironlake_edp_panel_on(intel_dp);
1237 ironlake_edp_panel_vdd_off(intel_dp, true);
1238 }
1239
1240 WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
1241 I915_WRITE(PORT_CLK_SEL(port), intel_crtc->ddi_pll_sel);
1242
1243 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
1244 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1245
1246 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
1247 intel_dp_start_link_train(intel_dp);
1248 intel_dp_complete_link_train(intel_dp);
1249 }
1250}
1251
1252static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
1253{
1254 struct drm_encoder *encoder = &intel_encoder->base;
1255 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
1256 enum port port = intel_ddi_get_encoder_port(intel_encoder);
1257 int type = intel_encoder->type;
1258 uint32_t val;
1259 bool wait = false;
1260
1261 val = I915_READ(DDI_BUF_CTL(port));
1262 if (val & DDI_BUF_CTL_ENABLE) {
1263 val &= ~DDI_BUF_CTL_ENABLE;
1264 I915_WRITE(DDI_BUF_CTL(port), val);
1265 wait = true;
1266 }
1267
1268 val = I915_READ(DP_TP_CTL(port));
1269 val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
1270 val |= DP_TP_CTL_LINK_TRAIN_PAT1;
1271 I915_WRITE(DP_TP_CTL(port), val);
1272
1273 if (wait)
1274 intel_wait_ddi_buf_idle(dev_priv, port);
1275
1276 if (type == INTEL_OUTPUT_EDP) {
1277 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1278 ironlake_edp_panel_vdd_on(intel_dp);
1279 ironlake_edp_panel_off(intel_dp);
1280 }
1281
1282 I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
1283}
1284
1285static void intel_enable_ddi(struct intel_encoder *intel_encoder)
1286{
1287 struct drm_encoder *encoder = &intel_encoder->base;
1288 struct drm_device *dev = encoder->dev;
810 struct drm_i915_private *dev_priv = dev->dev_private; 1289 struct drm_i915_private *dev_priv = dev->dev_private;
811 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 1290 enum port port = intel_ddi_get_encoder_port(intel_encoder);
812 int port = intel_hdmi->ddi_port; 1291 int type = intel_encoder->type;
813 u32 temp; 1292
1293 if (type == INTEL_OUTPUT_HDMI) {
1294 /* In HDMI/DVI mode, the port width, and swing/emphasis values
1295 * are ignored so nothing special needs to be done besides
1296 * enabling the port.
1297 */
1298 I915_WRITE(DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE);
1299 } else if (type == INTEL_OUTPUT_EDP) {
1300 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1301
1302 ironlake_edp_backlight_on(intel_dp);
1303 }
1304}
1305
1306static void intel_disable_ddi(struct intel_encoder *intel_encoder)
1307{
1308 struct drm_encoder *encoder = &intel_encoder->base;
1309 int type = intel_encoder->type;
1310
1311 if (type == INTEL_OUTPUT_EDP) {
1312 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1313
1314 ironlake_edp_backlight_off(intel_dp);
1315 }
1316}
1317
1318int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
1319{
1320 if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
1321 return 450;
1322 else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) ==
1323 LCPLL_CLK_FREQ_450)
1324 return 450;
1325 else if (IS_ULT(dev_priv->dev))
1326 return 338;
1327 else
1328 return 540;
1329}
1330
1331void intel_ddi_pll_init(struct drm_device *dev)
1332{
1333 struct drm_i915_private *dev_priv = dev->dev_private;
1334 uint32_t val = I915_READ(LCPLL_CTL);
1335
1336 /* The LCPLL register should be turned on by the BIOS. For now let's
1337 * just check its state and print errors in case something is wrong.
1338 * Don't even try to turn it on.
1339 */
1340
1341 DRM_DEBUG_KMS("CDCLK running at %dMHz\n",
1342 intel_ddi_get_cdclk_freq(dev_priv));
1343
1344 if (val & LCPLL_CD_SOURCE_FCLK)
1345 DRM_ERROR("CDCLK source is not LCPLL\n");
1346
1347 if (val & LCPLL_PLL_DISABLE)
1348 DRM_ERROR("LCPLL is disabled\n");
1349}
1350
1351void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
1352{
1353 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
1354 struct intel_dp *intel_dp = &intel_dig_port->dp;
1355 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
1356 enum port port = intel_dig_port->port;
1357 bool wait;
1358 uint32_t val;
1359
1360 if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
1361 val = I915_READ(DDI_BUF_CTL(port));
1362 if (val & DDI_BUF_CTL_ENABLE) {
1363 val &= ~DDI_BUF_CTL_ENABLE;
1364 I915_WRITE(DDI_BUF_CTL(port), val);
1365 wait = true;
1366 }
1367
1368 val = I915_READ(DP_TP_CTL(port));
1369 val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
1370 val |= DP_TP_CTL_LINK_TRAIN_PAT1;
1371 I915_WRITE(DP_TP_CTL(port), val);
1372 POSTING_READ(DP_TP_CTL(port));
1373
1374 if (wait)
1375 intel_wait_ddi_buf_idle(dev_priv, port);
1376 }
1377
1378 val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
1379 DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
1380 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
1381 val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
1382 I915_WRITE(DP_TP_CTL(port), val);
1383 POSTING_READ(DP_TP_CTL(port));
1384
1385 intel_dp->DP |= DDI_BUF_CTL_ENABLE;
1386 I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP);
1387 POSTING_READ(DDI_BUF_CTL(port));
1388
1389 udelay(600);
1390}
1391
1392void intel_ddi_fdi_disable(struct drm_crtc *crtc)
1393{
1394 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
1395 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
1396 uint32_t val;
1397
1398 intel_ddi_post_disable(intel_encoder);
1399
1400 val = I915_READ(_FDI_RXA_CTL);
1401 val &= ~FDI_RX_ENABLE;
1402 I915_WRITE(_FDI_RXA_CTL, val);
1403
1404 val = I915_READ(_FDI_RXA_MISC);
1405 val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
1406 val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
1407 I915_WRITE(_FDI_RXA_MISC, val);
1408
1409 val = I915_READ(_FDI_RXA_CTL);
1410 val &= ~FDI_PCDCLK;
1411 I915_WRITE(_FDI_RXA_CTL, val);
1412
1413 val = I915_READ(_FDI_RXA_CTL);
1414 val &= ~FDI_RX_PLL_ENABLE;
1415 I915_WRITE(_FDI_RXA_CTL, val);
1416}
1417
1418static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
1419{
1420 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
1421 int type = intel_encoder->type;
1422
1423 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP)
1424 intel_dp_check_link_status(intel_dp);
1425}
1426
1427static void intel_ddi_destroy(struct drm_encoder *encoder)
1428{
1429 /* HDMI has nothing special to destroy, so we can go with this. */
1430 intel_dp_encoder_destroy(encoder);
1431}
1432
1433static bool intel_ddi_mode_fixup(struct drm_encoder *encoder,
1434 const struct drm_display_mode *mode,
1435 struct drm_display_mode *adjusted_mode)
1436{
1437 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
1438 int type = intel_encoder->type;
1439
1440 WARN(type == INTEL_OUTPUT_UNKNOWN, "mode_fixup() on unknown output!\n");
1441
1442 if (type == INTEL_OUTPUT_HDMI)
1443 return intel_hdmi_mode_fixup(encoder, mode, adjusted_mode);
1444 else
1445 return intel_dp_mode_fixup(encoder, mode, adjusted_mode);
1446}
1447
1448static const struct drm_encoder_funcs intel_ddi_funcs = {
1449 .destroy = intel_ddi_destroy,
1450};
1451
1452static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
1453 .mode_fixup = intel_ddi_mode_fixup,
1454 .mode_set = intel_ddi_mode_set,
1455 .disable = intel_encoder_noop,
1456};
1457
1458void intel_ddi_init(struct drm_device *dev, enum port port)
1459{
1460 struct intel_digital_port *intel_dig_port;
1461 struct intel_encoder *intel_encoder;
1462 struct drm_encoder *encoder;
1463 struct intel_connector *hdmi_connector = NULL;
1464 struct intel_connector *dp_connector = NULL;
1465
1466 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
1467 if (!intel_dig_port)
1468 return;
1469
1470 dp_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
1471 if (!dp_connector) {
1472 kfree(intel_dig_port);
1473 return;
1474 }
1475
1476 if (port != PORT_A) {
1477 hdmi_connector = kzalloc(sizeof(struct intel_connector),
1478 GFP_KERNEL);
1479 if (!hdmi_connector) {
1480 kfree(dp_connector);
1481 kfree(intel_dig_port);
1482 return;
1483 }
1484 }
1485
1486 intel_encoder = &intel_dig_port->base;
1487 encoder = &intel_encoder->base;
1488
1489 drm_encoder_init(dev, encoder, &intel_ddi_funcs,
1490 DRM_MODE_ENCODER_TMDS);
1491 drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs);
1492
1493 intel_encoder->enable = intel_enable_ddi;
1494 intel_encoder->pre_enable = intel_ddi_pre_enable;
1495 intel_encoder->disable = intel_disable_ddi;
1496 intel_encoder->post_disable = intel_ddi_post_disable;
1497 intel_encoder->get_hw_state = intel_ddi_get_hw_state;
1498
1499 intel_dig_port->port = port;
1500 if (hdmi_connector)
1501 intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port);
1502 else
1503 intel_dig_port->hdmi.sdvox_reg = 0;
1504 intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
814 1505
815 temp = I915_READ(DDI_BUF_CTL(port)); 1506 intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
816 temp &= ~DDI_BUF_CTL_ENABLE; 1507 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
1508 intel_encoder->cloneable = false;
1509 intel_encoder->hot_plug = intel_ddi_hot_plug;
817 1510
818 I915_WRITE(DDI_BUF_CTL(port), temp); 1511 if (hdmi_connector)
1512 intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
1513 intel_dp_init_connector(intel_dig_port, dp_connector);
819} 1514}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 461a637f1ef7..da1ad9c80bb5 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -41,8 +41,6 @@
41#include <drm/drm_crtc_helper.h> 41#include <drm/drm_crtc_helper.h>
42#include <linux/dma_remapping.h> 42#include <linux/dma_remapping.h>
43 43
44#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
45
46bool intel_pipe_has_type(struct drm_crtc *crtc, int type); 44bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
47static void intel_increase_pllclock(struct drm_crtc *crtc); 45static void intel_increase_pllclock(struct drm_crtc *crtc);
48static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); 46static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
@@ -80,6 +78,16 @@ struct intel_limit {
80/* FDI */ 78/* FDI */
81#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */ 79#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */
82 80
81int
82intel_pch_rawclk(struct drm_device *dev)
83{
84 struct drm_i915_private *dev_priv = dev->dev_private;
85
86 WARN_ON(!HAS_PCH_SPLIT(dev));
87
88 return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
89}
90
83static bool 91static bool
84intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 92intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
85 int target, int refclk, intel_clock_t *match_clock, 93 int target, int refclk, intel_clock_t *match_clock,
@@ -380,7 +388,7 @@ static const intel_limit_t intel_limits_vlv_dac = {
380 388
381static const intel_limit_t intel_limits_vlv_hdmi = { 389static const intel_limit_t intel_limits_vlv_hdmi = {
382 .dot = { .min = 20000, .max = 165000 }, 390 .dot = { .min = 20000, .max = 165000 },
383 .vco = { .min = 5994000, .max = 4000000 }, 391 .vco = { .min = 4000000, .max = 5994000},
384 .n = { .min = 1, .max = 7 }, 392 .n = { .min = 1, .max = 7 },
385 .m = { .min = 60, .max = 300 }, /* guess */ 393 .m = { .min = 60, .max = 300 }, /* guess */
386 .m1 = { .min = 2, .max = 3 }, 394 .m1 = { .min = 2, .max = 3 },
@@ -393,10 +401,10 @@ static const intel_limit_t intel_limits_vlv_hdmi = {
393}; 401};
394 402
395static const intel_limit_t intel_limits_vlv_dp = { 403static const intel_limit_t intel_limits_vlv_dp = {
396 .dot = { .min = 162000, .max = 270000 }, 404 .dot = { .min = 25000, .max = 270000 },
397 .vco = { .min = 5994000, .max = 4000000 }, 405 .vco = { .min = 4000000, .max = 6000000 },
398 .n = { .min = 1, .max = 7 }, 406 .n = { .min = 1, .max = 7 },
399 .m = { .min = 60, .max = 300 }, /* guess */ 407 .m = { .min = 22, .max = 450 },
400 .m1 = { .min = 2, .max = 3 }, 408 .m1 = { .min = 2, .max = 3 },
401 .m2 = { .min = 11, .max = 156 }, 409 .m2 = { .min = 11, .max = 156 },
402 .p = { .min = 10, .max = 30 }, 410 .p = { .min = 10, .max = 30 },
@@ -531,7 +539,7 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
531 limit = &intel_limits_ironlake_single_lvds; 539 limit = &intel_limits_ironlake_single_lvds;
532 } 540 }
533 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || 541 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
534 HAS_eDP) 542 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
535 limit = &intel_limits_ironlake_display_port; 543 limit = &intel_limits_ironlake_display_port;
536 else 544 else
537 limit = &intel_limits_ironlake_dac; 545 limit = &intel_limits_ironlake_dac;
@@ -927,6 +935,15 @@ intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
927 return true; 935 return true;
928} 936}
929 937
938enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
939 enum pipe pipe)
940{
941 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
942 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
943
944 return intel_crtc->cpu_transcoder;
945}
946
930static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe) 947static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
931{ 948{
932 struct drm_i915_private *dev_priv = dev->dev_private; 949 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -999,9 +1016,11 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
999void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) 1016void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
1000{ 1017{
1001 struct drm_i915_private *dev_priv = dev->dev_private; 1018 struct drm_i915_private *dev_priv = dev->dev_private;
1019 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1020 pipe);
1002 1021
1003 if (INTEL_INFO(dev)->gen >= 4) { 1022 if (INTEL_INFO(dev)->gen >= 4) {
1004 int reg = PIPECONF(pipe); 1023 int reg = PIPECONF(cpu_transcoder);
1005 1024
1006 /* Wait for the Pipe State to go off */ 1025 /* Wait for the Pipe State to go off */
1007 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 1026 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
@@ -1103,12 +1122,14 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1103 int reg; 1122 int reg;
1104 u32 val; 1123 u32 val;
1105 bool cur_state; 1124 bool cur_state;
1125 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1126 pipe);
1106 1127
1107 if (IS_HASWELL(dev_priv->dev)) { 1128 if (IS_HASWELL(dev_priv->dev)) {
1108 /* On Haswell, DDI is used instead of FDI_TX_CTL */ 1129 /* On Haswell, DDI is used instead of FDI_TX_CTL */
1109 reg = DDI_FUNC_CTL(pipe); 1130 reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
1110 val = I915_READ(reg); 1131 val = I915_READ(reg);
1111 cur_state = !!(val & PIPE_DDI_FUNC_ENABLE); 1132 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1112 } else { 1133 } else {
1113 reg = FDI_TX_CTL(pipe); 1134 reg = FDI_TX_CTL(pipe);
1114 val = I915_READ(reg); 1135 val = I915_READ(reg);
@@ -1128,14 +1149,9 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1128 u32 val; 1149 u32 val;
1129 bool cur_state; 1150 bool cur_state;
1130 1151
1131 if (IS_HASWELL(dev_priv->dev) && pipe > 0) { 1152 reg = FDI_RX_CTL(pipe);
1132 DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n"); 1153 val = I915_READ(reg);
1133 return; 1154 cur_state = !!(val & FDI_RX_ENABLE);
1134 } else {
1135 reg = FDI_RX_CTL(pipe);
1136 val = I915_READ(reg);
1137 cur_state = !!(val & FDI_RX_ENABLE);
1138 }
1139 WARN(cur_state != state, 1155 WARN(cur_state != state,
1140 "FDI RX state assertion failure (expected %s, current %s)\n", 1156 "FDI RX state assertion failure (expected %s, current %s)\n",
1141 state_string(state), state_string(cur_state)); 1157 state_string(state), state_string(cur_state));
@@ -1168,10 +1184,6 @@ static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
1168 int reg; 1184 int reg;
1169 u32 val; 1185 u32 val;
1170 1186
1171 if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
1172 DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n");
1173 return;
1174 }
1175 reg = FDI_RX_CTL(pipe); 1187 reg = FDI_RX_CTL(pipe);
1176 val = I915_READ(reg); 1188 val = I915_READ(reg);
1177 WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n"); 1189 WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
@@ -1212,12 +1224,14 @@ void assert_pipe(struct drm_i915_private *dev_priv,
1212 int reg; 1224 int reg;
1213 u32 val; 1225 u32 val;
1214 bool cur_state; 1226 bool cur_state;
1227 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1228 pipe);
1215 1229
1216 /* if we need the pipe A quirk it must be always on */ 1230 /* if we need the pipe A quirk it must be always on */
1217 if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) 1231 if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
1218 state = true; 1232 state = true;
1219 1233
1220 reg = PIPECONF(pipe); 1234 reg = PIPECONF(cpu_transcoder);
1221 val = I915_READ(reg); 1235 val = I915_READ(reg);
1222 cur_state = !!(val & PIPECONF_ENABLE); 1236 cur_state = !!(val & PIPECONF_ENABLE);
1223 WARN(cur_state != state, 1237 WARN(cur_state != state,
@@ -1492,24 +1506,26 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1492 1506
1493/* SBI access */ 1507/* SBI access */
1494static void 1508static void
1495intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value) 1509intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
1510 enum intel_sbi_destination destination)
1496{ 1511{
1497 unsigned long flags; 1512 unsigned long flags;
1513 u32 tmp;
1498 1514
1499 spin_lock_irqsave(&dev_priv->dpio_lock, flags); 1515 spin_lock_irqsave(&dev_priv->dpio_lock, flags);
1500 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 1516 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
1501 100)) {
1502 DRM_ERROR("timeout waiting for SBI to become ready\n"); 1517 DRM_ERROR("timeout waiting for SBI to become ready\n");
1503 goto out_unlock; 1518 goto out_unlock;
1504 } 1519 }
1505 1520
1506 I915_WRITE(SBI_ADDR, 1521 I915_WRITE(SBI_ADDR, (reg << 16));
1507 (reg << 16)); 1522 I915_WRITE(SBI_DATA, value);
1508 I915_WRITE(SBI_DATA, 1523
1509 value); 1524 if (destination == SBI_ICLK)
1510 I915_WRITE(SBI_CTL_STAT, 1525 tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
1511 SBI_BUSY | 1526 else
1512 SBI_CTL_OP_CRWR); 1527 tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
1528 I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
1513 1529
1514 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, 1530 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
1515 100)) { 1531 100)) {
@@ -1522,23 +1538,25 @@ out_unlock:
1522} 1538}
1523 1539
1524static u32 1540static u32
1525intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg) 1541intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
1542 enum intel_sbi_destination destination)
1526{ 1543{
1527 unsigned long flags; 1544 unsigned long flags;
1528 u32 value = 0; 1545 u32 value = 0;
1529 1546
1530 spin_lock_irqsave(&dev_priv->dpio_lock, flags); 1547 spin_lock_irqsave(&dev_priv->dpio_lock, flags);
1531 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 1548 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
1532 100)) {
1533 DRM_ERROR("timeout waiting for SBI to become ready\n"); 1549 DRM_ERROR("timeout waiting for SBI to become ready\n");
1534 goto out_unlock; 1550 goto out_unlock;
1535 } 1551 }
1536 1552
1537 I915_WRITE(SBI_ADDR, 1553 I915_WRITE(SBI_ADDR, (reg << 16));
1538 (reg << 16)); 1554
1539 I915_WRITE(SBI_CTL_STAT, 1555 if (destination == SBI_ICLK)
1540 SBI_BUSY | 1556 value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
1541 SBI_CTL_OP_CRRD); 1557 else
1558 value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
1559 I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
1542 1560
1543 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, 1561 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
1544 100)) { 1562 100)) {
@@ -1554,14 +1572,14 @@ out_unlock:
1554} 1572}
1555 1573
1556/** 1574/**
1557 * intel_enable_pch_pll - enable PCH PLL 1575 * ironlake_enable_pch_pll - enable PCH PLL
1558 * @dev_priv: i915 private structure 1576 * @dev_priv: i915 private structure
1559 * @pipe: pipe PLL to enable 1577 * @pipe: pipe PLL to enable
1560 * 1578 *
1561 * The PCH PLL needs to be enabled before the PCH transcoder, since it 1579 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1562 * drives the transcoder clock. 1580 * drives the transcoder clock.
1563 */ 1581 */
1564static void intel_enable_pch_pll(struct intel_crtc *intel_crtc) 1582static void ironlake_enable_pch_pll(struct intel_crtc *intel_crtc)
1565{ 1583{
1566 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; 1584 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
1567 struct intel_pch_pll *pll; 1585 struct intel_pch_pll *pll;
@@ -1645,12 +1663,12 @@ static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
1645 pll->on = false; 1663 pll->on = false;
1646} 1664}
1647 1665
1648static void intel_enable_transcoder(struct drm_i915_private *dev_priv, 1666static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1649 enum pipe pipe) 1667 enum pipe pipe)
1650{ 1668{
1651 int reg; 1669 struct drm_device *dev = dev_priv->dev;
1652 u32 val, pipeconf_val;
1653 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 1670 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1671 uint32_t reg, val, pipeconf_val;
1654 1672
1655 /* PCH only available on ILK+ */ 1673 /* PCH only available on ILK+ */
1656 BUG_ON(dev_priv->info->gen < 5); 1674 BUG_ON(dev_priv->info->gen < 5);
@@ -1664,10 +1682,15 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1664 assert_fdi_tx_enabled(dev_priv, pipe); 1682 assert_fdi_tx_enabled(dev_priv, pipe);
1665 assert_fdi_rx_enabled(dev_priv, pipe); 1683 assert_fdi_rx_enabled(dev_priv, pipe);
1666 1684
1667 if (IS_HASWELL(dev_priv->dev) && pipe > 0) { 1685 if (HAS_PCH_CPT(dev)) {
1668 DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n"); 1686 /* Workaround: Set the timing override bit before enabling the
1669 return; 1687 * pch transcoder. */
1688 reg = TRANS_CHICKEN2(pipe);
1689 val = I915_READ(reg);
1690 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1691 I915_WRITE(reg, val);
1670 } 1692 }
1693
1671 reg = TRANSCONF(pipe); 1694 reg = TRANSCONF(pipe);
1672 val = I915_READ(reg); 1695 val = I915_READ(reg);
1673 pipeconf_val = I915_READ(PIPECONF(pipe)); 1696 pipeconf_val = I915_READ(PIPECONF(pipe));
@@ -1696,11 +1719,42 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1696 DRM_ERROR("failed to enable transcoder %d\n", pipe); 1719 DRM_ERROR("failed to enable transcoder %d\n", pipe);
1697} 1720}
1698 1721
1699static void intel_disable_transcoder(struct drm_i915_private *dev_priv, 1722static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1700 enum pipe pipe) 1723 enum transcoder cpu_transcoder)
1701{ 1724{
1702 int reg; 1725 u32 val, pipeconf_val;
1703 u32 val; 1726
1727 /* PCH only available on ILK+ */
1728 BUG_ON(dev_priv->info->gen < 5);
1729
1730 /* FDI must be feeding us bits for PCH ports */
1731 assert_fdi_tx_enabled(dev_priv, cpu_transcoder);
1732 assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
1733
1734 /* Workaround: set timing override bit. */
1735 val = I915_READ(_TRANSA_CHICKEN2);
1736 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1737 I915_WRITE(_TRANSA_CHICKEN2, val);
1738
1739 val = TRANS_ENABLE;
1740 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1741
1742 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1743 PIPECONF_INTERLACED_ILK)
1744 val |= TRANS_INTERLACED;
1745 else
1746 val |= TRANS_PROGRESSIVE;
1747
1748 I915_WRITE(TRANSCONF(TRANSCODER_A), val);
1749 if (wait_for(I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE, 100))
1750 DRM_ERROR("Failed to enable PCH transcoder\n");
1751}
1752
1753static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1754 enum pipe pipe)
1755{
1756 struct drm_device *dev = dev_priv->dev;
1757 uint32_t reg, val;
1704 1758
1705 /* FDI relies on the transcoder */ 1759 /* FDI relies on the transcoder */
1706 assert_fdi_tx_disabled(dev_priv, pipe); 1760 assert_fdi_tx_disabled(dev_priv, pipe);
@@ -1716,6 +1770,31 @@ static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
1716 /* wait for PCH transcoder off, transcoder state */ 1770 /* wait for PCH transcoder off, transcoder state */
1717 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) 1771 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1718 DRM_ERROR("failed to disable transcoder %d\n", pipe); 1772 DRM_ERROR("failed to disable transcoder %d\n", pipe);
1773
1774 if (!HAS_PCH_IBX(dev)) {
1775 /* Workaround: Clear the timing override chicken bit again. */
1776 reg = TRANS_CHICKEN2(pipe);
1777 val = I915_READ(reg);
1778 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1779 I915_WRITE(reg, val);
1780 }
1781}
1782
1783static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1784{
1785 u32 val;
1786
1787 val = I915_READ(_TRANSACONF);
1788 val &= ~TRANS_ENABLE;
1789 I915_WRITE(_TRANSACONF, val);
1790 /* wait for PCH transcoder off, transcoder state */
1791 if (wait_for((I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE) == 0, 50))
1792 DRM_ERROR("Failed to disable PCH transcoder\n");
1793
1794 /* Workaround: clear timing override bit. */
1795 val = I915_READ(_TRANSA_CHICKEN2);
1796 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1797 I915_WRITE(_TRANSA_CHICKEN2, val);
1719} 1798}
1720 1799
1721/** 1800/**
@@ -1735,9 +1814,17 @@ static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
1735static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, 1814static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1736 bool pch_port) 1815 bool pch_port)
1737{ 1816{
1817 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1818 pipe);
1819 enum transcoder pch_transcoder;
1738 int reg; 1820 int reg;
1739 u32 val; 1821 u32 val;
1740 1822
1823 if (IS_HASWELL(dev_priv->dev))
1824 pch_transcoder = TRANSCODER_A;
1825 else
1826 pch_transcoder = pipe;
1827
1741 /* 1828 /*
1742 * A pipe without a PLL won't actually be able to drive bits from 1829 * A pipe without a PLL won't actually be able to drive bits from
1743 * a plane. On ILK+ the pipe PLLs are integrated, so we don't 1830 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
@@ -1748,13 +1835,13 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1748 else { 1835 else {
1749 if (pch_port) { 1836 if (pch_port) {
1750 /* if driving the PCH, we need FDI enabled */ 1837 /* if driving the PCH, we need FDI enabled */
1751 assert_fdi_rx_pll_enabled(dev_priv, pipe); 1838 assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
1752 assert_fdi_tx_pll_enabled(dev_priv, pipe); 1839 assert_fdi_tx_pll_enabled(dev_priv, cpu_transcoder);
1753 } 1840 }
1754 /* FIXME: assert CPU port conditions for SNB+ */ 1841 /* FIXME: assert CPU port conditions for SNB+ */
1755 } 1842 }
1756 1843
1757 reg = PIPECONF(pipe); 1844 reg = PIPECONF(cpu_transcoder);
1758 val = I915_READ(reg); 1845 val = I915_READ(reg);
1759 if (val & PIPECONF_ENABLE) 1846 if (val & PIPECONF_ENABLE)
1760 return; 1847 return;
@@ -1778,6 +1865,8 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1778static void intel_disable_pipe(struct drm_i915_private *dev_priv, 1865static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1779 enum pipe pipe) 1866 enum pipe pipe)
1780{ 1867{
1868 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1869 pipe);
1781 int reg; 1870 int reg;
1782 u32 val; 1871 u32 val;
1783 1872
@@ -1791,7 +1880,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1791 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) 1880 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1792 return; 1881 return;
1793 1882
1794 reg = PIPECONF(pipe); 1883 reg = PIPECONF(cpu_transcoder);
1795 val = I915_READ(reg); 1884 val = I915_READ(reg);
1796 if ((val & PIPECONF_ENABLE) == 0) 1885 if ((val & PIPECONF_ENABLE) == 0)
1797 return; 1886 return;
@@ -1807,8 +1896,10 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1807void intel_flush_display_plane(struct drm_i915_private *dev_priv, 1896void intel_flush_display_plane(struct drm_i915_private *dev_priv,
1808 enum plane plane) 1897 enum plane plane)
1809{ 1898{
1810 I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane))); 1899 if (dev_priv->info->gen >= 4)
1811 I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane))); 1900 I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1901 else
1902 I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1812} 1903}
1813 1904
1814/** 1905/**
@@ -1926,9 +2017,9 @@ void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
1926 2017
1927/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel 2018/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
1928 * is assumed to be a power-of-two. */ 2019 * is assumed to be a power-of-two. */
1929static unsigned long gen4_compute_dspaddr_offset_xtiled(int *x, int *y, 2020unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
1930 unsigned int bpp, 2021 unsigned int bpp,
1931 unsigned int pitch) 2022 unsigned int pitch)
1932{ 2023{
1933 int tile_rows, tiles; 2024 int tile_rows, tiles;
1934 2025
@@ -1969,24 +2060,38 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1969 dspcntr = I915_READ(reg); 2060 dspcntr = I915_READ(reg);
1970 /* Mask out pixel format bits in case we change it */ 2061 /* Mask out pixel format bits in case we change it */
1971 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; 2062 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
1972 switch (fb->bits_per_pixel) { 2063 switch (fb->pixel_format) {
1973 case 8: 2064 case DRM_FORMAT_C8:
1974 dspcntr |= DISPPLANE_8BPP; 2065 dspcntr |= DISPPLANE_8BPP;
1975 break; 2066 break;
1976 case 16: 2067 case DRM_FORMAT_XRGB1555:
1977 if (fb->depth == 15) 2068 case DRM_FORMAT_ARGB1555:
1978 dspcntr |= DISPPLANE_15_16BPP; 2069 dspcntr |= DISPPLANE_BGRX555;
1979 else
1980 dspcntr |= DISPPLANE_16BPP;
1981 break; 2070 break;
1982 case 24: 2071 case DRM_FORMAT_RGB565:
1983 case 32: 2072 dspcntr |= DISPPLANE_BGRX565;
1984 dspcntr |= DISPPLANE_32BPP_NO_ALPHA; 2073 break;
2074 case DRM_FORMAT_XRGB8888:
2075 case DRM_FORMAT_ARGB8888:
2076 dspcntr |= DISPPLANE_BGRX888;
2077 break;
2078 case DRM_FORMAT_XBGR8888:
2079 case DRM_FORMAT_ABGR8888:
2080 dspcntr |= DISPPLANE_RGBX888;
2081 break;
2082 case DRM_FORMAT_XRGB2101010:
2083 case DRM_FORMAT_ARGB2101010:
2084 dspcntr |= DISPPLANE_BGRX101010;
2085 break;
2086 case DRM_FORMAT_XBGR2101010:
2087 case DRM_FORMAT_ABGR2101010:
2088 dspcntr |= DISPPLANE_RGBX101010;
1985 break; 2089 break;
1986 default: 2090 default:
1987 DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel); 2091 DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
1988 return -EINVAL; 2092 return -EINVAL;
1989 } 2093 }
2094
1990 if (INTEL_INFO(dev)->gen >= 4) { 2095 if (INTEL_INFO(dev)->gen >= 4) {
1991 if (obj->tiling_mode != I915_TILING_NONE) 2096 if (obj->tiling_mode != I915_TILING_NONE)
1992 dspcntr |= DISPPLANE_TILED; 2097 dspcntr |= DISPPLANE_TILED;
@@ -2000,9 +2105,9 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2000 2105
2001 if (INTEL_INFO(dev)->gen >= 4) { 2106 if (INTEL_INFO(dev)->gen >= 4) {
2002 intel_crtc->dspaddr_offset = 2107 intel_crtc->dspaddr_offset =
2003 gen4_compute_dspaddr_offset_xtiled(&x, &y, 2108 intel_gen4_compute_offset_xtiled(&x, &y,
2004 fb->bits_per_pixel / 8, 2109 fb->bits_per_pixel / 8,
2005 fb->pitches[0]); 2110 fb->pitches[0]);
2006 linear_offset -= intel_crtc->dspaddr_offset; 2111 linear_offset -= intel_crtc->dspaddr_offset;
2007 } else { 2112 } else {
2008 intel_crtc->dspaddr_offset = linear_offset; 2113 intel_crtc->dspaddr_offset = linear_offset;
@@ -2053,27 +2158,31 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
2053 dspcntr = I915_READ(reg); 2158 dspcntr = I915_READ(reg);
2054 /* Mask out pixel format bits in case we change it */ 2159 /* Mask out pixel format bits in case we change it */
2055 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; 2160 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2056 switch (fb->bits_per_pixel) { 2161 switch (fb->pixel_format) {
2057 case 8: 2162 case DRM_FORMAT_C8:
2058 dspcntr |= DISPPLANE_8BPP; 2163 dspcntr |= DISPPLANE_8BPP;
2059 break; 2164 break;
2060 case 16: 2165 case DRM_FORMAT_RGB565:
2061 if (fb->depth != 16) 2166 dspcntr |= DISPPLANE_BGRX565;
2062 return -EINVAL;
2063
2064 dspcntr |= DISPPLANE_16BPP;
2065 break; 2167 break;
2066 case 24: 2168 case DRM_FORMAT_XRGB8888:
2067 case 32: 2169 case DRM_FORMAT_ARGB8888:
2068 if (fb->depth == 24) 2170 dspcntr |= DISPPLANE_BGRX888;
2069 dspcntr |= DISPPLANE_32BPP_NO_ALPHA; 2171 break;
2070 else if (fb->depth == 30) 2172 case DRM_FORMAT_XBGR8888:
2071 dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA; 2173 case DRM_FORMAT_ABGR8888:
2072 else 2174 dspcntr |= DISPPLANE_RGBX888;
2073 return -EINVAL; 2175 break;
2176 case DRM_FORMAT_XRGB2101010:
2177 case DRM_FORMAT_ARGB2101010:
2178 dspcntr |= DISPPLANE_BGRX101010;
2179 break;
2180 case DRM_FORMAT_XBGR2101010:
2181 case DRM_FORMAT_ABGR2101010:
2182 dspcntr |= DISPPLANE_RGBX101010;
2074 break; 2183 break;
2075 default: 2184 default:
2076 DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel); 2185 DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
2077 return -EINVAL; 2186 return -EINVAL;
2078 } 2187 }
2079 2188
@@ -2089,9 +2198,9 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
2089 2198
2090 linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); 2199 linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2091 intel_crtc->dspaddr_offset = 2200 intel_crtc->dspaddr_offset =
2092 gen4_compute_dspaddr_offset_xtiled(&x, &y, 2201 intel_gen4_compute_offset_xtiled(&x, &y,
2093 fb->bits_per_pixel / 8, 2202 fb->bits_per_pixel / 8,
2094 fb->pitches[0]); 2203 fb->pitches[0]);
2095 linear_offset -= intel_crtc->dspaddr_offset; 2204 linear_offset -= intel_crtc->dspaddr_offset;
2096 2205
2097 DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", 2206 DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
@@ -2099,8 +2208,12 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
2099 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); 2208 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2100 I915_MODIFY_DISPBASE(DSPSURF(plane), 2209 I915_MODIFY_DISPBASE(DSPSURF(plane),
2101 obj->gtt_offset + intel_crtc->dspaddr_offset); 2210 obj->gtt_offset + intel_crtc->dspaddr_offset);
2102 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); 2211 if (IS_HASWELL(dev)) {
2103 I915_WRITE(DSPLINOFF(plane), linear_offset); 2212 I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2213 } else {
2214 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2215 I915_WRITE(DSPLINOFF(plane), linear_offset);
2216 }
2104 POSTING_READ(reg); 2217 POSTING_READ(reg);
2105 2218
2106 return 0; 2219 return 0;
@@ -2148,13 +2261,39 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
2148 return ret; 2261 return ret;
2149} 2262}
2150 2263
2264static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
2265{
2266 struct drm_device *dev = crtc->dev;
2267 struct drm_i915_master_private *master_priv;
2268 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2269
2270 if (!dev->primary->master)
2271 return;
2272
2273 master_priv = dev->primary->master->driver_priv;
2274 if (!master_priv->sarea_priv)
2275 return;
2276
2277 switch (intel_crtc->pipe) {
2278 case 0:
2279 master_priv->sarea_priv->pipeA_x = x;
2280 master_priv->sarea_priv->pipeA_y = y;
2281 break;
2282 case 1:
2283 master_priv->sarea_priv->pipeB_x = x;
2284 master_priv->sarea_priv->pipeB_y = y;
2285 break;
2286 default:
2287 break;
2288 }
2289}
2290
2151static int 2291static int
2152intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, 2292intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2153 struct drm_framebuffer *fb) 2293 struct drm_framebuffer *fb)
2154{ 2294{
2155 struct drm_device *dev = crtc->dev; 2295 struct drm_device *dev = crtc->dev;
2156 struct drm_i915_private *dev_priv = dev->dev_private; 2296 struct drm_i915_private *dev_priv = dev->dev_private;
2157 struct drm_i915_master_private *master_priv;
2158 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2297 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2159 struct drm_framebuffer *old_fb; 2298 struct drm_framebuffer *old_fb;
2160 int ret; 2299 int ret;
@@ -2206,20 +2345,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2206 intel_update_fbc(dev); 2345 intel_update_fbc(dev);
2207 mutex_unlock(&dev->struct_mutex); 2346 mutex_unlock(&dev->struct_mutex);
2208 2347
2209 if (!dev->primary->master) 2348 intel_crtc_update_sarea_pos(crtc, x, y);
2210 return 0;
2211
2212 master_priv = dev->primary->master->driver_priv;
2213 if (!master_priv->sarea_priv)
2214 return 0;
2215
2216 if (intel_crtc->pipe) {
2217 master_priv->sarea_priv->pipeB_x = x;
2218 master_priv->sarea_priv->pipeB_y = y;
2219 } else {
2220 master_priv->sarea_priv->pipeA_x = x;
2221 master_priv->sarea_priv->pipeA_y = y;
2222 }
2223 2349
2224 return 0; 2350 return 0;
2225} 2351}
@@ -2302,16 +2428,27 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
2302 FDI_FE_ERRC_ENABLE); 2428 FDI_FE_ERRC_ENABLE);
2303} 2429}
2304 2430
2305static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe) 2431static void ivb_modeset_global_resources(struct drm_device *dev)
2306{ 2432{
2307 struct drm_i915_private *dev_priv = dev->dev_private; 2433 struct drm_i915_private *dev_priv = dev->dev_private;
2308 u32 flags = I915_READ(SOUTH_CHICKEN1); 2434 struct intel_crtc *pipe_B_crtc =
2435 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
2436 struct intel_crtc *pipe_C_crtc =
2437 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
2438 uint32_t temp;
2309 2439
2310 flags |= FDI_PHASE_SYNC_OVR(pipe); 2440 /* When everything is off disable fdi C so that we could enable fdi B
2311 I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */ 2441 * with all lanes. XXX: This misses the case where a pipe is not using
2312 flags |= FDI_PHASE_SYNC_EN(pipe); 2442 * any pch resources and so doesn't need any fdi lanes. */
2313 I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */ 2443 if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) {
2314 POSTING_READ(SOUTH_CHICKEN1); 2444 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
2445 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
2446
2447 temp = I915_READ(SOUTH_CHICKEN1);
2448 temp &= ~FDI_BC_BIFURCATION_SELECT;
2449 DRM_DEBUG_KMS("disabling fdi C rx\n");
2450 I915_WRITE(SOUTH_CHICKEN1, temp);
2451 }
2315} 2452}
2316 2453
2317/* The FDI link training functions for ILK/Ibexpeak. */ 2454/* The FDI link training functions for ILK/Ibexpeak. */
@@ -2357,11 +2494,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2357 udelay(150); 2494 udelay(150);
2358 2495
2359 /* Ironlake workaround, enable clock pointer after FDI enable*/ 2496 /* Ironlake workaround, enable clock pointer after FDI enable*/
2360 if (HAS_PCH_IBX(dev)) { 2497 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2361 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 2498 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2362 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | 2499 FDI_RX_PHASE_SYNC_POINTER_EN);
2363 FDI_RX_PHASE_SYNC_POINTER_EN);
2364 }
2365 2500
2366 reg = FDI_RX_IIR(pipe); 2501 reg = FDI_RX_IIR(pipe);
2367 for (tries = 0; tries < 5; tries++) { 2502 for (tries = 0; tries < 5; tries++) {
@@ -2450,6 +2585,9 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
2450 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 2585 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2451 I915_WRITE(reg, temp | FDI_TX_ENABLE); 2586 I915_WRITE(reg, temp | FDI_TX_ENABLE);
2452 2587
2588 I915_WRITE(FDI_RX_MISC(pipe),
2589 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2590
2453 reg = FDI_RX_CTL(pipe); 2591 reg = FDI_RX_CTL(pipe);
2454 temp = I915_READ(reg); 2592 temp = I915_READ(reg);
2455 if (HAS_PCH_CPT(dev)) { 2593 if (HAS_PCH_CPT(dev)) {
@@ -2464,9 +2602,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
2464 POSTING_READ(reg); 2602 POSTING_READ(reg);
2465 udelay(150); 2603 udelay(150);
2466 2604
2467 if (HAS_PCH_CPT(dev))
2468 cpt_phase_pointer_enable(dev, pipe);
2469
2470 for (i = 0; i < 4; i++) { 2605 for (i = 0; i < 4; i++) {
2471 reg = FDI_TX_CTL(pipe); 2606 reg = FDI_TX_CTL(pipe);
2472 temp = I915_READ(reg); 2607 temp = I915_READ(reg);
@@ -2570,6 +2705,9 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2570 POSTING_READ(reg); 2705 POSTING_READ(reg);
2571 udelay(150); 2706 udelay(150);
2572 2707
2708 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
2709 I915_READ(FDI_RX_IIR(pipe)));
2710
2573 /* enable CPU FDI TX and PCH FDI RX */ 2711 /* enable CPU FDI TX and PCH FDI RX */
2574 reg = FDI_TX_CTL(pipe); 2712 reg = FDI_TX_CTL(pipe);
2575 temp = I915_READ(reg); 2713 temp = I915_READ(reg);
@@ -2582,6 +2720,9 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2582 temp |= FDI_COMPOSITE_SYNC; 2720 temp |= FDI_COMPOSITE_SYNC;
2583 I915_WRITE(reg, temp | FDI_TX_ENABLE); 2721 I915_WRITE(reg, temp | FDI_TX_ENABLE);
2584 2722
2723 I915_WRITE(FDI_RX_MISC(pipe),
2724 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2725
2585 reg = FDI_RX_CTL(pipe); 2726 reg = FDI_RX_CTL(pipe);
2586 temp = I915_READ(reg); 2727 temp = I915_READ(reg);
2587 temp &= ~FDI_LINK_TRAIN_AUTO; 2728 temp &= ~FDI_LINK_TRAIN_AUTO;
@@ -2593,9 +2734,6 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2593 POSTING_READ(reg); 2734 POSTING_READ(reg);
2594 udelay(150); 2735 udelay(150);
2595 2736
2596 if (HAS_PCH_CPT(dev))
2597 cpt_phase_pointer_enable(dev, pipe);
2598
2599 for (i = 0; i < 4; i++) { 2737 for (i = 0; i < 4; i++) {
2600 reg = FDI_TX_CTL(pipe); 2738 reg = FDI_TX_CTL(pipe);
2601 temp = I915_READ(reg); 2739 temp = I915_READ(reg);
@@ -2613,7 +2751,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2613 if (temp & FDI_RX_BIT_LOCK || 2751 if (temp & FDI_RX_BIT_LOCK ||
2614 (I915_READ(reg) & FDI_RX_BIT_LOCK)) { 2752 (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2615 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 2753 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2616 DRM_DEBUG_KMS("FDI train 1 done.\n"); 2754 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i);
2617 break; 2755 break;
2618 } 2756 }
2619 } 2757 }
@@ -2654,7 +2792,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2654 2792
2655 if (temp & FDI_RX_SYMBOL_LOCK) { 2793 if (temp & FDI_RX_SYMBOL_LOCK) {
2656 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 2794 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2657 DRM_DEBUG_KMS("FDI train 2 done.\n"); 2795 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i);
2658 break; 2796 break;
2659 } 2797 }
2660 } 2798 }
@@ -2671,9 +2809,6 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
2671 int pipe = intel_crtc->pipe; 2809 int pipe = intel_crtc->pipe;
2672 u32 reg, temp; 2810 u32 reg, temp;
2673 2811
2674 /* Write the TU size bits so error detection works */
2675 I915_WRITE(FDI_RX_TUSIZE1(pipe),
2676 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
2677 2812
2678 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 2813 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
2679 reg = FDI_RX_CTL(pipe); 2814 reg = FDI_RX_CTL(pipe);
@@ -2737,17 +2872,6 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
2737 udelay(100); 2872 udelay(100);
2738} 2873}
2739 2874
2740static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
2741{
2742 struct drm_i915_private *dev_priv = dev->dev_private;
2743 u32 flags = I915_READ(SOUTH_CHICKEN1);
2744
2745 flags &= ~(FDI_PHASE_SYNC_EN(pipe));
2746 I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
2747 flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
2748 I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
2749 POSTING_READ(SOUTH_CHICKEN1);
2750}
2751static void ironlake_fdi_disable(struct drm_crtc *crtc) 2875static void ironlake_fdi_disable(struct drm_crtc *crtc)
2752{ 2876{
2753 struct drm_device *dev = crtc->dev; 2877 struct drm_device *dev = crtc->dev;
@@ -2774,11 +2898,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
2774 /* Ironlake workaround, disable clock pointer after downing FDI */ 2898 /* Ironlake workaround, disable clock pointer after downing FDI */
2775 if (HAS_PCH_IBX(dev)) { 2899 if (HAS_PCH_IBX(dev)) {
2776 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 2900 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2777 I915_WRITE(FDI_RX_CHICKEN(pipe),
2778 I915_READ(FDI_RX_CHICKEN(pipe) &
2779 ~FDI_RX_PHASE_SYNC_POINTER_EN));
2780 } else if (HAS_PCH_CPT(dev)) {
2781 cpt_phase_pointer_disable(dev, pipe);
2782 } 2901 }
2783 2902
2784 /* still set train pattern 1 */ 2903 /* still set train pattern 1 */
@@ -2839,7 +2958,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2839 mutex_unlock(&dev->struct_mutex); 2958 mutex_unlock(&dev->struct_mutex);
2840} 2959}
2841 2960
2842static bool intel_crtc_driving_pch(struct drm_crtc *crtc) 2961static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc)
2843{ 2962{
2844 struct drm_device *dev = crtc->dev; 2963 struct drm_device *dev = crtc->dev;
2845 struct intel_encoder *intel_encoder; 2964 struct intel_encoder *intel_encoder;
@@ -2849,23 +2968,6 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2849 * must be driven by its own crtc; no sharing is possible. 2968 * must be driven by its own crtc; no sharing is possible.
2850 */ 2969 */
2851 for_each_encoder_on_crtc(dev, crtc, intel_encoder) { 2970 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
2852
2853 /* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
2854 * CPU handles all others */
2855 if (IS_HASWELL(dev)) {
2856 /* It is still unclear how this will work on PPT, so throw up a warning */
2857 WARN_ON(!HAS_PCH_LPT(dev));
2858
2859 if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
2860 DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
2861 return true;
2862 } else {
2863 DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
2864 intel_encoder->type);
2865 return false;
2866 }
2867 }
2868
2869 switch (intel_encoder->type) { 2971 switch (intel_encoder->type) {
2870 case INTEL_OUTPUT_EDP: 2972 case INTEL_OUTPUT_EDP:
2871 if (!intel_encoder_is_pch_edp(&intel_encoder->base)) 2973 if (!intel_encoder_is_pch_edp(&intel_encoder->base))
@@ -2877,6 +2979,11 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2877 return true; 2979 return true;
2878} 2980}
2879 2981
2982static bool haswell_crtc_driving_pch(struct drm_crtc *crtc)
2983{
2984 return intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG);
2985}
2986
2880/* Program iCLKIP clock to the desired frequency */ 2987/* Program iCLKIP clock to the desired frequency */
2881static void lpt_program_iclkip(struct drm_crtc *crtc) 2988static void lpt_program_iclkip(struct drm_crtc *crtc)
2882{ 2989{
@@ -2892,8 +2999,9 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
2892 2999
2893 /* Disable SSCCTL */ 3000 /* Disable SSCCTL */
2894 intel_sbi_write(dev_priv, SBI_SSCCTL6, 3001 intel_sbi_write(dev_priv, SBI_SSCCTL6,
2895 intel_sbi_read(dev_priv, SBI_SSCCTL6) | 3002 intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
2896 SBI_SSCCTL_DISABLE); 3003 SBI_SSCCTL_DISABLE,
3004 SBI_ICLK);
2897 3005
2898 /* 20MHz is a corner case which is out of range for the 7-bit divisor */ 3006 /* 20MHz is a corner case which is out of range for the 7-bit divisor */
2899 if (crtc->mode.clock == 20000) { 3007 if (crtc->mode.clock == 20000) {
@@ -2934,33 +3042,25 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
2934 phaseinc); 3042 phaseinc);
2935 3043
2936 /* Program SSCDIVINTPHASE6 */ 3044 /* Program SSCDIVINTPHASE6 */
2937 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6); 3045 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
2938 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK; 3046 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
2939 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel); 3047 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
2940 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK; 3048 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
2941 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc); 3049 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
2942 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir); 3050 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
2943 temp |= SBI_SSCDIVINTPHASE_PROPAGATE; 3051 temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
2944 3052 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
2945 intel_sbi_write(dev_priv,
2946 SBI_SSCDIVINTPHASE6,
2947 temp);
2948 3053
2949 /* Program SSCAUXDIV */ 3054 /* Program SSCAUXDIV */
2950 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6); 3055 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
2951 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1); 3056 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
2952 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv); 3057 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
2953 intel_sbi_write(dev_priv, 3058 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
2954 SBI_SSCAUXDIV6,
2955 temp);
2956
2957 3059
2958 /* Enable modulator and associated divider */ 3060 /* Enable modulator and associated divider */
2959 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6); 3061 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
2960 temp &= ~SBI_SSCCTL_DISABLE; 3062 temp &= ~SBI_SSCCTL_DISABLE;
2961 intel_sbi_write(dev_priv, 3063 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
2962 SBI_SSCCTL6,
2963 temp);
2964 3064
2965 /* Wait for initialization time */ 3065 /* Wait for initialization time */
2966 udelay(24); 3066 udelay(24);
@@ -2986,15 +3086,24 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
2986 3086
2987 assert_transcoder_disabled(dev_priv, pipe); 3087 assert_transcoder_disabled(dev_priv, pipe);
2988 3088
3089 /* Write the TU size bits before fdi link training, so that error
3090 * detection works. */
3091 I915_WRITE(FDI_RX_TUSIZE1(pipe),
3092 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
3093
2989 /* For PCH output, training FDI link */ 3094 /* For PCH output, training FDI link */
2990 dev_priv->display.fdi_link_train(crtc); 3095 dev_priv->display.fdi_link_train(crtc);
2991 3096
2992 intel_enable_pch_pll(intel_crtc); 3097 /* XXX: pch pll's can be enabled any time before we enable the PCH
3098 * transcoder, and we actually should do this to not upset any PCH
3099 * transcoder that already use the clock when we share it.
3100 *
3101 * Note that enable_pch_pll tries to do the right thing, but get_pch_pll
3102 * unconditionally resets the pll - we need that to have the right LVDS
3103 * enable sequence. */
3104 ironlake_enable_pch_pll(intel_crtc);
2993 3105
2994 if (HAS_PCH_LPT(dev)) { 3106 if (HAS_PCH_CPT(dev)) {
2995 DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n");
2996 lpt_program_iclkip(crtc);
2997 } else if (HAS_PCH_CPT(dev)) {
2998 u32 sel; 3107 u32 sel;
2999 3108
3000 temp = I915_READ(PCH_DPLL_SEL); 3109 temp = I915_READ(PCH_DPLL_SEL);
@@ -3031,8 +3140,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
3031 I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); 3140 I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
3032 I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe))); 3141 I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
3033 3142
3034 if (!IS_HASWELL(dev)) 3143 intel_fdi_normal_train(crtc);
3035 intel_fdi_normal_train(crtc);
3036 3144
3037 /* For PCH DP, enable TRANS_DP_CTL */ 3145 /* For PCH DP, enable TRANS_DP_CTL */
3038 if (HAS_PCH_CPT(dev) && 3146 if (HAS_PCH_CPT(dev) &&
@@ -3064,15 +3172,37 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
3064 temp |= TRANS_DP_PORT_SEL_D; 3172 temp |= TRANS_DP_PORT_SEL_D;
3065 break; 3173 break;
3066 default: 3174 default:
3067 DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n"); 3175 BUG();
3068 temp |= TRANS_DP_PORT_SEL_B;
3069 break;
3070 } 3176 }
3071 3177
3072 I915_WRITE(reg, temp); 3178 I915_WRITE(reg, temp);
3073 } 3179 }
3074 3180
3075 intel_enable_transcoder(dev_priv, pipe); 3181 ironlake_enable_pch_transcoder(dev_priv, pipe);
3182}
3183
3184static void lpt_pch_enable(struct drm_crtc *crtc)
3185{
3186 struct drm_device *dev = crtc->dev;
3187 struct drm_i915_private *dev_priv = dev->dev_private;
3188 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3189 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
3190
3191 assert_transcoder_disabled(dev_priv, TRANSCODER_A);
3192
3193 lpt_program_iclkip(crtc);
3194
3195 /* Set transcoder timing. */
3196 I915_WRITE(_TRANS_HTOTAL_A, I915_READ(HTOTAL(cpu_transcoder)));
3197 I915_WRITE(_TRANS_HBLANK_A, I915_READ(HBLANK(cpu_transcoder)));
3198 I915_WRITE(_TRANS_HSYNC_A, I915_READ(HSYNC(cpu_transcoder)));
3199
3200 I915_WRITE(_TRANS_VTOTAL_A, I915_READ(VTOTAL(cpu_transcoder)));
3201 I915_WRITE(_TRANS_VBLANK_A, I915_READ(VBLANK(cpu_transcoder)));
3202 I915_WRITE(_TRANS_VSYNC_A, I915_READ(VSYNC(cpu_transcoder)));
3203 I915_WRITE(_TRANS_VSYNCSHIFT_A, I915_READ(VSYNCSHIFT(cpu_transcoder)));
3204
3205 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
3076} 3206}
3077 3207
3078static void intel_put_pch_pll(struct intel_crtc *intel_crtc) 3208static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
@@ -3165,16 +3295,12 @@ prepare: /* separate function? */
3165void intel_cpt_verify_modeset(struct drm_device *dev, int pipe) 3295void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
3166{ 3296{
3167 struct drm_i915_private *dev_priv = dev->dev_private; 3297 struct drm_i915_private *dev_priv = dev->dev_private;
3168 int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe); 3298 int dslreg = PIPEDSL(pipe);
3169 u32 temp; 3299 u32 temp;
3170 3300
3171 temp = I915_READ(dslreg); 3301 temp = I915_READ(dslreg);
3172 udelay(500); 3302 udelay(500);
3173 if (wait_for(I915_READ(dslreg) != temp, 5)) { 3303 if (wait_for(I915_READ(dslreg) != temp, 5)) {
3174 /* Without this, mode sets may fail silently on FDI */
3175 I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
3176 udelay(250);
3177 I915_WRITE(tc2reg, 0);
3178 if (wait_for(I915_READ(dslreg) != temp, 5)) 3304 if (wait_for(I915_READ(dslreg) != temp, 5))
3179 DRM_ERROR("mode set failed: pipe %d stuck\n", pipe); 3305 DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
3180 } 3306 }
@@ -3205,9 +3331,12 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3205 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); 3331 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
3206 } 3332 }
3207 3333
3208 is_pch_port = intel_crtc_driving_pch(crtc); 3334 is_pch_port = ironlake_crtc_driving_pch(crtc);
3209 3335
3210 if (is_pch_port) { 3336 if (is_pch_port) {
3337 /* Note: FDI PLL enabling _must_ be done before we enable the
3338 * cpu pipes, hence this is separate from all the other fdi/pch
3339 * enabling. */
3211 ironlake_fdi_pll_enable(intel_crtc); 3340 ironlake_fdi_pll_enable(intel_crtc);
3212 } else { 3341 } else {
3213 assert_fdi_tx_disabled(dev_priv, pipe); 3342 assert_fdi_tx_disabled(dev_priv, pipe);
@@ -3220,12 +3349,17 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3220 3349
3221 /* Enable panel fitting for LVDS */ 3350 /* Enable panel fitting for LVDS */
3222 if (dev_priv->pch_pf_size && 3351 if (dev_priv->pch_pf_size &&
3223 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) { 3352 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
3353 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
3224 /* Force use of hard-coded filter coefficients 3354 /* Force use of hard-coded filter coefficients
3225 * as some pre-programmed values are broken, 3355 * as some pre-programmed values are broken,
3226 * e.g. x201. 3356 * e.g. x201.
3227 */ 3357 */
3228 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); 3358 if (IS_IVYBRIDGE(dev))
3359 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
3360 PF_PIPE_SEL_IVB(pipe));
3361 else
3362 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3229 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); 3363 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
3230 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); 3364 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
3231 } 3365 }
@@ -3265,6 +3399,83 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
3265 intel_wait_for_vblank(dev, intel_crtc->pipe); 3399 intel_wait_for_vblank(dev, intel_crtc->pipe);
3266} 3400}
3267 3401
3402static void haswell_crtc_enable(struct drm_crtc *crtc)
3403{
3404 struct drm_device *dev = crtc->dev;
3405 struct drm_i915_private *dev_priv = dev->dev_private;
3406 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3407 struct intel_encoder *encoder;
3408 int pipe = intel_crtc->pipe;
3409 int plane = intel_crtc->plane;
3410 bool is_pch_port;
3411
3412 WARN_ON(!crtc->enabled);
3413
3414 if (intel_crtc->active)
3415 return;
3416
3417 intel_crtc->active = true;
3418 intel_update_watermarks(dev);
3419
3420 is_pch_port = haswell_crtc_driving_pch(crtc);
3421
3422 if (is_pch_port)
3423 dev_priv->display.fdi_link_train(crtc);
3424
3425 for_each_encoder_on_crtc(dev, crtc, encoder)
3426 if (encoder->pre_enable)
3427 encoder->pre_enable(encoder);
3428
3429 intel_ddi_enable_pipe_clock(intel_crtc);
3430
3431 /* Enable panel fitting for eDP */
3432 if (dev_priv->pch_pf_size &&
3433 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
3434 /* Force use of hard-coded filter coefficients
3435 * as some pre-programmed values are broken,
3436 * e.g. x201.
3437 */
3438 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
3439 PF_PIPE_SEL_IVB(pipe));
3440 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
3441 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
3442 }
3443
3444 /*
3445 * On ILK+ LUT must be loaded before the pipe is running but with
3446 * clocks enabled
3447 */
3448 intel_crtc_load_lut(crtc);
3449
3450 intel_ddi_set_pipe_settings(crtc);
3451 intel_ddi_enable_pipe_func(crtc);
3452
3453 intel_enable_pipe(dev_priv, pipe, is_pch_port);
3454 intel_enable_plane(dev_priv, plane, pipe);
3455
3456 if (is_pch_port)
3457 lpt_pch_enable(crtc);
3458
3459 mutex_lock(&dev->struct_mutex);
3460 intel_update_fbc(dev);
3461 mutex_unlock(&dev->struct_mutex);
3462
3463 intel_crtc_update_cursor(crtc, true);
3464
3465 for_each_encoder_on_crtc(dev, crtc, encoder)
3466 encoder->enable(encoder);
3467
3468 /*
3469 * There seems to be a race in PCH platform hw (at least on some
3470 * outputs) where an enabled pipe still completes any pageflip right
3471 * away (as if the pipe is off) instead of waiting for vblank. As soon
3472 * as the first vblank happend, everything works as expected. Hence just
3473 * wait for one vblank before returning to avoid strange things
3474 * happening.
3475 */
3476 intel_wait_for_vblank(dev, intel_crtc->pipe);
3477}
3478
3268static void ironlake_crtc_disable(struct drm_crtc *crtc) 3479static void ironlake_crtc_disable(struct drm_crtc *crtc)
3269{ 3480{
3270 struct drm_device *dev = crtc->dev; 3481 struct drm_device *dev = crtc->dev;
@@ -3303,7 +3514,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
3303 3514
3304 ironlake_fdi_disable(crtc); 3515 ironlake_fdi_disable(crtc);
3305 3516
3306 intel_disable_transcoder(dev_priv, pipe); 3517 ironlake_disable_pch_transcoder(dev_priv, pipe);
3307 3518
3308 if (HAS_PCH_CPT(dev)) { 3519 if (HAS_PCH_CPT(dev)) {
3309 /* disable TRANS_DP_CTL */ 3520 /* disable TRANS_DP_CTL */
@@ -3345,12 +3556,78 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
3345 mutex_unlock(&dev->struct_mutex); 3556 mutex_unlock(&dev->struct_mutex);
3346} 3557}
3347 3558
3559static void haswell_crtc_disable(struct drm_crtc *crtc)
3560{
3561 struct drm_device *dev = crtc->dev;
3562 struct drm_i915_private *dev_priv = dev->dev_private;
3563 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3564 struct intel_encoder *encoder;
3565 int pipe = intel_crtc->pipe;
3566 int plane = intel_crtc->plane;
3567 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
3568 bool is_pch_port;
3569
3570 if (!intel_crtc->active)
3571 return;
3572
3573 is_pch_port = haswell_crtc_driving_pch(crtc);
3574
3575 for_each_encoder_on_crtc(dev, crtc, encoder)
3576 encoder->disable(encoder);
3577
3578 intel_crtc_wait_for_pending_flips(crtc);
3579 drm_vblank_off(dev, pipe);
3580 intel_crtc_update_cursor(crtc, false);
3581
3582 intel_disable_plane(dev_priv, plane, pipe);
3583
3584 if (dev_priv->cfb_plane == plane)
3585 intel_disable_fbc(dev);
3586
3587 intel_disable_pipe(dev_priv, pipe);
3588
3589 intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
3590
3591 /* Disable PF */
3592 I915_WRITE(PF_CTL(pipe), 0);
3593 I915_WRITE(PF_WIN_SZ(pipe), 0);
3594
3595 intel_ddi_disable_pipe_clock(intel_crtc);
3596
3597 for_each_encoder_on_crtc(dev, crtc, encoder)
3598 if (encoder->post_disable)
3599 encoder->post_disable(encoder);
3600
3601 if (is_pch_port) {
3602 lpt_disable_pch_transcoder(dev_priv);
3603 intel_ddi_fdi_disable(crtc);
3604 }
3605
3606 intel_crtc->active = false;
3607 intel_update_watermarks(dev);
3608
3609 mutex_lock(&dev->struct_mutex);
3610 intel_update_fbc(dev);
3611 mutex_unlock(&dev->struct_mutex);
3612}
3613
3348static void ironlake_crtc_off(struct drm_crtc *crtc) 3614static void ironlake_crtc_off(struct drm_crtc *crtc)
3349{ 3615{
3350 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3616 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3351 intel_put_pch_pll(intel_crtc); 3617 intel_put_pch_pll(intel_crtc);
3352} 3618}
3353 3619
3620static void haswell_crtc_off(struct drm_crtc *crtc)
3621{
3622 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3623
3624 /* Stop saying we're using TRANSCODER_EDP because some other CRTC might
3625 * start using it. */
3626 intel_crtc->cpu_transcoder = intel_crtc->pipe;
3627
3628 intel_ddi_put_crtc_pll(crtc);
3629}
3630
3354static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) 3631static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3355{ 3632{
3356 if (!enable && intel_crtc->overlay) { 3633 if (!enable && intel_crtc->overlay) {
@@ -3841,6 +4118,17 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
3841 } 4118 }
3842 } 4119 }
3843 4120
4121 if (intel_encoder->type == INTEL_OUTPUT_EDP) {
4122 /* Use VBT settings if we have an eDP panel */
4123 unsigned int edp_bpc = dev_priv->edp.bpp / 3;
4124
4125 if (edp_bpc && edp_bpc < display_bpc) {
4126 DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
4127 display_bpc = edp_bpc;
4128 }
4129 continue;
4130 }
4131
3844 /* 4132 /*
3845 * HDMI is either 12 or 8, so if the display lets 10bpc sneak 4133 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
3846 * through, clamp it down. (Note: >12bpc will be caught below.) 4134 * through, clamp it down. (Note: >12bpc will be caught below.)
@@ -4050,7 +4338,7 @@ static void vlv_update_pll(struct drm_crtc *crtc,
4050 struct drm_display_mode *mode, 4338 struct drm_display_mode *mode,
4051 struct drm_display_mode *adjusted_mode, 4339 struct drm_display_mode *adjusted_mode,
4052 intel_clock_t *clock, intel_clock_t *reduced_clock, 4340 intel_clock_t *clock, intel_clock_t *reduced_clock,
4053 int refclk, int num_connectors) 4341 int num_connectors)
4054{ 4342{
4055 struct drm_device *dev = crtc->dev; 4343 struct drm_device *dev = crtc->dev;
4056 struct drm_i915_private *dev_priv = dev->dev_private; 4344 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4058,9 +4346,19 @@ static void vlv_update_pll(struct drm_crtc *crtc,
4058 int pipe = intel_crtc->pipe; 4346 int pipe = intel_crtc->pipe;
4059 u32 dpll, mdiv, pdiv; 4347 u32 dpll, mdiv, pdiv;
4060 u32 bestn, bestm1, bestm2, bestp1, bestp2; 4348 u32 bestn, bestm1, bestm2, bestp1, bestp2;
4061 bool is_hdmi; 4349 bool is_sdvo;
4350 u32 temp;
4351
4352 is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
4353 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
4062 4354
4063 is_hdmi = intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); 4355 dpll = DPLL_VGA_MODE_DIS;
4356 dpll |= DPLL_EXT_BUFFER_ENABLE_VLV;
4357 dpll |= DPLL_REFA_CLK_ENABLE_VLV;
4358 dpll |= DPLL_INTEGRATED_CLOCK_VLV;
4359
4360 I915_WRITE(DPLL(pipe), dpll);
4361 POSTING_READ(DPLL(pipe));
4064 4362
4065 bestn = clock->n; 4363 bestn = clock->n;
4066 bestm1 = clock->m1; 4364 bestm1 = clock->m1;
@@ -4068,12 +4366,10 @@ static void vlv_update_pll(struct drm_crtc *crtc,
4068 bestp1 = clock->p1; 4366 bestp1 = clock->p1;
4069 bestp2 = clock->p2; 4367 bestp2 = clock->p2;
4070 4368
4071 /* Enable DPIO clock input */ 4369 /*
4072 dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV | 4370 * In Valleyview PLL and program lane counter registers are exposed
4073 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV; 4371 * through DPIO interface
4074 I915_WRITE(DPLL(pipe), dpll); 4372 */
4075 POSTING_READ(DPLL(pipe));
4076
4077 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); 4373 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
4078 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); 4374 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
4079 mdiv |= ((bestn << DPIO_N_SHIFT)); 4375 mdiv |= ((bestn << DPIO_N_SHIFT));
@@ -4084,12 +4380,13 @@ static void vlv_update_pll(struct drm_crtc *crtc,
4084 4380
4085 intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000); 4381 intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000);
4086 4382
4087 pdiv = DPIO_REFSEL_OVERRIDE | (5 << DPIO_PLL_MODESEL_SHIFT) | 4383 pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) |
4088 (3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) | 4384 (3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) |
4089 (8 << DPIO_DRIVER_CTL_SHIFT) | (5 << DPIO_CLK_BIAS_CTL_SHIFT); 4385 (7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) |
4386 (5 << DPIO_CLK_BIAS_CTL_SHIFT);
4090 intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv); 4387 intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv);
4091 4388
4092 intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x009f0051); 4389 intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b);
4093 4390
4094 dpll |= DPLL_VCO_ENABLE; 4391 dpll |= DPLL_VCO_ENABLE;
4095 I915_WRITE(DPLL(pipe), dpll); 4392 I915_WRITE(DPLL(pipe), dpll);
@@ -4097,19 +4394,44 @@ static void vlv_update_pll(struct drm_crtc *crtc,
4097 if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) 4394 if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
4098 DRM_ERROR("DPLL %d failed to lock\n", pipe); 4395 DRM_ERROR("DPLL %d failed to lock\n", pipe);
4099 4396
4100 if (is_hdmi) { 4397 intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620);
4101 u32 temp = intel_mode_get_pixel_multiplier(adjusted_mode); 4398
4399 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
4400 intel_dp_set_m_n(crtc, mode, adjusted_mode);
4401
4402 I915_WRITE(DPLL(pipe), dpll);
4403
4404 /* Wait for the clocks to stabilize. */
4405 POSTING_READ(DPLL(pipe));
4406 udelay(150);
4102 4407
4408 temp = 0;
4409 if (is_sdvo) {
4410 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
4103 if (temp > 1) 4411 if (temp > 1)
4104 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 4412 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
4105 else 4413 else
4106 temp = 0; 4414 temp = 0;
4107
4108 I915_WRITE(DPLL_MD(pipe), temp);
4109 POSTING_READ(DPLL_MD(pipe));
4110 } 4415 }
4416 I915_WRITE(DPLL_MD(pipe), temp);
4417 POSTING_READ(DPLL_MD(pipe));
4111 4418
4112 intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x641); /* ??? */ 4419 /* Now program lane control registers */
4420 if(intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)
4421 || intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
4422 {
4423 temp = 0x1000C4;
4424 if(pipe == 1)
4425 temp |= (1 << 21);
4426 intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp);
4427 }
4428 if(intel_pipe_has_type(crtc,INTEL_OUTPUT_EDP))
4429 {
4430 temp = 0x1000C4;
4431 if(pipe == 1)
4432 temp |= (1 << 21);
4433 intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
4434 }
4113} 4435}
4114 4436
4115static void i9xx_update_pll(struct drm_crtc *crtc, 4437static void i9xx_update_pll(struct drm_crtc *crtc,
@@ -4125,6 +4447,8 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
4125 u32 dpll; 4447 u32 dpll;
4126 bool is_sdvo; 4448 bool is_sdvo;
4127 4449
4450 i9xx_update_pll_dividers(crtc, clock, reduced_clock);
4451
4128 is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) || 4452 is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
4129 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); 4453 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
4130 4454
@@ -4225,7 +4549,7 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
4225 4549
4226static void i8xx_update_pll(struct drm_crtc *crtc, 4550static void i8xx_update_pll(struct drm_crtc *crtc,
4227 struct drm_display_mode *adjusted_mode, 4551 struct drm_display_mode *adjusted_mode,
4228 intel_clock_t *clock, 4552 intel_clock_t *clock, intel_clock_t *reduced_clock,
4229 int num_connectors) 4553 int num_connectors)
4230{ 4554{
4231 struct drm_device *dev = crtc->dev; 4555 struct drm_device *dev = crtc->dev;
@@ -4234,6 +4558,8 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
4234 int pipe = intel_crtc->pipe; 4558 int pipe = intel_crtc->pipe;
4235 u32 dpll; 4559 u32 dpll;
4236 4560
4561 i9xx_update_pll_dividers(crtc, clock, reduced_clock);
4562
4237 dpll = DPLL_VGA_MODE_DIS; 4563 dpll = DPLL_VGA_MODE_DIS;
4238 4564
4239 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 4565 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
@@ -4283,6 +4609,64 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
4283 I915_WRITE(DPLL(pipe), dpll); 4609 I915_WRITE(DPLL(pipe), dpll);
4284} 4610}
4285 4611
4612static void intel_set_pipe_timings(struct intel_crtc *intel_crtc,
4613 struct drm_display_mode *mode,
4614 struct drm_display_mode *adjusted_mode)
4615{
4616 struct drm_device *dev = intel_crtc->base.dev;
4617 struct drm_i915_private *dev_priv = dev->dev_private;
4618 enum pipe pipe = intel_crtc->pipe;
4619 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
4620 uint32_t vsyncshift;
4621
4622 if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
4623 /* the chip adds 2 halflines automatically */
4624 adjusted_mode->crtc_vtotal -= 1;
4625 adjusted_mode->crtc_vblank_end -= 1;
4626 vsyncshift = adjusted_mode->crtc_hsync_start
4627 - adjusted_mode->crtc_htotal / 2;
4628 } else {
4629 vsyncshift = 0;
4630 }
4631
4632 if (INTEL_INFO(dev)->gen > 3)
4633 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
4634
4635 I915_WRITE(HTOTAL(cpu_transcoder),
4636 (adjusted_mode->crtc_hdisplay - 1) |
4637 ((adjusted_mode->crtc_htotal - 1) << 16));
4638 I915_WRITE(HBLANK(cpu_transcoder),
4639 (adjusted_mode->crtc_hblank_start - 1) |
4640 ((adjusted_mode->crtc_hblank_end - 1) << 16));
4641 I915_WRITE(HSYNC(cpu_transcoder),
4642 (adjusted_mode->crtc_hsync_start - 1) |
4643 ((adjusted_mode->crtc_hsync_end - 1) << 16));
4644
4645 I915_WRITE(VTOTAL(cpu_transcoder),
4646 (adjusted_mode->crtc_vdisplay - 1) |
4647 ((adjusted_mode->crtc_vtotal - 1) << 16));
4648 I915_WRITE(VBLANK(cpu_transcoder),
4649 (adjusted_mode->crtc_vblank_start - 1) |
4650 ((adjusted_mode->crtc_vblank_end - 1) << 16));
4651 I915_WRITE(VSYNC(cpu_transcoder),
4652 (adjusted_mode->crtc_vsync_start - 1) |
4653 ((adjusted_mode->crtc_vsync_end - 1) << 16));
4654
4655 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
4656 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
4657 * documented on the DDI_FUNC_CTL register description, EDP Input Select
4658 * bits. */
4659 if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
4660 (pipe == PIPE_B || pipe == PIPE_C))
4661 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
4662
4663 /* pipesrc controls the size that is scaled from, which should
4664 * always be the user's requested size.
4665 */
4666 I915_WRITE(PIPESRC(pipe),
4667 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
4668}
4669
4286static int i9xx_crtc_mode_set(struct drm_crtc *crtc, 4670static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4287 struct drm_display_mode *mode, 4671 struct drm_display_mode *mode,
4288 struct drm_display_mode *adjusted_mode, 4672 struct drm_display_mode *adjusted_mode,
@@ -4296,7 +4680,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4296 int plane = intel_crtc->plane; 4680 int plane = intel_crtc->plane;
4297 int refclk, num_connectors = 0; 4681 int refclk, num_connectors = 0;
4298 intel_clock_t clock, reduced_clock; 4682 intel_clock_t clock, reduced_clock;
4299 u32 dspcntr, pipeconf, vsyncshift; 4683 u32 dspcntr, pipeconf;
4300 bool ok, has_reduced_clock = false, is_sdvo = false; 4684 bool ok, has_reduced_clock = false, is_sdvo = false;
4301 bool is_lvds = false, is_tv = false, is_dp = false; 4685 bool is_lvds = false, is_tv = false, is_dp = false;
4302 struct intel_encoder *encoder; 4686 struct intel_encoder *encoder;
@@ -4360,14 +4744,14 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4360 if (is_sdvo && is_tv) 4744 if (is_sdvo && is_tv)
4361 i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock); 4745 i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
4362 4746
4363 i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
4364 &reduced_clock : NULL);
4365
4366 if (IS_GEN2(dev)) 4747 if (IS_GEN2(dev))
4367 i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors); 4748 i8xx_update_pll(crtc, adjusted_mode, &clock,
4749 has_reduced_clock ? &reduced_clock : NULL,
4750 num_connectors);
4368 else if (IS_VALLEYVIEW(dev)) 4751 else if (IS_VALLEYVIEW(dev))
4369 vlv_update_pll(crtc, mode,adjusted_mode, &clock, NULL, 4752 vlv_update_pll(crtc, mode, adjusted_mode, &clock,
4370 refclk, num_connectors); 4753 has_reduced_clock ? &reduced_clock : NULL,
4754 num_connectors);
4371 else 4755 else
4372 i9xx_update_pll(crtc, mode, adjusted_mode, &clock, 4756 i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
4373 has_reduced_clock ? &reduced_clock : NULL, 4757 has_reduced_clock ? &reduced_clock : NULL,
@@ -4408,6 +4792,14 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4408 } 4792 }
4409 } 4793 }
4410 4794
4795 if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
4796 if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
4797 pipeconf |= PIPECONF_BPP_6 |
4798 PIPECONF_ENABLE |
4799 I965_PIPECONF_ACTIVE;
4800 }
4801 }
4802
4411 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); 4803 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
4412 drm_mode_debug_printmodeline(mode); 4804 drm_mode_debug_printmodeline(mode);
4413 4805
@@ -4423,40 +4815,12 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4423 4815
4424 pipeconf &= ~PIPECONF_INTERLACE_MASK; 4816 pipeconf &= ~PIPECONF_INTERLACE_MASK;
4425 if (!IS_GEN2(dev) && 4817 if (!IS_GEN2(dev) &&
4426 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 4818 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
4427 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 4819 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
4428 /* the chip adds 2 halflines automatically */ 4820 else
4429 adjusted_mode->crtc_vtotal -= 1;
4430 adjusted_mode->crtc_vblank_end -= 1;
4431 vsyncshift = adjusted_mode->crtc_hsync_start
4432 - adjusted_mode->crtc_htotal/2;
4433 } else {
4434 pipeconf |= PIPECONF_PROGRESSIVE; 4821 pipeconf |= PIPECONF_PROGRESSIVE;
4435 vsyncshift = 0;
4436 }
4437
4438 if (!IS_GEN3(dev))
4439 I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
4440 4822
4441 I915_WRITE(HTOTAL(pipe), 4823 intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
4442 (adjusted_mode->crtc_hdisplay - 1) |
4443 ((adjusted_mode->crtc_htotal - 1) << 16));
4444 I915_WRITE(HBLANK(pipe),
4445 (adjusted_mode->crtc_hblank_start - 1) |
4446 ((adjusted_mode->crtc_hblank_end - 1) << 16));
4447 I915_WRITE(HSYNC(pipe),
4448 (adjusted_mode->crtc_hsync_start - 1) |
4449 ((adjusted_mode->crtc_hsync_end - 1) << 16));
4450
4451 I915_WRITE(VTOTAL(pipe),
4452 (adjusted_mode->crtc_vdisplay - 1) |
4453 ((adjusted_mode->crtc_vtotal - 1) << 16));
4454 I915_WRITE(VBLANK(pipe),
4455 (adjusted_mode->crtc_vblank_start - 1) |
4456 ((adjusted_mode->crtc_vblank_end - 1) << 16));
4457 I915_WRITE(VSYNC(pipe),
4458 (adjusted_mode->crtc_vsync_start - 1) |
4459 ((adjusted_mode->crtc_vsync_end - 1) << 16));
4460 4824
4461 /* pipesrc and dspsize control the size that is scaled from, 4825 /* pipesrc and dspsize control the size that is scaled from,
4462 * which should always be the user's requested size. 4826 * which should always be the user's requested size.
@@ -4465,8 +4829,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4465 ((mode->vdisplay - 1) << 16) | 4829 ((mode->vdisplay - 1) << 16) |
4466 (mode->hdisplay - 1)); 4830 (mode->hdisplay - 1));
4467 I915_WRITE(DSPPOS(plane), 0); 4831 I915_WRITE(DSPPOS(plane), 0);
4468 I915_WRITE(PIPESRC(pipe),
4469 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
4470 4832
4471 I915_WRITE(PIPECONF(pipe), pipeconf); 4833 I915_WRITE(PIPECONF(pipe), pipeconf);
4472 POSTING_READ(PIPECONF(pipe)); 4834 POSTING_READ(PIPECONF(pipe));
@@ -4484,10 +4846,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4484 return ret; 4846 return ret;
4485} 4847}
4486 4848
4487/* 4849static void ironlake_init_pch_refclk(struct drm_device *dev)
4488 * Initialize reference clocks when the driver loads
4489 */
4490void ironlake_init_pch_refclk(struct drm_device *dev)
4491{ 4850{
4492 struct drm_i915_private *dev_priv = dev->dev_private; 4851 struct drm_i915_private *dev_priv = dev->dev_private;
4493 struct drm_mode_config *mode_config = &dev->mode_config; 4852 struct drm_mode_config *mode_config = &dev->mode_config;
@@ -4601,6 +4960,182 @@ void ironlake_init_pch_refclk(struct drm_device *dev)
4601 } 4960 }
4602} 4961}
4603 4962
4963/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
4964static void lpt_init_pch_refclk(struct drm_device *dev)
4965{
4966 struct drm_i915_private *dev_priv = dev->dev_private;
4967 struct drm_mode_config *mode_config = &dev->mode_config;
4968 struct intel_encoder *encoder;
4969 bool has_vga = false;
4970 bool is_sdv = false;
4971 u32 tmp;
4972
4973 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4974 switch (encoder->type) {
4975 case INTEL_OUTPUT_ANALOG:
4976 has_vga = true;
4977 break;
4978 }
4979 }
4980
4981 if (!has_vga)
4982 return;
4983
4984 /* XXX: Rip out SDV support once Haswell ships for real. */
4985 if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
4986 is_sdv = true;
4987
4988 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
4989 tmp &= ~SBI_SSCCTL_DISABLE;
4990 tmp |= SBI_SSCCTL_PATHALT;
4991 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
4992
4993 udelay(24);
4994
4995 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
4996 tmp &= ~SBI_SSCCTL_PATHALT;
4997 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
4998
4999 if (!is_sdv) {
5000 tmp = I915_READ(SOUTH_CHICKEN2);
5001 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
5002 I915_WRITE(SOUTH_CHICKEN2, tmp);
5003
5004 if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
5005 FDI_MPHY_IOSFSB_RESET_STATUS, 100))
5006 DRM_ERROR("FDI mPHY reset assert timeout\n");
5007
5008 tmp = I915_READ(SOUTH_CHICKEN2);
5009 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
5010 I915_WRITE(SOUTH_CHICKEN2, tmp);
5011
5012 if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
5013 FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
5014 100))
5015 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
5016 }
5017
5018 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
5019 tmp &= ~(0xFF << 24);
5020 tmp |= (0x12 << 24);
5021 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
5022
5023 if (!is_sdv) {
5024 tmp = intel_sbi_read(dev_priv, 0x808C, SBI_MPHY);
5025 tmp &= ~(0x3 << 6);
5026 tmp |= (1 << 6) | (1 << 0);
5027 intel_sbi_write(dev_priv, 0x808C, tmp, SBI_MPHY);
5028 }
5029
5030 if (is_sdv) {
5031 tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
5032 tmp |= 0x7FFF;
5033 intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
5034 }
5035
5036 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
5037 tmp |= (1 << 11);
5038 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
5039
5040 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
5041 tmp |= (1 << 11);
5042 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
5043
5044 if (is_sdv) {
5045 tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
5046 tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
5047 intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);
5048
5049 tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
5050 tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
5051 intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);
5052
5053 tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
5054 tmp |= (0x3F << 8);
5055 intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);
5056
5057 tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
5058 tmp |= (0x3F << 8);
5059 intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
5060 }
5061
5062 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
5063 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5064 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
5065
5066 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
5067 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5068 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
5069
5070 if (!is_sdv) {
5071 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
5072 tmp &= ~(7 << 13);
5073 tmp |= (5 << 13);
5074 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
5075
5076 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
5077 tmp &= ~(7 << 13);
5078 tmp |= (5 << 13);
5079 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
5080 }
5081
5082 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
5083 tmp &= ~0xFF;
5084 tmp |= 0x1C;
5085 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
5086
5087 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
5088 tmp &= ~0xFF;
5089 tmp |= 0x1C;
5090 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
5091
5092 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
5093 tmp &= ~(0xFF << 16);
5094 tmp |= (0x1C << 16);
5095 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
5096
5097 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
5098 tmp &= ~(0xFF << 16);
5099 tmp |= (0x1C << 16);
5100 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
5101
5102 if (!is_sdv) {
5103 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
5104 tmp |= (1 << 27);
5105 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
5106
5107 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
5108 tmp |= (1 << 27);
5109 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
5110
5111 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
5112 tmp &= ~(0xF << 28);
5113 tmp |= (4 << 28);
5114 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
5115
5116 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
5117 tmp &= ~(0xF << 28);
5118 tmp |= (4 << 28);
5119 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
5120 }
5121
5122 /* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
5123 tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
5124 tmp |= SBI_DBUFF0_ENABLE;
5125 intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
5126}
5127
5128/*
5129 * Initialize reference clocks when the driver loads
5130 */
5131void intel_init_pch_refclk(struct drm_device *dev)
5132{
5133 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
5134 ironlake_init_pch_refclk(dev);
5135 else if (HAS_PCH_LPT(dev))
5136 lpt_init_pch_refclk(dev);
5137}
5138
4604static int ironlake_get_refclk(struct drm_crtc *crtc) 5139static int ironlake_get_refclk(struct drm_crtc *crtc)
4605{ 5140{
4606 struct drm_device *dev = crtc->dev; 5141 struct drm_device *dev = crtc->dev;
@@ -4657,8 +5192,8 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
4657 val |= PIPE_12BPC; 5192 val |= PIPE_12BPC;
4658 break; 5193 break;
4659 default: 5194 default:
4660 val |= PIPE_8BPC; 5195 /* Case prevented by intel_choose_pipe_bpp_dither. */
4661 break; 5196 BUG();
4662 } 5197 }
4663 5198
4664 val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK); 5199 val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
@@ -4675,6 +5210,31 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
4675 POSTING_READ(PIPECONF(pipe)); 5210 POSTING_READ(PIPECONF(pipe));
4676} 5211}
4677 5212
5213static void haswell_set_pipeconf(struct drm_crtc *crtc,
5214 struct drm_display_mode *adjusted_mode,
5215 bool dither)
5216{
5217 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5218 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5219 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
5220 uint32_t val;
5221
5222 val = I915_READ(PIPECONF(cpu_transcoder));
5223
5224 val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
5225 if (dither)
5226 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5227
5228 val &= ~PIPECONF_INTERLACE_MASK_HSW;
5229 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
5230 val |= PIPECONF_INTERLACED_ILK;
5231 else
5232 val |= PIPECONF_PROGRESSIVE;
5233
5234 I915_WRITE(PIPECONF(cpu_transcoder), val);
5235 POSTING_READ(PIPECONF(cpu_transcoder));
5236}
5237
4678static bool ironlake_compute_clocks(struct drm_crtc *crtc, 5238static bool ironlake_compute_clocks(struct drm_crtc *crtc,
4679 struct drm_display_mode *adjusted_mode, 5239 struct drm_display_mode *adjusted_mode,
4680 intel_clock_t *clock, 5240 intel_clock_t *clock,
@@ -4738,74 +5298,126 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
4738 return true; 5298 return true;
4739} 5299}
4740 5300
4741static int ironlake_crtc_mode_set(struct drm_crtc *crtc, 5301static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
4742 struct drm_display_mode *mode, 5302{
4743 struct drm_display_mode *adjusted_mode, 5303 struct drm_i915_private *dev_priv = dev->dev_private;
4744 int x, int y, 5304 uint32_t temp;
4745 struct drm_framebuffer *fb) 5305
5306 temp = I915_READ(SOUTH_CHICKEN1);
5307 if (temp & FDI_BC_BIFURCATION_SELECT)
5308 return;
5309
5310 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
5311 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
5312
5313 temp |= FDI_BC_BIFURCATION_SELECT;
5314 DRM_DEBUG_KMS("enabling fdi C rx\n");
5315 I915_WRITE(SOUTH_CHICKEN1, temp);
5316 POSTING_READ(SOUTH_CHICKEN1);
5317}
5318
5319static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
5320{
5321 struct drm_device *dev = intel_crtc->base.dev;
5322 struct drm_i915_private *dev_priv = dev->dev_private;
5323 struct intel_crtc *pipe_B_crtc =
5324 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
5325
5326 DRM_DEBUG_KMS("checking fdi config on pipe %i, lanes %i\n",
5327 intel_crtc->pipe, intel_crtc->fdi_lanes);
5328 if (intel_crtc->fdi_lanes > 4) {
5329 DRM_DEBUG_KMS("invalid fdi lane config on pipe %i: %i lanes\n",
5330 intel_crtc->pipe, intel_crtc->fdi_lanes);
5331 /* Clamp lanes to avoid programming the hw with bogus values. */
5332 intel_crtc->fdi_lanes = 4;
5333
5334 return false;
5335 }
5336
5337 if (dev_priv->num_pipe == 2)
5338 return true;
5339
5340 switch (intel_crtc->pipe) {
5341 case PIPE_A:
5342 return true;
5343 case PIPE_B:
5344 if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
5345 intel_crtc->fdi_lanes > 2) {
5346 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
5347 intel_crtc->pipe, intel_crtc->fdi_lanes);
5348 /* Clamp lanes to avoid programming the hw with bogus values. */
5349 intel_crtc->fdi_lanes = 2;
5350
5351 return false;
5352 }
5353
5354 if (intel_crtc->fdi_lanes > 2)
5355 WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
5356 else
5357 cpt_enable_fdi_bc_bifurcation(dev);
5358
5359 return true;
5360 case PIPE_C:
5361 if (!pipe_B_crtc->base.enabled || pipe_B_crtc->fdi_lanes <= 2) {
5362 if (intel_crtc->fdi_lanes > 2) {
5363 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
5364 intel_crtc->pipe, intel_crtc->fdi_lanes);
5365 /* Clamp lanes to avoid programming the hw with bogus values. */
5366 intel_crtc->fdi_lanes = 2;
5367
5368 return false;
5369 }
5370 } else {
5371 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
5372 return false;
5373 }
5374
5375 cpt_enable_fdi_bc_bifurcation(dev);
5376
5377 return true;
5378 default:
5379 BUG();
5380 }
5381}
5382
5383int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
5384{
5385 /*
5386 * Account for spread spectrum to avoid
5387 * oversubscribing the link. Max center spread
5388 * is 2.5%; use 5% for safety's sake.
5389 */
5390 u32 bps = target_clock * bpp * 21 / 20;
5391 return bps / (link_bw * 8) + 1;
5392}
5393
5394static void ironlake_set_m_n(struct drm_crtc *crtc,
5395 struct drm_display_mode *mode,
5396 struct drm_display_mode *adjusted_mode)
4746{ 5397{
4747 struct drm_device *dev = crtc->dev; 5398 struct drm_device *dev = crtc->dev;
4748 struct drm_i915_private *dev_priv = dev->dev_private; 5399 struct drm_i915_private *dev_priv = dev->dev_private;
4749 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5400 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4750 int pipe = intel_crtc->pipe; 5401 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
4751 int plane = intel_crtc->plane; 5402 struct intel_encoder *intel_encoder, *edp_encoder = NULL;
4752 int num_connectors = 0;
4753 intel_clock_t clock, reduced_clock;
4754 u32 dpll, fp = 0, fp2 = 0;
4755 bool ok, has_reduced_clock = false, is_sdvo = false;
4756 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
4757 struct intel_encoder *encoder, *edp_encoder = NULL;
4758 int ret;
4759 struct fdi_m_n m_n = {0}; 5403 struct fdi_m_n m_n = {0};
4760 u32 temp; 5404 int target_clock, pixel_multiplier, lane, link_bw;
4761 int target_clock, pixel_multiplier, lane, link_bw, factor; 5405 bool is_dp = false, is_cpu_edp = false;
4762 unsigned int pipe_bpp;
4763 bool dither;
4764 bool is_cpu_edp = false, is_pch_edp = false;
4765 5406
4766 for_each_encoder_on_crtc(dev, crtc, encoder) { 5407 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4767 switch (encoder->type) { 5408 switch (intel_encoder->type) {
4768 case INTEL_OUTPUT_LVDS:
4769 is_lvds = true;
4770 break;
4771 case INTEL_OUTPUT_SDVO:
4772 case INTEL_OUTPUT_HDMI:
4773 is_sdvo = true;
4774 if (encoder->needs_tv_clock)
4775 is_tv = true;
4776 break;
4777 case INTEL_OUTPUT_TVOUT:
4778 is_tv = true;
4779 break;
4780 case INTEL_OUTPUT_ANALOG:
4781 is_crt = true;
4782 break;
4783 case INTEL_OUTPUT_DISPLAYPORT: 5409 case INTEL_OUTPUT_DISPLAYPORT:
4784 is_dp = true; 5410 is_dp = true;
4785 break; 5411 break;
4786 case INTEL_OUTPUT_EDP: 5412 case INTEL_OUTPUT_EDP:
4787 is_dp = true; 5413 is_dp = true;
4788 if (intel_encoder_is_pch_edp(&encoder->base)) 5414 if (!intel_encoder_is_pch_edp(&intel_encoder->base))
4789 is_pch_edp = true;
4790 else
4791 is_cpu_edp = true; 5415 is_cpu_edp = true;
4792 edp_encoder = encoder; 5416 edp_encoder = intel_encoder;
4793 break; 5417 break;
4794 } 5418 }
4795
4796 num_connectors++;
4797 } 5419 }
4798 5420
4799 ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
4800 &has_reduced_clock, &reduced_clock);
4801 if (!ok) {
4802 DRM_ERROR("Couldn't find PLL settings for mode!\n");
4803 return -EINVAL;
4804 }
4805
4806 /* Ensure that the cursor is valid for the new mode before changing... */
4807 intel_crtc_update_cursor(crtc, true);
4808
4809 /* FDI link */ 5421 /* FDI link */
4810 pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); 5422 pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4811 lane = 0; 5423 lane = 0;
@@ -4832,29 +5444,9 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4832 else 5444 else
4833 target_clock = adjusted_mode->clock; 5445 target_clock = adjusted_mode->clock;
4834 5446
4835 /* determine panel color depth */ 5447 if (!lane)
4836 dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp, 5448 lane = ironlake_get_lanes_required(target_clock, link_bw,
4837 adjusted_mode); 5449 intel_crtc->bpp);
4838 if (is_lvds && dev_priv->lvds_dither)
4839 dither = true;
4840
4841 if (pipe_bpp != 18 && pipe_bpp != 24 && pipe_bpp != 30 &&
4842 pipe_bpp != 36) {
4843 WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
4844 pipe_bpp);
4845 pipe_bpp = 24;
4846 }
4847 intel_crtc->bpp = pipe_bpp;
4848
4849 if (!lane) {
4850 /*
4851 * Account for spread spectrum to avoid
4852 * oversubscribing the link. Max center spread
4853 * is 2.5%; use 5% for safety's sake.
4854 */
4855 u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
4856 lane = bps / (link_bw * 8) + 1;
4857 }
4858 5450
4859 intel_crtc->fdi_lanes = lane; 5451 intel_crtc->fdi_lanes = lane;
4860 5452
@@ -4863,10 +5455,51 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4863 ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, 5455 ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
4864 &m_n); 5456 &m_n);
4865 5457
4866 fp = clock.n << 16 | clock.m1 << 8 | clock.m2; 5458 I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m);
4867 if (has_reduced_clock) 5459 I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
4868 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | 5460 I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
4869 reduced_clock.m2; 5461 I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
5462}
5463
5464static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
5465 struct drm_display_mode *adjusted_mode,
5466 intel_clock_t *clock, u32 fp)
5467{
5468 struct drm_crtc *crtc = &intel_crtc->base;
5469 struct drm_device *dev = crtc->dev;
5470 struct drm_i915_private *dev_priv = dev->dev_private;
5471 struct intel_encoder *intel_encoder;
5472 uint32_t dpll;
5473 int factor, pixel_multiplier, num_connectors = 0;
5474 bool is_lvds = false, is_sdvo = false, is_tv = false;
5475 bool is_dp = false, is_cpu_edp = false;
5476
5477 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5478 switch (intel_encoder->type) {
5479 case INTEL_OUTPUT_LVDS:
5480 is_lvds = true;
5481 break;
5482 case INTEL_OUTPUT_SDVO:
5483 case INTEL_OUTPUT_HDMI:
5484 is_sdvo = true;
5485 if (intel_encoder->needs_tv_clock)
5486 is_tv = true;
5487 break;
5488 case INTEL_OUTPUT_TVOUT:
5489 is_tv = true;
5490 break;
5491 case INTEL_OUTPUT_DISPLAYPORT:
5492 is_dp = true;
5493 break;
5494 case INTEL_OUTPUT_EDP:
5495 is_dp = true;
5496 if (!intel_encoder_is_pch_edp(&intel_encoder->base))
5497 is_cpu_edp = true;
5498 break;
5499 }
5500
5501 num_connectors++;
5502 }
4870 5503
4871 /* Enable autotuning of the PLL clock (if permissible) */ 5504 /* Enable autotuning of the PLL clock (if permissible) */
4872 factor = 21; 5505 factor = 21;
@@ -4878,7 +5511,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4878 } else if (is_sdvo && is_tv) 5511 } else if (is_sdvo && is_tv)
4879 factor = 20; 5512 factor = 20;
4880 5513
4881 if (clock.m < factor * clock.n) 5514 if (clock->m < factor * clock->n)
4882 fp |= FP_CB_TUNE; 5515 fp |= FP_CB_TUNE;
4883 5516
4884 dpll = 0; 5517 dpll = 0;
@@ -4888,7 +5521,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4888 else 5521 else
4889 dpll |= DPLLB_MODE_DAC_SERIAL; 5522 dpll |= DPLLB_MODE_DAC_SERIAL;
4890 if (is_sdvo) { 5523 if (is_sdvo) {
4891 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); 5524 pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4892 if (pixel_multiplier > 1) { 5525 if (pixel_multiplier > 1) {
4893 dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 5526 dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
4894 } 5527 }
@@ -4898,11 +5531,11 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4898 dpll |= DPLL_DVO_HIGH_SPEED; 5531 dpll |= DPLL_DVO_HIGH_SPEED;
4899 5532
4900 /* compute bitmask from p1 value */ 5533 /* compute bitmask from p1 value */
4901 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 5534 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4902 /* also FPA1 */ 5535 /* also FPA1 */
4903 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 5536 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
4904 5537
4905 switch (clock.p2) { 5538 switch (clock->p2) {
4906 case 5: 5539 case 5:
4907 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 5540 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
4908 break; 5541 break;
@@ -4928,15 +5561,79 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4928 else 5561 else
4929 dpll |= PLL_REF_INPUT_DREFCLK; 5562 dpll |= PLL_REF_INPUT_DREFCLK;
4930 5563
5564 return dpll;
5565}
5566
5567static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5568 struct drm_display_mode *mode,
5569 struct drm_display_mode *adjusted_mode,
5570 int x, int y,
5571 struct drm_framebuffer *fb)
5572{
5573 struct drm_device *dev = crtc->dev;
5574 struct drm_i915_private *dev_priv = dev->dev_private;
5575 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5576 int pipe = intel_crtc->pipe;
5577 int plane = intel_crtc->plane;
5578 int num_connectors = 0;
5579 intel_clock_t clock, reduced_clock;
5580 u32 dpll, fp = 0, fp2 = 0;
5581 bool ok, has_reduced_clock = false;
5582 bool is_lvds = false, is_dp = false, is_cpu_edp = false;
5583 struct intel_encoder *encoder;
5584 u32 temp;
5585 int ret;
5586 bool dither, fdi_config_ok;
5587
5588 for_each_encoder_on_crtc(dev, crtc, encoder) {
5589 switch (encoder->type) {
5590 case INTEL_OUTPUT_LVDS:
5591 is_lvds = true;
5592 break;
5593 case INTEL_OUTPUT_DISPLAYPORT:
5594 is_dp = true;
5595 break;
5596 case INTEL_OUTPUT_EDP:
5597 is_dp = true;
5598 if (!intel_encoder_is_pch_edp(&encoder->base))
5599 is_cpu_edp = true;
5600 break;
5601 }
5602
5603 num_connectors++;
5604 }
5605
5606 WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
5607 "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
5608
5609 ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
5610 &has_reduced_clock, &reduced_clock);
5611 if (!ok) {
5612 DRM_ERROR("Couldn't find PLL settings for mode!\n");
5613 return -EINVAL;
5614 }
5615
5616 /* Ensure that the cursor is valid for the new mode before changing... */
5617 intel_crtc_update_cursor(crtc, true);
5618
5619 /* determine panel color depth */
5620 dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
5621 adjusted_mode);
5622 if (is_lvds && dev_priv->lvds_dither)
5623 dither = true;
5624
5625 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
5626 if (has_reduced_clock)
5627 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
5628 reduced_clock.m2;
5629
5630 dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, fp);
5631
4931 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); 5632 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
4932 drm_mode_debug_printmodeline(mode); 5633 drm_mode_debug_printmodeline(mode);
4933 5634
4934 /* CPU eDP is the only output that doesn't need a PCH PLL of its own on 5635 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
4935 * pre-Haswell/LPT generation */ 5636 if (!is_cpu_edp) {
4936 if (HAS_PCH_LPT(dev)) {
4937 DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n",
4938 pipe);
4939 } else if (!is_cpu_edp) {
4940 struct intel_pch_pll *pll; 5637 struct intel_pch_pll *pll;
4941 5638
4942 pll = intel_get_pch_pll(intel_crtc, dpll, fp); 5639 pll = intel_get_pch_pll(intel_crtc, dpll, fp);
@@ -5022,47 +5719,13 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5022 } 5719 }
5023 } 5720 }
5024 5721
5025 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 5722 intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
5026 /* the chip adds 2 halflines automatically */
5027 adjusted_mode->crtc_vtotal -= 1;
5028 adjusted_mode->crtc_vblank_end -= 1;
5029 I915_WRITE(VSYNCSHIFT(pipe),
5030 adjusted_mode->crtc_hsync_start
5031 - adjusted_mode->crtc_htotal/2);
5032 } else {
5033 I915_WRITE(VSYNCSHIFT(pipe), 0);
5034 }
5035 5723
5036 I915_WRITE(HTOTAL(pipe), 5724 /* Note, this also computes intel_crtc->fdi_lanes which is used below in
5037 (adjusted_mode->crtc_hdisplay - 1) | 5725 * ironlake_check_fdi_lanes. */
5038 ((adjusted_mode->crtc_htotal - 1) << 16)); 5726 ironlake_set_m_n(crtc, mode, adjusted_mode);
5039 I915_WRITE(HBLANK(pipe),
5040 (adjusted_mode->crtc_hblank_start - 1) |
5041 ((adjusted_mode->crtc_hblank_end - 1) << 16));
5042 I915_WRITE(HSYNC(pipe),
5043 (adjusted_mode->crtc_hsync_start - 1) |
5044 ((adjusted_mode->crtc_hsync_end - 1) << 16));
5045 5727
5046 I915_WRITE(VTOTAL(pipe), 5728 fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
5047 (adjusted_mode->crtc_vdisplay - 1) |
5048 ((adjusted_mode->crtc_vtotal - 1) << 16));
5049 I915_WRITE(VBLANK(pipe),
5050 (adjusted_mode->crtc_vblank_start - 1) |
5051 ((adjusted_mode->crtc_vblank_end - 1) << 16));
5052 I915_WRITE(VSYNC(pipe),
5053 (adjusted_mode->crtc_vsync_start - 1) |
5054 ((adjusted_mode->crtc_vsync_end - 1) << 16));
5055
5056 /* pipesrc controls the size that is scaled from, which should
5057 * always be the user's requested size.
5058 */
5059 I915_WRITE(PIPESRC(pipe),
5060 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
5061
5062 I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
5063 I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
5064 I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
5065 I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
5066 5729
5067 if (is_cpu_edp) 5730 if (is_cpu_edp)
5068 ironlake_set_pll_edp(crtc, adjusted_mode->clock); 5731 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
@@ -5081,6 +5744,217 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5081 5744
5082 intel_update_linetime_watermarks(dev, pipe, adjusted_mode); 5745 intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
5083 5746
5747 return fdi_config_ok ? ret : -EINVAL;
5748}
5749
5750static int haswell_crtc_mode_set(struct drm_crtc *crtc,
5751 struct drm_display_mode *mode,
5752 struct drm_display_mode *adjusted_mode,
5753 int x, int y,
5754 struct drm_framebuffer *fb)
5755{
5756 struct drm_device *dev = crtc->dev;
5757 struct drm_i915_private *dev_priv = dev->dev_private;
5758 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5759 int pipe = intel_crtc->pipe;
5760 int plane = intel_crtc->plane;
5761 int num_connectors = 0;
5762 intel_clock_t clock, reduced_clock;
5763 u32 dpll = 0, fp = 0, fp2 = 0;
5764 bool ok, has_reduced_clock = false;
5765 bool is_lvds = false, is_dp = false, is_cpu_edp = false;
5766 struct intel_encoder *encoder;
5767 u32 temp;
5768 int ret;
5769 bool dither;
5770
5771 for_each_encoder_on_crtc(dev, crtc, encoder) {
5772 switch (encoder->type) {
5773 case INTEL_OUTPUT_LVDS:
5774 is_lvds = true;
5775 break;
5776 case INTEL_OUTPUT_DISPLAYPORT:
5777 is_dp = true;
5778 break;
5779 case INTEL_OUTPUT_EDP:
5780 is_dp = true;
5781 if (!intel_encoder_is_pch_edp(&encoder->base))
5782 is_cpu_edp = true;
5783 break;
5784 }
5785
5786 num_connectors++;
5787 }
5788
5789 if (is_cpu_edp)
5790 intel_crtc->cpu_transcoder = TRANSCODER_EDP;
5791 else
5792 intel_crtc->cpu_transcoder = pipe;
5793
5794 /* We are not sure yet this won't happen. */
5795 WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
5796 INTEL_PCH_TYPE(dev));
5797
5798 WARN(num_connectors != 1, "%d connectors attached to pipe %c\n",
5799 num_connectors, pipe_name(pipe));
5800
5801 WARN_ON(I915_READ(PIPECONF(intel_crtc->cpu_transcoder)) &
5802 (PIPECONF_ENABLE | I965_PIPECONF_ACTIVE));
5803
5804 WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE);
5805
5806 if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
5807 return -EINVAL;
5808
5809 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5810 ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
5811 &has_reduced_clock,
5812 &reduced_clock);
5813 if (!ok) {
5814 DRM_ERROR("Couldn't find PLL settings for mode!\n");
5815 return -EINVAL;
5816 }
5817 }
5818
5819 /* Ensure that the cursor is valid for the new mode before changing... */
5820 intel_crtc_update_cursor(crtc, true);
5821
5822 /* determine panel color depth */
5823 dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
5824 adjusted_mode);
5825 if (is_lvds && dev_priv->lvds_dither)
5826 dither = true;
5827
5828 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
5829 drm_mode_debug_printmodeline(mode);
5830
5831 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5832 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
5833 if (has_reduced_clock)
5834 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
5835 reduced_clock.m2;
5836
5837 dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock,
5838 fp);
5839
5840 /* CPU eDP is the only output that doesn't need a PCH PLL of its
5841 * own on pre-Haswell/LPT generation */
5842 if (!is_cpu_edp) {
5843 struct intel_pch_pll *pll;
5844
5845 pll = intel_get_pch_pll(intel_crtc, dpll, fp);
5846 if (pll == NULL) {
5847 DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
5848 pipe);
5849 return -EINVAL;
5850 }
5851 } else
5852 intel_put_pch_pll(intel_crtc);
5853
5854 /* The LVDS pin pair needs to be on before the DPLLs are
5855 * enabled. This is an exception to the general rule that
5856 * mode_set doesn't turn things on.
5857 */
5858 if (is_lvds) {
5859 temp = I915_READ(PCH_LVDS);
5860 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
5861 if (HAS_PCH_CPT(dev)) {
5862 temp &= ~PORT_TRANS_SEL_MASK;
5863 temp |= PORT_TRANS_SEL_CPT(pipe);
5864 } else {
5865 if (pipe == 1)
5866 temp |= LVDS_PIPEB_SELECT;
5867 else
5868 temp &= ~LVDS_PIPEB_SELECT;
5869 }
5870
5871 /* set the corresponsding LVDS_BORDER bit */
5872 temp |= dev_priv->lvds_border_bits;
5873 /* Set the B0-B3 data pairs corresponding to whether
5874 * we're going to set the DPLLs for dual-channel mode or
5875 * not.
5876 */
5877 if (clock.p2 == 7)
5878 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
5879 else
5880 temp &= ~(LVDS_B0B3_POWER_UP |
5881 LVDS_CLKB_POWER_UP);
5882
5883 /* It would be nice to set 24 vs 18-bit mode
5884 * (LVDS_A3_POWER_UP) appropriately here, but we need to
5885 * look more thoroughly into how panels behave in the
5886 * two modes.
5887 */
5888 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5889 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5890 temp |= LVDS_HSYNC_POLARITY;
5891 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5892 temp |= LVDS_VSYNC_POLARITY;
5893 I915_WRITE(PCH_LVDS, temp);
5894 }
5895 }
5896
5897 if (is_dp && !is_cpu_edp) {
5898 intel_dp_set_m_n(crtc, mode, adjusted_mode);
5899 } else {
5900 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5901 /* For non-DP output, clear any trans DP clock recovery
5902 * setting.*/
5903 I915_WRITE(TRANSDATA_M1(pipe), 0);
5904 I915_WRITE(TRANSDATA_N1(pipe), 0);
5905 I915_WRITE(TRANSDPLINK_M1(pipe), 0);
5906 I915_WRITE(TRANSDPLINK_N1(pipe), 0);
5907 }
5908 }
5909
5910 intel_crtc->lowfreq_avail = false;
5911 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5912 if (intel_crtc->pch_pll) {
5913 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
5914
5915 /* Wait for the clocks to stabilize. */
5916 POSTING_READ(intel_crtc->pch_pll->pll_reg);
5917 udelay(150);
5918
5919 /* The pixel multiplier can only be updated once the
5920 * DPLL is enabled and the clocks are stable.
5921 *
5922 * So write it again.
5923 */
5924 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
5925 }
5926
5927 if (intel_crtc->pch_pll) {
5928 if (is_lvds && has_reduced_clock && i915_powersave) {
5929 I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
5930 intel_crtc->lowfreq_avail = true;
5931 } else {
5932 I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
5933 }
5934 }
5935 }
5936
5937 intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
5938
5939 if (!is_dp || is_cpu_edp)
5940 ironlake_set_m_n(crtc, mode, adjusted_mode);
5941
5942 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
5943 if (is_cpu_edp)
5944 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
5945
5946 haswell_set_pipeconf(crtc, adjusted_mode, dither);
5947
5948 /* Set up the display plane register */
5949 I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
5950 POSTING_READ(DSPCNTR(plane));
5951
5952 ret = intel_pipe_set_base(crtc, x, y, fb);
5953
5954 intel_update_watermarks(dev);
5955
5956 intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
5957
5084 return ret; 5958 return ret;
5085} 5959}
5086 5960
@@ -5092,6 +5966,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
5092{ 5966{
5093 struct drm_device *dev = crtc->dev; 5967 struct drm_device *dev = crtc->dev;
5094 struct drm_i915_private *dev_priv = dev->dev_private; 5968 struct drm_i915_private *dev_priv = dev->dev_private;
5969 struct drm_encoder_helper_funcs *encoder_funcs;
5970 struct intel_encoder *encoder;
5095 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5971 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5096 int pipe = intel_crtc->pipe; 5972 int pipe = intel_crtc->pipe;
5097 int ret; 5973 int ret;
@@ -5102,7 +5978,19 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
5102 x, y, fb); 5978 x, y, fb);
5103 drm_vblank_post_modeset(dev, pipe); 5979 drm_vblank_post_modeset(dev, pipe);
5104 5980
5105 return ret; 5981 if (ret != 0)
5982 return ret;
5983
5984 for_each_encoder_on_crtc(dev, crtc, encoder) {
5985 DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
5986 encoder->base.base.id,
5987 drm_get_encoder_name(&encoder->base),
5988 mode->base.id, mode->name);
5989 encoder_funcs = encoder->base.helper_private;
5990 encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
5991 }
5992
5993 return 0;
5106} 5994}
5107 5995
5108static bool intel_eld_uptodate(struct drm_connector *connector, 5996static bool intel_eld_uptodate(struct drm_connector *connector,
@@ -5738,7 +6626,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
5738 int depth, int bpp) 6626 int depth, int bpp)
5739{ 6627{
5740 struct drm_i915_gem_object *obj; 6628 struct drm_i915_gem_object *obj;
5741 struct drm_mode_fb_cmd2 mode_cmd; 6629 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
5742 6630
5743 obj = i915_gem_alloc_object(dev, 6631 obj = i915_gem_alloc_object(dev,
5744 intel_framebuffer_size_for_mode(mode, bpp)); 6632 intel_framebuffer_size_for_mode(mode, bpp));
@@ -5868,24 +6756,19 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
5868 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); 6756 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
5869 if (IS_ERR(fb)) { 6757 if (IS_ERR(fb)) {
5870 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); 6758 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
5871 goto fail; 6759 return false;
5872 } 6760 }
5873 6761
5874 if (!intel_set_mode(crtc, mode, 0, 0, fb)) { 6762 if (!intel_set_mode(crtc, mode, 0, 0, fb)) {
5875 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); 6763 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
5876 if (old->release_fb) 6764 if (old->release_fb)
5877 old->release_fb->funcs->destroy(old->release_fb); 6765 old->release_fb->funcs->destroy(old->release_fb);
5878 goto fail; 6766 return false;
5879 } 6767 }
5880 6768
5881 /* let the connector get through one full cycle before testing */ 6769 /* let the connector get through one full cycle before testing */
5882 intel_wait_for_vblank(dev, intel_crtc->pipe); 6770 intel_wait_for_vblank(dev, intel_crtc->pipe);
5883
5884 return true; 6771 return true;
5885fail:
5886 connector->encoder = NULL;
5887 encoder->crtc = NULL;
5888 return false;
5889} 6772}
5890 6773
5891void intel_release_load_detect_pipe(struct drm_connector *connector, 6774void intel_release_load_detect_pipe(struct drm_connector *connector,
@@ -6010,12 +6893,12 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
6010{ 6893{
6011 struct drm_i915_private *dev_priv = dev->dev_private; 6894 struct drm_i915_private *dev_priv = dev->dev_private;
6012 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6895 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6013 int pipe = intel_crtc->pipe; 6896 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
6014 struct drm_display_mode *mode; 6897 struct drm_display_mode *mode;
6015 int htot = I915_READ(HTOTAL(pipe)); 6898 int htot = I915_READ(HTOTAL(cpu_transcoder));
6016 int hsync = I915_READ(HSYNC(pipe)); 6899 int hsync = I915_READ(HSYNC(cpu_transcoder));
6017 int vtot = I915_READ(VTOTAL(pipe)); 6900 int vtot = I915_READ(VTOTAL(cpu_transcoder));
6018 int vsync = I915_READ(VSYNC(pipe)); 6901 int vsync = I915_READ(VSYNC(cpu_transcoder));
6019 6902
6020 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 6903 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
6021 if (!mode) 6904 if (!mode)
@@ -6172,14 +7055,19 @@ static void intel_unpin_work_fn(struct work_struct *__work)
6172{ 7055{
6173 struct intel_unpin_work *work = 7056 struct intel_unpin_work *work =
6174 container_of(__work, struct intel_unpin_work, work); 7057 container_of(__work, struct intel_unpin_work, work);
7058 struct drm_device *dev = work->crtc->dev;
6175 7059
6176 mutex_lock(&work->dev->struct_mutex); 7060 mutex_lock(&dev->struct_mutex);
6177 intel_unpin_fb_obj(work->old_fb_obj); 7061 intel_unpin_fb_obj(work->old_fb_obj);
6178 drm_gem_object_unreference(&work->pending_flip_obj->base); 7062 drm_gem_object_unreference(&work->pending_flip_obj->base);
6179 drm_gem_object_unreference(&work->old_fb_obj->base); 7063 drm_gem_object_unreference(&work->old_fb_obj->base);
6180 7064
6181 intel_update_fbc(work->dev); 7065 intel_update_fbc(dev);
6182 mutex_unlock(&work->dev->struct_mutex); 7066 mutex_unlock(&dev->struct_mutex);
7067
7068 BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
7069 atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
7070
6183 kfree(work); 7071 kfree(work);
6184} 7072}
6185 7073
@@ -6190,8 +7078,6 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
6190 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7078 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6191 struct intel_unpin_work *work; 7079 struct intel_unpin_work *work;
6192 struct drm_i915_gem_object *obj; 7080 struct drm_i915_gem_object *obj;
6193 struct drm_pending_vblank_event *e;
6194 struct timeval tvbl;
6195 unsigned long flags; 7081 unsigned long flags;
6196 7082
6197 /* Ignore early vblank irqs */ 7083 /* Ignore early vblank irqs */
@@ -6200,24 +7086,22 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
6200 7086
6201 spin_lock_irqsave(&dev->event_lock, flags); 7087 spin_lock_irqsave(&dev->event_lock, flags);
6202 work = intel_crtc->unpin_work; 7088 work = intel_crtc->unpin_work;
6203 if (work == NULL || !work->pending) { 7089
7090 /* Ensure we don't miss a work->pending update ... */
7091 smp_rmb();
7092
7093 if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
6204 spin_unlock_irqrestore(&dev->event_lock, flags); 7094 spin_unlock_irqrestore(&dev->event_lock, flags);
6205 return; 7095 return;
6206 } 7096 }
6207 7097
6208 intel_crtc->unpin_work = NULL; 7098 /* and that the unpin work is consistent wrt ->pending. */
6209 7099 smp_rmb();
6210 if (work->event) {
6211 e = work->event;
6212 e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
6213 7100
6214 e->event.tv_sec = tvbl.tv_sec; 7101 intel_crtc->unpin_work = NULL;
6215 e->event.tv_usec = tvbl.tv_usec;
6216 7102
6217 list_add_tail(&e->base.link, 7103 if (work->event)
6218 &e->base.file_priv->event_list); 7104 drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
6219 wake_up_interruptible(&e->base.file_priv->event_wait);
6220 }
6221 7105
6222 drm_vblank_put(dev, intel_crtc->pipe); 7106 drm_vblank_put(dev, intel_crtc->pipe);
6223 7107
@@ -6227,9 +7111,9 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
6227 7111
6228 atomic_clear_mask(1 << intel_crtc->plane, 7112 atomic_clear_mask(1 << intel_crtc->plane,
6229 &obj->pending_flip.counter); 7113 &obj->pending_flip.counter);
6230
6231 wake_up(&dev_priv->pending_flip_queue); 7114 wake_up(&dev_priv->pending_flip_queue);
6232 schedule_work(&work->work); 7115
7116 queue_work(dev_priv->wq, &work->work);
6233 7117
6234 trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); 7118 trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
6235} 7119}
@@ -6257,16 +7141,25 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
6257 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); 7141 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
6258 unsigned long flags; 7142 unsigned long flags;
6259 7143
7144 /* NB: An MMIO update of the plane base pointer will also
7145 * generate a page-flip completion irq, i.e. every modeset
7146 * is also accompanied by a spurious intel_prepare_page_flip().
7147 */
6260 spin_lock_irqsave(&dev->event_lock, flags); 7148 spin_lock_irqsave(&dev->event_lock, flags);
6261 if (intel_crtc->unpin_work) { 7149 if (intel_crtc->unpin_work)
6262 if ((++intel_crtc->unpin_work->pending) > 1) 7150 atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
6263 DRM_ERROR("Prepared flip multiple times\n");
6264 } else {
6265 DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
6266 }
6267 spin_unlock_irqrestore(&dev->event_lock, flags); 7151 spin_unlock_irqrestore(&dev->event_lock, flags);
6268} 7152}
6269 7153
7154inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
7155{
7156 /* Ensure that the work item is consistent when activating it ... */
7157 smp_wmb();
7158 atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
7159 /* and that it is marked active as soon as the irq could fire. */
7160 smp_wmb();
7161}
7162
6270static int intel_gen2_queue_flip(struct drm_device *dev, 7163static int intel_gen2_queue_flip(struct drm_device *dev,
6271 struct drm_crtc *crtc, 7164 struct drm_crtc *crtc,
6272 struct drm_framebuffer *fb, 7165 struct drm_framebuffer *fb,
@@ -6300,6 +7193,8 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
6300 intel_ring_emit(ring, fb->pitches[0]); 7193 intel_ring_emit(ring, fb->pitches[0]);
6301 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); 7194 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
6302 intel_ring_emit(ring, 0); /* aux display base address, unused */ 7195 intel_ring_emit(ring, 0); /* aux display base address, unused */
7196
7197 intel_mark_page_flip_active(intel_crtc);
6303 intel_ring_advance(ring); 7198 intel_ring_advance(ring);
6304 return 0; 7199 return 0;
6305 7200
@@ -6340,6 +7235,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
6340 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); 7235 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
6341 intel_ring_emit(ring, MI_NOOP); 7236 intel_ring_emit(ring, MI_NOOP);
6342 7237
7238 intel_mark_page_flip_active(intel_crtc);
6343 intel_ring_advance(ring); 7239 intel_ring_advance(ring);
6344 return 0; 7240 return 0;
6345 7241
@@ -6386,6 +7282,8 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
6386 pf = 0; 7282 pf = 0;
6387 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 7283 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
6388 intel_ring_emit(ring, pf | pipesrc); 7284 intel_ring_emit(ring, pf | pipesrc);
7285
7286 intel_mark_page_flip_active(intel_crtc);
6389 intel_ring_advance(ring); 7287 intel_ring_advance(ring);
6390 return 0; 7288 return 0;
6391 7289
@@ -6428,6 +7326,8 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
6428 pf = 0; 7326 pf = 0;
6429 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 7327 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
6430 intel_ring_emit(ring, pf | pipesrc); 7328 intel_ring_emit(ring, pf | pipesrc);
7329
7330 intel_mark_page_flip_active(intel_crtc);
6431 intel_ring_advance(ring); 7331 intel_ring_advance(ring);
6432 return 0; 7332 return 0;
6433 7333
@@ -6482,6 +7382,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
6482 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); 7382 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
6483 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset); 7383 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
6484 intel_ring_emit(ring, (MI_NOOP)); 7384 intel_ring_emit(ring, (MI_NOOP));
7385
7386 intel_mark_page_flip_active(intel_crtc);
6485 intel_ring_advance(ring); 7387 intel_ring_advance(ring);
6486 return 0; 7388 return 0;
6487 7389
@@ -6530,7 +7432,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
6530 return -ENOMEM; 7432 return -ENOMEM;
6531 7433
6532 work->event = event; 7434 work->event = event;
6533 work->dev = crtc->dev; 7435 work->crtc = crtc;
6534 intel_fb = to_intel_framebuffer(crtc->fb); 7436 intel_fb = to_intel_framebuffer(crtc->fb);
6535 work->old_fb_obj = intel_fb->obj; 7437 work->old_fb_obj = intel_fb->obj;
6536 INIT_WORK(&work->work, intel_unpin_work_fn); 7438 INIT_WORK(&work->work, intel_unpin_work_fn);
@@ -6555,6 +7457,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
6555 intel_fb = to_intel_framebuffer(fb); 7457 intel_fb = to_intel_framebuffer(fb);
6556 obj = intel_fb->obj; 7458 obj = intel_fb->obj;
6557 7459
7460 if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
7461 flush_workqueue(dev_priv->wq);
7462
6558 ret = i915_mutex_lock_interruptible(dev); 7463 ret = i915_mutex_lock_interruptible(dev);
6559 if (ret) 7464 if (ret)
6560 goto cleanup; 7465 goto cleanup;
@@ -6573,6 +7478,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
6573 * the flip occurs and the object is no longer visible. 7478 * the flip occurs and the object is no longer visible.
6574 */ 7479 */
6575 atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); 7480 atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
7481 atomic_inc(&intel_crtc->unpin_work_count);
6576 7482
6577 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj); 7483 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
6578 if (ret) 7484 if (ret)
@@ -6587,6 +7493,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
6587 return 0; 7493 return 0;
6588 7494
6589cleanup_pending: 7495cleanup_pending:
7496 atomic_dec(&intel_crtc->unpin_work_count);
6590 atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); 7497 atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
6591 drm_gem_object_unreference(&work->old_fb_obj->base); 7498 drm_gem_object_unreference(&work->old_fb_obj->base);
6592 drm_gem_object_unreference(&obj->base); 7499 drm_gem_object_unreference(&obj->base);
@@ -6882,7 +7789,7 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
6882 dev->mode_config.dpms_property; 7789 dev->mode_config.dpms_property;
6883 7790
6884 connector->dpms = DRM_MODE_DPMS_ON; 7791 connector->dpms = DRM_MODE_DPMS_ON;
6885 drm_connector_property_set_value(connector, 7792 drm_object_property_set_value(&connector->base,
6886 dpms_property, 7793 dpms_property,
6887 DRM_MODE_DPMS_ON); 7794 DRM_MODE_DPMS_ON);
6888 7795
@@ -7004,8 +7911,6 @@ bool intel_set_mode(struct drm_crtc *crtc,
7004 struct drm_device *dev = crtc->dev; 7911 struct drm_device *dev = crtc->dev;
7005 drm_i915_private_t *dev_priv = dev->dev_private; 7912 drm_i915_private_t *dev_priv = dev->dev_private;
7006 struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode; 7913 struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
7007 struct drm_encoder_helper_funcs *encoder_funcs;
7008 struct drm_encoder *encoder;
7009 struct intel_crtc *intel_crtc; 7914 struct intel_crtc *intel_crtc;
7010 unsigned disable_pipes, prepare_pipes, modeset_pipes; 7915 unsigned disable_pipes, prepare_pipes, modeset_pipes;
7011 bool ret = true; 7916 bool ret = true;
@@ -7050,6 +7955,9 @@ bool intel_set_mode(struct drm_crtc *crtc,
7050 * update the the output configuration. */ 7955 * update the the output configuration. */
7051 intel_modeset_update_state(dev, prepare_pipes); 7956 intel_modeset_update_state(dev, prepare_pipes);
7052 7957
7958 if (dev_priv->display.modeset_global_resources)
7959 dev_priv->display.modeset_global_resources(dev);
7960
7053 /* Set up the DPLL and any encoders state that needs to adjust or depend 7961 /* Set up the DPLL and any encoders state that needs to adjust or depend
7054 * on the DPLL. 7962 * on the DPLL.
7055 */ 7963 */
@@ -7059,18 +7967,6 @@ bool intel_set_mode(struct drm_crtc *crtc,
7059 x, y, fb); 7967 x, y, fb);
7060 if (!ret) 7968 if (!ret)
7061 goto done; 7969 goto done;
7062
7063 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
7064
7065 if (encoder->crtc != &intel_crtc->base)
7066 continue;
7067
7068 DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
7069 encoder->base.id, drm_get_encoder_name(encoder),
7070 mode->base.id, mode->name);
7071 encoder_funcs = encoder->helper_private;
7072 encoder_funcs->mode_set(encoder, mode, adjusted_mode);
7073 }
7074 } 7970 }
7075 7971
7076 /* Now enable the clocks, plane, pipe, and connectors that we set up. */ 7972 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -7248,10 +8144,6 @@ intel_modeset_stage_output_state(struct drm_device *dev,
7248 DRM_DEBUG_KMS("encoder changed, full mode switch\n"); 8144 DRM_DEBUG_KMS("encoder changed, full mode switch\n");
7249 config->mode_changed = true; 8145 config->mode_changed = true;
7250 } 8146 }
7251
7252 /* Disable all disconnected encoders. */
7253 if (connector->base.status == connector_status_disconnected)
7254 connector->new_encoder = NULL;
7255 } 8147 }
7256 /* connector->new_encoder is now updated for all connectors. */ 8148 /* connector->new_encoder is now updated for all connectors. */
7257 8149
@@ -7409,6 +8301,12 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
7409 .page_flip = intel_crtc_page_flip, 8301 .page_flip = intel_crtc_page_flip,
7410}; 8302};
7411 8303
8304static void intel_cpu_pll_init(struct drm_device *dev)
8305{
8306 if (IS_HASWELL(dev))
8307 intel_ddi_pll_init(dev);
8308}
8309
7412static void intel_pch_pll_init(struct drm_device *dev) 8310static void intel_pch_pll_init(struct drm_device *dev)
7413{ 8311{
7414 drm_i915_private_t *dev_priv = dev->dev_private; 8312 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -7448,6 +8346,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
7448 /* Swap pipes & planes for FBC on pre-965 */ 8346 /* Swap pipes & planes for FBC on pre-965 */
7449 intel_crtc->pipe = pipe; 8347 intel_crtc->pipe = pipe;
7450 intel_crtc->plane = pipe; 8348 intel_crtc->plane = pipe;
8349 intel_crtc->cpu_transcoder = pipe;
7451 if (IS_MOBILE(dev) && IS_GEN3(dev)) { 8350 if (IS_MOBILE(dev) && IS_GEN3(dev)) {
7452 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); 8351 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
7453 intel_crtc->plane = !pipe; 8352 intel_crtc->plane = !pipe;
@@ -7540,17 +8439,9 @@ static void intel_setup_outputs(struct drm_device *dev)
7540 I915_WRITE(PFIT_CONTROL, 0); 8439 I915_WRITE(PFIT_CONTROL, 0);
7541 } 8440 }
7542 8441
7543 if (HAS_PCH_SPLIT(dev)) { 8442 if (!(IS_HASWELL(dev) &&
7544 dpd_is_edp = intel_dpd_is_edp(dev); 8443 (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
7545 8444 intel_crt_init(dev);
7546 if (has_edp_a(dev))
7547 intel_dp_init(dev, DP_A, PORT_A);
7548
7549 if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
7550 intel_dp_init(dev, PCH_DP_D, PORT_D);
7551 }
7552
7553 intel_crt_init(dev);
7554 8445
7555 if (IS_HASWELL(dev)) { 8446 if (IS_HASWELL(dev)) {
7556 int found; 8447 int found;
@@ -7573,6 +8464,10 @@ static void intel_setup_outputs(struct drm_device *dev)
7573 intel_ddi_init(dev, PORT_D); 8464 intel_ddi_init(dev, PORT_D);
7574 } else if (HAS_PCH_SPLIT(dev)) { 8465 } else if (HAS_PCH_SPLIT(dev)) {
7575 int found; 8466 int found;
8467 dpd_is_edp = intel_dpd_is_edp(dev);
8468
8469 if (has_edp_a(dev))
8470 intel_dp_init(dev, DP_A, PORT_A);
7576 8471
7577 if (I915_READ(HDMIB) & PORT_DETECTED) { 8472 if (I915_READ(HDMIB) & PORT_DETECTED) {
7578 /* PCH SDVOB multiplex with HDMIB */ 8473 /* PCH SDVOB multiplex with HDMIB */
@@ -7592,11 +8487,15 @@ static void intel_setup_outputs(struct drm_device *dev)
7592 if (I915_READ(PCH_DP_C) & DP_DETECTED) 8487 if (I915_READ(PCH_DP_C) & DP_DETECTED)
7593 intel_dp_init(dev, PCH_DP_C, PORT_C); 8488 intel_dp_init(dev, PCH_DP_C, PORT_C);
7594 8489
7595 if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) 8490 if (I915_READ(PCH_DP_D) & DP_DETECTED)
7596 intel_dp_init(dev, PCH_DP_D, PORT_D); 8491 intel_dp_init(dev, PCH_DP_D, PORT_D);
7597 } else if (IS_VALLEYVIEW(dev)) { 8492 } else if (IS_VALLEYVIEW(dev)) {
7598 int found; 8493 int found;
7599 8494
8495 /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
8496 if (I915_READ(DP_C) & DP_DETECTED)
8497 intel_dp_init(dev, DP_C, PORT_C);
8498
7600 if (I915_READ(SDVOB) & PORT_DETECTED) { 8499 if (I915_READ(SDVOB) & PORT_DETECTED) {
7601 /* SDVOB multiplex with HDMIB */ 8500 /* SDVOB multiplex with HDMIB */
7602 found = intel_sdvo_init(dev, SDVOB, true); 8501 found = intel_sdvo_init(dev, SDVOB, true);
@@ -7609,9 +8508,6 @@ static void intel_setup_outputs(struct drm_device *dev)
7609 if (I915_READ(SDVOC) & PORT_DETECTED) 8508 if (I915_READ(SDVOC) & PORT_DETECTED)
7610 intel_hdmi_init(dev, SDVOC, PORT_C); 8509 intel_hdmi_init(dev, SDVOC, PORT_C);
7611 8510
7612 /* Shares lanes with HDMI on SDVOC */
7613 if (I915_READ(DP_C) & DP_DETECTED)
7614 intel_dp_init(dev, DP_C, PORT_C);
7615 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { 8511 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
7616 bool found = false; 8512 bool found = false;
7617 8513
@@ -7665,8 +8561,9 @@ static void intel_setup_outputs(struct drm_device *dev)
7665 intel_encoder_clones(encoder); 8561 intel_encoder_clones(encoder);
7666 } 8562 }
7667 8563
7668 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 8564 intel_init_pch_refclk(dev);
7669 ironlake_init_pch_refclk(dev); 8565
8566 drm_helper_move_panel_connectors_to_head(dev);
7670} 8567}
7671 8568
7672static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) 8569static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
@@ -7701,33 +8598,74 @@ int intel_framebuffer_init(struct drm_device *dev,
7701{ 8598{
7702 int ret; 8599 int ret;
7703 8600
7704 if (obj->tiling_mode == I915_TILING_Y) 8601 if (obj->tiling_mode == I915_TILING_Y) {
8602 DRM_DEBUG("hardware does not support tiling Y\n");
7705 return -EINVAL; 8603 return -EINVAL;
8604 }
7706 8605
7707 if (mode_cmd->pitches[0] & 63) 8606 if (mode_cmd->pitches[0] & 63) {
8607 DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
8608 mode_cmd->pitches[0]);
7708 return -EINVAL; 8609 return -EINVAL;
8610 }
7709 8611
8612 /* FIXME <= Gen4 stride limits are bit unclear */
8613 if (mode_cmd->pitches[0] > 32768) {
8614 DRM_DEBUG("pitch (%d) must be at less than 32768\n",
8615 mode_cmd->pitches[0]);
8616 return -EINVAL;
8617 }
8618
8619 if (obj->tiling_mode != I915_TILING_NONE &&
8620 mode_cmd->pitches[0] != obj->stride) {
8621 DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
8622 mode_cmd->pitches[0], obj->stride);
8623 return -EINVAL;
8624 }
8625
8626 /* Reject formats not supported by any plane early. */
7710 switch (mode_cmd->pixel_format) { 8627 switch (mode_cmd->pixel_format) {
7711 case DRM_FORMAT_RGB332: 8628 case DRM_FORMAT_C8:
7712 case DRM_FORMAT_RGB565: 8629 case DRM_FORMAT_RGB565:
7713 case DRM_FORMAT_XRGB8888: 8630 case DRM_FORMAT_XRGB8888:
7714 case DRM_FORMAT_XBGR8888:
7715 case DRM_FORMAT_ARGB8888: 8631 case DRM_FORMAT_ARGB8888:
8632 break;
8633 case DRM_FORMAT_XRGB1555:
8634 case DRM_FORMAT_ARGB1555:
8635 if (INTEL_INFO(dev)->gen > 3) {
8636 DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
8637 return -EINVAL;
8638 }
8639 break;
8640 case DRM_FORMAT_XBGR8888:
8641 case DRM_FORMAT_ABGR8888:
7716 case DRM_FORMAT_XRGB2101010: 8642 case DRM_FORMAT_XRGB2101010:
7717 case DRM_FORMAT_ARGB2101010: 8643 case DRM_FORMAT_ARGB2101010:
7718 /* RGB formats are common across chipsets */ 8644 case DRM_FORMAT_XBGR2101010:
8645 case DRM_FORMAT_ABGR2101010:
8646 if (INTEL_INFO(dev)->gen < 4) {
8647 DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
8648 return -EINVAL;
8649 }
7719 break; 8650 break;
7720 case DRM_FORMAT_YUYV: 8651 case DRM_FORMAT_YUYV:
7721 case DRM_FORMAT_UYVY: 8652 case DRM_FORMAT_UYVY:
7722 case DRM_FORMAT_YVYU: 8653 case DRM_FORMAT_YVYU:
7723 case DRM_FORMAT_VYUY: 8654 case DRM_FORMAT_VYUY:
8655 if (INTEL_INFO(dev)->gen < 5) {
8656 DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
8657 return -EINVAL;
8658 }
7724 break; 8659 break;
7725 default: 8660 default:
7726 DRM_DEBUG_KMS("unsupported pixel format %u\n", 8661 DRM_DEBUG("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
7727 mode_cmd->pixel_format);
7728 return -EINVAL; 8662 return -EINVAL;
7729 } 8663 }
7730 8664
8665 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
8666 if (mode_cmd->offsets[0] != 0)
8667 return -EINVAL;
8668
7731 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); 8669 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
7732 if (ret) { 8670 if (ret) {
7733 DRM_ERROR("framebuffer init failed %d\n", ret); 8671 DRM_ERROR("framebuffer init failed %d\n", ret);
@@ -7765,7 +8703,13 @@ static void intel_init_display(struct drm_device *dev)
7765 struct drm_i915_private *dev_priv = dev->dev_private; 8703 struct drm_i915_private *dev_priv = dev->dev_private;
7766 8704
7767 /* We always want a DPMS function */ 8705 /* We always want a DPMS function */
7768 if (HAS_PCH_SPLIT(dev)) { 8706 if (IS_HASWELL(dev)) {
8707 dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
8708 dev_priv->display.crtc_enable = haswell_crtc_enable;
8709 dev_priv->display.crtc_disable = haswell_crtc_disable;
8710 dev_priv->display.off = haswell_crtc_off;
8711 dev_priv->display.update_plane = ironlake_update_plane;
8712 } else if (HAS_PCH_SPLIT(dev)) {
7769 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; 8713 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
7770 dev_priv->display.crtc_enable = ironlake_crtc_enable; 8714 dev_priv->display.crtc_enable = ironlake_crtc_enable;
7771 dev_priv->display.crtc_disable = ironlake_crtc_disable; 8715 dev_priv->display.crtc_disable = ironlake_crtc_disable;
@@ -7816,6 +8760,8 @@ static void intel_init_display(struct drm_device *dev)
7816 /* FIXME: detect B0+ stepping and use auto training */ 8760 /* FIXME: detect B0+ stepping and use auto training */
7817 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 8761 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
7818 dev_priv->display.write_eld = ironlake_write_eld; 8762 dev_priv->display.write_eld = ironlake_write_eld;
8763 dev_priv->display.modeset_global_resources =
8764 ivb_modeset_global_resources;
7819 } else if (IS_HASWELL(dev)) { 8765 } else if (IS_HASWELL(dev)) {
7820 dev_priv->display.fdi_link_train = hsw_fdi_link_train; 8766 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
7821 dev_priv->display.write_eld = haswell_write_eld; 8767 dev_priv->display.write_eld = haswell_write_eld;
@@ -8047,6 +8993,7 @@ void intel_modeset_init(struct drm_device *dev)
8047 DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret); 8993 DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
8048 } 8994 }
8049 8995
8996 intel_cpu_pll_init(dev);
8050 intel_pch_pll_init(dev); 8997 intel_pch_pll_init(dev);
8051 8998
8052 /* Just disable it once at startup */ 8999 /* Just disable it once at startup */
@@ -8116,7 +9063,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
8116 u32 reg; 9063 u32 reg;
8117 9064
8118 /* Clear any frame start delays used for debugging left by the BIOS */ 9065 /* Clear any frame start delays used for debugging left by the BIOS */
8119 reg = PIPECONF(crtc->pipe); 9066 reg = PIPECONF(crtc->cpu_transcoder);
8120 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); 9067 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
8121 9068
8122 /* We need to sanitize the plane -> pipe mapping first because this will 9069 /* We need to sanitize the plane -> pipe mapping first because this will
@@ -8233,9 +9180,27 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
8233 * the crtc fixup. */ 9180 * the crtc fixup. */
8234} 9181}
8235 9182
9183static void i915_redisable_vga(struct drm_device *dev)
9184{
9185 struct drm_i915_private *dev_priv = dev->dev_private;
9186 u32 vga_reg;
9187
9188 if (HAS_PCH_SPLIT(dev))
9189 vga_reg = CPU_VGACNTRL;
9190 else
9191 vga_reg = VGACNTRL;
9192
9193 if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
9194 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
9195 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
9196 POSTING_READ(vga_reg);
9197 }
9198}
9199
8236/* Scan out the current hw modeset state, sanitizes it and maps it into the drm 9200/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
8237 * and i915 state tracking structures. */ 9201 * and i915 state tracking structures. */
8238void intel_modeset_setup_hw_state(struct drm_device *dev) 9202void intel_modeset_setup_hw_state(struct drm_device *dev,
9203 bool force_restore)
8239{ 9204{
8240 struct drm_i915_private *dev_priv = dev->dev_private; 9205 struct drm_i915_private *dev_priv = dev->dev_private;
8241 enum pipe pipe; 9206 enum pipe pipe;
@@ -8244,10 +9209,35 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
8244 struct intel_encoder *encoder; 9209 struct intel_encoder *encoder;
8245 struct intel_connector *connector; 9210 struct intel_connector *connector;
8246 9211
9212 if (IS_HASWELL(dev)) {
9213 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
9214
9215 if (tmp & TRANS_DDI_FUNC_ENABLE) {
9216 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
9217 case TRANS_DDI_EDP_INPUT_A_ON:
9218 case TRANS_DDI_EDP_INPUT_A_ONOFF:
9219 pipe = PIPE_A;
9220 break;
9221 case TRANS_DDI_EDP_INPUT_B_ONOFF:
9222 pipe = PIPE_B;
9223 break;
9224 case TRANS_DDI_EDP_INPUT_C_ONOFF:
9225 pipe = PIPE_C;
9226 break;
9227 }
9228
9229 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
9230 crtc->cpu_transcoder = TRANSCODER_EDP;
9231
9232 DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n",
9233 pipe_name(pipe));
9234 }
9235 }
9236
8247 for_each_pipe(pipe) { 9237 for_each_pipe(pipe) {
8248 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 9238 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
8249 9239
8250 tmp = I915_READ(PIPECONF(pipe)); 9240 tmp = I915_READ(PIPECONF(crtc->cpu_transcoder));
8251 if (tmp & PIPECONF_ENABLE) 9241 if (tmp & PIPECONF_ENABLE)
8252 crtc->active = true; 9242 crtc->active = true;
8253 else 9243 else
@@ -8260,6 +9250,9 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
8260 crtc->active ? "enabled" : "disabled"); 9250 crtc->active ? "enabled" : "disabled");
8261 } 9251 }
8262 9252
9253 if (IS_HASWELL(dev))
9254 intel_ddi_setup_hw_pll_state(dev);
9255
8263 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 9256 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8264 base.head) { 9257 base.head) {
8265 pipe = 0; 9258 pipe = 0;
@@ -8306,9 +9299,21 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
8306 intel_sanitize_crtc(crtc); 9299 intel_sanitize_crtc(crtc);
8307 } 9300 }
8308 9301
8309 intel_modeset_update_staged_output_state(dev); 9302 if (force_restore) {
9303 for_each_pipe(pipe) {
9304 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
9305 intel_set_mode(&crtc->base, &crtc->base.mode,
9306 crtc->base.x, crtc->base.y, crtc->base.fb);
9307 }
9308
9309 i915_redisable_vga(dev);
9310 } else {
9311 intel_modeset_update_staged_output_state(dev);
9312 }
8310 9313
8311 intel_modeset_check_state(dev); 9314 intel_modeset_check_state(dev);
9315
9316 drm_mode_config_reset(dev);
8312} 9317}
8313 9318
8314void intel_modeset_gem_init(struct drm_device *dev) 9319void intel_modeset_gem_init(struct drm_device *dev)
@@ -8317,7 +9322,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
8317 9322
8318 intel_setup_overlay(dev); 9323 intel_setup_overlay(dev);
8319 9324
8320 intel_modeset_setup_hw_state(dev); 9325 intel_modeset_setup_hw_state(dev, false);
8321} 9326}
8322 9327
8323void intel_modeset_cleanup(struct drm_device *dev) 9328void intel_modeset_cleanup(struct drm_device *dev)
@@ -8436,6 +9441,7 @@ intel_display_capture_error_state(struct drm_device *dev)
8436{ 9441{
8437 drm_i915_private_t *dev_priv = dev->dev_private; 9442 drm_i915_private_t *dev_priv = dev->dev_private;
8438 struct intel_display_error_state *error; 9443 struct intel_display_error_state *error;
9444 enum transcoder cpu_transcoder;
8439 int i; 9445 int i;
8440 9446
8441 error = kmalloc(sizeof(*error), GFP_ATOMIC); 9447 error = kmalloc(sizeof(*error), GFP_ATOMIC);
@@ -8443,6 +9449,8 @@ intel_display_capture_error_state(struct drm_device *dev)
8443 return NULL; 9449 return NULL;
8444 9450
8445 for_each_pipe(i) { 9451 for_each_pipe(i) {
9452 cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
9453
8446 error->cursor[i].control = I915_READ(CURCNTR(i)); 9454 error->cursor[i].control = I915_READ(CURCNTR(i));
8447 error->cursor[i].position = I915_READ(CURPOS(i)); 9455 error->cursor[i].position = I915_READ(CURPOS(i));
8448 error->cursor[i].base = I915_READ(CURBASE(i)); 9456 error->cursor[i].base = I915_READ(CURBASE(i));
@@ -8457,14 +9465,14 @@ intel_display_capture_error_state(struct drm_device *dev)
8457 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); 9465 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
8458 } 9466 }
8459 9467
8460 error->pipe[i].conf = I915_READ(PIPECONF(i)); 9468 error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
8461 error->pipe[i].source = I915_READ(PIPESRC(i)); 9469 error->pipe[i].source = I915_READ(PIPESRC(i));
8462 error->pipe[i].htotal = I915_READ(HTOTAL(i)); 9470 error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
8463 error->pipe[i].hblank = I915_READ(HBLANK(i)); 9471 error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder));
8464 error->pipe[i].hsync = I915_READ(HSYNC(i)); 9472 error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder));
8465 error->pipe[i].vtotal = I915_READ(VTOTAL(i)); 9473 error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
8466 error->pipe[i].vblank = I915_READ(VBLANK(i)); 9474 error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder));
8467 error->pipe[i].vsync = I915_READ(VSYNC(i)); 9475 error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
8468 } 9476 }
8469 9477
8470 return error; 9478 return error;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 368ed8ef1600..1b63d55318a0 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -36,8 +36,6 @@
36#include <drm/i915_drm.h> 36#include <drm/i915_drm.h>
37#include "i915_drv.h" 37#include "i915_drv.h"
38 38
39#define DP_RECEIVER_CAP_SIZE 0xf
40#define DP_LINK_STATUS_SIZE 6
41#define DP_LINK_CHECK_TIMEOUT (10 * 1000) 39#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
42 40
43/** 41/**
@@ -49,7 +47,9 @@
49 */ 47 */
50static bool is_edp(struct intel_dp *intel_dp) 48static bool is_edp(struct intel_dp *intel_dp)
51{ 49{
52 return intel_dp->base.type == INTEL_OUTPUT_EDP; 50 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
51
52 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
53} 53}
54 54
55/** 55/**
@@ -76,15 +76,16 @@ static bool is_cpu_edp(struct intel_dp *intel_dp)
76 return is_edp(intel_dp) && !is_pch_edp(intel_dp); 76 return is_edp(intel_dp) && !is_pch_edp(intel_dp);
77} 77}
78 78
79static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) 79static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
80{ 80{
81 return container_of(encoder, struct intel_dp, base.base); 81 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
82
83 return intel_dig_port->base.base.dev;
82} 84}
83 85
84static struct intel_dp *intel_attached_dp(struct drm_connector *connector) 86static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
85{ 87{
86 return container_of(intel_attached_encoder(connector), 88 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
87 struct intel_dp, base);
88} 89}
89 90
90/** 91/**
@@ -106,49 +107,32 @@ bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
106 return is_pch_edp(intel_dp); 107 return is_pch_edp(intel_dp);
107} 108}
108 109
109static void intel_dp_start_link_train(struct intel_dp *intel_dp);
110static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
111static void intel_dp_link_down(struct intel_dp *intel_dp); 110static void intel_dp_link_down(struct intel_dp *intel_dp);
112 111
113void 112void
114intel_edp_link_config(struct intel_encoder *intel_encoder, 113intel_edp_link_config(struct intel_encoder *intel_encoder,
115 int *lane_num, int *link_bw) 114 int *lane_num, int *link_bw)
116{ 115{
117 struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); 116 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
118 117
119 *lane_num = intel_dp->lane_count; 118 *lane_num = intel_dp->lane_count;
120 if (intel_dp->link_bw == DP_LINK_BW_1_62) 119 *link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
121 *link_bw = 162000;
122 else if (intel_dp->link_bw == DP_LINK_BW_2_7)
123 *link_bw = 270000;
124} 120}
125 121
126int 122int
127intel_edp_target_clock(struct intel_encoder *intel_encoder, 123intel_edp_target_clock(struct intel_encoder *intel_encoder,
128 struct drm_display_mode *mode) 124 struct drm_display_mode *mode)
129{ 125{
130 struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); 126 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
127 struct intel_connector *intel_connector = intel_dp->attached_connector;
131 128
132 if (intel_dp->panel_fixed_mode) 129 if (intel_connector->panel.fixed_mode)
133 return intel_dp->panel_fixed_mode->clock; 130 return intel_connector->panel.fixed_mode->clock;
134 else 131 else
135 return mode->clock; 132 return mode->clock;
136} 133}
137 134
138static int 135static int
139intel_dp_max_lane_count(struct intel_dp *intel_dp)
140{
141 int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
142 switch (max_lane_count) {
143 case 1: case 2: case 4:
144 break;
145 default:
146 max_lane_count = 4;
147 }
148 return max_lane_count;
149}
150
151static int
152intel_dp_max_link_bw(struct intel_dp *intel_dp) 136intel_dp_max_link_bw(struct intel_dp *intel_dp)
153{ 137{
154 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; 138 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
@@ -208,7 +192,7 @@ intel_dp_adjust_dithering(struct intel_dp *intel_dp,
208 bool adjust_mode) 192 bool adjust_mode)
209{ 193{
210 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); 194 int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
211 int max_lanes = intel_dp_max_lane_count(intel_dp); 195 int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
212 int max_rate, mode_rate; 196 int max_rate, mode_rate;
213 197
214 mode_rate = intel_dp_link_required(mode->clock, 24); 198 mode_rate = intel_dp_link_required(mode->clock, 24);
@@ -234,12 +218,14 @@ intel_dp_mode_valid(struct drm_connector *connector,
234 struct drm_display_mode *mode) 218 struct drm_display_mode *mode)
235{ 219{
236 struct intel_dp *intel_dp = intel_attached_dp(connector); 220 struct intel_dp *intel_dp = intel_attached_dp(connector);
221 struct intel_connector *intel_connector = to_intel_connector(connector);
222 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
237 223
238 if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) { 224 if (is_edp(intel_dp) && fixed_mode) {
239 if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay) 225 if (mode->hdisplay > fixed_mode->hdisplay)
240 return MODE_PANEL; 226 return MODE_PANEL;
241 227
242 if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay) 228 if (mode->vdisplay > fixed_mode->vdisplay)
243 return MODE_PANEL; 229 return MODE_PANEL;
244 } 230 }
245 231
@@ -285,6 +271,10 @@ intel_hrawclk(struct drm_device *dev)
285 struct drm_i915_private *dev_priv = dev->dev_private; 271 struct drm_i915_private *dev_priv = dev->dev_private;
286 uint32_t clkcfg; 272 uint32_t clkcfg;
287 273
274 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
275 if (IS_VALLEYVIEW(dev))
276 return 200;
277
288 clkcfg = I915_READ(CLKCFG); 278 clkcfg = I915_READ(CLKCFG);
289 switch (clkcfg & CLKCFG_FSB_MASK) { 279 switch (clkcfg & CLKCFG_FSB_MASK) {
290 case CLKCFG_FSB_400: 280 case CLKCFG_FSB_400:
@@ -310,7 +300,7 @@ intel_hrawclk(struct drm_device *dev)
310 300
311static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp) 301static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
312{ 302{
313 struct drm_device *dev = intel_dp->base.base.dev; 303 struct drm_device *dev = intel_dp_to_dev(intel_dp);
314 struct drm_i915_private *dev_priv = dev->dev_private; 304 struct drm_i915_private *dev_priv = dev->dev_private;
315 305
316 return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0; 306 return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
@@ -318,7 +308,7 @@ static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
318 308
319static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp) 309static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
320{ 310{
321 struct drm_device *dev = intel_dp->base.base.dev; 311 struct drm_device *dev = intel_dp_to_dev(intel_dp);
322 struct drm_i915_private *dev_priv = dev->dev_private; 312 struct drm_i915_private *dev_priv = dev->dev_private;
323 313
324 return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0; 314 return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
@@ -327,7 +317,7 @@ static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
327static void 317static void
328intel_dp_check_edp(struct intel_dp *intel_dp) 318intel_dp_check_edp(struct intel_dp *intel_dp)
329{ 319{
330 struct drm_device *dev = intel_dp->base.base.dev; 320 struct drm_device *dev = intel_dp_to_dev(intel_dp);
331 struct drm_i915_private *dev_priv = dev->dev_private; 321 struct drm_i915_private *dev_priv = dev->dev_private;
332 322
333 if (!is_edp(intel_dp)) 323 if (!is_edp(intel_dp))
@@ -346,7 +336,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
346 uint8_t *recv, int recv_size) 336 uint8_t *recv, int recv_size)
347{ 337{
348 uint32_t output_reg = intel_dp->output_reg; 338 uint32_t output_reg = intel_dp->output_reg;
349 struct drm_device *dev = intel_dp->base.base.dev; 339 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
340 struct drm_device *dev = intel_dig_port->base.base.dev;
350 struct drm_i915_private *dev_priv = dev->dev_private; 341 struct drm_i915_private *dev_priv = dev->dev_private;
351 uint32_t ch_ctl = output_reg + 0x10; 342 uint32_t ch_ctl = output_reg + 0x10;
352 uint32_t ch_data = ch_ctl + 4; 343 uint32_t ch_data = ch_ctl + 4;
@@ -356,6 +347,29 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
356 uint32_t aux_clock_divider; 347 uint32_t aux_clock_divider;
357 int try, precharge; 348 int try, precharge;
358 349
350 if (IS_HASWELL(dev)) {
351 switch (intel_dig_port->port) {
352 case PORT_A:
353 ch_ctl = DPA_AUX_CH_CTL;
354 ch_data = DPA_AUX_CH_DATA1;
355 break;
356 case PORT_B:
357 ch_ctl = PCH_DPB_AUX_CH_CTL;
358 ch_data = PCH_DPB_AUX_CH_DATA1;
359 break;
360 case PORT_C:
361 ch_ctl = PCH_DPC_AUX_CH_CTL;
362 ch_data = PCH_DPC_AUX_CH_DATA1;
363 break;
364 case PORT_D:
365 ch_ctl = PCH_DPD_AUX_CH_CTL;
366 ch_data = PCH_DPD_AUX_CH_DATA1;
367 break;
368 default:
369 BUG();
370 }
371 }
372
359 intel_dp_check_edp(intel_dp); 373 intel_dp_check_edp(intel_dp);
360 /* The clock divider is based off the hrawclk, 374 /* The clock divider is based off the hrawclk,
361 * and would like to run at 2MHz. So, take the 375 * and would like to run at 2MHz. So, take the
@@ -365,12 +379,16 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
365 * clock divider. 379 * clock divider.
366 */ 380 */
367 if (is_cpu_edp(intel_dp)) { 381 if (is_cpu_edp(intel_dp)) {
368 if (IS_GEN6(dev) || IS_GEN7(dev)) 382 if (IS_HASWELL(dev))
383 aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
384 else if (IS_VALLEYVIEW(dev))
385 aux_clock_divider = 100;
386 else if (IS_GEN6(dev) || IS_GEN7(dev))
369 aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */ 387 aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
370 else 388 else
371 aux_clock_divider = 225; /* eDP input clock at 450Mhz */ 389 aux_clock_divider = 225; /* eDP input clock at 450Mhz */
372 } else if (HAS_PCH_SPLIT(dev)) 390 } else if (HAS_PCH_SPLIT(dev))
373 aux_clock_divider = 63; /* IRL input clock fixed at 125Mhz */ 391 aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
374 else 392 else
375 aux_clock_divider = intel_hrawclk(dev) / 2; 393 aux_clock_divider = intel_hrawclk(dev) / 2;
376 394
@@ -642,9 +660,6 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
642 return -EREMOTEIO; 660 return -EREMOTEIO;
643} 661}
644 662
645static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
646static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
647
648static int 663static int
649intel_dp_i2c_init(struct intel_dp *intel_dp, 664intel_dp_i2c_init(struct intel_dp *intel_dp,
650 struct intel_connector *intel_connector, const char *name) 665 struct intel_connector *intel_connector, const char *name)
@@ -670,22 +685,25 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
670 return ret; 685 return ret;
671} 686}
672 687
673static bool 688bool
674intel_dp_mode_fixup(struct drm_encoder *encoder, 689intel_dp_mode_fixup(struct drm_encoder *encoder,
675 const struct drm_display_mode *mode, 690 const struct drm_display_mode *mode,
676 struct drm_display_mode *adjusted_mode) 691 struct drm_display_mode *adjusted_mode)
677{ 692{
678 struct drm_device *dev = encoder->dev; 693 struct drm_device *dev = encoder->dev;
679 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 694 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
695 struct intel_connector *intel_connector = intel_dp->attached_connector;
680 int lane_count, clock; 696 int lane_count, clock;
681 int max_lane_count = intel_dp_max_lane_count(intel_dp); 697 int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
682 int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; 698 int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
683 int bpp, mode_rate; 699 int bpp, mode_rate;
684 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; 700 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
685 701
686 if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) { 702 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
687 intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode); 703 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
688 intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, 704 adjusted_mode);
705 intel_pch_panel_fitting(dev,
706 intel_connector->panel.fitting_mode,
689 mode, adjusted_mode); 707 mode, adjusted_mode);
690 } 708 }
691 709
@@ -762,21 +780,23 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
762 struct drm_display_mode *adjusted_mode) 780 struct drm_display_mode *adjusted_mode)
763{ 781{
764 struct drm_device *dev = crtc->dev; 782 struct drm_device *dev = crtc->dev;
765 struct intel_encoder *encoder; 783 struct intel_encoder *intel_encoder;
784 struct intel_dp *intel_dp;
766 struct drm_i915_private *dev_priv = dev->dev_private; 785 struct drm_i915_private *dev_priv = dev->dev_private;
767 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 786 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
768 int lane_count = 4; 787 int lane_count = 4;
769 struct intel_dp_m_n m_n; 788 struct intel_dp_m_n m_n;
770 int pipe = intel_crtc->pipe; 789 int pipe = intel_crtc->pipe;
790 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
771 791
772 /* 792 /*
773 * Find the lane count in the intel_encoder private 793 * Find the lane count in the intel_encoder private
774 */ 794 */
775 for_each_encoder_on_crtc(dev, crtc, encoder) { 795 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
776 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 796 intel_dp = enc_to_intel_dp(&intel_encoder->base);
777 797
778 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || 798 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
779 intel_dp->base.type == INTEL_OUTPUT_EDP) 799 intel_encoder->type == INTEL_OUTPUT_EDP)
780 { 800 {
781 lane_count = intel_dp->lane_count; 801 lane_count = intel_dp->lane_count;
782 break; 802 break;
@@ -791,23 +811,46 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
791 intel_dp_compute_m_n(intel_crtc->bpp, lane_count, 811 intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
792 mode->clock, adjusted_mode->clock, &m_n); 812 mode->clock, adjusted_mode->clock, &m_n);
793 813
794 if (HAS_PCH_SPLIT(dev)) { 814 if (IS_HASWELL(dev)) {
795 I915_WRITE(TRANSDATA_M1(pipe), 815 I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
796 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | 816 TU_SIZE(m_n.tu) | m_n.gmch_m);
797 m_n.gmch_m); 817 I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
818 I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
819 I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
820 } else if (HAS_PCH_SPLIT(dev)) {
821 I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
798 I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n); 822 I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
799 I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m); 823 I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
800 I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n); 824 I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
825 } else if (IS_VALLEYVIEW(dev)) {
826 I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
827 I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
828 I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
829 I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
801 } else { 830 } else {
802 I915_WRITE(PIPE_GMCH_DATA_M(pipe), 831 I915_WRITE(PIPE_GMCH_DATA_M(pipe),
803 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | 832 TU_SIZE(m_n.tu) | m_n.gmch_m);
804 m_n.gmch_m);
805 I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n); 833 I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
806 I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m); 834 I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
807 I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n); 835 I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
808 } 836 }
809} 837}
810 838
839void intel_dp_init_link_config(struct intel_dp *intel_dp)
840{
841 memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
842 intel_dp->link_configuration[0] = intel_dp->link_bw;
843 intel_dp->link_configuration[1] = intel_dp->lane_count;
844 intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
845 /*
846 * Check for DPCD version > 1.1 and enhanced framing support
847 */
848 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
849 (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
850 intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
851 }
852}
853
811static void 854static void
812intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, 855intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
813 struct drm_display_mode *adjusted_mode) 856 struct drm_display_mode *adjusted_mode)
@@ -815,7 +858,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
815 struct drm_device *dev = encoder->dev; 858 struct drm_device *dev = encoder->dev;
816 struct drm_i915_private *dev_priv = dev->dev_private; 859 struct drm_i915_private *dev_priv = dev->dev_private;
817 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 860 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
818 struct drm_crtc *crtc = intel_dp->base.base.crtc; 861 struct drm_crtc *crtc = encoder->crtc;
819 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 862 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
820 863
821 /* 864 /*
@@ -860,21 +903,12 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
860 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 903 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
861 intel_write_eld(encoder, adjusted_mode); 904 intel_write_eld(encoder, adjusted_mode);
862 } 905 }
863 memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); 906
864 intel_dp->link_configuration[0] = intel_dp->link_bw; 907 intel_dp_init_link_config(intel_dp);
865 intel_dp->link_configuration[1] = intel_dp->lane_count;
866 intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
867 /*
868 * Check for DPCD version > 1.1 and enhanced framing support
869 */
870 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
871 (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
872 intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
873 }
874 908
875 /* Split out the IBX/CPU vs CPT settings */ 909 /* Split out the IBX/CPU vs CPT settings */
876 910
877 if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) { 911 if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
878 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 912 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
879 intel_dp->DP |= DP_SYNC_HS_HIGH; 913 intel_dp->DP |= DP_SYNC_HS_HIGH;
880 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 914 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -931,7 +965,7 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
931 u32 mask, 965 u32 mask,
932 u32 value) 966 u32 value)
933{ 967{
934 struct drm_device *dev = intel_dp->base.base.dev; 968 struct drm_device *dev = intel_dp_to_dev(intel_dp);
935 struct drm_i915_private *dev_priv = dev->dev_private; 969 struct drm_i915_private *dev_priv = dev->dev_private;
936 970
937 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n", 971 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
@@ -978,9 +1012,9 @@ static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
978 return control; 1012 return control;
979} 1013}
980 1014
981static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) 1015void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
982{ 1016{
983 struct drm_device *dev = intel_dp->base.base.dev; 1017 struct drm_device *dev = intel_dp_to_dev(intel_dp);
984 struct drm_i915_private *dev_priv = dev->dev_private; 1018 struct drm_i915_private *dev_priv = dev->dev_private;
985 u32 pp; 1019 u32 pp;
986 1020
@@ -1019,7 +1053,7 @@ static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
1019 1053
1020static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) 1054static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
1021{ 1055{
1022 struct drm_device *dev = intel_dp->base.base.dev; 1056 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1023 struct drm_i915_private *dev_priv = dev->dev_private; 1057 struct drm_i915_private *dev_priv = dev->dev_private;
1024 u32 pp; 1058 u32 pp;
1025 1059
@@ -1041,14 +1075,14 @@ static void ironlake_panel_vdd_work(struct work_struct *__work)
1041{ 1075{
1042 struct intel_dp *intel_dp = container_of(to_delayed_work(__work), 1076 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1043 struct intel_dp, panel_vdd_work); 1077 struct intel_dp, panel_vdd_work);
1044 struct drm_device *dev = intel_dp->base.base.dev; 1078 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1045 1079
1046 mutex_lock(&dev->mode_config.mutex); 1080 mutex_lock(&dev->mode_config.mutex);
1047 ironlake_panel_vdd_off_sync(intel_dp); 1081 ironlake_panel_vdd_off_sync(intel_dp);
1048 mutex_unlock(&dev->mode_config.mutex); 1082 mutex_unlock(&dev->mode_config.mutex);
1049} 1083}
1050 1084
1051static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) 1085void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1052{ 1086{
1053 if (!is_edp(intel_dp)) 1087 if (!is_edp(intel_dp))
1054 return; 1088 return;
@@ -1071,9 +1105,9 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1071 } 1105 }
1072} 1106}
1073 1107
1074static void ironlake_edp_panel_on(struct intel_dp *intel_dp) 1108void ironlake_edp_panel_on(struct intel_dp *intel_dp)
1075{ 1109{
1076 struct drm_device *dev = intel_dp->base.base.dev; 1110 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1077 struct drm_i915_private *dev_priv = dev->dev_private; 1111 struct drm_i915_private *dev_priv = dev->dev_private;
1078 u32 pp; 1112 u32 pp;
1079 1113
@@ -1113,9 +1147,9 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
1113 } 1147 }
1114} 1148}
1115 1149
1116static void ironlake_edp_panel_off(struct intel_dp *intel_dp) 1150void ironlake_edp_panel_off(struct intel_dp *intel_dp)
1117{ 1151{
1118 struct drm_device *dev = intel_dp->base.base.dev; 1152 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1119 struct drm_i915_private *dev_priv = dev->dev_private; 1153 struct drm_i915_private *dev_priv = dev->dev_private;
1120 u32 pp; 1154 u32 pp;
1121 1155
@@ -1138,10 +1172,12 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
1138 ironlake_wait_panel_off(intel_dp); 1172 ironlake_wait_panel_off(intel_dp);
1139} 1173}
1140 1174
1141static void ironlake_edp_backlight_on(struct intel_dp *intel_dp) 1175void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
1142{ 1176{
1143 struct drm_device *dev = intel_dp->base.base.dev; 1177 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1178 struct drm_device *dev = intel_dig_port->base.base.dev;
1144 struct drm_i915_private *dev_priv = dev->dev_private; 1179 struct drm_i915_private *dev_priv = dev->dev_private;
1180 int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
1145 u32 pp; 1181 u32 pp;
1146 1182
1147 if (!is_edp(intel_dp)) 1183 if (!is_edp(intel_dp))
@@ -1159,17 +1195,21 @@ static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
1159 pp |= EDP_BLC_ENABLE; 1195 pp |= EDP_BLC_ENABLE;
1160 I915_WRITE(PCH_PP_CONTROL, pp); 1196 I915_WRITE(PCH_PP_CONTROL, pp);
1161 POSTING_READ(PCH_PP_CONTROL); 1197 POSTING_READ(PCH_PP_CONTROL);
1198
1199 intel_panel_enable_backlight(dev, pipe);
1162} 1200}
1163 1201
1164static void ironlake_edp_backlight_off(struct intel_dp *intel_dp) 1202void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
1165{ 1203{
1166 struct drm_device *dev = intel_dp->base.base.dev; 1204 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1167 struct drm_i915_private *dev_priv = dev->dev_private; 1205 struct drm_i915_private *dev_priv = dev->dev_private;
1168 u32 pp; 1206 u32 pp;
1169 1207
1170 if (!is_edp(intel_dp)) 1208 if (!is_edp(intel_dp))
1171 return; 1209 return;
1172 1210
1211 intel_panel_disable_backlight(dev);
1212
1173 DRM_DEBUG_KMS("\n"); 1213 DRM_DEBUG_KMS("\n");
1174 pp = ironlake_get_pp_control(dev_priv); 1214 pp = ironlake_get_pp_control(dev_priv);
1175 pp &= ~EDP_BLC_ENABLE; 1215 pp &= ~EDP_BLC_ENABLE;
@@ -1180,8 +1220,9 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
1180 1220
1181static void ironlake_edp_pll_on(struct intel_dp *intel_dp) 1221static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
1182{ 1222{
1183 struct drm_device *dev = intel_dp->base.base.dev; 1223 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1184 struct drm_crtc *crtc = intel_dp->base.base.crtc; 1224 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
1225 struct drm_device *dev = crtc->dev;
1185 struct drm_i915_private *dev_priv = dev->dev_private; 1226 struct drm_i915_private *dev_priv = dev->dev_private;
1186 u32 dpa_ctl; 1227 u32 dpa_ctl;
1187 1228
@@ -1205,8 +1246,9 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
1205 1246
1206static void ironlake_edp_pll_off(struct intel_dp *intel_dp) 1247static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
1207{ 1248{
1208 struct drm_device *dev = intel_dp->base.base.dev; 1249 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1209 struct drm_crtc *crtc = intel_dp->base.base.crtc; 1250 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
1251 struct drm_device *dev = crtc->dev;
1210 struct drm_i915_private *dev_priv = dev->dev_private; 1252 struct drm_i915_private *dev_priv = dev->dev_private;
1211 u32 dpa_ctl; 1253 u32 dpa_ctl;
1212 1254
@@ -1228,7 +1270,7 @@ static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
1228} 1270}
1229 1271
1230/* If the sink supports it, try to set the power state appropriately */ 1272/* If the sink supports it, try to set the power state appropriately */
1231static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) 1273void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
1232{ 1274{
1233 int ret, i; 1275 int ret, i;
1234 1276
@@ -1298,9 +1340,10 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
1298 return true; 1340 return true;
1299 } 1341 }
1300 } 1342 }
1301 }
1302 1343
1303 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg); 1344 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
1345 intel_dp->output_reg);
1346 }
1304 1347
1305 return true; 1348 return true;
1306} 1349}
@@ -1396,38 +1439,6 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
1396 DP_LINK_STATUS_SIZE); 1439 DP_LINK_STATUS_SIZE);
1397} 1440}
1398 1441
1399static uint8_t
1400intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
1401 int r)
1402{
1403 return link_status[r - DP_LANE0_1_STATUS];
1404}
1405
1406static uint8_t
1407intel_get_adjust_request_voltage(uint8_t adjust_request[2],
1408 int lane)
1409{
1410 int s = ((lane & 1) ?
1411 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
1412 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
1413 uint8_t l = adjust_request[lane>>1];
1414
1415 return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
1416}
1417
1418static uint8_t
1419intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2],
1420 int lane)
1421{
1422 int s = ((lane & 1) ?
1423 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
1424 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
1425 uint8_t l = adjust_request[lane>>1];
1426
1427 return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
1428}
1429
1430
1431#if 0 1442#if 0
1432static char *voltage_names[] = { 1443static char *voltage_names[] = {
1433 "0.4V", "0.6V", "0.8V", "1.2V" 1444 "0.4V", "0.6V", "0.8V", "1.2V"
@@ -1448,7 +1459,7 @@ static char *link_train_names[] = {
1448static uint8_t 1459static uint8_t
1449intel_dp_voltage_max(struct intel_dp *intel_dp) 1460intel_dp_voltage_max(struct intel_dp *intel_dp)
1450{ 1461{
1451 struct drm_device *dev = intel_dp->base.base.dev; 1462 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1452 1463
1453 if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) 1464 if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
1454 return DP_TRAIN_VOLTAGE_SWING_800; 1465 return DP_TRAIN_VOLTAGE_SWING_800;
@@ -1461,9 +1472,21 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
1461static uint8_t 1472static uint8_t
1462intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing) 1473intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
1463{ 1474{
1464 struct drm_device *dev = intel_dp->base.base.dev; 1475 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1465 1476
1466 if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) { 1477 if (IS_HASWELL(dev)) {
1478 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1479 case DP_TRAIN_VOLTAGE_SWING_400:
1480 return DP_TRAIN_PRE_EMPHASIS_9_5;
1481 case DP_TRAIN_VOLTAGE_SWING_600:
1482 return DP_TRAIN_PRE_EMPHASIS_6;
1483 case DP_TRAIN_VOLTAGE_SWING_800:
1484 return DP_TRAIN_PRE_EMPHASIS_3_5;
1485 case DP_TRAIN_VOLTAGE_SWING_1200:
1486 default:
1487 return DP_TRAIN_PRE_EMPHASIS_0;
1488 }
1489 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
1467 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 1490 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1468 case DP_TRAIN_VOLTAGE_SWING_400: 1491 case DP_TRAIN_VOLTAGE_SWING_400:
1469 return DP_TRAIN_PRE_EMPHASIS_6; 1492 return DP_TRAIN_PRE_EMPHASIS_6;
@@ -1494,13 +1517,12 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
1494 uint8_t v = 0; 1517 uint8_t v = 0;
1495 uint8_t p = 0; 1518 uint8_t p = 0;
1496 int lane; 1519 int lane;
1497 uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
1498 uint8_t voltage_max; 1520 uint8_t voltage_max;
1499 uint8_t preemph_max; 1521 uint8_t preemph_max;
1500 1522
1501 for (lane = 0; lane < intel_dp->lane_count; lane++) { 1523 for (lane = 0; lane < intel_dp->lane_count; lane++) {
1502 uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane); 1524 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
1503 uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane); 1525 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
1504 1526
1505 if (this_v > v) 1527 if (this_v > v)
1506 v = this_v; 1528 v = this_v;
@@ -1617,52 +1639,38 @@ intel_gen7_edp_signal_levels(uint8_t train_set)
1617 } 1639 }
1618} 1640}
1619 1641
1620static uint8_t 1642/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
1621intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], 1643static uint32_t
1622 int lane) 1644intel_dp_signal_levels_hsw(uint8_t train_set)
1623{
1624 int s = (lane & 1) * 4;
1625 uint8_t l = link_status[lane>>1];
1626
1627 return (l >> s) & 0xf;
1628}
1629
1630/* Check for clock recovery is done on all channels */
1631static bool
1632intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
1633{ 1645{
1634 int lane; 1646 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
1635 uint8_t lane_status; 1647 DP_TRAIN_PRE_EMPHASIS_MASK);
1636 1648 switch (signal_levels) {
1637 for (lane = 0; lane < lane_count; lane++) { 1649 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
1638 lane_status = intel_get_lane_status(link_status, lane); 1650 return DDI_BUF_EMP_400MV_0DB_HSW;
1639 if ((lane_status & DP_LANE_CR_DONE) == 0) 1651 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
1640 return false; 1652 return DDI_BUF_EMP_400MV_3_5DB_HSW;
1641 } 1653 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
1642 return true; 1654 return DDI_BUF_EMP_400MV_6DB_HSW;
1643} 1655 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
1656 return DDI_BUF_EMP_400MV_9_5DB_HSW;
1644 1657
1645/* Check to see if channel eq is done on all channels */ 1658 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
1646#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\ 1659 return DDI_BUF_EMP_600MV_0DB_HSW;
1647 DP_LANE_CHANNEL_EQ_DONE|\ 1660 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
1648 DP_LANE_SYMBOL_LOCKED) 1661 return DDI_BUF_EMP_600MV_3_5DB_HSW;
1649static bool 1662 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
1650intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) 1663 return DDI_BUF_EMP_600MV_6DB_HSW;
1651{
1652 uint8_t lane_align;
1653 uint8_t lane_status;
1654 int lane;
1655 1664
1656 lane_align = intel_dp_link_status(link_status, 1665 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
1657 DP_LANE_ALIGN_STATUS_UPDATED); 1666 return DDI_BUF_EMP_800MV_0DB_HSW;
1658 if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) 1667 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
1659 return false; 1668 return DDI_BUF_EMP_800MV_3_5DB_HSW;
1660 for (lane = 0; lane < intel_dp->lane_count; lane++) { 1669 default:
1661 lane_status = intel_get_lane_status(link_status, lane); 1670 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
1662 if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) 1671 "0x%x\n", signal_levels);
1663 return false; 1672 return DDI_BUF_EMP_400MV_0DB_HSW;
1664 } 1673 }
1665 return true;
1666} 1674}
1667 1675
1668static bool 1676static bool
@@ -1670,11 +1678,49 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
1670 uint32_t dp_reg_value, 1678 uint32_t dp_reg_value,
1671 uint8_t dp_train_pat) 1679 uint8_t dp_train_pat)
1672{ 1680{
1673 struct drm_device *dev = intel_dp->base.base.dev; 1681 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1682 struct drm_device *dev = intel_dig_port->base.base.dev;
1674 struct drm_i915_private *dev_priv = dev->dev_private; 1683 struct drm_i915_private *dev_priv = dev->dev_private;
1684 enum port port = intel_dig_port->port;
1675 int ret; 1685 int ret;
1686 uint32_t temp;
1676 1687
1677 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) { 1688 if (IS_HASWELL(dev)) {
1689 temp = I915_READ(DP_TP_CTL(port));
1690
1691 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
1692 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
1693 else
1694 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
1695
1696 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
1697 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
1698 case DP_TRAINING_PATTERN_DISABLE:
1699 temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
1700 I915_WRITE(DP_TP_CTL(port), temp);
1701
1702 if (wait_for((I915_READ(DP_TP_STATUS(port)) &
1703 DP_TP_STATUS_IDLE_DONE), 1))
1704 DRM_ERROR("Timed out waiting for DP idle patterns\n");
1705
1706 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
1707 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
1708
1709 break;
1710 case DP_TRAINING_PATTERN_1:
1711 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
1712 break;
1713 case DP_TRAINING_PATTERN_2:
1714 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
1715 break;
1716 case DP_TRAINING_PATTERN_3:
1717 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
1718 break;
1719 }
1720 I915_WRITE(DP_TP_CTL(port), temp);
1721
1722 } else if (HAS_PCH_CPT(dev) &&
1723 (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
1678 dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT; 1724 dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
1679 1725
1680 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 1726 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
@@ -1734,16 +1780,20 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
1734} 1780}
1735 1781
1736/* Enable corresponding port and start training pattern 1 */ 1782/* Enable corresponding port and start training pattern 1 */
1737static void 1783void
1738intel_dp_start_link_train(struct intel_dp *intel_dp) 1784intel_dp_start_link_train(struct intel_dp *intel_dp)
1739{ 1785{
1740 struct drm_device *dev = intel_dp->base.base.dev; 1786 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
1787 struct drm_device *dev = encoder->dev;
1741 int i; 1788 int i;
1742 uint8_t voltage; 1789 uint8_t voltage;
1743 bool clock_recovery = false; 1790 bool clock_recovery = false;
1744 int voltage_tries, loop_tries; 1791 int voltage_tries, loop_tries;
1745 uint32_t DP = intel_dp->DP; 1792 uint32_t DP = intel_dp->DP;
1746 1793
1794 if (IS_HASWELL(dev))
1795 intel_ddi_prepare_link_retrain(encoder);
1796
1747 /* Write the link configuration data */ 1797 /* Write the link configuration data */
1748 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, 1798 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
1749 intel_dp->link_configuration, 1799 intel_dp->link_configuration,
@@ -1761,8 +1811,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1761 uint8_t link_status[DP_LINK_STATUS_SIZE]; 1811 uint8_t link_status[DP_LINK_STATUS_SIZE];
1762 uint32_t signal_levels; 1812 uint32_t signal_levels;
1763 1813
1764 1814 if (IS_HASWELL(dev)) {
1765 if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) { 1815 signal_levels = intel_dp_signal_levels_hsw(
1816 intel_dp->train_set[0]);
1817 DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
1818 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
1766 signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]); 1819 signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
1767 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels; 1820 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
1768 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { 1821 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
@@ -1770,23 +1823,24 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1770 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1823 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1771 } else { 1824 } else {
1772 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); 1825 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
1773 DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels);
1774 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1826 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1775 } 1827 }
1828 DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n",
1829 signal_levels);
1776 1830
1831 /* Set training pattern 1 */
1777 if (!intel_dp_set_link_train(intel_dp, DP, 1832 if (!intel_dp_set_link_train(intel_dp, DP,
1778 DP_TRAINING_PATTERN_1 | 1833 DP_TRAINING_PATTERN_1 |
1779 DP_LINK_SCRAMBLING_DISABLE)) 1834 DP_LINK_SCRAMBLING_DISABLE))
1780 break; 1835 break;
1781 /* Set training pattern 1 */
1782 1836
1783 udelay(100); 1837 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
1784 if (!intel_dp_get_link_status(intel_dp, link_status)) { 1838 if (!intel_dp_get_link_status(intel_dp, link_status)) {
1785 DRM_ERROR("failed to get link status\n"); 1839 DRM_ERROR("failed to get link status\n");
1786 break; 1840 break;
1787 } 1841 }
1788 1842
1789 if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { 1843 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
1790 DRM_DEBUG_KMS("clock recovery OK\n"); 1844 DRM_DEBUG_KMS("clock recovery OK\n");
1791 clock_recovery = true; 1845 clock_recovery = true;
1792 break; 1846 break;
@@ -1825,10 +1879,10 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1825 intel_dp->DP = DP; 1879 intel_dp->DP = DP;
1826} 1880}
1827 1881
1828static void 1882void
1829intel_dp_complete_link_train(struct intel_dp *intel_dp) 1883intel_dp_complete_link_train(struct intel_dp *intel_dp)
1830{ 1884{
1831 struct drm_device *dev = intel_dp->base.base.dev; 1885 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1832 bool channel_eq = false; 1886 bool channel_eq = false;
1833 int tries, cr_tries; 1887 int tries, cr_tries;
1834 uint32_t DP = intel_dp->DP; 1888 uint32_t DP = intel_dp->DP;
@@ -1848,7 +1902,10 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1848 break; 1902 break;
1849 } 1903 }
1850 1904
1851 if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) { 1905 if (IS_HASWELL(dev)) {
1906 signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]);
1907 DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
1908 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
1852 signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]); 1909 signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
1853 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels; 1910 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
1854 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { 1911 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
@@ -1865,18 +1922,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1865 DP_LINK_SCRAMBLING_DISABLE)) 1922 DP_LINK_SCRAMBLING_DISABLE))
1866 break; 1923 break;
1867 1924
1868 udelay(400); 1925 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
1869 if (!intel_dp_get_link_status(intel_dp, link_status)) 1926 if (!intel_dp_get_link_status(intel_dp, link_status))
1870 break; 1927 break;
1871 1928
1872 /* Make sure clock is still ok */ 1929 /* Make sure clock is still ok */
1873 if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { 1930 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
1874 intel_dp_start_link_train(intel_dp); 1931 intel_dp_start_link_train(intel_dp);
1875 cr_tries++; 1932 cr_tries++;
1876 continue; 1933 continue;
1877 } 1934 }
1878 1935
1879 if (intel_channel_eq_ok(intel_dp, link_status)) { 1936 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
1880 channel_eq = true; 1937 channel_eq = true;
1881 break; 1938 break;
1882 } 1939 }
@@ -1895,16 +1952,38 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1895 ++tries; 1952 ++tries;
1896 } 1953 }
1897 1954
1955 if (channel_eq)
1956 DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n");
1957
1898 intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE); 1958 intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
1899} 1959}
1900 1960
1901static void 1961static void
1902intel_dp_link_down(struct intel_dp *intel_dp) 1962intel_dp_link_down(struct intel_dp *intel_dp)
1903{ 1963{
1904 struct drm_device *dev = intel_dp->base.base.dev; 1964 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1965 struct drm_device *dev = intel_dig_port->base.base.dev;
1905 struct drm_i915_private *dev_priv = dev->dev_private; 1966 struct drm_i915_private *dev_priv = dev->dev_private;
1906 uint32_t DP = intel_dp->DP; 1967 uint32_t DP = intel_dp->DP;
1907 1968
1969 /*
1970 * DDI code has a strict mode set sequence and we should try to respect
1971 * it, otherwise we might hang the machine in many different ways. So we
1972 * really should be disabling the port only on a complete crtc_disable
1973 * sequence. This function is just called under two conditions on DDI
1974 * code:
1975 * - Link train failed while doing crtc_enable, and on this case we
1976 * really should respect the mode set sequence and wait for a
1977 * crtc_disable.
1978 * - Someone turned the monitor off and intel_dp_check_link_status
1979 * called us. We don't need to disable the whole port on this case, so
1980 * when someone turns the monitor on again,
1981 * intel_ddi_prepare_link_retrain will take care of redoing the link
1982 * train.
1983 */
1984 if (IS_HASWELL(dev))
1985 return;
1986
1908 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)) 1987 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1909 return; 1988 return;
1910 1989
@@ -1923,7 +2002,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
1923 2002
1924 if (HAS_PCH_IBX(dev) && 2003 if (HAS_PCH_IBX(dev) &&
1925 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { 2004 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
1926 struct drm_crtc *crtc = intel_dp->base.base.crtc; 2005 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
1927 2006
1928 /* Hardware workaround: leaving our transcoder select 2007 /* Hardware workaround: leaving our transcoder select
1929 * set to transcoder B while it's off will prevent the 2008 * set to transcoder B while it's off will prevent the
@@ -2024,7 +2103,7 @@ static void
2024intel_dp_handle_test_request(struct intel_dp *intel_dp) 2103intel_dp_handle_test_request(struct intel_dp *intel_dp)
2025{ 2104{
2026 /* NAK by default */ 2105 /* NAK by default */
2027 intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_ACK); 2106 intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
2028} 2107}
2029 2108
2030/* 2109/*
@@ -2036,16 +2115,17 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp)
2036 * 4. Check link status on receipt of hot-plug interrupt 2115 * 4. Check link status on receipt of hot-plug interrupt
2037 */ 2116 */
2038 2117
2039static void 2118void
2040intel_dp_check_link_status(struct intel_dp *intel_dp) 2119intel_dp_check_link_status(struct intel_dp *intel_dp)
2041{ 2120{
2121 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
2042 u8 sink_irq_vector; 2122 u8 sink_irq_vector;
2043 u8 link_status[DP_LINK_STATUS_SIZE]; 2123 u8 link_status[DP_LINK_STATUS_SIZE];
2044 2124
2045 if (!intel_dp->base.connectors_active) 2125 if (!intel_encoder->connectors_active)
2046 return; 2126 return;
2047 2127
2048 if (WARN_ON(!intel_dp->base.base.crtc)) 2128 if (WARN_ON(!intel_encoder->base.crtc))
2049 return; 2129 return;
2050 2130
2051 /* Try to read receiver status if the link appears to be up */ 2131 /* Try to read receiver status if the link appears to be up */
@@ -2074,9 +2154,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
2074 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); 2154 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
2075 } 2155 }
2076 2156
2077 if (!intel_channel_eq_ok(intel_dp, link_status)) { 2157 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
2078 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", 2158 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
2079 drm_get_encoder_name(&intel_dp->base.base)); 2159 drm_get_encoder_name(&intel_encoder->base));
2080 intel_dp_start_link_train(intel_dp); 2160 intel_dp_start_link_train(intel_dp);
2081 intel_dp_complete_link_train(intel_dp); 2161 intel_dp_complete_link_train(intel_dp);
2082 } 2162 }
@@ -2125,11 +2205,12 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
2125static enum drm_connector_status 2205static enum drm_connector_status
2126ironlake_dp_detect(struct intel_dp *intel_dp) 2206ironlake_dp_detect(struct intel_dp *intel_dp)
2127{ 2207{
2208 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2128 enum drm_connector_status status; 2209 enum drm_connector_status status;
2129 2210
2130 /* Can't disconnect eDP, but you can close the lid... */ 2211 /* Can't disconnect eDP, but you can close the lid... */
2131 if (is_edp(intel_dp)) { 2212 if (is_edp(intel_dp)) {
2132 status = intel_panel_detect(intel_dp->base.base.dev); 2213 status = intel_panel_detect(dev);
2133 if (status == connector_status_unknown) 2214 if (status == connector_status_unknown)
2134 status = connector_status_connected; 2215 status = connector_status_connected;
2135 return status; 2216 return status;
@@ -2141,7 +2222,7 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
2141static enum drm_connector_status 2222static enum drm_connector_status
2142g4x_dp_detect(struct intel_dp *intel_dp) 2223g4x_dp_detect(struct intel_dp *intel_dp)
2143{ 2224{
2144 struct drm_device *dev = intel_dp->base.base.dev; 2225 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2145 struct drm_i915_private *dev_priv = dev->dev_private; 2226 struct drm_i915_private *dev_priv = dev->dev_private;
2146 uint32_t bit; 2227 uint32_t bit;
2147 2228
@@ -2168,44 +2249,45 @@ g4x_dp_detect(struct intel_dp *intel_dp)
2168static struct edid * 2249static struct edid *
2169intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) 2250intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
2170{ 2251{
2171 struct intel_dp *intel_dp = intel_attached_dp(connector); 2252 struct intel_connector *intel_connector = to_intel_connector(connector);
2172 struct edid *edid;
2173 int size;
2174 2253
2175 if (is_edp(intel_dp)) { 2254 /* use cached edid if we have one */
2176 if (!intel_dp->edid) 2255 if (intel_connector->edid) {
2256 struct edid *edid;
2257 int size;
2258
2259 /* invalid edid */
2260 if (IS_ERR(intel_connector->edid))
2177 return NULL; 2261 return NULL;
2178 2262
2179 size = (intel_dp->edid->extensions + 1) * EDID_LENGTH; 2263 size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
2180 edid = kmalloc(size, GFP_KERNEL); 2264 edid = kmalloc(size, GFP_KERNEL);
2181 if (!edid) 2265 if (!edid)
2182 return NULL; 2266 return NULL;
2183 2267
2184 memcpy(edid, intel_dp->edid, size); 2268 memcpy(edid, intel_connector->edid, size);
2185 return edid; 2269 return edid;
2186 } 2270 }
2187 2271
2188 edid = drm_get_edid(connector, adapter); 2272 return drm_get_edid(connector, adapter);
2189 return edid;
2190} 2273}
2191 2274
2192static int 2275static int
2193intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter) 2276intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
2194{ 2277{
2195 struct intel_dp *intel_dp = intel_attached_dp(connector); 2278 struct intel_connector *intel_connector = to_intel_connector(connector);
2196 int ret;
2197 2279
2198 if (is_edp(intel_dp)) { 2280 /* use cached edid if we have one */
2199 drm_mode_connector_update_edid_property(connector, 2281 if (intel_connector->edid) {
2200 intel_dp->edid); 2282 /* invalid edid */
2201 ret = drm_add_edid_modes(connector, intel_dp->edid); 2283 if (IS_ERR(intel_connector->edid))
2202 drm_edid_to_eld(connector, 2284 return 0;
2203 intel_dp->edid); 2285
2204 return intel_dp->edid_mode_count; 2286 return intel_connector_update_modes(connector,
2287 intel_connector->edid);
2205 } 2288 }
2206 2289
2207 ret = intel_ddc_get_modes(connector, adapter); 2290 return intel_ddc_get_modes(connector, adapter);
2208 return ret;
2209} 2291}
2210 2292
2211 2293
@@ -2219,9 +2301,12 @@ static enum drm_connector_status
2219intel_dp_detect(struct drm_connector *connector, bool force) 2301intel_dp_detect(struct drm_connector *connector, bool force)
2220{ 2302{
2221 struct intel_dp *intel_dp = intel_attached_dp(connector); 2303 struct intel_dp *intel_dp = intel_attached_dp(connector);
2222 struct drm_device *dev = intel_dp->base.base.dev; 2304 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2305 struct intel_encoder *intel_encoder = &intel_dig_port->base;
2306 struct drm_device *dev = connector->dev;
2223 enum drm_connector_status status; 2307 enum drm_connector_status status;
2224 struct edid *edid = NULL; 2308 struct edid *edid = NULL;
2309 char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
2225 2310
2226 intel_dp->has_audio = false; 2311 intel_dp->has_audio = false;
2227 2312
@@ -2230,10 +2315,9 @@ intel_dp_detect(struct drm_connector *connector, bool force)
2230 else 2315 else
2231 status = g4x_dp_detect(intel_dp); 2316 status = g4x_dp_detect(intel_dp);
2232 2317
2233 DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n", 2318 hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
2234 intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2], 2319 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
2235 intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5], 2320 DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
2236 intel_dp->dpcd[6], intel_dp->dpcd[7]);
2237 2321
2238 if (status != connector_status_connected) 2322 if (status != connector_status_connected)
2239 return status; 2323 return status;
@@ -2250,49 +2334,31 @@ intel_dp_detect(struct drm_connector *connector, bool force)
2250 } 2334 }
2251 } 2335 }
2252 2336
2337 if (intel_encoder->type != INTEL_OUTPUT_EDP)
2338 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
2253 return connector_status_connected; 2339 return connector_status_connected;
2254} 2340}
2255 2341
2256static int intel_dp_get_modes(struct drm_connector *connector) 2342static int intel_dp_get_modes(struct drm_connector *connector)
2257{ 2343{
2258 struct intel_dp *intel_dp = intel_attached_dp(connector); 2344 struct intel_dp *intel_dp = intel_attached_dp(connector);
2259 struct drm_device *dev = intel_dp->base.base.dev; 2345 struct intel_connector *intel_connector = to_intel_connector(connector);
2260 struct drm_i915_private *dev_priv = dev->dev_private; 2346 struct drm_device *dev = connector->dev;
2261 int ret; 2347 int ret;
2262 2348
2263 /* We should parse the EDID data and find out if it has an audio sink 2349 /* We should parse the EDID data and find out if it has an audio sink
2264 */ 2350 */
2265 2351
2266 ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter); 2352 ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
2267 if (ret) { 2353 if (ret)
2268 if (is_edp(intel_dp) && !intel_dp->panel_fixed_mode) {
2269 struct drm_display_mode *newmode;
2270 list_for_each_entry(newmode, &connector->probed_modes,
2271 head) {
2272 if ((newmode->type & DRM_MODE_TYPE_PREFERRED)) {
2273 intel_dp->panel_fixed_mode =
2274 drm_mode_duplicate(dev, newmode);
2275 break;
2276 }
2277 }
2278 }
2279 return ret; 2354 return ret;
2280 }
2281 2355
2282 /* if eDP has no EDID, try to use fixed panel mode from VBT */ 2356 /* if eDP has no EDID, fall back to fixed mode */
2283 if (is_edp(intel_dp)) { 2357 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
2284 /* initialize panel mode from VBT if available for eDP */ 2358 struct drm_display_mode *mode;
2285 if (intel_dp->panel_fixed_mode == NULL && dev_priv->lfp_lvds_vbt_mode != NULL) { 2359 mode = drm_mode_duplicate(dev,
2286 intel_dp->panel_fixed_mode = 2360 intel_connector->panel.fixed_mode);
2287 drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); 2361 if (mode) {
2288 if (intel_dp->panel_fixed_mode) {
2289 intel_dp->panel_fixed_mode->type |=
2290 DRM_MODE_TYPE_PREFERRED;
2291 }
2292 }
2293 if (intel_dp->panel_fixed_mode) {
2294 struct drm_display_mode *mode;
2295 mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
2296 drm_mode_probed_add(connector, mode); 2362 drm_mode_probed_add(connector, mode);
2297 return 1; 2363 return 1;
2298 } 2364 }
@@ -2322,10 +2388,12 @@ intel_dp_set_property(struct drm_connector *connector,
2322 uint64_t val) 2388 uint64_t val)
2323{ 2389{
2324 struct drm_i915_private *dev_priv = connector->dev->dev_private; 2390 struct drm_i915_private *dev_priv = connector->dev->dev_private;
2325 struct intel_dp *intel_dp = intel_attached_dp(connector); 2391 struct intel_connector *intel_connector = to_intel_connector(connector);
2392 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
2393 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2326 int ret; 2394 int ret;
2327 2395
2328 ret = drm_connector_property_set_value(connector, property, val); 2396 ret = drm_object_property_set_value(&connector->base, property, val);
2329 if (ret) 2397 if (ret)
2330 return ret; 2398 return ret;
2331 2399
@@ -2358,11 +2426,27 @@ intel_dp_set_property(struct drm_connector *connector,
2358 goto done; 2426 goto done;
2359 } 2427 }
2360 2428
2429 if (is_edp(intel_dp) &&
2430 property == connector->dev->mode_config.scaling_mode_property) {
2431 if (val == DRM_MODE_SCALE_NONE) {
2432 DRM_DEBUG_KMS("no scaling not supported\n");
2433 return -EINVAL;
2434 }
2435
2436 if (intel_connector->panel.fitting_mode == val) {
2437 /* the eDP scaling property is not changed */
2438 return 0;
2439 }
2440 intel_connector->panel.fitting_mode = val;
2441
2442 goto done;
2443 }
2444
2361 return -EINVAL; 2445 return -EINVAL;
2362 2446
2363done: 2447done:
2364 if (intel_dp->base.base.crtc) { 2448 if (intel_encoder->base.crtc) {
2365 struct drm_crtc *crtc = intel_dp->base.base.crtc; 2449 struct drm_crtc *crtc = intel_encoder->base.crtc;
2366 intel_set_mode(crtc, &crtc->mode, 2450 intel_set_mode(crtc, &crtc->mode,
2367 crtc->x, crtc->y, crtc->fb); 2451 crtc->x, crtc->y, crtc->fb);
2368 } 2452 }
@@ -2375,27 +2459,33 @@ intel_dp_destroy(struct drm_connector *connector)
2375{ 2459{
2376 struct drm_device *dev = connector->dev; 2460 struct drm_device *dev = connector->dev;
2377 struct intel_dp *intel_dp = intel_attached_dp(connector); 2461 struct intel_dp *intel_dp = intel_attached_dp(connector);
2462 struct intel_connector *intel_connector = to_intel_connector(connector);
2378 2463
2379 if (is_edp(intel_dp)) 2464 if (!IS_ERR_OR_NULL(intel_connector->edid))
2465 kfree(intel_connector->edid);
2466
2467 if (is_edp(intel_dp)) {
2380 intel_panel_destroy_backlight(dev); 2468 intel_panel_destroy_backlight(dev);
2469 intel_panel_fini(&intel_connector->panel);
2470 }
2381 2471
2382 drm_sysfs_connector_remove(connector); 2472 drm_sysfs_connector_remove(connector);
2383 drm_connector_cleanup(connector); 2473 drm_connector_cleanup(connector);
2384 kfree(connector); 2474 kfree(connector);
2385} 2475}
2386 2476
2387static void intel_dp_encoder_destroy(struct drm_encoder *encoder) 2477void intel_dp_encoder_destroy(struct drm_encoder *encoder)
2388{ 2478{
2389 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2479 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
2480 struct intel_dp *intel_dp = &intel_dig_port->dp;
2390 2481
2391 i2c_del_adapter(&intel_dp->adapter); 2482 i2c_del_adapter(&intel_dp->adapter);
2392 drm_encoder_cleanup(encoder); 2483 drm_encoder_cleanup(encoder);
2393 if (is_edp(intel_dp)) { 2484 if (is_edp(intel_dp)) {
2394 kfree(intel_dp->edid);
2395 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 2485 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
2396 ironlake_panel_vdd_off_sync(intel_dp); 2486 ironlake_panel_vdd_off_sync(intel_dp);
2397 } 2487 }
2398 kfree(intel_dp); 2488 kfree(intel_dig_port);
2399} 2489}
2400 2490
2401static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { 2491static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
@@ -2425,7 +2515,7 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
2425static void 2515static void
2426intel_dp_hot_plug(struct intel_encoder *intel_encoder) 2516intel_dp_hot_plug(struct intel_encoder *intel_encoder)
2427{ 2517{
2428 struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); 2518 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2429 2519
2430 intel_dp_check_link_status(intel_dp); 2520 intel_dp_check_link_status(intel_dp);
2431} 2521}
@@ -2435,13 +2525,14 @@ int
2435intel_trans_dp_port_sel(struct drm_crtc *crtc) 2525intel_trans_dp_port_sel(struct drm_crtc *crtc)
2436{ 2526{
2437 struct drm_device *dev = crtc->dev; 2527 struct drm_device *dev = crtc->dev;
2438 struct intel_encoder *encoder; 2528 struct intel_encoder *intel_encoder;
2529 struct intel_dp *intel_dp;
2439 2530
2440 for_each_encoder_on_crtc(dev, crtc, encoder) { 2531 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
2441 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2532 intel_dp = enc_to_intel_dp(&intel_encoder->base);
2442 2533
2443 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || 2534 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
2444 intel_dp->base.type == INTEL_OUTPUT_EDP) 2535 intel_encoder->type == INTEL_OUTPUT_EDP)
2445 return intel_dp->output_reg; 2536 return intel_dp->output_reg;
2446 } 2537 }
2447 2538
@@ -2471,78 +2562,191 @@ bool intel_dpd_is_edp(struct drm_device *dev)
2471static void 2562static void
2472intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) 2563intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
2473{ 2564{
2565 struct intel_connector *intel_connector = to_intel_connector(connector);
2566
2474 intel_attach_force_audio_property(connector); 2567 intel_attach_force_audio_property(connector);
2475 intel_attach_broadcast_rgb_property(connector); 2568 intel_attach_broadcast_rgb_property(connector);
2569
2570 if (is_edp(intel_dp)) {
2571 drm_mode_create_scaling_mode_property(connector->dev);
2572 drm_object_attach_property(
2573 &connector->base,
2574 connector->dev->mode_config.scaling_mode_property,
2575 DRM_MODE_SCALE_ASPECT);
2576 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
2577 }
2578}
2579
2580static void
2581intel_dp_init_panel_power_sequencer(struct drm_device *dev,
2582 struct intel_dp *intel_dp)
2583{
2584 struct drm_i915_private *dev_priv = dev->dev_private;
2585 struct edp_power_seq cur, vbt, spec, final;
2586 u32 pp_on, pp_off, pp_div, pp;
2587
2588 /* Workaround: Need to write PP_CONTROL with the unlock key as
2589 * the very first thing. */
2590 pp = ironlake_get_pp_control(dev_priv);
2591 I915_WRITE(PCH_PP_CONTROL, pp);
2592
2593 pp_on = I915_READ(PCH_PP_ON_DELAYS);
2594 pp_off = I915_READ(PCH_PP_OFF_DELAYS);
2595 pp_div = I915_READ(PCH_PP_DIVISOR);
2596
2597 /* Pull timing values out of registers */
2598 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
2599 PANEL_POWER_UP_DELAY_SHIFT;
2600
2601 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
2602 PANEL_LIGHT_ON_DELAY_SHIFT;
2603
2604 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
2605 PANEL_LIGHT_OFF_DELAY_SHIFT;
2606
2607 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
2608 PANEL_POWER_DOWN_DELAY_SHIFT;
2609
2610 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
2611 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
2612
2613 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
2614 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
2615
2616 vbt = dev_priv->edp.pps;
2617
2618 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
2619 * our hw here, which are all in 100usec. */
2620 spec.t1_t3 = 210 * 10;
2621 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
2622 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
2623 spec.t10 = 500 * 10;
2624 /* This one is special and actually in units of 100ms, but zero
2625 * based in the hw (so we need to add 100 ms). But the sw vbt
2626 * table multiplies it with 1000 to make it in units of 100usec,
2627 * too. */
2628 spec.t11_t12 = (510 + 100) * 10;
2629
2630 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
2631 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
2632
2633 /* Use the max of the register settings and vbt. If both are
2634 * unset, fall back to the spec limits. */
2635#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \
2636 spec.field : \
2637 max(cur.field, vbt.field))
2638 assign_final(t1_t3);
2639 assign_final(t8);
2640 assign_final(t9);
2641 assign_final(t10);
2642 assign_final(t11_t12);
2643#undef assign_final
2644
2645#define get_delay(field) (DIV_ROUND_UP(final.field, 10))
2646 intel_dp->panel_power_up_delay = get_delay(t1_t3);
2647 intel_dp->backlight_on_delay = get_delay(t8);
2648 intel_dp->backlight_off_delay = get_delay(t9);
2649 intel_dp->panel_power_down_delay = get_delay(t10);
2650 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
2651#undef get_delay
2652
2653 /* And finally store the new values in the power sequencer. */
2654 pp_on = (final.t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
2655 (final.t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
2656 pp_off = (final.t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
2657 (final.t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
2658 /* Compute the divisor for the pp clock, simply match the Bspec
2659 * formula. */
2660 pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1)
2661 << PP_REFERENCE_DIVIDER_SHIFT;
2662 pp_div |= (DIV_ROUND_UP(final.t11_t12, 1000)
2663 << PANEL_POWER_CYCLE_DELAY_SHIFT);
2664
2665 /* Haswell doesn't have any port selection bits for the panel
2666 * power sequencer any more. */
2667 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
2668 if (is_cpu_edp(intel_dp))
2669 pp_on |= PANEL_POWER_PORT_DP_A;
2670 else
2671 pp_on |= PANEL_POWER_PORT_DP_D;
2672 }
2673
2674 I915_WRITE(PCH_PP_ON_DELAYS, pp_on);
2675 I915_WRITE(PCH_PP_OFF_DELAYS, pp_off);
2676 I915_WRITE(PCH_PP_DIVISOR, pp_div);
2677
2678
2679 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
2680 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
2681 intel_dp->panel_power_cycle_delay);
2682
2683 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
2684 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
2685
2686 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
2687 I915_READ(PCH_PP_ON_DELAYS),
2688 I915_READ(PCH_PP_OFF_DELAYS),
2689 I915_READ(PCH_PP_DIVISOR));
2476} 2690}
2477 2691
2478void 2692void
2479intel_dp_init(struct drm_device *dev, int output_reg, enum port port) 2693intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
2694 struct intel_connector *intel_connector)
2480{ 2695{
2696 struct drm_connector *connector = &intel_connector->base;
2697 struct intel_dp *intel_dp = &intel_dig_port->dp;
2698 struct intel_encoder *intel_encoder = &intel_dig_port->base;
2699 struct drm_device *dev = intel_encoder->base.dev;
2481 struct drm_i915_private *dev_priv = dev->dev_private; 2700 struct drm_i915_private *dev_priv = dev->dev_private;
2482 struct drm_connector *connector; 2701 struct drm_display_mode *fixed_mode = NULL;
2483 struct intel_dp *intel_dp; 2702 enum port port = intel_dig_port->port;
2484 struct intel_encoder *intel_encoder;
2485 struct intel_connector *intel_connector;
2486 const char *name = NULL; 2703 const char *name = NULL;
2487 int type; 2704 int type;
2488 2705
2489 intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL);
2490 if (!intel_dp)
2491 return;
2492
2493 intel_dp->output_reg = output_reg;
2494 intel_dp->port = port;
2495 /* Preserve the current hw state. */ 2706 /* Preserve the current hw state. */
2496 intel_dp->DP = I915_READ(intel_dp->output_reg); 2707 intel_dp->DP = I915_READ(intel_dp->output_reg);
2708 intel_dp->attached_connector = intel_connector;
2497 2709
2498 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 2710 if (HAS_PCH_SPLIT(dev) && port == PORT_D)
2499 if (!intel_connector) {
2500 kfree(intel_dp);
2501 return;
2502 }
2503 intel_encoder = &intel_dp->base;
2504
2505 if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D)
2506 if (intel_dpd_is_edp(dev)) 2711 if (intel_dpd_is_edp(dev))
2507 intel_dp->is_pch_edp = true; 2712 intel_dp->is_pch_edp = true;
2508 2713
2509 if (output_reg == DP_A || is_pch_edp(intel_dp)) { 2714 /*
2715 * FIXME : We need to initialize built-in panels before external panels.
2716 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
2717 */
2718 if (IS_VALLEYVIEW(dev) && port == PORT_C) {
2719 type = DRM_MODE_CONNECTOR_eDP;
2720 intel_encoder->type = INTEL_OUTPUT_EDP;
2721 } else if (port == PORT_A || is_pch_edp(intel_dp)) {
2510 type = DRM_MODE_CONNECTOR_eDP; 2722 type = DRM_MODE_CONNECTOR_eDP;
2511 intel_encoder->type = INTEL_OUTPUT_EDP; 2723 intel_encoder->type = INTEL_OUTPUT_EDP;
2512 } else { 2724 } else {
2725 /* The intel_encoder->type value may be INTEL_OUTPUT_UNKNOWN for
2726 * DDI or INTEL_OUTPUT_DISPLAYPORT for the older gens, so don't
2727 * rewrite it.
2728 */
2513 type = DRM_MODE_CONNECTOR_DisplayPort; 2729 type = DRM_MODE_CONNECTOR_DisplayPort;
2514 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
2515 } 2730 }
2516 2731
2517 connector = &intel_connector->base;
2518 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); 2732 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
2519 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 2733 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
2520 2734
2521 connector->polled = DRM_CONNECTOR_POLL_HPD; 2735 connector->polled = DRM_CONNECTOR_POLL_HPD;
2522
2523 intel_encoder->cloneable = false;
2524
2525 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
2526 ironlake_panel_vdd_work);
2527
2528 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
2529
2530 connector->interlace_allowed = true; 2736 connector->interlace_allowed = true;
2531 connector->doublescan_allowed = 0; 2737 connector->doublescan_allowed = 0;
2532 2738
2533 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, 2739 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
2534 DRM_MODE_ENCODER_TMDS); 2740 ironlake_panel_vdd_work);
2535 drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
2536 2741
2537 intel_connector_attach_encoder(intel_connector, intel_encoder); 2742 intel_connector_attach_encoder(intel_connector, intel_encoder);
2538 drm_sysfs_connector_add(connector); 2743 drm_sysfs_connector_add(connector);
2539 2744
2540 intel_encoder->enable = intel_enable_dp; 2745 if (IS_HASWELL(dev))
2541 intel_encoder->pre_enable = intel_pre_enable_dp; 2746 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
2542 intel_encoder->disable = intel_disable_dp; 2747 else
2543 intel_encoder->post_disable = intel_post_disable_dp; 2748 intel_connector->get_hw_state = intel_connector_get_hw_state;
2544 intel_encoder->get_hw_state = intel_dp_get_hw_state; 2749
2545 intel_connector->get_hw_state = intel_connector_get_hw_state;
2546 2750
2547 /* Set up the DDC bus. */ 2751 /* Set up the DDC bus. */
2548 switch (port) { 2752 switch (port) {
@@ -2566,66 +2770,15 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
2566 break; 2770 break;
2567 } 2771 }
2568 2772
2569 /* Cache some DPCD data in the eDP case */ 2773 if (is_edp(intel_dp))
2570 if (is_edp(intel_dp)) { 2774 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2571 struct edp_power_seq cur, vbt;
2572 u32 pp_on, pp_off, pp_div;
2573
2574 pp_on = I915_READ(PCH_PP_ON_DELAYS);
2575 pp_off = I915_READ(PCH_PP_OFF_DELAYS);
2576 pp_div = I915_READ(PCH_PP_DIVISOR);
2577
2578 if (!pp_on || !pp_off || !pp_div) {
2579 DRM_INFO("bad panel power sequencing delays, disabling panel\n");
2580 intel_dp_encoder_destroy(&intel_dp->base.base);
2581 intel_dp_destroy(&intel_connector->base);
2582 return;
2583 }
2584
2585 /* Pull timing values out of registers */
2586 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
2587 PANEL_POWER_UP_DELAY_SHIFT;
2588
2589 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
2590 PANEL_LIGHT_ON_DELAY_SHIFT;
2591
2592 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
2593 PANEL_LIGHT_OFF_DELAY_SHIFT;
2594
2595 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
2596 PANEL_POWER_DOWN_DELAY_SHIFT;
2597
2598 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
2599 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
2600
2601 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
2602 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
2603
2604 vbt = dev_priv->edp.pps;
2605
2606 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
2607 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
2608
2609#define get_delay(field) ((max(cur.field, vbt.field) + 9) / 10)
2610
2611 intel_dp->panel_power_up_delay = get_delay(t1_t3);
2612 intel_dp->backlight_on_delay = get_delay(t8);
2613 intel_dp->backlight_off_delay = get_delay(t9);
2614 intel_dp->panel_power_down_delay = get_delay(t10);
2615 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
2616
2617 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
2618 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
2619 intel_dp->panel_power_cycle_delay);
2620
2621 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
2622 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
2623 }
2624 2775
2625 intel_dp_i2c_init(intel_dp, intel_connector, name); 2776 intel_dp_i2c_init(intel_dp, intel_connector, name);
2626 2777
2778 /* Cache DPCD and EDID for edp. */
2627 if (is_edp(intel_dp)) { 2779 if (is_edp(intel_dp)) {
2628 bool ret; 2780 bool ret;
2781 struct drm_display_mode *scan;
2629 struct edid *edid; 2782 struct edid *edid;
2630 2783
2631 ironlake_edp_panel_vdd_on(intel_dp); 2784 ironlake_edp_panel_vdd_on(intel_dp);
@@ -2640,29 +2793,47 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
2640 } else { 2793 } else {
2641 /* if this fails, presume the device is a ghost */ 2794 /* if this fails, presume the device is a ghost */
2642 DRM_INFO("failed to retrieve link info, disabling eDP\n"); 2795 DRM_INFO("failed to retrieve link info, disabling eDP\n");
2643 intel_dp_encoder_destroy(&intel_dp->base.base); 2796 intel_dp_encoder_destroy(&intel_encoder->base);
2644 intel_dp_destroy(&intel_connector->base); 2797 intel_dp_destroy(connector);
2645 return; 2798 return;
2646 } 2799 }
2647 2800
2648 ironlake_edp_panel_vdd_on(intel_dp); 2801 ironlake_edp_panel_vdd_on(intel_dp);
2649 edid = drm_get_edid(connector, &intel_dp->adapter); 2802 edid = drm_get_edid(connector, &intel_dp->adapter);
2650 if (edid) { 2803 if (edid) {
2651 drm_mode_connector_update_edid_property(connector, 2804 if (drm_add_edid_modes(connector, edid)) {
2652 edid); 2805 drm_mode_connector_update_edid_property(connector, edid);
2653 intel_dp->edid_mode_count = 2806 drm_edid_to_eld(connector, edid);
2654 drm_add_edid_modes(connector, edid); 2807 } else {
2655 drm_edid_to_eld(connector, edid); 2808 kfree(edid);
2656 intel_dp->edid = edid; 2809 edid = ERR_PTR(-EINVAL);
2810 }
2811 } else {
2812 edid = ERR_PTR(-ENOENT);
2813 }
2814 intel_connector->edid = edid;
2815
2816 /* prefer fixed mode from EDID if available */
2817 list_for_each_entry(scan, &connector->probed_modes, head) {
2818 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
2819 fixed_mode = drm_mode_duplicate(dev, scan);
2820 break;
2821 }
2657 } 2822 }
2823
2824 /* fallback to VBT if available for eDP */
2825 if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
2826 fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
2827 if (fixed_mode)
2828 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
2829 }
2830
2658 ironlake_edp_panel_vdd_off(intel_dp, false); 2831 ironlake_edp_panel_vdd_off(intel_dp, false);
2659 } 2832 }
2660 2833
2661 intel_encoder->hot_plug = intel_dp_hot_plug;
2662
2663 if (is_edp(intel_dp)) { 2834 if (is_edp(intel_dp)) {
2664 dev_priv->int_edp_connector = connector; 2835 intel_panel_init(&intel_connector->panel, fixed_mode);
2665 intel_panel_setup_backlight(dev); 2836 intel_panel_setup_backlight(connector);
2666 } 2837 }
2667 2838
2668 intel_dp_add_properties(intel_dp, connector); 2839 intel_dp_add_properties(intel_dp, connector);
@@ -2676,3 +2847,45 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
2676 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); 2847 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
2677 } 2848 }
2678} 2849}
2850
2851void
2852intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
2853{
2854 struct intel_digital_port *intel_dig_port;
2855 struct intel_encoder *intel_encoder;
2856 struct drm_encoder *encoder;
2857 struct intel_connector *intel_connector;
2858
2859 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
2860 if (!intel_dig_port)
2861 return;
2862
2863 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
2864 if (!intel_connector) {
2865 kfree(intel_dig_port);
2866 return;
2867 }
2868
2869 intel_encoder = &intel_dig_port->base;
2870 encoder = &intel_encoder->base;
2871
2872 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
2873 DRM_MODE_ENCODER_TMDS);
2874 drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
2875
2876 intel_encoder->enable = intel_enable_dp;
2877 intel_encoder->pre_enable = intel_pre_enable_dp;
2878 intel_encoder->disable = intel_disable_dp;
2879 intel_encoder->post_disable = intel_post_disable_dp;
2880 intel_encoder->get_hw_state = intel_dp_get_hw_state;
2881
2882 intel_dig_port->port = port;
2883 intel_dig_port->dp.output_reg = output_reg;
2884
2885 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
2886 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
2887 intel_encoder->cloneable = false;
2888 intel_encoder->hot_plug = intel_dp_hot_plug;
2889
2890 intel_dp_init_connector(intel_dig_port, intel_connector);
2891}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index fe7142502f43..8a1bd4a3ad0d 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -94,6 +94,7 @@
94#define INTEL_OUTPUT_HDMI 6 94#define INTEL_OUTPUT_HDMI 6
95#define INTEL_OUTPUT_DISPLAYPORT 7 95#define INTEL_OUTPUT_DISPLAYPORT 7
96#define INTEL_OUTPUT_EDP 8 96#define INTEL_OUTPUT_EDP 8
97#define INTEL_OUTPUT_UNKNOWN 9
97 98
98#define INTEL_DVO_CHIP_NONE 0 99#define INTEL_DVO_CHIP_NONE 0
99#define INTEL_DVO_CHIP_LVDS 1 100#define INTEL_DVO_CHIP_LVDS 1
@@ -163,6 +164,11 @@ struct intel_encoder {
163 int crtc_mask; 164 int crtc_mask;
164}; 165};
165 166
167struct intel_panel {
168 struct drm_display_mode *fixed_mode;
169 int fitting_mode;
170};
171
166struct intel_connector { 172struct intel_connector {
167 struct drm_connector base; 173 struct drm_connector base;
168 /* 174 /*
@@ -179,12 +185,19 @@ struct intel_connector {
179 /* Reads out the current hw, returning true if the connector is enabled 185 /* Reads out the current hw, returning true if the connector is enabled
180 * and active (i.e. dpms ON state). */ 186 * and active (i.e. dpms ON state). */
181 bool (*get_hw_state)(struct intel_connector *); 187 bool (*get_hw_state)(struct intel_connector *);
188
189 /* Panel info for eDP and LVDS */
190 struct intel_panel panel;
191
192 /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
193 struct edid *edid;
182}; 194};
183 195
184struct intel_crtc { 196struct intel_crtc {
185 struct drm_crtc base; 197 struct drm_crtc base;
186 enum pipe pipe; 198 enum pipe pipe;
187 enum plane plane; 199 enum plane plane;
200 enum transcoder cpu_transcoder;
188 u8 lut_r[256], lut_g[256], lut_b[256]; 201 u8 lut_r[256], lut_g[256], lut_b[256];
189 /* 202 /*
190 * Whether the crtc and the connected output pipeline is active. Implies 203 * Whether the crtc and the connected output pipeline is active. Implies
@@ -198,6 +211,8 @@ struct intel_crtc {
198 struct intel_unpin_work *unpin_work; 211 struct intel_unpin_work *unpin_work;
199 int fdi_lanes; 212 int fdi_lanes;
200 213
214 atomic_t unpin_work_count;
215
201 /* Display surface base address adjustement for pageflips. Note that on 216 /* Display surface base address adjustement for pageflips. Note that on
202 * gen4+ this only adjusts up to a tile, offsets within a tile are 217 * gen4+ this only adjusts up to a tile, offsets within a tile are
203 * handled in the hw itself (with the TILEOFF register). */ 218 * handled in the hw itself (with the TILEOFF register). */
@@ -212,12 +227,14 @@ struct intel_crtc {
212 227
213 /* We can share PLLs across outputs if the timings match */ 228 /* We can share PLLs across outputs if the timings match */
214 struct intel_pch_pll *pch_pll; 229 struct intel_pch_pll *pch_pll;
230 uint32_t ddi_pll_sel;
215}; 231};
216 232
217struct intel_plane { 233struct intel_plane {
218 struct drm_plane base; 234 struct drm_plane base;
219 enum pipe pipe; 235 enum pipe pipe;
220 struct drm_i915_gem_object *obj; 236 struct drm_i915_gem_object *obj;
237 bool can_scale;
221 int max_downscale; 238 int max_downscale;
222 u32 lut_r[1024], lut_g[1024], lut_b[1024]; 239 u32 lut_r[1024], lut_g[1024], lut_b[1024];
223 void (*update_plane)(struct drm_plane *plane, 240 void (*update_plane)(struct drm_plane *plane,
@@ -317,10 +334,8 @@ struct dip_infoframe {
317} __attribute__((packed)); 334} __attribute__((packed));
318 335
319struct intel_hdmi { 336struct intel_hdmi {
320 struct intel_encoder base;
321 u32 sdvox_reg; 337 u32 sdvox_reg;
322 int ddc_bus; 338 int ddc_bus;
323 int ddi_port;
324 uint32_t color_range; 339 uint32_t color_range;
325 bool has_hdmi_sink; 340 bool has_hdmi_sink;
326 bool has_audio; 341 bool has_audio;
@@ -331,18 +346,15 @@ struct intel_hdmi {
331 struct drm_display_mode *adjusted_mode); 346 struct drm_display_mode *adjusted_mode);
332}; 347};
333 348
334#define DP_RECEIVER_CAP_SIZE 0xf
335#define DP_MAX_DOWNSTREAM_PORTS 0x10 349#define DP_MAX_DOWNSTREAM_PORTS 0x10
336#define DP_LINK_CONFIGURATION_SIZE 9 350#define DP_LINK_CONFIGURATION_SIZE 9
337 351
338struct intel_dp { 352struct intel_dp {
339 struct intel_encoder base;
340 uint32_t output_reg; 353 uint32_t output_reg;
341 uint32_t DP; 354 uint32_t DP;
342 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; 355 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
343 bool has_audio; 356 bool has_audio;
344 enum hdmi_force_audio force_audio; 357 enum hdmi_force_audio force_audio;
345 enum port port;
346 uint32_t color_range; 358 uint32_t color_range;
347 uint8_t link_bw; 359 uint8_t link_bw;
348 uint8_t lane_count; 360 uint8_t lane_count;
@@ -357,11 +369,16 @@ struct intel_dp {
357 int panel_power_cycle_delay; 369 int panel_power_cycle_delay;
358 int backlight_on_delay; 370 int backlight_on_delay;
359 int backlight_off_delay; 371 int backlight_off_delay;
360 struct drm_display_mode *panel_fixed_mode; /* for eDP */
361 struct delayed_work panel_vdd_work; 372 struct delayed_work panel_vdd_work;
362 bool want_panel_vdd; 373 bool want_panel_vdd;
363 struct edid *edid; /* cached EDID for eDP */ 374 struct intel_connector *attached_connector;
364 int edid_mode_count; 375};
376
377struct intel_digital_port {
378 struct intel_encoder base;
379 enum port port;
380 struct intel_dp dp;
381 struct intel_hdmi hdmi;
365}; 382};
366 383
367static inline struct drm_crtc * 384static inline struct drm_crtc *
@@ -380,11 +397,14 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
380 397
381struct intel_unpin_work { 398struct intel_unpin_work {
382 struct work_struct work; 399 struct work_struct work;
383 struct drm_device *dev; 400 struct drm_crtc *crtc;
384 struct drm_i915_gem_object *old_fb_obj; 401 struct drm_i915_gem_object *old_fb_obj;
385 struct drm_i915_gem_object *pending_flip_obj; 402 struct drm_i915_gem_object *pending_flip_obj;
386 struct drm_pending_vblank_event *event; 403 struct drm_pending_vblank_event *event;
387 int pending; 404 atomic_t pending;
405#define INTEL_FLIP_INACTIVE 0
406#define INTEL_FLIP_PENDING 1
407#define INTEL_FLIP_COMPLETE 2
388 bool enable_stall_check; 408 bool enable_stall_check;
389}; 409};
390 410
@@ -395,6 +415,8 @@ struct intel_fbc_work {
395 int interval; 415 int interval;
396}; 416};
397 417
418int intel_pch_rawclk(struct drm_device *dev);
419
398int intel_connector_update_modes(struct drm_connector *connector, 420int intel_connector_update_modes(struct drm_connector *connector,
399 struct edid *edid); 421 struct edid *edid);
400int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); 422int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
@@ -405,7 +427,12 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector)
405extern void intel_crt_init(struct drm_device *dev); 427extern void intel_crt_init(struct drm_device *dev);
406extern void intel_hdmi_init(struct drm_device *dev, 428extern void intel_hdmi_init(struct drm_device *dev,
407 int sdvox_reg, enum port port); 429 int sdvox_reg, enum port port);
430extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
431 struct intel_connector *intel_connector);
408extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); 432extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
433extern bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
434 const struct drm_display_mode *mode,
435 struct drm_display_mode *adjusted_mode);
409extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); 436extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
410extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, 437extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
411 bool is_sdvob); 438 bool is_sdvob);
@@ -418,10 +445,27 @@ extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
418extern bool intel_lvds_init(struct drm_device *dev); 445extern bool intel_lvds_init(struct drm_device *dev);
419extern void intel_dp_init(struct drm_device *dev, int output_reg, 446extern void intel_dp_init(struct drm_device *dev, int output_reg,
420 enum port port); 447 enum port port);
448extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
449 struct intel_connector *intel_connector);
421void 450void
422intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, 451intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
423 struct drm_display_mode *adjusted_mode); 452 struct drm_display_mode *adjusted_mode);
453extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
454extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
455extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
456extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
457extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
458extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
459extern bool intel_dp_mode_fixup(struct drm_encoder *encoder,
460 const struct drm_display_mode *mode,
461 struct drm_display_mode *adjusted_mode);
424extern bool intel_dpd_is_edp(struct drm_device *dev); 462extern bool intel_dpd_is_edp(struct drm_device *dev);
463extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
464extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
465extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
466extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
467extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
468extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
425extern void intel_edp_link_config(struct intel_encoder *, int *, int *); 469extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
426extern int intel_edp_target_clock(struct intel_encoder *, 470extern int intel_edp_target_clock(struct intel_encoder *,
427 struct drm_display_mode *mode); 471 struct drm_display_mode *mode);
@@ -431,6 +475,10 @@ extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
431 enum plane plane); 475 enum plane plane);
432 476
433/* intel_panel.c */ 477/* intel_panel.c */
478extern int intel_panel_init(struct intel_panel *panel,
479 struct drm_display_mode *fixed_mode);
480extern void intel_panel_fini(struct intel_panel *panel);
481
434extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, 482extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
435 struct drm_display_mode *adjusted_mode); 483 struct drm_display_mode *adjusted_mode);
436extern void intel_pch_panel_fitting(struct drm_device *dev, 484extern void intel_pch_panel_fitting(struct drm_device *dev,
@@ -439,7 +487,7 @@ extern void intel_pch_panel_fitting(struct drm_device *dev,
439 struct drm_display_mode *adjusted_mode); 487 struct drm_display_mode *adjusted_mode);
440extern u32 intel_panel_get_max_backlight(struct drm_device *dev); 488extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
441extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); 489extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
442extern int intel_panel_setup_backlight(struct drm_device *dev); 490extern int intel_panel_setup_backlight(struct drm_connector *connector);
443extern void intel_panel_enable_backlight(struct drm_device *dev, 491extern void intel_panel_enable_backlight(struct drm_device *dev,
444 enum pipe pipe); 492 enum pipe pipe);
445extern void intel_panel_disable_backlight(struct drm_device *dev); 493extern void intel_panel_disable_backlight(struct drm_device *dev);
@@ -473,6 +521,31 @@ static inline struct intel_encoder *intel_attached_encoder(struct drm_connector
473 return to_intel_connector(connector)->encoder; 521 return to_intel_connector(connector)->encoder;
474} 522}
475 523
524static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
525{
526 struct intel_digital_port *intel_dig_port =
527 container_of(encoder, struct intel_digital_port, base.base);
528 return &intel_dig_port->dp;
529}
530
531static inline struct intel_digital_port *
532enc_to_dig_port(struct drm_encoder *encoder)
533{
534 return container_of(encoder, struct intel_digital_port, base.base);
535}
536
537static inline struct intel_digital_port *
538dp_to_dig_port(struct intel_dp *intel_dp)
539{
540 return container_of(intel_dp, struct intel_digital_port, dp);
541}
542
543static inline struct intel_digital_port *
544hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
545{
546 return container_of(intel_hdmi, struct intel_digital_port, hdmi);
547}
548
476extern void intel_connector_attach_encoder(struct intel_connector *connector, 549extern void intel_connector_attach_encoder(struct intel_connector *connector,
477 struct intel_encoder *encoder); 550 struct intel_encoder *encoder);
478extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); 551extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
@@ -481,8 +554,12 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
481 struct drm_crtc *crtc); 554 struct drm_crtc *crtc);
482int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 555int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
483 struct drm_file *file_priv); 556 struct drm_file *file_priv);
557extern enum transcoder
558intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
559 enum pipe pipe);
484extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); 560extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
485extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe); 561extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
562extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
486 563
487struct intel_load_detect_pipe { 564struct intel_load_detect_pipe {
488 struct drm_framebuffer *release_fb; 565 struct drm_framebuffer *release_fb;
@@ -550,6 +627,10 @@ extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
550extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe, 627extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
551 struct drm_display_mode *mode); 628 struct drm_display_mode *mode);
552 629
630extern unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
631 unsigned int bpp,
632 unsigned int pitch);
633
553extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data, 634extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
554 struct drm_file *file_priv); 635 struct drm_file *file_priv);
555extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data, 636extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
@@ -573,12 +654,22 @@ extern void intel_disable_gt_powersave(struct drm_device *dev);
573extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv); 654extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
574extern void ironlake_teardown_rc6(struct drm_device *dev); 655extern void ironlake_teardown_rc6(struct drm_device *dev);
575 656
576extern void intel_enable_ddi(struct intel_encoder *encoder);
577extern void intel_disable_ddi(struct intel_encoder *encoder);
578extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder, 657extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
579 enum pipe *pipe); 658 enum pipe *pipe);
580extern void intel_ddi_mode_set(struct drm_encoder *encoder, 659extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
581 struct drm_display_mode *mode, 660extern void intel_ddi_pll_init(struct drm_device *dev);
582 struct drm_display_mode *adjusted_mode); 661extern void intel_ddi_enable_pipe_func(struct drm_crtc *crtc);
662extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
663 enum transcoder cpu_transcoder);
664extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
665extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
666extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
667extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock);
668extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
669extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
670extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
671extern bool
672intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
673extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
583 674
584#endif /* __INTEL_DRV_H__ */ 675#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 9ba0aaed7ee8..2ee9821b9d93 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -36,10 +36,15 @@
36#include <drm/i915_drm.h> 36#include <drm/i915_drm.h>
37#include "i915_drv.h" 37#include "i915_drv.h"
38 38
39static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
40{
41 return hdmi_to_dig_port(intel_hdmi)->base.base.dev;
42}
43
39static void 44static void
40assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) 45assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
41{ 46{
42 struct drm_device *dev = intel_hdmi->base.base.dev; 47 struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
43 struct drm_i915_private *dev_priv = dev->dev_private; 48 struct drm_i915_private *dev_priv = dev->dev_private;
44 uint32_t enabled_bits; 49 uint32_t enabled_bits;
45 50
@@ -51,13 +56,14 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
51 56
52struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) 57struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
53{ 58{
54 return container_of(encoder, struct intel_hdmi, base.base); 59 struct intel_digital_port *intel_dig_port =
60 container_of(encoder, struct intel_digital_port, base.base);
61 return &intel_dig_port->hdmi;
55} 62}
56 63
57static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector) 64static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
58{ 65{
59 return container_of(intel_attached_encoder(connector), 66 return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
60 struct intel_hdmi, base);
61} 67}
62 68
63void intel_dip_infoframe_csum(struct dip_infoframe *frame) 69void intel_dip_infoframe_csum(struct dip_infoframe *frame)
@@ -334,6 +340,8 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
334 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 340 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
335 avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2; 341 avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
336 342
343 avi_if.body.avi.VIC = drm_mode_cea_vic(adjusted_mode);
344
337 intel_set_infoframe(encoder, &avi_if); 345 intel_set_infoframe(encoder, &avi_if);
338} 346}
339 347
@@ -754,16 +762,16 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector,
754 return MODE_OK; 762 return MODE_OK;
755} 763}
756 764
757static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, 765bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
758 const struct drm_display_mode *mode, 766 const struct drm_display_mode *mode,
759 struct drm_display_mode *adjusted_mode) 767 struct drm_display_mode *adjusted_mode)
760{ 768{
761 return true; 769 return true;
762} 770}
763 771
764static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi) 772static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi)
765{ 773{
766 struct drm_device *dev = intel_hdmi->base.base.dev; 774 struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
767 struct drm_i915_private *dev_priv = dev->dev_private; 775 struct drm_i915_private *dev_priv = dev->dev_private;
768 uint32_t bit; 776 uint32_t bit;
769 777
@@ -786,6 +794,9 @@ static enum drm_connector_status
786intel_hdmi_detect(struct drm_connector *connector, bool force) 794intel_hdmi_detect(struct drm_connector *connector, bool force)
787{ 795{
788 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); 796 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
797 struct intel_digital_port *intel_dig_port =
798 hdmi_to_dig_port(intel_hdmi);
799 struct intel_encoder *intel_encoder = &intel_dig_port->base;
789 struct drm_i915_private *dev_priv = connector->dev->dev_private; 800 struct drm_i915_private *dev_priv = connector->dev->dev_private;
790 struct edid *edid; 801 struct edid *edid;
791 enum drm_connector_status status = connector_status_disconnected; 802 enum drm_connector_status status = connector_status_disconnected;
@@ -814,6 +825,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
814 if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO) 825 if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
815 intel_hdmi->has_audio = 826 intel_hdmi->has_audio =
816 (intel_hdmi->force_audio == HDMI_AUDIO_ON); 827 (intel_hdmi->force_audio == HDMI_AUDIO_ON);
828 intel_encoder->type = INTEL_OUTPUT_HDMI;
817 } 829 }
818 830
819 return status; 831 return status;
@@ -859,10 +871,12 @@ intel_hdmi_set_property(struct drm_connector *connector,
859 uint64_t val) 871 uint64_t val)
860{ 872{
861 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); 873 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
874 struct intel_digital_port *intel_dig_port =
875 hdmi_to_dig_port(intel_hdmi);
862 struct drm_i915_private *dev_priv = connector->dev->dev_private; 876 struct drm_i915_private *dev_priv = connector->dev->dev_private;
863 int ret; 877 int ret;
864 878
865 ret = drm_connector_property_set_value(connector, property, val); 879 ret = drm_object_property_set_value(&connector->base, property, val);
866 if (ret) 880 if (ret)
867 return ret; 881 return ret;
868 882
@@ -898,8 +912,8 @@ intel_hdmi_set_property(struct drm_connector *connector,
898 return -EINVAL; 912 return -EINVAL;
899 913
900done: 914done:
901 if (intel_hdmi->base.base.crtc) { 915 if (intel_dig_port->base.base.crtc) {
902 struct drm_crtc *crtc = intel_hdmi->base.base.crtc; 916 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
903 intel_set_mode(crtc, &crtc->mode, 917 intel_set_mode(crtc, &crtc->mode,
904 crtc->x, crtc->y, crtc->fb); 918 crtc->x, crtc->y, crtc->fb);
905 } 919 }
@@ -914,12 +928,6 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
914 kfree(connector); 928 kfree(connector);
915} 929}
916 930
917static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
918 .mode_fixup = intel_hdmi_mode_fixup,
919 .mode_set = intel_ddi_mode_set,
920 .disable = intel_encoder_noop,
921};
922
923static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { 931static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
924 .mode_fixup = intel_hdmi_mode_fixup, 932 .mode_fixup = intel_hdmi_mode_fixup,
925 .mode_set = intel_hdmi_mode_set, 933 .mode_set = intel_hdmi_mode_set,
@@ -951,43 +959,24 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
951 intel_attach_broadcast_rgb_property(connector); 959 intel_attach_broadcast_rgb_property(connector);
952} 960}
953 961
954void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port) 962void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
963 struct intel_connector *intel_connector)
955{ 964{
965 struct drm_connector *connector = &intel_connector->base;
966 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
967 struct intel_encoder *intel_encoder = &intel_dig_port->base;
968 struct drm_device *dev = intel_encoder->base.dev;
956 struct drm_i915_private *dev_priv = dev->dev_private; 969 struct drm_i915_private *dev_priv = dev->dev_private;
957 struct drm_connector *connector; 970 enum port port = intel_dig_port->port;
958 struct intel_encoder *intel_encoder;
959 struct intel_connector *intel_connector;
960 struct intel_hdmi *intel_hdmi;
961
962 intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
963 if (!intel_hdmi)
964 return;
965
966 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
967 if (!intel_connector) {
968 kfree(intel_hdmi);
969 return;
970 }
971
972 intel_encoder = &intel_hdmi->base;
973 drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
974 DRM_MODE_ENCODER_TMDS);
975 971
976 connector = &intel_connector->base;
977 drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, 972 drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
978 DRM_MODE_CONNECTOR_HDMIA); 973 DRM_MODE_CONNECTOR_HDMIA);
979 drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); 974 drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
980 975
981 intel_encoder->type = INTEL_OUTPUT_HDMI;
982
983 connector->polled = DRM_CONNECTOR_POLL_HPD; 976 connector->polled = DRM_CONNECTOR_POLL_HPD;
984 connector->interlace_allowed = 1; 977 connector->interlace_allowed = 1;
985 connector->doublescan_allowed = 0; 978 connector->doublescan_allowed = 0;
986 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
987 979
988 intel_encoder->cloneable = false;
989
990 intel_hdmi->ddi_port = port;
991 switch (port) { 980 switch (port) {
992 case PORT_B: 981 case PORT_B:
993 intel_hdmi->ddc_bus = GMBUS_PORT_DPB; 982 intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
@@ -1007,8 +996,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
1007 BUG(); 996 BUG();
1008 } 997 }
1009 998
1010 intel_hdmi->sdvox_reg = sdvox_reg;
1011
1012 if (!HAS_PCH_SPLIT(dev)) { 999 if (!HAS_PCH_SPLIT(dev)) {
1013 intel_hdmi->write_infoframe = g4x_write_infoframe; 1000 intel_hdmi->write_infoframe = g4x_write_infoframe;
1014 intel_hdmi->set_infoframes = g4x_set_infoframes; 1001 intel_hdmi->set_infoframes = g4x_set_infoframes;
@@ -1026,21 +1013,10 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
1026 intel_hdmi->set_infoframes = cpt_set_infoframes; 1013 intel_hdmi->set_infoframes = cpt_set_infoframes;
1027 } 1014 }
1028 1015
1029 if (IS_HASWELL(dev)) { 1016 if (IS_HASWELL(dev))
1030 intel_encoder->enable = intel_enable_ddi; 1017 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
1031 intel_encoder->disable = intel_disable_ddi; 1018 else
1032 intel_encoder->get_hw_state = intel_ddi_get_hw_state; 1019 intel_connector->get_hw_state = intel_connector_get_hw_state;
1033 drm_encoder_helper_add(&intel_encoder->base,
1034 &intel_hdmi_helper_funcs_hsw);
1035 } else {
1036 intel_encoder->enable = intel_enable_hdmi;
1037 intel_encoder->disable = intel_disable_hdmi;
1038 intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
1039 drm_encoder_helper_add(&intel_encoder->base,
1040 &intel_hdmi_helper_funcs);
1041 }
1042 intel_connector->get_hw_state = intel_connector_get_hw_state;
1043
1044 1020
1045 intel_hdmi_add_properties(intel_hdmi, connector); 1021 intel_hdmi_add_properties(intel_hdmi, connector);
1046 1022
@@ -1056,3 +1032,42 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
1056 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); 1032 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
1057 } 1033 }
1058} 1034}
1035
1036void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
1037{
1038 struct intel_digital_port *intel_dig_port;
1039 struct intel_encoder *intel_encoder;
1040 struct drm_encoder *encoder;
1041 struct intel_connector *intel_connector;
1042
1043 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
1044 if (!intel_dig_port)
1045 return;
1046
1047 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
1048 if (!intel_connector) {
1049 kfree(intel_dig_port);
1050 return;
1051 }
1052
1053 intel_encoder = &intel_dig_port->base;
1054 encoder = &intel_encoder->base;
1055
1056 drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
1057 DRM_MODE_ENCODER_TMDS);
1058 drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
1059
1060 intel_encoder->enable = intel_enable_hdmi;
1061 intel_encoder->disable = intel_disable_hdmi;
1062 intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
1063
1064 intel_encoder->type = INTEL_OUTPUT_HDMI;
1065 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
1066 intel_encoder->cloneable = false;
1067
1068 intel_dig_port->port = port;
1069 intel_dig_port->hdmi.sdvox_reg = sdvox_reg;
1070 intel_dig_port->dp.output_reg = 0;
1071
1072 intel_hdmi_init_connector(intel_dig_port, intel_connector);
1073}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index c2c6dbc0971c..3ef5af15b812 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -432,7 +432,7 @@ timeout:
432 I915_WRITE(GMBUS0 + reg_offset, 0); 432 I915_WRITE(GMBUS0 + reg_offset, 0);
433 433
434 /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */ 434 /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
435 bus->force_bit = true; 435 bus->force_bit = 1;
436 ret = i2c_bit_algo.master_xfer(adapter, msgs, num); 436 ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
437 437
438out: 438out:
@@ -491,7 +491,7 @@ int intel_setup_gmbus(struct drm_device *dev)
491 491
492 /* gmbus seems to be broken on i830 */ 492 /* gmbus seems to be broken on i830 */
493 if (IS_I830(dev)) 493 if (IS_I830(dev))
494 bus->force_bit = true; 494 bus->force_bit = 1;
495 495
496 intel_gpio_setup(bus, port); 496 intel_gpio_setup(bus, port);
497 497
@@ -532,7 +532,10 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
532{ 532{
533 struct intel_gmbus *bus = to_intel_gmbus(adapter); 533 struct intel_gmbus *bus = to_intel_gmbus(adapter);
534 534
535 bus->force_bit = force_bit; 535 bus->force_bit += force_bit ? 1 : -1;
536 DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
537 force_bit ? "en" : "dis", adapter->name,
538 bus->force_bit);
536} 539}
537 540
538void intel_teardown_gmbus(struct drm_device *dev) 541void intel_teardown_gmbus(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index edba93b3474b..17aee74258ad 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -40,28 +40,30 @@
40#include <linux/acpi.h> 40#include <linux/acpi.h>
41 41
42/* Private structure for the integrated LVDS support */ 42/* Private structure for the integrated LVDS support */
43struct intel_lvds { 43struct intel_lvds_connector {
44 struct intel_encoder base; 44 struct intel_connector base;
45 45
46 struct edid *edid; 46 struct notifier_block lid_notifier;
47};
48
49struct intel_lvds_encoder {
50 struct intel_encoder base;
47 51
48 int fitting_mode;
49 u32 pfit_control; 52 u32 pfit_control;
50 u32 pfit_pgm_ratios; 53 u32 pfit_pgm_ratios;
51 bool pfit_dirty; 54 bool pfit_dirty;
52 55
53 struct drm_display_mode *fixed_mode; 56 struct intel_lvds_connector *attached_connector;
54}; 57};
55 58
56static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder) 59static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
57{ 60{
58 return container_of(encoder, struct intel_lvds, base.base); 61 return container_of(encoder, struct intel_lvds_encoder, base.base);
59} 62}
60 63
61static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector) 64static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector)
62{ 65{
63 return container_of(intel_attached_encoder(connector), 66 return container_of(connector, struct intel_lvds_connector, base.base);
64 struct intel_lvds, base);
65} 67}
66 68
67static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, 69static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
@@ -96,7 +98,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
96static void intel_enable_lvds(struct intel_encoder *encoder) 98static void intel_enable_lvds(struct intel_encoder *encoder)
97{ 99{
98 struct drm_device *dev = encoder->base.dev; 100 struct drm_device *dev = encoder->base.dev;
99 struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base); 101 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
100 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 102 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
101 struct drm_i915_private *dev_priv = dev->dev_private; 103 struct drm_i915_private *dev_priv = dev->dev_private;
102 u32 ctl_reg, lvds_reg, stat_reg; 104 u32 ctl_reg, lvds_reg, stat_reg;
@@ -113,7 +115,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
113 115
114 I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); 116 I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
115 117
116 if (intel_lvds->pfit_dirty) { 118 if (lvds_encoder->pfit_dirty) {
117 /* 119 /*
118 * Enable automatic panel scaling so that non-native modes 120 * Enable automatic panel scaling so that non-native modes
119 * fill the screen. The panel fitter should only be 121 * fill the screen. The panel fitter should only be
@@ -121,12 +123,12 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
121 * register description and PRM. 123 * register description and PRM.
122 */ 124 */
123 DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", 125 DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
124 intel_lvds->pfit_control, 126 lvds_encoder->pfit_control,
125 intel_lvds->pfit_pgm_ratios); 127 lvds_encoder->pfit_pgm_ratios);
126 128
127 I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); 129 I915_WRITE(PFIT_PGM_RATIOS, lvds_encoder->pfit_pgm_ratios);
128 I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); 130 I915_WRITE(PFIT_CONTROL, lvds_encoder->pfit_control);
129 intel_lvds->pfit_dirty = false; 131 lvds_encoder->pfit_dirty = false;
130 } 132 }
131 133
132 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); 134 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
@@ -140,7 +142,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
140static void intel_disable_lvds(struct intel_encoder *encoder) 142static void intel_disable_lvds(struct intel_encoder *encoder)
141{ 143{
142 struct drm_device *dev = encoder->base.dev; 144 struct drm_device *dev = encoder->base.dev;
143 struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base); 145 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
144 struct drm_i915_private *dev_priv = dev->dev_private; 146 struct drm_i915_private *dev_priv = dev->dev_private;
145 u32 ctl_reg, lvds_reg, stat_reg; 147 u32 ctl_reg, lvds_reg, stat_reg;
146 148
@@ -160,9 +162,9 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
160 if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000)) 162 if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
161 DRM_ERROR("timed out waiting for panel to power off\n"); 163 DRM_ERROR("timed out waiting for panel to power off\n");
162 164
163 if (intel_lvds->pfit_control) { 165 if (lvds_encoder->pfit_control) {
164 I915_WRITE(PFIT_CONTROL, 0); 166 I915_WRITE(PFIT_CONTROL, 0);
165 intel_lvds->pfit_dirty = true; 167 lvds_encoder->pfit_dirty = true;
166 } 168 }
167 169
168 I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); 170 I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
@@ -172,8 +174,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
172static int intel_lvds_mode_valid(struct drm_connector *connector, 174static int intel_lvds_mode_valid(struct drm_connector *connector,
173 struct drm_display_mode *mode) 175 struct drm_display_mode *mode)
174{ 176{
175 struct intel_lvds *intel_lvds = intel_attached_lvds(connector); 177 struct intel_connector *intel_connector = to_intel_connector(connector);
176 struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode; 178 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
177 179
178 if (mode->hdisplay > fixed_mode->hdisplay) 180 if (mode->hdisplay > fixed_mode->hdisplay)
179 return MODE_PANEL; 181 return MODE_PANEL;
@@ -249,8 +251,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
249{ 251{
250 struct drm_device *dev = encoder->dev; 252 struct drm_device *dev = encoder->dev;
251 struct drm_i915_private *dev_priv = dev->dev_private; 253 struct drm_i915_private *dev_priv = dev->dev_private;
252 struct intel_lvds *intel_lvds = to_intel_lvds(encoder); 254 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
253 struct intel_crtc *intel_crtc = intel_lvds->base.new_crtc; 255 struct intel_connector *intel_connector =
256 &lvds_encoder->attached_connector->base;
257 struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
254 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; 258 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
255 int pipe; 259 int pipe;
256 260
@@ -260,7 +264,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
260 return false; 264 return false;
261 } 265 }
262 266
263 if (intel_encoder_check_is_cloned(&intel_lvds->base)) 267 if (intel_encoder_check_is_cloned(&lvds_encoder->base))
264 return false; 268 return false;
265 269
266 /* 270 /*
@@ -269,10 +273,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
269 * with the panel scaling set up to source from the H/VDisplay 273 * with the panel scaling set up to source from the H/VDisplay
270 * of the original mode. 274 * of the original mode.
271 */ 275 */
272 intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode); 276 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
277 adjusted_mode);
273 278
274 if (HAS_PCH_SPLIT(dev)) { 279 if (HAS_PCH_SPLIT(dev)) {
275 intel_pch_panel_fitting(dev, intel_lvds->fitting_mode, 280 intel_pch_panel_fitting(dev,
281 intel_connector->panel.fitting_mode,
276 mode, adjusted_mode); 282 mode, adjusted_mode);
277 return true; 283 return true;
278 } 284 }
@@ -298,7 +304,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
298 304
299 drm_mode_set_crtcinfo(adjusted_mode, 0); 305 drm_mode_set_crtcinfo(adjusted_mode, 0);
300 306
301 switch (intel_lvds->fitting_mode) { 307 switch (intel_connector->panel.fitting_mode) {
302 case DRM_MODE_SCALE_CENTER: 308 case DRM_MODE_SCALE_CENTER:
303 /* 309 /*
304 * For centered modes, we have to calculate border widths & 310 * For centered modes, we have to calculate border widths &
@@ -396,11 +402,11 @@ out:
396 if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither) 402 if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
397 pfit_control |= PANEL_8TO6_DITHER_ENABLE; 403 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
398 404
399 if (pfit_control != intel_lvds->pfit_control || 405 if (pfit_control != lvds_encoder->pfit_control ||
400 pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) { 406 pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) {
401 intel_lvds->pfit_control = pfit_control; 407 lvds_encoder->pfit_control = pfit_control;
402 intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios; 408 lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios;
403 intel_lvds->pfit_dirty = true; 409 lvds_encoder->pfit_dirty = true;
404 } 410 }
405 dev_priv->lvds_border_bits = border; 411 dev_priv->lvds_border_bits = border;
406 412
@@ -449,14 +455,15 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
449 */ 455 */
450static int intel_lvds_get_modes(struct drm_connector *connector) 456static int intel_lvds_get_modes(struct drm_connector *connector)
451{ 457{
452 struct intel_lvds *intel_lvds = intel_attached_lvds(connector); 458 struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector);
453 struct drm_device *dev = connector->dev; 459 struct drm_device *dev = connector->dev;
454 struct drm_display_mode *mode; 460 struct drm_display_mode *mode;
455 461
456 if (intel_lvds->edid) 462 /* use cached edid if we have one */
457 return drm_add_edid_modes(connector, intel_lvds->edid); 463 if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
464 return drm_add_edid_modes(connector, lvds_connector->base.edid);
458 465
459 mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode); 466 mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode);
460 if (mode == NULL) 467 if (mode == NULL)
461 return 0; 468 return 0;
462 469
@@ -496,10 +503,11 @@ static const struct dmi_system_id intel_no_modeset_on_lid[] = {
496static int intel_lid_notify(struct notifier_block *nb, unsigned long val, 503static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
497 void *unused) 504 void *unused)
498{ 505{
499 struct drm_i915_private *dev_priv = 506 struct intel_lvds_connector *lvds_connector =
500 container_of(nb, struct drm_i915_private, lid_notifier); 507 container_of(nb, struct intel_lvds_connector, lid_notifier);
501 struct drm_device *dev = dev_priv->dev; 508 struct drm_connector *connector = &lvds_connector->base.base;
502 struct drm_connector *connector = dev_priv->int_lvds_connector; 509 struct drm_device *dev = connector->dev;
510 struct drm_i915_private *dev_priv = dev->dev_private;
503 511
504 if (dev->switch_power_state != DRM_SWITCH_POWER_ON) 512 if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
505 return NOTIFY_OK; 513 return NOTIFY_OK;
@@ -508,9 +516,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
508 * check and update the status of LVDS connector after receiving 516 * check and update the status of LVDS connector after receiving
509 * the LID nofication event. 517 * the LID nofication event.
510 */ 518 */
511 if (connector) 519 connector->status = connector->funcs->detect(connector, false);
512 connector->status = connector->funcs->detect(connector,
513 false);
514 520
515 /* Don't force modeset on machines where it causes a GPU lockup */ 521 /* Don't force modeset on machines where it causes a GPU lockup */
516 if (dmi_check_system(intel_no_modeset_on_lid)) 522 if (dmi_check_system(intel_no_modeset_on_lid))
@@ -526,7 +532,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
526 dev_priv->modeset_on_lid = 0; 532 dev_priv->modeset_on_lid = 0;
527 533
528 mutex_lock(&dev->mode_config.mutex); 534 mutex_lock(&dev->mode_config.mutex);
529 intel_modeset_check_state(dev); 535 intel_modeset_setup_hw_state(dev, true);
530 mutex_unlock(&dev->mode_config.mutex); 536 mutex_unlock(&dev->mode_config.mutex);
531 537
532 return NOTIFY_OK; 538 return NOTIFY_OK;
@@ -541,13 +547,18 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
541 */ 547 */
542static void intel_lvds_destroy(struct drm_connector *connector) 548static void intel_lvds_destroy(struct drm_connector *connector)
543{ 549{
544 struct drm_device *dev = connector->dev; 550 struct intel_lvds_connector *lvds_connector =
545 struct drm_i915_private *dev_priv = dev->dev_private; 551 to_lvds_connector(connector);
552
553 if (lvds_connector->lid_notifier.notifier_call)
554 acpi_lid_notifier_unregister(&lvds_connector->lid_notifier);
555
556 if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
557 kfree(lvds_connector->base.edid);
546 558
547 intel_panel_destroy_backlight(dev); 559 intel_panel_destroy_backlight(connector->dev);
560 intel_panel_fini(&lvds_connector->base.panel);
548 561
549 if (dev_priv->lid_notifier.notifier_call)
550 acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
551 drm_sysfs_connector_remove(connector); 562 drm_sysfs_connector_remove(connector);
552 drm_connector_cleanup(connector); 563 drm_connector_cleanup(connector);
553 kfree(connector); 564 kfree(connector);
@@ -557,22 +568,24 @@ static int intel_lvds_set_property(struct drm_connector *connector,
557 struct drm_property *property, 568 struct drm_property *property,
558 uint64_t value) 569 uint64_t value)
559{ 570{
560 struct intel_lvds *intel_lvds = intel_attached_lvds(connector); 571 struct intel_connector *intel_connector = to_intel_connector(connector);
561 struct drm_device *dev = connector->dev; 572 struct drm_device *dev = connector->dev;
562 573
563 if (property == dev->mode_config.scaling_mode_property) { 574 if (property == dev->mode_config.scaling_mode_property) {
564 struct drm_crtc *crtc = intel_lvds->base.base.crtc; 575 struct drm_crtc *crtc;
565 576
566 if (value == DRM_MODE_SCALE_NONE) { 577 if (value == DRM_MODE_SCALE_NONE) {
567 DRM_DEBUG_KMS("no scaling not supported\n"); 578 DRM_DEBUG_KMS("no scaling not supported\n");
568 return -EINVAL; 579 return -EINVAL;
569 } 580 }
570 581
571 if (intel_lvds->fitting_mode == value) { 582 if (intel_connector->panel.fitting_mode == value) {
572 /* the LVDS scaling property is not changed */ 583 /* the LVDS scaling property is not changed */
573 return 0; 584 return 0;
574 } 585 }
575 intel_lvds->fitting_mode = value; 586 intel_connector->panel.fitting_mode = value;
587
588 crtc = intel_attached_encoder(connector)->base.crtc;
576 if (crtc && crtc->enabled) { 589 if (crtc && crtc->enabled) {
577 /* 590 /*
578 * If the CRTC is enabled, the display will be changed 591 * If the CRTC is enabled, the display will be changed
@@ -763,14 +776,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
763 }, 776 },
764 { 777 {
765 .callback = intel_no_lvds_dmi_callback, 778 .callback = intel_no_lvds_dmi_callback,
766 .ident = "ZOTAC ZBOXSD-ID12/ID13",
767 .matches = {
768 DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"),
769 DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
770 },
771 },
772 {
773 .callback = intel_no_lvds_dmi_callback,
774 .ident = "Gigabyte GA-D525TUD", 779 .ident = "Gigabyte GA-D525TUD",
775 .matches = { 780 .matches = {
776 DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), 781 DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
@@ -912,12 +917,15 @@ static bool intel_lvds_supported(struct drm_device *dev)
912bool intel_lvds_init(struct drm_device *dev) 917bool intel_lvds_init(struct drm_device *dev)
913{ 918{
914 struct drm_i915_private *dev_priv = dev->dev_private; 919 struct drm_i915_private *dev_priv = dev->dev_private;
915 struct intel_lvds *intel_lvds; 920 struct intel_lvds_encoder *lvds_encoder;
916 struct intel_encoder *intel_encoder; 921 struct intel_encoder *intel_encoder;
922 struct intel_lvds_connector *lvds_connector;
917 struct intel_connector *intel_connector; 923 struct intel_connector *intel_connector;
918 struct drm_connector *connector; 924 struct drm_connector *connector;
919 struct drm_encoder *encoder; 925 struct drm_encoder *encoder;
920 struct drm_display_mode *scan; /* *modes, *bios_mode; */ 926 struct drm_display_mode *scan; /* *modes, *bios_mode; */
927 struct drm_display_mode *fixed_mode = NULL;
928 struct edid *edid;
921 struct drm_crtc *crtc; 929 struct drm_crtc *crtc;
922 u32 lvds; 930 u32 lvds;
923 int pipe; 931 int pipe;
@@ -945,23 +953,25 @@ bool intel_lvds_init(struct drm_device *dev)
945 } 953 }
946 } 954 }
947 955
948 intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL); 956 lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL);
949 if (!intel_lvds) { 957 if (!lvds_encoder)
950 return false; 958 return false;
951 }
952 959
953 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 960 lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL);
954 if (!intel_connector) { 961 if (!lvds_connector) {
955 kfree(intel_lvds); 962 kfree(lvds_encoder);
956 return false; 963 return false;
957 } 964 }
958 965
966 lvds_encoder->attached_connector = lvds_connector;
967
959 if (!HAS_PCH_SPLIT(dev)) { 968 if (!HAS_PCH_SPLIT(dev)) {
960 intel_lvds->pfit_control = I915_READ(PFIT_CONTROL); 969 lvds_encoder->pfit_control = I915_READ(PFIT_CONTROL);
961 } 970 }
962 971
963 intel_encoder = &intel_lvds->base; 972 intel_encoder = &lvds_encoder->base;
964 encoder = &intel_encoder->base; 973 encoder = &intel_encoder->base;
974 intel_connector = &lvds_connector->base;
965 connector = &intel_connector->base; 975 connector = &intel_connector->base;
966 drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs, 976 drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
967 DRM_MODE_CONNECTOR_LVDS); 977 DRM_MODE_CONNECTOR_LVDS);
@@ -993,14 +1003,10 @@ bool intel_lvds_init(struct drm_device *dev)
993 1003
994 /* create the scaling mode property */ 1004 /* create the scaling mode property */
995 drm_mode_create_scaling_mode_property(dev); 1005 drm_mode_create_scaling_mode_property(dev);
996 /* 1006 drm_object_attach_property(&connector->base,
997 * the initial panel fitting mode will be FULL_SCREEN.
998 */
999
1000 drm_connector_attach_property(&intel_connector->base,
1001 dev->mode_config.scaling_mode_property, 1007 dev->mode_config.scaling_mode_property,
1002 DRM_MODE_SCALE_ASPECT); 1008 DRM_MODE_SCALE_ASPECT);
1003 intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT; 1009 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
1004 /* 1010 /*
1005 * LVDS discovery: 1011 * LVDS discovery:
1006 * 1) check for EDID on DDC 1012 * 1) check for EDID on DDC
@@ -1015,20 +1021,21 @@ bool intel_lvds_init(struct drm_device *dev)
1015 * Attempt to get the fixed panel mode from DDC. Assume that the 1021 * Attempt to get the fixed panel mode from DDC. Assume that the
1016 * preferred mode is the right one. 1022 * preferred mode is the right one.
1017 */ 1023 */
1018 intel_lvds->edid = drm_get_edid(connector, 1024 edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin));
1019 intel_gmbus_get_adapter(dev_priv, 1025 if (edid) {
1020 pin)); 1026 if (drm_add_edid_modes(connector, edid)) {
1021 if (intel_lvds->edid) {
1022 if (drm_add_edid_modes(connector,
1023 intel_lvds->edid)) {
1024 drm_mode_connector_update_edid_property(connector, 1027 drm_mode_connector_update_edid_property(connector,
1025 intel_lvds->edid); 1028 edid);
1026 } else { 1029 } else {
1027 kfree(intel_lvds->edid); 1030 kfree(edid);
1028 intel_lvds->edid = NULL; 1031 edid = ERR_PTR(-EINVAL);
1029 } 1032 }
1033 } else {
1034 edid = ERR_PTR(-ENOENT);
1030 } 1035 }
1031 if (!intel_lvds->edid) { 1036 lvds_connector->base.edid = edid;
1037
1038 if (IS_ERR_OR_NULL(edid)) {
1032 /* Didn't get an EDID, so 1039 /* Didn't get an EDID, so
1033 * Set wide sync ranges so we get all modes 1040 * Set wide sync ranges so we get all modes
1034 * handed to valid_mode for checking 1041 * handed to valid_mode for checking
@@ -1041,22 +1048,26 @@ bool intel_lvds_init(struct drm_device *dev)
1041 1048
1042 list_for_each_entry(scan, &connector->probed_modes, head) { 1049 list_for_each_entry(scan, &connector->probed_modes, head) {
1043 if (scan->type & DRM_MODE_TYPE_PREFERRED) { 1050 if (scan->type & DRM_MODE_TYPE_PREFERRED) {
1044 intel_lvds->fixed_mode = 1051 DRM_DEBUG_KMS("using preferred mode from EDID: ");
1045 drm_mode_duplicate(dev, scan); 1052 drm_mode_debug_printmodeline(scan);
1046 intel_find_lvds_downclock(dev, 1053
1047 intel_lvds->fixed_mode, 1054 fixed_mode = drm_mode_duplicate(dev, scan);
1048 connector); 1055 if (fixed_mode) {
1049 goto out; 1056 intel_find_lvds_downclock(dev, fixed_mode,
1057 connector);
1058 goto out;
1059 }
1050 } 1060 }
1051 } 1061 }
1052 1062
1053 /* Failed to get EDID, what about VBT? */ 1063 /* Failed to get EDID, what about VBT? */
1054 if (dev_priv->lfp_lvds_vbt_mode) { 1064 if (dev_priv->lfp_lvds_vbt_mode) {
1055 intel_lvds->fixed_mode = 1065 DRM_DEBUG_KMS("using mode from VBT: ");
1056 drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); 1066 drm_mode_debug_printmodeline(dev_priv->lfp_lvds_vbt_mode);
1057 if (intel_lvds->fixed_mode) { 1067
1058 intel_lvds->fixed_mode->type |= 1068 fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
1059 DRM_MODE_TYPE_PREFERRED; 1069 if (fixed_mode) {
1070 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
1060 goto out; 1071 goto out;
1061 } 1072 }
1062 } 1073 }
@@ -1076,16 +1087,17 @@ bool intel_lvds_init(struct drm_device *dev)
1076 crtc = intel_get_crtc_for_pipe(dev, pipe); 1087 crtc = intel_get_crtc_for_pipe(dev, pipe);
1077 1088
1078 if (crtc && (lvds & LVDS_PORT_EN)) { 1089 if (crtc && (lvds & LVDS_PORT_EN)) {
1079 intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc); 1090 fixed_mode = intel_crtc_mode_get(dev, crtc);
1080 if (intel_lvds->fixed_mode) { 1091 if (fixed_mode) {
1081 intel_lvds->fixed_mode->type |= 1092 DRM_DEBUG_KMS("using current (BIOS) mode: ");
1082 DRM_MODE_TYPE_PREFERRED; 1093 drm_mode_debug_printmodeline(fixed_mode);
1094 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
1083 goto out; 1095 goto out;
1084 } 1096 }
1085 } 1097 }
1086 1098
1087 /* If we still don't have a mode after all that, give up. */ 1099 /* If we still don't have a mode after all that, give up. */
1088 if (!intel_lvds->fixed_mode) 1100 if (!fixed_mode)
1089 goto failed; 1101 goto failed;
1090 1102
1091out: 1103out:
@@ -1100,16 +1112,15 @@ out:
1100 I915_WRITE(PP_CONTROL, 1112 I915_WRITE(PP_CONTROL,
1101 I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); 1113 I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
1102 } 1114 }
1103 dev_priv->lid_notifier.notifier_call = intel_lid_notify; 1115 lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
1104 if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) { 1116 if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
1105 DRM_DEBUG_KMS("lid notifier registration failed\n"); 1117 DRM_DEBUG_KMS("lid notifier registration failed\n");
1106 dev_priv->lid_notifier.notifier_call = NULL; 1118 lvds_connector->lid_notifier.notifier_call = NULL;
1107 } 1119 }
1108 /* keep the LVDS connector */
1109 dev_priv->int_lvds_connector = connector;
1110 drm_sysfs_connector_add(connector); 1120 drm_sysfs_connector_add(connector);
1111 1121
1112 intel_panel_setup_backlight(dev); 1122 intel_panel_init(&intel_connector->panel, fixed_mode);
1123 intel_panel_setup_backlight(connector);
1113 1124
1114 return true; 1125 return true;
1115 1126
@@ -1117,7 +1128,9 @@ failed:
1117 DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); 1128 DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
1118 drm_connector_cleanup(connector); 1129 drm_connector_cleanup(connector);
1119 drm_encoder_cleanup(encoder); 1130 drm_encoder_cleanup(encoder);
1120 kfree(intel_lvds); 1131 if (fixed_mode)
1121 kfree(intel_connector); 1132 drm_mode_destroy(dev, fixed_mode);
1133 kfree(lvds_encoder);
1134 kfree(lvds_connector);
1122 return false; 1135 return false;
1123} 1136}
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index cabd84bf66eb..b00f1c83adce 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -45,7 +45,6 @@ int intel_connector_update_modes(struct drm_connector *connector,
45 drm_mode_connector_update_edid_property(connector, edid); 45 drm_mode_connector_update_edid_property(connector, edid);
46 ret = drm_add_edid_modes(connector, edid); 46 ret = drm_add_edid_modes(connector, edid);
47 drm_edid_to_eld(connector, edid); 47 drm_edid_to_eld(connector, edid);
48 kfree(edid);
49 48
50 return ret; 49 return ret;
51} 50}
@@ -61,12 +60,16 @@ int intel_ddc_get_modes(struct drm_connector *connector,
61 struct i2c_adapter *adapter) 60 struct i2c_adapter *adapter)
62{ 61{
63 struct edid *edid; 62 struct edid *edid;
63 int ret;
64 64
65 edid = drm_get_edid(connector, adapter); 65 edid = drm_get_edid(connector, adapter);
66 if (!edid) 66 if (!edid)
67 return 0; 67 return 0;
68 68
69 return intel_connector_update_modes(connector, edid); 69 ret = intel_connector_update_modes(connector, edid);
70 kfree(edid);
71
72 return ret;
70} 73}
71 74
72static const struct drm_prop_enum_list force_audio_names[] = { 75static const struct drm_prop_enum_list force_audio_names[] = {
@@ -94,7 +97,7 @@ intel_attach_force_audio_property(struct drm_connector *connector)
94 97
95 dev_priv->force_audio_property = prop; 98 dev_priv->force_audio_property = prop;
96 } 99 }
97 drm_connector_attach_property(connector, prop, 0); 100 drm_object_attach_property(&connector->base, prop, 0);
98} 101}
99 102
100static const struct drm_prop_enum_list broadcast_rgb_names[] = { 103static const struct drm_prop_enum_list broadcast_rgb_names[] = {
@@ -121,5 +124,5 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
121 dev_priv->broadcast_rgb_property = prop; 124 dev_priv->broadcast_rgb_property = prop;
122 } 125 }
123 126
124 drm_connector_attach_property(connector, prop, 0); 127 drm_object_attach_property(&connector->base, prop, 0);
125} 128}
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 5530413213d8..7741c22c934c 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -154,6 +154,8 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
154 struct opregion_asle __iomem *asle = dev_priv->opregion.asle; 154 struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
155 u32 max; 155 u32 max;
156 156
157 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
158
157 if (!(bclp & ASLE_BCLP_VALID)) 159 if (!(bclp & ASLE_BCLP_VALID))
158 return ASLE_BACKLIGHT_FAILED; 160 return ASLE_BACKLIGHT_FAILED;
159 161
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 495625914e4a..d7bc817f51a0 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -341,9 +341,17 @@ static int intel_overlay_off(struct intel_overlay *overlay)
341 intel_ring_emit(ring, flip_addr); 341 intel_ring_emit(ring, flip_addr);
342 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 342 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
343 /* turn overlay off */ 343 /* turn overlay off */
344 intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF); 344 if (IS_I830(dev)) {
345 intel_ring_emit(ring, flip_addr); 345 /* Workaround: Don't disable the overlay fully, since otherwise
346 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 346 * it dies on the next OVERLAY_ON cmd. */
347 intel_ring_emit(ring, MI_NOOP);
348 intel_ring_emit(ring, MI_NOOP);
349 intel_ring_emit(ring, MI_NOOP);
350 } else {
351 intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
352 intel_ring_emit(ring, flip_addr);
353 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
354 }
347 intel_ring_advance(ring); 355 intel_ring_advance(ring);
348 356
349 return intel_overlay_do_wait_request(overlay, intel_overlay_off_tail); 357 return intel_overlay_do_wait_request(overlay, intel_overlay_off_tail);
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index e019b2369861..bee8cb6108a7 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -130,32 +130,34 @@ static int is_backlight_combination_mode(struct drm_device *dev)
130 return 0; 130 return 0;
131} 131}
132 132
133static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) 133static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
134{ 134{
135 struct drm_i915_private *dev_priv = dev->dev_private;
135 u32 val; 136 u32 val;
136 137
137 /* Restore the CTL value if it lost, e.g. GPU reset */ 138 /* Restore the CTL value if it lost, e.g. GPU reset */
138 139
139 if (HAS_PCH_SPLIT(dev_priv->dev)) { 140 if (HAS_PCH_SPLIT(dev_priv->dev)) {
140 val = I915_READ(BLC_PWM_PCH_CTL2); 141 val = I915_READ(BLC_PWM_PCH_CTL2);
141 if (dev_priv->saveBLC_PWM_CTL2 == 0) { 142 if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) {
142 dev_priv->saveBLC_PWM_CTL2 = val; 143 dev_priv->regfile.saveBLC_PWM_CTL2 = val;
143 } else if (val == 0) { 144 } else if (val == 0) {
144 I915_WRITE(BLC_PWM_PCH_CTL2, 145 val = dev_priv->regfile.saveBLC_PWM_CTL2;
145 dev_priv->saveBLC_PWM_CTL2); 146 I915_WRITE(BLC_PWM_PCH_CTL2, val);
146 val = dev_priv->saveBLC_PWM_CTL2;
147 } 147 }
148 } else { 148 } else {
149 val = I915_READ(BLC_PWM_CTL); 149 val = I915_READ(BLC_PWM_CTL);
150 if (dev_priv->saveBLC_PWM_CTL == 0) { 150 if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
151 dev_priv->saveBLC_PWM_CTL = val; 151 dev_priv->regfile.saveBLC_PWM_CTL = val;
152 dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); 152 if (INTEL_INFO(dev)->gen >= 4)
153 dev_priv->regfile.saveBLC_PWM_CTL2 =
154 I915_READ(BLC_PWM_CTL2);
153 } else if (val == 0) { 155 } else if (val == 0) {
154 I915_WRITE(BLC_PWM_CTL, 156 val = dev_priv->regfile.saveBLC_PWM_CTL;
155 dev_priv->saveBLC_PWM_CTL); 157 I915_WRITE(BLC_PWM_CTL, val);
156 I915_WRITE(BLC_PWM_CTL2, 158 if (INTEL_INFO(dev)->gen >= 4)
157 dev_priv->saveBLC_PWM_CTL2); 159 I915_WRITE(BLC_PWM_CTL2,
158 val = dev_priv->saveBLC_PWM_CTL; 160 dev_priv->regfile.saveBLC_PWM_CTL2);
159 } 161 }
160 } 162 }
161 163
@@ -164,10 +166,9 @@ static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
164 166
165static u32 _intel_panel_get_max_backlight(struct drm_device *dev) 167static u32 _intel_panel_get_max_backlight(struct drm_device *dev)
166{ 168{
167 struct drm_i915_private *dev_priv = dev->dev_private;
168 u32 max; 169 u32 max;
169 170
170 max = i915_read_blc_pwm_ctl(dev_priv); 171 max = i915_read_blc_pwm_ctl(dev);
171 172
172 if (HAS_PCH_SPLIT(dev)) { 173 if (HAS_PCH_SPLIT(dev)) {
173 max >>= 16; 174 max >>= 16;
@@ -275,7 +276,7 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
275 } 276 }
276 277
277 tmp = I915_READ(BLC_PWM_CTL); 278 tmp = I915_READ(BLC_PWM_CTL);
278 if (INTEL_INFO(dev)->gen < 4) 279 if (INTEL_INFO(dev)->gen < 4)
279 level <<= 1; 280 level <<= 1;
280 tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK; 281 tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
281 I915_WRITE(BLC_PWM_CTL, tmp | level); 282 I915_WRITE(BLC_PWM_CTL, tmp | level);
@@ -374,26 +375,23 @@ static void intel_panel_init_backlight(struct drm_device *dev)
374enum drm_connector_status 375enum drm_connector_status
375intel_panel_detect(struct drm_device *dev) 376intel_panel_detect(struct drm_device *dev)
376{ 377{
377#if 0
378 struct drm_i915_private *dev_priv = dev->dev_private; 378 struct drm_i915_private *dev_priv = dev->dev_private;
379#endif
380
381 if (i915_panel_ignore_lid)
382 return i915_panel_ignore_lid > 0 ?
383 connector_status_connected :
384 connector_status_disconnected;
385 379
386 /* opregion lid state on HP 2540p is wrong at boot up,
387 * appears to be either the BIOS or Linux ACPI fault */
388#if 0
389 /* Assume that the BIOS does not lie through the OpRegion... */ 380 /* Assume that the BIOS does not lie through the OpRegion... */
390 if (dev_priv->opregion.lid_state) 381 if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) {
391 return ioread32(dev_priv->opregion.lid_state) & 0x1 ? 382 return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
392 connector_status_connected : 383 connector_status_connected :
393 connector_status_disconnected; 384 connector_status_disconnected;
394#endif 385 }
395 386
396 return connector_status_unknown; 387 switch (i915_panel_ignore_lid) {
388 case -2:
389 return connector_status_connected;
390 case -1:
391 return connector_status_disconnected;
392 default:
393 return connector_status_unknown;
394 }
397} 395}
398 396
399#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE 397#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
@@ -416,26 +414,19 @@ static const struct backlight_ops intel_panel_bl_ops = {
416 .get_brightness = intel_panel_get_brightness, 414 .get_brightness = intel_panel_get_brightness,
417}; 415};
418 416
419int intel_panel_setup_backlight(struct drm_device *dev) 417int intel_panel_setup_backlight(struct drm_connector *connector)
420{ 418{
419 struct drm_device *dev = connector->dev;
421 struct drm_i915_private *dev_priv = dev->dev_private; 420 struct drm_i915_private *dev_priv = dev->dev_private;
422 struct backlight_properties props; 421 struct backlight_properties props;
423 struct drm_connector *connector;
424 422
425 intel_panel_init_backlight(dev); 423 intel_panel_init_backlight(dev);
426 424
427 if (dev_priv->int_lvds_connector)
428 connector = dev_priv->int_lvds_connector;
429 else if (dev_priv->int_edp_connector)
430 connector = dev_priv->int_edp_connector;
431 else
432 return -ENODEV;
433
434 memset(&props, 0, sizeof(props)); 425 memset(&props, 0, sizeof(props));
435 props.type = BACKLIGHT_RAW; 426 props.type = BACKLIGHT_RAW;
436 props.max_brightness = _intel_panel_get_max_backlight(dev); 427 props.max_brightness = _intel_panel_get_max_backlight(dev);
437 if (props.max_brightness == 0) { 428 if (props.max_brightness == 0) {
438 DRM_ERROR("Failed to get maximum backlight value\n"); 429 DRM_DEBUG_DRIVER("Failed to get maximum backlight value\n");
439 return -ENODEV; 430 return -ENODEV;
440 } 431 }
441 dev_priv->backlight = 432 dev_priv->backlight =
@@ -460,9 +451,9 @@ void intel_panel_destroy_backlight(struct drm_device *dev)
460 backlight_device_unregister(dev_priv->backlight); 451 backlight_device_unregister(dev_priv->backlight);
461} 452}
462#else 453#else
463int intel_panel_setup_backlight(struct drm_device *dev) 454int intel_panel_setup_backlight(struct drm_connector *connector)
464{ 455{
465 intel_panel_init_backlight(dev); 456 intel_panel_init_backlight(connector->dev);
466 return 0; 457 return 0;
467} 458}
468 459
@@ -471,3 +462,20 @@ void intel_panel_destroy_backlight(struct drm_device *dev)
471 return; 462 return;
472} 463}
473#endif 464#endif
465
466int intel_panel_init(struct intel_panel *panel,
467 struct drm_display_mode *fixed_mode)
468{
469 panel->fixed_mode = fixed_mode;
470
471 return 0;
472}
473
474void intel_panel_fini(struct intel_panel *panel)
475{
476 struct intel_connector *intel_connector =
477 container_of(panel, struct intel_connector, panel);
478
479 if (panel->fixed_mode)
480 drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
481}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 72f41aaa71ff..e83a11794172 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -44,6 +44,14 @@
44 * i915.i915_enable_fbc parameter 44 * i915.i915_enable_fbc parameter
45 */ 45 */
46 46
47static bool intel_crtc_active(struct drm_crtc *crtc)
48{
49 /* Be paranoid as we can arrive here with only partial
50 * state retrieved from the hardware during setup.
51 */
52 return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
53}
54
47static void i8xx_disable_fbc(struct drm_device *dev) 55static void i8xx_disable_fbc(struct drm_device *dev)
48{ 56{
49 struct drm_i915_private *dev_priv = dev->dev_private; 57 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -405,9 +413,8 @@ void intel_update_fbc(struct drm_device *dev)
405 * - going to an unsupported config (interlace, pixel multiply, etc.) 413 * - going to an unsupported config (interlace, pixel multiply, etc.)
406 */ 414 */
407 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { 415 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
408 if (tmp_crtc->enabled && 416 if (intel_crtc_active(tmp_crtc) &&
409 !to_intel_crtc(tmp_crtc)->primary_disabled && 417 !to_intel_crtc(tmp_crtc)->primary_disabled) {
410 tmp_crtc->fb) {
411 if (crtc) { 418 if (crtc) {
412 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); 419 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
413 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; 420 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
@@ -992,7 +999,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
992 struct drm_crtc *crtc, *enabled = NULL; 999 struct drm_crtc *crtc, *enabled = NULL;
993 1000
994 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 1001 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
995 if (crtc->enabled && crtc->fb) { 1002 if (intel_crtc_active(crtc)) {
996 if (enabled) 1003 if (enabled)
997 return NULL; 1004 return NULL;
998 enabled = crtc; 1005 enabled = crtc;
@@ -1086,7 +1093,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
1086 int entries, tlb_miss; 1093 int entries, tlb_miss;
1087 1094
1088 crtc = intel_get_crtc_for_plane(dev, plane); 1095 crtc = intel_get_crtc_for_plane(dev, plane);
1089 if (crtc->fb == NULL || !crtc->enabled) { 1096 if (!intel_crtc_active(crtc)) {
1090 *cursor_wm = cursor->guard_size; 1097 *cursor_wm = cursor->guard_size;
1091 *plane_wm = display->guard_size; 1098 *plane_wm = display->guard_size;
1092 return false; 1099 return false;
@@ -1215,7 +1222,7 @@ static bool vlv_compute_drain_latency(struct drm_device *dev,
1215 int entries; 1222 int entries;
1216 1223
1217 crtc = intel_get_crtc_for_plane(dev, plane); 1224 crtc = intel_get_crtc_for_plane(dev, plane);
1218 if (crtc->fb == NULL || !crtc->enabled) 1225 if (!intel_crtc_active(crtc))
1219 return false; 1226 return false;
1220 1227
1221 clock = crtc->mode.clock; /* VESA DOT Clock */ 1228 clock = crtc->mode.clock; /* VESA DOT Clock */
@@ -1286,6 +1293,7 @@ static void valleyview_update_wm(struct drm_device *dev)
1286 struct drm_i915_private *dev_priv = dev->dev_private; 1293 struct drm_i915_private *dev_priv = dev->dev_private;
1287 int planea_wm, planeb_wm, cursora_wm, cursorb_wm; 1294 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1288 int plane_sr, cursor_sr; 1295 int plane_sr, cursor_sr;
1296 int ignore_plane_sr, ignore_cursor_sr;
1289 unsigned int enabled = 0; 1297 unsigned int enabled = 0;
1290 1298
1291 vlv_update_drain_latency(dev); 1299 vlv_update_drain_latency(dev);
@@ -1302,17 +1310,23 @@ static void valleyview_update_wm(struct drm_device *dev)
1302 &planeb_wm, &cursorb_wm)) 1310 &planeb_wm, &cursorb_wm))
1303 enabled |= 2; 1311 enabled |= 2;
1304 1312
1305 plane_sr = cursor_sr = 0;
1306 if (single_plane_enabled(enabled) && 1313 if (single_plane_enabled(enabled) &&
1307 g4x_compute_srwm(dev, ffs(enabled) - 1, 1314 g4x_compute_srwm(dev, ffs(enabled) - 1,
1308 sr_latency_ns, 1315 sr_latency_ns,
1309 &valleyview_wm_info, 1316 &valleyview_wm_info,
1310 &valleyview_cursor_wm_info, 1317 &valleyview_cursor_wm_info,
1311 &plane_sr, &cursor_sr)) 1318 &plane_sr, &ignore_cursor_sr) &&
1319 g4x_compute_srwm(dev, ffs(enabled) - 1,
1320 2*sr_latency_ns,
1321 &valleyview_wm_info,
1322 &valleyview_cursor_wm_info,
1323 &ignore_plane_sr, &cursor_sr)) {
1312 I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN); 1324 I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
1313 else 1325 } else {
1314 I915_WRITE(FW_BLC_SELF_VLV, 1326 I915_WRITE(FW_BLC_SELF_VLV,
1315 I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN); 1327 I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
1328 plane_sr = cursor_sr = 0;
1329 }
1316 1330
1317 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", 1331 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1318 planea_wm, cursora_wm, 1332 planea_wm, cursora_wm,
@@ -1325,10 +1339,11 @@ static void valleyview_update_wm(struct drm_device *dev)
1325 (planeb_wm << DSPFW_PLANEB_SHIFT) | 1339 (planeb_wm << DSPFW_PLANEB_SHIFT) |
1326 planea_wm); 1340 planea_wm);
1327 I915_WRITE(DSPFW2, 1341 I915_WRITE(DSPFW2,
1328 (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) | 1342 (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
1329 (cursora_wm << DSPFW_CURSORA_SHIFT)); 1343 (cursora_wm << DSPFW_CURSORA_SHIFT));
1330 I915_WRITE(DSPFW3, 1344 I915_WRITE(DSPFW3,
1331 (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT))); 1345 (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
1346 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1332} 1347}
1333 1348
1334static void g4x_update_wm(struct drm_device *dev) 1349static void g4x_update_wm(struct drm_device *dev)
@@ -1351,17 +1366,18 @@ static void g4x_update_wm(struct drm_device *dev)
1351 &planeb_wm, &cursorb_wm)) 1366 &planeb_wm, &cursorb_wm))
1352 enabled |= 2; 1367 enabled |= 2;
1353 1368
1354 plane_sr = cursor_sr = 0;
1355 if (single_plane_enabled(enabled) && 1369 if (single_plane_enabled(enabled) &&
1356 g4x_compute_srwm(dev, ffs(enabled) - 1, 1370 g4x_compute_srwm(dev, ffs(enabled) - 1,
1357 sr_latency_ns, 1371 sr_latency_ns,
1358 &g4x_wm_info, 1372 &g4x_wm_info,
1359 &g4x_cursor_wm_info, 1373 &g4x_cursor_wm_info,
1360 &plane_sr, &cursor_sr)) 1374 &plane_sr, &cursor_sr)) {
1361 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); 1375 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
1362 else 1376 } else {
1363 I915_WRITE(FW_BLC_SELF, 1377 I915_WRITE(FW_BLC_SELF,
1364 I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN); 1378 I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
1379 plane_sr = cursor_sr = 0;
1380 }
1365 1381
1366 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", 1382 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1367 planea_wm, cursora_wm, 1383 planea_wm, cursora_wm,
@@ -1374,11 +1390,11 @@ static void g4x_update_wm(struct drm_device *dev)
1374 (planeb_wm << DSPFW_PLANEB_SHIFT) | 1390 (planeb_wm << DSPFW_PLANEB_SHIFT) |
1375 planea_wm); 1391 planea_wm);
1376 I915_WRITE(DSPFW2, 1392 I915_WRITE(DSPFW2,
1377 (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) | 1393 (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
1378 (cursora_wm << DSPFW_CURSORA_SHIFT)); 1394 (cursora_wm << DSPFW_CURSORA_SHIFT));
1379 /* HPLL off in SR has some issues on G4x... disable it */ 1395 /* HPLL off in SR has some issues on G4x... disable it */
1380 I915_WRITE(DSPFW3, 1396 I915_WRITE(DSPFW3,
1381 (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) | 1397 (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
1382 (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); 1398 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1383} 1399}
1384 1400
@@ -1467,10 +1483,13 @@ static void i9xx_update_wm(struct drm_device *dev)
1467 1483
1468 fifo_size = dev_priv->display.get_fifo_size(dev, 0); 1484 fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1469 crtc = intel_get_crtc_for_plane(dev, 0); 1485 crtc = intel_get_crtc_for_plane(dev, 0);
1470 if (crtc->enabled && crtc->fb) { 1486 if (intel_crtc_active(crtc)) {
1487 int cpp = crtc->fb->bits_per_pixel / 8;
1488 if (IS_GEN2(dev))
1489 cpp = 4;
1490
1471 planea_wm = intel_calculate_wm(crtc->mode.clock, 1491 planea_wm = intel_calculate_wm(crtc->mode.clock,
1472 wm_info, fifo_size, 1492 wm_info, fifo_size, cpp,
1473 crtc->fb->bits_per_pixel / 8,
1474 latency_ns); 1493 latency_ns);
1475 enabled = crtc; 1494 enabled = crtc;
1476 } else 1495 } else
@@ -1478,10 +1497,13 @@ static void i9xx_update_wm(struct drm_device *dev)
1478 1497
1479 fifo_size = dev_priv->display.get_fifo_size(dev, 1); 1498 fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1480 crtc = intel_get_crtc_for_plane(dev, 1); 1499 crtc = intel_get_crtc_for_plane(dev, 1);
1481 if (crtc->enabled && crtc->fb) { 1500 if (intel_crtc_active(crtc)) {
1501 int cpp = crtc->fb->bits_per_pixel / 8;
1502 if (IS_GEN2(dev))
1503 cpp = 4;
1504
1482 planeb_wm = intel_calculate_wm(crtc->mode.clock, 1505 planeb_wm = intel_calculate_wm(crtc->mode.clock,
1483 wm_info, fifo_size, 1506 wm_info, fifo_size, cpp,
1484 crtc->fb->bits_per_pixel / 8,
1485 latency_ns); 1507 latency_ns);
1486 if (enabled == NULL) 1508 if (enabled == NULL)
1487 enabled = crtc; 1509 enabled = crtc;
@@ -1571,8 +1593,7 @@ static void i830_update_wm(struct drm_device *dev)
1571 1593
1572 planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info, 1594 planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
1573 dev_priv->display.get_fifo_size(dev, 0), 1595 dev_priv->display.get_fifo_size(dev, 0),
1574 crtc->fb->bits_per_pixel / 8, 1596 4, latency_ns);
1575 latency_ns);
1576 fwater_lo = I915_READ(FW_BLC) & ~0xfff; 1597 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
1577 fwater_lo |= (3<<8) | planea_wm; 1598 fwater_lo |= (3<<8) | planea_wm;
1578 1599
@@ -1805,8 +1826,110 @@ static void sandybridge_update_wm(struct drm_device *dev)
1805 enabled |= 2; 1826 enabled |= 2;
1806 } 1827 }
1807 1828
1808 if ((dev_priv->num_pipe == 3) && 1829 /*
1809 g4x_compute_wm0(dev, 2, 1830 * Calculate and update the self-refresh watermark only when one
1831 * display plane is used.
1832 *
1833 * SNB support 3 levels of watermark.
1834 *
1835 * WM1/WM2/WM2 watermarks have to be enabled in the ascending order,
1836 * and disabled in the descending order
1837 *
1838 */
1839 I915_WRITE(WM3_LP_ILK, 0);
1840 I915_WRITE(WM2_LP_ILK, 0);
1841 I915_WRITE(WM1_LP_ILK, 0);
1842
1843 if (!single_plane_enabled(enabled) ||
1844 dev_priv->sprite_scaling_enabled)
1845 return;
1846 enabled = ffs(enabled) - 1;
1847
1848 /* WM1 */
1849 if (!ironlake_compute_srwm(dev, 1, enabled,
1850 SNB_READ_WM1_LATENCY() * 500,
1851 &sandybridge_display_srwm_info,
1852 &sandybridge_cursor_srwm_info,
1853 &fbc_wm, &plane_wm, &cursor_wm))
1854 return;
1855
1856 I915_WRITE(WM1_LP_ILK,
1857 WM1_LP_SR_EN |
1858 (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1859 (fbc_wm << WM1_LP_FBC_SHIFT) |
1860 (plane_wm << WM1_LP_SR_SHIFT) |
1861 cursor_wm);
1862
1863 /* WM2 */
1864 if (!ironlake_compute_srwm(dev, 2, enabled,
1865 SNB_READ_WM2_LATENCY() * 500,
1866 &sandybridge_display_srwm_info,
1867 &sandybridge_cursor_srwm_info,
1868 &fbc_wm, &plane_wm, &cursor_wm))
1869 return;
1870
1871 I915_WRITE(WM2_LP_ILK,
1872 WM2_LP_EN |
1873 (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1874 (fbc_wm << WM1_LP_FBC_SHIFT) |
1875 (plane_wm << WM1_LP_SR_SHIFT) |
1876 cursor_wm);
1877
1878 /* WM3 */
1879 if (!ironlake_compute_srwm(dev, 3, enabled,
1880 SNB_READ_WM3_LATENCY() * 500,
1881 &sandybridge_display_srwm_info,
1882 &sandybridge_cursor_srwm_info,
1883 &fbc_wm, &plane_wm, &cursor_wm))
1884 return;
1885
1886 I915_WRITE(WM3_LP_ILK,
1887 WM3_LP_EN |
1888 (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1889 (fbc_wm << WM1_LP_FBC_SHIFT) |
1890 (plane_wm << WM1_LP_SR_SHIFT) |
1891 cursor_wm);
1892}
1893
1894static void ivybridge_update_wm(struct drm_device *dev)
1895{
1896 struct drm_i915_private *dev_priv = dev->dev_private;
1897 int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
1898 u32 val;
1899 int fbc_wm, plane_wm, cursor_wm;
1900 int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
1901 unsigned int enabled;
1902
1903 enabled = 0;
1904 if (g4x_compute_wm0(dev, 0,
1905 &sandybridge_display_wm_info, latency,
1906 &sandybridge_cursor_wm_info, latency,
1907 &plane_wm, &cursor_wm)) {
1908 val = I915_READ(WM0_PIPEA_ILK);
1909 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1910 I915_WRITE(WM0_PIPEA_ILK, val |
1911 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1912 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1913 " plane %d, " "cursor: %d\n",
1914 plane_wm, cursor_wm);
1915 enabled |= 1;
1916 }
1917
1918 if (g4x_compute_wm0(dev, 1,
1919 &sandybridge_display_wm_info, latency,
1920 &sandybridge_cursor_wm_info, latency,
1921 &plane_wm, &cursor_wm)) {
1922 val = I915_READ(WM0_PIPEB_ILK);
1923 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1924 I915_WRITE(WM0_PIPEB_ILK, val |
1925 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1926 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1927 " plane %d, cursor: %d\n",
1928 plane_wm, cursor_wm);
1929 enabled |= 2;
1930 }
1931
1932 if (g4x_compute_wm0(dev, 2,
1810 &sandybridge_display_wm_info, latency, 1933 &sandybridge_display_wm_info, latency,
1811 &sandybridge_cursor_wm_info, latency, 1934 &sandybridge_cursor_wm_info, latency,
1812 &plane_wm, &cursor_wm)) { 1935 &plane_wm, &cursor_wm)) {
@@ -1869,12 +1992,17 @@ static void sandybridge_update_wm(struct drm_device *dev)
1869 (plane_wm << WM1_LP_SR_SHIFT) | 1992 (plane_wm << WM1_LP_SR_SHIFT) |
1870 cursor_wm); 1993 cursor_wm);
1871 1994
1872 /* WM3 */ 1995 /* WM3, note we have to correct the cursor latency */
1873 if (!ironlake_compute_srwm(dev, 3, enabled, 1996 if (!ironlake_compute_srwm(dev, 3, enabled,
1874 SNB_READ_WM3_LATENCY() * 500, 1997 SNB_READ_WM3_LATENCY() * 500,
1875 &sandybridge_display_srwm_info, 1998 &sandybridge_display_srwm_info,
1876 &sandybridge_cursor_srwm_info, 1999 &sandybridge_cursor_srwm_info,
1877 &fbc_wm, &plane_wm, &cursor_wm)) 2000 &fbc_wm, &plane_wm, &ignore_cursor_wm) ||
2001 !ironlake_compute_srwm(dev, 3, enabled,
2002 2 * SNB_READ_WM3_LATENCY() * 500,
2003 &sandybridge_display_srwm_info,
2004 &sandybridge_cursor_srwm_info,
2005 &ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
1878 return; 2006 return;
1879 2007
1880 I915_WRITE(WM3_LP_ILK, 2008 I915_WRITE(WM3_LP_ILK,
@@ -1923,7 +2051,7 @@ sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
1923 int entries, tlb_miss; 2051 int entries, tlb_miss;
1924 2052
1925 crtc = intel_get_crtc_for_plane(dev, plane); 2053 crtc = intel_get_crtc_for_plane(dev, plane);
1926 if (crtc->fb == NULL || !crtc->enabled) { 2054 if (!intel_crtc_active(crtc)) {
1927 *sprite_wm = display->guard_size; 2055 *sprite_wm = display->guard_size;
1928 return false; 2056 return false;
1929 } 2057 }
@@ -2323,7 +2451,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
2323 struct drm_i915_private *dev_priv = dev->dev_private; 2451 struct drm_i915_private *dev_priv = dev->dev_private;
2324 u32 limits = gen6_rps_limits(dev_priv, &val); 2452 u32 limits = gen6_rps_limits(dev_priv, &val);
2325 2453
2326 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 2454 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
2327 WARN_ON(val > dev_priv->rps.max_delay); 2455 WARN_ON(val > dev_priv->rps.max_delay);
2328 WARN_ON(val < dev_priv->rps.min_delay); 2456 WARN_ON(val < dev_priv->rps.min_delay);
2329 2457
@@ -2373,15 +2501,9 @@ int intel_enable_rc6(const struct drm_device *dev)
2373 if (i915_enable_rc6 >= 0) 2501 if (i915_enable_rc6 >= 0)
2374 return i915_enable_rc6; 2502 return i915_enable_rc6;
2375 2503
2376 if (INTEL_INFO(dev)->gen == 5) { 2504 /* Disable RC6 on Ironlake */
2377#ifdef CONFIG_INTEL_IOMMU 2505 if (INTEL_INFO(dev)->gen == 5)
2378 /* Disable rc6 on ilk if VT-d is on. */ 2506 return 0;
2379 if (intel_iommu_gfx_mapped)
2380 return false;
2381#endif
2382 DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n");
2383 return INTEL_RC6_ENABLE;
2384 }
2385 2507
2386 if (IS_HASWELL(dev)) { 2508 if (IS_HASWELL(dev)) {
2387 DRM_DEBUG_DRIVER("Haswell: only RC6 available\n"); 2509 DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
@@ -2404,12 +2526,12 @@ static void gen6_enable_rps(struct drm_device *dev)
2404 struct intel_ring_buffer *ring; 2526 struct intel_ring_buffer *ring;
2405 u32 rp_state_cap; 2527 u32 rp_state_cap;
2406 u32 gt_perf_status; 2528 u32 gt_perf_status;
2407 u32 pcu_mbox, rc6_mask = 0; 2529 u32 rc6vids, pcu_mbox, rc6_mask = 0;
2408 u32 gtfifodbg; 2530 u32 gtfifodbg;
2409 int rc6_mode; 2531 int rc6_mode;
2410 int i; 2532 int i, ret;
2411 2533
2412 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 2534 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
2413 2535
2414 /* Here begins a magic sequence of register writes to enable 2536 /* Here begins a magic sequence of register writes to enable
2415 * auto-downclocking. 2537 * auto-downclocking.
@@ -2503,30 +2625,16 @@ static void gen6_enable_rps(struct drm_device *dev)
2503 GEN6_RP_UP_BUSY_AVG | 2625 GEN6_RP_UP_BUSY_AVG |
2504 (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT)); 2626 (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
2505 2627
2506 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 2628 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
2507 500)) 2629 if (!ret) {
2508 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); 2630 pcu_mbox = 0;
2509 2631 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
2510 I915_WRITE(GEN6_PCODE_DATA, 0); 2632 if (ret && pcu_mbox & (1<<31)) { /* OC supported */
2511 I915_WRITE(GEN6_PCODE_MAILBOX, 2633 dev_priv->rps.max_delay = pcu_mbox & 0xff;
2512 GEN6_PCODE_READY | 2634 DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
2513 GEN6_PCODE_WRITE_MIN_FREQ_TABLE); 2635 }
2514 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 2636 } else {
2515 500)) 2637 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
2516 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
2517
2518 /* Check for overclock support */
2519 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
2520 500))
2521 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
2522 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
2523 pcu_mbox = I915_READ(GEN6_PCODE_DATA);
2524 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
2525 500))
2526 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
2527 if (pcu_mbox & (1<<31)) { /* OC supported */
2528 dev_priv->rps.max_delay = pcu_mbox & 0xff;
2529 DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
2530 } 2638 }
2531 2639
2532 gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8); 2640 gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
@@ -2540,6 +2648,20 @@ static void gen6_enable_rps(struct drm_device *dev)
2540 /* enable all PM interrupts */ 2648 /* enable all PM interrupts */
2541 I915_WRITE(GEN6_PMINTRMSK, 0); 2649 I915_WRITE(GEN6_PMINTRMSK, 0);
2542 2650
2651 rc6vids = 0;
2652 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
2653 if (IS_GEN6(dev) && ret) {
2654 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
2655 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
2656 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
2657 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
2658 rc6vids &= 0xffff00;
2659 rc6vids |= GEN6_ENCODE_RC6_VID(450);
2660 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
2661 if (ret)
2662 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
2663 }
2664
2543 gen6_gt_force_wake_put(dev_priv); 2665 gen6_gt_force_wake_put(dev_priv);
2544} 2666}
2545 2667
@@ -2547,10 +2669,11 @@ static void gen6_update_ring_freq(struct drm_device *dev)
2547{ 2669{
2548 struct drm_i915_private *dev_priv = dev->dev_private; 2670 struct drm_i915_private *dev_priv = dev->dev_private;
2549 int min_freq = 15; 2671 int min_freq = 15;
2550 int gpu_freq, ia_freq, max_ia_freq; 2672 int gpu_freq;
2673 unsigned int ia_freq, max_ia_freq;
2551 int scaling_factor = 180; 2674 int scaling_factor = 180;
2552 2675
2553 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 2676 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
2554 2677
2555 max_ia_freq = cpufreq_quick_get_max(0); 2678 max_ia_freq = cpufreq_quick_get_max(0);
2556 /* 2679 /*
@@ -2581,17 +2704,11 @@ static void gen6_update_ring_freq(struct drm_device *dev)
2581 else 2704 else
2582 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); 2705 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
2583 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100); 2706 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
2707 ia_freq <<= GEN6_PCODE_FREQ_IA_RATIO_SHIFT;
2584 2708
2585 I915_WRITE(GEN6_PCODE_DATA, 2709 sandybridge_pcode_write(dev_priv,
2586 (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) | 2710 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
2587 gpu_freq); 2711 ia_freq | gpu_freq);
2588 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
2589 GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
2590 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
2591 GEN6_PCODE_READY) == 0, 10)) {
2592 DRM_ERROR("pcode write of freq table timed out\n");
2593 continue;
2594 }
2595 } 2712 }
2596} 2713}
2597 2714
@@ -2599,16 +2716,16 @@ void ironlake_teardown_rc6(struct drm_device *dev)
2599{ 2716{
2600 struct drm_i915_private *dev_priv = dev->dev_private; 2717 struct drm_i915_private *dev_priv = dev->dev_private;
2601 2718
2602 if (dev_priv->renderctx) { 2719 if (dev_priv->ips.renderctx) {
2603 i915_gem_object_unpin(dev_priv->renderctx); 2720 i915_gem_object_unpin(dev_priv->ips.renderctx);
2604 drm_gem_object_unreference(&dev_priv->renderctx->base); 2721 drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
2605 dev_priv->renderctx = NULL; 2722 dev_priv->ips.renderctx = NULL;
2606 } 2723 }
2607 2724
2608 if (dev_priv->pwrctx) { 2725 if (dev_priv->ips.pwrctx) {
2609 i915_gem_object_unpin(dev_priv->pwrctx); 2726 i915_gem_object_unpin(dev_priv->ips.pwrctx);
2610 drm_gem_object_unreference(&dev_priv->pwrctx->base); 2727 drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
2611 dev_priv->pwrctx = NULL; 2728 dev_priv->ips.pwrctx = NULL;
2612 } 2729 }
2613} 2730}
2614 2731
@@ -2634,14 +2751,14 @@ static int ironlake_setup_rc6(struct drm_device *dev)
2634{ 2751{
2635 struct drm_i915_private *dev_priv = dev->dev_private; 2752 struct drm_i915_private *dev_priv = dev->dev_private;
2636 2753
2637 if (dev_priv->renderctx == NULL) 2754 if (dev_priv->ips.renderctx == NULL)
2638 dev_priv->renderctx = intel_alloc_context_page(dev); 2755 dev_priv->ips.renderctx = intel_alloc_context_page(dev);
2639 if (!dev_priv->renderctx) 2756 if (!dev_priv->ips.renderctx)
2640 return -ENOMEM; 2757 return -ENOMEM;
2641 2758
2642 if (dev_priv->pwrctx == NULL) 2759 if (dev_priv->ips.pwrctx == NULL)
2643 dev_priv->pwrctx = intel_alloc_context_page(dev); 2760 dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
2644 if (!dev_priv->pwrctx) { 2761 if (!dev_priv->ips.pwrctx) {
2645 ironlake_teardown_rc6(dev); 2762 ironlake_teardown_rc6(dev);
2646 return -ENOMEM; 2763 return -ENOMEM;
2647 } 2764 }
@@ -2653,6 +2770,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
2653{ 2770{
2654 struct drm_i915_private *dev_priv = dev->dev_private; 2771 struct drm_i915_private *dev_priv = dev->dev_private;
2655 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 2772 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
2773 bool was_interruptible;
2656 int ret; 2774 int ret;
2657 2775
2658 /* rc6 disabled by default due to repeated reports of hanging during 2776 /* rc6 disabled by default due to repeated reports of hanging during
@@ -2667,6 +2785,9 @@ static void ironlake_enable_rc6(struct drm_device *dev)
2667 if (ret) 2785 if (ret)
2668 return; 2786 return;
2669 2787
2788 was_interruptible = dev_priv->mm.interruptible;
2789 dev_priv->mm.interruptible = false;
2790
2670 /* 2791 /*
2671 * GPU can automatically power down the render unit if given a page 2792 * GPU can automatically power down the render unit if given a page
2672 * to save state. 2793 * to save state.
@@ -2674,12 +2795,13 @@ static void ironlake_enable_rc6(struct drm_device *dev)
2674 ret = intel_ring_begin(ring, 6); 2795 ret = intel_ring_begin(ring, 6);
2675 if (ret) { 2796 if (ret) {
2676 ironlake_teardown_rc6(dev); 2797 ironlake_teardown_rc6(dev);
2798 dev_priv->mm.interruptible = was_interruptible;
2677 return; 2799 return;
2678 } 2800 }
2679 2801
2680 intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); 2802 intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
2681 intel_ring_emit(ring, MI_SET_CONTEXT); 2803 intel_ring_emit(ring, MI_SET_CONTEXT);
2682 intel_ring_emit(ring, dev_priv->renderctx->gtt_offset | 2804 intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
2683 MI_MM_SPACE_GTT | 2805 MI_MM_SPACE_GTT |
2684 MI_SAVE_EXT_STATE_EN | 2806 MI_SAVE_EXT_STATE_EN |
2685 MI_RESTORE_EXT_STATE_EN | 2807 MI_RESTORE_EXT_STATE_EN |
@@ -2694,14 +2816,15 @@ static void ironlake_enable_rc6(struct drm_device *dev)
2694 * does an implicit flush, combined with MI_FLUSH above, it should be 2816 * does an implicit flush, combined with MI_FLUSH above, it should be
2695 * safe to assume that renderctx is valid 2817 * safe to assume that renderctx is valid
2696 */ 2818 */
2697 ret = intel_wait_ring_idle(ring); 2819 ret = intel_ring_idle(ring);
2820 dev_priv->mm.interruptible = was_interruptible;
2698 if (ret) { 2821 if (ret) {
2699 DRM_ERROR("failed to enable ironlake power power savings\n"); 2822 DRM_ERROR("failed to enable ironlake power power savings\n");
2700 ironlake_teardown_rc6(dev); 2823 ironlake_teardown_rc6(dev);
2701 return; 2824 return;
2702 } 2825 }
2703 2826
2704 I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN); 2827 I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
2705 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); 2828 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
2706} 2829}
2707 2830
@@ -3310,37 +3433,72 @@ static void intel_init_emon(struct drm_device *dev)
3310 3433
3311void intel_disable_gt_powersave(struct drm_device *dev) 3434void intel_disable_gt_powersave(struct drm_device *dev)
3312{ 3435{
3436 struct drm_i915_private *dev_priv = dev->dev_private;
3437
3313 if (IS_IRONLAKE_M(dev)) { 3438 if (IS_IRONLAKE_M(dev)) {
3314 ironlake_disable_drps(dev); 3439 ironlake_disable_drps(dev);
3315 ironlake_disable_rc6(dev); 3440 ironlake_disable_rc6(dev);
3316 } else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) { 3441 } else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
3442 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
3443 mutex_lock(&dev_priv->rps.hw_lock);
3317 gen6_disable_rps(dev); 3444 gen6_disable_rps(dev);
3445 mutex_unlock(&dev_priv->rps.hw_lock);
3318 } 3446 }
3319} 3447}
3320 3448
3449static void intel_gen6_powersave_work(struct work_struct *work)
3450{
3451 struct drm_i915_private *dev_priv =
3452 container_of(work, struct drm_i915_private,
3453 rps.delayed_resume_work.work);
3454 struct drm_device *dev = dev_priv->dev;
3455
3456 mutex_lock(&dev_priv->rps.hw_lock);
3457 gen6_enable_rps(dev);
3458 gen6_update_ring_freq(dev);
3459 mutex_unlock(&dev_priv->rps.hw_lock);
3460}
3461
3321void intel_enable_gt_powersave(struct drm_device *dev) 3462void intel_enable_gt_powersave(struct drm_device *dev)
3322{ 3463{
3464 struct drm_i915_private *dev_priv = dev->dev_private;
3465
3323 if (IS_IRONLAKE_M(dev)) { 3466 if (IS_IRONLAKE_M(dev)) {
3324 ironlake_enable_drps(dev); 3467 ironlake_enable_drps(dev);
3325 ironlake_enable_rc6(dev); 3468 ironlake_enable_rc6(dev);
3326 intel_init_emon(dev); 3469 intel_init_emon(dev);
3327 } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) { 3470 } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
3328 gen6_enable_rps(dev); 3471 /*
3329 gen6_update_ring_freq(dev); 3472 * PCU communication is slow and this doesn't need to be
3473 * done at any specific time, so do this out of our fast path
3474 * to make resume and init faster.
3475 */
3476 schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
3477 round_jiffies_up_relative(HZ));
3330 } 3478 }
3331} 3479}
3332 3480
3481static void ibx_init_clock_gating(struct drm_device *dev)
3482{
3483 struct drm_i915_private *dev_priv = dev->dev_private;
3484
3485 /*
3486 * On Ibex Peak and Cougar Point, we need to disable clock
3487 * gating for the panel power sequencer or it will fail to
3488 * start up when no ports are active.
3489 */
3490 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
3491}
3492
3333static void ironlake_init_clock_gating(struct drm_device *dev) 3493static void ironlake_init_clock_gating(struct drm_device *dev)
3334{ 3494{
3335 struct drm_i915_private *dev_priv = dev->dev_private; 3495 struct drm_i915_private *dev_priv = dev->dev_private;
3336 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; 3496 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
3337 3497
3338 /* Required for FBC */ 3498 /* Required for FBC */
3339 dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE | 3499 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
3340 DPFCRUNIT_CLOCK_GATE_DISABLE | 3500 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
3341 DPFDUNIT_CLOCK_GATE_DISABLE; 3501 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
3342 /* Required for CxSR */
3343 dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
3344 3502
3345 I915_WRITE(PCH_3DCGDIS0, 3503 I915_WRITE(PCH_3DCGDIS0,
3346 MARIUNIT_CLOCK_GATE_DISABLE | 3504 MARIUNIT_CLOCK_GATE_DISABLE |
@@ -3348,8 +3506,6 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
3348 I915_WRITE(PCH_3DCGDIS1, 3506 I915_WRITE(PCH_3DCGDIS1,
3349 VFMUNIT_CLOCK_GATE_DISABLE); 3507 VFMUNIT_CLOCK_GATE_DISABLE);
3350 3508
3351 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
3352
3353 /* 3509 /*
3354 * According to the spec the following bits should be set in 3510 * According to the spec the following bits should be set in
3355 * order to enable memory self-refresh 3511 * order to enable memory self-refresh
@@ -3360,9 +3516,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
3360 I915_WRITE(ILK_DISPLAY_CHICKEN2, 3516 I915_WRITE(ILK_DISPLAY_CHICKEN2,
3361 (I915_READ(ILK_DISPLAY_CHICKEN2) | 3517 (I915_READ(ILK_DISPLAY_CHICKEN2) |
3362 ILK_DPARB_GATE | ILK_VSDPFD_FULL)); 3518 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
3363 I915_WRITE(ILK_DSPCLK_GATE, 3519 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
3364 (I915_READ(ILK_DSPCLK_GATE) |
3365 ILK_DPARB_CLK_GATE));
3366 I915_WRITE(DISP_ARB_CTL, 3520 I915_WRITE(DISP_ARB_CTL,
3367 (I915_READ(DISP_ARB_CTL) | 3521 (I915_READ(DISP_ARB_CTL) |
3368 DISP_FBC_WM_DIS)); 3522 DISP_FBC_WM_DIS));
@@ -3384,33 +3538,70 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
3384 I915_WRITE(ILK_DISPLAY_CHICKEN2, 3538 I915_WRITE(ILK_DISPLAY_CHICKEN2,
3385 I915_READ(ILK_DISPLAY_CHICKEN2) | 3539 I915_READ(ILK_DISPLAY_CHICKEN2) |
3386 ILK_DPARB_GATE); 3540 ILK_DPARB_GATE);
3387 I915_WRITE(ILK_DSPCLK_GATE,
3388 I915_READ(ILK_DSPCLK_GATE) |
3389 ILK_DPFC_DIS1 |
3390 ILK_DPFC_DIS2 |
3391 ILK_CLK_FBC);
3392 } 3541 }
3393 3542
3543 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
3544
3394 I915_WRITE(ILK_DISPLAY_CHICKEN2, 3545 I915_WRITE(ILK_DISPLAY_CHICKEN2,
3395 I915_READ(ILK_DISPLAY_CHICKEN2) | 3546 I915_READ(ILK_DISPLAY_CHICKEN2) |
3396 ILK_ELPIN_409_SELECT); 3547 ILK_ELPIN_409_SELECT);
3397 I915_WRITE(_3D_CHICKEN2, 3548 I915_WRITE(_3D_CHICKEN2,
3398 _3D_CHICKEN2_WM_READ_PIPELINED << 16 | 3549 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
3399 _3D_CHICKEN2_WM_READ_PIPELINED); 3550 _3D_CHICKEN2_WM_READ_PIPELINED);
3551
3552 /* WaDisableRenderCachePipelinedFlush */
3553 I915_WRITE(CACHE_MODE_0,
3554 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
3555
3556 ibx_init_clock_gating(dev);
3557}
3558
3559static void cpt_init_clock_gating(struct drm_device *dev)
3560{
3561 struct drm_i915_private *dev_priv = dev->dev_private;
3562 int pipe;
3563
3564 /*
3565 * On Ibex Peak and Cougar Point, we need to disable clock
3566 * gating for the panel power sequencer or it will fail to
3567 * start up when no ports are active.
3568 */
3569 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
3570 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
3571 DPLS_EDP_PPS_FIX_DIS);
3572 /* The below fixes the weird display corruption, a few pixels shifted
3573 * downward, on (only) LVDS of some HP laptops with IVY.
3574 */
3575 for_each_pipe(pipe)
3576 I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_CHICKEN2_TIMING_OVERRIDE);
3577 /* WADP0ClockGatingDisable */
3578 for_each_pipe(pipe) {
3579 I915_WRITE(TRANS_CHICKEN1(pipe),
3580 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
3581 }
3400} 3582}
3401 3583
3402static void gen6_init_clock_gating(struct drm_device *dev) 3584static void gen6_init_clock_gating(struct drm_device *dev)
3403{ 3585{
3404 struct drm_i915_private *dev_priv = dev->dev_private; 3586 struct drm_i915_private *dev_priv = dev->dev_private;
3405 int pipe; 3587 int pipe;
3406 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; 3588 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
3407 3589
3408 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); 3590 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
3409 3591
3410 I915_WRITE(ILK_DISPLAY_CHICKEN2, 3592 I915_WRITE(ILK_DISPLAY_CHICKEN2,
3411 I915_READ(ILK_DISPLAY_CHICKEN2) | 3593 I915_READ(ILK_DISPLAY_CHICKEN2) |
3412 ILK_ELPIN_409_SELECT); 3594 ILK_ELPIN_409_SELECT);
3413 3595
3596 /* WaDisableHiZPlanesWhenMSAAEnabled */
3597 I915_WRITE(_3D_CHICKEN,
3598 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
3599
3600 /* WaSetupGtModeTdRowDispatch */
3601 if (IS_SNB_GT1(dev))
3602 I915_WRITE(GEN6_GT_MODE,
3603 _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
3604
3414 I915_WRITE(WM3_LP_ILK, 0); 3605 I915_WRITE(WM3_LP_ILK, 0);
3415 I915_WRITE(WM2_LP_ILK, 0); 3606 I915_WRITE(WM2_LP_ILK, 0);
3416 I915_WRITE(WM1_LP_ILK, 0); 3607 I915_WRITE(WM1_LP_ILK, 0);
@@ -3460,11 +3651,12 @@ static void gen6_init_clock_gating(struct drm_device *dev)
3460 I915_WRITE(ILK_DISPLAY_CHICKEN2, 3651 I915_WRITE(ILK_DISPLAY_CHICKEN2,
3461 I915_READ(ILK_DISPLAY_CHICKEN2) | 3652 I915_READ(ILK_DISPLAY_CHICKEN2) |
3462 ILK_DPARB_GATE | ILK_VSDPFD_FULL); 3653 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
3463 I915_WRITE(ILK_DSPCLK_GATE, 3654 I915_WRITE(ILK_DSPCLK_GATE_D,
3464 I915_READ(ILK_DSPCLK_GATE) | 3655 I915_READ(ILK_DSPCLK_GATE_D) |
3465 ILK_DPARB_CLK_GATE | 3656 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
3466 ILK_DPFD_CLK_GATE); 3657 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
3467 3658
3659 /* WaMbcDriverBootEnable */
3468 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | 3660 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
3469 GEN6_MBCTL_ENABLE_BOOT_FETCH); 3661 GEN6_MBCTL_ENABLE_BOOT_FETCH);
3470 3662
@@ -3479,6 +3671,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
3479 * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */ 3671 * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
3480 I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff)); 3672 I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
3481 I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI)); 3673 I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
3674
3675 cpt_init_clock_gating(dev);
3482} 3676}
3483 3677
3484static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) 3678static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
@@ -3493,13 +3687,24 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
3493 I915_WRITE(GEN7_FF_THREAD_MODE, reg); 3687 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
3494} 3688}
3495 3689
3690static void lpt_init_clock_gating(struct drm_device *dev)
3691{
3692 struct drm_i915_private *dev_priv = dev->dev_private;
3693
3694 /*
3695 * TODO: this bit should only be enabled when really needed, then
3696 * disabled when not needed anymore in order to save power.
3697 */
3698 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
3699 I915_WRITE(SOUTH_DSPCLK_GATE_D,
3700 I915_READ(SOUTH_DSPCLK_GATE_D) |
3701 PCH_LP_PARTITION_LEVEL_DISABLE);
3702}
3703
3496static void haswell_init_clock_gating(struct drm_device *dev) 3704static void haswell_init_clock_gating(struct drm_device *dev)
3497{ 3705{
3498 struct drm_i915_private *dev_priv = dev->dev_private; 3706 struct drm_i915_private *dev_priv = dev->dev_private;
3499 int pipe; 3707 int pipe;
3500 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
3501
3502 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
3503 3708
3504 I915_WRITE(WM3_LP_ILK, 0); 3709 I915_WRITE(WM3_LP_ILK, 0);
3505 I915_WRITE(WM2_LP_ILK, 0); 3710 I915_WRITE(WM2_LP_ILK, 0);
@@ -3510,12 +3715,6 @@ static void haswell_init_clock_gating(struct drm_device *dev)
3510 */ 3715 */
3511 I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); 3716 I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
3512 3717
3513 I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
3514
3515 I915_WRITE(IVB_CHICKEN3,
3516 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
3517 CHICKEN3_DGMG_DONE_FIX_DISABLE);
3518
3519 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ 3718 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
3520 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, 3719 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
3521 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); 3720 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
@@ -3544,6 +3743,10 @@ static void haswell_init_clock_gating(struct drm_device *dev)
3544 I915_WRITE(CACHE_MODE_1, 3743 I915_WRITE(CACHE_MODE_1,
3545 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); 3744 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
3546 3745
3746 /* WaMbcDriverBootEnable */
3747 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
3748 GEN6_MBCTL_ENABLE_BOOT_FETCH);
3749
3547 /* XXX: This is a workaround for early silicon revisions and should be 3750 /* XXX: This is a workaround for early silicon revisions and should be
3548 * removed later. 3751 * removed later.
3549 */ 3752 */
@@ -3553,27 +3756,38 @@ static void haswell_init_clock_gating(struct drm_device *dev)
3553 WM_DBG_DISALLOW_SPRITE | 3756 WM_DBG_DISALLOW_SPRITE |
3554 WM_DBG_DISALLOW_MAXFIFO); 3757 WM_DBG_DISALLOW_MAXFIFO);
3555 3758
3759 lpt_init_clock_gating(dev);
3556} 3760}
3557 3761
3558static void ivybridge_init_clock_gating(struct drm_device *dev) 3762static void ivybridge_init_clock_gating(struct drm_device *dev)
3559{ 3763{
3560 struct drm_i915_private *dev_priv = dev->dev_private; 3764 struct drm_i915_private *dev_priv = dev->dev_private;
3561 int pipe; 3765 int pipe;
3562 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
3563 uint32_t snpcr; 3766 uint32_t snpcr;
3564 3767
3565 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
3566
3567 I915_WRITE(WM3_LP_ILK, 0); 3768 I915_WRITE(WM3_LP_ILK, 0);
3568 I915_WRITE(WM2_LP_ILK, 0); 3769 I915_WRITE(WM2_LP_ILK, 0);
3569 I915_WRITE(WM1_LP_ILK, 0); 3770 I915_WRITE(WM1_LP_ILK, 0);
3570 3771
3571 I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); 3772 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
3773
3774 /* WaDisableEarlyCull */
3775 I915_WRITE(_3D_CHICKEN3,
3776 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
3572 3777
3778 /* WaDisableBackToBackFlipFix */
3573 I915_WRITE(IVB_CHICKEN3, 3779 I915_WRITE(IVB_CHICKEN3,
3574 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | 3780 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
3575 CHICKEN3_DGMG_DONE_FIX_DISABLE); 3781 CHICKEN3_DGMG_DONE_FIX_DISABLE);
3576 3782
3783 /* WaDisablePSDDualDispatchEnable */
3784 if (IS_IVB_GT1(dev))
3785 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
3786 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
3787 else
3788 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
3789 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
3790
3577 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ 3791 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
3578 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, 3792 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
3579 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); 3793 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
@@ -3582,7 +3796,18 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
3582 I915_WRITE(GEN7_L3CNTLREG1, 3796 I915_WRITE(GEN7_L3CNTLREG1,
3583 GEN7_WA_FOR_GEN7_L3_CONTROL); 3797 GEN7_WA_FOR_GEN7_L3_CONTROL);
3584 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, 3798 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
3585 GEN7_WA_L3_CHICKEN_MODE); 3799 GEN7_WA_L3_CHICKEN_MODE);
3800 if (IS_IVB_GT1(dev))
3801 I915_WRITE(GEN7_ROW_CHICKEN2,
3802 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
3803 else
3804 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
3805 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
3806
3807
3808 /* WaForceL3Serialization */
3809 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
3810 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
3586 3811
3587 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock 3812 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
3588 * gating disable must be set. Failure to set it results in 3813 * gating disable must be set. Failure to set it results in
@@ -3613,6 +3838,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
3613 intel_flush_display_plane(dev_priv, pipe); 3838 intel_flush_display_plane(dev_priv, pipe);
3614 } 3839 }
3615 3840
3841 /* WaMbcDriverBootEnable */
3616 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | 3842 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
3617 GEN6_MBCTL_ENABLE_BOOT_FETCH); 3843 GEN6_MBCTL_ENABLE_BOOT_FETCH);
3618 3844
@@ -3626,39 +3852,59 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
3626 snpcr &= ~GEN6_MBC_SNPCR_MASK; 3852 snpcr &= ~GEN6_MBC_SNPCR_MASK;
3627 snpcr |= GEN6_MBC_SNPCR_MED; 3853 snpcr |= GEN6_MBC_SNPCR_MED;
3628 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 3854 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
3855
3856 cpt_init_clock_gating(dev);
3629} 3857}
3630 3858
3631static void valleyview_init_clock_gating(struct drm_device *dev) 3859static void valleyview_init_clock_gating(struct drm_device *dev)
3632{ 3860{
3633 struct drm_i915_private *dev_priv = dev->dev_private; 3861 struct drm_i915_private *dev_priv = dev->dev_private;
3634 int pipe; 3862 int pipe;
3635 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
3636
3637 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
3638 3863
3639 I915_WRITE(WM3_LP_ILK, 0); 3864 I915_WRITE(WM3_LP_ILK, 0);
3640 I915_WRITE(WM2_LP_ILK, 0); 3865 I915_WRITE(WM2_LP_ILK, 0);
3641 I915_WRITE(WM1_LP_ILK, 0); 3866 I915_WRITE(WM1_LP_ILK, 0);
3642 3867
3643 I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); 3868 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
3869
3870 /* WaDisableEarlyCull */
3871 I915_WRITE(_3D_CHICKEN3,
3872 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
3644 3873
3874 /* WaDisableBackToBackFlipFix */
3645 I915_WRITE(IVB_CHICKEN3, 3875 I915_WRITE(IVB_CHICKEN3,
3646 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | 3876 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
3647 CHICKEN3_DGMG_DONE_FIX_DISABLE); 3877 CHICKEN3_DGMG_DONE_FIX_DISABLE);
3648 3878
3879 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
3880 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
3881
3649 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ 3882 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
3650 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, 3883 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
3651 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); 3884 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
3652 3885
3653 /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */ 3886 /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
3654 I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL); 3887 I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
3655 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE); 3888 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
3656 3889
3890 /* WaForceL3Serialization */
3891 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
3892 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
3893
3894 /* WaDisableDopClockGating */
3895 I915_WRITE(GEN7_ROW_CHICKEN2,
3896 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
3897
3898 /* WaForceL3Serialization */
3899 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
3900 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
3901
3657 /* This is required by WaCatErrorRejectionIssue */ 3902 /* This is required by WaCatErrorRejectionIssue */
3658 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 3903 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
3659 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 3904 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
3660 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 3905 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
3661 3906
3907 /* WaMbcDriverBootEnable */
3662 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | 3908 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
3663 GEN6_MBCTL_ENABLE_BOOT_FETCH); 3909 GEN6_MBCTL_ENABLE_BOOT_FETCH);
3664 3910
@@ -3710,6 +3956,13 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
3710 PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN | 3956 PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN |
3711 SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN | 3957 SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN |
3712 PLANEA_FLIPDONE_INT_EN); 3958 PLANEA_FLIPDONE_INT_EN);
3959
3960 /*
3961 * WaDisableVLVClockGating_VBIIssue
3962 * Disable clock gating on th GCFG unit to prevent a delay
3963 * in the reporting of vblank events.
3964 */
3965 I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
3713} 3966}
3714 3967
3715static void g4x_init_clock_gating(struct drm_device *dev) 3968static void g4x_init_clock_gating(struct drm_device *dev)
@@ -3728,6 +3981,10 @@ static void g4x_init_clock_gating(struct drm_device *dev)
3728 if (IS_GM45(dev)) 3981 if (IS_GM45(dev))
3729 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; 3982 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
3730 I915_WRITE(DSPCLK_GATE_D, dspclk_gate); 3983 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
3984
3985 /* WaDisableRenderCachePipelinedFlush */
3986 I915_WRITE(CACHE_MODE_0,
3987 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
3731} 3988}
3732 3989
3733static void crestline_init_clock_gating(struct drm_device *dev) 3990static void crestline_init_clock_gating(struct drm_device *dev)
@@ -3783,44 +4040,11 @@ static void i830_init_clock_gating(struct drm_device *dev)
3783 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); 4040 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
3784} 4041}
3785 4042
3786static void ibx_init_clock_gating(struct drm_device *dev)
3787{
3788 struct drm_i915_private *dev_priv = dev->dev_private;
3789
3790 /*
3791 * On Ibex Peak and Cougar Point, we need to disable clock
3792 * gating for the panel power sequencer or it will fail to
3793 * start up when no ports are active.
3794 */
3795 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
3796}
3797
3798static void cpt_init_clock_gating(struct drm_device *dev)
3799{
3800 struct drm_i915_private *dev_priv = dev->dev_private;
3801 int pipe;
3802
3803 /*
3804 * On Ibex Peak and Cougar Point, we need to disable clock
3805 * gating for the panel power sequencer or it will fail to
3806 * start up when no ports are active.
3807 */
3808 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
3809 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
3810 DPLS_EDP_PPS_FIX_DIS);
3811 /* Without this, mode sets may fail silently on FDI */
3812 for_each_pipe(pipe)
3813 I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
3814}
3815
3816void intel_init_clock_gating(struct drm_device *dev) 4043void intel_init_clock_gating(struct drm_device *dev)
3817{ 4044{
3818 struct drm_i915_private *dev_priv = dev->dev_private; 4045 struct drm_i915_private *dev_priv = dev->dev_private;
3819 4046
3820 dev_priv->display.init_clock_gating(dev); 4047 dev_priv->display.init_clock_gating(dev);
3821
3822 if (dev_priv->display.init_pch_clock_gating)
3823 dev_priv->display.init_pch_clock_gating(dev);
3824} 4048}
3825 4049
3826/* Starting with Haswell, we have different power wells for 4050/* Starting with Haswell, we have different power wells for
@@ -3846,7 +4070,7 @@ void intel_init_power_wells(struct drm_device *dev)
3846 4070
3847 if ((well & HSW_PWR_WELL_STATE) == 0) { 4071 if ((well & HSW_PWR_WELL_STATE) == 0) {
3848 I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE); 4072 I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE);
3849 if (wait_for(I915_READ(power_wells[i] & HSW_PWR_WELL_STATE), 20)) 4073 if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20))
3850 DRM_ERROR("Error enabling power well %lx\n", power_wells[i]); 4074 DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
3851 } 4075 }
3852 } 4076 }
@@ -3884,11 +4108,6 @@ void intel_init_pm(struct drm_device *dev)
3884 4108
3885 /* For FIFO watermark updates */ 4109 /* For FIFO watermark updates */
3886 if (HAS_PCH_SPLIT(dev)) { 4110 if (HAS_PCH_SPLIT(dev)) {
3887 if (HAS_PCH_IBX(dev))
3888 dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
3889 else if (HAS_PCH_CPT(dev))
3890 dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
3891
3892 if (IS_GEN5(dev)) { 4111 if (IS_GEN5(dev)) {
3893 if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) 4112 if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
3894 dev_priv->display.update_wm = ironlake_update_wm; 4113 dev_priv->display.update_wm = ironlake_update_wm;
@@ -3911,7 +4130,7 @@ void intel_init_pm(struct drm_device *dev)
3911 } else if (IS_IVYBRIDGE(dev)) { 4130 } else if (IS_IVYBRIDGE(dev)) {
3912 /* FIXME: detect B0+ stepping and use auto training */ 4131 /* FIXME: detect B0+ stepping and use auto training */
3913 if (SNB_READ_WM0_LATENCY()) { 4132 if (SNB_READ_WM0_LATENCY()) {
3914 dev_priv->display.update_wm = sandybridge_update_wm; 4133 dev_priv->display.update_wm = ivybridge_update_wm;
3915 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm; 4134 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
3916 } else { 4135 } else {
3917 DRM_DEBUG_KMS("Failed to read display plane latency. " 4136 DRM_DEBUG_KMS("Failed to read display plane latency. "
@@ -3999,6 +4218,12 @@ static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
3999 DRM_ERROR("GT thread status wait timed out\n"); 4218 DRM_ERROR("GT thread status wait timed out\n");
4000} 4219}
4001 4220
4221static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
4222{
4223 I915_WRITE_NOTRACE(FORCEWAKE, 0);
4224 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
4225}
4226
4002static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) 4227static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
4003{ 4228{
4004 u32 forcewake_ack; 4229 u32 forcewake_ack;
@@ -4012,7 +4237,7 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
4012 FORCEWAKE_ACK_TIMEOUT_MS)) 4237 FORCEWAKE_ACK_TIMEOUT_MS))
4013 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); 4238 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
4014 4239
4015 I915_WRITE_NOTRACE(FORCEWAKE, 1); 4240 I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL);
4016 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ 4241 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
4017 4242
4018 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), 4243 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
@@ -4022,6 +4247,12 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
4022 __gen6_gt_wait_for_thread_c0(dev_priv); 4247 __gen6_gt_wait_for_thread_c0(dev_priv);
4023} 4248}
4024 4249
4250static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
4251{
4252 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
4253 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
4254}
4255
4025static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) 4256static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
4026{ 4257{
4027 u32 forcewake_ack; 4258 u32 forcewake_ack;
@@ -4035,7 +4266,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
4035 FORCEWAKE_ACK_TIMEOUT_MS)) 4266 FORCEWAKE_ACK_TIMEOUT_MS))
4036 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); 4267 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
4037 4268
4038 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1)); 4269 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
4039 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ 4270 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
4040 4271
4041 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), 4272 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
@@ -4079,7 +4310,7 @@ static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
4079 4310
4080static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) 4311static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
4081{ 4312{
4082 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1)); 4313 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
4083 /* gen6_gt_check_fifodbg doubles as the POSTING_READ */ 4314 /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
4084 gen6_gt_check_fifodbg(dev_priv); 4315 gen6_gt_check_fifodbg(dev_priv);
4085} 4316}
@@ -4117,13 +4348,18 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
4117 return ret; 4348 return ret;
4118} 4349}
4119 4350
4351static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
4352{
4353 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
4354}
4355
4120static void vlv_force_wake_get(struct drm_i915_private *dev_priv) 4356static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
4121{ 4357{
4122 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0, 4358 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0,
4123 FORCEWAKE_ACK_TIMEOUT_MS)) 4359 FORCEWAKE_ACK_TIMEOUT_MS))
4124 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); 4360 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
4125 4361
4126 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(1)); 4362 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
4127 4363
4128 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), 4364 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1),
4129 FORCEWAKE_ACK_TIMEOUT_MS)) 4365 FORCEWAKE_ACK_TIMEOUT_MS))
@@ -4134,49 +4370,89 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
4134 4370
4135static void vlv_force_wake_put(struct drm_i915_private *dev_priv) 4371static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
4136{ 4372{
4137 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(1)); 4373 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
4138 /* The below doubles as a POSTING_READ */ 4374 /* The below doubles as a POSTING_READ */
4139 gen6_gt_check_fifodbg(dev_priv); 4375 gen6_gt_check_fifodbg(dev_priv);
4140} 4376}
4141 4377
4378void intel_gt_reset(struct drm_device *dev)
4379{
4380 struct drm_i915_private *dev_priv = dev->dev_private;
4381
4382 if (IS_VALLEYVIEW(dev)) {
4383 vlv_force_wake_reset(dev_priv);
4384 } else if (INTEL_INFO(dev)->gen >= 6) {
4385 __gen6_gt_force_wake_reset(dev_priv);
4386 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
4387 __gen6_gt_force_wake_mt_reset(dev_priv);
4388 }
4389}
4390
4142void intel_gt_init(struct drm_device *dev) 4391void intel_gt_init(struct drm_device *dev)
4143{ 4392{
4144 struct drm_i915_private *dev_priv = dev->dev_private; 4393 struct drm_i915_private *dev_priv = dev->dev_private;
4145 4394
4146 spin_lock_init(&dev_priv->gt_lock); 4395 spin_lock_init(&dev_priv->gt_lock);
4147 4396
4397 intel_gt_reset(dev);
4398
4148 if (IS_VALLEYVIEW(dev)) { 4399 if (IS_VALLEYVIEW(dev)) {
4149 dev_priv->gt.force_wake_get = vlv_force_wake_get; 4400 dev_priv->gt.force_wake_get = vlv_force_wake_get;
4150 dev_priv->gt.force_wake_put = vlv_force_wake_put; 4401 dev_priv->gt.force_wake_put = vlv_force_wake_put;
4151 } else if (INTEL_INFO(dev)->gen >= 6) { 4402 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
4403 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
4404 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
4405 } else if (IS_GEN6(dev)) {
4152 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; 4406 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
4153 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; 4407 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
4408 }
4409 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
4410 intel_gen6_powersave_work);
4411}
4154 4412
4155 /* IVB configs may use multi-threaded forcewake */ 4413int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
4156 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { 4414{
4157 u32 ecobus; 4415 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4158 4416
4159 /* A small trick here - if the bios hasn't configured 4417 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
4160 * MT forcewake, and if the device is in RC6, then 4418 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
4161 * force_wake_mt_get will not wake the device and the 4419 return -EAGAIN;
4162 * ECOBUS read will return zero. Which will be 4420 }
4163 * (correctly) interpreted by the test below as MT 4421
4164 * forcewake being disabled. 4422 I915_WRITE(GEN6_PCODE_DATA, *val);
4165 */ 4423 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
4166 mutex_lock(&dev->struct_mutex); 4424
4167 __gen6_gt_force_wake_mt_get(dev_priv); 4425 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
4168 ecobus = I915_READ_NOTRACE(ECOBUS); 4426 500)) {
4169 __gen6_gt_force_wake_mt_put(dev_priv); 4427 DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
4170 mutex_unlock(&dev->struct_mutex); 4428 return -ETIMEDOUT;
4171
4172 if (ecobus & FORCEWAKE_MT_ENABLE) {
4173 DRM_DEBUG_KMS("Using MT version of forcewake\n");
4174 dev_priv->gt.force_wake_get =
4175 __gen6_gt_force_wake_mt_get;
4176 dev_priv->gt.force_wake_put =
4177 __gen6_gt_force_wake_mt_put;
4178 }
4179 }
4180 } 4429 }
4430
4431 *val = I915_READ(GEN6_PCODE_DATA);
4432 I915_WRITE(GEN6_PCODE_DATA, 0);
4433
4434 return 0;
4181} 4435}
4182 4436
4437int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
4438{
4439 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4440
4441 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
4442 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
4443 return -EAGAIN;
4444 }
4445
4446 I915_WRITE(GEN6_PCODE_DATA, val);
4447 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
4448
4449 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
4450 500)) {
4451 DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
4452 return -ETIMEDOUT;
4453 }
4454
4455 I915_WRITE(GEN6_PCODE_DATA, 0);
4456
4457 return 0;
4458}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ecbc5c5dbbbc..ae253e04c391 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -45,7 +45,7 @@ struct pipe_control {
45 45
46static inline int ring_space(struct intel_ring_buffer *ring) 46static inline int ring_space(struct intel_ring_buffer *ring)
47{ 47{
48 int space = (ring->head & HEAD_ADDR) - (ring->tail + 8); 48 int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
49 if (space < 0) 49 if (space < 0)
50 space += ring->size; 50 space += ring->size;
51 return space; 51 return space;
@@ -245,7 +245,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
245 /* 245 /*
246 * TLB invalidate requires a post-sync write. 246 * TLB invalidate requires a post-sync write.
247 */ 247 */
248 flags |= PIPE_CONTROL_QW_WRITE; 248 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
249 } 249 }
250 250
251 ret = intel_ring_begin(ring, 4); 251 ret = intel_ring_begin(ring, 4);
@@ -547,23 +547,24 @@ static int init_render_ring(struct intel_ring_buffer *ring)
547 547
548static void render_ring_cleanup(struct intel_ring_buffer *ring) 548static void render_ring_cleanup(struct intel_ring_buffer *ring)
549{ 549{
550 struct drm_device *dev = ring->dev;
551
550 if (!ring->private) 552 if (!ring->private)
551 return; 553 return;
552 554
555 if (HAS_BROKEN_CS_TLB(dev))
556 drm_gem_object_unreference(to_gem_object(ring->private));
557
553 cleanup_pipe_control(ring); 558 cleanup_pipe_control(ring);
554} 559}
555 560
556static void 561static void
557update_mboxes(struct intel_ring_buffer *ring, 562update_mboxes(struct intel_ring_buffer *ring,
558 u32 seqno, 563 u32 mmio_offset)
559 u32 mmio_offset)
560{ 564{
561 intel_ring_emit(ring, MI_SEMAPHORE_MBOX | 565 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
562 MI_SEMAPHORE_GLOBAL_GTT |
563 MI_SEMAPHORE_REGISTER |
564 MI_SEMAPHORE_UPDATE);
565 intel_ring_emit(ring, seqno);
566 intel_ring_emit(ring, mmio_offset); 566 intel_ring_emit(ring, mmio_offset);
567 intel_ring_emit(ring, ring->outstanding_lazy_request);
567} 568}
568 569
569/** 570/**
@@ -576,8 +577,7 @@ update_mboxes(struct intel_ring_buffer *ring,
576 * This acts like a signal in the canonical semaphore. 577 * This acts like a signal in the canonical semaphore.
577 */ 578 */
578static int 579static int
579gen6_add_request(struct intel_ring_buffer *ring, 580gen6_add_request(struct intel_ring_buffer *ring)
580 u32 *seqno)
581{ 581{
582 u32 mbox1_reg; 582 u32 mbox1_reg;
583 u32 mbox2_reg; 583 u32 mbox2_reg;
@@ -590,13 +590,11 @@ gen6_add_request(struct intel_ring_buffer *ring,
590 mbox1_reg = ring->signal_mbox[0]; 590 mbox1_reg = ring->signal_mbox[0];
591 mbox2_reg = ring->signal_mbox[1]; 591 mbox2_reg = ring->signal_mbox[1];
592 592
593 *seqno = i915_gem_next_request_seqno(ring); 593 update_mboxes(ring, mbox1_reg);
594 594 update_mboxes(ring, mbox2_reg);
595 update_mboxes(ring, *seqno, mbox1_reg);
596 update_mboxes(ring, *seqno, mbox2_reg);
597 intel_ring_emit(ring, MI_STORE_DWORD_INDEX); 595 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
598 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 596 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
599 intel_ring_emit(ring, *seqno); 597 intel_ring_emit(ring, ring->outstanding_lazy_request);
600 intel_ring_emit(ring, MI_USER_INTERRUPT); 598 intel_ring_emit(ring, MI_USER_INTERRUPT);
601 intel_ring_advance(ring); 599 intel_ring_advance(ring);
602 600
@@ -653,10 +651,8 @@ do { \
653} while (0) 651} while (0)
654 652
655static int 653static int
656pc_render_add_request(struct intel_ring_buffer *ring, 654pc_render_add_request(struct intel_ring_buffer *ring)
657 u32 *result)
658{ 655{
659 u32 seqno = i915_gem_next_request_seqno(ring);
660 struct pipe_control *pc = ring->private; 656 struct pipe_control *pc = ring->private;
661 u32 scratch_addr = pc->gtt_offset + 128; 657 u32 scratch_addr = pc->gtt_offset + 128;
662 int ret; 658 int ret;
@@ -677,7 +673,7 @@ pc_render_add_request(struct intel_ring_buffer *ring,
677 PIPE_CONTROL_WRITE_FLUSH | 673 PIPE_CONTROL_WRITE_FLUSH |
678 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); 674 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
679 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 675 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
680 intel_ring_emit(ring, seqno); 676 intel_ring_emit(ring, ring->outstanding_lazy_request);
681 intel_ring_emit(ring, 0); 677 intel_ring_emit(ring, 0);
682 PIPE_CONTROL_FLUSH(ring, scratch_addr); 678 PIPE_CONTROL_FLUSH(ring, scratch_addr);
683 scratch_addr += 128; /* write to separate cachelines */ 679 scratch_addr += 128; /* write to separate cachelines */
@@ -696,11 +692,10 @@ pc_render_add_request(struct intel_ring_buffer *ring,
696 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | 692 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
697 PIPE_CONTROL_NOTIFY); 693 PIPE_CONTROL_NOTIFY);
698 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 694 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
699 intel_ring_emit(ring, seqno); 695 intel_ring_emit(ring, ring->outstanding_lazy_request);
700 intel_ring_emit(ring, 0); 696 intel_ring_emit(ring, 0);
701 intel_ring_advance(ring); 697 intel_ring_advance(ring);
702 698
703 *result = seqno;
704 return 0; 699 return 0;
705} 700}
706 701
@@ -888,25 +883,20 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
888} 883}
889 884
890static int 885static int
891i9xx_add_request(struct intel_ring_buffer *ring, 886i9xx_add_request(struct intel_ring_buffer *ring)
892 u32 *result)
893{ 887{
894 u32 seqno;
895 int ret; 888 int ret;
896 889
897 ret = intel_ring_begin(ring, 4); 890 ret = intel_ring_begin(ring, 4);
898 if (ret) 891 if (ret)
899 return ret; 892 return ret;
900 893
901 seqno = i915_gem_next_request_seqno(ring);
902
903 intel_ring_emit(ring, MI_STORE_DWORD_INDEX); 894 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
904 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 895 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
905 intel_ring_emit(ring, seqno); 896 intel_ring_emit(ring, ring->outstanding_lazy_request);
906 intel_ring_emit(ring, MI_USER_INTERRUPT); 897 intel_ring_emit(ring, MI_USER_INTERRUPT);
907 intel_ring_advance(ring); 898 intel_ring_advance(ring);
908 899
909 *result = seqno;
910 return 0; 900 return 0;
911} 901}
912 902
@@ -964,7 +954,9 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
964} 954}
965 955
966static int 956static int
967i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length) 957i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
958 u32 offset, u32 length,
959 unsigned flags)
968{ 960{
969 int ret; 961 int ret;
970 962
@@ -975,35 +967,71 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
975 intel_ring_emit(ring, 967 intel_ring_emit(ring,
976 MI_BATCH_BUFFER_START | 968 MI_BATCH_BUFFER_START |
977 MI_BATCH_GTT | 969 MI_BATCH_GTT |
978 MI_BATCH_NON_SECURE_I965); 970 (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
979 intel_ring_emit(ring, offset); 971 intel_ring_emit(ring, offset);
980 intel_ring_advance(ring); 972 intel_ring_advance(ring);
981 973
982 return 0; 974 return 0;
983} 975}
984 976
977/* Just userspace ABI convention to limit the wa batch bo to a resonable size */
978#define I830_BATCH_LIMIT (256*1024)
985static int 979static int
986i830_dispatch_execbuffer(struct intel_ring_buffer *ring, 980i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
987 u32 offset, u32 len) 981 u32 offset, u32 len,
982 unsigned flags)
988{ 983{
989 int ret; 984 int ret;
990 985
991 ret = intel_ring_begin(ring, 4); 986 if (flags & I915_DISPATCH_PINNED) {
992 if (ret) 987 ret = intel_ring_begin(ring, 4);
993 return ret; 988 if (ret)
989 return ret;
994 990
995 intel_ring_emit(ring, MI_BATCH_BUFFER); 991 intel_ring_emit(ring, MI_BATCH_BUFFER);
996 intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); 992 intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
997 intel_ring_emit(ring, offset + len - 8); 993 intel_ring_emit(ring, offset + len - 8);
998 intel_ring_emit(ring, 0); 994 intel_ring_emit(ring, MI_NOOP);
999 intel_ring_advance(ring); 995 intel_ring_advance(ring);
996 } else {
997 struct drm_i915_gem_object *obj = ring->private;
998 u32 cs_offset = obj->gtt_offset;
999
1000 if (len > I830_BATCH_LIMIT)
1001 return -ENOSPC;
1002
1003 ret = intel_ring_begin(ring, 9+3);
1004 if (ret)
1005 return ret;
1006 /* Blit the batch (which has now all relocs applied) to the stable batch
1007 * scratch bo area (so that the CS never stumbles over its tlb
1008 * invalidation bug) ... */
1009 intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
1010 XY_SRC_COPY_BLT_WRITE_ALPHA |
1011 XY_SRC_COPY_BLT_WRITE_RGB);
1012 intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
1013 intel_ring_emit(ring, 0);
1014 intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
1015 intel_ring_emit(ring, cs_offset);
1016 intel_ring_emit(ring, 0);
1017 intel_ring_emit(ring, 4096);
1018 intel_ring_emit(ring, offset);
1019 intel_ring_emit(ring, MI_FLUSH);
1020
1021 /* ... and execute it. */
1022 intel_ring_emit(ring, MI_BATCH_BUFFER);
1023 intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1024 intel_ring_emit(ring, cs_offset + len - 8);
1025 intel_ring_advance(ring);
1026 }
1000 1027
1001 return 0; 1028 return 0;
1002} 1029}
1003 1030
1004static int 1031static int
1005i915_dispatch_execbuffer(struct intel_ring_buffer *ring, 1032i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
1006 u32 offset, u32 len) 1033 u32 offset, u32 len,
1034 unsigned flags)
1007{ 1035{
1008 int ret; 1036 int ret;
1009 1037
@@ -1012,7 +1040,7 @@ i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
1012 return ret; 1040 return ret;
1013 1041
1014 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); 1042 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
1015 intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); 1043 intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1016 intel_ring_advance(ring); 1044 intel_ring_advance(ring);
1017 1045
1018 return 0; 1046 return 0;
@@ -1075,6 +1103,29 @@ err:
1075 return ret; 1103 return ret;
1076} 1104}
1077 1105
1106static int init_phys_hws_pga(struct intel_ring_buffer *ring)
1107{
1108 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1109 u32 addr;
1110
1111 if (!dev_priv->status_page_dmah) {
1112 dev_priv->status_page_dmah =
1113 drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
1114 if (!dev_priv->status_page_dmah)
1115 return -ENOMEM;
1116 }
1117
1118 addr = dev_priv->status_page_dmah->busaddr;
1119 if (INTEL_INFO(ring->dev)->gen >= 4)
1120 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
1121 I915_WRITE(HWS_PGA, addr);
1122
1123 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1124 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1125
1126 return 0;
1127}
1128
1078static int intel_init_ring_buffer(struct drm_device *dev, 1129static int intel_init_ring_buffer(struct drm_device *dev,
1079 struct intel_ring_buffer *ring) 1130 struct intel_ring_buffer *ring)
1080{ 1131{
@@ -1086,6 +1137,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1086 INIT_LIST_HEAD(&ring->active_list); 1137 INIT_LIST_HEAD(&ring->active_list);
1087 INIT_LIST_HEAD(&ring->request_list); 1138 INIT_LIST_HEAD(&ring->request_list);
1088 ring->size = 32 * PAGE_SIZE; 1139 ring->size = 32 * PAGE_SIZE;
1140 memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
1089 1141
1090 init_waitqueue_head(&ring->irq_queue); 1142 init_waitqueue_head(&ring->irq_queue);
1091 1143
@@ -1093,6 +1145,11 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1093 ret = init_status_page(ring); 1145 ret = init_status_page(ring);
1094 if (ret) 1146 if (ret)
1095 return ret; 1147 return ret;
1148 } else {
1149 BUG_ON(ring->id != RCS);
1150 ret = init_phys_hws_pga(ring);
1151 if (ret)
1152 return ret;
1096 } 1153 }
1097 1154
1098 obj = i915_gem_alloc_object(dev, ring->size); 1155 obj = i915_gem_alloc_object(dev, ring->size);
@@ -1157,7 +1214,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1157 1214
1158 /* Disable the ring buffer. The ring must be idle at this point */ 1215 /* Disable the ring buffer. The ring must be idle at this point */
1159 dev_priv = ring->dev->dev_private; 1216 dev_priv = ring->dev->dev_private;
1160 ret = intel_wait_ring_idle(ring); 1217 ret = intel_ring_idle(ring);
1161 if (ret) 1218 if (ret)
1162 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", 1219 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
1163 ring->name, ret); 1220 ring->name, ret);
@@ -1176,28 +1233,6 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1176 cleanup_status_page(ring); 1233 cleanup_status_page(ring);
1177} 1234}
1178 1235
1179static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
1180{
1181 uint32_t __iomem *virt;
1182 int rem = ring->size - ring->tail;
1183
1184 if (ring->space < rem) {
1185 int ret = intel_wait_ring_buffer(ring, rem);
1186 if (ret)
1187 return ret;
1188 }
1189
1190 virt = ring->virtual_start + ring->tail;
1191 rem /= 4;
1192 while (rem--)
1193 iowrite32(MI_NOOP, virt++);
1194
1195 ring->tail = 0;
1196 ring->space = ring_space(ring);
1197
1198 return 0;
1199}
1200
1201static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno) 1236static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
1202{ 1237{
1203 int ret; 1238 int ret;
@@ -1231,7 +1266,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
1231 if (request->tail == -1) 1266 if (request->tail == -1)
1232 continue; 1267 continue;
1233 1268
1234 space = request->tail - (ring->tail + 8); 1269 space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
1235 if (space < 0) 1270 if (space < 0)
1236 space += ring->size; 1271 space += ring->size;
1237 if (space >= n) { 1272 if (space >= n) {
@@ -1266,7 +1301,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
1266 return 0; 1301 return 0;
1267} 1302}
1268 1303
1269int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) 1304static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
1270{ 1305{
1271 struct drm_device *dev = ring->dev; 1306 struct drm_device *dev = ring->dev;
1272 struct drm_i915_private *dev_priv = dev->dev_private; 1307 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1309,6 +1344,60 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
1309 return -EBUSY; 1344 return -EBUSY;
1310} 1345}
1311 1346
1347static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
1348{
1349 uint32_t __iomem *virt;
1350 int rem = ring->size - ring->tail;
1351
1352 if (ring->space < rem) {
1353 int ret = ring_wait_for_space(ring, rem);
1354 if (ret)
1355 return ret;
1356 }
1357
1358 virt = ring->virtual_start + ring->tail;
1359 rem /= 4;
1360 while (rem--)
1361 iowrite32(MI_NOOP, virt++);
1362
1363 ring->tail = 0;
1364 ring->space = ring_space(ring);
1365
1366 return 0;
1367}
1368
1369int intel_ring_idle(struct intel_ring_buffer *ring)
1370{
1371 u32 seqno;
1372 int ret;
1373
1374 /* We need to add any requests required to flush the objects and ring */
1375 if (ring->outstanding_lazy_request) {
1376 ret = i915_add_request(ring, NULL, NULL);
1377 if (ret)
1378 return ret;
1379 }
1380
1381 /* Wait upon the last request to be completed */
1382 if (list_empty(&ring->request_list))
1383 return 0;
1384
1385 seqno = list_entry(ring->request_list.prev,
1386 struct drm_i915_gem_request,
1387 list)->seqno;
1388
1389 return i915_wait_seqno(ring, seqno);
1390}
1391
1392static int
1393intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
1394{
1395 if (ring->outstanding_lazy_request)
1396 return 0;
1397
1398 return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
1399}
1400
1312int intel_ring_begin(struct intel_ring_buffer *ring, 1401int intel_ring_begin(struct intel_ring_buffer *ring,
1313 int num_dwords) 1402 int num_dwords)
1314{ 1403{
@@ -1320,6 +1409,11 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
1320 if (ret) 1409 if (ret)
1321 return ret; 1410 return ret;
1322 1411
1412 /* Preallocate the olr before touching the ring */
1413 ret = intel_ring_alloc_seqno(ring);
1414 if (ret)
1415 return ret;
1416
1323 if (unlikely(ring->tail + n > ring->effective_size)) { 1417 if (unlikely(ring->tail + n > ring->effective_size)) {
1324 ret = intel_wrap_ring_buffer(ring); 1418 ret = intel_wrap_ring_buffer(ring);
1325 if (unlikely(ret)) 1419 if (unlikely(ret))
@@ -1327,7 +1421,7 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
1327 } 1421 }
1328 1422
1329 if (unlikely(ring->space < n)) { 1423 if (unlikely(ring->space < n)) {
1330 ret = intel_wait_ring_buffer(ring, n); 1424 ret = ring_wait_for_space(ring, n);
1331 if (unlikely(ret)) 1425 if (unlikely(ret))
1332 return ret; 1426 return ret;
1333 } 1427 }
@@ -1391,10 +1485,17 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
1391 return ret; 1485 return ret;
1392 1486
1393 cmd = MI_FLUSH_DW; 1487 cmd = MI_FLUSH_DW;
1488 /*
1489 * Bspec vol 1c.5 - video engine command streamer:
1490 * "If ENABLED, all TLBs will be invalidated once the flush
1491 * operation is complete. This bit is only valid when the
1492 * Post-Sync Operation field is a value of 1h or 3h."
1493 */
1394 if (invalidate & I915_GEM_GPU_DOMAINS) 1494 if (invalidate & I915_GEM_GPU_DOMAINS)
1395 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD; 1495 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
1496 MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1396 intel_ring_emit(ring, cmd); 1497 intel_ring_emit(ring, cmd);
1397 intel_ring_emit(ring, 0); 1498 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
1398 intel_ring_emit(ring, 0); 1499 intel_ring_emit(ring, 0);
1399 intel_ring_emit(ring, MI_NOOP); 1500 intel_ring_emit(ring, MI_NOOP);
1400 intel_ring_advance(ring); 1501 intel_ring_advance(ring);
@@ -1402,8 +1503,30 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
1402} 1503}
1403 1504
1404static int 1505static int
1506hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1507 u32 offset, u32 len,
1508 unsigned flags)
1509{
1510 int ret;
1511
1512 ret = intel_ring_begin(ring, 2);
1513 if (ret)
1514 return ret;
1515
1516 intel_ring_emit(ring,
1517 MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
1518 (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
1519 /* bit0-7 is the length on GEN6+ */
1520 intel_ring_emit(ring, offset);
1521 intel_ring_advance(ring);
1522
1523 return 0;
1524}
1525
1526static int
1405gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, 1527gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1406 u32 offset, u32 len) 1528 u32 offset, u32 len,
1529 unsigned flags)
1407{ 1530{
1408 int ret; 1531 int ret;
1409 1532
@@ -1411,7 +1534,9 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1411 if (ret) 1534 if (ret)
1412 return ret; 1535 return ret;
1413 1536
1414 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); 1537 intel_ring_emit(ring,
1538 MI_BATCH_BUFFER_START |
1539 (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
1415 /* bit0-7 is the length on GEN6+ */ 1540 /* bit0-7 is the length on GEN6+ */
1416 intel_ring_emit(ring, offset); 1541 intel_ring_emit(ring, offset);
1417 intel_ring_advance(ring); 1542 intel_ring_advance(ring);
@@ -1432,10 +1557,17 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
1432 return ret; 1557 return ret;
1433 1558
1434 cmd = MI_FLUSH_DW; 1559 cmd = MI_FLUSH_DW;
1560 /*
1561 * Bspec vol 1c.3 - blitter engine command streamer:
1562 * "If ENABLED, all TLBs will be invalidated once the flush
1563 * operation is complete. This bit is only valid when the
1564 * Post-Sync Operation field is a value of 1h or 3h."
1565 */
1435 if (invalidate & I915_GEM_DOMAIN_RENDER) 1566 if (invalidate & I915_GEM_DOMAIN_RENDER)
1436 cmd |= MI_INVALIDATE_TLB; 1567 cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
1568 MI_FLUSH_DW_OP_STOREDW;
1437 intel_ring_emit(ring, cmd); 1569 intel_ring_emit(ring, cmd);
1438 intel_ring_emit(ring, 0); 1570 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
1439 intel_ring_emit(ring, 0); 1571 intel_ring_emit(ring, 0);
1440 intel_ring_emit(ring, MI_NOOP); 1572 intel_ring_emit(ring, MI_NOOP);
1441 intel_ring_advance(ring); 1573 intel_ring_advance(ring);
@@ -1490,7 +1622,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1490 ring->irq_enable_mask = I915_USER_INTERRUPT; 1622 ring->irq_enable_mask = I915_USER_INTERRUPT;
1491 } 1623 }
1492 ring->write_tail = ring_write_tail; 1624 ring->write_tail = ring_write_tail;
1493 if (INTEL_INFO(dev)->gen >= 6) 1625 if (IS_HASWELL(dev))
1626 ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
1627 else if (INTEL_INFO(dev)->gen >= 6)
1494 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 1628 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1495 else if (INTEL_INFO(dev)->gen >= 4) 1629 else if (INTEL_INFO(dev)->gen >= 4)
1496 ring->dispatch_execbuffer = i965_dispatch_execbuffer; 1630 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
@@ -1501,10 +1635,25 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1501 ring->init = init_render_ring; 1635 ring->init = init_render_ring;
1502 ring->cleanup = render_ring_cleanup; 1636 ring->cleanup = render_ring_cleanup;
1503 1637
1638 /* Workaround batchbuffer to combat CS tlb bug. */
1639 if (HAS_BROKEN_CS_TLB(dev)) {
1640 struct drm_i915_gem_object *obj;
1641 int ret;
1504 1642
1505 if (!I915_NEED_GFX_HWS(dev)) { 1643 obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
1506 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; 1644 if (obj == NULL) {
1507 memset(ring->status_page.page_addr, 0, PAGE_SIZE); 1645 DRM_ERROR("Failed to allocate batch bo\n");
1646 return -ENOMEM;
1647 }
1648
1649 ret = i915_gem_object_pin(obj, 0, true, false);
1650 if (ret != 0) {
1651 drm_gem_object_unreference(&obj->base);
1652 DRM_ERROR("Failed to ping batch bo\n");
1653 return ret;
1654 }
1655
1656 ring->private = obj;
1508 } 1657 }
1509 1658
1510 return intel_init_ring_buffer(dev, ring); 1659 return intel_init_ring_buffer(dev, ring);
@@ -1514,6 +1663,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1514{ 1663{
1515 drm_i915_private_t *dev_priv = dev->dev_private; 1664 drm_i915_private_t *dev_priv = dev->dev_private;
1516 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 1665 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1666 int ret;
1517 1667
1518 ring->name = "render ring"; 1668 ring->name = "render ring";
1519 ring->id = RCS; 1669 ring->id = RCS;
@@ -1551,16 +1701,13 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1551 ring->init = init_render_ring; 1701 ring->init = init_render_ring;
1552 ring->cleanup = render_ring_cleanup; 1702 ring->cleanup = render_ring_cleanup;
1553 1703
1554 if (!I915_NEED_GFX_HWS(dev))
1555 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1556
1557 ring->dev = dev; 1704 ring->dev = dev;
1558 INIT_LIST_HEAD(&ring->active_list); 1705 INIT_LIST_HEAD(&ring->active_list);
1559 INIT_LIST_HEAD(&ring->request_list); 1706 INIT_LIST_HEAD(&ring->request_list);
1560 1707
1561 ring->size = size; 1708 ring->size = size;
1562 ring->effective_size = ring->size; 1709 ring->effective_size = ring->size;
1563 if (IS_I830(ring->dev)) 1710 if (IS_I830(ring->dev) || IS_845G(ring->dev))
1564 ring->effective_size -= 128; 1711 ring->effective_size -= 128;
1565 1712
1566 ring->virtual_start = ioremap_wc(start, size); 1713 ring->virtual_start = ioremap_wc(start, size);
@@ -1570,6 +1717,12 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1570 return -ENOMEM; 1717 return -ENOMEM;
1571 } 1718 }
1572 1719
1720 if (!I915_NEED_GFX_HWS(dev)) {
1721 ret = init_phys_hws_pga(ring);
1722 if (ret)
1723 return ret;
1724 }
1725
1573 return 0; 1726 return 0;
1574} 1727}
1575 1728
@@ -1618,7 +1771,6 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
1618 } 1771 }
1619 ring->init = init_ring_common; 1772 ring->init = init_ring_common;
1620 1773
1621
1622 return intel_init_ring_buffer(dev, ring); 1774 return intel_init_ring_buffer(dev, ring);
1623} 1775}
1624 1776
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2ea7a311a1f0..6af87cd05725 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,6 +1,17 @@
1#ifndef _INTEL_RINGBUFFER_H_ 1#ifndef _INTEL_RINGBUFFER_H_
2#define _INTEL_RINGBUFFER_H_ 2#define _INTEL_RINGBUFFER_H_
3 3
4/*
5 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
6 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
7 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
8 *
9 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
10 * cacheline, the Head Pointer must not be greater than the Tail
11 * Pointer."
12 */
13#define I915_RING_FREE_SPACE 64
14
4struct intel_hw_status_page { 15struct intel_hw_status_page {
5 u32 *page_addr; 16 u32 *page_addr;
6 unsigned int gfx_addr; 17 unsigned int gfx_addr;
@@ -70,8 +81,7 @@ struct intel_ring_buffer {
70 int __must_check (*flush)(struct intel_ring_buffer *ring, 81 int __must_check (*flush)(struct intel_ring_buffer *ring,
71 u32 invalidate_domains, 82 u32 invalidate_domains,
72 u32 flush_domains); 83 u32 flush_domains);
73 int (*add_request)(struct intel_ring_buffer *ring, 84 int (*add_request)(struct intel_ring_buffer *ring);
74 u32 *seqno);
75 /* Some chipsets are not quite as coherent as advertised and need 85 /* Some chipsets are not quite as coherent as advertised and need
76 * an expensive kick to force a true read of the up-to-date seqno. 86 * an expensive kick to force a true read of the up-to-date seqno.
77 * However, the up-to-date seqno is not always required and the last 87 * However, the up-to-date seqno is not always required and the last
@@ -81,7 +91,10 @@ struct intel_ring_buffer {
81 u32 (*get_seqno)(struct intel_ring_buffer *ring, 91 u32 (*get_seqno)(struct intel_ring_buffer *ring,
82 bool lazy_coherency); 92 bool lazy_coherency);
83 int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, 93 int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
84 u32 offset, u32 length); 94 u32 offset, u32 length,
95 unsigned flags);
96#define I915_DISPATCH_SECURE 0x1
97#define I915_DISPATCH_PINNED 0x2
85 void (*cleanup)(struct intel_ring_buffer *ring); 98 void (*cleanup)(struct intel_ring_buffer *ring);
86 int (*sync_to)(struct intel_ring_buffer *ring, 99 int (*sync_to)(struct intel_ring_buffer *ring,
87 struct intel_ring_buffer *to, 100 struct intel_ring_buffer *to,
@@ -181,27 +194,21 @@ intel_read_status_page(struct intel_ring_buffer *ring,
181 * The area from dword 0x20 to 0x3ff is available for driver usage. 194 * The area from dword 0x20 to 0x3ff is available for driver usage.
182 */ 195 */
183#define I915_GEM_HWS_INDEX 0x20 196#define I915_GEM_HWS_INDEX 0x20
197#define I915_GEM_HWS_SCRATCH_INDEX 0x30
198#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
184 199
185void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); 200void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
186 201
187int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
188static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
189{
190 return intel_wait_ring_buffer(ring, ring->size - 8);
191}
192
193int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); 202int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
194
195static inline void intel_ring_emit(struct intel_ring_buffer *ring, 203static inline void intel_ring_emit(struct intel_ring_buffer *ring,
196 u32 data) 204 u32 data)
197{ 205{
198 iowrite32(data, ring->virtual_start + ring->tail); 206 iowrite32(data, ring->virtual_start + ring->tail);
199 ring->tail += 4; 207 ring->tail += 4;
200} 208}
201
202void intel_ring_advance(struct intel_ring_buffer *ring); 209void intel_ring_advance(struct intel_ring_buffer *ring);
210int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
203 211
204u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
205int intel_ring_flush_all_caches(struct intel_ring_buffer *ring); 212int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
206int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring); 213int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
207 214
@@ -217,6 +224,12 @@ static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
217 return ring->tail; 224 return ring->tail;
218} 225}
219 226
227static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
228{
229 BUG_ON(ring->outstanding_lazy_request == 0);
230 return ring->outstanding_lazy_request;
231}
232
220static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno) 233static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
221{ 234{
222 if (ring->trace_irq_seqno == 0 && ring->irq_get(ring)) 235 if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index c01d97db0061..c275bf0fa36d 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -509,7 +509,7 @@ out:
509static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, 509static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
510 void *response, int response_len) 510 void *response, int response_len)
511{ 511{
512 u8 retry = 5; 512 u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
513 u8 status; 513 u8 status;
514 int i; 514 int i;
515 515
@@ -522,14 +522,27 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
522 * command to be complete. 522 * command to be complete.
523 * 523 *
524 * Check 5 times in case the hardware failed to read the docs. 524 * Check 5 times in case the hardware failed to read the docs.
525 *
526 * Also beware that the first response by many devices is to
527 * reply PENDING and stall for time. TVs are notorious for
528 * requiring longer than specified to complete their replies.
529 * Originally (in the DDX long ago), the delay was only ever 15ms
530 * with an additional delay of 30ms applied for TVs added later after
531 * many experiments. To accommodate both sets of delays, we do a
532 * sequence of slow checks if the device is falling behind and fails
533 * to reply within 5*15µs.
525 */ 534 */
526 if (!intel_sdvo_read_byte(intel_sdvo, 535 if (!intel_sdvo_read_byte(intel_sdvo,
527 SDVO_I2C_CMD_STATUS, 536 SDVO_I2C_CMD_STATUS,
528 &status)) 537 &status))
529 goto log_fail; 538 goto log_fail;
530 539
531 while (status == SDVO_CMD_STATUS_PENDING && retry--) { 540 while (status == SDVO_CMD_STATUS_PENDING && --retry) {
532 udelay(15); 541 if (retry < 10)
542 msleep(15);
543 else
544 udelay(15);
545
533 if (!intel_sdvo_read_byte(intel_sdvo, 546 if (!intel_sdvo_read_byte(intel_sdvo,
534 SDVO_I2C_CMD_STATUS, 547 SDVO_I2C_CMD_STATUS,
535 &status)) 548 &status))
@@ -894,6 +907,45 @@ static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
894} 907}
895#endif 908#endif
896 909
910static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
911 unsigned if_index, uint8_t tx_rate,
912 uint8_t *data, unsigned length)
913{
914 uint8_t set_buf_index[2] = { if_index, 0 };
915 uint8_t hbuf_size, tmp[8];
916 int i;
917
918 if (!intel_sdvo_set_value(intel_sdvo,
919 SDVO_CMD_SET_HBUF_INDEX,
920 set_buf_index, 2))
921 return false;
922
923 if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
924 &hbuf_size, 1))
925 return false;
926
927 /* Buffer size is 0 based, hooray! */
928 hbuf_size++;
929
930 DRM_DEBUG_KMS("writing sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n",
931 if_index, length, hbuf_size);
932
933 for (i = 0; i < hbuf_size; i += 8) {
934 memset(tmp, 0, 8);
935 if (i < length)
936 memcpy(tmp, data + i, min_t(unsigned, 8, length - i));
937
938 if (!intel_sdvo_set_value(intel_sdvo,
939 SDVO_CMD_SET_HBUF_DATA,
940 tmp, 8))
941 return false;
942 }
943
944 return intel_sdvo_set_value(intel_sdvo,
945 SDVO_CMD_SET_HBUF_TXRATE,
946 &tx_rate, 1);
947}
948
897static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo) 949static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
898{ 950{
899 struct dip_infoframe avi_if = { 951 struct dip_infoframe avi_if = {
@@ -901,11 +953,7 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
901 .ver = DIP_VERSION_AVI, 953 .ver = DIP_VERSION_AVI,
902 .len = DIP_LEN_AVI, 954 .len = DIP_LEN_AVI,
903 }; 955 };
904 uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
905 uint8_t set_buf_index[2] = { 1, 0 };
906 uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)]; 956 uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
907 uint64_t *data = (uint64_t *)sdvo_data;
908 unsigned i;
909 957
910 intel_dip_infoframe_csum(&avi_if); 958 intel_dip_infoframe_csum(&avi_if);
911 959
@@ -915,22 +963,9 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
915 sdvo_data[3] = avi_if.checksum; 963 sdvo_data[3] = avi_if.checksum;
916 memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi)); 964 memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
917 965
918 if (!intel_sdvo_set_value(intel_sdvo, 966 return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
919 SDVO_CMD_SET_HBUF_INDEX, 967 SDVO_HBUF_TX_VSYNC,
920 set_buf_index, 2)) 968 sdvo_data, sizeof(sdvo_data));
921 return false;
922
923 for (i = 0; i < sizeof(sdvo_data); i += 8) {
924 if (!intel_sdvo_set_value(intel_sdvo,
925 SDVO_CMD_SET_HBUF_DATA,
926 data, 8))
927 return false;
928 data++;
929 }
930
931 return intel_sdvo_set_value(intel_sdvo,
932 SDVO_CMD_SET_HBUF_TXRATE,
933 &tx_rate, 1);
934} 969}
935 970
936static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo) 971static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo)
@@ -1206,6 +1241,30 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
1206 1241
1207 temp = I915_READ(intel_sdvo->sdvo_reg); 1242 temp = I915_READ(intel_sdvo->sdvo_reg);
1208 if ((temp & SDVO_ENABLE) != 0) { 1243 if ((temp & SDVO_ENABLE) != 0) {
1244 /* HW workaround for IBX, we need to move the port to
1245 * transcoder A before disabling it. */
1246 if (HAS_PCH_IBX(encoder->base.dev)) {
1247 struct drm_crtc *crtc = encoder->base.crtc;
1248 int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
1249
1250 if (temp & SDVO_PIPE_B_SELECT) {
1251 temp &= ~SDVO_PIPE_B_SELECT;
1252 I915_WRITE(intel_sdvo->sdvo_reg, temp);
1253 POSTING_READ(intel_sdvo->sdvo_reg);
1254
1255 /* Again we need to write this twice. */
1256 I915_WRITE(intel_sdvo->sdvo_reg, temp);
1257 POSTING_READ(intel_sdvo->sdvo_reg);
1258
1259 /* Transcoder selection bits only update
1260 * effectively on vblank. */
1261 if (crtc)
1262 intel_wait_for_vblank(encoder->base.dev, pipe);
1263 else
1264 msleep(50);
1265 }
1266 }
1267
1209 intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE); 1268 intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
1210 } 1269 }
1211} 1270}
@@ -1222,8 +1281,20 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
1222 u8 status; 1281 u8 status;
1223 1282
1224 temp = I915_READ(intel_sdvo->sdvo_reg); 1283 temp = I915_READ(intel_sdvo->sdvo_reg);
1225 if ((temp & SDVO_ENABLE) == 0) 1284 if ((temp & SDVO_ENABLE) == 0) {
1285 /* HW workaround for IBX, we need to move the port
1286 * to transcoder A before disabling it. */
1287 if (HAS_PCH_IBX(dev)) {
1288 struct drm_crtc *crtc = encoder->base.crtc;
1289 int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
1290
1291 /* Restore the transcoder select bit. */
1292 if (pipe == PIPE_B)
1293 temp |= SDVO_PIPE_B_SELECT;
1294 }
1295
1226 intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE); 1296 intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
1297 }
1227 for (i = 0; i < 2; i++) 1298 for (i = 0; i < 2; i++)
1228 intel_wait_for_vblank(dev, intel_crtc->pipe); 1299 intel_wait_for_vblank(dev, intel_crtc->pipe);
1229 1300
@@ -1477,15 +1548,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
1477 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); 1548 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
1478 enum drm_connector_status ret; 1549 enum drm_connector_status ret;
1479 1550
1480 if (!intel_sdvo_write_cmd(intel_sdvo, 1551 if (!intel_sdvo_get_value(intel_sdvo,
1481 SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0)) 1552 SDVO_CMD_GET_ATTACHED_DISPLAYS,
1482 return connector_status_unknown; 1553 &response, 2))
1483
1484 /* add 30ms delay when the output type might be TV */
1485 if (intel_sdvo->caps.output_flags & SDVO_TV_MASK)
1486 msleep(30);
1487
1488 if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
1489 return connector_status_unknown; 1554 return connector_status_unknown;
1490 1555
1491 DRM_DEBUG_KMS("SDVO response %d %d [%x]\n", 1556 DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
@@ -1774,7 +1839,7 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
1774 intel_sdvo_destroy_enhance_property(connector); 1839 intel_sdvo_destroy_enhance_property(connector);
1775 drm_sysfs_connector_remove(connector); 1840 drm_sysfs_connector_remove(connector);
1776 drm_connector_cleanup(connector); 1841 drm_connector_cleanup(connector);
1777 kfree(connector); 1842 kfree(intel_sdvo_connector);
1778} 1843}
1779 1844
1780static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector) 1845static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
@@ -1806,7 +1871,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
1806 uint8_t cmd; 1871 uint8_t cmd;
1807 int ret; 1872 int ret;
1808 1873
1809 ret = drm_connector_property_set_value(connector, property, val); 1874 ret = drm_object_property_set_value(&connector->base, property, val);
1810 if (ret) 1875 if (ret)
1811 return ret; 1876 return ret;
1812 1877
@@ -1861,7 +1926,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
1861 } else if (IS_TV_OR_LVDS(intel_sdvo_connector)) { 1926 } else if (IS_TV_OR_LVDS(intel_sdvo_connector)) {
1862 temp_value = val; 1927 temp_value = val;
1863 if (intel_sdvo_connector->left == property) { 1928 if (intel_sdvo_connector->left == property) {
1864 drm_connector_property_set_value(connector, 1929 drm_object_property_set_value(&connector->base,
1865 intel_sdvo_connector->right, val); 1930 intel_sdvo_connector->right, val);
1866 if (intel_sdvo_connector->left_margin == temp_value) 1931 if (intel_sdvo_connector->left_margin == temp_value)
1867 return 0; 1932 return 0;
@@ -1873,7 +1938,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
1873 cmd = SDVO_CMD_SET_OVERSCAN_H; 1938 cmd = SDVO_CMD_SET_OVERSCAN_H;
1874 goto set_value; 1939 goto set_value;
1875 } else if (intel_sdvo_connector->right == property) { 1940 } else if (intel_sdvo_connector->right == property) {
1876 drm_connector_property_set_value(connector, 1941 drm_object_property_set_value(&connector->base,
1877 intel_sdvo_connector->left, val); 1942 intel_sdvo_connector->left, val);
1878 if (intel_sdvo_connector->right_margin == temp_value) 1943 if (intel_sdvo_connector->right_margin == temp_value)
1879 return 0; 1944 return 0;
@@ -1885,7 +1950,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
1885 cmd = SDVO_CMD_SET_OVERSCAN_H; 1950 cmd = SDVO_CMD_SET_OVERSCAN_H;
1886 goto set_value; 1951 goto set_value;
1887 } else if (intel_sdvo_connector->top == property) { 1952 } else if (intel_sdvo_connector->top == property) {
1888 drm_connector_property_set_value(connector, 1953 drm_object_property_set_value(&connector->base,
1889 intel_sdvo_connector->bottom, val); 1954 intel_sdvo_connector->bottom, val);
1890 if (intel_sdvo_connector->top_margin == temp_value) 1955 if (intel_sdvo_connector->top_margin == temp_value)
1891 return 0; 1956 return 0;
@@ -1897,7 +1962,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
1897 cmd = SDVO_CMD_SET_OVERSCAN_V; 1962 cmd = SDVO_CMD_SET_OVERSCAN_V;
1898 goto set_value; 1963 goto set_value;
1899 } else if (intel_sdvo_connector->bottom == property) { 1964 } else if (intel_sdvo_connector->bottom == property) {
1900 drm_connector_property_set_value(connector, 1965 drm_object_property_set_value(&connector->base,
1901 intel_sdvo_connector->top, val); 1966 intel_sdvo_connector->top, val);
1902 if (intel_sdvo_connector->bottom_margin == temp_value) 1967 if (intel_sdvo_connector->bottom_margin == temp_value)
1903 return 0; 1968 return 0;
@@ -2050,17 +2115,24 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
2050 else 2115 else
2051 mapping = &dev_priv->sdvo_mappings[1]; 2116 mapping = &dev_priv->sdvo_mappings[1];
2052 2117
2053 pin = GMBUS_PORT_DPB; 2118 if (mapping->initialized && intel_gmbus_is_port_valid(mapping->i2c_pin))
2054 if (mapping->initialized)
2055 pin = mapping->i2c_pin; 2119 pin = mapping->i2c_pin;
2120 else
2121 pin = GMBUS_PORT_DPB;
2056 2122
2057 if (intel_gmbus_is_port_valid(pin)) { 2123 sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
2058 sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin); 2124
2059 intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ); 2125 /* With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow
2060 intel_gmbus_force_bit(sdvo->i2c, true); 2126 * our code totally fails once we start using gmbus. Hence fall back to
2061 } else { 2127 * bit banging for now. */
2062 sdvo->i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB); 2128 intel_gmbus_force_bit(sdvo->i2c, true);
2063 } 2129}
2130
2131/* undo any changes intel_sdvo_select_i2c_bus() did to sdvo->i2c */
2132static void
2133intel_sdvo_unselect_i2c_bus(struct intel_sdvo *sdvo)
2134{
2135 intel_gmbus_force_bit(sdvo->i2c, false);
2064} 2136}
2065 2137
2066static bool 2138static bool
@@ -2179,7 +2251,6 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2179 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; 2251 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
2180 intel_sdvo->is_hdmi = true; 2252 intel_sdvo->is_hdmi = true;
2181 } 2253 }
2182 intel_sdvo->base.cloneable = true;
2183 2254
2184 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); 2255 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
2185 if (intel_sdvo->is_hdmi) 2256 if (intel_sdvo->is_hdmi)
@@ -2210,7 +2281,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
2210 2281
2211 intel_sdvo->is_tv = true; 2282 intel_sdvo->is_tv = true;
2212 intel_sdvo->base.needs_tv_clock = true; 2283 intel_sdvo->base.needs_tv_clock = true;
2213 intel_sdvo->base.cloneable = false;
2214 2284
2215 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); 2285 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
2216 2286
@@ -2253,8 +2323,6 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
2253 intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; 2323 intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
2254 } 2324 }
2255 2325
2256 intel_sdvo->base.cloneable = true;
2257
2258 intel_sdvo_connector_init(intel_sdvo_connector, 2326 intel_sdvo_connector_init(intel_sdvo_connector,
2259 intel_sdvo); 2327 intel_sdvo);
2260 return true; 2328 return true;
@@ -2285,9 +2353,6 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
2285 intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; 2353 intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
2286 } 2354 }
2287 2355
2288 /* SDVO LVDS is not cloneable because the input mode gets adjusted by the encoder */
2289 intel_sdvo->base.cloneable = false;
2290
2291 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); 2356 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
2292 if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector)) 2357 if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
2293 goto err; 2358 goto err;
@@ -2360,6 +2425,18 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
2360 return true; 2425 return true;
2361} 2426}
2362 2427
2428static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo)
2429{
2430 struct drm_device *dev = intel_sdvo->base.base.dev;
2431 struct drm_connector *connector, *tmp;
2432
2433 list_for_each_entry_safe(connector, tmp,
2434 &dev->mode_config.connector_list, head) {
2435 if (intel_attached_encoder(connector) == &intel_sdvo->base)
2436 intel_sdvo_destroy(connector);
2437 }
2438}
2439
2363static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, 2440static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
2364 struct intel_sdvo_connector *intel_sdvo_connector, 2441 struct intel_sdvo_connector *intel_sdvo_connector,
2365 int type) 2442 int type)
@@ -2400,7 +2477,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
2400 i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]); 2477 i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
2401 2478
2402 intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0]; 2479 intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
2403 drm_connector_attach_property(&intel_sdvo_connector->base.base, 2480 drm_object_attach_property(&intel_sdvo_connector->base.base.base,
2404 intel_sdvo_connector->tv_format, 0); 2481 intel_sdvo_connector->tv_format, 0);
2405 return true; 2482 return true;
2406 2483
@@ -2416,7 +2493,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
2416 intel_sdvo_connector->name = \ 2493 intel_sdvo_connector->name = \
2417 drm_property_create_range(dev, 0, #name, 0, data_value[0]); \ 2494 drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
2418 if (!intel_sdvo_connector->name) return false; \ 2495 if (!intel_sdvo_connector->name) return false; \
2419 drm_connector_attach_property(connector, \ 2496 drm_object_attach_property(&connector->base, \
2420 intel_sdvo_connector->name, \ 2497 intel_sdvo_connector->name, \
2421 intel_sdvo_connector->cur_##name); \ 2498 intel_sdvo_connector->cur_##name); \
2422 DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \ 2499 DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
@@ -2453,7 +2530,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
2453 if (!intel_sdvo_connector->left) 2530 if (!intel_sdvo_connector->left)
2454 return false; 2531 return false;
2455 2532
2456 drm_connector_attach_property(connector, 2533 drm_object_attach_property(&connector->base,
2457 intel_sdvo_connector->left, 2534 intel_sdvo_connector->left,
2458 intel_sdvo_connector->left_margin); 2535 intel_sdvo_connector->left_margin);
2459 2536
@@ -2462,7 +2539,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
2462 if (!intel_sdvo_connector->right) 2539 if (!intel_sdvo_connector->right)
2463 return false; 2540 return false;
2464 2541
2465 drm_connector_attach_property(connector, 2542 drm_object_attach_property(&connector->base,
2466 intel_sdvo_connector->right, 2543 intel_sdvo_connector->right,
2467 intel_sdvo_connector->right_margin); 2544 intel_sdvo_connector->right_margin);
2468 DRM_DEBUG_KMS("h_overscan: max %d, " 2545 DRM_DEBUG_KMS("h_overscan: max %d, "
@@ -2490,7 +2567,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
2490 if (!intel_sdvo_connector->top) 2567 if (!intel_sdvo_connector->top)
2491 return false; 2568 return false;
2492 2569
2493 drm_connector_attach_property(connector, 2570 drm_object_attach_property(&connector->base,
2494 intel_sdvo_connector->top, 2571 intel_sdvo_connector->top,
2495 intel_sdvo_connector->top_margin); 2572 intel_sdvo_connector->top_margin);
2496 2573
@@ -2500,7 +2577,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
2500 if (!intel_sdvo_connector->bottom) 2577 if (!intel_sdvo_connector->bottom)
2501 return false; 2578 return false;
2502 2579
2503 drm_connector_attach_property(connector, 2580 drm_object_attach_property(&connector->base,
2504 intel_sdvo_connector->bottom, 2581 intel_sdvo_connector->bottom,
2505 intel_sdvo_connector->bottom_margin); 2582 intel_sdvo_connector->bottom_margin);
2506 DRM_DEBUG_KMS("v_overscan: max %d, " 2583 DRM_DEBUG_KMS("v_overscan: max %d, "
@@ -2532,7 +2609,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
2532 if (!intel_sdvo_connector->dot_crawl) 2609 if (!intel_sdvo_connector->dot_crawl)
2533 return false; 2610 return false;
2534 2611
2535 drm_connector_attach_property(connector, 2612 drm_object_attach_property(&connector->base,
2536 intel_sdvo_connector->dot_crawl, 2613 intel_sdvo_connector->dot_crawl,
2537 intel_sdvo_connector->cur_dot_crawl); 2614 intel_sdvo_connector->cur_dot_crawl);
2538 DRM_DEBUG_KMS("dot crawl: current %d\n", response); 2615 DRM_DEBUG_KMS("dot crawl: current %d\n", response);
@@ -2636,10 +2713,8 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2636 intel_sdvo->is_sdvob = is_sdvob; 2713 intel_sdvo->is_sdvob = is_sdvob;
2637 intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1; 2714 intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
2638 intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg); 2715 intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
2639 if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) { 2716 if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev))
2640 kfree(intel_sdvo); 2717 goto err_i2c_bus;
2641 return false;
2642 }
2643 2718
2644 /* encoder type will be decided later */ 2719 /* encoder type will be decided later */
2645 intel_encoder = &intel_sdvo->base; 2720 intel_encoder = &intel_sdvo->base;
@@ -2683,9 +2758,20 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2683 intel_sdvo->caps.output_flags) != true) { 2758 intel_sdvo->caps.output_flags) != true) {
2684 DRM_DEBUG_KMS("SDVO output failed to setup on %s\n", 2759 DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
2685 SDVO_NAME(intel_sdvo)); 2760 SDVO_NAME(intel_sdvo));
2686 goto err; 2761 /* Output_setup can leave behind connectors! */
2762 goto err_output;
2687 } 2763 }
2688 2764
2765 /*
2766 * Cloning SDVO with anything is often impossible, since the SDVO
2767 * encoder can request a special input timing mode. And even if that's
2768 * not the case we have evidence that cloning a plain unscaled mode with
2769 * VGA doesn't really work. Furthermore the cloning flags are way too
2770 * simplistic anyway to express such constraints, so just give up on
2771 * cloning for SDVO encoders.
2772 */
2773 intel_sdvo->base.cloneable = false;
2774
2689 /* Only enable the hotplug irq if we need it, to work around noisy 2775 /* Only enable the hotplug irq if we need it, to work around noisy
2690 * hotplug lines. 2776 * hotplug lines.
2691 */ 2777 */
@@ -2696,12 +2782,12 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2696 2782
2697 /* Set the input timing to the screen. Assume always input 0. */ 2783 /* Set the input timing to the screen. Assume always input 0. */
2698 if (!intel_sdvo_set_target_input(intel_sdvo)) 2784 if (!intel_sdvo_set_target_input(intel_sdvo))
2699 goto err; 2785 goto err_output;
2700 2786
2701 if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo, 2787 if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo,
2702 &intel_sdvo->pixel_clock_min, 2788 &intel_sdvo->pixel_clock_min,
2703 &intel_sdvo->pixel_clock_max)) 2789 &intel_sdvo->pixel_clock_max))
2704 goto err; 2790 goto err_output;
2705 2791
2706 DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, " 2792 DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
2707 "clock range %dMHz - %dMHz, " 2793 "clock range %dMHz - %dMHz, "
@@ -2721,9 +2807,14 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2721 (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N'); 2807 (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
2722 return true; 2808 return true;
2723 2809
2810err_output:
2811 intel_sdvo_output_cleanup(intel_sdvo);
2812
2724err: 2813err:
2725 drm_encoder_cleanup(&intel_encoder->base); 2814 drm_encoder_cleanup(&intel_encoder->base);
2726 i2c_del_adapter(&intel_sdvo->ddc); 2815 i2c_del_adapter(&intel_sdvo->ddc);
2816err_i2c_bus:
2817 intel_sdvo_unselect_i2c_bus(intel_sdvo);
2727 kfree(intel_sdvo); 2818 kfree(intel_sdvo);
2728 2819
2729 return false; 2820 return false;
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index 9d030142ee43..770bdd6ecd9f 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -708,6 +708,8 @@ struct intel_sdvo_enhancements_arg {
708#define SDVO_CMD_SET_AUDIO_STAT 0x91 708#define SDVO_CMD_SET_AUDIO_STAT 0x91
709#define SDVO_CMD_GET_AUDIO_STAT 0x92 709#define SDVO_CMD_GET_AUDIO_STAT 0x92
710#define SDVO_CMD_SET_HBUF_INDEX 0x93 710#define SDVO_CMD_SET_HBUF_INDEX 0x93
711 #define SDVO_HBUF_INDEX_ELD 0
712 #define SDVO_HBUF_INDEX_AVI_IF 1
711#define SDVO_CMD_GET_HBUF_INDEX 0x94 713#define SDVO_CMD_GET_HBUF_INDEX 0x94
712#define SDVO_CMD_GET_HBUF_INFO 0x95 714#define SDVO_CMD_GET_HBUF_INFO 0x95
713#define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96 715#define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 82f5e5c7009d..d7b060e0a231 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -48,7 +48,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
48 struct intel_plane *intel_plane = to_intel_plane(plane); 48 struct intel_plane *intel_plane = to_intel_plane(plane);
49 int pipe = intel_plane->pipe; 49 int pipe = intel_plane->pipe;
50 u32 sprctl, sprscale = 0; 50 u32 sprctl, sprscale = 0;
51 int pixel_size; 51 unsigned long sprsurf_offset, linear_offset;
52 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
52 53
53 sprctl = I915_READ(SPRCTL(pipe)); 54 sprctl = I915_READ(SPRCTL(pipe));
54 55
@@ -61,33 +62,24 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
61 switch (fb->pixel_format) { 62 switch (fb->pixel_format) {
62 case DRM_FORMAT_XBGR8888: 63 case DRM_FORMAT_XBGR8888:
63 sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX; 64 sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
64 pixel_size = 4;
65 break; 65 break;
66 case DRM_FORMAT_XRGB8888: 66 case DRM_FORMAT_XRGB8888:
67 sprctl |= SPRITE_FORMAT_RGBX888; 67 sprctl |= SPRITE_FORMAT_RGBX888;
68 pixel_size = 4;
69 break; 68 break;
70 case DRM_FORMAT_YUYV: 69 case DRM_FORMAT_YUYV:
71 sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV; 70 sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
72 pixel_size = 2;
73 break; 71 break;
74 case DRM_FORMAT_YVYU: 72 case DRM_FORMAT_YVYU:
75 sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU; 73 sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
76 pixel_size = 2;
77 break; 74 break;
78 case DRM_FORMAT_UYVY: 75 case DRM_FORMAT_UYVY:
79 sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY; 76 sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
80 pixel_size = 2;
81 break; 77 break;
82 case DRM_FORMAT_VYUY: 78 case DRM_FORMAT_VYUY:
83 sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY; 79 sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
84 pixel_size = 2;
85 break; 80 break;
86 default: 81 default:
87 DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n"); 82 BUG();
88 sprctl |= SPRITE_FORMAT_RGBX888;
89 pixel_size = 4;
90 break;
91 } 83 }
92 84
93 if (obj->tiling_mode != I915_TILING_NONE) 85 if (obj->tiling_mode != I915_TILING_NONE)
@@ -127,18 +119,27 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
127 119
128 I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); 120 I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
129 I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x); 121 I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
130 if (obj->tiling_mode != I915_TILING_NONE) { 122
123 linear_offset = y * fb->pitches[0] + x * pixel_size;
124 sprsurf_offset =
125 intel_gen4_compute_offset_xtiled(&x, &y,
126 pixel_size, fb->pitches[0]);
127 linear_offset -= sprsurf_offset;
128
129 /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
130 * register */
131 if (IS_HASWELL(dev))
132 I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
133 else if (obj->tiling_mode != I915_TILING_NONE)
131 I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x); 134 I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
132 } else { 135 else
133 unsigned long offset; 136 I915_WRITE(SPRLINOFF(pipe), linear_offset);
134 137
135 offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
136 I915_WRITE(SPRLINOFF(pipe), offset);
137 }
138 I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w); 138 I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
139 I915_WRITE(SPRSCALE(pipe), sprscale); 139 if (intel_plane->can_scale)
140 I915_WRITE(SPRSCALE(pipe), sprscale);
140 I915_WRITE(SPRCTL(pipe), sprctl); 141 I915_WRITE(SPRCTL(pipe), sprctl);
141 I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset); 142 I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
142 POSTING_READ(SPRSURF(pipe)); 143 POSTING_READ(SPRSURF(pipe));
143} 144}
144 145
@@ -152,7 +153,8 @@ ivb_disable_plane(struct drm_plane *plane)
152 153
153 I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE); 154 I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
154 /* Can't leave the scaler enabled... */ 155 /* Can't leave the scaler enabled... */
155 I915_WRITE(SPRSCALE(pipe), 0); 156 if (intel_plane->can_scale)
157 I915_WRITE(SPRSCALE(pipe), 0);
156 /* Activate double buffered register update */ 158 /* Activate double buffered register update */
157 I915_MODIFY_DISPBASE(SPRSURF(pipe), 0); 159 I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
158 POSTING_READ(SPRSURF(pipe)); 160 POSTING_READ(SPRSURF(pipe));
@@ -225,8 +227,10 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
225 struct drm_device *dev = plane->dev; 227 struct drm_device *dev = plane->dev;
226 struct drm_i915_private *dev_priv = dev->dev_private; 228 struct drm_i915_private *dev_priv = dev->dev_private;
227 struct intel_plane *intel_plane = to_intel_plane(plane); 229 struct intel_plane *intel_plane = to_intel_plane(plane);
228 int pipe = intel_plane->pipe, pixel_size; 230 int pipe = intel_plane->pipe;
231 unsigned long dvssurf_offset, linear_offset;
229 u32 dvscntr, dvsscale; 232 u32 dvscntr, dvsscale;
233 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
230 234
231 dvscntr = I915_READ(DVSCNTR(pipe)); 235 dvscntr = I915_READ(DVSCNTR(pipe));
232 236
@@ -239,33 +243,24 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
239 switch (fb->pixel_format) { 243 switch (fb->pixel_format) {
240 case DRM_FORMAT_XBGR8888: 244 case DRM_FORMAT_XBGR8888:
241 dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR; 245 dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR;
242 pixel_size = 4;
243 break; 246 break;
244 case DRM_FORMAT_XRGB8888: 247 case DRM_FORMAT_XRGB8888:
245 dvscntr |= DVS_FORMAT_RGBX888; 248 dvscntr |= DVS_FORMAT_RGBX888;
246 pixel_size = 4;
247 break; 249 break;
248 case DRM_FORMAT_YUYV: 250 case DRM_FORMAT_YUYV:
249 dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV; 251 dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
250 pixel_size = 2;
251 break; 252 break;
252 case DRM_FORMAT_YVYU: 253 case DRM_FORMAT_YVYU:
253 dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU; 254 dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
254 pixel_size = 2;
255 break; 255 break;
256 case DRM_FORMAT_UYVY: 256 case DRM_FORMAT_UYVY:
257 dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY; 257 dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
258 pixel_size = 2;
259 break; 258 break;
260 case DRM_FORMAT_VYUY: 259 case DRM_FORMAT_VYUY:
261 dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY; 260 dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
262 pixel_size = 2;
263 break; 261 break;
264 default: 262 default:
265 DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n"); 263 BUG();
266 dvscntr |= DVS_FORMAT_RGBX888;
267 pixel_size = 4;
268 break;
269 } 264 }
270 265
271 if (obj->tiling_mode != I915_TILING_NONE) 266 if (obj->tiling_mode != I915_TILING_NONE)
@@ -289,18 +284,22 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
289 284
290 I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); 285 I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
291 I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x); 286 I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
292 if (obj->tiling_mode != I915_TILING_NONE) { 287
288 linear_offset = y * fb->pitches[0] + x * pixel_size;
289 dvssurf_offset =
290 intel_gen4_compute_offset_xtiled(&x, &y,
291 pixel_size, fb->pitches[0]);
292 linear_offset -= dvssurf_offset;
293
294 if (obj->tiling_mode != I915_TILING_NONE)
293 I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x); 295 I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
294 } else { 296 else
295 unsigned long offset; 297 I915_WRITE(DVSLINOFF(pipe), linear_offset);
296 298
297 offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
298 I915_WRITE(DVSLINOFF(pipe), offset);
299 }
300 I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w); 299 I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
301 I915_WRITE(DVSSCALE(pipe), dvsscale); 300 I915_WRITE(DVSSCALE(pipe), dvsscale);
302 I915_WRITE(DVSCNTR(pipe), dvscntr); 301 I915_WRITE(DVSCNTR(pipe), dvscntr);
303 I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset); 302 I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
304 POSTING_READ(DVSSURF(pipe)); 303 POSTING_READ(DVSSURF(pipe));
305} 304}
306 305
@@ -422,6 +421,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
422 struct intel_framebuffer *intel_fb; 421 struct intel_framebuffer *intel_fb;
423 struct drm_i915_gem_object *obj, *old_obj; 422 struct drm_i915_gem_object *obj, *old_obj;
424 int pipe = intel_plane->pipe; 423 int pipe = intel_plane->pipe;
424 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
425 pipe);
425 int ret = 0; 426 int ret = 0;
426 int x = src_x >> 16, y = src_y >> 16; 427 int x = src_x >> 16, y = src_y >> 16;
427 int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay; 428 int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
@@ -436,7 +437,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
436 src_h = src_h >> 16; 437 src_h = src_h >> 16;
437 438
438 /* Pipe must be running... */ 439 /* Pipe must be running... */
439 if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE)) 440 if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE))
440 return -EINVAL; 441 return -EINVAL;
441 442
442 if (crtc_x >= primary_w || crtc_y >= primary_h) 443 if (crtc_x >= primary_w || crtc_y >= primary_h)
@@ -446,6 +447,15 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
446 if (intel_plane->pipe != intel_crtc->pipe) 447 if (intel_plane->pipe != intel_crtc->pipe)
447 return -EINVAL; 448 return -EINVAL;
448 449
450 /* Sprite planes can be linear or x-tiled surfaces */
451 switch (obj->tiling_mode) {
452 case I915_TILING_NONE:
453 case I915_TILING_X:
454 break;
455 default:
456 return -EINVAL;
457 }
458
449 /* 459 /*
450 * Clamp the width & height into the visible area. Note we don't 460 * Clamp the width & height into the visible area. Note we don't
451 * try to scale the source if part of the visible region is offscreen. 461 * try to scale the source if part of the visible region is offscreen.
@@ -473,6 +483,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
473 goto out; 483 goto out;
474 484
475 /* 485 /*
486 * We may not have a scaler, eg. HSW does not have it any more
487 */
488 if (!intel_plane->can_scale && (crtc_w != src_w || crtc_h != src_h))
489 return -EINVAL;
490
491 /*
476 * We can take a larger source and scale it down, but 492 * We can take a larger source and scale it down, but
477 * only so much... 16x is the max on SNB. 493 * only so much... 16x is the max on SNB.
478 */ 494 */
@@ -665,6 +681,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
665 switch (INTEL_INFO(dev)->gen) { 681 switch (INTEL_INFO(dev)->gen) {
666 case 5: 682 case 5:
667 case 6: 683 case 6:
684 intel_plane->can_scale = true;
668 intel_plane->max_downscale = 16; 685 intel_plane->max_downscale = 16;
669 intel_plane->update_plane = ilk_update_plane; 686 intel_plane->update_plane = ilk_update_plane;
670 intel_plane->disable_plane = ilk_disable_plane; 687 intel_plane->disable_plane = ilk_disable_plane;
@@ -681,6 +698,10 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
681 break; 698 break;
682 699
683 case 7: 700 case 7:
701 if (IS_HASWELL(dev) || IS_VALLEYVIEW(dev))
702 intel_plane->can_scale = false;
703 else
704 intel_plane->can_scale = true;
684 intel_plane->max_downscale = 2; 705 intel_plane->max_downscale = 2;
685 intel_plane->update_plane = ivb_update_plane; 706 intel_plane->update_plane = ivb_update_plane;
686 intel_plane->disable_plane = ivb_disable_plane; 707 intel_plane->disable_plane = ivb_disable_plane;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 62bb048c135e..ea93520c1278 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1088,13 +1088,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1088 int dspcntr_reg = DSPCNTR(intel_crtc->plane); 1088 int dspcntr_reg = DSPCNTR(intel_crtc->plane);
1089 int pipeconf = I915_READ(pipeconf_reg); 1089 int pipeconf = I915_READ(pipeconf_reg);
1090 int dspcntr = I915_READ(dspcntr_reg); 1090 int dspcntr = I915_READ(dspcntr_reg);
1091 int dspbase_reg = DSPADDR(intel_crtc->plane);
1092 int xpos = 0x0, ypos = 0x0; 1091 int xpos = 0x0, ypos = 0x0;
1093 unsigned int xsize, ysize; 1092 unsigned int xsize, ysize;
1094 /* Pipe must be off here */ 1093 /* Pipe must be off here */
1095 I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE); 1094 I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
1096 /* Flush the plane changes */ 1095 intel_flush_display_plane(dev_priv, intel_crtc->plane);
1097 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
1098 1096
1099 /* Wait for vblank for the disable to take effect */ 1097 /* Wait for vblank for the disable to take effect */
1100 if (IS_GEN2(dev)) 1098 if (IS_GEN2(dev))
@@ -1123,8 +1121,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1123 1121
1124 I915_WRITE(pipeconf_reg, pipeconf); 1122 I915_WRITE(pipeconf_reg, pipeconf);
1125 I915_WRITE(dspcntr_reg, dspcntr); 1123 I915_WRITE(dspcntr_reg, dspcntr);
1126 /* Flush the plane changes */ 1124 intel_flush_display_plane(dev_priv, intel_crtc->plane);
1127 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
1128 } 1125 }
1129 1126
1130 j = 0; 1127 j = 0;
@@ -1292,7 +1289,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
1292 } 1289 }
1293 1290
1294 intel_tv->tv_format = tv_mode->name; 1291 intel_tv->tv_format = tv_mode->name;
1295 drm_connector_property_set_value(connector, 1292 drm_object_property_set_value(&connector->base,
1296 connector->dev->mode_config.tv_mode_property, i); 1293 connector->dev->mode_config.tv_mode_property, i);
1297} 1294}
1298 1295
@@ -1446,7 +1443,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
1446 int ret = 0; 1443 int ret = 0;
1447 bool changed = false; 1444 bool changed = false;
1448 1445
1449 ret = drm_connector_property_set_value(connector, property, val); 1446 ret = drm_object_property_set_value(&connector->base, property, val);
1450 if (ret < 0) 1447 if (ret < 0)
1451 goto out; 1448 goto out;
1452 1449
@@ -1658,18 +1655,18 @@ intel_tv_init(struct drm_device *dev)
1658 ARRAY_SIZE(tv_modes), 1655 ARRAY_SIZE(tv_modes),
1659 tv_format_names); 1656 tv_format_names);
1660 1657
1661 drm_connector_attach_property(connector, dev->mode_config.tv_mode_property, 1658 drm_object_attach_property(&connector->base, dev->mode_config.tv_mode_property,
1662 initial_mode); 1659 initial_mode);
1663 drm_connector_attach_property(connector, 1660 drm_object_attach_property(&connector->base,
1664 dev->mode_config.tv_left_margin_property, 1661 dev->mode_config.tv_left_margin_property,
1665 intel_tv->margin[TV_MARGIN_LEFT]); 1662 intel_tv->margin[TV_MARGIN_LEFT]);
1666 drm_connector_attach_property(connector, 1663 drm_object_attach_property(&connector->base,
1667 dev->mode_config.tv_top_margin_property, 1664 dev->mode_config.tv_top_margin_property,
1668 intel_tv->margin[TV_MARGIN_TOP]); 1665 intel_tv->margin[TV_MARGIN_TOP]);
1669 drm_connector_attach_property(connector, 1666 drm_object_attach_property(&connector->base,
1670 dev->mode_config.tv_right_margin_property, 1667 dev->mode_config.tv_right_margin_property,
1671 intel_tv->margin[TV_MARGIN_RIGHT]); 1668 intel_tv->margin[TV_MARGIN_RIGHT]);
1672 drm_connector_attach_property(connector, 1669 drm_object_attach_property(&connector->base,
1673 dev->mode_config.tv_bottom_margin_property, 1670 dev->mode_config.tv_bottom_margin_property,
1674 intel_tv->margin[TV_MARGIN_BOTTOM]); 1671 intel_tv->margin[TV_MARGIN_BOTTOM]);
1675 drm_sysfs_connector_add(connector); 1672 drm_sysfs_connector_add(connector);
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 1e910117b0a2..122b571ccc7c 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -60,8 +60,7 @@ static void mgag200_kick_out_firmware_fb(struct pci_dev *pdev)
60} 60}
61 61
62 62
63static int __devinit 63static int mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
64mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
65{ 64{
66 mgag200_kick_out_firmware_fb(pdev); 65 mgag200_kick_out_firmware_fb(pdev);
67 66
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index d6a1aae33701..70dd3c5529d4 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -133,6 +133,8 @@ static int mga_vram_init(struct mga_device *mdev)
133{ 133{
134 void __iomem *mem; 134 void __iomem *mem;
135 struct apertures_struct *aper = alloc_apertures(1); 135 struct apertures_struct *aper = alloc_apertures(1);
136 if (!aper)
137 return -ENOMEM;
136 138
137 /* BAR 0 is VRAM */ 139 /* BAR 0 is VRAM */
138 mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0); 140 mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0);
@@ -140,9 +142,9 @@ static int mga_vram_init(struct mga_device *mdev)
140 142
141 aper->ranges[0].base = mdev->mc.vram_base; 143 aper->ranges[0].base = mdev->mc.vram_base;
142 aper->ranges[0].size = mdev->mc.vram_window; 144 aper->ranges[0].size = mdev->mc.vram_window;
143 aper->count = 1;
144 145
145 remove_conflicting_framebuffers(aper, "mgafb", true); 146 remove_conflicting_framebuffers(aper, "mgafb", true);
147 kfree(aper);
146 148
147 if (!request_mem_region(mdev->mc.vram_base, mdev->mc.vram_window, 149 if (!request_mem_region(mdev->mc.vram_base, mdev->mc.vram_window,
148 "mgadrmfb_vram")) { 150 "mgadrmfb_vram")) {
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 1504699666c4..8fc9d9201945 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -186,11 +186,11 @@ static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_r
186 186
187static int mgag200_bo_move(struct ttm_buffer_object *bo, 187static int mgag200_bo_move(struct ttm_buffer_object *bo,
188 bool evict, bool interruptible, 188 bool evict, bool interruptible,
189 bool no_wait_reserve, bool no_wait_gpu, 189 bool no_wait_gpu,
190 struct ttm_mem_reg *new_mem) 190 struct ttm_mem_reg *new_mem)
191{ 191{
192 int r; 192 int r;
193 r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 193 r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
194 return r; 194 return r;
195} 195}
196 196
@@ -355,7 +355,7 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,
355 355
356 ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size, 356 ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size,
357 ttm_bo_type_device, &mgabo->placement, 357 ttm_bo_type_device, &mgabo->placement,
358 align >> PAGE_SHIFT, 0, false, NULL, acc_size, 358 align >> PAGE_SHIFT, false, NULL, acc_size,
359 NULL, mgag200_bo_ttm_destroy); 359 NULL, mgag200_bo_ttm_destroy);
360 if (ret) 360 if (ret)
361 return ret; 361 return ret;
@@ -382,7 +382,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
382 mgag200_ttm_placement(bo, pl_flag); 382 mgag200_ttm_placement(bo, pl_flag);
383 for (i = 0; i < bo->placement.num_placement; i++) 383 for (i = 0; i < bo->placement.num_placement; i++)
384 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 384 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
385 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 385 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
386 if (ret) 386 if (ret)
387 return ret; 387 return ret;
388 388
@@ -405,7 +405,7 @@ int mgag200_bo_unpin(struct mgag200_bo *bo)
405 405
406 for (i = 0; i < bo->placement.num_placement ; i++) 406 for (i = 0; i < bo->placement.num_placement ; i++)
407 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; 407 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
408 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 408 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
409 if (ret) 409 if (ret)
410 return ret; 410 return ret;
411 411
@@ -430,7 +430,7 @@ int mgag200_bo_push_sysram(struct mgag200_bo *bo)
430 for (i = 0; i < bo->placement.num_placement ; i++) 430 for (i = 0; i < bo->placement.num_placement ; i++)
431 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 431 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
432 432
433 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 433 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
434 if (ret) { 434 if (ret) {
435 DRM_ERROR("pushing to VRAM failed\n"); 435 DRM_ERROR("pushing to VRAM failed\n");
436 return ret; 436 return ret;
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index a990df4d6c04..ab25752a0b1e 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -11,6 +11,7 @@ nouveau-y := core/core/client.o
11nouveau-y += core/core/engctx.o 11nouveau-y += core/core/engctx.o
12nouveau-y += core/core/engine.o 12nouveau-y += core/core/engine.o
13nouveau-y += core/core/enum.o 13nouveau-y += core/core/enum.o
14nouveau-y += core/core/falcon.o
14nouveau-y += core/core/gpuobj.o 15nouveau-y += core/core/gpuobj.o
15nouveau-y += core/core/handle.o 16nouveau-y += core/core/handle.o
16nouveau-y += core/core/mm.o 17nouveau-y += core/core/mm.o
@@ -29,6 +30,7 @@ nouveau-y += core/subdev/bios/base.o
29nouveau-y += core/subdev/bios/bit.o 30nouveau-y += core/subdev/bios/bit.o
30nouveau-y += core/subdev/bios/conn.o 31nouveau-y += core/subdev/bios/conn.o
31nouveau-y += core/subdev/bios/dcb.o 32nouveau-y += core/subdev/bios/dcb.o
33nouveau-y += core/subdev/bios/disp.o
32nouveau-y += core/subdev/bios/dp.o 34nouveau-y += core/subdev/bios/dp.o
33nouveau-y += core/subdev/bios/extdev.o 35nouveau-y += core/subdev/bios/extdev.o
34nouveau-y += core/subdev/bios/gpio.o 36nouveau-y += core/subdev/bios/gpio.o
@@ -64,9 +66,19 @@ nouveau-y += core/subdev/devinit/nv50.o
64nouveau-y += core/subdev/fb/base.o 66nouveau-y += core/subdev/fb/base.o
65nouveau-y += core/subdev/fb/nv04.o 67nouveau-y += core/subdev/fb/nv04.o
66nouveau-y += core/subdev/fb/nv10.o 68nouveau-y += core/subdev/fb/nv10.o
69nouveau-y += core/subdev/fb/nv1a.o
67nouveau-y += core/subdev/fb/nv20.o 70nouveau-y += core/subdev/fb/nv20.o
71nouveau-y += core/subdev/fb/nv25.o
68nouveau-y += core/subdev/fb/nv30.o 72nouveau-y += core/subdev/fb/nv30.o
73nouveau-y += core/subdev/fb/nv35.o
74nouveau-y += core/subdev/fb/nv36.o
69nouveau-y += core/subdev/fb/nv40.o 75nouveau-y += core/subdev/fb/nv40.o
76nouveau-y += core/subdev/fb/nv41.o
77nouveau-y += core/subdev/fb/nv44.o
78nouveau-y += core/subdev/fb/nv46.o
79nouveau-y += core/subdev/fb/nv47.o
80nouveau-y += core/subdev/fb/nv49.o
81nouveau-y += core/subdev/fb/nv4e.o
70nouveau-y += core/subdev/fb/nv50.o 82nouveau-y += core/subdev/fb/nv50.o
71nouveau-y += core/subdev/fb/nvc0.o 83nouveau-y += core/subdev/fb/nvc0.o
72nouveau-y += core/subdev/gpio/base.o 84nouveau-y += core/subdev/gpio/base.o
@@ -111,7 +123,10 @@ nouveau-y += core/engine/dmaobj/base.o
111nouveau-y += core/engine/dmaobj/nv04.o 123nouveau-y += core/engine/dmaobj/nv04.o
112nouveau-y += core/engine/dmaobj/nv50.o 124nouveau-y += core/engine/dmaobj/nv50.o
113nouveau-y += core/engine/dmaobj/nvc0.o 125nouveau-y += core/engine/dmaobj/nvc0.o
126nouveau-y += core/engine/dmaobj/nvd0.o
114nouveau-y += core/engine/bsp/nv84.o 127nouveau-y += core/engine/bsp/nv84.o
128nouveau-y += core/engine/bsp/nvc0.o
129nouveau-y += core/engine/bsp/nve0.o
115nouveau-y += core/engine/copy/nva3.o 130nouveau-y += core/engine/copy/nva3.o
116nouveau-y += core/engine/copy/nvc0.o 131nouveau-y += core/engine/copy/nvc0.o
117nouveau-y += core/engine/copy/nve0.o 132nouveau-y += core/engine/copy/nve0.o
@@ -119,7 +134,21 @@ nouveau-y += core/engine/crypt/nv84.o
119nouveau-y += core/engine/crypt/nv98.o 134nouveau-y += core/engine/crypt/nv98.o
120nouveau-y += core/engine/disp/nv04.o 135nouveau-y += core/engine/disp/nv04.o
121nouveau-y += core/engine/disp/nv50.o 136nouveau-y += core/engine/disp/nv50.o
137nouveau-y += core/engine/disp/nv84.o
138nouveau-y += core/engine/disp/nv94.o
139nouveau-y += core/engine/disp/nva0.o
140nouveau-y += core/engine/disp/nva3.o
122nouveau-y += core/engine/disp/nvd0.o 141nouveau-y += core/engine/disp/nvd0.o
142nouveau-y += core/engine/disp/nve0.o
143nouveau-y += core/engine/disp/dacnv50.o
144nouveau-y += core/engine/disp/hdanva3.o
145nouveau-y += core/engine/disp/hdanvd0.o
146nouveau-y += core/engine/disp/hdminv84.o
147nouveau-y += core/engine/disp/hdminva3.o
148nouveau-y += core/engine/disp/hdminvd0.o
149nouveau-y += core/engine/disp/sornv50.o
150nouveau-y += core/engine/disp/sornv94.o
151nouveau-y += core/engine/disp/sornvd0.o
123nouveau-y += core/engine/disp/vga.o 152nouveau-y += core/engine/disp/vga.o
124nouveau-y += core/engine/fifo/base.o 153nouveau-y += core/engine/fifo/base.o
125nouveau-y += core/engine/fifo/nv04.o 154nouveau-y += core/engine/fifo/nv04.o
@@ -151,11 +180,14 @@ nouveau-y += core/engine/mpeg/nv40.o
151nouveau-y += core/engine/mpeg/nv50.o 180nouveau-y += core/engine/mpeg/nv50.o
152nouveau-y += core/engine/mpeg/nv84.o 181nouveau-y += core/engine/mpeg/nv84.o
153nouveau-y += core/engine/ppp/nv98.o 182nouveau-y += core/engine/ppp/nv98.o
183nouveau-y += core/engine/ppp/nvc0.o
154nouveau-y += core/engine/software/nv04.o 184nouveau-y += core/engine/software/nv04.o
155nouveau-y += core/engine/software/nv10.o 185nouveau-y += core/engine/software/nv10.o
156nouveau-y += core/engine/software/nv50.o 186nouveau-y += core/engine/software/nv50.o
157nouveau-y += core/engine/software/nvc0.o 187nouveau-y += core/engine/software/nvc0.o
158nouveau-y += core/engine/vp/nv84.o 188nouveau-y += core/engine/vp/nv84.o
189nouveau-y += core/engine/vp/nvc0.o
190nouveau-y += core/engine/vp/nve0.o
159 191
160# drm/core 192# drm/core
161nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o 193nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
@@ -166,7 +198,7 @@ nouveau-y += nv04_fence.o nv10_fence.o nv50_fence.o nv84_fence.o nvc0_fence.o
166 198
167# drm/kms 199# drm/kms
168nouveau-y += nouveau_bios.o nouveau_fbcon.o nouveau_display.o 200nouveau-y += nouveau_bios.o nouveau_fbcon.o nouveau_display.o
169nouveau-y += nouveau_connector.o nouveau_hdmi.o nouveau_dp.o 201nouveau-y += nouveau_connector.o nouveau_dp.o
170nouveau-y += nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o 202nouveau-y += nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o
171 203
172# drm/kms/nv04:nv50 204# drm/kms/nv04:nv50
@@ -175,9 +207,7 @@ nouveau-y += nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o
175nouveau-y += nv04_crtc.o nv04_display.o nv04_cursor.o 207nouveau-y += nv04_crtc.o nv04_display.o nv04_cursor.o
176 208
177# drm/kms/nv50- 209# drm/kms/nv50-
178nouveau-y += nv50_display.o nvd0_display.o 210nouveau-y += nv50_display.o
179nouveau-y += nv50_crtc.o nv50_dac.o nv50_sor.o nv50_cursor.o
180nouveau-y += nv50_evo.o
181 211
182# drm/pm 212# drm/pm
183nouveau-y += nouveau_pm.o nouveau_volt.o nouveau_perf.o 213nouveau-y += nouveau_pm.o nouveau_volt.o nouveau_perf.o
diff --git a/drivers/gpu/drm/nouveau/core/core/client.c b/drivers/gpu/drm/nouveau/core/core/client.c
index c617f0480071..8bbb58f94a19 100644
--- a/drivers/gpu/drm/nouveau/core/core/client.c
+++ b/drivers/gpu/drm/nouveau/core/core/client.c
@@ -66,10 +66,8 @@ nouveau_client_create_(const char *name, u64 devname, const char *cfg,
66 66
67 ret = nouveau_handle_create(nv_object(client), ~0, ~0, 67 ret = nouveau_handle_create(nv_object(client), ~0, ~0,
68 nv_object(client), &client->root); 68 nv_object(client), &client->root);
69 if (ret) { 69 if (ret)
70 nouveau_namedb_destroy(&client->base);
71 return ret; 70 return ret;
72 }
73 71
74 /* prevent init/fini being called, os in in charge of this */ 72 /* prevent init/fini being called, os in in charge of this */
75 atomic_set(&nv_object(client)->usecount, 2); 73 atomic_set(&nv_object(client)->usecount, 2);
diff --git a/drivers/gpu/drm/nouveau/core/core/engctx.c b/drivers/gpu/drm/nouveau/core/core/engctx.c
index e41b10d5eb59..84c71fad2b6c 100644
--- a/drivers/gpu/drm/nouveau/core/core/engctx.c
+++ b/drivers/gpu/drm/nouveau/core/core/engctx.c
@@ -189,6 +189,21 @@ nouveau_engctx_fini(struct nouveau_engctx *engctx, bool suspend)
189 return nouveau_gpuobj_fini(&engctx->base, suspend); 189 return nouveau_gpuobj_fini(&engctx->base, suspend);
190} 190}
191 191
192int
193_nouveau_engctx_ctor(struct nouveau_object *parent,
194 struct nouveau_object *engine,
195 struct nouveau_oclass *oclass, void *data, u32 size,
196 struct nouveau_object **pobject)
197{
198 struct nouveau_engctx *engctx;
199 int ret;
200
201 ret = nouveau_engctx_create(parent, engine, oclass, NULL, 256, 256,
202 NVOBJ_FLAG_ZERO_ALLOC, &engctx);
203 *pobject = nv_object(engctx);
204 return ret;
205}
206
192void 207void
193_nouveau_engctx_dtor(struct nouveau_object *object) 208_nouveau_engctx_dtor(struct nouveau_object *object)
194{ 209{
diff --git a/drivers/gpu/drm/nouveau/core/core/falcon.c b/drivers/gpu/drm/nouveau/core/core/falcon.c
new file mode 100644
index 000000000000..6b0843c33877
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/falcon.c
@@ -0,0 +1,247 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include <core/falcon.h>
24
25#include <subdev/timer.h>
26
27u32
28_nouveau_falcon_rd32(struct nouveau_object *object, u64 addr)
29{
30 struct nouveau_falcon *falcon = (void *)object;
31 return nv_rd32(falcon, falcon->addr + addr);
32}
33
34void
35_nouveau_falcon_wr32(struct nouveau_object *object, u64 addr, u32 data)
36{
37 struct nouveau_falcon *falcon = (void *)object;
38 nv_wr32(falcon, falcon->addr + addr, data);
39}
40
41int
42_nouveau_falcon_init(struct nouveau_object *object)
43{
44 struct nouveau_device *device = nv_device(object);
45 struct nouveau_falcon *falcon = (void *)object;
46 const struct firmware *fw;
47 char name[32] = "internal";
48 int ret, i;
49 u32 caps;
50
51 /* enable engine, and determine its capabilities */
52 ret = nouveau_engine_init(&falcon->base);
53 if (ret)
54 return ret;
55
56 if (device->chipset < 0xa3 ||
57 device->chipset == 0xaa || device->chipset == 0xac) {
58 falcon->version = 0;
59 falcon->secret = (falcon->addr == 0x087000) ? 1 : 0;
60 } else {
61 caps = nv_ro32(falcon, 0x12c);
62 falcon->version = (caps & 0x0000000f);
63 falcon->secret = (caps & 0x00000030) >> 4;
64 }
65
66 caps = nv_ro32(falcon, 0x108);
67 falcon->code.limit = (caps & 0x000001ff) << 8;
68 falcon->data.limit = (caps & 0x0003fe00) >> 1;
69
70 nv_debug(falcon, "falcon version: %d\n", falcon->version);
71 nv_debug(falcon, "secret level: %d\n", falcon->secret);
72 nv_debug(falcon, "code limit: %d\n", falcon->code.limit);
73 nv_debug(falcon, "data limit: %d\n", falcon->data.limit);
74
75 /* wait for 'uc halted' to be signalled before continuing */
76 if (falcon->secret) {
77 nv_wait(falcon, 0x008, 0x00000010, 0x00000010);
78 nv_wo32(falcon, 0x004, 0x00000010);
79 }
80
81 /* disable all interrupts */
82 nv_wo32(falcon, 0x014, 0xffffffff);
83
84 /* no default ucode provided by the engine implementation, try and
85 * locate a "self-bootstrapping" firmware image for the engine
86 */
87 if (!falcon->code.data) {
88 snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x",
89 device->chipset, falcon->addr >> 12);
90
91 ret = request_firmware(&fw, name, &device->pdev->dev);
92 if (ret == 0) {
93 falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
94 falcon->code.size = fw->size;
95 falcon->data.data = NULL;
96 falcon->data.size = 0;
97 release_firmware(fw);
98 }
99
100 falcon->external = true;
101 }
102
103 /* next step is to try and load "static code/data segment" firmware
104 * images for the engine
105 */
106 if (!falcon->code.data) {
107 snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd",
108 device->chipset, falcon->addr >> 12);
109
110 ret = request_firmware(&fw, name, &device->pdev->dev);
111 if (ret) {
112 nv_error(falcon, "unable to load firmware data\n");
113 return ret;
114 }
115
116 falcon->data.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
117 falcon->data.size = fw->size;
118 release_firmware(fw);
119 if (!falcon->data.data)
120 return -ENOMEM;
121
122 snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc",
123 device->chipset, falcon->addr >> 12);
124
125 ret = request_firmware(&fw, name, &device->pdev->dev);
126 if (ret) {
127 nv_error(falcon, "unable to load firmware code\n");
128 return ret;
129 }
130
131 falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
132 falcon->code.size = fw->size;
133 release_firmware(fw);
134 if (!falcon->code.data)
135 return -ENOMEM;
136 }
137
138 nv_debug(falcon, "firmware: %s (%s)\n", name, falcon->data.data ?
139 "static code/data segments" : "self-bootstrapping");
140
141 /* ensure any "self-bootstrapping" firmware image is in vram */
142 if (!falcon->data.data && !falcon->core) {
143 ret = nouveau_gpuobj_new(object->parent, NULL,
144 falcon->code.size, 256, 0,
145 &falcon->core);
146 if (ret) {
147 nv_error(falcon, "core allocation failed, %d\n", ret);
148 return ret;
149 }
150
151 for (i = 0; i < falcon->code.size; i += 4)
152 nv_wo32(falcon->core, i, falcon->code.data[i / 4]);
153 }
154
155 /* upload firmware bootloader (or the full code segments) */
156 if (falcon->core) {
157 if (device->card_type < NV_C0)
158 nv_wo32(falcon, 0x618, 0x04000000);
159 else
160 nv_wo32(falcon, 0x618, 0x00000114);
161 nv_wo32(falcon, 0x11c, 0);
162 nv_wo32(falcon, 0x110, falcon->core->addr >> 8);
163 nv_wo32(falcon, 0x114, 0);
164 nv_wo32(falcon, 0x118, 0x00006610);
165 } else {
166 if (falcon->code.size > falcon->code.limit ||
167 falcon->data.size > falcon->data.limit) {
168 nv_error(falcon, "ucode exceeds falcon limit(s)\n");
169 return -EINVAL;
170 }
171
172 if (falcon->version < 3) {
173 nv_wo32(falcon, 0xff8, 0x00100000);
174 for (i = 0; i < falcon->code.size / 4; i++)
175 nv_wo32(falcon, 0xff4, falcon->code.data[i]);
176 } else {
177 nv_wo32(falcon, 0x180, 0x01000000);
178 for (i = 0; i < falcon->code.size / 4; i++) {
179 if ((i & 0x3f) == 0)
180 nv_wo32(falcon, 0x188, i >> 6);
181 nv_wo32(falcon, 0x184, falcon->code.data[i]);
182 }
183 }
184 }
185
186 /* upload data segment (if necessary), zeroing the remainder */
187 if (falcon->version < 3) {
188 nv_wo32(falcon, 0xff8, 0x00000000);
189 for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
190 nv_wo32(falcon, 0xff4, falcon->data.data[i]);
191 for (; i < falcon->data.limit; i += 4)
192 nv_wo32(falcon, 0xff4, 0x00000000);
193 } else {
194 nv_wo32(falcon, 0x1c0, 0x01000000);
195 for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
196 nv_wo32(falcon, 0x1c4, falcon->data.data[i]);
197 for (; i < falcon->data.limit / 4; i++)
198 nv_wo32(falcon, 0x1c4, 0x00000000);
199 }
200
201 /* start it running */
202 nv_wo32(falcon, 0x10c, 0x00000001); /* BLOCK_ON_FIFO */
203 nv_wo32(falcon, 0x104, 0x00000000); /* ENTRY */
204 nv_wo32(falcon, 0x100, 0x00000002); /* TRIGGER */
205 nv_wo32(falcon, 0x048, 0x00000003); /* FIFO | CHSW */
206 return 0;
207}
208
209int
210_nouveau_falcon_fini(struct nouveau_object *object, bool suspend)
211{
212 struct nouveau_falcon *falcon = (void *)object;
213
214 if (!suspend) {
215 nouveau_gpuobj_ref(NULL, &falcon->core);
216 if (falcon->external) {
217 kfree(falcon->data.data);
218 kfree(falcon->code.data);
219 falcon->code.data = NULL;
220 }
221 }
222
223 nv_mo32(falcon, 0x048, 0x00000003, 0x00000000);
224 nv_wo32(falcon, 0x014, 0xffffffff);
225
226 return nouveau_engine_fini(&falcon->base, suspend);
227}
228
229int
230nouveau_falcon_create_(struct nouveau_object *parent,
231 struct nouveau_object *engine,
232 struct nouveau_oclass *oclass, u32 addr, bool enable,
233 const char *iname, const char *fname,
234 int length, void **pobject)
235{
236 struct nouveau_falcon *falcon;
237 int ret;
238
239 ret = nouveau_engine_create_(parent, engine, oclass, enable, iname,
240 fname, length, pobject);
241 falcon = *pobject;
242 if (ret)
243 return ret;
244
245 falcon->addr = addr;
246 return 0;
247}
diff --git a/drivers/gpu/drm/nouveau/core/core/gpuobj.c b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
index 70586fde69cf..560b2214cf1c 100644
--- a/drivers/gpu/drm/nouveau/core/core/gpuobj.c
+++ b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
@@ -183,7 +183,7 @@ _nouveau_gpuobj_fini(struct nouveau_object *object, bool suspend)
183} 183}
184 184
185u32 185u32
186_nouveau_gpuobj_rd32(struct nouveau_object *object, u32 addr) 186_nouveau_gpuobj_rd32(struct nouveau_object *object, u64 addr)
187{ 187{
188 struct nouveau_gpuobj *gpuobj = nv_gpuobj(object); 188 struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
189 struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent); 189 struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
@@ -193,7 +193,7 @@ _nouveau_gpuobj_rd32(struct nouveau_object *object, u32 addr)
193} 193}
194 194
195void 195void
196_nouveau_gpuobj_wr32(struct nouveau_object *object, u32 addr, u32 data) 196_nouveau_gpuobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
197{ 197{
198 struct nouveau_gpuobj *gpuobj = nv_gpuobj(object); 198 struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
199 struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent); 199 struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
diff --git a/drivers/gpu/drm/nouveau/core/core/handle.c b/drivers/gpu/drm/nouveau/core/core/handle.c
index b8d2cbf8a7a7..264c2b338ac3 100644
--- a/drivers/gpu/drm/nouveau/core/core/handle.c
+++ b/drivers/gpu/drm/nouveau/core/core/handle.c
@@ -109,7 +109,7 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
109 while (!nv_iclass(namedb, NV_NAMEDB_CLASS)) 109 while (!nv_iclass(namedb, NV_NAMEDB_CLASS))
110 namedb = namedb->parent; 110 namedb = namedb->parent;
111 111
112 handle = *phandle = kzalloc(sizeof(*handle), GFP_KERNEL); 112 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
113 if (!handle) 113 if (!handle)
114 return -ENOMEM; 114 return -ENOMEM;
115 115
@@ -146,6 +146,9 @@ nouveau_handle_create(struct nouveau_object *parent, u32 _parent, u32 _handle,
146 } 146 }
147 147
148 hprintk(handle, TRACE, "created\n"); 148 hprintk(handle, TRACE, "created\n");
149
150 *phandle = handle;
151
149 return 0; 152 return 0;
150} 153}
151 154
diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c
index a6d3cd6490f7..0261a11b2ae0 100644
--- a/drivers/gpu/drm/nouveau/core/core/mm.c
+++ b/drivers/gpu/drm/nouveau/core/core/mm.c
@@ -234,15 +234,18 @@ nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
234int 234int
235nouveau_mm_fini(struct nouveau_mm *mm) 235nouveau_mm_fini(struct nouveau_mm *mm)
236{ 236{
237 struct nouveau_mm_node *node, *heap = 237 if (nouveau_mm_initialised(mm)) {
238 list_first_entry(&mm->nodes, struct nouveau_mm_node, nl_entry); 238 struct nouveau_mm_node *node, *heap =
239 int nodes = 0; 239 list_first_entry(&mm->nodes, typeof(*heap), nl_entry);
240 int nodes = 0;
241
242 list_for_each_entry(node, &mm->nodes, nl_entry) {
243 if (WARN_ON(nodes++ == mm->heap_nodes))
244 return -EBUSY;
245 }
240 246
241 list_for_each_entry(node, &mm->nodes, nl_entry) { 247 kfree(heap);
242 if (WARN_ON(nodes++ == mm->heap_nodes))
243 return -EBUSY;
244 } 248 }
245 249
246 kfree(heap);
247 return 0; 250 return 0;
248} 251}
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
index 66f7dfd907ee..1d9f614cb97d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
@@ -22,18 +22,13 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h> 25#include <core/engctx.h>
26#include <core/class.h>
28 27
29#include <engine/bsp.h> 28#include <engine/bsp.h>
30 29
31struct nv84_bsp_priv { 30struct nv84_bsp_priv {
32 struct nouveau_bsp base; 31 struct nouveau_engine base;
33};
34
35struct nv84_bsp_chan {
36 struct nouveau_bsp_chan base;
37}; 32};
38 33
39/******************************************************************************* 34/*******************************************************************************
@@ -49,61 +44,16 @@ nv84_bsp_sclass[] = {
49 * BSP context 44 * BSP context
50 ******************************************************************************/ 45 ******************************************************************************/
51 46
52static int
53nv84_bsp_context_ctor(struct nouveau_object *parent,
54 struct nouveau_object *engine,
55 struct nouveau_oclass *oclass, void *data, u32 size,
56 struct nouveau_object **pobject)
57{
58 struct nv84_bsp_chan *priv;
59 int ret;
60
61 ret = nouveau_bsp_context_create(parent, engine, oclass, NULL,
62 0, 0, 0, &priv);
63 *pobject = nv_object(priv);
64 if (ret)
65 return ret;
66
67 return 0;
68}
69
70static void
71nv84_bsp_context_dtor(struct nouveau_object *object)
72{
73 struct nv84_bsp_chan *priv = (void *)object;
74 nouveau_bsp_context_destroy(&priv->base);
75}
76
77static int
78nv84_bsp_context_init(struct nouveau_object *object)
79{
80 struct nv84_bsp_chan *priv = (void *)object;
81 int ret;
82
83 ret = nouveau_bsp_context_init(&priv->base);
84 if (ret)
85 return ret;
86
87 return 0;
88}
89
90static int
91nv84_bsp_context_fini(struct nouveau_object *object, bool suspend)
92{
93 struct nv84_bsp_chan *priv = (void *)object;
94 return nouveau_bsp_context_fini(&priv->base, suspend);
95}
96
97static struct nouveau_oclass 47static struct nouveau_oclass
98nv84_bsp_cclass = { 48nv84_bsp_cclass = {
99 .handle = NV_ENGCTX(BSP, 0x84), 49 .handle = NV_ENGCTX(BSP, 0x84),
100 .ofuncs = &(struct nouveau_ofuncs) { 50 .ofuncs = &(struct nouveau_ofuncs) {
101 .ctor = nv84_bsp_context_ctor, 51 .ctor = _nouveau_engctx_ctor,
102 .dtor = nv84_bsp_context_dtor, 52 .dtor = _nouveau_engctx_dtor,
103 .init = nv84_bsp_context_init, 53 .init = _nouveau_engctx_init,
104 .fini = nv84_bsp_context_fini, 54 .fini = _nouveau_engctx_fini,
105 .rd32 = _nouveau_bsp_context_rd32, 55 .rd32 = _nouveau_engctx_rd32,
106 .wr32 = _nouveau_bsp_context_wr32, 56 .wr32 = _nouveau_engctx_wr32,
107 }, 57 },
108}; 58};
109 59
@@ -111,11 +61,6 @@ nv84_bsp_cclass = {
111 * BSP engine/subdev functions 61 * BSP engine/subdev functions
112 ******************************************************************************/ 62 ******************************************************************************/
113 63
114static void
115nv84_bsp_intr(struct nouveau_subdev *subdev)
116{
117}
118
119static int 64static int
120nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 65nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
121 struct nouveau_oclass *oclass, void *data, u32 size, 66 struct nouveau_oclass *oclass, void *data, u32 size,
@@ -124,52 +69,25 @@ nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
124 struct nv84_bsp_priv *priv; 69 struct nv84_bsp_priv *priv;
125 int ret; 70 int ret;
126 71
127 ret = nouveau_bsp_create(parent, engine, oclass, &priv); 72 ret = nouveau_engine_create(parent, engine, oclass, true,
73 "PBSP", "bsp", &priv);
128 *pobject = nv_object(priv); 74 *pobject = nv_object(priv);
129 if (ret) 75 if (ret)
130 return ret; 76 return ret;
131 77
132 nv_subdev(priv)->unit = 0x04008000; 78 nv_subdev(priv)->unit = 0x04008000;
133 nv_subdev(priv)->intr = nv84_bsp_intr;
134 nv_engine(priv)->cclass = &nv84_bsp_cclass; 79 nv_engine(priv)->cclass = &nv84_bsp_cclass;
135 nv_engine(priv)->sclass = nv84_bsp_sclass; 80 nv_engine(priv)->sclass = nv84_bsp_sclass;
136 return 0; 81 return 0;
137} 82}
138 83
139static void
140nv84_bsp_dtor(struct nouveau_object *object)
141{
142 struct nv84_bsp_priv *priv = (void *)object;
143 nouveau_bsp_destroy(&priv->base);
144}
145
146static int
147nv84_bsp_init(struct nouveau_object *object)
148{
149 struct nv84_bsp_priv *priv = (void *)object;
150 int ret;
151
152 ret = nouveau_bsp_init(&priv->base);
153 if (ret)
154 return ret;
155
156 return 0;
157}
158
159static int
160nv84_bsp_fini(struct nouveau_object *object, bool suspend)
161{
162 struct nv84_bsp_priv *priv = (void *)object;
163 return nouveau_bsp_fini(&priv->base, suspend);
164}
165
166struct nouveau_oclass 84struct nouveau_oclass
167nv84_bsp_oclass = { 85nv84_bsp_oclass = {
168 .handle = NV_ENGINE(BSP, 0x84), 86 .handle = NV_ENGINE(BSP, 0x84),
169 .ofuncs = &(struct nouveau_ofuncs) { 87 .ofuncs = &(struct nouveau_ofuncs) {
170 .ctor = nv84_bsp_ctor, 88 .ctor = nv84_bsp_ctor,
171 .dtor = nv84_bsp_dtor, 89 .dtor = _nouveau_engine_dtor,
172 .init = nv84_bsp_init, 90 .init = _nouveau_engine_init,
173 .fini = nv84_bsp_fini, 91 .fini = _nouveau_engine_fini,
174 }, 92 },
175}; 93};
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
new file mode 100644
index 000000000000..0a5aa6bb0870
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
@@ -0,0 +1,110 @@
1/*
2 * Copyright 2012 Maarten Lankhorst
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Maarten Lankhorst
23 */
24
25#include <core/falcon.h>
26
27#include <engine/bsp.h>
28
29struct nvc0_bsp_priv {
30 struct nouveau_falcon base;
31};
32
33/*******************************************************************************
34 * BSP object classes
35 ******************************************************************************/
36
37static struct nouveau_oclass
38nvc0_bsp_sclass[] = {
39 { 0x90b1, &nouveau_object_ofuncs },
40 {},
41};
42
43/*******************************************************************************
44 * PBSP context
45 ******************************************************************************/
46
47static struct nouveau_oclass
48nvc0_bsp_cclass = {
49 .handle = NV_ENGCTX(BSP, 0xc0),
50 .ofuncs = &(struct nouveau_ofuncs) {
51 .ctor = _nouveau_falcon_context_ctor,
52 .dtor = _nouveau_falcon_context_dtor,
53 .init = _nouveau_falcon_context_init,
54 .fini = _nouveau_falcon_context_fini,
55 .rd32 = _nouveau_falcon_context_rd32,
56 .wr32 = _nouveau_falcon_context_wr32,
57 },
58};
59
60/*******************************************************************************
61 * PBSP engine/subdev functions
62 ******************************************************************************/
63
64static int
65nvc0_bsp_init(struct nouveau_object *object)
66{
67 struct nvc0_bsp_priv *priv = (void *)object;
68 int ret;
69
70 ret = nouveau_falcon_init(&priv->base);
71 if (ret)
72 return ret;
73
74 nv_wr32(priv, 0x084010, 0x0000fff2);
75 nv_wr32(priv, 0x08401c, 0x0000fff2);
76 return 0;
77}
78
79static int
80nvc0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
81 struct nouveau_oclass *oclass, void *data, u32 size,
82 struct nouveau_object **pobject)
83{
84 struct nvc0_bsp_priv *priv;
85 int ret;
86
87 ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
88 "PBSP", "bsp", &priv);
89 *pobject = nv_object(priv);
90 if (ret)
91 return ret;
92
93 nv_subdev(priv)->unit = 0x00008000;
94 nv_engine(priv)->cclass = &nvc0_bsp_cclass;
95 nv_engine(priv)->sclass = nvc0_bsp_sclass;
96 return 0;
97}
98
99struct nouveau_oclass
100nvc0_bsp_oclass = {
101 .handle = NV_ENGINE(BSP, 0xc0),
102 .ofuncs = &(struct nouveau_ofuncs) {
103 .ctor = nvc0_bsp_ctor,
104 .dtor = _nouveau_falcon_dtor,
105 .init = nvc0_bsp_init,
106 .fini = _nouveau_falcon_fini,
107 .rd32 = _nouveau_falcon_rd32,
108 .wr32 = _nouveau_falcon_wr32,
109 },
110};
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
new file mode 100644
index 000000000000..d4f23bbd75b4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
@@ -0,0 +1,110 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/falcon.h>
26
27#include <engine/bsp.h>
28
29struct nve0_bsp_priv {
30 struct nouveau_falcon base;
31};
32
33/*******************************************************************************
34 * BSP object classes
35 ******************************************************************************/
36
37static struct nouveau_oclass
38nve0_bsp_sclass[] = {
39 { 0x95b1, &nouveau_object_ofuncs },
40 {},
41};
42
43/*******************************************************************************
44 * PBSP context
45 ******************************************************************************/
46
47static struct nouveau_oclass
48nve0_bsp_cclass = {
49 .handle = NV_ENGCTX(BSP, 0xe0),
50 .ofuncs = &(struct nouveau_ofuncs) {
51 .ctor = _nouveau_falcon_context_ctor,
52 .dtor = _nouveau_falcon_context_dtor,
53 .init = _nouveau_falcon_context_init,
54 .fini = _nouveau_falcon_context_fini,
55 .rd32 = _nouveau_falcon_context_rd32,
56 .wr32 = _nouveau_falcon_context_wr32,
57 },
58};
59
60/*******************************************************************************
61 * PBSP engine/subdev functions
62 ******************************************************************************/
63
64static int
65nve0_bsp_init(struct nouveau_object *object)
66{
67 struct nve0_bsp_priv *priv = (void *)object;
68 int ret;
69
70 ret = nouveau_falcon_init(&priv->base);
71 if (ret)
72 return ret;
73
74 nv_wr32(priv, 0x084010, 0x0000fff2);
75 nv_wr32(priv, 0x08401c, 0x0000fff2);
76 return 0;
77}
78
79static int
80nve0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
81 struct nouveau_oclass *oclass, void *data, u32 size,
82 struct nouveau_object **pobject)
83{
84 struct nve0_bsp_priv *priv;
85 int ret;
86
87 ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
88 "PBSP", "bsp", &priv);
89 *pobject = nv_object(priv);
90 if (ret)
91 return ret;
92
93 nv_subdev(priv)->unit = 0x00008000;
94 nv_engine(priv)->cclass = &nve0_bsp_cclass;
95 nv_engine(priv)->sclass = nve0_bsp_sclass;
96 return 0;
97}
98
99struct nouveau_oclass
100nve0_bsp_oclass = {
101 .handle = NV_ENGINE(BSP, 0xe0),
102 .ofuncs = &(struct nouveau_ofuncs) {
103 .ctor = nve0_bsp_ctor,
104 .dtor = _nouveau_falcon_dtor,
105 .init = nve0_bsp_init,
106 .fini = _nouveau_falcon_fini,
107 .rd32 = _nouveau_falcon_rd32,
108 .wr32 = _nouveau_falcon_wr32,
109 },
110};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
index 4df6da0af740..283248c7b050 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
@@ -22,10 +22,9 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/os.h> 25#include <core/falcon.h>
26#include <core/enum.h>
27#include <core/class.h> 26#include <core/class.h>
28#include <core/engctx.h> 27#include <core/enum.h>
29 28
30#include <subdev/fb.h> 29#include <subdev/fb.h>
31#include <subdev/vm.h> 30#include <subdev/vm.h>
@@ -36,11 +35,7 @@
36#include "fuc/nva3.fuc.h" 35#include "fuc/nva3.fuc.h"
37 36
38struct nva3_copy_priv { 37struct nva3_copy_priv {
39 struct nouveau_copy base; 38 struct nouveau_falcon base;
40};
41
42struct nva3_copy_chan {
43 struct nouveau_copy_chan base;
44}; 39};
45 40
46/******************************************************************************* 41/*******************************************************************************
@@ -57,34 +52,16 @@ nva3_copy_sclass[] = {
57 * PCOPY context 52 * PCOPY context
58 ******************************************************************************/ 53 ******************************************************************************/
59 54
60static int
61nva3_copy_context_ctor(struct nouveau_object *parent,
62 struct nouveau_object *engine,
63 struct nouveau_oclass *oclass, void *data, u32 size,
64 struct nouveau_object **pobject)
65{
66 struct nva3_copy_chan *priv;
67 int ret;
68
69 ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256, 0,
70 NVOBJ_FLAG_ZERO_ALLOC, &priv);
71 *pobject = nv_object(priv);
72 if (ret)
73 return ret;
74
75 return 0;
76}
77
78static struct nouveau_oclass 55static struct nouveau_oclass
79nva3_copy_cclass = { 56nva3_copy_cclass = {
80 .handle = NV_ENGCTX(COPY0, 0xa3), 57 .handle = NV_ENGCTX(COPY0, 0xa3),
81 .ofuncs = &(struct nouveau_ofuncs) { 58 .ofuncs = &(struct nouveau_ofuncs) {
82 .ctor = nva3_copy_context_ctor, 59 .ctor = _nouveau_falcon_context_ctor,
83 .dtor = _nouveau_copy_context_dtor, 60 .dtor = _nouveau_falcon_context_dtor,
84 .init = _nouveau_copy_context_init, 61 .init = _nouveau_falcon_context_init,
85 .fini = _nouveau_copy_context_fini, 62 .fini = _nouveau_falcon_context_fini,
86 .rd32 = _nouveau_copy_context_rd32, 63 .rd32 = _nouveau_falcon_context_rd32,
87 .wr32 = _nouveau_copy_context_wr32, 64 .wr32 = _nouveau_falcon_context_wr32,
88 65
89 }, 66 },
90}; 67};
@@ -100,41 +77,40 @@ static const struct nouveau_enum nva3_copy_isr_error_name[] = {
100 {} 77 {}
101}; 78};
102 79
103static void 80void
104nva3_copy_intr(struct nouveau_subdev *subdev) 81nva3_copy_intr(struct nouveau_subdev *subdev)
105{ 82{
106 struct nouveau_fifo *pfifo = nouveau_fifo(subdev); 83 struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
107 struct nouveau_engine *engine = nv_engine(subdev); 84 struct nouveau_engine *engine = nv_engine(subdev);
85 struct nouveau_falcon *falcon = (void *)subdev;
108 struct nouveau_object *engctx; 86 struct nouveau_object *engctx;
109 struct nva3_copy_priv *priv = (void *)subdev; 87 u32 dispatch = nv_ro32(falcon, 0x01c);
110 u32 dispatch = nv_rd32(priv, 0x10401c); 88 u32 stat = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);
111 u32 stat = nv_rd32(priv, 0x104008) & dispatch & ~(dispatch >> 16); 89 u64 inst = nv_ro32(falcon, 0x050) & 0x3fffffff;
112 u64 inst = nv_rd32(priv, 0x104050) & 0x3fffffff; 90 u32 ssta = nv_ro32(falcon, 0x040) & 0x0000ffff;
113 u32 ssta = nv_rd32(priv, 0x104040) & 0x0000ffff; 91 u32 addr = nv_ro32(falcon, 0x040) >> 16;
114 u32 addr = nv_rd32(priv, 0x104040) >> 16;
115 u32 mthd = (addr & 0x07ff) << 2; 92 u32 mthd = (addr & 0x07ff) << 2;
116 u32 subc = (addr & 0x3800) >> 11; 93 u32 subc = (addr & 0x3800) >> 11;
117 u32 data = nv_rd32(priv, 0x104044); 94 u32 data = nv_ro32(falcon, 0x044);
118 int chid; 95 int chid;
119 96
120 engctx = nouveau_engctx_get(engine, inst); 97 engctx = nouveau_engctx_get(engine, inst);
121 chid = pfifo->chid(pfifo, engctx); 98 chid = pfifo->chid(pfifo, engctx);
122 99
123 if (stat & 0x00000040) { 100 if (stat & 0x00000040) {
124 nv_error(priv, "DISPATCH_ERROR ["); 101 nv_error(falcon, "DISPATCH_ERROR [");
125 nouveau_enum_print(nva3_copy_isr_error_name, ssta); 102 nouveau_enum_print(nva3_copy_isr_error_name, ssta);
126 printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n", 103 printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
127 chid, inst << 12, subc, mthd, data); 104 chid, inst << 12, subc, mthd, data);
128 nv_wr32(priv, 0x104004, 0x00000040); 105 nv_wo32(falcon, 0x004, 0x00000040);
129 stat &= ~0x00000040; 106 stat &= ~0x00000040;
130 } 107 }
131 108
132 if (stat) { 109 if (stat) {
133 nv_error(priv, "unhandled intr 0x%08x\n", stat); 110 nv_error(falcon, "unhandled intr 0x%08x\n", stat);
134 nv_wr32(priv, 0x104004, stat); 111 nv_wo32(falcon, 0x004, stat);
135 } 112 }
136 113
137 nv50_fb_trap(nouveau_fb(priv), 1);
138 nouveau_engctx_put(engctx); 114 nouveau_engctx_put(engctx);
139} 115}
140 116
@@ -154,7 +130,8 @@ nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
154 struct nva3_copy_priv *priv; 130 struct nva3_copy_priv *priv;
155 int ret; 131 int ret;
156 132
157 ret = nouveau_copy_create(parent, engine, oclass, enable, 0, &priv); 133 ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, enable,
134 "PCE0", "copy0", &priv);
158 *pobject = nv_object(priv); 135 *pobject = nv_object(priv);
159 if (ret) 136 if (ret)
160 return ret; 137 return ret;
@@ -164,59 +141,22 @@ nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
164 nv_engine(priv)->cclass = &nva3_copy_cclass; 141 nv_engine(priv)->cclass = &nva3_copy_cclass;
165 nv_engine(priv)->sclass = nva3_copy_sclass; 142 nv_engine(priv)->sclass = nva3_copy_sclass;
166 nv_engine(priv)->tlb_flush = nva3_copy_tlb_flush; 143 nv_engine(priv)->tlb_flush = nva3_copy_tlb_flush;
144 nv_falcon(priv)->code.data = nva3_pcopy_code;
145 nv_falcon(priv)->code.size = sizeof(nva3_pcopy_code);
146 nv_falcon(priv)->data.data = nva3_pcopy_data;
147 nv_falcon(priv)->data.size = sizeof(nva3_pcopy_data);
167 return 0; 148 return 0;
168} 149}
169 150
170static int
171nva3_copy_init(struct nouveau_object *object)
172{
173 struct nva3_copy_priv *priv = (void *)object;
174 int ret, i;
175
176 ret = nouveau_copy_init(&priv->base);
177 if (ret)
178 return ret;
179
180 /* disable all interrupts */
181 nv_wr32(priv, 0x104014, 0xffffffff);
182
183 /* upload ucode */
184 nv_wr32(priv, 0x1041c0, 0x01000000);
185 for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
186 nv_wr32(priv, 0x1041c4, nva3_pcopy_data[i]);
187
188 nv_wr32(priv, 0x104180, 0x01000000);
189 for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
190 if ((i & 0x3f) == 0)
191 nv_wr32(priv, 0x104188, i >> 6);
192 nv_wr32(priv, 0x104184, nva3_pcopy_code[i]);
193 }
194
195 /* start it running */
196 nv_wr32(priv, 0x10410c, 0x00000000);
197 nv_wr32(priv, 0x104104, 0x00000000); /* ENTRY */
198 nv_wr32(priv, 0x104100, 0x00000002); /* TRIGGER */
199 return 0;
200}
201
202static int
203nva3_copy_fini(struct nouveau_object *object, bool suspend)
204{
205 struct nva3_copy_priv *priv = (void *)object;
206
207 nv_mask(priv, 0x104048, 0x00000003, 0x00000000);
208 nv_wr32(priv, 0x104014, 0xffffffff);
209
210 return nouveau_copy_fini(&priv->base, suspend);
211}
212
213struct nouveau_oclass 151struct nouveau_oclass
214nva3_copy_oclass = { 152nva3_copy_oclass = {
215 .handle = NV_ENGINE(COPY0, 0xa3), 153 .handle = NV_ENGINE(COPY0, 0xa3),
216 .ofuncs = &(struct nouveau_ofuncs) { 154 .ofuncs = &(struct nouveau_ofuncs) {
217 .ctor = nva3_copy_ctor, 155 .ctor = nva3_copy_ctor,
218 .dtor = _nouveau_copy_dtor, 156 .dtor = _nouveau_falcon_dtor,
219 .init = nva3_copy_init, 157 .init = _nouveau_falcon_init,
220 .fini = nva3_copy_fini, 158 .fini = _nouveau_falcon_fini,
159 .rd32 = _nouveau_falcon_rd32,
160 .wr32 = _nouveau_falcon_wr32,
221 }, 161 },
222}; 162};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
index 06d4a8791055..b3ed2737e21f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
@@ -22,10 +22,9 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/os.h> 25#include <core/falcon.h>
26#include <core/enum.h>
27#include <core/class.h> 26#include <core/class.h>
28#include <core/engctx.h> 27#include <core/enum.h>
29 28
30#include <engine/fifo.h> 29#include <engine/fifo.h>
31#include <engine/copy.h> 30#include <engine/copy.h>
@@ -33,11 +32,7 @@
33#include "fuc/nvc0.fuc.h" 32#include "fuc/nvc0.fuc.h"
34 33
35struct nvc0_copy_priv { 34struct nvc0_copy_priv {
36 struct nouveau_copy base; 35 struct nouveau_falcon base;
37};
38
39struct nvc0_copy_chan {
40 struct nouveau_copy_chan base;
41}; 36};
42 37
43/******************************************************************************* 38/*******************************************************************************
@@ -60,32 +55,14 @@ nvc0_copy1_sclass[] = {
60 * PCOPY context 55 * PCOPY context
61 ******************************************************************************/ 56 ******************************************************************************/
62 57
63static int
64nvc0_copy_context_ctor(struct nouveau_object *parent,
65 struct nouveau_object *engine,
66 struct nouveau_oclass *oclass, void *data, u32 size,
67 struct nouveau_object **pobject)
68{
69 struct nvc0_copy_chan *priv;
70 int ret;
71
72 ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
73 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
74 *pobject = nv_object(priv);
75 if (ret)
76 return ret;
77
78 return 0;
79}
80
81static struct nouveau_ofuncs 58static struct nouveau_ofuncs
82nvc0_copy_context_ofuncs = { 59nvc0_copy_context_ofuncs = {
83 .ctor = nvc0_copy_context_ctor, 60 .ctor = _nouveau_falcon_context_ctor,
84 .dtor = _nouveau_copy_context_dtor, 61 .dtor = _nouveau_falcon_context_dtor,
85 .init = _nouveau_copy_context_init, 62 .init = _nouveau_falcon_context_init,
86 .fini = _nouveau_copy_context_fini, 63 .fini = _nouveau_falcon_context_fini,
87 .rd32 = _nouveau_copy_context_rd32, 64 .rd32 = _nouveau_falcon_context_rd32,
88 .wr32 = _nouveau_copy_context_wr32, 65 .wr32 = _nouveau_falcon_context_wr32,
89}; 66};
90 67
91static struct nouveau_oclass 68static struct nouveau_oclass
@@ -104,50 +81,18 @@ nvc0_copy1_cclass = {
104 * PCOPY engine/subdev functions 81 * PCOPY engine/subdev functions
105 ******************************************************************************/ 82 ******************************************************************************/
106 83
107static const struct nouveau_enum nvc0_copy_isr_error_name[] = { 84static int
108 { 0x0001, "ILLEGAL_MTHD" }, 85nvc0_copy_init(struct nouveau_object *object)
109 { 0x0002, "INVALID_ENUM" },
110 { 0x0003, "INVALID_BITFIELD" },
111 {}
112};
113
114static void
115nvc0_copy_intr(struct nouveau_subdev *subdev)
116{ 86{
117 struct nouveau_fifo *pfifo = nouveau_fifo(subdev); 87 struct nvc0_copy_priv *priv = (void *)object;
118 struct nouveau_engine *engine = nv_engine(subdev); 88 int ret;
119 struct nouveau_object *engctx;
120 int idx = nv_engidx(nv_object(subdev)) - NVDEV_ENGINE_COPY0;
121 struct nvc0_copy_priv *priv = (void *)subdev;
122 u32 disp = nv_rd32(priv, 0x10401c + (idx * 0x1000));
123 u32 intr = nv_rd32(priv, 0x104008 + (idx * 0x1000));
124 u32 stat = intr & disp & ~(disp >> 16);
125 u64 inst = nv_rd32(priv, 0x104050 + (idx * 0x1000)) & 0x0fffffff;
126 u32 ssta = nv_rd32(priv, 0x104040 + (idx * 0x1000)) & 0x0000ffff;
127 u32 addr = nv_rd32(priv, 0x104040 + (idx * 0x1000)) >> 16;
128 u32 mthd = (addr & 0x07ff) << 2;
129 u32 subc = (addr & 0x3800) >> 11;
130 u32 data = nv_rd32(priv, 0x104044 + (idx * 0x1000));
131 int chid;
132
133 engctx = nouveau_engctx_get(engine, inst);
134 chid = pfifo->chid(pfifo, engctx);
135
136 if (stat & 0x00000040) {
137 nv_error(priv, "DISPATCH_ERROR [");
138 nouveau_enum_print(nvc0_copy_isr_error_name, ssta);
139 printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
140 chid, (u64)inst << 12, subc, mthd, data);
141 nv_wr32(priv, 0x104004 + (idx * 0x1000), 0x00000040);
142 stat &= ~0x00000040;
143 }
144 89
145 if (stat) { 90 ret = nouveau_falcon_init(&priv->base);
146 nv_error(priv, "unhandled intr 0x%08x\n", stat); 91 if (ret)
147 nv_wr32(priv, 0x104004 + (idx * 0x1000), stat); 92 return ret;
148 }
149 93
150 nouveau_engctx_put(engctx); 94 nv_wo32(priv, 0x084, nv_engidx(object) - NVDEV_ENGINE_COPY0);
95 return 0;
151} 96}
152 97
153static int 98static int
@@ -161,15 +106,20 @@ nvc0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
161 if (nv_rd32(parent, 0x022500) & 0x00000100) 106 if (nv_rd32(parent, 0x022500) & 0x00000100)
162 return -ENODEV; 107 return -ENODEV;
163 108
164 ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv); 109 ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, true,
110 "PCE0", "copy0", &priv);
165 *pobject = nv_object(priv); 111 *pobject = nv_object(priv);
166 if (ret) 112 if (ret)
167 return ret; 113 return ret;
168 114
169 nv_subdev(priv)->unit = 0x00000040; 115 nv_subdev(priv)->unit = 0x00000040;
170 nv_subdev(priv)->intr = nvc0_copy_intr; 116 nv_subdev(priv)->intr = nva3_copy_intr;
171 nv_engine(priv)->cclass = &nvc0_copy0_cclass; 117 nv_engine(priv)->cclass = &nvc0_copy0_cclass;
172 nv_engine(priv)->sclass = nvc0_copy0_sclass; 118 nv_engine(priv)->sclass = nvc0_copy0_sclass;
119 nv_falcon(priv)->code.data = nvc0_pcopy_code;
120 nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code);
121 nv_falcon(priv)->data.data = nvc0_pcopy_data;
122 nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data);
173 return 0; 123 return 0;
174} 124}
175 125
@@ -184,72 +134,33 @@ nvc0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
184 if (nv_rd32(parent, 0x022500) & 0x00000200) 134 if (nv_rd32(parent, 0x022500) & 0x00000200)
185 return -ENODEV; 135 return -ENODEV;
186 136
187 ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv); 137 ret = nouveau_falcon_create(parent, engine, oclass, 0x105000, true,
138 "PCE1", "copy1", &priv);
188 *pobject = nv_object(priv); 139 *pobject = nv_object(priv);
189 if (ret) 140 if (ret)
190 return ret; 141 return ret;
191 142
192 nv_subdev(priv)->unit = 0x00000080; 143 nv_subdev(priv)->unit = 0x00000080;
193 nv_subdev(priv)->intr = nvc0_copy_intr; 144 nv_subdev(priv)->intr = nva3_copy_intr;
194 nv_engine(priv)->cclass = &nvc0_copy1_cclass; 145 nv_engine(priv)->cclass = &nvc0_copy1_cclass;
195 nv_engine(priv)->sclass = nvc0_copy1_sclass; 146 nv_engine(priv)->sclass = nvc0_copy1_sclass;
147 nv_falcon(priv)->code.data = nvc0_pcopy_code;
148 nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code);
149 nv_falcon(priv)->data.data = nvc0_pcopy_data;
150 nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data);
196 return 0; 151 return 0;
197} 152}
198 153
199static int
200nvc0_copy_init(struct nouveau_object *object)
201{
202 int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
203 struct nvc0_copy_priv *priv = (void *)object;
204 int ret, i;
205
206 ret = nouveau_copy_init(&priv->base);
207 if (ret)
208 return ret;
209
210 /* disable all interrupts */
211 nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
212
213 /* upload ucode */
214 nv_wr32(priv, 0x1041c0 + (idx * 0x1000), 0x01000000);
215 for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++)
216 nv_wr32(priv, 0x1041c4 + (idx * 0x1000), nvc0_pcopy_data[i]);
217
218 nv_wr32(priv, 0x104180 + (idx * 0x1000), 0x01000000);
219 for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) {
220 if ((i & 0x3f) == 0)
221 nv_wr32(priv, 0x104188 + (idx * 0x1000), i >> 6);
222 nv_wr32(priv, 0x104184 + (idx * 0x1000), nvc0_pcopy_code[i]);
223 }
224
225 /* start it running */
226 nv_wr32(priv, 0x104084 + (idx * 0x1000), idx);
227 nv_wr32(priv, 0x10410c + (idx * 0x1000), 0x00000000);
228 nv_wr32(priv, 0x104104 + (idx * 0x1000), 0x00000000); /* ENTRY */
229 nv_wr32(priv, 0x104100 + (idx * 0x1000), 0x00000002); /* TRIGGER */
230 return 0;
231}
232
233static int
234nvc0_copy_fini(struct nouveau_object *object, bool suspend)
235{
236 int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
237 struct nvc0_copy_priv *priv = (void *)object;
238
239 nv_mask(priv, 0x104048 + (idx * 0x1000), 0x00000003, 0x00000000);
240 nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
241
242 return nouveau_copy_fini(&priv->base, suspend);
243}
244
245struct nouveau_oclass 154struct nouveau_oclass
246nvc0_copy0_oclass = { 155nvc0_copy0_oclass = {
247 .handle = NV_ENGINE(COPY0, 0xc0), 156 .handle = NV_ENGINE(COPY0, 0xc0),
248 .ofuncs = &(struct nouveau_ofuncs) { 157 .ofuncs = &(struct nouveau_ofuncs) {
249 .ctor = nvc0_copy0_ctor, 158 .ctor = nvc0_copy0_ctor,
250 .dtor = _nouveau_copy_dtor, 159 .dtor = _nouveau_falcon_dtor,
251 .init = nvc0_copy_init, 160 .init = nvc0_copy_init,
252 .fini = nvc0_copy_fini, 161 .fini = _nouveau_falcon_fini,
162 .rd32 = _nouveau_falcon_rd32,
163 .wr32 = _nouveau_falcon_wr32,
253 }, 164 },
254}; 165};
255 166
@@ -258,8 +169,10 @@ nvc0_copy1_oclass = {
258 .handle = NV_ENGINE(COPY1, 0xc0), 169 .handle = NV_ENGINE(COPY1, 0xc0),
259 .ofuncs = &(struct nouveau_ofuncs) { 170 .ofuncs = &(struct nouveau_ofuncs) {
260 .ctor = nvc0_copy1_ctor, 171 .ctor = nvc0_copy1_ctor,
261 .dtor = _nouveau_copy_dtor, 172 .dtor = _nouveau_falcon_dtor,
262 .init = nvc0_copy_init, 173 .init = nvc0_copy_init,
263 .fini = nvc0_copy_fini, 174 .fini = _nouveau_falcon_fini,
175 .rd32 = _nouveau_falcon_rd32,
176 .wr32 = _nouveau_falcon_wr32,
264 }, 177 },
265}; 178};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
index 2017c1579ac5..dbbe9e8998fe 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
@@ -30,11 +30,7 @@
30#include <engine/copy.h> 30#include <engine/copy.h>
31 31
32struct nve0_copy_priv { 32struct nve0_copy_priv {
33 struct nouveau_copy base; 33 struct nouveau_engine base;
34};
35
36struct nve0_copy_chan {
37 struct nouveau_copy_chan base;
38}; 34};
39 35
40/******************************************************************************* 36/*******************************************************************************
@@ -51,32 +47,14 @@ nve0_copy_sclass[] = {
51 * PCOPY context 47 * PCOPY context
52 ******************************************************************************/ 48 ******************************************************************************/
53 49
54static int
55nve0_copy_context_ctor(struct nouveau_object *parent,
56 struct nouveau_object *engine,
57 struct nouveau_oclass *oclass, void *data, u32 size,
58 struct nouveau_object **pobject)
59{
60 struct nve0_copy_chan *priv;
61 int ret;
62
63 ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
64 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
65 *pobject = nv_object(priv);
66 if (ret)
67 return ret;
68
69 return 0;
70}
71
72static struct nouveau_ofuncs 50static struct nouveau_ofuncs
73nve0_copy_context_ofuncs = { 51nve0_copy_context_ofuncs = {
74 .ctor = nve0_copy_context_ctor, 52 .ctor = _nouveau_engctx_ctor,
75 .dtor = _nouveau_copy_context_dtor, 53 .dtor = _nouveau_engctx_dtor,
76 .init = _nouveau_copy_context_init, 54 .init = _nouveau_engctx_init,
77 .fini = _nouveau_copy_context_fini, 55 .fini = _nouveau_engctx_fini,
78 .rd32 = _nouveau_copy_context_rd32, 56 .rd32 = _nouveau_engctx_rd32,
79 .wr32 = _nouveau_copy_context_wr32, 57 .wr32 = _nouveau_engctx_wr32,
80}; 58};
81 59
82static struct nouveau_oclass 60static struct nouveau_oclass
@@ -100,7 +78,8 @@ nve0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
100 if (nv_rd32(parent, 0x022500) & 0x00000100) 78 if (nv_rd32(parent, 0x022500) & 0x00000100)
101 return -ENODEV; 79 return -ENODEV;
102 80
103 ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv); 81 ret = nouveau_engine_create(parent, engine, oclass, true,
82 "PCE0", "copy0", &priv);
104 *pobject = nv_object(priv); 83 *pobject = nv_object(priv);
105 if (ret) 84 if (ret)
106 return ret; 85 return ret;
@@ -122,7 +101,8 @@ nve0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
122 if (nv_rd32(parent, 0x022500) & 0x00000200) 101 if (nv_rd32(parent, 0x022500) & 0x00000200)
123 return -ENODEV; 102 return -ENODEV;
124 103
125 ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv); 104 ret = nouveau_engine_create(parent, engine, oclass, true,
105 "PCE1", "copy1", &priv);
126 *pobject = nv_object(priv); 106 *pobject = nv_object(priv);
127 if (ret) 107 if (ret)
128 return ret; 108 return ret;
@@ -138,9 +118,9 @@ nve0_copy0_oclass = {
138 .handle = NV_ENGINE(COPY0, 0xe0), 118 .handle = NV_ENGINE(COPY0, 0xe0),
139 .ofuncs = &(struct nouveau_ofuncs) { 119 .ofuncs = &(struct nouveau_ofuncs) {
140 .ctor = nve0_copy0_ctor, 120 .ctor = nve0_copy0_ctor,
141 .dtor = _nouveau_copy_dtor, 121 .dtor = _nouveau_engine_dtor,
142 .init = _nouveau_copy_init, 122 .init = _nouveau_engine_init,
143 .fini = _nouveau_copy_fini, 123 .fini = _nouveau_engine_fini,
144 }, 124 },
145}; 125};
146 126
@@ -149,8 +129,8 @@ nve0_copy1_oclass = {
149 .handle = NV_ENGINE(COPY1, 0xe0), 129 .handle = NV_ENGINE(COPY1, 0xe0),
150 .ofuncs = &(struct nouveau_ofuncs) { 130 .ofuncs = &(struct nouveau_ofuncs) {
151 .ctor = nve0_copy1_ctor, 131 .ctor = nve0_copy1_ctor,
152 .dtor = _nouveau_copy_dtor, 132 .dtor = _nouveau_engine_dtor,
153 .init = _nouveau_copy_init, 133 .init = _nouveau_engine_init,
154 .fini = _nouveau_copy_fini, 134 .fini = _nouveau_engine_fini,
155 }, 135 },
156}; 136};
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
index 1d85e5b66ca0..b97490512723 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
@@ -34,11 +34,7 @@
34#include <engine/crypt.h> 34#include <engine/crypt.h>
35 35
36struct nv84_crypt_priv { 36struct nv84_crypt_priv {
37 struct nouveau_crypt base; 37 struct nouveau_engine base;
38};
39
40struct nv84_crypt_chan {
41 struct nouveau_crypt_chan base;
42}; 38};
43 39
44/******************************************************************************* 40/*******************************************************************************
@@ -87,34 +83,16 @@ nv84_crypt_sclass[] = {
87 * PCRYPT context 83 * PCRYPT context
88 ******************************************************************************/ 84 ******************************************************************************/
89 85
90static int
91nv84_crypt_context_ctor(struct nouveau_object *parent,
92 struct nouveau_object *engine,
93 struct nouveau_oclass *oclass, void *data, u32 size,
94 struct nouveau_object **pobject)
95{
96 struct nv84_crypt_chan *priv;
97 int ret;
98
99 ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
100 0, NVOBJ_FLAG_ZERO_ALLOC, &priv);
101 *pobject = nv_object(priv);
102 if (ret)
103 return ret;
104
105 return 0;
106}
107
108static struct nouveau_oclass 86static struct nouveau_oclass
109nv84_crypt_cclass = { 87nv84_crypt_cclass = {
110 .handle = NV_ENGCTX(CRYPT, 0x84), 88 .handle = NV_ENGCTX(CRYPT, 0x84),
111 .ofuncs = &(struct nouveau_ofuncs) { 89 .ofuncs = &(struct nouveau_ofuncs) {
112 .ctor = nv84_crypt_context_ctor, 90 .ctor = _nouveau_engctx_ctor,
113 .dtor = _nouveau_crypt_context_dtor, 91 .dtor = _nouveau_engctx_dtor,
114 .init = _nouveau_crypt_context_init, 92 .init = _nouveau_engctx_init,
115 .fini = _nouveau_crypt_context_fini, 93 .fini = _nouveau_engctx_fini,
116 .rd32 = _nouveau_crypt_context_rd32, 94 .rd32 = _nouveau_engctx_rd32,
117 .wr32 = _nouveau_crypt_context_wr32, 95 .wr32 = _nouveau_engctx_wr32,
118 }, 96 },
119}; 97};
120 98
@@ -157,7 +135,6 @@ nv84_crypt_intr(struct nouveau_subdev *subdev)
157 nv_wr32(priv, 0x102130, stat); 135 nv_wr32(priv, 0x102130, stat);
158 nv_wr32(priv, 0x10200c, 0x10); 136 nv_wr32(priv, 0x10200c, 0x10);
159 137
160 nv50_fb_trap(nouveau_fb(priv), 1);
161 nouveau_engctx_put(engctx); 138 nouveau_engctx_put(engctx);
162} 139}
163 140
@@ -176,7 +153,8 @@ nv84_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
176 struct nv84_crypt_priv *priv; 153 struct nv84_crypt_priv *priv;
177 int ret; 154 int ret;
178 155
179 ret = nouveau_crypt_create(parent, engine, oclass, &priv); 156 ret = nouveau_engine_create(parent, engine, oclass, true,
157 "PCRYPT", "crypt", &priv);
180 *pobject = nv_object(priv); 158 *pobject = nv_object(priv);
181 if (ret) 159 if (ret)
182 return ret; 160 return ret;
@@ -195,7 +173,7 @@ nv84_crypt_init(struct nouveau_object *object)
195 struct nv84_crypt_priv *priv = (void *)object; 173 struct nv84_crypt_priv *priv = (void *)object;
196 int ret; 174 int ret;
197 175
198 ret = nouveau_crypt_init(&priv->base); 176 ret = nouveau_engine_init(&priv->base);
199 if (ret) 177 if (ret)
200 return ret; 178 return ret;
201 179
@@ -210,8 +188,8 @@ nv84_crypt_oclass = {
210 .handle = NV_ENGINE(CRYPT, 0x84), 188 .handle = NV_ENGINE(CRYPT, 0x84),
211 .ofuncs = &(struct nouveau_ofuncs) { 189 .ofuncs = &(struct nouveau_ofuncs) {
212 .ctor = nv84_crypt_ctor, 190 .ctor = nv84_crypt_ctor,
213 .dtor = _nouveau_crypt_dtor, 191 .dtor = _nouveau_engine_dtor,
214 .init = nv84_crypt_init, 192 .init = nv84_crypt_init,
215 .fini = _nouveau_crypt_fini, 193 .fini = _nouveau_engine_fini,
216 }, 194 },
217}; 195};
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
index 9e3876c89b96..21986f3bf0c8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
@@ -26,6 +26,7 @@
26#include <core/enum.h> 26#include <core/enum.h>
27#include <core/class.h> 27#include <core/class.h>
28#include <core/engctx.h> 28#include <core/engctx.h>
29#include <core/falcon.h>
29 30
30#include <subdev/timer.h> 31#include <subdev/timer.h>
31#include <subdev/fb.h> 32#include <subdev/fb.h>
@@ -36,11 +37,7 @@
36#include "fuc/nv98.fuc.h" 37#include "fuc/nv98.fuc.h"
37 38
38struct nv98_crypt_priv { 39struct nv98_crypt_priv {
39 struct nouveau_crypt base; 40 struct nouveau_falcon base;
40};
41
42struct nv98_crypt_chan {
43 struct nouveau_crypt_chan base;
44}; 41};
45 42
46/******************************************************************************* 43/*******************************************************************************
@@ -57,34 +54,16 @@ nv98_crypt_sclass[] = {
57 * PCRYPT context 54 * PCRYPT context
58 ******************************************************************************/ 55 ******************************************************************************/
59 56
60static int
61nv98_crypt_context_ctor(struct nouveau_object *parent,
62 struct nouveau_object *engine,
63 struct nouveau_oclass *oclass, void *data, u32 size,
64 struct nouveau_object **pobject)
65{
66 struct nv98_crypt_chan *priv;
67 int ret;
68
69 ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
70 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
71 *pobject = nv_object(priv);
72 if (ret)
73 return ret;
74
75 return 0;
76}
77
78static struct nouveau_oclass 57static struct nouveau_oclass
79nv98_crypt_cclass = { 58nv98_crypt_cclass = {
80 .handle = NV_ENGCTX(CRYPT, 0x98), 59 .handle = NV_ENGCTX(CRYPT, 0x98),
81 .ofuncs = &(struct nouveau_ofuncs) { 60 .ofuncs = &(struct nouveau_ofuncs) {
82 .ctor = nv98_crypt_context_ctor, 61 .ctor = _nouveau_falcon_context_ctor,
83 .dtor = _nouveau_crypt_context_dtor, 62 .dtor = _nouveau_falcon_context_dtor,
84 .init = _nouveau_crypt_context_init, 63 .init = _nouveau_falcon_context_init,
85 .fini = _nouveau_crypt_context_fini, 64 .fini = _nouveau_falcon_context_fini,
86 .rd32 = _nouveau_crypt_context_rd32, 65 .rd32 = _nouveau_falcon_context_rd32,
87 .wr32 = _nouveau_crypt_context_wr32, 66 .wr32 = _nouveau_falcon_context_wr32,
88 }, 67 },
89}; 68};
90 69
@@ -134,7 +113,6 @@ nv98_crypt_intr(struct nouveau_subdev *subdev)
134 nv_wr32(priv, 0x087004, stat); 113 nv_wr32(priv, 0x087004, stat);
135 } 114 }
136 115
137 nv50_fb_trap(nouveau_fb(priv), 1);
138 nouveau_engctx_put(engctx); 116 nouveau_engctx_put(engctx);
139} 117}
140 118
@@ -153,7 +131,8 @@ nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
153 struct nv98_crypt_priv *priv; 131 struct nv98_crypt_priv *priv;
154 int ret; 132 int ret;
155 133
156 ret = nouveau_crypt_create(parent, engine, oclass, &priv); 134 ret = nouveau_falcon_create(parent, engine, oclass, 0x087000, true,
135 "PCRYPT", "crypt", &priv);
157 *pobject = nv_object(priv); 136 *pobject = nv_object(priv);
158 if (ret) 137 if (ret)
159 return ret; 138 return ret;
@@ -163,36 +142,10 @@ nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
163 nv_engine(priv)->cclass = &nv98_crypt_cclass; 142 nv_engine(priv)->cclass = &nv98_crypt_cclass;
164 nv_engine(priv)->sclass = nv98_crypt_sclass; 143 nv_engine(priv)->sclass = nv98_crypt_sclass;
165 nv_engine(priv)->tlb_flush = nv98_crypt_tlb_flush; 144 nv_engine(priv)->tlb_flush = nv98_crypt_tlb_flush;
166 return 0; 145 nv_falcon(priv)->code.data = nv98_pcrypt_code;
167} 146 nv_falcon(priv)->code.size = sizeof(nv98_pcrypt_code);
168 147 nv_falcon(priv)->data.data = nv98_pcrypt_data;
169static int 148 nv_falcon(priv)->data.size = sizeof(nv98_pcrypt_data);
170nv98_crypt_init(struct nouveau_object *object)
171{
172 struct nv98_crypt_priv *priv = (void *)object;
173 int ret, i;
174
175 ret = nouveau_crypt_init(&priv->base);
176 if (ret)
177 return ret;
178
179 /* wait for exit interrupt to signal */
180 nv_wait(priv, 0x087008, 0x00000010, 0x00000010);
181 nv_wr32(priv, 0x087004, 0x00000010);
182
183 /* upload microcode code and data segments */
184 nv_wr32(priv, 0x087ff8, 0x00100000);
185 for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
186 nv_wr32(priv, 0x087ff4, nv98_pcrypt_code[i]);
187
188 nv_wr32(priv, 0x087ff8, 0x00000000);
189 for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
190 nv_wr32(priv, 0x087ff4, nv98_pcrypt_data[i]);
191
192 /* start it running */
193 nv_wr32(priv, 0x08710c, 0x00000000);
194 nv_wr32(priv, 0x087104, 0x00000000); /* ENTRY */
195 nv_wr32(priv, 0x087100, 0x00000002); /* TRIGGER */
196 return 0; 149 return 0;
197} 150}
198 151
@@ -201,8 +154,10 @@ nv98_crypt_oclass = {
201 .handle = NV_ENGINE(CRYPT, 0x98), 154 .handle = NV_ENGINE(CRYPT, 0x98),
202 .ofuncs = &(struct nouveau_ofuncs) { 155 .ofuncs = &(struct nouveau_ofuncs) {
203 .ctor = nv98_crypt_ctor, 156 .ctor = nv98_crypt_ctor,
204 .dtor = _nouveau_crypt_dtor, 157 .dtor = _nouveau_falcon_dtor,
205 .init = nv98_crypt_init, 158 .init = _nouveau_falcon_init,
206 .fini = _nouveau_crypt_fini, 159 .fini = _nouveau_falcon_fini,
160 .rd32 = _nouveau_falcon_rd32,
161 .wr32 = _nouveau_falcon_wr32,
207 }, 162 },
208}; 163};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
new file mode 100644
index 000000000000..d0817d94454c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
@@ -0,0 +1,88 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27
28#include <subdev/bios.h>
29#include <subdev/bios/dcb.h>
30#include <subdev/timer.h>
31
32#include "nv50.h"
33
34int
35nv50_dac_power(struct nv50_disp_priv *priv, int or, u32 data)
36{
37 const u32 stat = (data & NV50_DISP_DAC_PWR_HSYNC) |
38 (data & NV50_DISP_DAC_PWR_VSYNC) |
39 (data & NV50_DISP_DAC_PWR_DATA) |
40 (data & NV50_DISP_DAC_PWR_STATE);
41 const u32 doff = (or * 0x800);
42 nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
43 nv_mask(priv, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat);
44 nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
45 return 0;
46}
47
48int
49nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval)
50{
51 const u32 doff = (or * 0x800);
52 int load = -EINVAL;
53 nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval);
54 udelay(9500);
55 nv_wr32(priv, 0x61a00c + doff, 0x80000000);
56 load = (nv_rd32(priv, 0x61a00c + doff) & 0x38000000) >> 27;
57 nv_wr32(priv, 0x61a00c + doff, 0x00000000);
58 return load;
59}
60
61int
62nv50_dac_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
63{
64 struct nv50_disp_priv *priv = (void *)object->engine;
65 const u8 or = (mthd & NV50_DISP_DAC_MTHD_OR);
66 u32 *data = args;
67 int ret;
68
69 if (size < sizeof(u32))
70 return -EINVAL;
71
72 switch (mthd & ~0x3f) {
73 case NV50_DISP_DAC_PWR:
74 ret = priv->dac.power(priv, or, data[0]);
75 break;
76 case NV50_DISP_DAC_LOAD:
77 ret = priv->dac.sense(priv, or, data[0]);
78 if (ret >= 0) {
79 data[0] = ret;
80 ret = 0;
81 }
82 break;
83 default:
84 BUG_ON(1);
85 }
86
87 return ret;
88}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
new file mode 100644
index 000000000000..373dbcc523b2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
@@ -0,0 +1,48 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27
28#include "nv50.h"
29
30int
31nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
32{
33 const u32 soff = (or * 0x800);
34 int i;
35
36 if (data && data[0]) {
37 for (i = 0; i < size; i++)
38 nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]);
39 nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003);
40 } else
41 if (data) {
42 nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000001);
43 } else {
44 nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000000);
45 }
46
47 return 0;
48}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
new file mode 100644
index 000000000000..dc57e24fc1df
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
@@ -0,0 +1,53 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27
28#include <subdev/bios.h>
29#include <subdev/bios/dcb.h>
30#include <subdev/bios/dp.h>
31#include <subdev/bios/init.h>
32
33#include "nv50.h"
34
35int
36nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
37{
38 const u32 soff = (or * 0x030);
39 int i;
40
41 if (data && data[0]) {
42 for (i = 0; i < size; i++)
43 nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]);
44 nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003);
45 } else
46 if (data) {
47 nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000001);
48 } else {
49 nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000000);
50 }
51
52 return 0;
53}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
new file mode 100644
index 000000000000..0d36bdc51417
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
@@ -0,0 +1,66 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27
28#include "nv50.h"
29
30int
31nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
32{
33 const u32 hoff = (head * 0x800);
34
35 if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
36 nv_mask(priv, 0x6165a4 + hoff, 0x40000000, 0x00000000);
37 nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
38 nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
39 return 0;
40 }
41
42 /* AVI InfoFrame */
43 nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
44 nv_wr32(priv, 0x616528 + hoff, 0x000d0282);
45 nv_wr32(priv, 0x61652c + hoff, 0x0000006f);
46 nv_wr32(priv, 0x616530 + hoff, 0x00000000);
47 nv_wr32(priv, 0x616534 + hoff, 0x00000000);
48 nv_wr32(priv, 0x616538 + hoff, 0x00000000);
49 nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000001);
50
51 /* Audio InfoFrame */
52 nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
53 nv_wr32(priv, 0x616508 + hoff, 0x000a0184);
54 nv_wr32(priv, 0x61650c + hoff, 0x00000071);
55 nv_wr32(priv, 0x616510 + hoff, 0x00000000);
56 nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001);
57
58 /* ??? */
59 nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
60 nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
61 nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
62
63 /* HDMI_CTRL */
64 nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, data | 0x1f000000 /* ??? */);
65 return 0;
66}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
new file mode 100644
index 000000000000..f065fc248adf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
@@ -0,0 +1,66 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27
28#include "nv50.h"
29
30int
31nva3_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
32{
33 const u32 soff = (or * 0x800);
34
35 if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
36 nv_mask(priv, 0x61c5a4 + soff, 0x40000000, 0x00000000);
37 nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
38 nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
39 return 0;
40 }
41
42 /* AVI InfoFrame */
43 nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
44 nv_wr32(priv, 0x61c528 + soff, 0x000d0282);
45 nv_wr32(priv, 0x61c52c + soff, 0x0000006f);
46 nv_wr32(priv, 0x61c530 + soff, 0x00000000);
47 nv_wr32(priv, 0x61c534 + soff, 0x00000000);
48 nv_wr32(priv, 0x61c538 + soff, 0x00000000);
49 nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000001);
50
51 /* Audio InfoFrame */
52 nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
53 nv_wr32(priv, 0x61c508 + soff, 0x000a0184);
54 nv_wr32(priv, 0x61c50c + soff, 0x00000071);
55 nv_wr32(priv, 0x61c510 + soff, 0x00000000);
56 nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000001);
57
58 /* ??? */
59 nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
60 nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
61 nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
62
63 /* HDMI_CTRL */
64 nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, data | 0x1f000000 /* ??? */);
65 return 0;
66}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
new file mode 100644
index 000000000000..5151bb261832
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
@@ -0,0 +1,62 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27
28#include "nv50.h"
29
30int
31nvd0_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
32{
33 const u32 hoff = (head * 0x800);
34
35 if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
36 nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000);
37 nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
38 nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
39 return 0;
40 }
41
42 /* AVI InfoFrame */
43 nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
44 nv_wr32(priv, 0x61671c + hoff, 0x000d0282);
45 nv_wr32(priv, 0x616720 + hoff, 0x0000006f);
46 nv_wr32(priv, 0x616724 + hoff, 0x00000000);
47 nv_wr32(priv, 0x616728 + hoff, 0x00000000);
48 nv_wr32(priv, 0x61672c + hoff, 0x00000000);
49 nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000001);
50
51 /* ??? InfoFrame? */
52 nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
53 nv_wr32(priv, 0x6167ac + hoff, 0x00000010);
54 nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000001);
55
56 /* HDMI_CTRL */
57 nv_mask(priv, 0x616798 + hoff, 0x401f007f, data);
58
59 /* NFI, audio doesn't work without it though.. */
60 nv_mask(priv, 0x616548 + hoff, 0x00000070, 0x00000000);
61 return 0;
62}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 16a9afb1060b..ca1a7d76a95b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -22,21 +22,744 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/object.h>
26#include <core/parent.h>
27#include <core/handle.h>
28#include <core/class.h>
29
25#include <engine/software.h> 30#include <engine/software.h>
26#include <engine/disp.h> 31#include <engine/disp.h>
27 32
28struct nv50_disp_priv { 33#include <subdev/bios.h>
29 struct nouveau_disp base; 34#include <subdev/bios/dcb.h>
35#include <subdev/bios/disp.h>
36#include <subdev/bios/init.h>
37#include <subdev/bios/pll.h>
38#include <subdev/timer.h>
39#include <subdev/fb.h>
40#include <subdev/bar.h>
41#include <subdev/clock.h>
42
43#include "nv50.h"
44
45/*******************************************************************************
46 * EVO channel base class
47 ******************************************************************************/
48
49int
50nv50_disp_chan_create_(struct nouveau_object *parent,
51 struct nouveau_object *engine,
52 struct nouveau_oclass *oclass, int chid,
53 int length, void **pobject)
54{
55 struct nv50_disp_base *base = (void *)parent;
56 struct nv50_disp_chan *chan;
57 int ret;
58
59 if (base->chan & (1 << chid))
60 return -EBUSY;
61 base->chan |= (1 << chid);
62
63 ret = nouveau_namedb_create_(parent, engine, oclass, 0, NULL,
64 (1ULL << NVDEV_ENGINE_DMAOBJ),
65 length, pobject);
66 chan = *pobject;
67 if (ret)
68 return ret;
69
70 chan->chid = chid;
71 return 0;
72}
73
74void
75nv50_disp_chan_destroy(struct nv50_disp_chan *chan)
76{
77 struct nv50_disp_base *base = (void *)nv_object(chan)->parent;
78 base->chan &= ~(1 << chan->chid);
79 nouveau_namedb_destroy(&chan->base);
80}
81
82u32
83nv50_disp_chan_rd32(struct nouveau_object *object, u64 addr)
84{
85 struct nv50_disp_priv *priv = (void *)object->engine;
86 struct nv50_disp_chan *chan = (void *)object;
87 return nv_rd32(priv, 0x640000 + (chan->chid * 0x1000) + addr);
88}
89
90void
91nv50_disp_chan_wr32(struct nouveau_object *object, u64 addr, u32 data)
92{
93 struct nv50_disp_priv *priv = (void *)object->engine;
94 struct nv50_disp_chan *chan = (void *)object;
95 nv_wr32(priv, 0x640000 + (chan->chid * 0x1000) + addr, data);
96}
97
98/*******************************************************************************
99 * EVO DMA channel base class
100 ******************************************************************************/
101
102static int
103nv50_disp_dmac_object_attach(struct nouveau_object *parent,
104 struct nouveau_object *object, u32 name)
105{
106 struct nv50_disp_base *base = (void *)parent->parent;
107 struct nv50_disp_chan *chan = (void *)parent;
108 u32 addr = nv_gpuobj(object)->node->offset;
109 u32 chid = chan->chid;
110 u32 data = (chid << 28) | (addr << 10) | chid;
111 return nouveau_ramht_insert(base->ramht, chid, name, data);
112}
113
114static void
115nv50_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
116{
117 struct nv50_disp_base *base = (void *)parent->parent;
118 nouveau_ramht_remove(base->ramht, cookie);
119}
120
121int
122nv50_disp_dmac_create_(struct nouveau_object *parent,
123 struct nouveau_object *engine,
124 struct nouveau_oclass *oclass, u32 pushbuf, int chid,
125 int length, void **pobject)
126{
127 struct nv50_disp_dmac *dmac;
128 int ret;
129
130 ret = nv50_disp_chan_create_(parent, engine, oclass, chid,
131 length, pobject);
132 dmac = *pobject;
133 if (ret)
134 return ret;
135
136 dmac->pushdma = (void *)nouveau_handle_ref(parent, pushbuf);
137 if (!dmac->pushdma)
138 return -ENOENT;
139
140 switch (nv_mclass(dmac->pushdma)) {
141 case 0x0002:
142 case 0x003d:
143 if (dmac->pushdma->limit - dmac->pushdma->start != 0xfff)
144 return -EINVAL;
145
146 switch (dmac->pushdma->target) {
147 case NV_MEM_TARGET_VRAM:
148 dmac->push = 0x00000000 | dmac->pushdma->start >> 8;
149 break;
150 case NV_MEM_TARGET_PCI_NOSNOOP:
151 dmac->push = 0x00000003 | dmac->pushdma->start >> 8;
152 break;
153 default:
154 return -EINVAL;
155 }
156 break;
157 default:
158 return -EINVAL;
159 }
160
161 return 0;
162}
163
164void
165nv50_disp_dmac_dtor(struct nouveau_object *object)
166{
167 struct nv50_disp_dmac *dmac = (void *)object;
168 nouveau_object_ref(NULL, (struct nouveau_object **)&dmac->pushdma);
169 nv50_disp_chan_destroy(&dmac->base);
170}
171
172static int
173nv50_disp_dmac_init(struct nouveau_object *object)
174{
175 struct nv50_disp_priv *priv = (void *)object->engine;
176 struct nv50_disp_dmac *dmac = (void *)object;
177 int chid = dmac->base.chid;
178 int ret;
179
180 ret = nv50_disp_chan_init(&dmac->base);
181 if (ret)
182 return ret;
183
184 /* enable error reporting */
185 nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00010001 << chid);
186
187 /* initialise channel for dma command submission */
188 nv_wr32(priv, 0x610204 + (chid * 0x0010), dmac->push);
189 nv_wr32(priv, 0x610208 + (chid * 0x0010), 0x00010000);
190 nv_wr32(priv, 0x61020c + (chid * 0x0010), chid);
191 nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
192 nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
193 nv_wr32(priv, 0x610200 + (chid * 0x0010), 0x00000013);
194
195 /* wait for it to go inactive */
196 if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x80000000, 0x00000000)) {
197 nv_error(dmac, "init timeout, 0x%08x\n",
198 nv_rd32(priv, 0x610200 + (chid * 0x10)));
199 return -EBUSY;
200 }
201
202 return 0;
203}
204
205static int
206nv50_disp_dmac_fini(struct nouveau_object *object, bool suspend)
207{
208 struct nv50_disp_priv *priv = (void *)object->engine;
209 struct nv50_disp_dmac *dmac = (void *)object;
210 int chid = dmac->base.chid;
211
212 /* deactivate channel */
213 nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
214 nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
215 if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x001e0000, 0x00000000)) {
216 nv_error(dmac, "fini timeout, 0x%08x\n",
217 nv_rd32(priv, 0x610200 + (chid * 0x10)));
218 if (suspend)
219 return -EBUSY;
220 }
221
222 /* disable error reporting */
223 nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
224
225 return nv50_disp_chan_fini(&dmac->base, suspend);
226}
227
228/*******************************************************************************
229 * EVO master channel object
230 ******************************************************************************/
231
232static int
233nv50_disp_mast_ctor(struct nouveau_object *parent,
234 struct nouveau_object *engine,
235 struct nouveau_oclass *oclass, void *data, u32 size,
236 struct nouveau_object **pobject)
237{
238 struct nv50_display_mast_class *args = data;
239 struct nv50_disp_dmac *mast;
240 int ret;
241
242 if (size < sizeof(*args))
243 return -EINVAL;
244
245 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
246 0, sizeof(*mast), (void **)&mast);
247 *pobject = nv_object(mast);
248 if (ret)
249 return ret;
250
251 nv_parent(mast)->object_attach = nv50_disp_dmac_object_attach;
252 nv_parent(mast)->object_detach = nv50_disp_dmac_object_detach;
253 return 0;
254}
255
256static int
257nv50_disp_mast_init(struct nouveau_object *object)
258{
259 struct nv50_disp_priv *priv = (void *)object->engine;
260 struct nv50_disp_dmac *mast = (void *)object;
261 int ret;
262
263 ret = nv50_disp_chan_init(&mast->base);
264 if (ret)
265 return ret;
266
267 /* enable error reporting */
268 nv_mask(priv, 0x610028, 0x00010001, 0x00010001);
269
270 /* attempt to unstick channel from some unknown state */
271 if ((nv_rd32(priv, 0x610200) & 0x009f0000) == 0x00020000)
272 nv_mask(priv, 0x610200, 0x00800000, 0x00800000);
273 if ((nv_rd32(priv, 0x610200) & 0x003f0000) == 0x00030000)
274 nv_mask(priv, 0x610200, 0x00600000, 0x00600000);
275
276 /* initialise channel for dma command submission */
277 nv_wr32(priv, 0x610204, mast->push);
278 nv_wr32(priv, 0x610208, 0x00010000);
279 nv_wr32(priv, 0x61020c, 0x00000000);
280 nv_mask(priv, 0x610200, 0x00000010, 0x00000010);
281 nv_wr32(priv, 0x640000, 0x00000000);
282 nv_wr32(priv, 0x610200, 0x01000013);
283
284 /* wait for it to go inactive */
285 if (!nv_wait(priv, 0x610200, 0x80000000, 0x00000000)) {
286 nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610200));
287 return -EBUSY;
288 }
289
290 return 0;
291}
292
293static int
294nv50_disp_mast_fini(struct nouveau_object *object, bool suspend)
295{
296 struct nv50_disp_priv *priv = (void *)object->engine;
297 struct nv50_disp_dmac *mast = (void *)object;
298
299 /* deactivate channel */
300 nv_mask(priv, 0x610200, 0x00000010, 0x00000000);
301 nv_mask(priv, 0x610200, 0x00000003, 0x00000000);
302 if (!nv_wait(priv, 0x610200, 0x001e0000, 0x00000000)) {
303 nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610200));
304 if (suspend)
305 return -EBUSY;
306 }
307
308 /* disable error reporting */
309 nv_mask(priv, 0x610028, 0x00010001, 0x00000000);
310
311 return nv50_disp_chan_fini(&mast->base, suspend);
312}
313
314struct nouveau_ofuncs
315nv50_disp_mast_ofuncs = {
316 .ctor = nv50_disp_mast_ctor,
317 .dtor = nv50_disp_dmac_dtor,
318 .init = nv50_disp_mast_init,
319 .fini = nv50_disp_mast_fini,
320 .rd32 = nv50_disp_chan_rd32,
321 .wr32 = nv50_disp_chan_wr32,
322};
323
324/*******************************************************************************
325 * EVO sync channel objects
326 ******************************************************************************/
327
328static int
329nv50_disp_sync_ctor(struct nouveau_object *parent,
330 struct nouveau_object *engine,
331 struct nouveau_oclass *oclass, void *data, u32 size,
332 struct nouveau_object **pobject)
333{
334 struct nv50_display_sync_class *args = data;
335 struct nv50_disp_dmac *dmac;
336 int ret;
337
338 if (size < sizeof(*data) || args->head > 1)
339 return -EINVAL;
340
341 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
342 1 + args->head, sizeof(*dmac),
343 (void **)&dmac);
344 *pobject = nv_object(dmac);
345 if (ret)
346 return ret;
347
348 nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach;
349 nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach;
350 return 0;
351}
352
353struct nouveau_ofuncs
354nv50_disp_sync_ofuncs = {
355 .ctor = nv50_disp_sync_ctor,
356 .dtor = nv50_disp_dmac_dtor,
357 .init = nv50_disp_dmac_init,
358 .fini = nv50_disp_dmac_fini,
359 .rd32 = nv50_disp_chan_rd32,
360 .wr32 = nv50_disp_chan_wr32,
361};
362
363/*******************************************************************************
364 * EVO overlay channel objects
365 ******************************************************************************/
366
367static int
368nv50_disp_ovly_ctor(struct nouveau_object *parent,
369 struct nouveau_object *engine,
370 struct nouveau_oclass *oclass, void *data, u32 size,
371 struct nouveau_object **pobject)
372{
373 struct nv50_display_ovly_class *args = data;
374 struct nv50_disp_dmac *dmac;
375 int ret;
376
377 if (size < sizeof(*data) || args->head > 1)
378 return -EINVAL;
379
380 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
381 3 + args->head, sizeof(*dmac),
382 (void **)&dmac);
383 *pobject = nv_object(dmac);
384 if (ret)
385 return ret;
386
387 nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach;
388 nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach;
389 return 0;
390}
391
392struct nouveau_ofuncs
393nv50_disp_ovly_ofuncs = {
394 .ctor = nv50_disp_ovly_ctor,
395 .dtor = nv50_disp_dmac_dtor,
396 .init = nv50_disp_dmac_init,
397 .fini = nv50_disp_dmac_fini,
398 .rd32 = nv50_disp_chan_rd32,
399 .wr32 = nv50_disp_chan_wr32,
400};
401
402/*******************************************************************************
403 * EVO PIO channel base class
404 ******************************************************************************/
405
static int
nv50_disp_pioc_create_(struct nouveau_object *parent,
		       struct nouveau_object *engine,
		       struct nouveau_oclass *oclass, int chid,
		       int length, void **pobject)
{
	/* PIO channels carry no DMA state beyond the common channel base */
	return nv50_disp_chan_create_(parent, engine, oclass, chid,
				      length, pobject);
}
415
416static void
417nv50_disp_pioc_dtor(struct nouveau_object *object)
418{
419 struct nv50_disp_pioc *pioc = (void *)object;
420 nv50_disp_chan_destroy(&pioc->base);
421}
422
423static int
424nv50_disp_pioc_init(struct nouveau_object *object)
425{
426 struct nv50_disp_priv *priv = (void *)object->engine;
427 struct nv50_disp_pioc *pioc = (void *)object;
428 int chid = pioc->base.chid;
429 int ret;
430
431 ret = nv50_disp_chan_init(&pioc->base);
432 if (ret)
433 return ret;
434
435 nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00002000);
436 if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00000000, 0x00000000)) {
437 nv_error(pioc, "timeout0: 0x%08x\n",
438 nv_rd32(priv, 0x610200 + (chid * 0x10)));
439 return -EBUSY;
440 }
441
442 nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00000001);
443 if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00010000)) {
444 nv_error(pioc, "timeout1: 0x%08x\n",
445 nv_rd32(priv, 0x610200 + (chid * 0x10)));
446 return -EBUSY;
447 }
448
449 return 0;
450}
451
452static int
453nv50_disp_pioc_fini(struct nouveau_object *object, bool suspend)
454{
455 struct nv50_disp_priv *priv = (void *)object->engine;
456 struct nv50_disp_pioc *pioc = (void *)object;
457 int chid = pioc->base.chid;
458
459 nv_mask(priv, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
460 if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00000000)) {
461 nv_error(pioc, "timeout: 0x%08x\n",
462 nv_rd32(priv, 0x610200 + (chid * 0x10)));
463 if (suspend)
464 return -EBUSY;
465 }
466
467 return nv50_disp_chan_fini(&pioc->base, suspend);
468}
469
470/*******************************************************************************
471 * EVO immediate overlay channel objects
472 ******************************************************************************/
473
474static int
475nv50_disp_oimm_ctor(struct nouveau_object *parent,
476 struct nouveau_object *engine,
477 struct nouveau_oclass *oclass, void *data, u32 size,
478 struct nouveau_object **pobject)
479{
480 struct nv50_display_oimm_class *args = data;
481 struct nv50_disp_pioc *pioc;
482 int ret;
483
484 if (size < sizeof(*args) || args->head > 1)
485 return -EINVAL;
486
487 ret = nv50_disp_pioc_create_(parent, engine, oclass, 5 + args->head,
488 sizeof(*pioc), (void **)&pioc);
489 *pobject = nv_object(pioc);
490 if (ret)
491 return ret;
492
493 return 0;
494}
495
496struct nouveau_ofuncs
497nv50_disp_oimm_ofuncs = {
498 .ctor = nv50_disp_oimm_ctor,
499 .dtor = nv50_disp_pioc_dtor,
500 .init = nv50_disp_pioc_init,
501 .fini = nv50_disp_pioc_fini,
502 .rd32 = nv50_disp_chan_rd32,
503 .wr32 = nv50_disp_chan_wr32,
504};
505
506/*******************************************************************************
507 * EVO cursor channel objects
508 ******************************************************************************/
509
510static int
511nv50_disp_curs_ctor(struct nouveau_object *parent,
512 struct nouveau_object *engine,
513 struct nouveau_oclass *oclass, void *data, u32 size,
514 struct nouveau_object **pobject)
515{
516 struct nv50_display_curs_class *args = data;
517 struct nv50_disp_pioc *pioc;
518 int ret;
519
520 if (size < sizeof(*args) || args->head > 1)
521 return -EINVAL;
522
523 ret = nv50_disp_pioc_create_(parent, engine, oclass, 7 + args->head,
524 sizeof(*pioc), (void **)&pioc);
525 *pobject = nv_object(pioc);
526 if (ret)
527 return ret;
528
529 return 0;
530}
531
532struct nouveau_ofuncs
533nv50_disp_curs_ofuncs = {
534 .ctor = nv50_disp_curs_ctor,
535 .dtor = nv50_disp_pioc_dtor,
536 .init = nv50_disp_pioc_init,
537 .fini = nv50_disp_pioc_fini,
538 .rd32 = nv50_disp_chan_rd32,
539 .wr32 = nv50_disp_chan_wr32,
540};
541
542/*******************************************************************************
543 * Base display object
544 ******************************************************************************/
545
546static int
547nv50_disp_base_ctor(struct nouveau_object *parent,
548 struct nouveau_object *engine,
549 struct nouveau_oclass *oclass, void *data, u32 size,
550 struct nouveau_object **pobject)
551{
552 struct nv50_disp_priv *priv = (void *)engine;
553 struct nv50_disp_base *base;
554 int ret;
555
556 ret = nouveau_parent_create(parent, engine, oclass, 0,
557 priv->sclass, 0, &base);
558 *pobject = nv_object(base);
559 if (ret)
560 return ret;
561
562 return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
563}
564
565static void
566nv50_disp_base_dtor(struct nouveau_object *object)
567{
568 struct nv50_disp_base *base = (void *)object;
569 nouveau_ramht_ref(NULL, &base->ramht);
570 nouveau_parent_destroy(&base->base);
571}
572
573static int
574nv50_disp_base_init(struct nouveau_object *object)
575{
576 struct nv50_disp_priv *priv = (void *)object->engine;
577 struct nv50_disp_base *base = (void *)object;
578 int ret, i;
579 u32 tmp;
580
581 ret = nouveau_parent_init(&base->base);
582 if (ret)
583 return ret;
584
585 /* The below segments of code copying values from one register to
586 * another appear to inform EVO of the display capabilities or
587 * something similar. NFI what the 0x614004 caps are for..
588 */
589 tmp = nv_rd32(priv, 0x614004);
590 nv_wr32(priv, 0x610184, tmp);
591
592 /* ... CRTC caps */
593 for (i = 0; i < priv->head.nr; i++) {
594 tmp = nv_rd32(priv, 0x616100 + (i * 0x800));
595 nv_wr32(priv, 0x610190 + (i * 0x10), tmp);
596 tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
597 nv_wr32(priv, 0x610194 + (i * 0x10), tmp);
598 tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
599 nv_wr32(priv, 0x610198 + (i * 0x10), tmp);
600 tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
601 nv_wr32(priv, 0x61019c + (i * 0x10), tmp);
602 }
603
604 /* ... DAC caps */
605 for (i = 0; i < priv->dac.nr; i++) {
606 tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
607 nv_wr32(priv, 0x6101d0 + (i * 0x04), tmp);
608 }
609
610 /* ... SOR caps */
611 for (i = 0; i < priv->sor.nr; i++) {
612 tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
613 nv_wr32(priv, 0x6101e0 + (i * 0x04), tmp);
614 }
615
616 /* ... EXT caps */
617 for (i = 0; i < 3; i++) {
618 tmp = nv_rd32(priv, 0x61e000 + (i * 0x800));
619 nv_wr32(priv, 0x6101f0 + (i * 0x04), tmp);
620 }
621
622 /* steal display away from vbios, or something like that */
623 if (nv_rd32(priv, 0x610024) & 0x00000100) {
624 nv_wr32(priv, 0x610024, 0x00000100);
625 nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
626 if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
627 nv_error(priv, "timeout acquiring display\n");
628 return -EBUSY;
629 }
630 }
631
632 /* point at display engine memory area (hash table, objects) */
633 nv_wr32(priv, 0x610010, (nv_gpuobj(base->ramht)->addr >> 8) | 9);
634
635 /* enable supervisor interrupts, disable everything else */
636 nv_wr32(priv, 0x61002c, 0x00000370);
637 nv_wr32(priv, 0x610028, 0x00000000);
638 return 0;
639}
640
641static int
642nv50_disp_base_fini(struct nouveau_object *object, bool suspend)
643{
644 struct nv50_disp_priv *priv = (void *)object->engine;
645 struct nv50_disp_base *base = (void *)object;
646
647 /* disable all interrupts */
648 nv_wr32(priv, 0x610024, 0x00000000);
649 nv_wr32(priv, 0x610020, 0x00000000);
650
651 return nouveau_parent_fini(&base->base, suspend);
652}
653
654struct nouveau_ofuncs
655nv50_disp_base_ofuncs = {
656 .ctor = nv50_disp_base_ctor,
657 .dtor = nv50_disp_base_dtor,
658 .init = nv50_disp_base_init,
659 .fini = nv50_disp_base_fini,
660};
661
662static struct nouveau_omthds
663nv50_disp_base_omthds[] = {
664 { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
665 { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
666 { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
667 { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
668 {},
669};
670
671static struct nouveau_oclass
672nv50_disp_base_oclass[] = {
673 { NV50_DISP_CLASS, &nv50_disp_base_ofuncs, nv50_disp_base_omthds },
674 {}
30}; 675};
31 676
32static struct nouveau_oclass 677static struct nouveau_oclass
33nv50_disp_sclass[] = { 678nv50_disp_sclass[] = {
34 {}, 679 { NV50_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
680 { NV50_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
681 { NV50_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
682 { NV50_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
683 { NV50_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
684 {}
685};
686
687/*******************************************************************************
688 * Display context, tracks instmem allocation and prevents more than one
689 * client using the display hardware at any time.
690 ******************************************************************************/
691
692static int
693nv50_disp_data_ctor(struct nouveau_object *parent,
694 struct nouveau_object *engine,
695 struct nouveau_oclass *oclass, void *data, u32 size,
696 struct nouveau_object **pobject)
697{
698 struct nv50_disp_priv *priv = (void *)engine;
699 struct nouveau_engctx *ectx;
700 int ret = -EBUSY;
701
702 /* no context needed for channel objects... */
703 if (nv_mclass(parent) != NV_DEVICE_CLASS) {
704 atomic_inc(&parent->refcount);
705 *pobject = parent;
706 return 0;
707 }
708
709 /* allocate display hardware to client */
710 mutex_lock(&nv_subdev(priv)->mutex);
711 if (list_empty(&nv_engine(priv)->contexts)) {
712 ret = nouveau_engctx_create(parent, engine, oclass, NULL,
713 0x10000, 0x10000,
714 NVOBJ_FLAG_HEAP, &ectx);
715 *pobject = nv_object(ectx);
716 }
717 mutex_unlock(&nv_subdev(priv)->mutex);
718 return ret;
719}
720
721struct nouveau_oclass
722nv50_disp_cclass = {
723 .handle = NV_ENGCTX(DISP, 0x50),
724 .ofuncs = &(struct nouveau_ofuncs) {
725 .ctor = nv50_disp_data_ctor,
726 .dtor = _nouveau_engctx_dtor,
727 .init = _nouveau_engctx_init,
728 .fini = _nouveau_engctx_fini,
729 .rd32 = _nouveau_engctx_rd32,
730 .wr32 = _nouveau_engctx_wr32,
731 },
35}; 732};
36 733
734/*******************************************************************************
735 * Display engine implementation
736 ******************************************************************************/
737
738static void
739nv50_disp_intr_error(struct nv50_disp_priv *priv)
740{
741 u32 channels = (nv_rd32(priv, 0x610020) & 0x001f0000) >> 16;
742 u32 addr, data;
743 int chid;
744
745 for (chid = 0; chid < 5; chid++) {
746 if (!(channels & (1 << chid)))
747 continue;
748
749 nv_wr32(priv, 0x610020, 0x00010000 << chid);
750 addr = nv_rd32(priv, 0x610080 + (chid * 0x08));
751 data = nv_rd32(priv, 0x610084 + (chid * 0x08));
752 nv_wr32(priv, 0x610080 + (chid * 0x08), 0x90000000);
753
754 nv_error(priv, "chid %d mthd 0x%04x data 0x%08x 0x%08x\n",
755 chid, addr & 0xffc, data, addr);
756 }
757}
758
37static void 759static void
38nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc) 760nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
39{ 761{
762 struct nouveau_bar *bar = nouveau_bar(priv);
40 struct nouveau_disp *disp = &priv->base; 763 struct nouveau_disp *disp = &priv->base;
41 struct nouveau_software_chan *chan, *temp; 764 struct nouveau_software_chan *chan, *temp;
42 unsigned long flags; 765 unsigned long flags;
@@ -46,19 +769,25 @@ nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
46 if (chan->vblank.crtc != crtc) 769 if (chan->vblank.crtc != crtc)
47 continue; 770 continue;
48 771
49 nv_wr32(priv, 0x001704, chan->vblank.channel); 772 if (nv_device(priv)->chipset >= 0xc0) {
50 nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma); 773 nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
51 774 bar->flush(bar);
52 if (nv_device(priv)->chipset == 0x50) { 775 nv_wr32(priv, 0x06000c,
53 nv_wr32(priv, 0x001570, chan->vblank.offset); 776 upper_32_bits(chan->vblank.offset));
54 nv_wr32(priv, 0x001574, chan->vblank.value); 777 nv_wr32(priv, 0x060010,
778 lower_32_bits(chan->vblank.offset));
779 nv_wr32(priv, 0x060014, chan->vblank.value);
55 } else { 780 } else {
56 if (nv_device(priv)->chipset >= 0xc0) { 781 nv_wr32(priv, 0x001704, chan->vblank.channel);
57 nv_wr32(priv, 0x06000c, 782 nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
58 upper_32_bits(chan->vblank.offset)); 783 bar->flush(bar);
784 if (nv_device(priv)->chipset == 0x50) {
785 nv_wr32(priv, 0x001570, chan->vblank.offset);
786 nv_wr32(priv, 0x001574, chan->vblank.value);
787 } else {
788 nv_wr32(priv, 0x060010, chan->vblank.offset);
789 nv_wr32(priv, 0x060014, chan->vblank.value);
59 } 790 }
60 nv_wr32(priv, 0x060010, chan->vblank.offset);
61 nv_wr32(priv, 0x060014, chan->vblank.value);
62 } 791 }
63 792
64 list_del(&chan->vblank.head); 793 list_del(&chan->vblank.head);
@@ -71,30 +800,428 @@ nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
71 disp->vblank.notify(disp->vblank.data, crtc); 800 disp->vblank.notify(disp->vblank.data, crtc);
72} 801}
73 802
803static u16
804exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
805 struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
806 struct nvbios_outp *info)
807{
808 struct nouveau_bios *bios = nouveau_bios(priv);
809 u16 mask, type, data;
810
811 if (outp < 4) {
812 type = DCB_OUTPUT_ANALOG;
813 mask = 0;
814 } else {
815 outp -= 4;
816 switch (ctrl & 0x00000f00) {
817 case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
818 case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
819 case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
820 case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
821 case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
822 case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
823 default:
824 nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
825 return 0x0000;
826 }
827 }
828
829 mask = 0x00c0 & (mask << 6);
830 mask |= 0x0001 << outp;
831 mask |= 0x0100 << head;
832
833 data = dcb_outp_match(bios, type, mask, ver, hdr, dcb);
834 if (!data)
835 return 0x0000;
836
837 return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info);
838}
839
840static bool
841exec_script(struct nv50_disp_priv *priv, int head, int id)
842{
843 struct nouveau_bios *bios = nouveau_bios(priv);
844 struct nvbios_outp info;
845 struct dcb_output dcb;
846 u8 ver, hdr, cnt, len;
847 u16 data;
848 u32 ctrl = 0x00000000;
849 int i;
850
851 for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
852 ctrl = nv_rd32(priv, 0x610b5c + (i * 8));
853
854 if (!(ctrl & (1 << head))) {
855 if (nv_device(priv)->chipset < 0x90 ||
856 nv_device(priv)->chipset == 0x92 ||
857 nv_device(priv)->chipset == 0xa0) {
858 for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
859 ctrl = nv_rd32(priv, 0x610b74 + (i * 8));
860 i += 4;
861 } else {
862 for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
863 ctrl = nv_rd32(priv, 0x610798 + (i * 8));
864 i += 4;
865 }
866 }
867
868 if (!(ctrl & (1 << head)))
869 return false;
870 i--;
871
872 data = exec_lookup(priv, head, i, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
873 if (data) {
874 struct nvbios_init init = {
875 .subdev = nv_subdev(priv),
876 .bios = bios,
877 .offset = info.script[id],
878 .outp = &dcb,
879 .crtc = head,
880 .execute = 1,
881 };
882
883 return nvbios_exec(&init) == 0;
884 }
885
886 return false;
887}
888
889static u32
890exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
891 struct dcb_output *outp)
892{
893 struct nouveau_bios *bios = nouveau_bios(priv);
894 struct nvbios_outp info1;
895 struct nvbios_ocfg info2;
896 u8 ver, hdr, cnt, len;
897 u16 data, conf;
898 u32 ctrl = 0x00000000;
899 int i;
900
901 for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
902 ctrl = nv_rd32(priv, 0x610b58 + (i * 8));
903
904 if (!(ctrl & (1 << head))) {
905 if (nv_device(priv)->chipset < 0x90 ||
906 nv_device(priv)->chipset == 0x92 ||
907 nv_device(priv)->chipset == 0xa0) {
908 for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
909 ctrl = nv_rd32(priv, 0x610b70 + (i * 8));
910 i += 4;
911 } else {
912 for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
913 ctrl = nv_rd32(priv, 0x610794 + (i * 8));
914 i += 4;
915 }
916 }
917
918 if (!(ctrl & (1 << head)))
919 return 0x0000;
920 i--;
921
922 data = exec_lookup(priv, head, i, ctrl, outp, &ver, &hdr, &cnt, &len, &info1);
923 if (!data)
924 return 0x0000;
925
926 switch (outp->type) {
927 case DCB_OUTPUT_TMDS:
928 conf = (ctrl & 0x00000f00) >> 8;
929 if (pclk >= 165000)
930 conf |= 0x0100;
931 break;
932 case DCB_OUTPUT_LVDS:
933 conf = priv->sor.lvdsconf;
934 break;
935 case DCB_OUTPUT_DP:
936 conf = (ctrl & 0x00000f00) >> 8;
937 break;
938 case DCB_OUTPUT_ANALOG:
939 default:
940 conf = 0x00ff;
941 break;
942 }
943
944 data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
945 if (data) {
946 data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
947 if (data) {
948 struct nvbios_init init = {
949 .subdev = nv_subdev(priv),
950 .bios = bios,
951 .offset = data,
952 .outp = outp,
953 .crtc = head,
954 .execute = 1,
955 };
956
957 if (nvbios_exec(&init))
958 return 0x0000;
959 return conf;
960 }
961 }
962
963 return 0x0000;
964}
965
966static void
967nv50_disp_intr_unk10(struct nv50_disp_priv *priv, u32 super)
968{
969 int head = ffs((super & 0x00000060) >> 5) - 1;
970 if (head >= 0) {
971 head = ffs((super & 0x00000180) >> 7) - 1;
972 if (head >= 0)
973 exec_script(priv, head, 1);
974 }
975
976 nv_wr32(priv, 0x610024, 0x00000010);
977 nv_wr32(priv, 0x610030, 0x80000000);
978}
979
74static void 980static void
981nv50_disp_intr_unk20_dp(struct nv50_disp_priv *priv,
982 struct dcb_output *outp, u32 pclk)
983{
984 const int link = !(outp->sorconf.link & 1);
985 const int or = ffs(outp->or) - 1;
986 const u32 soff = ( or * 0x800);
987 const u32 loff = (link * 0x080) + soff;
988 const u32 ctrl = nv_rd32(priv, 0x610794 + (or * 8));
989 const u32 symbol = 100000;
990 u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x0000f0000;
991 u32 clksor = nv_rd32(priv, 0x614300 + soff);
992 int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
993 int TU, VTUi, VTUf, VTUa;
994 u64 link_data_rate, link_ratio, unk;
995 u32 best_diff = 64 * symbol;
996 u32 link_nr, link_bw, bits, r;
997
998 /* calculate packed data rate for each lane */
999 if (dpctrl > 0x00030000) link_nr = 4;
1000 else if (dpctrl > 0x00010000) link_nr = 2;
1001 else link_nr = 1;
1002
1003 if (clksor & 0x000c0000)
1004 link_bw = 270000;
1005 else
1006 link_bw = 162000;
1007
1008 if ((ctrl & 0xf0000) == 0x60000) bits = 30;
1009 else if ((ctrl & 0xf0000) == 0x50000) bits = 24;
1010 else bits = 18;
1011
1012 link_data_rate = (pclk * bits / 8) / link_nr;
1013
1014 /* calculate ratio of packed data rate to link symbol rate */
1015 link_ratio = link_data_rate * symbol;
1016 r = do_div(link_ratio, link_bw);
1017
1018 for (TU = 64; TU >= 32; TU--) {
1019 /* calculate average number of valid symbols in each TU */
1020 u32 tu_valid = link_ratio * TU;
1021 u32 calc, diff;
1022
1023 /* find a hw representation for the fraction.. */
1024 VTUi = tu_valid / symbol;
1025 calc = VTUi * symbol;
1026 diff = tu_valid - calc;
1027 if (diff) {
1028 if (diff >= (symbol / 2)) {
1029 VTUf = symbol / (symbol - diff);
1030 if (symbol - (VTUf * diff))
1031 VTUf++;
1032
1033 if (VTUf <= 15) {
1034 VTUa = 1;
1035 calc += symbol - (symbol / VTUf);
1036 } else {
1037 VTUa = 0;
1038 VTUf = 1;
1039 calc += symbol;
1040 }
1041 } else {
1042 VTUa = 0;
1043 VTUf = min((int)(symbol / diff), 15);
1044 calc += symbol / VTUf;
1045 }
1046
1047 diff = calc - tu_valid;
1048 } else {
1049 /* no remainder, but the hw doesn't like the fractional
1050 * part to be zero. decrement the integer part and
1051 * have the fraction add a whole symbol back
1052 */
1053 VTUa = 0;
1054 VTUf = 1;
1055 VTUi--;
1056 }
1057
1058 if (diff < best_diff) {
1059 best_diff = diff;
1060 bestTU = TU;
1061 bestVTUa = VTUa;
1062 bestVTUf = VTUf;
1063 bestVTUi = VTUi;
1064 if (diff == 0)
1065 break;
1066 }
1067 }
1068
1069 if (!bestTU) {
1070 nv_error(priv, "unable to find suitable dp config\n");
1071 return;
1072 }
1073
1074 /* XXX close to vbios numbers, but not right */
1075 unk = (symbol - link_ratio) * bestTU;
1076 unk *= link_ratio;
1077 r = do_div(unk, symbol);
1078 r = do_div(unk, symbol);
1079 unk += 6;
1080
1081 nv_mask(priv, 0x61c10c + loff, 0x000001fc, bestTU << 2);
1082 nv_mask(priv, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 |
1083 bestVTUf << 16 |
1084 bestVTUi << 8 | unk);
1085}
1086
1087static void
1088nv50_disp_intr_unk20(struct nv50_disp_priv *priv, u32 super)
1089{
1090 struct dcb_output outp;
1091 u32 addr, mask, data;
1092 int head;
1093
1094 /* finish detaching encoder? */
1095 head = ffs((super & 0x00000180) >> 7) - 1;
1096 if (head >= 0)
1097 exec_script(priv, head, 2);
1098
1099 /* check whether a vpll change is required */
1100 head = ffs((super & 0x00000600) >> 9) - 1;
1101 if (head >= 0) {
1102 u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
1103 if (pclk) {
1104 struct nouveau_clock *clk = nouveau_clock(priv);
1105 clk->pll_set(clk, PLL_VPLL0 + head, pclk);
1106 }
1107
1108 nv_mask(priv, 0x614200 + head * 0x800, 0x0000000f, 0x00000000);
1109 }
1110
1111 /* (re)attach the relevant OR to the head */
1112 head = ffs((super & 0x00000180) >> 7) - 1;
1113 if (head >= 0) {
1114 u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
1115 u32 conf = exec_clkcmp(priv, head, 0, pclk, &outp);
1116 if (conf) {
1117 if (outp.type == DCB_OUTPUT_ANALOG) {
1118 addr = 0x614280 + (ffs(outp.or) - 1) * 0x800;
1119 mask = 0xffffffff;
1120 data = 0x00000000;
1121 } else {
1122 if (outp.type == DCB_OUTPUT_DP)
1123 nv50_disp_intr_unk20_dp(priv, &outp, pclk);
1124 addr = 0x614300 + (ffs(outp.or) - 1) * 0x800;
1125 mask = 0x00000707;
1126 data = (conf & 0x0100) ? 0x0101 : 0x0000;
1127 }
1128
1129 nv_mask(priv, addr, mask, data);
1130 }
1131 }
1132
1133 nv_wr32(priv, 0x610024, 0x00000020);
1134 nv_wr32(priv, 0x610030, 0x80000000);
1135}
1136
1137/* If programming a TMDS output on a SOR that can also be configured for
1138 * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
1139 *
1140 * It looks like the VBIOS TMDS scripts make an attempt at this, however,
1141 * the VBIOS scripts on at least one board I have only switch it off on
1142 * link 0, causing a blank display if the output has previously been
1143 * programmed for DisplayPort.
1144 */
1145static void
1146nv50_disp_intr_unk40_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp)
1147{
1148 struct nouveau_bios *bios = nouveau_bios(priv);
1149 const int link = !(outp->sorconf.link & 1);
1150 const int or = ffs(outp->or) - 1;
1151 const u32 loff = (or * 0x800) + (link * 0x80);
1152 const u16 mask = (outp->sorconf.link << 6) | outp->or;
1153 u8 ver, hdr;
1154
1155 if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, outp))
1156 nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000);
1157}
1158
1159static void
1160nv50_disp_intr_unk40(struct nv50_disp_priv *priv, u32 super)
1161{
1162 int head = ffs((super & 0x00000180) >> 7) - 1;
1163 if (head >= 0) {
1164 struct dcb_output outp;
1165 u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
1166 if (pclk && exec_clkcmp(priv, head, 1, pclk, &outp)) {
1167 if (outp.type == DCB_OUTPUT_TMDS)
1168 nv50_disp_intr_unk40_tmds(priv, &outp);
1169 }
1170 }
1171
1172 nv_wr32(priv, 0x610024, 0x00000040);
1173 nv_wr32(priv, 0x610030, 0x80000000);
1174}
1175
1176static void
1177nv50_disp_intr_super(struct nv50_disp_priv *priv, u32 intr1)
1178{
1179 u32 super = nv_rd32(priv, 0x610030);
1180
1181 nv_debug(priv, "supervisor 0x%08x 0x%08x\n", intr1, super);
1182
1183 if (intr1 & 0x00000010)
1184 nv50_disp_intr_unk10(priv, super);
1185 if (intr1 & 0x00000020)
1186 nv50_disp_intr_unk20(priv, super);
1187 if (intr1 & 0x00000040)
1188 nv50_disp_intr_unk40(priv, super);
1189}
1190
1191void
75nv50_disp_intr(struct nouveau_subdev *subdev) 1192nv50_disp_intr(struct nouveau_subdev *subdev)
76{ 1193{
77 struct nv50_disp_priv *priv = (void *)subdev; 1194 struct nv50_disp_priv *priv = (void *)subdev;
78 u32 stat1 = nv_rd32(priv, 0x610024); 1195 u32 intr0 = nv_rd32(priv, 0x610020);
1196 u32 intr1 = nv_rd32(priv, 0x610024);
79 1197
80 if (stat1 & 0x00000004) { 1198 if (intr0 & 0x001f0000) {
1199 nv50_disp_intr_error(priv);
1200 intr0 &= ~0x001f0000;
1201 }
1202
1203 if (intr1 & 0x00000004) {
81 nv50_disp_intr_vblank(priv, 0); 1204 nv50_disp_intr_vblank(priv, 0);
82 nv_wr32(priv, 0x610024, 0x00000004); 1205 nv_wr32(priv, 0x610024, 0x00000004);
83 stat1 &= ~0x00000004; 1206 intr1 &= ~0x00000004;
84 } 1207 }
85 1208
86 if (stat1 & 0x00000008) { 1209 if (intr1 & 0x00000008) {
87 nv50_disp_intr_vblank(priv, 1); 1210 nv50_disp_intr_vblank(priv, 1);
88 nv_wr32(priv, 0x610024, 0x00000008); 1211 nv_wr32(priv, 0x610024, 0x00000008);
89 stat1 &= ~0x00000008; 1212 intr1 &= ~0x00000008;
90 } 1213 }
91 1214
1215 if (intr1 & 0x00000070) {
1216 nv50_disp_intr_super(priv, intr1);
1217 intr1 &= ~0x00000070;
1218 }
92} 1219}
93 1220
94static int 1221static int
95nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 1222nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
96 struct nouveau_oclass *oclass, void *data, u32 size, 1223 struct nouveau_oclass *oclass, void *data, u32 size,
97 struct nouveau_object **pobject) 1224 struct nouveau_object **pobject)
98{ 1225{
99 struct nv50_disp_priv *priv; 1226 struct nv50_disp_priv *priv;
100 int ret; 1227 int ret;
@@ -105,8 +1232,16 @@ nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
105 if (ret) 1232 if (ret)
106 return ret; 1233 return ret;
107 1234
108 nv_engine(priv)->sclass = nv50_disp_sclass; 1235 nv_engine(priv)->sclass = nv50_disp_base_oclass;
1236 nv_engine(priv)->cclass = &nv50_disp_cclass;
109 nv_subdev(priv)->intr = nv50_disp_intr; 1237 nv_subdev(priv)->intr = nv50_disp_intr;
1238 priv->sclass = nv50_disp_sclass;
1239 priv->head.nr = 2;
1240 priv->dac.nr = 3;
1241 priv->sor.nr = 2;
1242 priv->dac.power = nv50_dac_power;
1243 priv->dac.sense = nv50_dac_sense;
1244 priv->sor.power = nv50_sor_power;
110 1245
111 INIT_LIST_HEAD(&priv->base.vblank.list); 1246 INIT_LIST_HEAD(&priv->base.vblank.list);
112 spin_lock_init(&priv->base.vblank.lock); 1247 spin_lock_init(&priv->base.vblank.lock);
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
new file mode 100644
index 000000000000..a6bb931450f1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
@@ -0,0 +1,142 @@
1#ifndef __NV50_DISP_H__
2#define __NV50_DISP_H__
3
4#include <core/parent.h>
5#include <core/namedb.h>
6#include <core/ramht.h>
7
8#include <engine/dmaobj.h>
9#include <engine/disp.h>
10
11struct dcb_output;
12
13struct nv50_disp_priv {
14 struct nouveau_disp base;
15 struct nouveau_oclass *sclass;
16 struct {
17 int nr;
18 } head;
19 struct {
20 int nr;
21 int (*power)(struct nv50_disp_priv *, int dac, u32 data);
22 int (*sense)(struct nv50_disp_priv *, int dac, u32 load);
23 } dac;
24 struct {
25 int nr;
26 int (*power)(struct nv50_disp_priv *, int sor, u32 data);
27 int (*hda_eld)(struct nv50_disp_priv *, int sor, u8 *, u32);
28 int (*hdmi)(struct nv50_disp_priv *, int head, int sor, u32);
29 int (*dp_train_init)(struct nv50_disp_priv *, int sor, int link,
30 int head, u16 type, u16 mask, u32 data,
31 struct dcb_output *);
32 int (*dp_train_fini)(struct nv50_disp_priv *, int sor, int link,
33 int head, u16 type, u16 mask, u32 data,
34 struct dcb_output *);
35 int (*dp_train)(struct nv50_disp_priv *, int sor, int link,
36 u16 type, u16 mask, u32 data,
37 struct dcb_output *);
38 int (*dp_lnkctl)(struct nv50_disp_priv *, int sor, int link,
39 int head, u16 type, u16 mask, u32 data,
40 struct dcb_output *);
41 int (*dp_drvctl)(struct nv50_disp_priv *, int sor, int link,
42 int lane, u16 type, u16 mask, u32 data,
43 struct dcb_output *);
44 u32 lvdsconf;
45 } sor;
46};
47
48#define DAC_MTHD(n) (n), (n) + 0x03
49
50int nv50_dac_mthd(struct nouveau_object *, u32, void *, u32);
51int nv50_dac_power(struct nv50_disp_priv *, int, u32);
52int nv50_dac_sense(struct nv50_disp_priv *, int, u32);
53
54#define SOR_MTHD(n) (n), (n) + 0x3f
55
56int nva3_hda_eld(struct nv50_disp_priv *, int, u8 *, u32);
57int nvd0_hda_eld(struct nv50_disp_priv *, int, u8 *, u32);
58
59int nv84_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
60int nva3_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
61int nvd0_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
62
63int nv50_sor_mthd(struct nouveau_object *, u32, void *, u32);
64int nv50_sor_power(struct nv50_disp_priv *, int, u32);
65
66int nv94_sor_dp_train_init(struct nv50_disp_priv *, int, int, int, u16, u16,
67 u32, struct dcb_output *);
68int nv94_sor_dp_train_fini(struct nv50_disp_priv *, int, int, int, u16, u16,
69 u32, struct dcb_output *);
70int nv94_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32,
71 struct dcb_output *);
72int nv94_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
73 struct dcb_output *);
74int nv94_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
75 struct dcb_output *);
76
77int nvd0_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32,
78 struct dcb_output *);
79int nvd0_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
80 struct dcb_output *);
81int nvd0_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
82 struct dcb_output *);
83
84struct nv50_disp_base {
85 struct nouveau_parent base;
86 struct nouveau_ramht *ramht;
87 u32 chan;
88};
89
90struct nv50_disp_chan {
91 struct nouveau_namedb base;
92 int chid;
93};
94
95int nv50_disp_chan_create_(struct nouveau_object *, struct nouveau_object *,
96 struct nouveau_oclass *, int, int, void **);
97void nv50_disp_chan_destroy(struct nv50_disp_chan *);
98u32 nv50_disp_chan_rd32(struct nouveau_object *, u64);
99void nv50_disp_chan_wr32(struct nouveau_object *, u64, u32);
100
101#define nv50_disp_chan_init(a) \
102 nouveau_namedb_init(&(a)->base)
103#define nv50_disp_chan_fini(a,b) \
104 nouveau_namedb_fini(&(a)->base, (b))
105
106int nv50_disp_dmac_create_(struct nouveau_object *, struct nouveau_object *,
107 struct nouveau_oclass *, u32, int, int, void **);
108void nv50_disp_dmac_dtor(struct nouveau_object *);
109
110struct nv50_disp_dmac {
111 struct nv50_disp_chan base;
112 struct nouveau_dmaobj *pushdma;
113 u32 push;
114};
115
116struct nv50_disp_pioc {
117 struct nv50_disp_chan base;
118};
119
120extern struct nouveau_ofuncs nv50_disp_mast_ofuncs;
121extern struct nouveau_ofuncs nv50_disp_sync_ofuncs;
122extern struct nouveau_ofuncs nv50_disp_ovly_ofuncs;
123extern struct nouveau_ofuncs nv50_disp_oimm_ofuncs;
124extern struct nouveau_ofuncs nv50_disp_curs_ofuncs;
125extern struct nouveau_ofuncs nv50_disp_base_ofuncs;
126extern struct nouveau_oclass nv50_disp_cclass;
127void nv50_disp_intr(struct nouveau_subdev *);
128
129extern struct nouveau_omthds nv84_disp_base_omthds[];
130
131extern struct nouveau_omthds nva3_disp_base_omthds[];
132
133extern struct nouveau_ofuncs nvd0_disp_mast_ofuncs;
134extern struct nouveau_ofuncs nvd0_disp_sync_ofuncs;
135extern struct nouveau_ofuncs nvd0_disp_ovly_ofuncs;
136extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs;
137extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs;
138extern struct nouveau_ofuncs nvd0_disp_base_ofuncs;
139extern struct nouveau_oclass nvd0_disp_cclass;
140void nvd0_disp_intr(struct nouveau_subdev *);
141
142#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
new file mode 100644
index 000000000000..fc84eacdfbec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
@@ -0,0 +1,98 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <engine/software.h>
26#include <engine/disp.h>
27
28#include <core/class.h>
29
30#include "nv50.h"
31
32static struct nouveau_oclass
33nv84_disp_sclass[] = {
34 { NV84_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
35 { NV84_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
36 { NV84_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
37 { NV84_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
38 { NV84_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
39 {}
40};
41
42struct nouveau_omthds
43nv84_disp_base_omthds[] = {
44 { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
45 { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
46 { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
47 { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
48 { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
49 {},
50};
51
52static struct nouveau_oclass
53nv84_disp_base_oclass[] = {
54 { NV84_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds },
55 {}
56};
57
58static int
59nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
60 struct nouveau_oclass *oclass, void *data, u32 size,
61 struct nouveau_object **pobject)
62{
63 struct nv50_disp_priv *priv;
64 int ret;
65
66 ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
67 "display", &priv);
68 *pobject = nv_object(priv);
69 if (ret)
70 return ret;
71
72 nv_engine(priv)->sclass = nv84_disp_base_oclass;
73 nv_engine(priv)->cclass = &nv50_disp_cclass;
74 nv_subdev(priv)->intr = nv50_disp_intr;
75 priv->sclass = nv84_disp_sclass;
76 priv->head.nr = 2;
77 priv->dac.nr = 3;
78 priv->sor.nr = 2;
79 priv->dac.power = nv50_dac_power;
80 priv->dac.sense = nv50_dac_sense;
81 priv->sor.power = nv50_sor_power;
82 priv->sor.hdmi = nv84_hdmi_ctrl;
83
84 INIT_LIST_HEAD(&priv->base.vblank.list);
85 spin_lock_init(&priv->base.vblank.lock);
86 return 0;
87}
88
89struct nouveau_oclass
90nv84_disp_oclass = {
91 .handle = NV_ENGINE(DISP, 0x82),
92 .ofuncs = &(struct nouveau_ofuncs) {
93 .ctor = nv84_disp_ctor,
94 .dtor = _nouveau_disp_dtor,
95 .init = _nouveau_disp_init,
96 .fini = _nouveau_disp_fini,
97 },
98};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
new file mode 100644
index 000000000000..ba9dfd4669a2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
@@ -0,0 +1,109 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <engine/software.h>
26#include <engine/disp.h>
27
28#include <core/class.h>
29
30#include "nv50.h"
31
32static struct nouveau_oclass
33nv94_disp_sclass[] = {
34 { NV94_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
35 { NV94_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
36 { NV94_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
37 { NV94_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
38 { NV94_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
39 {}
40};
41
42static struct nouveau_omthds
43nv94_disp_base_omthds[] = {
44 { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
45 { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
46 { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
47 { SOR_MTHD(NV94_DISP_SOR_DP_TRAIN) , nv50_sor_mthd },
48 { SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL) , nv50_sor_mthd },
49 { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd },
50 { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd },
51 { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd },
52 { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd },
53 { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
54 { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
55 {},
56};
57
58static struct nouveau_oclass
59nv94_disp_base_oclass[] = {
60 { NV94_DISP_CLASS, &nv50_disp_base_ofuncs, nv94_disp_base_omthds },
61 {}
62};
63
64static int
65nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
66 struct nouveau_oclass *oclass, void *data, u32 size,
67 struct nouveau_object **pobject)
68{
69 struct nv50_disp_priv *priv;
70 int ret;
71
72 ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
73 "display", &priv);
74 *pobject = nv_object(priv);
75 if (ret)
76 return ret;
77
78 nv_engine(priv)->sclass = nv94_disp_base_oclass;
79 nv_engine(priv)->cclass = &nv50_disp_cclass;
80 nv_subdev(priv)->intr = nv50_disp_intr;
81 priv->sclass = nv94_disp_sclass;
82 priv->head.nr = 2;
83 priv->dac.nr = 3;
84 priv->sor.nr = 4;
85 priv->dac.power = nv50_dac_power;
86 priv->dac.sense = nv50_dac_sense;
87 priv->sor.power = nv50_sor_power;
88 priv->sor.hdmi = nv84_hdmi_ctrl;
89 priv->sor.dp_train = nv94_sor_dp_train;
90 priv->sor.dp_train_init = nv94_sor_dp_train_init;
91 priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
92 priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl;
93 priv->sor.dp_drvctl = nv94_sor_dp_drvctl;
94
95 INIT_LIST_HEAD(&priv->base.vblank.list);
96 spin_lock_init(&priv->base.vblank.lock);
97 return 0;
98}
99
100struct nouveau_oclass
101nv94_disp_oclass = {
102 .handle = NV_ENGINE(DISP, 0x88),
103 .ofuncs = &(struct nouveau_ofuncs) {
104 .ctor = nv94_disp_ctor,
105 .dtor = _nouveau_disp_dtor,
106 .init = _nouveau_disp_init,
107 .fini = _nouveau_disp_fini,
108 },
109};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
new file mode 100644
index 000000000000..5d63902cdeda
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
@@ -0,0 +1,88 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <engine/software.h>
26#include <engine/disp.h>
27
28#include <core/class.h>
29
30#include "nv50.h"
31
32static struct nouveau_oclass
33nva0_disp_sclass[] = {
34 { NVA0_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
35 { NVA0_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
36 { NVA0_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
37 { NVA0_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
38 { NVA0_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
39 {}
40};
41
42static struct nouveau_oclass
43nva0_disp_base_oclass[] = {
44 { NVA0_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds },
45 {}
46};
47
48static int
49nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
50 struct nouveau_oclass *oclass, void *data, u32 size,
51 struct nouveau_object **pobject)
52{
53 struct nv50_disp_priv *priv;
54 int ret;
55
56 ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
57 "display", &priv);
58 *pobject = nv_object(priv);
59 if (ret)
60 return ret;
61
62 nv_engine(priv)->sclass = nva0_disp_base_oclass;
63 nv_engine(priv)->cclass = &nv50_disp_cclass;
64 nv_subdev(priv)->intr = nv50_disp_intr;
65 priv->sclass = nva0_disp_sclass;
66 priv->head.nr = 2;
67 priv->dac.nr = 3;
68 priv->sor.nr = 2;
69 priv->dac.power = nv50_dac_power;
70 priv->dac.sense = nv50_dac_sense;
71 priv->sor.power = nv50_sor_power;
72 priv->sor.hdmi = nv84_hdmi_ctrl;
73
74 INIT_LIST_HEAD(&priv->base.vblank.list);
75 spin_lock_init(&priv->base.vblank.lock);
76 return 0;
77}
78
79struct nouveau_oclass
80nva0_disp_oclass = {
81 .handle = NV_ENGINE(DISP, 0x83),
82 .ofuncs = &(struct nouveau_ofuncs) {
83 .ctor = nva0_disp_ctor,
84 .dtor = _nouveau_disp_dtor,
85 .init = _nouveau_disp_init,
86 .fini = _nouveau_disp_fini,
87 },
88};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
new file mode 100644
index 000000000000..e9192ca389fa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
@@ -0,0 +1,111 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <engine/software.h>
26#include <engine/disp.h>
27
28#include <core/class.h>
29
30#include "nv50.h"
31
32static struct nouveau_oclass
33nva3_disp_sclass[] = {
34 { NVA3_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
35 { NVA3_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
36 { NVA3_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
37 { NVA3_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
38 { NVA3_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
39 {}
40};
41
42struct nouveau_omthds
43nva3_disp_base_omthds[] = {
44 { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
45 { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd },
46 { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
47 { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
48 { SOR_MTHD(NV94_DISP_SOR_DP_TRAIN) , nv50_sor_mthd },
49 { SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL) , nv50_sor_mthd },
50 { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd },
51 { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd },
52 { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd },
53 { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd },
54 { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
55 { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
56 {},
57};
58
59static struct nouveau_oclass
60nva3_disp_base_oclass[] = {
61 { NVA3_DISP_CLASS, &nv50_disp_base_ofuncs, nva3_disp_base_omthds },
62 {}
63};
64
65static int
66nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
67 struct nouveau_oclass *oclass, void *data, u32 size,
68 struct nouveau_object **pobject)
69{
70 struct nv50_disp_priv *priv;
71 int ret;
72
73 ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
74 "display", &priv);
75 *pobject = nv_object(priv);
76 if (ret)
77 return ret;
78
79 nv_engine(priv)->sclass = nva3_disp_base_oclass;
80 nv_engine(priv)->cclass = &nv50_disp_cclass;
81 nv_subdev(priv)->intr = nv50_disp_intr;
82 priv->sclass = nva3_disp_sclass;
83 priv->head.nr = 2;
84 priv->dac.nr = 3;
85 priv->sor.nr = 4;
86 priv->dac.power = nv50_dac_power;
87 priv->dac.sense = nv50_dac_sense;
88 priv->sor.power = nv50_sor_power;
89 priv->sor.hda_eld = nva3_hda_eld;
90 priv->sor.hdmi = nva3_hdmi_ctrl;
91 priv->sor.dp_train = nv94_sor_dp_train;
92 priv->sor.dp_train_init = nv94_sor_dp_train_init;
93 priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
94 priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl;
95 priv->sor.dp_drvctl = nv94_sor_dp_drvctl;
96
97 INIT_LIST_HEAD(&priv->base.vblank.list);
98 spin_lock_init(&priv->base.vblank.lock);
99 return 0;
100}
101
102struct nouveau_oclass
103nva3_disp_oclass = {
104 .handle = NV_ENGINE(DISP, 0x85),
105 .ofuncs = &(struct nouveau_ofuncs) {
106 .ctor = nva3_disp_ctor,
107 .dtor = _nouveau_disp_dtor,
108 .init = _nouveau_disp_init,
109 .fini = _nouveau_disp_fini,
110 },
111};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index d93efbcf75b8..9e38ebff5fb3 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -22,22 +22,808 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/bar.h> 25#include <core/object.h>
26#include <core/parent.h>
27#include <core/handle.h>
28#include <core/class.h>
26 29
27#include <engine/software.h> 30#include <engine/software.h>
28#include <engine/disp.h> 31#include <engine/disp.h>
29 32
30struct nvd0_disp_priv { 33#include <subdev/timer.h>
31 struct nouveau_disp base; 34#include <subdev/fb.h>
35#include <subdev/bar.h>
36#include <subdev/clock.h>
37
38#include <subdev/bios.h>
39#include <subdev/bios/dcb.h>
40#include <subdev/bios/disp.h>
41#include <subdev/bios/init.h>
42#include <subdev/bios/pll.h>
43
44#include "nv50.h"
45
46/*******************************************************************************
47 * EVO DMA channel base class
48 ******************************************************************************/
49
50static int
51nvd0_disp_dmac_object_attach(struct nouveau_object *parent,
52 struct nouveau_object *object, u32 name)
53{
54 struct nv50_disp_base *base = (void *)parent->parent;
55 struct nv50_disp_chan *chan = (void *)parent;
56 u32 addr = nv_gpuobj(object)->node->offset;
57 u32 data = (chan->chid << 27) | (addr << 9) | 0x00000001;
58 return nouveau_ramht_insert(base->ramht, chan->chid, name, data);
59}
60
61static void
62nvd0_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
63{
64 struct nv50_disp_base *base = (void *)parent->parent;
65 nouveau_ramht_remove(base->ramht, cookie);
66}
67
68static int
69nvd0_disp_dmac_init(struct nouveau_object *object)
70{
71 struct nv50_disp_priv *priv = (void *)object->engine;
72 struct nv50_disp_dmac *dmac = (void *)object;
73 int chid = dmac->base.chid;
74 int ret;
75
76 ret = nv50_disp_chan_init(&dmac->base);
77 if (ret)
78 return ret;
79
80 /* enable error reporting */
81 nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid);
82 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
83
84 /* initialise channel for dma command submission */
85 nv_wr32(priv, 0x610494 + (chid * 0x0010), dmac->push);
86 nv_wr32(priv, 0x610498 + (chid * 0x0010), 0x00010000);
87 nv_wr32(priv, 0x61049c + (chid * 0x0010), 0x00000001);
88 nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
89 nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
90 nv_wr32(priv, 0x610490 + (chid * 0x0010), 0x00000013);
91
92 /* wait for it to go inactive */
93 if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x80000000, 0x00000000)) {
94 nv_error(dmac, "init: 0x%08x\n",
95 nv_rd32(priv, 0x610490 + (chid * 0x10)));
96 return -EBUSY;
97 }
98
99 return 0;
100}
101
102static int
103nvd0_disp_dmac_fini(struct nouveau_object *object, bool suspend)
104{
105 struct nv50_disp_priv *priv = (void *)object->engine;
106 struct nv50_disp_dmac *dmac = (void *)object;
107 int chid = dmac->base.chid;
108
109 /* deactivate channel */
110 nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
111 nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
112 if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x001e0000, 0x00000000)) {
113 nv_error(dmac, "fini: 0x%08x\n",
114 nv_rd32(priv, 0x610490 + (chid * 0x10)));
115 if (suspend)
116 return -EBUSY;
117 }
118
119 /* disable error reporting */
120 nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
121 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
122
123 return nv50_disp_chan_fini(&dmac->base, suspend);
124}
125
126/*******************************************************************************
127 * EVO master channel object
128 ******************************************************************************/
129
130static int
131nvd0_disp_mast_ctor(struct nouveau_object *parent,
132 struct nouveau_object *engine,
133 struct nouveau_oclass *oclass, void *data, u32 size,
134 struct nouveau_object **pobject)
135{
136 struct nv50_display_mast_class *args = data;
137 struct nv50_disp_dmac *mast;
138 int ret;
139
140 if (size < sizeof(*args))
141 return -EINVAL;
142
143 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
144 0, sizeof(*mast), (void **)&mast);
145 *pobject = nv_object(mast);
146 if (ret)
147 return ret;
148
149 nv_parent(mast)->object_attach = nvd0_disp_dmac_object_attach;
150 nv_parent(mast)->object_detach = nvd0_disp_dmac_object_detach;
151 return 0;
152}
153
154static int
155nvd0_disp_mast_init(struct nouveau_object *object)
156{
157 struct nv50_disp_priv *priv = (void *)object->engine;
158 struct nv50_disp_dmac *mast = (void *)object;
159 int ret;
160
161 ret = nv50_disp_chan_init(&mast->base);
162 if (ret)
163 return ret;
164
165 /* enable error reporting */
166 nv_mask(priv, 0x610090, 0x00000001, 0x00000001);
167 nv_mask(priv, 0x6100a0, 0x00000001, 0x00000001);
168
169 /* initialise channel for dma command submission */
170 nv_wr32(priv, 0x610494, mast->push);
171 nv_wr32(priv, 0x610498, 0x00010000);
172 nv_wr32(priv, 0x61049c, 0x00000001);
173 nv_mask(priv, 0x610490, 0x00000010, 0x00000010);
174 nv_wr32(priv, 0x640000, 0x00000000);
175 nv_wr32(priv, 0x610490, 0x01000013);
176
177 /* wait for it to go inactive */
178 if (!nv_wait(priv, 0x610490, 0x80000000, 0x00000000)) {
179 nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610490));
180 return -EBUSY;
181 }
182
183 return 0;
184}
185
186static int
187nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend)
188{
189 struct nv50_disp_priv *priv = (void *)object->engine;
190 struct nv50_disp_dmac *mast = (void *)object;
191
192 /* deactivate channel */
193 nv_mask(priv, 0x610490, 0x00000010, 0x00000000);
194 nv_mask(priv, 0x610490, 0x00000003, 0x00000000);
195 if (!nv_wait(priv, 0x610490, 0x001e0000, 0x00000000)) {
196 nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610490));
197 if (suspend)
198 return -EBUSY;
199 }
200
201 /* disable error reporting */
202 nv_mask(priv, 0x610090, 0x00000001, 0x00000000);
203 nv_mask(priv, 0x6100a0, 0x00000001, 0x00000000);
204
205 return nv50_disp_chan_fini(&mast->base, suspend);
206}
207
208struct nouveau_ofuncs
209nvd0_disp_mast_ofuncs = {
210 .ctor = nvd0_disp_mast_ctor,
211 .dtor = nv50_disp_dmac_dtor,
212 .init = nvd0_disp_mast_init,
213 .fini = nvd0_disp_mast_fini,
214 .rd32 = nv50_disp_chan_rd32,
215 .wr32 = nv50_disp_chan_wr32,
216};
217
218/*******************************************************************************
219 * EVO sync channel objects
220 ******************************************************************************/
221
222static int
223nvd0_disp_sync_ctor(struct nouveau_object *parent,
224 struct nouveau_object *engine,
225 struct nouveau_oclass *oclass, void *data, u32 size,
226 struct nouveau_object **pobject)
227{
228 struct nv50_display_sync_class *args = data;
229 struct nv50_disp_priv *priv = (void *)engine;
230 struct nv50_disp_dmac *dmac;
231 int ret;
232
233 if (size < sizeof(*data) || args->head >= priv->head.nr)
234 return -EINVAL;
235
236 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
237 1 + args->head, sizeof(*dmac),
238 (void **)&dmac);
239 *pobject = nv_object(dmac);
240 if (ret)
241 return ret;
242
243 nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach;
244 nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach;
245 return 0;
246}
247
248struct nouveau_ofuncs
249nvd0_disp_sync_ofuncs = {
250 .ctor = nvd0_disp_sync_ctor,
251 .dtor = nv50_disp_dmac_dtor,
252 .init = nvd0_disp_dmac_init,
253 .fini = nvd0_disp_dmac_fini,
254 .rd32 = nv50_disp_chan_rd32,
255 .wr32 = nv50_disp_chan_wr32,
256};
257
258/*******************************************************************************
259 * EVO overlay channel objects
260 ******************************************************************************/
261
262static int
263nvd0_disp_ovly_ctor(struct nouveau_object *parent,
264 struct nouveau_object *engine,
265 struct nouveau_oclass *oclass, void *data, u32 size,
266 struct nouveau_object **pobject)
267{
268 struct nv50_display_ovly_class *args = data;
269 struct nv50_disp_priv *priv = (void *)engine;
270 struct nv50_disp_dmac *dmac;
271 int ret;
272
273 if (size < sizeof(*data) || args->head >= priv->head.nr)
274 return -EINVAL;
275
276 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
277 5 + args->head, sizeof(*dmac),
278 (void **)&dmac);
279 *pobject = nv_object(dmac);
280 if (ret)
281 return ret;
282
283 nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach;
284 nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach;
285 return 0;
286}
287
288struct nouveau_ofuncs
289nvd0_disp_ovly_ofuncs = {
290 .ctor = nvd0_disp_ovly_ctor,
291 .dtor = nv50_disp_dmac_dtor,
292 .init = nvd0_disp_dmac_init,
293 .fini = nvd0_disp_dmac_fini,
294 .rd32 = nv50_disp_chan_rd32,
295 .wr32 = nv50_disp_chan_wr32,
296};
297
298/*******************************************************************************
299 * EVO PIO channel base class
300 ******************************************************************************/
301
302static int
303nvd0_disp_pioc_create_(struct nouveau_object *parent,
304 struct nouveau_object *engine,
305 struct nouveau_oclass *oclass, int chid,
306 int length, void **pobject)
307{
308 return nv50_disp_chan_create_(parent, engine, oclass, chid,
309 length, pobject);
310}
311
312static void
313nvd0_disp_pioc_dtor(struct nouveau_object *object)
314{
315 struct nv50_disp_pioc *pioc = (void *)object;
316 nv50_disp_chan_destroy(&pioc->base);
317}
318
319static int
320nvd0_disp_pioc_init(struct nouveau_object *object)
321{
322 struct nv50_disp_priv *priv = (void *)object->engine;
323 struct nv50_disp_pioc *pioc = (void *)object;
324 int chid = pioc->base.chid;
325 int ret;
326
327 ret = nv50_disp_chan_init(&pioc->base);
328 if (ret)
329 return ret;
330
331 /* enable error reporting */
332 nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid);
333 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
334
335 /* activate channel */
336 nv_wr32(priv, 0x610490 + (chid * 0x10), 0x00000001);
337 if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00010000)) {
338 nv_error(pioc, "init: 0x%08x\n",
339 nv_rd32(priv, 0x610490 + (chid * 0x10)));
340 return -EBUSY;
341 }
342
343 return 0;
344}
345
346static int
347nvd0_disp_pioc_fini(struct nouveau_object *object, bool suspend)
348{
349 struct nv50_disp_priv *priv = (void *)object->engine;
350 struct nv50_disp_pioc *pioc = (void *)object;
351 int chid = pioc->base.chid;
352
353 nv_mask(priv, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
354 if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00000000)) {
355 nv_error(pioc, "timeout: 0x%08x\n",
356 nv_rd32(priv, 0x610490 + (chid * 0x10)));
357 if (suspend)
358 return -EBUSY;
359 }
360
361 /* disable error reporting */
362 nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
363 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
364
365 return nv50_disp_chan_fini(&pioc->base, suspend);
366}
367
368/*******************************************************************************
369 * EVO immediate overlay channel objects
370 ******************************************************************************/
371
372static int
373nvd0_disp_oimm_ctor(struct nouveau_object *parent,
374 struct nouveau_object *engine,
375 struct nouveau_oclass *oclass, void *data, u32 size,
376 struct nouveau_object **pobject)
377{
378 struct nv50_display_oimm_class *args = data;
379 struct nv50_disp_priv *priv = (void *)engine;
380 struct nv50_disp_pioc *pioc;
381 int ret;
382
383 if (size < sizeof(*args) || args->head >= priv->head.nr)
384 return -EINVAL;
385
386 ret = nvd0_disp_pioc_create_(parent, engine, oclass, 9 + args->head,
387 sizeof(*pioc), (void **)&pioc);
388 *pobject = nv_object(pioc);
389 if (ret)
390 return ret;
391
392 return 0;
393}
394
395struct nouveau_ofuncs
396nvd0_disp_oimm_ofuncs = {
397 .ctor = nvd0_disp_oimm_ctor,
398 .dtor = nvd0_disp_pioc_dtor,
399 .init = nvd0_disp_pioc_init,
400 .fini = nvd0_disp_pioc_fini,
401 .rd32 = nv50_disp_chan_rd32,
402 .wr32 = nv50_disp_chan_wr32,
403};
404
405/*******************************************************************************
406 * EVO cursor channel objects
407 ******************************************************************************/
408
409static int
410nvd0_disp_curs_ctor(struct nouveau_object *parent,
411 struct nouveau_object *engine,
412 struct nouveau_oclass *oclass, void *data, u32 size,
413 struct nouveau_object **pobject)
414{
415 struct nv50_display_curs_class *args = data;
416 struct nv50_disp_priv *priv = (void *)engine;
417 struct nv50_disp_pioc *pioc;
418 int ret;
419
420 if (size < sizeof(*args) || args->head >= priv->head.nr)
421 return -EINVAL;
422
423 ret = nvd0_disp_pioc_create_(parent, engine, oclass, 13 + args->head,
424 sizeof(*pioc), (void **)&pioc);
425 *pobject = nv_object(pioc);
426 if (ret)
427 return ret;
428
429 return 0;
430}
431
432struct nouveau_ofuncs
433nvd0_disp_curs_ofuncs = {
434 .ctor = nvd0_disp_curs_ctor,
435 .dtor = nvd0_disp_pioc_dtor,
436 .init = nvd0_disp_pioc_init,
437 .fini = nvd0_disp_pioc_fini,
438 .rd32 = nv50_disp_chan_rd32,
439 .wr32 = nv50_disp_chan_wr32,
440};
441
442/*******************************************************************************
443 * Base display object
444 ******************************************************************************/
445
446static int
447nvd0_disp_base_ctor(struct nouveau_object *parent,
448 struct nouveau_object *engine,
449 struct nouveau_oclass *oclass, void *data, u32 size,
450 struct nouveau_object **pobject)
451{
452 struct nv50_disp_priv *priv = (void *)engine;
453 struct nv50_disp_base *base;
454 int ret;
455
456 ret = nouveau_parent_create(parent, engine, oclass, 0,
457 priv->sclass, 0, &base);
458 *pobject = nv_object(base);
459 if (ret)
460 return ret;
461
462 return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
463}
464
465static void
466nvd0_disp_base_dtor(struct nouveau_object *object)
467{
468 struct nv50_disp_base *base = (void *)object;
469 nouveau_ramht_ref(NULL, &base->ramht);
470 nouveau_parent_destroy(&base->base);
471}
472
473static int
474nvd0_disp_base_init(struct nouveau_object *object)
475{
476 struct nv50_disp_priv *priv = (void *)object->engine;
477 struct nv50_disp_base *base = (void *)object;
478 int ret, i;
479 u32 tmp;
480
481 ret = nouveau_parent_init(&base->base);
482 if (ret)
483 return ret;
484
485 /* The below segments of code copying values from one register to
486 * another appear to inform EVO of the display capabilities or
487 * something similar.
488 */
489
490 /* ... CRTC caps */
491 for (i = 0; i < priv->head.nr; i++) {
492 tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
493 nv_wr32(priv, 0x6101b4 + (i * 0x800), tmp);
494 tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
495 nv_wr32(priv, 0x6101b8 + (i * 0x800), tmp);
496 tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
497 nv_wr32(priv, 0x6101bc + (i * 0x800), tmp);
498 }
499
500 /* ... DAC caps */
501 for (i = 0; i < priv->dac.nr; i++) {
502 tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
503 nv_wr32(priv, 0x6101c0 + (i * 0x800), tmp);
504 }
505
506 /* ... SOR caps */
507 for (i = 0; i < priv->sor.nr; i++) {
508 tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
509 nv_wr32(priv, 0x6301c4 + (i * 0x800), tmp);
510 }
511
512 /* steal display away from vbios, or something like that */
513 if (nv_rd32(priv, 0x6100ac) & 0x00000100) {
514 nv_wr32(priv, 0x6100ac, 0x00000100);
515 nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
516 if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
517 nv_error(priv, "timeout acquiring display\n");
518 return -EBUSY;
519 }
520 }
521
522 /* point at display engine memory area (hash table, objects) */
523 nv_wr32(priv, 0x610010, (nv_gpuobj(object->parent)->addr >> 8) | 9);
524
525 /* enable supervisor interrupts, disable everything else */
526 nv_wr32(priv, 0x610090, 0x00000000);
527 nv_wr32(priv, 0x6100a0, 0x00000000);
528 nv_wr32(priv, 0x6100b0, 0x00000307);
529
530 return 0;
531}
532
533static int
534nvd0_disp_base_fini(struct nouveau_object *object, bool suspend)
535{
536 struct nv50_disp_priv *priv = (void *)object->engine;
537 struct nv50_disp_base *base = (void *)object;
538
539 /* disable all interrupts */
540 nv_wr32(priv, 0x6100b0, 0x00000000);
541
542 return nouveau_parent_fini(&base->base, suspend);
543}
544
545struct nouveau_ofuncs
546nvd0_disp_base_ofuncs = {
547 .ctor = nvd0_disp_base_ctor,
548 .dtor = nvd0_disp_base_dtor,
549 .init = nvd0_disp_base_init,
550 .fini = nvd0_disp_base_fini,
551};
552
553static struct nouveau_oclass
554nvd0_disp_base_oclass[] = {
555 { NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
556 {}
32}; 557};
33 558
34static struct nouveau_oclass 559static struct nouveau_oclass
35nvd0_disp_sclass[] = { 560nvd0_disp_sclass[] = {
36 {}, 561 { NVD0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
562 { NVD0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
563 { NVD0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
564 { NVD0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
565 { NVD0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
566 {}
37}; 567};
38 568
569/*******************************************************************************
570 * Display engine implementation
571 ******************************************************************************/
572
573static u16
574exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
575 struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
576 struct nvbios_outp *info)
577{
578 struct nouveau_bios *bios = nouveau_bios(priv);
579 u16 mask, type, data;
580
581 if (outp < 4) {
582 type = DCB_OUTPUT_ANALOG;
583 mask = 0;
584 } else {
585 outp -= 4;
586 switch (ctrl & 0x00000f00) {
587 case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
588 case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
589 case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
590 case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
591 case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
592 case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
593 default:
594 nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
595 return 0x0000;
596 }
597 dcb->sorconf.link = mask;
598 }
599
600 mask = 0x00c0 & (mask << 6);
601 mask |= 0x0001 << outp;
602 mask |= 0x0100 << head;
603
604 data = dcb_outp_match(bios, type, mask, ver, hdr, dcb);
605 if (!data)
606 return 0x0000;
607
608 return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info);
609}
610
611static bool
612exec_script(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl, int id)
613{
614 struct nouveau_bios *bios = nouveau_bios(priv);
615 struct nvbios_outp info;
616 struct dcb_output dcb;
617 u8 ver, hdr, cnt, len;
618 u16 data;
619
620 data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
621 if (data) {
622 struct nvbios_init init = {
623 .subdev = nv_subdev(priv),
624 .bios = bios,
625 .offset = info.script[id],
626 .outp = &dcb,
627 .crtc = head,
628 .execute = 1,
629 };
630
631 return nvbios_exec(&init) == 0;
632 }
633
634 return false;
635}
636
637static u32
638exec_clkcmp(struct nv50_disp_priv *priv, int head, int outp,
639 u32 ctrl, int id, u32 pclk)
640{
641 struct nouveau_bios *bios = nouveau_bios(priv);
642 struct nvbios_outp info1;
643 struct nvbios_ocfg info2;
644 struct dcb_output dcb;
645 u8 ver, hdr, cnt, len;
646 u16 data, conf;
647
648 data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info1);
649 if (data == 0x0000)
650 return false;
651
652 switch (dcb.type) {
653 case DCB_OUTPUT_TMDS:
654 conf = (ctrl & 0x00000f00) >> 8;
655 if (pclk >= 165000)
656 conf |= 0x0100;
657 break;
658 case DCB_OUTPUT_LVDS:
659 conf = priv->sor.lvdsconf;
660 break;
661 case DCB_OUTPUT_DP:
662 conf = (ctrl & 0x00000f00) >> 8;
663 break;
664 case DCB_OUTPUT_ANALOG:
665 default:
666 conf = 0x00ff;
667 break;
668 }
669
670 data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
671 if (data) {
672 data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
673 if (data) {
674 struct nvbios_init init = {
675 .subdev = nv_subdev(priv),
676 .bios = bios,
677 .offset = data,
678 .outp = &dcb,
679 .crtc = head,
680 .execute = 1,
681 };
682
683 if (nvbios_exec(&init))
684 return 0x0000;
685 return conf;
686 }
687 }
688
689 return 0x0000;
690}
691
692static void
693nvd0_display_unk1_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
694{
695 int i;
696
697 for (i = 0; mask && i < 8; i++) {
698 u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20));
699 if (mcc & (1 << head))
700 exec_script(priv, head, i, mcc, 1);
701 }
702
703 nv_wr32(priv, 0x6101d4, 0x00000000);
704 nv_wr32(priv, 0x6109d4, 0x00000000);
705 nv_wr32(priv, 0x6101d0, 0x80000000);
706}
707
39static void 708static void
40nvd0_disp_intr_vblank(struct nvd0_disp_priv *priv, int crtc) 709nvd0_display_unk2_calc_tu(struct nv50_disp_priv *priv, int head, int or)
710{
711 const u32 ctrl = nv_rd32(priv, 0x660200 + (or * 0x020));
712 const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300));
713 const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
714 const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1;
715 const u32 hoff = (head * 0x800);
716 const u32 soff = ( or * 0x800);
717 const u32 loff = (link * 0x080) + soff;
718 const u32 symbol = 100000;
719 const u32 TU = 64;
720 u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x000f0000;
721 u32 clksor = nv_rd32(priv, 0x612300 + soff);
722 u32 datarate, link_nr, link_bw, bits;
723 u64 ratio, value;
724
725 if ((conf & 0x3c0) == 0x180) bits = 30;
726 else if ((conf & 0x3c0) == 0x140) bits = 24;
727 else bits = 18;
728 datarate = (pclk * bits) / 8;
729
730 if (dpctrl > 0x00030000) link_nr = 4;
731 else if (dpctrl > 0x00010000) link_nr = 2;
732 else link_nr = 1;
733
734 link_bw = (clksor & 0x007c0000) >> 18;
735 link_bw *= 27000;
736
737 ratio = datarate;
738 ratio *= symbol;
739 do_div(ratio, link_nr * link_bw);
740
741 value = (symbol - ratio) * TU;
742 value *= ratio;
743 do_div(value, symbol);
744 do_div(value, symbol);
745
746 value += 5;
747 value |= 0x08000000;
748
749 nv_wr32(priv, 0x616610 + hoff, value);
750}
751
752static void
753nvd0_display_unk2_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
754{
755 u32 pclk;
756 int i;
757
758 for (i = 0; mask && i < 8; i++) {
759 u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20));
760 if (mcc & (1 << head))
761 exec_script(priv, head, i, mcc, 2);
762 }
763
764 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
765 nv_debug(priv, "head %d pclk %d mask 0x%08x\n", head, pclk, mask);
766 if (pclk && (mask & 0x00010000)) {
767 struct nouveau_clock *clk = nouveau_clock(priv);
768 clk->pll_set(clk, PLL_VPLL0 + head, pclk);
769 }
770
771 nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000);
772
773 for (i = 0; mask && i < 8; i++) {
774 u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20)), cfg;
775 if (mcp & (1 << head)) {
776 if ((cfg = exec_clkcmp(priv, head, i, mcp, 0, pclk))) {
777 u32 addr, mask, data = 0x00000000;
778 if (i < 4) {
779 addr = 0x612280 + ((i - 0) * 0x800);
780 mask = 0xffffffff;
781 } else {
782 switch (mcp & 0x00000f00) {
783 case 0x00000800:
784 case 0x00000900:
785 nvd0_display_unk2_calc_tu(priv, head, i - 4);
786 break;
787 default:
788 break;
789 }
790
791 addr = 0x612300 + ((i - 4) * 0x800);
792 mask = 0x00000707;
793 if (cfg & 0x00000100)
794 data = 0x00000101;
795 }
796 nv_mask(priv, addr, mask, data);
797 }
798 break;
799 }
800 }
801
802 nv_wr32(priv, 0x6101d4, 0x00000000);
803 nv_wr32(priv, 0x6109d4, 0x00000000);
804 nv_wr32(priv, 0x6101d0, 0x80000000);
805}
806
807static void
808nvd0_display_unk4_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
809{
810 int pclk, i;
811
812 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
813
814 for (i = 0; mask && i < 8; i++) {
815 u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20));
816 if (mcp & (1 << head))
817 exec_clkcmp(priv, head, i, mcp, 1, pclk);
818 }
819
820 nv_wr32(priv, 0x6101d4, 0x00000000);
821 nv_wr32(priv, 0x6109d4, 0x00000000);
822 nv_wr32(priv, 0x6101d0, 0x80000000);
823}
824
825static void
826nvd0_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
41{ 827{
42 struct nouveau_bar *bar = nouveau_bar(priv); 828 struct nouveau_bar *bar = nouveau_bar(priv);
43 struct nouveau_disp *disp = &priv->base; 829 struct nouveau_disp *disp = &priv->base;
@@ -65,14 +851,71 @@ nvd0_disp_intr_vblank(struct nvd0_disp_priv *priv, int crtc)
65 disp->vblank.notify(disp->vblank.data, crtc); 851 disp->vblank.notify(disp->vblank.data, crtc);
66} 852}
67 853
68static void 854void
69nvd0_disp_intr(struct nouveau_subdev *subdev) 855nvd0_disp_intr(struct nouveau_subdev *subdev)
70{ 856{
71 struct nvd0_disp_priv *priv = (void *)subdev; 857 struct nv50_disp_priv *priv = (void *)subdev;
72 u32 intr = nv_rd32(priv, 0x610088); 858 u32 intr = nv_rd32(priv, 0x610088);
73 int i; 859 int i;
74 860
75 for (i = 0; i < 4; i++) { 861 if (intr & 0x00000001) {
862 u32 stat = nv_rd32(priv, 0x61008c);
863 nv_wr32(priv, 0x61008c, stat);
864 intr &= ~0x00000001;
865 }
866
867 if (intr & 0x00000002) {
868 u32 stat = nv_rd32(priv, 0x61009c);
869 int chid = ffs(stat) - 1;
870 if (chid >= 0) {
871 u32 mthd = nv_rd32(priv, 0x6101f0 + (chid * 12));
872 u32 data = nv_rd32(priv, 0x6101f4 + (chid * 12));
873 u32 unkn = nv_rd32(priv, 0x6101f8 + (chid * 12));
874
875 nv_error(priv, "chid %d mthd 0x%04x data 0x%08x "
876 "0x%08x 0x%08x\n",
877 chid, (mthd & 0x0000ffc), data, mthd, unkn);
878 nv_wr32(priv, 0x61009c, (1 << chid));
879 nv_wr32(priv, 0x6101f0 + (chid * 12), 0x90000000);
880 }
881
882 intr &= ~0x00000002;
883 }
884
885 if (intr & 0x00100000) {
886 u32 stat = nv_rd32(priv, 0x6100ac);
887 u32 mask = 0, crtc = ~0;
888
889 while (!mask && ++crtc < priv->head.nr)
890 mask = nv_rd32(priv, 0x6101d4 + (crtc * 0x800));
891
892 if (stat & 0x00000001) {
893 nv_wr32(priv, 0x6100ac, 0x00000001);
894 nvd0_display_unk1_handler(priv, crtc, mask);
895 stat &= ~0x00000001;
896 }
897
898 if (stat & 0x00000002) {
899 nv_wr32(priv, 0x6100ac, 0x00000002);
900 nvd0_display_unk2_handler(priv, crtc, mask);
901 stat &= ~0x00000002;
902 }
903
904 if (stat & 0x00000004) {
905 nv_wr32(priv, 0x6100ac, 0x00000004);
906 nvd0_display_unk4_handler(priv, crtc, mask);
907 stat &= ~0x00000004;
908 }
909
910 if (stat) {
911 nv_info(priv, "unknown intr24 0x%08x\n", stat);
912 nv_wr32(priv, 0x6100ac, stat);
913 }
914
915 intr &= ~0x00100000;
916 }
917
918 for (i = 0; i < priv->head.nr; i++) {
76 u32 mask = 0x01000000 << i; 919 u32 mask = 0x01000000 << i;
77 if (mask & intr) { 920 if (mask & intr) {
78 u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800)); 921 u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
@@ -86,10 +929,10 @@ nvd0_disp_intr(struct nouveau_subdev *subdev)
86 929
87static int 930static int
88nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 931nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
89 struct nouveau_oclass *oclass, void *data, u32 size, 932 struct nouveau_oclass *oclass, void *data, u32 size,
90 struct nouveau_object **pobject) 933 struct nouveau_object **pobject)
91{ 934{
92 struct nvd0_disp_priv *priv; 935 struct nv50_disp_priv *priv;
93 int ret; 936 int ret;
94 937
95 ret = nouveau_disp_create(parent, engine, oclass, "PDISP", 938 ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
@@ -98,8 +941,23 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
98 if (ret) 941 if (ret)
99 return ret; 942 return ret;
100 943
101 nv_engine(priv)->sclass = nvd0_disp_sclass; 944 nv_engine(priv)->sclass = nvd0_disp_base_oclass;
945 nv_engine(priv)->cclass = &nv50_disp_cclass;
102 nv_subdev(priv)->intr = nvd0_disp_intr; 946 nv_subdev(priv)->intr = nvd0_disp_intr;
947 priv->sclass = nvd0_disp_sclass;
948 priv->head.nr = nv_rd32(priv, 0x022448);
949 priv->dac.nr = 3;
950 priv->sor.nr = 4;
951 priv->dac.power = nv50_dac_power;
952 priv->dac.sense = nv50_dac_sense;
953 priv->sor.power = nv50_sor_power;
954 priv->sor.hda_eld = nvd0_hda_eld;
955 priv->sor.hdmi = nvd0_hdmi_ctrl;
956 priv->sor.dp_train = nvd0_sor_dp_train;
957 priv->sor.dp_train_init = nv94_sor_dp_train_init;
958 priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
959 priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl;
960 priv->sor.dp_drvctl = nvd0_sor_dp_drvctl;
103 961
104 INIT_LIST_HEAD(&priv->base.vblank.list); 962 INIT_LIST_HEAD(&priv->base.vblank.list);
105 spin_lock_init(&priv->base.vblank.lock); 963 spin_lock_init(&priv->base.vblank.lock);
@@ -108,7 +966,7 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
108 966
109struct nouveau_oclass 967struct nouveau_oclass
110nvd0_disp_oclass = { 968nvd0_disp_oclass = {
111 .handle = NV_ENGINE(DISP, 0xd0), 969 .handle = NV_ENGINE(DISP, 0x90),
112 .ofuncs = &(struct nouveau_ofuncs) { 970 .ofuncs = &(struct nouveau_ofuncs) {
113 .ctor = nvd0_disp_ctor, 971 .ctor = nvd0_disp_ctor,
114 .dtor = _nouveau_disp_dtor, 972 .dtor = _nouveau_disp_dtor,
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
new file mode 100644
index 000000000000..259537c4587e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
@@ -0,0 +1,94 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <engine/software.h>
26#include <engine/disp.h>
27
28#include <core/class.h>
29
30#include "nv50.h"
31
/* Display channel classes exposed by the NVE0 (Kepler) display engine.
 * All channel types reuse the NVD0 object functions; only the class IDs
 * advertised to clients differ. */
static struct nouveau_oclass
nve0_disp_sclass[] = {
	{ NVE0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
	{ NVE0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
	{ NVE0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
	{ NVE0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
	{ NVE0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
	{}
};
41
/* Top-level NVE0 display object class; reuses the NVD0 object functions
 * and the NVA3+ method table. */
static struct nouveau_oclass
nve0_disp_base_oclass[] = {
	{ NVE0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
	{}
};
47
48static int
49nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
50 struct nouveau_oclass *oclass, void *data, u32 size,
51 struct nouveau_object **pobject)
52{
53 struct nv50_disp_priv *priv;
54 int ret;
55
56 ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
57 "display", &priv);
58 *pobject = nv_object(priv);
59 if (ret)
60 return ret;
61
62 nv_engine(priv)->sclass = nve0_disp_base_oclass;
63 nv_engine(priv)->cclass = &nv50_disp_cclass;
64 nv_subdev(priv)->intr = nvd0_disp_intr;
65 priv->sclass = nve0_disp_sclass;
66 priv->head.nr = nv_rd32(priv, 0x022448);
67 priv->dac.nr = 3;
68 priv->sor.nr = 4;
69 priv->dac.power = nv50_dac_power;
70 priv->dac.sense = nv50_dac_sense;
71 priv->sor.power = nv50_sor_power;
72 priv->sor.hda_eld = nvd0_hda_eld;
73 priv->sor.hdmi = nvd0_hdmi_ctrl;
74 priv->sor.dp_train = nvd0_sor_dp_train;
75 priv->sor.dp_train_init = nv94_sor_dp_train_init;
76 priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
77 priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl;
78 priv->sor.dp_drvctl = nvd0_sor_dp_drvctl;
79
80 INIT_LIST_HEAD(&priv->base.vblank.list);
81 spin_lock_init(&priv->base.vblank.lock);
82 return 0;
83}
84
/* Engine registration entry for the NVE0 display.  handle 0x91
 * distinguishes it from the NVD0 entry (0x90); dtor/init/fini are the
 * generic disp implementations. */
struct nouveau_oclass
nve0_disp_oclass = {
	.handle = NV_ENGINE(DISP, 0x91),
	.ofuncs = &(struct nouveau_ofuncs) {
		.ctor = nve0_disp_ctor,
		.dtor = _nouveau_disp_dtor,
		.init = _nouveau_disp_init,
		.fini = _nouveau_disp_fini,
	},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
new file mode 100644
index 000000000000..39b6b67732d0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
@@ -0,0 +1,112 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27
28#include <subdev/bios.h>
29#include <subdev/bios/dcb.h>
30#include <subdev/timer.h>
31
32#include "nv50.h"
33
/*
 * Set the power state of SOR 'or'.  Only the PWR_STATE bit of 'data' is
 * consumed; the surrounding waits presumably let an in-flight state
 * change settle before and after the update.
 * NOTE(review): nv_wait timeouts are ignored and the function always
 * returns 0 -- confirm that failure is intentionally not reported.
 */
int
nv50_sor_power(struct nv50_disp_priv *priv, int or, u32 data)
{
	const u32 stat = data & NV50_DISP_SOR_PWR_STATE;
	const u32 soff = (or * 0x800);	/* per-SOR register stride */
	nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
	nv_mask(priv, 0x61c004 + soff, 0x80000001, 0x80000000 | stat);
	nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
	nv_wait(priv, 0x61c030 + soff, 0x10000000, 0x00000000);
	return 0;
}
45
46int
47nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
48{
49 struct nv50_disp_priv *priv = (void *)object->engine;
50 struct nouveau_bios *bios = nouveau_bios(priv);
51 const u16 type = (mthd & NV50_DISP_SOR_MTHD_TYPE) >> 12;
52 const u8 head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3;
53 const u8 link = (mthd & NV50_DISP_SOR_MTHD_LINK) >> 2;
54 const u8 or = (mthd & NV50_DISP_SOR_MTHD_OR);
55 const u16 mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or);
56 struct dcb_output outp;
57 u8 ver, hdr;
58 u32 data;
59 int ret = -EINVAL;
60
61 if (size < sizeof(u32))
62 return -EINVAL;
63 data = *(u32 *)args;
64
65 if (type && !dcb_outp_match(bios, type, mask, &ver, &hdr, &outp))
66 return -ENODEV;
67
68 switch (mthd & ~0x3f) {
69 case NV50_DISP_SOR_PWR:
70 ret = priv->sor.power(priv, or, data);
71 break;
72 case NVA3_DISP_SOR_HDA_ELD:
73 ret = priv->sor.hda_eld(priv, or, args, size);
74 break;
75 case NV84_DISP_SOR_HDMI_PWR:
76 ret = priv->sor.hdmi(priv, head, or, data);
77 break;
78 case NV50_DISP_SOR_LVDS_SCRIPT:
79 priv->sor.lvdsconf = data & NV50_DISP_SOR_LVDS_SCRIPT_ID;
80 ret = 0;
81 break;
82 case NV94_DISP_SOR_DP_TRAIN:
83 switch (data & NV94_DISP_SOR_DP_TRAIN_OP) {
84 case NV94_DISP_SOR_DP_TRAIN_OP_PATTERN:
85 ret = priv->sor.dp_train(priv, or, link, type, mask, data, &outp);
86 break;
87 case NV94_DISP_SOR_DP_TRAIN_OP_INIT:
88 ret = priv->sor.dp_train_init(priv, or, link, head, type, mask, data, &outp);
89 break;
90 case NV94_DISP_SOR_DP_TRAIN_OP_FINI:
91 ret = priv->sor.dp_train_fini(priv, or, link, head, type, mask, data, &outp);
92 break;
93 default:
94 break;
95 }
96 break;
97 case NV94_DISP_SOR_DP_LNKCTL:
98 ret = priv->sor.dp_lnkctl(priv, or, link, head, type, mask, data, &outp);
99 break;
100 case NV94_DISP_SOR_DP_DRVCTL(0):
101 case NV94_DISP_SOR_DP_DRVCTL(1):
102 case NV94_DISP_SOR_DP_DRVCTL(2):
103 case NV94_DISP_SOR_DP_DRVCTL(3):
104 ret = priv->sor.dp_drvctl(priv, or, link, (mthd & 0xc0) >> 6,
105 type, mask, data, &outp);
106 break;
107 default:
108 BUG_ON(1);
109 }
110
111 return ret;
112}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
new file mode 100644
index 000000000000..f6edd009762e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
@@ -0,0 +1,190 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27
28#include <subdev/bios.h>
29#include <subdev/bios/dcb.h>
30#include <subdev/bios/dp.h>
31#include <subdev/bios/init.h>
32
33#include "nv50.h"
34
35static inline u32
36nv94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
37{
38 static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
39 static const u8 nv94[] = { 16, 8, 0, 24 };
40 if (nv_device(priv)->chipset == 0xaf)
41 return nvaf[lane];
42 return nv94[lane];
43}
44
/*
 * Run the VBIOS scripts that prepare a SOR link for DP link training.
 * script[2]/script[3] are selected by the spread-spectrum flag, then
 * script[0] is executed (presumably the common pre-training setup --
 * TODO confirm against the VBIOS DP table docs).  A missing DP output
 * table entry is silently ignored.  Always returns 0.
 */
int
nv94_sor_dp_train_init(struct nv50_disp_priv *priv, int or, int link, int head,
		       u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
{
	struct nouveau_bios *bios = nouveau_bios(priv);
	struct nvbios_dpout info;
	u8 ver, hdr, cnt, len;
	u16 outp;

	outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
	if (outp) {
		struct nvbios_init init = {
			.subdev = nv_subdev(priv),
			.bios = bios,
			.outp = dcbo,
			.crtc = head,
			.execute = 1,
		};

		/* spread-spectrum on -> script[2], off -> script[3] */
		if (data & NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON)
			init.offset = info.script[2];
		else
			init.offset = info.script[3];
		nvbios_exec(&init);

		/* then the unconditional init script */
		init.offset = info.script[0];
		nvbios_exec(&init);
	}

	return 0;
}
76
/*
 * Run the VBIOS script executed after DP link training completes for a
 * SOR (script[1]; presumably the post-training/cleanup script -- TODO
 * confirm against the VBIOS DP table docs).  A missing DP output table
 * entry is silently ignored.  Always returns 0.
 */
int
nv94_sor_dp_train_fini(struct nv50_disp_priv *priv, int or, int link, int head,
		       u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
{
	struct nouveau_bios *bios = nouveau_bios(priv);
	struct nvbios_dpout info;
	u8 ver, hdr, cnt, len;
	u16 outp;

	outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
	if (outp) {
		struct nvbios_init init = {
			.subdev = nv_subdev(priv),
			.bios = bios,
			.offset = info.script[1],
			.outp = dcbo,
			.crtc = head,
			.execute = 1,
		};

		nvbios_exec(&init);
	}

	return 0;
}
102
103int
104nv94_sor_dp_train(struct nv50_disp_priv *priv, int or, int link,
105 u16 type, u16 mask, u32 data, struct dcb_output *info)
106{
107 const u32 loff = (or * 0x800) + (link * 0x80);
108 const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN);
109 nv_mask(priv, 0x61c10c + loff, 0x0f000000, patt << 24);
110 return 0;
111}
112
/*
 * Program DP link configuration (bandwidth, lane count, enhanced
 * framing) for one link of SOR 'or'.  The WIDTH and COUNT fields of
 * 'data' carry the requested rate code and lane count; a matching
 * VBIOS link-rate script is executed before the clock and control
 * registers are programmed.  Always returns 0.
 */
int
nv94_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
		   u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
{
	struct nouveau_bios *bios = nouveau_bios(priv);
	const u32 loff = (or * 0x800) + (link * 0x80);	/* per-link regs */
	const u32 soff = (or * 0x800);			/* per-SOR regs */
	u16 link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8;
	u8 link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT);
	u32 dpctrl = 0x00000000;
	u32 clksor = 0x00000000;
	u32 outp, lane = 0;
	u8 ver, hdr, cnt, len;
	struct nvbios_dpout info;
	int i;

	/* -> 10Khz units */
	link_bw *= 2700;

	outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
	if (outp && info.lnkcmp) {
		struct nvbios_init init = {
			.subdev = nv_subdev(priv),
			.bios = bios,
			.offset = 0x0000,
			.outp = dcbo,
			.crtc = head,
			.execute = 1,
		};

		/* lnkcmp appears to be 4-byte entries of (u16 bandwidth,
		 * u16 script offset); walk to the first entry our rate
		 * satisfies -- TODO confirm table layout */
		while (link_bw < nv_ro16(bios, info.lnkcmp))
			info.lnkcmp += 4;
		init.offset = nv_ro16(bios, info.lnkcmp + 2);

		nvbios_exec(&init);
	}

	/* one enable bit per active lane */
	dpctrl |= ((1 << link_nr) - 1) << 16;
	if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH)
		dpctrl |= 0x00004000;
	/* above 1.62Gbps (16200 in 10KHz units) select the faster clock */
	if (link_bw > 16200)
		clksor |= 0x00040000;

	for (i = 0; i < link_nr; i++)
		lane |= 1 << (nv94_sor_dp_lane_map(priv, i) >> 3);

	nv_mask(priv, 0x614300 + soff, 0x000c0000, clksor);
	nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
	nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane);
	return 0;
}
164
/*
 * Set voltage-swing/pre-emphasis drive levels for one DP lane.  The VS
 * and PE fields of 'data' select an entry in the VBIOS DP config table,
 * whose values are written into the lane's 8-bit register fields.
 *
 * Returns -ENODEV when the BIOS has no matching DP output entry,
 * -EINVAL when no config matches the requested levels, 0 on success.
 */
int
nv94_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
		   u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
{
	struct nouveau_bios *bios = nouveau_bios(priv);
	const u32 loff = (or * 0x800) + (link * 0x80);
	const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8;
	const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE);
	u32 addr, shift = nv94_sor_dp_lane_map(priv, lane);
	u8 ver, hdr, cnt, len;
	struct nvbios_dpout outp;
	struct nvbios_dpcfg ocfg;

	addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp);
	if (!addr)
		return -ENODEV;

	addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg);
	if (!addr)
		return -EINVAL;

	/* each lane owns an 8-bit field located at 'shift' */
	nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift);
	nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift);
	nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
new file mode 100644
index 000000000000..c37ce7e29f5d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
@@ -0,0 +1,126 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27
28#include <subdev/bios.h>
29#include <subdev/bios/dcb.h>
30#include <subdev/bios/dp.h>
31#include <subdev/bios/init.h>
32
33#include "nv50.h"
34
35static inline u32
36nvd0_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
37{
38 static const u8 nvd0[] = { 16, 8, 0, 24 };
39 return nvd0[lane];
40}
41
42int
43nvd0_sor_dp_train(struct nv50_disp_priv *priv, int or, int link,
44 u16 type, u16 mask, u32 data, struct dcb_output *info)
45{
46 const u32 loff = (or * 0x800) + (link * 0x80);
47 const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN);
48 nv_mask(priv, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * patt);
49 return 0;
50}
51
/*
 * Program DP link configuration (bandwidth, lane count, enhanced
 * framing) for one link of SOR 'or' on NVD0.  The raw WIDTH rate code
 * from 'data' is used directly (no 10KHz conversion as on nv94) both
 * for the VBIOS table walk and for the clock register field.
 * Always returns 0.
 */
int
nvd0_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
		   u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
{
	struct nouveau_bios *bios = nouveau_bios(priv);
	const u32 loff = (or * 0x800) + (link * 0x80);	/* per-link regs */
	const u32 soff = (or * 0x800);			/* per-SOR regs */
	const u8 link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8;
	const u8 link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT);
	u32 dpctrl = 0x00000000;
	u32 clksor = 0x00000000;
	u32 outp, lane = 0;
	u8 ver, hdr, cnt, len;
	struct nvbios_dpout info;
	int i;

	outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
	if (outp && info.lnkcmp) {
		struct nvbios_init init = {
			.subdev = nv_subdev(priv),
			.bios = bios,
			.offset = 0x0000,
			.outp = dcbo,
			.crtc = head,
			.execute = 1,
		};

		/* lnkcmp appears to be 3-byte entries of (u8 rate code,
		 * u16 script offset); note the comparison direction is
		 * inverted relative to nv94 -- TODO confirm layout */
		while (nv_ro08(bios, info.lnkcmp) < link_bw)
			info.lnkcmp += 3;
		init.offset = nv_ro16(bios, info.lnkcmp + 1);

		nvbios_exec(&init);
	}

	/* rate code goes straight into the clock register */
	clksor |= link_bw << 18;
	/* one enable bit per active lane */
	dpctrl |= ((1 << link_nr) - 1) << 16;
	if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH)
		dpctrl |= 0x00004000;

	for (i = 0; i < link_nr; i++)
		lane |= 1 << (nvd0_sor_dp_lane_map(priv, i) >> 3);

	nv_mask(priv, 0x612300 + soff, 0x007c0000, clksor);
	nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
	nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane);
	return 0;
}
99
/*
 * Set voltage-swing/pre-emphasis drive levels for one DP lane on NVD0.
 * Mirrors nv94_sor_dp_drvctl except for the lane map and a final touch
 * of 0x61c13c.
 *
 * Returns -ENODEV when the BIOS has no matching DP output entry,
 * -EINVAL when no config matches the requested levels, 0 on success.
 */
int
nvd0_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
		   u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
{
	struct nouveau_bios *bios = nouveau_bios(priv);
	const u32 loff = (or * 0x800) + (link * 0x80);
	const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8;
	const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE);
	u32 addr, shift = nvd0_sor_dp_lane_map(priv, lane);
	u8 ver, hdr, cnt, len;
	struct nvbios_dpout outp;
	struct nvbios_dpcfg ocfg;

	addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp);
	if (!addr)
		return -ENODEV;

	addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg);
	if (!addr)
		return -EINVAL;

	/* each lane owns an 8-bit field located at 'shift' */
	nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift);
	nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift);
	nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
	/* mask/set of 0 changes no bits; presumably a read-modify-write
	 * used to flush/post the writes above -- TODO confirm intent */
	nv_mask(priv, 0x61c13c + loff, 0x00000000, 0x00000000);
	return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
index e1f013d39768..5103e88d1877 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
@@ -28,37 +28,39 @@
28#include <subdev/fb.h> 28#include <subdev/fb.h>
29#include <engine/dmaobj.h> 29#include <engine/dmaobj.h>
30 30
31int 31static int
32nouveau_dmaobj_create_(struct nouveau_object *parent, 32nouveau_dmaobj_ctor(struct nouveau_object *parent,
33 struct nouveau_object *engine, 33 struct nouveau_object *engine,
34 struct nouveau_oclass *oclass, 34 struct nouveau_oclass *oclass, void *data, u32 size,
35 void *data, u32 size, int len, void **pobject) 35 struct nouveau_object **pobject)
36{ 36{
37 struct nouveau_dmaeng *dmaeng = (void *)engine;
38 struct nouveau_dmaobj *dmaobj;
39 struct nouveau_gpuobj *gpuobj;
37 struct nv_dma_class *args = data; 40 struct nv_dma_class *args = data;
38 struct nouveau_dmaobj *object;
39 int ret; 41 int ret;
40 42
41 if (size < sizeof(*args)) 43 if (size < sizeof(*args))
42 return -EINVAL; 44 return -EINVAL;
43 45
44 ret = nouveau_object_create_(parent, engine, oclass, 0, len, pobject); 46 ret = nouveau_object_create(parent, engine, oclass, 0, &dmaobj);
45 object = *pobject; 47 *pobject = nv_object(dmaobj);
46 if (ret) 48 if (ret)
47 return ret; 49 return ret;
48 50
49 switch (args->flags & NV_DMA_TARGET_MASK) { 51 switch (args->flags & NV_DMA_TARGET_MASK) {
50 case NV_DMA_TARGET_VM: 52 case NV_DMA_TARGET_VM:
51 object->target = NV_MEM_TARGET_VM; 53 dmaobj->target = NV_MEM_TARGET_VM;
52 break; 54 break;
53 case NV_DMA_TARGET_VRAM: 55 case NV_DMA_TARGET_VRAM:
54 object->target = NV_MEM_TARGET_VRAM; 56 dmaobj->target = NV_MEM_TARGET_VRAM;
55 break; 57 break;
56 case NV_DMA_TARGET_PCI: 58 case NV_DMA_TARGET_PCI:
57 object->target = NV_MEM_TARGET_PCI; 59 dmaobj->target = NV_MEM_TARGET_PCI;
58 break; 60 break;
59 case NV_DMA_TARGET_PCI_US: 61 case NV_DMA_TARGET_PCI_US:
60 case NV_DMA_TARGET_AGP: 62 case NV_DMA_TARGET_AGP:
61 object->target = NV_MEM_TARGET_PCI_NOSNOOP; 63 dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
62 break; 64 break;
63 default: 65 default:
64 return -EINVAL; 66 return -EINVAL;
@@ -66,22 +68,53 @@ nouveau_dmaobj_create_(struct nouveau_object *parent,
66 68
67 switch (args->flags & NV_DMA_ACCESS_MASK) { 69 switch (args->flags & NV_DMA_ACCESS_MASK) {
68 case NV_DMA_ACCESS_VM: 70 case NV_DMA_ACCESS_VM:
69 object->access = NV_MEM_ACCESS_VM; 71 dmaobj->access = NV_MEM_ACCESS_VM;
70 break; 72 break;
71 case NV_DMA_ACCESS_RD: 73 case NV_DMA_ACCESS_RD:
72 object->access = NV_MEM_ACCESS_RO; 74 dmaobj->access = NV_MEM_ACCESS_RO;
73 break; 75 break;
74 case NV_DMA_ACCESS_WR: 76 case NV_DMA_ACCESS_WR:
75 object->access = NV_MEM_ACCESS_WO; 77 dmaobj->access = NV_MEM_ACCESS_WO;
76 break; 78 break;
77 case NV_DMA_ACCESS_RDWR: 79 case NV_DMA_ACCESS_RDWR:
78 object->access = NV_MEM_ACCESS_RW; 80 dmaobj->access = NV_MEM_ACCESS_RW;
79 break; 81 break;
80 default: 82 default:
81 return -EINVAL; 83 return -EINVAL;
82 } 84 }
83 85
84 object->start = args->start; 86 dmaobj->start = args->start;
85 object->limit = args->limit; 87 dmaobj->limit = args->limit;
86 return 0; 88 dmaobj->conf0 = args->conf0;
89
90 switch (nv_mclass(parent)) {
91 case NV_DEVICE_CLASS:
92 /* delayed, or no, binding */
93 break;
94 default:
95 ret = dmaeng->bind(dmaeng, *pobject, dmaobj, &gpuobj);
96 if (ret == 0) {
97 nouveau_object_ref(NULL, pobject);
98 *pobject = nv_object(gpuobj);
99 }
100 break;
101 }
102
103 return ret;
87} 104}
105
106static struct nouveau_ofuncs
107nouveau_dmaobj_ofuncs = {
108 .ctor = nouveau_dmaobj_ctor,
109 .dtor = nouveau_object_destroy,
110 .init = nouveau_object_init,
111 .fini = nouveau_object_fini,
112};
113
114struct nouveau_oclass
115nouveau_dmaobj_sclass[] = {
116 { NV_DMA_FROM_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
117 { NV_DMA_TO_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
118 { NV_DMA_IN_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
119 {}
120};
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
index 9f4cc2f31994..027d8217c0fa 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
@@ -34,10 +34,6 @@ struct nv04_dmaeng_priv {
34 struct nouveau_dmaeng base; 34 struct nouveau_dmaeng base;
35}; 35};
36 36
37struct nv04_dmaobj_priv {
38 struct nouveau_dmaobj base;
39};
40
41static int 37static int
42nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng, 38nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
43 struct nouveau_object *parent, 39 struct nouveau_object *parent,
@@ -53,6 +49,18 @@ nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
53 u32 length = dmaobj->limit - dmaobj->start; 49 u32 length = dmaobj->limit - dmaobj->start;
54 int ret; 50 int ret;
55 51
52 if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
53 switch (nv_mclass(parent->parent)) {
54 case NV03_CHANNEL_DMA_CLASS:
55 case NV10_CHANNEL_DMA_CLASS:
56 case NV17_CHANNEL_DMA_CLASS:
57 case NV40_CHANNEL_DMA_CLASS:
58 break;
59 default:
60 return -EINVAL;
61 }
62 }
63
56 if (dmaobj->target == NV_MEM_TARGET_VM) { 64 if (dmaobj->target == NV_MEM_TARGET_VM) {
57 if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass) { 65 if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass) {
58 struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0]; 66 struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0];
@@ -106,56 +114,6 @@ nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
106} 114}
107 115
108static int 116static int
109nv04_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
110 struct nouveau_oclass *oclass, void *data, u32 size,
111 struct nouveau_object **pobject)
112{
113 struct nouveau_dmaeng *dmaeng = (void *)engine;
114 struct nv04_dmaobj_priv *dmaobj;
115 struct nouveau_gpuobj *gpuobj;
116 int ret;
117
118 ret = nouveau_dmaobj_create(parent, engine, oclass,
119 data, size, &dmaobj);
120 *pobject = nv_object(dmaobj);
121 if (ret)
122 return ret;
123
124 switch (nv_mclass(parent)) {
125 case NV_DEVICE_CLASS:
126 break;
127 case NV03_CHANNEL_DMA_CLASS:
128 case NV10_CHANNEL_DMA_CLASS:
129 case NV17_CHANNEL_DMA_CLASS:
130 case NV40_CHANNEL_DMA_CLASS:
131 ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
132 nouveau_object_ref(NULL, pobject);
133 *pobject = nv_object(gpuobj);
134 break;
135 default:
136 return -EINVAL;
137 }
138
139 return ret;
140}
141
142static struct nouveau_ofuncs
143nv04_dmaobj_ofuncs = {
144 .ctor = nv04_dmaobj_ctor,
145 .dtor = _nouveau_dmaobj_dtor,
146 .init = _nouveau_dmaobj_init,
147 .fini = _nouveau_dmaobj_fini,
148};
149
150static struct nouveau_oclass
151nv04_dmaobj_sclass[] = {
152 { 0x0002, &nv04_dmaobj_ofuncs },
153 { 0x0003, &nv04_dmaobj_ofuncs },
154 { 0x003d, &nv04_dmaobj_ofuncs },
155 {}
156};
157
158static int
159nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 117nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
160 struct nouveau_oclass *oclass, void *data, u32 size, 118 struct nouveau_oclass *oclass, void *data, u32 size,
161 struct nouveau_object **pobject) 119 struct nouveau_object **pobject)
@@ -168,7 +126,7 @@ nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
168 if (ret) 126 if (ret)
169 return ret; 127 return ret;
170 128
171 priv->base.base.sclass = nv04_dmaobj_sclass; 129 nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
172 priv->base.bind = nv04_dmaobj_bind; 130 priv->base.bind = nv04_dmaobj_bind;
173 return 0; 131 return 0;
174} 132}
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
index 045d2565e289..750183f7c057 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
@@ -32,36 +32,74 @@ struct nv50_dmaeng_priv {
32 struct nouveau_dmaeng base; 32 struct nouveau_dmaeng base;
33}; 33};
34 34
35struct nv50_dmaobj_priv {
36 struct nouveau_dmaobj base;
37};
38
39static int 35static int
40nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng, 36nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
41 struct nouveau_object *parent, 37 struct nouveau_object *parent,
42 struct nouveau_dmaobj *dmaobj, 38 struct nouveau_dmaobj *dmaobj,
43 struct nouveau_gpuobj **pgpuobj) 39 struct nouveau_gpuobj **pgpuobj)
44{ 40{
45 u32 flags = nv_mclass(dmaobj); 41 u32 flags0 = nv_mclass(dmaobj);
42 u32 flags5 = 0x00000000;
46 int ret; 43 int ret;
47 44
45 if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
46 switch (nv_mclass(parent->parent)) {
47 case NV50_CHANNEL_DMA_CLASS:
48 case NV84_CHANNEL_DMA_CLASS:
49 case NV50_CHANNEL_IND_CLASS:
50 case NV84_CHANNEL_IND_CLASS:
51 case NV50_DISP_MAST_CLASS:
52 case NV84_DISP_MAST_CLASS:
53 case NV94_DISP_MAST_CLASS:
54 case NVA0_DISP_MAST_CLASS:
55 case NVA3_DISP_MAST_CLASS:
56 case NV50_DISP_SYNC_CLASS:
57 case NV84_DISP_SYNC_CLASS:
58 case NV94_DISP_SYNC_CLASS:
59 case NVA0_DISP_SYNC_CLASS:
60 case NVA3_DISP_SYNC_CLASS:
61 case NV50_DISP_OVLY_CLASS:
62 case NV84_DISP_OVLY_CLASS:
63 case NV94_DISP_OVLY_CLASS:
64 case NVA0_DISP_OVLY_CLASS:
65 case NVA3_DISP_OVLY_CLASS:
66 break;
67 default:
68 return -EINVAL;
69 }
70 }
71
72 if (!(dmaobj->conf0 & NV50_DMA_CONF0_ENABLE)) {
73 if (dmaobj->target == NV_MEM_TARGET_VM) {
74 dmaobj->conf0 = NV50_DMA_CONF0_PRIV_VM;
75 dmaobj->conf0 |= NV50_DMA_CONF0_PART_VM;
76 dmaobj->conf0 |= NV50_DMA_CONF0_COMP_VM;
77 dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_VM;
78 } else {
79 dmaobj->conf0 = NV50_DMA_CONF0_PRIV_US;
80 dmaobj->conf0 |= NV50_DMA_CONF0_PART_256;
81 dmaobj->conf0 |= NV50_DMA_CONF0_COMP_NONE;
82 dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_LINEAR;
83 }
84 }
85
86 flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_COMP) << 22;
87 flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_TYPE) << 22;
88 flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_PRIV);
89 flags5 |= (dmaobj->conf0 & NV50_DMA_CONF0_PART);
90
48 switch (dmaobj->target) { 91 switch (dmaobj->target) {
49 case NV_MEM_TARGET_VM: 92 case NV_MEM_TARGET_VM:
50 flags |= 0x00000000; 93 flags0 |= 0x00000000;
51 flags |= 0x60000000; /* COMPRESSION_USEVM */
52 flags |= 0x1fc00000; /* STORAGE_TYPE_USEVM */
53 break; 94 break;
54 case NV_MEM_TARGET_VRAM: 95 case NV_MEM_TARGET_VRAM:
55 flags |= 0x00010000; 96 flags0 |= 0x00010000;
56 flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
57 break; 97 break;
58 case NV_MEM_TARGET_PCI: 98 case NV_MEM_TARGET_PCI:
59 flags |= 0x00020000; 99 flags0 |= 0x00020000;
60 flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
61 break; 100 break;
62 case NV_MEM_TARGET_PCI_NOSNOOP: 101 case NV_MEM_TARGET_PCI_NOSNOOP:
63 flags |= 0x00030000; 102 flags0 |= 0x00030000;
64 flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
65 break; 103 break;
66 default: 104 default:
67 return -EINVAL; 105 return -EINVAL;
@@ -71,79 +109,29 @@ nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
71 case NV_MEM_ACCESS_VM: 109 case NV_MEM_ACCESS_VM:
72 break; 110 break;
73 case NV_MEM_ACCESS_RO: 111 case NV_MEM_ACCESS_RO:
74 flags |= 0x00040000; 112 flags0 |= 0x00040000;
75 break; 113 break;
76 case NV_MEM_ACCESS_WO: 114 case NV_MEM_ACCESS_WO:
77 case NV_MEM_ACCESS_RW: 115 case NV_MEM_ACCESS_RW:
78 flags |= 0x00080000; 116 flags0 |= 0x00080000;
79 break; 117 break;
80 } 118 }
81 119
82 ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj); 120 ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
83 if (ret == 0) { 121 if (ret == 0) {
84 nv_wo32(*pgpuobj, 0x00, flags); 122 nv_wo32(*pgpuobj, 0x00, flags0);
85 nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit)); 123 nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
86 nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start)); 124 nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
87 nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 | 125 nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
88 upper_32_bits(dmaobj->start)); 126 upper_32_bits(dmaobj->start));
89 nv_wo32(*pgpuobj, 0x10, 0x00000000); 127 nv_wo32(*pgpuobj, 0x10, 0x00000000);
90 nv_wo32(*pgpuobj, 0x14, 0x00000000); 128 nv_wo32(*pgpuobj, 0x14, flags5);
91 } 129 }
92 130
93 return ret; 131 return ret;
94} 132}
95 133
96static int 134static int
97nv50_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
98 struct nouveau_oclass *oclass, void *data, u32 size,
99 struct nouveau_object **pobject)
100{
101 struct nouveau_dmaeng *dmaeng = (void *)engine;
102 struct nv50_dmaobj_priv *dmaobj;
103 struct nouveau_gpuobj *gpuobj;
104 int ret;
105
106 ret = nouveau_dmaobj_create(parent, engine, oclass,
107 data, size, &dmaobj);
108 *pobject = nv_object(dmaobj);
109 if (ret)
110 return ret;
111
112 switch (nv_mclass(parent)) {
113 case NV_DEVICE_CLASS:
114 break;
115 case NV50_CHANNEL_DMA_CLASS:
116 case NV84_CHANNEL_DMA_CLASS:
117 case NV50_CHANNEL_IND_CLASS:
118 case NV84_CHANNEL_IND_CLASS:
119 ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
120 nouveau_object_ref(NULL, pobject);
121 *pobject = nv_object(gpuobj);
122 break;
123 default:
124 return -EINVAL;
125 }
126
127 return ret;
128}
129
130static struct nouveau_ofuncs
131nv50_dmaobj_ofuncs = {
132 .ctor = nv50_dmaobj_ctor,
133 .dtor = _nouveau_dmaobj_dtor,
134 .init = _nouveau_dmaobj_init,
135 .fini = _nouveau_dmaobj_fini,
136};
137
138static struct nouveau_oclass
139nv50_dmaobj_sclass[] = {
140 { 0x0002, &nv50_dmaobj_ofuncs },
141 { 0x0003, &nv50_dmaobj_ofuncs },
142 { 0x003d, &nv50_dmaobj_ofuncs },
143 {}
144};
145
146static int
147nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 135nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
148 struct nouveau_oclass *oclass, void *data, u32 size, 136 struct nouveau_oclass *oclass, void *data, u32 size,
149 struct nouveau_object **pobject) 137 struct nouveau_object **pobject)
@@ -156,7 +144,7 @@ nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
156 if (ret) 144 if (ret)
157 return ret; 145 return ret;
158 146
159 priv->base.base.sclass = nv50_dmaobj_sclass; 147 nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
160 priv->base.bind = nv50_dmaobj_bind; 148 priv->base.bind = nv50_dmaobj_bind;
161 return 0; 149 return 0;
162} 150}
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
index 5baa08695535..cd3970d03b80 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
@@ -22,7 +22,9 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/device.h>
25#include <core/gpuobj.h> 26#include <core/gpuobj.h>
27#include <core/class.h>
26 28
27#include <subdev/fb.h> 29#include <subdev/fb.h>
28#include <engine/dmaobj.h> 30#include <engine/dmaobj.h>
@@ -31,44 +33,85 @@ struct nvc0_dmaeng_priv {
31 struct nouveau_dmaeng base; 33 struct nouveau_dmaeng base;
32}; 34};
33 35
34struct nvc0_dmaobj_priv {
35 struct nouveau_dmaobj base;
36};
37
38static int 36static int
39nvc0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 37nvc0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
40 struct nouveau_oclass *oclass, void *data, u32 size, 38 struct nouveau_object *parent,
41 struct nouveau_object **pobject) 39 struct nouveau_dmaobj *dmaobj,
40 struct nouveau_gpuobj **pgpuobj)
42{ 41{
43 struct nvc0_dmaobj_priv *dmaobj; 42 u32 flags0 = nv_mclass(dmaobj);
43 u32 flags5 = 0x00000000;
44 int ret; 44 int ret;
45 45
46 ret = nouveau_dmaobj_create(parent, engine, oclass, data, size, &dmaobj); 46 if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
47 *pobject = nv_object(dmaobj); 47 switch (nv_mclass(parent->parent)) {
48 if (ret) 48 case NVA3_DISP_MAST_CLASS:
49 return ret; 49 case NVA3_DISP_SYNC_CLASS:
50 case NVA3_DISP_OVLY_CLASS:
51 break;
52 default:
53 return -EINVAL;
54 }
55 } else
56 return 0;
57
58 if (!(dmaobj->conf0 & NVC0_DMA_CONF0_ENABLE)) {
59 if (dmaobj->target == NV_MEM_TARGET_VM) {
60 dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_VM;
61 dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_VM;
62 } else {
63 dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_US;
64 dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_LINEAR;
65 dmaobj->conf0 |= 0x00020000;
66 }
67 }
50 68
51 if (dmaobj->base.target != NV_MEM_TARGET_VM || dmaobj->base.start) 69 flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_TYPE) << 22;
70 flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_PRIV);
71 flags5 |= (dmaobj->conf0 & NVC0_DMA_CONF0_UNKN);
72
73 switch (dmaobj->target) {
74 case NV_MEM_TARGET_VM:
75 flags0 |= 0x00000000;
76 break;
77 case NV_MEM_TARGET_VRAM:
78 flags0 |= 0x00010000;
79 break;
80 case NV_MEM_TARGET_PCI:
81 flags0 |= 0x00020000;
82 break;
83 case NV_MEM_TARGET_PCI_NOSNOOP:
84 flags0 |= 0x00030000;
85 break;
86 default:
52 return -EINVAL; 87 return -EINVAL;
88 }
53 89
54 return 0; 90 switch (dmaobj->access) {
55} 91 case NV_MEM_ACCESS_VM:
92 break;
93 case NV_MEM_ACCESS_RO:
94 flags0 |= 0x00040000;
95 break;
96 case NV_MEM_ACCESS_WO:
97 case NV_MEM_ACCESS_RW:
98 flags0 |= 0x00080000;
99 break;
100 }
56 101
57static struct nouveau_ofuncs 102 ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
58nvc0_dmaobj_ofuncs = { 103 if (ret == 0) {
59 .ctor = nvc0_dmaobj_ctor, 104 nv_wo32(*pgpuobj, 0x00, flags0);
60 .dtor = _nouveau_dmaobj_dtor, 105 nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
61 .init = _nouveau_dmaobj_init, 106 nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
62 .fini = _nouveau_dmaobj_fini, 107 nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
63}; 108 upper_32_bits(dmaobj->start));
109 nv_wo32(*pgpuobj, 0x10, 0x00000000);
110 nv_wo32(*pgpuobj, 0x14, flags5);
111 }
64 112
65static struct nouveau_oclass 113 return ret;
66nvc0_dmaobj_sclass[] = { 114}
67 { 0x0002, &nvc0_dmaobj_ofuncs },
68 { 0x0003, &nvc0_dmaobj_ofuncs },
69 { 0x003d, &nvc0_dmaobj_ofuncs },
70 {}
71};
72 115
73static int 116static int
74nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 117nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -83,7 +126,8 @@ nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
83 if (ret) 126 if (ret)
84 return ret; 127 return ret;
85 128
86 priv->base.base.sclass = nvc0_dmaobj_sclass; 129 nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
130 priv->base.bind = nvc0_dmaobj_bind;
87 return 0; 131 return 0;
88} 132}
89 133
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
new file mode 100644
index 000000000000..d1528752980c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
@@ -0,0 +1,122 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/device.h>
26#include <core/gpuobj.h>
27#include <core/class.h>
28
29#include <subdev/fb.h>
30#include <engine/dmaobj.h>
31
32struct nvd0_dmaeng_priv {
33 struct nouveau_dmaeng base;
34};
35
36static int
37nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
38 struct nouveau_object *parent,
39 struct nouveau_dmaobj *dmaobj,
40 struct nouveau_gpuobj **pgpuobj)
41{
42 u32 flags0 = 0x00000000;
43 int ret;
44
45 if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
46 switch (nv_mclass(parent->parent)) {
47 case NVD0_DISP_MAST_CLASS:
48 case NVD0_DISP_SYNC_CLASS:
49 case NVD0_DISP_OVLY_CLASS:
50 case NVE0_DISP_MAST_CLASS:
51 case NVE0_DISP_SYNC_CLASS:
52 case NVE0_DISP_OVLY_CLASS:
53 break;
54 default:
55 return -EINVAL;
56 }
57 } else
58 return 0;
59
60 if (!(dmaobj->conf0 & NVD0_DMA_CONF0_ENABLE)) {
61 if (dmaobj->target == NV_MEM_TARGET_VM) {
62 dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_VM;
63 dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_LP;
64 } else {
65 dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_LINEAR;
66 dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_SP;
67 }
68 }
69
70 flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_TYPE) << 20;
71 flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_PAGE) >> 4;
72
73 switch (dmaobj->target) {
74 case NV_MEM_TARGET_VRAM:
75 flags0 |= 0x00000009;
76 break;
77 default:
78 return -EINVAL;
79 break;
80 }
81
82 ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
83 if (ret == 0) {
84 nv_wo32(*pgpuobj, 0x00, flags0);
85 nv_wo32(*pgpuobj, 0x04, dmaobj->start >> 8);
86 nv_wo32(*pgpuobj, 0x08, dmaobj->limit >> 8);
87 nv_wo32(*pgpuobj, 0x0c, 0x00000000);
88 nv_wo32(*pgpuobj, 0x10, 0x00000000);
89 nv_wo32(*pgpuobj, 0x14, 0x00000000);
90 }
91
92 return ret;
93}
94
95static int
96nvd0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
97 struct nouveau_oclass *oclass, void *data, u32 size,
98 struct nouveau_object **pobject)
99{
100 struct nvd0_dmaeng_priv *priv;
101 int ret;
102
103 ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
104 *pobject = nv_object(priv);
105 if (ret)
106 return ret;
107
108 nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
109 priv->base.bind = nvd0_dmaobj_bind;
110 return 0;
111}
112
113struct nouveau_oclass
114nvd0_dmaeng_oclass = {
115 .handle = NV_ENGINE(DMAOBJ, 0xd0),
116 .ofuncs = &(struct nouveau_ofuncs) {
117 .ctor = nvd0_dmaeng_ctor,
118 .dtor = _nouveau_dmaeng_dtor,
119 .init = _nouveau_dmaeng_init,
120 .fini = _nouveau_dmaeng_fini,
121 },
122};
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
index bbb43c67c2ae..c2b9db335816 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
@@ -24,6 +24,7 @@
24 24
25#include <core/object.h> 25#include <core/object.h>
26#include <core/handle.h> 26#include <core/handle.h>
27#include <core/class.h>
27 28
28#include <engine/dmaobj.h> 29#include <engine/dmaobj.h>
29#include <engine/fifo.h> 30#include <engine/fifo.h>
@@ -33,7 +34,7 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
33 struct nouveau_object *engine, 34 struct nouveau_object *engine,
34 struct nouveau_oclass *oclass, 35 struct nouveau_oclass *oclass,
35 int bar, u32 addr, u32 size, u32 pushbuf, 36 int bar, u32 addr, u32 size, u32 pushbuf,
36 u32 engmask, int len, void **ptr) 37 u64 engmask, int len, void **ptr)
37{ 38{
38 struct nouveau_device *device = nv_device(engine); 39 struct nouveau_device *device = nv_device(engine);
39 struct nouveau_fifo *priv = (void *)engine; 40 struct nouveau_fifo *priv = (void *)engine;
@@ -56,18 +57,16 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
56 57
57 dmaeng = (void *)chan->pushdma->base.engine; 58 dmaeng = (void *)chan->pushdma->base.engine;
58 switch (chan->pushdma->base.oclass->handle) { 59 switch (chan->pushdma->base.oclass->handle) {
59 case 0x0002: 60 case NV_DMA_FROM_MEMORY_CLASS:
60 case 0x003d: 61 case NV_DMA_IN_MEMORY_CLASS:
61 break; 62 break;
62 default: 63 default:
63 return -EINVAL; 64 return -EINVAL;
64 } 65 }
65 66
66 if (dmaeng->bind) { 67 ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu);
67 ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu); 68 if (ret)
68 if (ret) 69 return ret;
69 return ret;
70 }
71 70
72 /* find a free fifo channel */ 71 /* find a free fifo channel */
73 spin_lock_irqsave(&priv->lock, flags); 72 spin_lock_irqsave(&priv->lock, flags);
@@ -119,14 +118,14 @@ _nouveau_fifo_channel_dtor(struct nouveau_object *object)
119} 118}
120 119
121u32 120u32
122_nouveau_fifo_channel_rd32(struct nouveau_object *object, u32 addr) 121_nouveau_fifo_channel_rd32(struct nouveau_object *object, u64 addr)
123{ 122{
124 struct nouveau_fifo_chan *chan = (void *)object; 123 struct nouveau_fifo_chan *chan = (void *)object;
125 return ioread32_native(chan->user + addr); 124 return ioread32_native(chan->user + addr);
126} 125}
127 126
128void 127void
129_nouveau_fifo_channel_wr32(struct nouveau_object *object, u32 addr, u32 data) 128_nouveau_fifo_channel_wr32(struct nouveau_object *object, u64 addr, u32 data)
130{ 129{
131 struct nouveau_fifo_chan *chan = (void *)object; 130 struct nouveau_fifo_chan *chan = (void *)object;
132 iowrite32_native(data, chan->user + addr); 131 iowrite32_native(data, chan->user + addr);
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index ea76e3e8c9c2..a47a8548f9e0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -126,9 +126,9 @@ nv04_fifo_chan_ctor(struct nouveau_object *parent,
126 126
127 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000, 127 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
128 0x10000, args->pushbuf, 128 0x10000, args->pushbuf,
129 (1 << NVDEV_ENGINE_DMAOBJ) | 129 (1ULL << NVDEV_ENGINE_DMAOBJ) |
130 (1 << NVDEV_ENGINE_SW) | 130 (1ULL << NVDEV_ENGINE_SW) |
131 (1 << NVDEV_ENGINE_GR), &chan); 131 (1ULL << NVDEV_ENGINE_GR), &chan);
132 *pobject = nv_object(chan); 132 *pobject = nv_object(chan);
133 if (ret) 133 if (ret)
134 return ret; 134 return ret;
@@ -440,7 +440,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
440 } 440 }
441 441
442 if (!nv04_fifo_swmthd(priv, chid, mthd, data)) { 442 if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
443 nv_info(priv, "CACHE_ERROR - Ch %d/%d " 443 nv_error(priv, "CACHE_ERROR - Ch %d/%d "
444 "Mthd 0x%04x Data 0x%08x\n", 444 "Mthd 0x%04x Data 0x%08x\n",
445 chid, (mthd >> 13) & 7, mthd & 0x1ffc, 445 chid, (mthd >> 13) & 7, mthd & 0x1ffc,
446 data); 446 data);
@@ -476,7 +476,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
476 u32 ib_get = nv_rd32(priv, 0x003334); 476 u32 ib_get = nv_rd32(priv, 0x003334);
477 u32 ib_put = nv_rd32(priv, 0x003330); 477 u32 ib_put = nv_rd32(priv, 0x003330);
478 478
479 nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x " 479 nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
480 "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x " 480 "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
481 "State 0x%08x (err: %s) Push 0x%08x\n", 481 "State 0x%08x (err: %s) Push 0x%08x\n",
482 chid, ho_get, dma_get, ho_put, 482 chid, ho_get, dma_get, ho_put,
@@ -494,7 +494,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
494 nv_wr32(priv, 0x003334, ib_put); 494 nv_wr32(priv, 0x003334, ib_put);
495 } 495 }
496 } else { 496 } else {
497 nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%08x " 497 nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%08x "
498 "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n", 498 "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
499 chid, dma_get, dma_put, state, 499 chid, dma_get, dma_put, state,
500 nv_dma_state_err(state), push); 500 nv_dma_state_err(state), push);
@@ -525,14 +525,13 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
525 525
526 if (device->card_type == NV_50) { 526 if (device->card_type == NV_50) {
527 if (status & 0x00000010) { 527 if (status & 0x00000010) {
528 nv50_fb_trap(nouveau_fb(priv), 1);
529 status &= ~0x00000010; 528 status &= ~0x00000010;
530 nv_wr32(priv, 0x002100, 0x00000010); 529 nv_wr32(priv, 0x002100, 0x00000010);
531 } 530 }
532 } 531 }
533 532
534 if (status) { 533 if (status) {
535 nv_info(priv, "unknown intr 0x%08x, ch %d\n", 534 nv_warn(priv, "unknown intr 0x%08x, ch %d\n",
536 status, chid); 535 status, chid);
537 nv_wr32(priv, NV03_PFIFO_INTR_0, status); 536 nv_wr32(priv, NV03_PFIFO_INTR_0, status);
538 status = 0; 537 status = 0;
@@ -542,7 +541,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
542 } 541 }
543 542
544 if (status) { 543 if (status) {
545 nv_info(priv, "still angry after %d spins, halt\n", cnt); 544 nv_error(priv, "still angry after %d spins, halt\n", cnt);
546 nv_wr32(priv, 0x002140, 0); 545 nv_wr32(priv, 0x002140, 0);
547 nv_wr32(priv, 0x000140, 0); 546 nv_wr32(priv, 0x000140, 0);
548 } 547 }
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
index 4ba75422b89d..2c927c1d173b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
@@ -69,9 +69,9 @@ nv10_fifo_chan_ctor(struct nouveau_object *parent,
69 69
70 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000, 70 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
71 0x10000, args->pushbuf, 71 0x10000, args->pushbuf,
72 (1 << NVDEV_ENGINE_DMAOBJ) | 72 (1ULL << NVDEV_ENGINE_DMAOBJ) |
73 (1 << NVDEV_ENGINE_SW) | 73 (1ULL << NVDEV_ENGINE_SW) |
74 (1 << NVDEV_ENGINE_GR), &chan); 74 (1ULL << NVDEV_ENGINE_GR), &chan);
75 *pobject = nv_object(chan); 75 *pobject = nv_object(chan);
76 if (ret) 76 if (ret)
77 return ret; 77 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
index b96e6b0ae2b1..a9cb51d38c57 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
@@ -74,10 +74,10 @@ nv17_fifo_chan_ctor(struct nouveau_object *parent,
74 74
75 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000, 75 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
76 0x10000, args->pushbuf, 76 0x10000, args->pushbuf,
77 (1 << NVDEV_ENGINE_DMAOBJ) | 77 (1ULL << NVDEV_ENGINE_DMAOBJ) |
78 (1 << NVDEV_ENGINE_SW) | 78 (1ULL << NVDEV_ENGINE_SW) |
79 (1 << NVDEV_ENGINE_GR) | 79 (1ULL << NVDEV_ENGINE_GR) |
80 (1 << NVDEV_ENGINE_MPEG), /* NV31- */ 80 (1ULL << NVDEV_ENGINE_MPEG), /* NV31- */
81 &chan); 81 &chan);
82 *pobject = nv_object(chan); 82 *pobject = nv_object(chan);
83 if (ret) 83 if (ret)
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
index 559c3b4e1b86..2b1f91721225 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
@@ -192,10 +192,10 @@ nv40_fifo_chan_ctor(struct nouveau_object *parent,
192 192
193 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, 193 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
194 0x1000, args->pushbuf, 194 0x1000, args->pushbuf,
195 (1 << NVDEV_ENGINE_DMAOBJ) | 195 (1ULL << NVDEV_ENGINE_DMAOBJ) |
196 (1 << NVDEV_ENGINE_SW) | 196 (1ULL << NVDEV_ENGINE_SW) |
197 (1 << NVDEV_ENGINE_GR) | 197 (1ULL << NVDEV_ENGINE_GR) |
198 (1 << NVDEV_ENGINE_MPEG), &chan); 198 (1ULL << NVDEV_ENGINE_MPEG), &chan);
199 *pobject = nv_object(chan); 199 *pobject = nv_object(chan);
200 if (ret) 200 if (ret)
201 return ret; 201 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index 536e7634a00d..bd096364f680 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -112,14 +112,6 @@ nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
112 return -EINVAL; 112 return -EINVAL;
113 } 113 }
114 114
115 nv_wo32(base->eng, addr + 0x00, 0x00000000);
116 nv_wo32(base->eng, addr + 0x04, 0x00000000);
117 nv_wo32(base->eng, addr + 0x08, 0x00000000);
118 nv_wo32(base->eng, addr + 0x0c, 0x00000000);
119 nv_wo32(base->eng, addr + 0x10, 0x00000000);
120 nv_wo32(base->eng, addr + 0x14, 0x00000000);
121 bar->flush(bar);
122
123 /* HW bug workaround: 115 /* HW bug workaround:
124 * 116 *
125 * PFIFO will hang forever if the connected engines don't report 117 * PFIFO will hang forever if the connected engines don't report
@@ -141,8 +133,18 @@ nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
141 if (suspend) 133 if (suspend)
142 ret = -EBUSY; 134 ret = -EBUSY;
143 } 135 }
144
145 nv_wr32(priv, 0x00b860, me); 136 nv_wr32(priv, 0x00b860, me);
137
138 if (ret == 0) {
139 nv_wo32(base->eng, addr + 0x00, 0x00000000);
140 nv_wo32(base->eng, addr + 0x04, 0x00000000);
141 nv_wo32(base->eng, addr + 0x08, 0x00000000);
142 nv_wo32(base->eng, addr + 0x0c, 0x00000000);
143 nv_wo32(base->eng, addr + 0x10, 0x00000000);
144 nv_wo32(base->eng, addr + 0x14, 0x00000000);
145 bar->flush(bar);
146 }
147
146 return ret; 148 return ret;
147} 149}
148 150
@@ -194,10 +196,10 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
194 196
195 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, 197 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
196 0x2000, args->pushbuf, 198 0x2000, args->pushbuf,
197 (1 << NVDEV_ENGINE_DMAOBJ) | 199 (1ULL << NVDEV_ENGINE_DMAOBJ) |
198 (1 << NVDEV_ENGINE_SW) | 200 (1ULL << NVDEV_ENGINE_SW) |
199 (1 << NVDEV_ENGINE_GR) | 201 (1ULL << NVDEV_ENGINE_GR) |
200 (1 << NVDEV_ENGINE_MPEG), &chan); 202 (1ULL << NVDEV_ENGINE_MPEG), &chan);
201 *pobject = nv_object(chan); 203 *pobject = nv_object(chan);
202 if (ret) 204 if (ret)
203 return ret; 205 return ret;
@@ -247,10 +249,10 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
247 249
248 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, 250 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
249 0x2000, args->pushbuf, 251 0x2000, args->pushbuf,
250 (1 << NVDEV_ENGINE_DMAOBJ) | 252 (1ULL << NVDEV_ENGINE_DMAOBJ) |
251 (1 << NVDEV_ENGINE_SW) | 253 (1ULL << NVDEV_ENGINE_SW) |
252 (1 << NVDEV_ENGINE_GR) | 254 (1ULL << NVDEV_ENGINE_GR) |
253 (1 << NVDEV_ENGINE_MPEG), &chan); 255 (1ULL << NVDEV_ENGINE_MPEG), &chan);
254 *pobject = nv_object(chan); 256 *pobject = nv_object(chan);
255 if (ret) 257 if (ret)
256 return ret; 258 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index b4fd26d8f166..1eb1c512f503 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -95,14 +95,6 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
95 return -EINVAL; 95 return -EINVAL;
96 } 96 }
97 97
98 nv_wo32(base->eng, addr + 0x00, 0x00000000);
99 nv_wo32(base->eng, addr + 0x04, 0x00000000);
100 nv_wo32(base->eng, addr + 0x08, 0x00000000);
101 nv_wo32(base->eng, addr + 0x0c, 0x00000000);
102 nv_wo32(base->eng, addr + 0x10, 0x00000000);
103 nv_wo32(base->eng, addr + 0x14, 0x00000000);
104 bar->flush(bar);
105
106 save = nv_mask(priv, 0x002520, 0x0000003f, 1 << engn); 98 save = nv_mask(priv, 0x002520, 0x0000003f, 1 << engn);
107 nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12); 99 nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
108 done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff); 100 done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff);
@@ -112,6 +104,14 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
112 if (suspend) 104 if (suspend)
113 return -EBUSY; 105 return -EBUSY;
114 } 106 }
107
108 nv_wo32(base->eng, addr + 0x00, 0x00000000);
109 nv_wo32(base->eng, addr + 0x04, 0x00000000);
110 nv_wo32(base->eng, addr + 0x08, 0x00000000);
111 nv_wo32(base->eng, addr + 0x0c, 0x00000000);
112 nv_wo32(base->eng, addr + 0x10, 0x00000000);
113 nv_wo32(base->eng, addr + 0x14, 0x00000000);
114 bar->flush(bar);
115 return 0; 115 return 0;
116} 116}
117 117
@@ -163,17 +163,17 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
163 163
164 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, 164 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
165 0x2000, args->pushbuf, 165 0x2000, args->pushbuf,
166 (1 << NVDEV_ENGINE_DMAOBJ) | 166 (1ULL << NVDEV_ENGINE_DMAOBJ) |
167 (1 << NVDEV_ENGINE_SW) | 167 (1ULL << NVDEV_ENGINE_SW) |
168 (1 << NVDEV_ENGINE_GR) | 168 (1ULL << NVDEV_ENGINE_GR) |
169 (1 << NVDEV_ENGINE_MPEG) | 169 (1ULL << NVDEV_ENGINE_MPEG) |
170 (1 << NVDEV_ENGINE_ME) | 170 (1ULL << NVDEV_ENGINE_ME) |
171 (1 << NVDEV_ENGINE_VP) | 171 (1ULL << NVDEV_ENGINE_VP) |
172 (1 << NVDEV_ENGINE_CRYPT) | 172 (1ULL << NVDEV_ENGINE_CRYPT) |
173 (1 << NVDEV_ENGINE_BSP) | 173 (1ULL << NVDEV_ENGINE_BSP) |
174 (1 << NVDEV_ENGINE_PPP) | 174 (1ULL << NVDEV_ENGINE_PPP) |
175 (1 << NVDEV_ENGINE_COPY0) | 175 (1ULL << NVDEV_ENGINE_COPY0) |
176 (1 << NVDEV_ENGINE_UNK1C1), &chan); 176 (1ULL << NVDEV_ENGINE_UNK1C1), &chan);
177 *pobject = nv_object(chan); 177 *pobject = nv_object(chan);
178 if (ret) 178 if (ret)
179 return ret; 179 return ret;
@@ -225,17 +225,17 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
225 225
226 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, 226 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
227 0x2000, args->pushbuf, 227 0x2000, args->pushbuf,
228 (1 << NVDEV_ENGINE_DMAOBJ) | 228 (1ULL << NVDEV_ENGINE_DMAOBJ) |
229 (1 << NVDEV_ENGINE_SW) | 229 (1ULL << NVDEV_ENGINE_SW) |
230 (1 << NVDEV_ENGINE_GR) | 230 (1ULL << NVDEV_ENGINE_GR) |
231 (1 << NVDEV_ENGINE_MPEG) | 231 (1ULL << NVDEV_ENGINE_MPEG) |
232 (1 << NVDEV_ENGINE_ME) | 232 (1ULL << NVDEV_ENGINE_ME) |
233 (1 << NVDEV_ENGINE_VP) | 233 (1ULL << NVDEV_ENGINE_VP) |
234 (1 << NVDEV_ENGINE_CRYPT) | 234 (1ULL << NVDEV_ENGINE_CRYPT) |
235 (1 << NVDEV_ENGINE_BSP) | 235 (1ULL << NVDEV_ENGINE_BSP) |
236 (1 << NVDEV_ENGINE_PPP) | 236 (1ULL << NVDEV_ENGINE_PPP) |
237 (1 << NVDEV_ENGINE_COPY0) | 237 (1ULL << NVDEV_ENGINE_COPY0) |
238 (1 << NVDEV_ENGINE_UNK1C1), &chan); 238 (1ULL << NVDEV_ENGINE_UNK1C1), &chan);
239 *pobject = nv_object(chan); 239 *pobject = nv_object(chan);
240 if (ret) 240 if (ret)
241 return ret; 241 return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index 6f21be600557..b4365dde1859 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -103,6 +103,9 @@ nvc0_fifo_context_attach(struct nouveau_object *parent,
103 case NVDEV_ENGINE_GR : addr = 0x0210; break; 103 case NVDEV_ENGINE_GR : addr = 0x0210; break;
104 case NVDEV_ENGINE_COPY0: addr = 0x0230; break; 104 case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
105 case NVDEV_ENGINE_COPY1: addr = 0x0240; break; 105 case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
106 case NVDEV_ENGINE_BSP : addr = 0x0270; break;
107 case NVDEV_ENGINE_VP : addr = 0x0250; break;
108 case NVDEV_ENGINE_PPP : addr = 0x0260; break;
106 default: 109 default:
107 return -EINVAL; 110 return -EINVAL;
108 } 111 }
@@ -137,14 +140,13 @@ nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
137 case NVDEV_ENGINE_GR : addr = 0x0210; break; 140 case NVDEV_ENGINE_GR : addr = 0x0210; break;
138 case NVDEV_ENGINE_COPY0: addr = 0x0230; break; 141 case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
139 case NVDEV_ENGINE_COPY1: addr = 0x0240; break; 142 case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
143 case NVDEV_ENGINE_BSP : addr = 0x0270; break;
144 case NVDEV_ENGINE_VP : addr = 0x0250; break;
145 case NVDEV_ENGINE_PPP : addr = 0x0260; break;
140 default: 146 default:
141 return -EINVAL; 147 return -EINVAL;
142 } 148 }
143 149
144 nv_wo32(base, addr + 0x00, 0x00000000);
145 nv_wo32(base, addr + 0x04, 0x00000000);
146 bar->flush(bar);
147
148 nv_wr32(priv, 0x002634, chan->base.chid); 150 nv_wr32(priv, 0x002634, chan->base.chid);
149 if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) { 151 if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
150 nv_error(priv, "channel %d kick timeout\n", chan->base.chid); 152 nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
@@ -152,6 +154,9 @@ nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
152 return -EBUSY; 154 return -EBUSY;
153 } 155 }
154 156
157 nv_wo32(base, addr + 0x00, 0x00000000);
158 nv_wo32(base, addr + 0x04, 0x00000000);
159 bar->flush(bar);
155 return 0; 160 return 0;
156} 161}
157 162
@@ -175,10 +180,13 @@ nvc0_fifo_chan_ctor(struct nouveau_object *parent,
175 ret = nouveau_fifo_channel_create(parent, engine, oclass, 1, 180 ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
176 priv->user.bar.offset, 0x1000, 181 priv->user.bar.offset, 0x1000,
177 args->pushbuf, 182 args->pushbuf,
178 (1 << NVDEV_ENGINE_SW) | 183 (1ULL << NVDEV_ENGINE_SW) |
179 (1 << NVDEV_ENGINE_GR) | 184 (1ULL << NVDEV_ENGINE_GR) |
180 (1 << NVDEV_ENGINE_COPY0) | 185 (1ULL << NVDEV_ENGINE_COPY0) |
181 (1 << NVDEV_ENGINE_COPY1), &chan); 186 (1ULL << NVDEV_ENGINE_COPY1) |
187 (1ULL << NVDEV_ENGINE_BSP) |
188 (1ULL << NVDEV_ENGINE_VP) |
189 (1ULL << NVDEV_ENGINE_PPP), &chan);
182 *pobject = nv_object(chan); 190 *pobject = nv_object(chan);
183 if (ret) 191 if (ret)
184 return ret; 192 return ret;
@@ -494,7 +502,7 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
494 u32 stat = nv_rd32(priv, 0x002100) & mask; 502 u32 stat = nv_rd32(priv, 0x002100) & mask;
495 503
496 if (stat & 0x00000100) { 504 if (stat & 0x00000100) {
497 nv_info(priv, "unknown status 0x00000100\n"); 505 nv_warn(priv, "unknown status 0x00000100\n");
498 nv_wr32(priv, 0x002100, 0x00000100); 506 nv_wr32(priv, 0x002100, 0x00000100);
499 stat &= ~0x00000100; 507 stat &= ~0x00000100;
500 } 508 }
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 36e81b6fafbc..c930da99c2c1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -38,12 +38,12 @@
38#include <engine/dmaobj.h> 38#include <engine/dmaobj.h>
39#include <engine/fifo.h> 39#include <engine/fifo.h>
40 40
41#define _(a,b) { (a), ((1 << (a)) | (b)) } 41#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
42static const struct { 42static const struct {
43 int subdev; 43 u64 subdev;
44 u32 mask; 44 u64 mask;
45} fifo_engine[] = { 45} fifo_engine[] = {
46 _(NVDEV_ENGINE_GR , (1 << NVDEV_ENGINE_SW)), 46 _(NVDEV_ENGINE_GR , (1ULL << NVDEV_ENGINE_SW)),
47 _(NVDEV_ENGINE_VP , 0), 47 _(NVDEV_ENGINE_VP , 0),
48 _(NVDEV_ENGINE_PPP , 0), 48 _(NVDEV_ENGINE_PPP , 0),
49 _(NVDEV_ENGINE_BSP , 0), 49 _(NVDEV_ENGINE_BSP , 0),
@@ -138,6 +138,9 @@ nve0_fifo_context_attach(struct nouveau_object *parent,
138 case NVDEV_ENGINE_GR : 138 case NVDEV_ENGINE_GR :
139 case NVDEV_ENGINE_COPY0: 139 case NVDEV_ENGINE_COPY0:
140 case NVDEV_ENGINE_COPY1: addr = 0x0210; break; 140 case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
141 case NVDEV_ENGINE_BSP : addr = 0x0270; break;
142 case NVDEV_ENGINE_VP : addr = 0x0250; break;
143 case NVDEV_ENGINE_PPP : addr = 0x0260; break;
141 default: 144 default:
142 return -EINVAL; 145 return -EINVAL;
143 } 146 }
@@ -172,14 +175,13 @@ nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
172 case NVDEV_ENGINE_GR : 175 case NVDEV_ENGINE_GR :
173 case NVDEV_ENGINE_COPY0: 176 case NVDEV_ENGINE_COPY0:
174 case NVDEV_ENGINE_COPY1: addr = 0x0210; break; 177 case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
178 case NVDEV_ENGINE_BSP : addr = 0x0270; break;
179 case NVDEV_ENGINE_VP : addr = 0x0250; break;
180 case NVDEV_ENGINE_PPP : addr = 0x0260; break;
175 default: 181 default:
176 return -EINVAL; 182 return -EINVAL;
177 } 183 }
178 184
179 nv_wo32(base, addr + 0x00, 0x00000000);
180 nv_wo32(base, addr + 0x04, 0x00000000);
181 bar->flush(bar);
182
183 nv_wr32(priv, 0x002634, chan->base.chid); 185 nv_wr32(priv, 0x002634, chan->base.chid);
184 if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) { 186 if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
185 nv_error(priv, "channel %d kick timeout\n", chan->base.chid); 187 nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
@@ -187,6 +189,9 @@ nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
187 return -EBUSY; 189 return -EBUSY;
188 } 190 }
189 191
192 nv_wo32(base, addr + 0x00, 0x00000000);
193 nv_wo32(base, addr + 0x04, 0x00000000);
194 bar->flush(bar);
190 return 0; 195 return 0;
191} 196}
192 197
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c
index e45035efb8ca..7bbb1e1b7a8d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c
@@ -669,21 +669,27 @@ nv40_grctx_fill(struct nouveau_device *device, struct nouveau_gpuobj *mem)
669 }); 669 });
670} 670}
671 671
672void 672int
673nv40_grctx_init(struct nouveau_device *device, u32 *size) 673nv40_grctx_init(struct nouveau_device *device, u32 *size)
674{ 674{
675 u32 ctxprog[256], i; 675 u32 *ctxprog = kmalloc(256 * 4, GFP_KERNEL), i;
676 struct nouveau_grctx ctx = { 676 struct nouveau_grctx ctx = {
677 .device = device, 677 .device = device,
678 .mode = NOUVEAU_GRCTX_PROG, 678 .mode = NOUVEAU_GRCTX_PROG,
679 .data = ctxprog, 679 .data = ctxprog,
680 .ctxprog_max = ARRAY_SIZE(ctxprog) 680 .ctxprog_max = 256,
681 }; 681 };
682 682
683 if (!ctxprog)
684 return -ENOMEM;
685
683 nv40_grctx_generate(&ctx); 686 nv40_grctx_generate(&ctx);
684 687
685 nv_wr32(device, 0x400324, 0); 688 nv_wr32(device, 0x400324, 0);
686 for (i = 0; i < ctx.ctxprog_len; i++) 689 for (i = 0; i < ctx.ctxprog_len; i++)
687 nv_wr32(device, 0x400328, ctxprog[i]); 690 nv_wr32(device, 0x400328, ctxprog[i]);
688 *size = ctx.ctxvals_pos * 4; 691 *size = ctx.ctxvals_pos * 4;
692
693 kfree(ctxprog);
694 return 0;
689} 695}
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
index 7b715fda2763..62ab231cd6b6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc
@@ -57,6 +57,11 @@ chipsets:
57.b16 #nve4_gpc_mmio_tail 57.b16 #nve4_gpc_mmio_tail
58.b16 #nve4_tpc_mmio_head 58.b16 #nve4_tpc_mmio_head
59.b16 #nve4_tpc_mmio_tail 59.b16 #nve4_tpc_mmio_tail
60.b8 0xe6 0 0 0
61.b16 #nve4_gpc_mmio_head
62.b16 #nve4_gpc_mmio_tail
63.b16 #nve4_tpc_mmio_head
64.b16 #nve4_tpc_mmio_tail
60.b8 0 0 0 0 65.b8 0 0 0 0
61 66
62// GPC mmio lists 67// GPC mmio lists
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
index 26c2165bad0f..09ee4702c8b2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpcnve0.fuc.h
@@ -34,13 +34,16 @@ uint32_t nve0_grgpc_data[] = {
34 0x00000000, 34 0x00000000,
35/* 0x0064: chipsets */ 35/* 0x0064: chipsets */
36 0x000000e4, 36 0x000000e4,
37 0x01040080, 37 0x0110008c,
38 0x014c0104, 38 0x01580110,
39 0x000000e7, 39 0x000000e7,
40 0x01040080, 40 0x0110008c,
41 0x014c0104, 41 0x01580110,
42 0x000000e6,
43 0x0110008c,
44 0x01580110,
42 0x00000000, 45 0x00000000,
43/* 0x0080: nve4_gpc_mmio_head */ 46/* 0x008c: nve4_gpc_mmio_head */
44 0x00000380, 47 0x00000380,
45 0x04000400, 48 0x04000400,
46 0x0800040c, 49 0x0800040c,
@@ -74,8 +77,8 @@ uint32_t nve0_grgpc_data[] = {
74 0x14003100, 77 0x14003100,
75 0x000031d0, 78 0x000031d0,
76 0x040031e0, 79 0x040031e0,
77/* 0x0104: nve4_gpc_mmio_tail */ 80/* 0x0110: nve4_gpc_mmio_tail */
78/* 0x0104: nve4_tpc_mmio_head */ 81/* 0x0110: nve4_tpc_mmio_head */
79 0x00000048, 82 0x00000048,
80 0x00000064, 83 0x00000064,
81 0x00000088, 84 0x00000088,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
index acfc457654bd..0bcfa4d447e5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc
@@ -754,6 +754,16 @@ ctx_mmio_exec:
754// on load it means: "a save preceeded this load" 754// on load it means: "a save preceeded this load"
755// 755//
756ctx_xfer: 756ctx_xfer:
757 // according to mwk, some kind of wait for idle
758 mov $r15 0xc00
759 shl b32 $r15 6
760 mov $r14 4
761 iowr I[$r15 + 0x200] $r14
762 ctx_xfer_idle:
763 iord $r14 I[$r15 + 0x000]
764 and $r14 0x2000
765 bra ne #ctx_xfer_idle
766
757 bra not $p1 #ctx_xfer_pre 767 bra not $p1 #ctx_xfer_pre
758 bra $p2 #ctx_xfer_pre_load 768 bra $p2 #ctx_xfer_pre_load
759 ctx_xfer_pre: 769 ctx_xfer_pre:
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
index 85a8d556f484..bb03d2a1d57b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
@@ -799,79 +799,80 @@ uint32_t nvc0_grhub_code[] = {
799 0x01fa0613, 799 0x01fa0613,
800 0xf803f806, 800 0xf803f806,
801/* 0x0829: ctx_xfer */ 801/* 0x0829: ctx_xfer */
802 0x0611f400, 802 0x00f7f100,
803/* 0x082f: ctx_xfer_pre */ 803 0x06f4b60c,
804 0xf01102f4, 804 0xd004e7f0,
805 0x21f510f7, 805/* 0x0836: ctx_xfer_idle */
806 0x21f50698, 806 0xfecf80fe,
807 0x11f40631, 807 0x00e4f100,
808/* 0x083d: ctx_xfer_pre_load */ 808 0xf91bf420,
809 0x02f7f01c, 809 0xf40611f4,
810 0x065721f5, 810/* 0x0846: ctx_xfer_pre */
811 0x066621f5, 811 0xf7f01102,
812 0x067821f5, 812 0x9821f510,
813 0x21f5f4bd, 813 0x3121f506,
814 0x21f50657, 814 0x1c11f406,
815/* 0x0856: ctx_xfer_exec */ 815/* 0x0854: ctx_xfer_pre_load */
816 0x019806b8, 816 0xf502f7f0,
817 0x1427f116, 817 0xf5065721,
818 0x0624b604, 818 0xf5066621,
819 0xf10020d0, 819 0xbd067821,
820 0xf0a500e7, 820 0x5721f5f4,
821 0x1fb941e3, 821 0xb821f506,
822 0x8d21f402, 822/* 0x086d: ctx_xfer_exec */
823 0xf004e0b6, 823 0x16019806,
824 0x2cf001fc, 824 0x041427f1,
825 0x0124b602, 825 0xd00624b6,
826 0xf405f2fd, 826 0xe7f10020,
827 0x17f18d21, 827 0xe3f0a500,
828 0x13f04afc, 828 0x021fb941,
829 0x0c27f002, 829 0xb68d21f4,
830 0xf50012d0, 830 0xfcf004e0,
831 0xf1020721, 831 0x022cf001,
832 0xf047fc27, 832 0xfd0124b6,
833 0x20d00223, 833 0x21f405f2,
834 0x012cf000, 834 0xfc17f18d,
835 0xd00320b6, 835 0x0213f04a,
836 0xacf00012, 836 0xd00c27f0,
837 0x06a5f001, 837 0x21f50012,
838 0x9800b7f0, 838 0x27f10207,
839 0x0d98140c, 839 0x23f047fc,
840 0x00e7f015, 840 0x0020d002,
841 0x015c21f5, 841 0xb6012cf0,
842 0xf508a7f0, 842 0x12d00320,
843 0xf5010321, 843 0x01acf000,
844 0xf4020721, 844 0xf006a5f0,
845 0xa7f02201, 845 0x0c9800b7,
846 0xc921f40c, 846 0x150d9814,
847 0x0a1017f1, 847 0xf500e7f0,
848 0xf00614b6, 848 0xf0015c21,
849 0x12d00527, 849 0x21f508a7,
850/* 0x08dd: ctx_xfer_post_save_wait */ 850 0x21f50103,
851 0x0012cf00, 851 0x01f40207,
852 0xf40522fd, 852 0x0ca7f022,
853 0x02f4fa1b, 853 0xf1c921f4,
854/* 0x08e9: ctx_xfer_post */ 854 0xb60a1017,
855 0x02f7f032, 855 0x27f00614,
856 0x065721f5, 856 0x0012d005,
857 0x21f5f4bd, 857/* 0x08f4: ctx_xfer_post_save_wait */
858 0x21f50698, 858 0xfd0012cf,
859 0x21f50226, 859 0x1bf40522,
860 0xf4bd0666, 860 0x3202f4fa,
861 0x065721f5, 861/* 0x0900: ctx_xfer_post */
862 0x981011f4, 862 0xf502f7f0,
863 0x11fd8001, 863 0xbd065721,
864 0x070bf405, 864 0x9821f5f4,
865 0x07df21f5, 865 0x2621f506,
866/* 0x0914: ctx_xfer_no_post_mmio */ 866 0x6621f502,
867 0x064921f5, 867 0xf5f4bd06,
868/* 0x0918: ctx_xfer_done */ 868 0xf4065721,
869 0x000000f8, 869 0x01981011,
870 0x00000000, 870 0x0511fd80,
871 0x00000000, 871 0xf5070bf4,
872 0x00000000, 872/* 0x092b: ctx_xfer_no_post_mmio */
873 0x00000000, 873 0xf507df21,
874 0x00000000, 874/* 0x092f: ctx_xfer_done */
875 0xf8064921,
875 0x00000000, 876 0x00000000,
876 0x00000000, 877 0x00000000,
877 0x00000000, 878 0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
index 138eeaa28665..7fe9d7cf486b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc
@@ -44,6 +44,9 @@ chipsets:
44.b8 0xe7 0 0 0 44.b8 0xe7 0 0 0
45.b16 #nve4_hub_mmio_head 45.b16 #nve4_hub_mmio_head
46.b16 #nve4_hub_mmio_tail 46.b16 #nve4_hub_mmio_tail
47.b8 0xe6 0 0 0
48.b16 #nve4_hub_mmio_head
49.b16 #nve4_hub_mmio_tail
47.b8 0 0 0 0 50.b8 0 0 0 0
48 51
49nve4_hub_mmio_head: 52nve4_hub_mmio_head:
@@ -680,6 +683,16 @@ ctx_mmio_exec:
680// on load it means: "a save preceeded this load" 683// on load it means: "a save preceeded this load"
681// 684//
682ctx_xfer: 685ctx_xfer:
686 // according to mwk, some kind of wait for idle
687 mov $r15 0xc00
688 shl b32 $r15 6
689 mov $r14 4
690 iowr I[$r15 + 0x200] $r14
691 ctx_xfer_idle:
692 iord $r14 I[$r15 + 0x000]
693 and $r14 0x2000
694 bra ne #ctx_xfer_idle
695
683 bra not $p1 #ctx_xfer_pre 696 bra not $p1 #ctx_xfer_pre
684 bra $p2 #ctx_xfer_pre_load 697 bra $p2 #ctx_xfer_pre_load
685 ctx_xfer_pre: 698 ctx_xfer_pre:
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
index decf0c60ca3b..e3421af68ab9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
@@ -30,11 +30,13 @@ uint32_t nve0_grhub_data[] = {
30 0x00000000, 30 0x00000000,
31/* 0x005c: chipsets */ 31/* 0x005c: chipsets */
32 0x000000e4, 32 0x000000e4,
33 0x013c0070, 33 0x01440078,
34 0x000000e7, 34 0x000000e7,
35 0x013c0070, 35 0x01440078,
36 0x000000e6,
37 0x01440078,
36 0x00000000, 38 0x00000000,
37/* 0x0070: nve4_hub_mmio_head */ 39/* 0x0078: nve4_hub_mmio_head */
38 0x0417e91c, 40 0x0417e91c,
39 0x04400204, 41 0x04400204,
40 0x18404010, 42 0x18404010,
@@ -86,9 +88,7 @@ uint32_t nve0_grhub_data[] = {
86 0x00408840, 88 0x00408840,
87 0x08408900, 89 0x08408900,
88 0x00408980, 90 0x00408980,
89/* 0x013c: nve4_hub_mmio_tail */ 91/* 0x0144: nve4_hub_mmio_tail */
90 0x00000000,
91 0x00000000,
92 0x00000000, 92 0x00000000,
93 0x00000000, 93 0x00000000,
94 0x00000000, 94 0x00000000,
@@ -781,77 +781,78 @@ uint32_t nve0_grhub_code[] = {
781 0x0613f002, 781 0x0613f002,
782 0xf80601fa, 782 0xf80601fa,
783/* 0x07fb: ctx_xfer */ 783/* 0x07fb: ctx_xfer */
784 0xf400f803, 784 0xf100f803,
785 0x02f40611, 785 0xb60c00f7,
786/* 0x0801: ctx_xfer_pre */ 786 0xe7f006f4,
787 0x10f7f00d, 787 0x80fed004,
788 0x067221f5, 788/* 0x0808: ctx_xfer_idle */
789/* 0x080b: ctx_xfer_pre_load */ 789 0xf100fecf,
790 0xf01c11f4, 790 0xf42000e4,
791 0x21f502f7, 791 0x11f4f91b,
792 0x21f50631, 792 0x0d02f406,
793 0x21f50640, 793/* 0x0818: ctx_xfer_pre */
794 0xf4bd0652, 794 0xf510f7f0,
795 0x063121f5, 795 0xf4067221,
796 0x069221f5, 796/* 0x0822: ctx_xfer_pre_load */
797/* 0x0824: ctx_xfer_exec */ 797 0xf7f01c11,
798 0xf1160198, 798 0x3121f502,
799 0xb6041427, 799 0x4021f506,
800 0x20d00624, 800 0x5221f506,
801 0x00e7f100, 801 0xf5f4bd06,
802 0x41e3f0a5, 802 0xf5063121,
803 0xf4021fb9, 803/* 0x083b: ctx_xfer_exec */
804 0xe0b68d21, 804 0x98069221,
805 0x01fcf004, 805 0x27f11601,
806 0xb6022cf0, 806 0x24b60414,
807 0xf2fd0124, 807 0x0020d006,
808 0x8d21f405, 808 0xa500e7f1,
809 0x4afc17f1, 809 0xb941e3f0,
810 0xf00213f0, 810 0x21f4021f,
811 0x12d00c27, 811 0x04e0b68d,
812 0x0721f500, 812 0xf001fcf0,
813 0xfc27f102, 813 0x24b6022c,
814 0x0223f047, 814 0x05f2fd01,
815 0xf00020d0, 815 0xf18d21f4,
816 0x20b6012c, 816 0xf04afc17,
817 0x0012d003, 817 0x27f00213,
818 0xf001acf0, 818 0x0012d00c,
819 0xb7f006a5, 819 0x020721f5,
820 0x140c9800, 820 0x47fc27f1,
821 0xf0150d98, 821 0xd00223f0,
822 0x21f500e7, 822 0x2cf00020,
823 0xa7f0015c, 823 0x0320b601,
824 0x0321f508, 824 0xf00012d0,
825 0x0721f501, 825 0xa5f001ac,
826 0x2201f402, 826 0x00b7f006,
827 0xf40ca7f0, 827 0x98140c98,
828 0x17f1c921, 828 0xe7f0150d,
829 0x14b60a10, 829 0x5c21f500,
830 0x0527f006, 830 0x08a7f001,
831/* 0x08ab: ctx_xfer_post_save_wait */ 831 0x010321f5,
832 0xcf0012d0, 832 0x020721f5,
833 0x22fd0012, 833 0xf02201f4,
834 0xfa1bf405, 834 0x21f40ca7,
835/* 0x08b7: ctx_xfer_post */ 835 0x1017f1c9,
836 0xf02e02f4, 836 0x0614b60a,
837 0x21f502f7, 837 0xd00527f0,
838 0xf4bd0631, 838/* 0x08c2: ctx_xfer_post_save_wait */
839 0x067221f5, 839 0x12cf0012,
840 0x022621f5, 840 0x0522fd00,
841 0x064021f5, 841 0xf4fa1bf4,
842 0x21f5f4bd, 842/* 0x08ce: ctx_xfer_post */
843 0x11f40631, 843 0xf7f02e02,
844 0x80019810, 844 0x3121f502,
845 0xf40511fd, 845 0xf5f4bd06,
846 0x21f5070b, 846 0xf5067221,
847/* 0x08e2: ctx_xfer_no_post_mmio */ 847 0xf5022621,
848/* 0x08e2: ctx_xfer_done */ 848 0xbd064021,
849 0x00f807b1, 849 0x3121f5f4,
850 0x00000000, 850 0x1011f406,
851 0x00000000, 851 0xfd800198,
852 0x00000000, 852 0x0bf40511,
853 0x00000000, 853 0xb121f507,
854 0x00000000, 854/* 0x08f9: ctx_xfer_no_post_mmio */
855 0x00000000, 855/* 0x08f9: ctx_xfer_done */
856 0x0000f807,
856 0x00000000, 857 0x00000000,
857}; 858};
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
index 618528248457..e30a9c5ff1fc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
@@ -787,168 +787,168 @@ nv01_graph_mthd_bind_chroma(struct nouveau_object *object, u32 mthd,
787 787
788static struct nouveau_omthds 788static struct nouveau_omthds
789nv03_graph_gdi_omthds[] = { 789nv03_graph_gdi_omthds[] = {
790 { 0x0184, nv01_graph_mthd_bind_patt }, 790 { 0x0184, 0x0184, nv01_graph_mthd_bind_patt },
791 { 0x0188, nv04_graph_mthd_bind_rop }, 791 { 0x0188, 0x0188, nv04_graph_mthd_bind_rop },
792 { 0x018c, nv04_graph_mthd_bind_beta1 }, 792 { 0x018c, 0x018c, nv04_graph_mthd_bind_beta1 },
793 { 0x0190, nv04_graph_mthd_bind_surf_dst }, 793 { 0x0190, 0x0190, nv04_graph_mthd_bind_surf_dst },
794 { 0x02fc, nv04_graph_mthd_set_operation }, 794 { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
795 {} 795 {}
796}; 796};
797 797
798static struct nouveau_omthds 798static struct nouveau_omthds
799nv04_graph_gdi_omthds[] = { 799nv04_graph_gdi_omthds[] = {
800 { 0x0188, nv04_graph_mthd_bind_patt }, 800 { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
801 { 0x018c, nv04_graph_mthd_bind_rop }, 801 { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
802 { 0x0190, nv04_graph_mthd_bind_beta1 }, 802 { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
803 { 0x0194, nv04_graph_mthd_bind_beta4 }, 803 { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
804 { 0x0198, nv04_graph_mthd_bind_surf2d }, 804 { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
805 { 0x02fc, nv04_graph_mthd_set_operation }, 805 { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
806 {} 806 {}
807}; 807};
808 808
809static struct nouveau_omthds 809static struct nouveau_omthds
810nv01_graph_blit_omthds[] = { 810nv01_graph_blit_omthds[] = {
811 { 0x0184, nv01_graph_mthd_bind_chroma }, 811 { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
812 { 0x0188, nv01_graph_mthd_bind_clip }, 812 { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
813 { 0x018c, nv01_graph_mthd_bind_patt }, 813 { 0x018c, 0x018c, nv01_graph_mthd_bind_patt },
814 { 0x0190, nv04_graph_mthd_bind_rop }, 814 { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
815 { 0x0194, nv04_graph_mthd_bind_beta1 }, 815 { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
816 { 0x0198, nv04_graph_mthd_bind_surf_dst }, 816 { 0x0198, 0x0198, nv04_graph_mthd_bind_surf_dst },
817 { 0x019c, nv04_graph_mthd_bind_surf_src }, 817 { 0x019c, 0x019c, nv04_graph_mthd_bind_surf_src },
818 { 0x02fc, nv04_graph_mthd_set_operation }, 818 { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
819 {} 819 {}
820}; 820};
821 821
822static struct nouveau_omthds 822static struct nouveau_omthds
823nv04_graph_blit_omthds[] = { 823nv04_graph_blit_omthds[] = {
824 { 0x0184, nv01_graph_mthd_bind_chroma }, 824 { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
825 { 0x0188, nv01_graph_mthd_bind_clip }, 825 { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
826 { 0x018c, nv04_graph_mthd_bind_patt }, 826 { 0x018c, 0x018c, nv04_graph_mthd_bind_patt },
827 { 0x0190, nv04_graph_mthd_bind_rop }, 827 { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
828 { 0x0194, nv04_graph_mthd_bind_beta1 }, 828 { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
829 { 0x0198, nv04_graph_mthd_bind_beta4 }, 829 { 0x0198, 0x0198, nv04_graph_mthd_bind_beta4 },
830 { 0x019c, nv04_graph_mthd_bind_surf2d }, 830 { 0x019c, 0x019c, nv04_graph_mthd_bind_surf2d },
831 { 0x02fc, nv04_graph_mthd_set_operation }, 831 { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
832 {} 832 {}
833}; 833};
834 834
835static struct nouveau_omthds 835static struct nouveau_omthds
836nv04_graph_iifc_omthds[] = { 836nv04_graph_iifc_omthds[] = {
837 { 0x0188, nv01_graph_mthd_bind_chroma }, 837 { 0x0188, 0x0188, nv01_graph_mthd_bind_chroma },
838 { 0x018c, nv01_graph_mthd_bind_clip }, 838 { 0x018c, 0x018c, nv01_graph_mthd_bind_clip },
839 { 0x0190, nv04_graph_mthd_bind_patt }, 839 { 0x0190, 0x0190, nv04_graph_mthd_bind_patt },
840 { 0x0194, nv04_graph_mthd_bind_rop }, 840 { 0x0194, 0x0194, nv04_graph_mthd_bind_rop },
841 { 0x0198, nv04_graph_mthd_bind_beta1 }, 841 { 0x0198, 0x0198, nv04_graph_mthd_bind_beta1 },
842 { 0x019c, nv04_graph_mthd_bind_beta4 }, 842 { 0x019c, 0x019c, nv04_graph_mthd_bind_beta4 },
843 { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf }, 843 { 0x01a0, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
844 { 0x03e4, nv04_graph_mthd_set_operation }, 844 { 0x03e4, 0x03e4, nv04_graph_mthd_set_operation },
845 {} 845 {}
846}; 846};
847 847
848static struct nouveau_omthds 848static struct nouveau_omthds
849nv01_graph_ifc_omthds[] = { 849nv01_graph_ifc_omthds[] = {
850 { 0x0184, nv01_graph_mthd_bind_chroma }, 850 { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
851 { 0x0188, nv01_graph_mthd_bind_clip }, 851 { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
852 { 0x018c, nv01_graph_mthd_bind_patt }, 852 { 0x018c, 0x018c, nv01_graph_mthd_bind_patt },
853 { 0x0190, nv04_graph_mthd_bind_rop }, 853 { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
854 { 0x0194, nv04_graph_mthd_bind_beta1 }, 854 { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
855 { 0x0198, nv04_graph_mthd_bind_surf_dst }, 855 { 0x0198, 0x0198, nv04_graph_mthd_bind_surf_dst },
856 { 0x02fc, nv04_graph_mthd_set_operation }, 856 { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
857 {} 857 {}
858}; 858};
859 859
860static struct nouveau_omthds 860static struct nouveau_omthds
861nv04_graph_ifc_omthds[] = { 861nv04_graph_ifc_omthds[] = {
862 { 0x0184, nv01_graph_mthd_bind_chroma }, 862 { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
863 { 0x0188, nv01_graph_mthd_bind_clip }, 863 { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
864 { 0x018c, nv04_graph_mthd_bind_patt }, 864 { 0x018c, 0x018c, nv04_graph_mthd_bind_patt },
865 { 0x0190, nv04_graph_mthd_bind_rop }, 865 { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
866 { 0x0194, nv04_graph_mthd_bind_beta1 }, 866 { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
867 { 0x0198, nv04_graph_mthd_bind_beta4 }, 867 { 0x0198, 0x0198, nv04_graph_mthd_bind_beta4 },
868 { 0x019c, nv04_graph_mthd_bind_surf2d }, 868 { 0x019c, 0x019c, nv04_graph_mthd_bind_surf2d },
869 { 0x02fc, nv04_graph_mthd_set_operation }, 869 { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
870 {} 870 {}
871}; 871};
872 872
873static struct nouveau_omthds 873static struct nouveau_omthds
874nv03_graph_sifc_omthds[] = { 874nv03_graph_sifc_omthds[] = {
875 { 0x0184, nv01_graph_mthd_bind_chroma }, 875 { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
876 { 0x0188, nv01_graph_mthd_bind_patt }, 876 { 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
877 { 0x018c, nv04_graph_mthd_bind_rop }, 877 { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
878 { 0x0190, nv04_graph_mthd_bind_beta1 }, 878 { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
879 { 0x0194, nv04_graph_mthd_bind_surf_dst }, 879 { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
880 { 0x02fc, nv04_graph_mthd_set_operation }, 880 { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
881 {} 881 {}
882}; 882};
883 883
884static struct nouveau_omthds 884static struct nouveau_omthds
885nv04_graph_sifc_omthds[] = { 885nv04_graph_sifc_omthds[] = {
886 { 0x0184, nv01_graph_mthd_bind_chroma }, 886 { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
887 { 0x0188, nv04_graph_mthd_bind_patt }, 887 { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
888 { 0x018c, nv04_graph_mthd_bind_rop }, 888 { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
889 { 0x0190, nv04_graph_mthd_bind_beta1 }, 889 { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
890 { 0x0194, nv04_graph_mthd_bind_beta4 }, 890 { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
891 { 0x0198, nv04_graph_mthd_bind_surf2d }, 891 { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
892 { 0x02fc, nv04_graph_mthd_set_operation }, 892 { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
893 {} 893 {}
894}; 894};
895 895
896static struct nouveau_omthds 896static struct nouveau_omthds
897nv03_graph_sifm_omthds[] = { 897nv03_graph_sifm_omthds[] = {
898 { 0x0188, nv01_graph_mthd_bind_patt }, 898 { 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
899 { 0x018c, nv04_graph_mthd_bind_rop }, 899 { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
900 { 0x0190, nv04_graph_mthd_bind_beta1 }, 900 { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
901 { 0x0194, nv04_graph_mthd_bind_surf_dst }, 901 { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
902 { 0x0304, nv04_graph_mthd_set_operation }, 902 { 0x0304, 0x0304, nv04_graph_mthd_set_operation },
903 {} 903 {}
904}; 904};
905 905
906static struct nouveau_omthds 906static struct nouveau_omthds
907nv04_graph_sifm_omthds[] = { 907nv04_graph_sifm_omthds[] = {
908 { 0x0188, nv04_graph_mthd_bind_patt }, 908 { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
909 { 0x018c, nv04_graph_mthd_bind_rop }, 909 { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
910 { 0x0190, nv04_graph_mthd_bind_beta1 }, 910 { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
911 { 0x0194, nv04_graph_mthd_bind_beta4 }, 911 { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
912 { 0x0198, nv04_graph_mthd_bind_surf2d }, 912 { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
913 { 0x0304, nv04_graph_mthd_set_operation }, 913 { 0x0304, 0x0304, nv04_graph_mthd_set_operation },
914 {} 914 {}
915}; 915};
916 916
917static struct nouveau_omthds 917static struct nouveau_omthds
918nv04_graph_surf3d_omthds[] = { 918nv04_graph_surf3d_omthds[] = {
919 { 0x02f8, nv04_graph_mthd_surf3d_clip_h }, 919 { 0x02f8, 0x02f8, nv04_graph_mthd_surf3d_clip_h },
920 { 0x02fc, nv04_graph_mthd_surf3d_clip_v }, 920 { 0x02fc, 0x02fc, nv04_graph_mthd_surf3d_clip_v },
921 {} 921 {}
922}; 922};
923 923
924static struct nouveau_omthds 924static struct nouveau_omthds
925nv03_graph_ttri_omthds[] = { 925nv03_graph_ttri_omthds[] = {
926 { 0x0188, nv01_graph_mthd_bind_clip }, 926 { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
927 { 0x018c, nv04_graph_mthd_bind_surf_color }, 927 { 0x018c, 0x018c, nv04_graph_mthd_bind_surf_color },
928 { 0x0190, nv04_graph_mthd_bind_surf_zeta }, 928 { 0x0190, 0x0190, nv04_graph_mthd_bind_surf_zeta },
929 {} 929 {}
930}; 930};
931 931
932static struct nouveau_omthds 932static struct nouveau_omthds
933nv01_graph_prim_omthds[] = { 933nv01_graph_prim_omthds[] = {
934 { 0x0184, nv01_graph_mthd_bind_clip }, 934 { 0x0184, 0x0184, nv01_graph_mthd_bind_clip },
935 { 0x0188, nv01_graph_mthd_bind_patt }, 935 { 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
936 { 0x018c, nv04_graph_mthd_bind_rop }, 936 { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
937 { 0x0190, nv04_graph_mthd_bind_beta1 }, 937 { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
938 { 0x0194, nv04_graph_mthd_bind_surf_dst }, 938 { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
939 { 0x02fc, nv04_graph_mthd_set_operation }, 939 { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
940 {} 940 {}
941}; 941};
942 942
943static struct nouveau_omthds 943static struct nouveau_omthds
944nv04_graph_prim_omthds[] = { 944nv04_graph_prim_omthds[] = {
945 { 0x0184, nv01_graph_mthd_bind_clip }, 945 { 0x0184, 0x0184, nv01_graph_mthd_bind_clip },
946 { 0x0188, nv04_graph_mthd_bind_patt }, 946 { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
947 { 0x018c, nv04_graph_mthd_bind_rop }, 947 { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
948 { 0x0190, nv04_graph_mthd_bind_beta1 }, 948 { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
949 { 0x0194, nv04_graph_mthd_bind_beta4 }, 949 { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
950 { 0x0198, nv04_graph_mthd_bind_surf2d }, 950 { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
951 { 0x02fc, nv04_graph_mthd_set_operation }, 951 { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
952 {} 952 {}
953}; 953};
954 954
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
index 92521c89e77f..5c0f843ea249 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
@@ -570,11 +570,11 @@ nv17_graph_mthd_lma_enable(struct nouveau_object *object, u32 mthd,
570 570
571static struct nouveau_omthds 571static struct nouveau_omthds
572nv17_celcius_omthds[] = { 572nv17_celcius_omthds[] = {
573 { 0x1638, nv17_graph_mthd_lma_window }, 573 { 0x1638, 0x1638, nv17_graph_mthd_lma_window },
574 { 0x163c, nv17_graph_mthd_lma_window }, 574 { 0x163c, 0x163c, nv17_graph_mthd_lma_window },
575 { 0x1640, nv17_graph_mthd_lma_window }, 575 { 0x1640, 0x1640, nv17_graph_mthd_lma_window },
576 { 0x1644, nv17_graph_mthd_lma_window }, 576 { 0x1644, 0x1644, nv17_graph_mthd_lma_window },
577 { 0x1658, nv17_graph_mthd_lma_enable }, 577 { 0x1658, 0x1658, nv17_graph_mthd_lma_enable },
578 {} 578 {}
579}; 579};
580 580
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
index 8f3f619c4a78..5b20401bf911 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
@@ -183,7 +183,7 @@ nv20_graph_tile_prog(struct nouveau_engine *engine, int i)
183 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i); 183 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
184 nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->addr); 184 nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->addr);
185 185
186 if (nv_device(engine)->card_type == NV_20) { 186 if (nv_device(engine)->chipset != 0x34) {
187 nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp); 187 nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
188 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i); 188 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
189 nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->zcomp); 189 nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->zcomp);
@@ -224,14 +224,14 @@ nv20_graph_intr(struct nouveau_subdev *subdev)
224 nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001); 224 nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
225 225
226 if (show) { 226 if (show) {
227 nv_info(priv, ""); 227 nv_error(priv, "");
228 nouveau_bitfield_print(nv10_graph_intr_name, show); 228 nouveau_bitfield_print(nv10_graph_intr_name, show);
229 printk(" nsource:"); 229 printk(" nsource:");
230 nouveau_bitfield_print(nv04_graph_nsource, nsource); 230 nouveau_bitfield_print(nv04_graph_nsource, nsource);
231 printk(" nstatus:"); 231 printk(" nstatus:");
232 nouveau_bitfield_print(nv10_graph_nstatus, nstatus); 232 nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
233 printk("\n"); 233 printk("\n");
234 nv_info(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n", 234 nv_error(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n",
235 chid, subc, class, mthd, data); 235 chid, subc, class, mthd, data);
236 } 236 }
237 237
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
index 8d0021049ec0..0b36dd3deebd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
@@ -156,8 +156,8 @@ nv40_graph_context_ctor(struct nouveau_object *parent,
156static int 156static int
157nv40_graph_context_fini(struct nouveau_object *object, bool suspend) 157nv40_graph_context_fini(struct nouveau_object *object, bool suspend)
158{ 158{
159 struct nv04_graph_priv *priv = (void *)object->engine; 159 struct nv40_graph_priv *priv = (void *)object->engine;
160 struct nv04_graph_chan *chan = (void *)object; 160 struct nv40_graph_chan *chan = (void *)object;
161 u32 inst = 0x01000000 | nv_gpuobj(chan)->addr >> 4; 161 u32 inst = 0x01000000 | nv_gpuobj(chan)->addr >> 4;
162 int ret = 0; 162 int ret = 0;
163 163
@@ -216,10 +216,10 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
216 216
217 switch (nv_device(priv)->chipset) { 217 switch (nv_device(priv)->chipset) {
218 case 0x40: 218 case 0x40:
219 case 0x41: /* guess */ 219 case 0x41:
220 case 0x42: 220 case 0x42:
221 case 0x43: 221 case 0x43:
222 case 0x45: /* guess */ 222 case 0x45:
223 case 0x4e: 223 case 0x4e:
224 nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch); 224 nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
225 nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit); 225 nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
@@ -227,6 +227,21 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
227 nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch); 227 nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
228 nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit); 228 nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
229 nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr); 229 nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
230 switch (nv_device(priv)->chipset) {
231 case 0x40:
232 case 0x45:
233 nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
234 nv_wr32(priv, NV40_PGRAPH_ZCOMP1(i), tile->zcomp);
235 break;
236 case 0x41:
237 case 0x42:
238 case 0x43:
239 nv_wr32(priv, NV41_PGRAPH_ZCOMP0(i), tile->zcomp);
240 nv_wr32(priv, NV41_PGRAPH_ZCOMP1(i), tile->zcomp);
241 break;
242 default:
243 break;
244 }
230 break; 245 break;
231 case 0x44: 246 case 0x44:
232 case 0x4a: 247 case 0x4a:
@@ -235,18 +250,31 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
235 nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr); 250 nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
236 break; 251 break;
237 case 0x46: 252 case 0x46:
253 case 0x4c:
238 case 0x47: 254 case 0x47:
239 case 0x49: 255 case 0x49:
240 case 0x4b: 256 case 0x4b:
241 case 0x4c: 257 case 0x63:
242 case 0x67: 258 case 0x67:
243 default: 259 case 0x68:
244 nv_wr32(priv, NV47_PGRAPH_TSIZE(i), tile->pitch); 260 nv_wr32(priv, NV47_PGRAPH_TSIZE(i), tile->pitch);
245 nv_wr32(priv, NV47_PGRAPH_TLIMIT(i), tile->limit); 261 nv_wr32(priv, NV47_PGRAPH_TLIMIT(i), tile->limit);
246 nv_wr32(priv, NV47_PGRAPH_TILE(i), tile->addr); 262 nv_wr32(priv, NV47_PGRAPH_TILE(i), tile->addr);
247 nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch); 263 nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
248 nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit); 264 nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
249 nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr); 265 nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
266 switch (nv_device(priv)->chipset) {
267 case 0x47:
268 case 0x49:
269 case 0x4b:
270 nv_wr32(priv, NV47_PGRAPH_ZCOMP0(i), tile->zcomp);
271 nv_wr32(priv, NV47_PGRAPH_ZCOMP1(i), tile->zcomp);
272 break;
273 default:
274 break;
275 }
276 break;
277 default:
250 break; 278 break;
251 } 279 }
252 280
@@ -293,7 +321,7 @@ nv40_graph_intr(struct nouveau_subdev *subdev)
293 nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001); 321 nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
294 322
295 if (show) { 323 if (show) {
296 nv_info(priv, ""); 324 nv_error(priv, "");
297 nouveau_bitfield_print(nv10_graph_intr_name, show); 325 nouveau_bitfield_print(nv10_graph_intr_name, show);
298 printk(" nsource:"); 326 printk(" nsource:");
299 nouveau_bitfield_print(nv04_graph_nsource, nsource); 327 nouveau_bitfield_print(nv04_graph_nsource, nsource);
@@ -346,7 +374,9 @@ nv40_graph_init(struct nouveau_object *object)
346 return ret; 374 return ret;
347 375
348 /* generate and upload context program */ 376 /* generate and upload context program */
349 nv40_grctx_init(nv_device(priv), &priv->size); 377 ret = nv40_grctx_init(nv_device(priv), &priv->size);
378 if (ret)
379 return ret;
350 380
351 /* No context present currently */ 381 /* No context present currently */
352 nv_wr32(priv, NV40_PGRAPH_CTXCTL_CUR, 0x00000000); 382 nv_wr32(priv, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
index d2ac975afc2e..7da35a4e7970 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
@@ -15,7 +15,7 @@ nv44_graph_class(void *priv)
15 return !(0x0baf & (1 << (device->chipset & 0x0f))); 15 return !(0x0baf & (1 << (device->chipset & 0x0f)));
16} 16}
17 17
18void nv40_grctx_init(struct nouveau_device *, u32 *size); 18int nv40_grctx_init(struct nouveau_device *, u32 *size);
19void nv40_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *); 19void nv40_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *);
20 20
21#endif 21#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index ab3b9dcaf478..b1c3d835b4c2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -184,6 +184,65 @@ nv50_graph_tlb_flush(struct nouveau_engine *engine)
184 return 0; 184 return 0;
185} 185}
186 186
187static const struct nouveau_bitfield nv50_pgraph_status[] = {
188 { 0x00000001, "BUSY" }, /* set when any bit is set */
189 { 0x00000002, "DISPATCH" },
190 { 0x00000004, "UNK2" },
191 { 0x00000008, "UNK3" },
192 { 0x00000010, "UNK4" },
193 { 0x00000020, "UNK5" },
194 { 0x00000040, "M2MF" },
195 { 0x00000080, "UNK7" },
196 { 0x00000100, "CTXPROG" },
197 { 0x00000200, "VFETCH" },
198 { 0x00000400, "CCACHE_UNK4" },
199 { 0x00000800, "STRMOUT_GSCHED_UNK5" },
200 { 0x00001000, "UNK14XX" },
201 { 0x00002000, "UNK24XX_CSCHED" },
202 { 0x00004000, "UNK1CXX" },
203 { 0x00008000, "CLIPID" },
204 { 0x00010000, "ZCULL" },
205 { 0x00020000, "ENG2D" },
206 { 0x00040000, "UNK34XX" },
207 { 0x00080000, "TPRAST" },
208 { 0x00100000, "TPROP" },
209 { 0x00200000, "TEX" },
210 { 0x00400000, "TPVP" },
211 { 0x00800000, "MP" },
212 { 0x01000000, "ROP" },
213 {}
214};
215
216static const char *const nv50_pgraph_vstatus_0[] = {
217 "VFETCH", "CCACHE", "UNK4", "UNK5", "GSCHED", "STRMOUT", "UNK14XX", NULL
218};
219
220static const char *const nv50_pgraph_vstatus_1[] = {
221 "TPRAST", "TPROP", "TEXTURE", "TPVP", "MP", NULL
222};
223
224static const char *const nv50_pgraph_vstatus_2[] = {
225 "UNK24XX", "CSCHED", "UNK1CXX", "CLIPID", "ZCULL", "ENG2D", "UNK34XX",
226 "ROP", NULL
227};
228
229static void nouveau_pgraph_vstatus_print(struct nv50_graph_priv *priv, int r,
230 const char *const units[], u32 status)
231{
232 int i;
233
234 nv_error(priv, "PGRAPH_VSTATUS%d: 0x%08x", r, status);
235
236 for (i = 0; units[i] && status; i++) {
237 if ((status & 7) == 1)
238 pr_cont(" %s", units[i]);
239 status >>= 3;
240 }
241 if (status)
242 pr_cont(" (invalid: 0x%x)", status);
243 pr_cont("\n");
244}
245
187static int 246static int
188nv84_graph_tlb_flush(struct nouveau_engine *engine) 247nv84_graph_tlb_flush(struct nouveau_engine *engine)
189{ 248{
@@ -219,10 +278,19 @@ nv84_graph_tlb_flush(struct nouveau_engine *engine)
219 !(timeout = ptimer->read(ptimer) - start > 2000000000)); 278 !(timeout = ptimer->read(ptimer) - start > 2000000000));
220 279
221 if (timeout) { 280 if (timeout) {
222 nv_error(priv, "PGRAPH TLB flush idle timeout fail: " 281 nv_error(priv, "PGRAPH TLB flush idle timeout fail\n");
223 "0x%08x 0x%08x 0x%08x 0x%08x\n", 282
224 nv_rd32(priv, 0x400700), nv_rd32(priv, 0x400380), 283 tmp = nv_rd32(priv, 0x400700);
225 nv_rd32(priv, 0x400384), nv_rd32(priv, 0x400388)); 284 nv_error(priv, "PGRAPH_STATUS : 0x%08x", tmp);
285 nouveau_bitfield_print(nv50_pgraph_status, tmp);
286 pr_cont("\n");
287
288 nouveau_pgraph_vstatus_print(priv, 0, nv50_pgraph_vstatus_0,
289 nv_rd32(priv, 0x400380));
290 nouveau_pgraph_vstatus_print(priv, 1, nv50_pgraph_vstatus_1,
291 nv_rd32(priv, 0x400384));
292 nouveau_pgraph_vstatus_print(priv, 2, nv50_pgraph_vstatus_2,
293 nv_rd32(priv, 0x400388));
226 } 294 }
227 295
228 nv50_vm_flush_engine(&engine->base, 0x00); 296 nv50_vm_flush_engine(&engine->base, 0x00);
@@ -453,13 +521,13 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
453 } 521 }
454 if (ustatus) { 522 if (ustatus) {
455 if (display) 523 if (display)
456 nv_info(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus); 524 nv_error(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
457 } 525 }
458 nv_wr32(priv, ustatus_addr, 0xc0000000); 526 nv_wr32(priv, ustatus_addr, 0xc0000000);
459 } 527 }
460 528
461 if (!tps && display) 529 if (!tps && display)
462 nv_info(priv, "%s - No TPs claiming errors?\n", name); 530 nv_warn(priv, "%s - No TPs claiming errors?\n", name);
463} 531}
464 532
465static int 533static int
@@ -718,13 +786,12 @@ nv50_graph_intr(struct nouveau_subdev *subdev)
718 nv_wr32(priv, 0x400500, 0x00010001); 786 nv_wr32(priv, 0x400500, 0x00010001);
719 787
720 if (show) { 788 if (show) {
721 nv_info(priv, ""); 789 nv_error(priv, "");
722 nouveau_bitfield_print(nv50_graph_intr_name, show); 790 nouveau_bitfield_print(nv50_graph_intr_name, show);
723 printk("\n"); 791 printk("\n");
724 nv_error(priv, "ch %d [0x%010llx] subc %d class 0x%04x " 792 nv_error(priv, "ch %d [0x%010llx] subc %d class 0x%04x "
725 "mthd 0x%04x data 0x%08x\n", 793 "mthd 0x%04x data 0x%08x\n",
726 chid, (u64)inst << 12, subc, class, mthd, data); 794 chid, (u64)inst << 12, subc, class, mthd, data);
727 nv50_fb_trap(nouveau_fb(priv), 1);
728 } 795 }
729 796
730 if (nv_rd32(priv, 0x400824) & (1 << 31)) 797 if (nv_rd32(priv, 0x400824) & (1 << 31))
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index c62f2d0f5f0a..45aff5f5085a 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -516,18 +516,9 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
516{ 516{
517 struct nouveau_device *device = nv_device(parent); 517 struct nouveau_device *device = nv_device(parent);
518 struct nvc0_graph_priv *priv; 518 struct nvc0_graph_priv *priv;
519 bool enable = true;
520 int ret, i; 519 int ret, i;
521 520
522 switch (device->chipset) { 521 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
523 case 0xd9: /* known broken without binary driver firmware */
524 enable = false;
525 break;
526 default:
527 break;
528 }
529
530 ret = nouveau_graph_create(parent, engine, oclass, enable, &priv);
531 *pobject = nv_object(priv); 522 *pobject = nv_object(priv);
532 if (ret) 523 if (ret)
533 return ret; 524 return ret;
@@ -814,7 +805,7 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
814 nv_wr32(priv, 0x41a100, 0x00000002); 805 nv_wr32(priv, 0x41a100, 0x00000002);
815 nv_wr32(priv, 0x409100, 0x00000002); 806 nv_wr32(priv, 0x409100, 0x00000002);
816 if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001)) 807 if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001))
817 nv_info(priv, "0x409800 wait failed\n"); 808 nv_warn(priv, "0x409800 wait failed\n");
818 809
819 nv_wr32(priv, 0x409840, 0xffffffff); 810 nv_wr32(priv, 0x409840, 0xffffffff);
820 nv_wr32(priv, 0x409500, 0x7fffffff); 811 nv_wr32(priv, 0x409500, 0x7fffffff);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
index 18d2210e12eb..a1e78de46456 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
@@ -121,6 +121,7 @@ nvc0_graph_class(void *obj)
121 return 0x9297; 121 return 0x9297;
122 case 0xe4: 122 case 0xe4:
123 case 0xe7: 123 case 0xe7:
124 case 0xe6:
124 return 0xa097; 125 return 0xa097;
125 default: 126 default:
126 return 0; 127 return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
index 539d4c72f192..9f82e9702b46 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nve0.c
@@ -203,7 +203,7 @@ nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
203 struct nvc0_graph_priv *priv; 203 struct nvc0_graph_priv *priv;
204 int ret, i; 204 int ret, i;
205 205
206 ret = nouveau_graph_create(parent, engine, oclass, false, &priv); 206 ret = nouveau_graph_create(parent, engine, oclass, true, &priv);
207 *pobject = nv_object(priv); 207 *pobject = nv_object(priv);
208 if (ret) 208 if (ret)
209 return ret; 209 return ret;
@@ -252,6 +252,7 @@ nve0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
252 priv->magic_not_rop_nr = 1; 252 priv->magic_not_rop_nr = 1;
253 break; 253 break;
254 case 0xe7: 254 case 0xe7:
255 case 0xe6:
255 priv->magic_not_rop_nr = 1; 256 priv->magic_not_rop_nr = 1;
256 break; 257 break;
257 default: 258 default:
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/regs.h b/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
index 9c715a25cecb..fde8e24415e4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
@@ -205,6 +205,7 @@
205#define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16)) 205#define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16))
206#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16)) 206#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16))
207#define NV20_PGRAPH_ZCOMP(i) (0x00400980 + 4*(i)) 207#define NV20_PGRAPH_ZCOMP(i) (0x00400980 + 4*(i))
208#define NV41_PGRAPH_ZCOMP0(i) (0x004009c0 + 4*(i))
208#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16)) 209#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16))
209#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16)) 210#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16))
210#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16)) 211#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16))
@@ -216,6 +217,7 @@
216#define NV47_PGRAPH_TSTATUS(i) (0x00400D0C + (i*16)) 217#define NV47_PGRAPH_TSTATUS(i) (0x00400D0C + (i*16))
217#define NV04_PGRAPH_V_RAM 0x00400D40 218#define NV04_PGRAPH_V_RAM 0x00400D40
218#define NV04_PGRAPH_W_RAM 0x00400D80 219#define NV04_PGRAPH_W_RAM 0x00400D80
220#define NV47_PGRAPH_ZCOMP0(i) (0x00400e00 + 4*(i))
219#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40 221#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40
220#define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44 222#define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44
221#define NV10_PGRAPH_COMBINER0_IN_RGB 0x00400E48 223#define NV10_PGRAPH_COMBINER0_IN_RGB 0x00400E48
@@ -261,9 +263,12 @@
261#define NV04_PGRAPH_DMA_B_OFFSET 0x00401098 263#define NV04_PGRAPH_DMA_B_OFFSET 0x00401098
262#define NV04_PGRAPH_DMA_B_SIZE 0x0040109C 264#define NV04_PGRAPH_DMA_B_SIZE 0x0040109C
263#define NV04_PGRAPH_DMA_B_Y_SIZE 0x004010A0 265#define NV04_PGRAPH_DMA_B_Y_SIZE 0x004010A0
266#define NV47_PGRAPH_ZCOMP1(i) (0x004068c0 + 4*(i))
264#define NV40_PGRAPH_TILE1(i) (0x00406900 + (i*16)) 267#define NV40_PGRAPH_TILE1(i) (0x00406900 + (i*16))
265#define NV40_PGRAPH_TLIMIT1(i) (0x00406904 + (i*16)) 268#define NV40_PGRAPH_TLIMIT1(i) (0x00406904 + (i*16))
266#define NV40_PGRAPH_TSIZE1(i) (0x00406908 + (i*16)) 269#define NV40_PGRAPH_TSIZE1(i) (0x00406908 + (i*16))
267#define NV40_PGRAPH_TSTATUS1(i) (0x0040690C + (i*16)) 270#define NV40_PGRAPH_TSTATUS1(i) (0x0040690C + (i*16))
271#define NV40_PGRAPH_ZCOMP1(i) (0x00406980 + 4*(i))
272#define NV41_PGRAPH_ZCOMP1(i) (0x004069c0 + 4*(i))
268 273
269#endif 274#endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
index 1f394a2629e7..9fd86375f4c4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -121,9 +121,9 @@ nv31_mpeg_ofuncs = {
121 121
122static struct nouveau_omthds 122static struct nouveau_omthds
123nv31_mpeg_omthds[] = { 123nv31_mpeg_omthds[] = {
124 { 0x0190, nv31_mpeg_mthd_dma }, 124 { 0x0190, 0x0190, nv31_mpeg_mthd_dma },
125 { 0x01a0, nv31_mpeg_mthd_dma }, 125 { 0x01a0, 0x01a0, nv31_mpeg_mthd_dma },
126 { 0x01b0, nv31_mpeg_mthd_dma }, 126 { 0x01b0, 0x01b0, nv31_mpeg_mthd_dma },
127 {} 127 {}
128}; 128};
129 129
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
index 12418574efea..f7c581ad1991 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
@@ -38,7 +38,7 @@ struct nv40_mpeg_priv {
38}; 38};
39 39
40struct nv40_mpeg_chan { 40struct nv40_mpeg_chan {
41 struct nouveau_mpeg base; 41 struct nouveau_mpeg_chan base;
42}; 42};
43 43
44/******************************************************************************* 44/*******************************************************************************
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
index 8678a9996d57..bc7d12b30fc1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
@@ -157,7 +157,6 @@ nv50_mpeg_intr(struct nouveau_subdev *subdev)
157 157
158 nv_wr32(priv, 0x00b100, stat); 158 nv_wr32(priv, 0x00b100, stat);
159 nv_wr32(priv, 0x00b230, 0x00000001); 159 nv_wr32(priv, 0x00b230, 0x00000001);
160 nv50_fb_trap(nouveau_fb(priv), 1);
161} 160}
162 161
163static void 162static void
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
index 50e7e0da1981..5a5b2a773ed7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
@@ -22,18 +22,18 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/os.h> 25#include <core/engine.h>
26#include <core/class.h>
27#include <core/engctx.h> 26#include <core/engctx.h>
27#include <core/class.h>
28 28
29#include <engine/ppp.h> 29#include <engine/ppp.h>
30 30
31struct nv98_ppp_priv { 31struct nv98_ppp_priv {
32 struct nouveau_ppp base; 32 struct nouveau_engine base;
33}; 33};
34 34
35struct nv98_ppp_chan { 35struct nv98_ppp_chan {
36 struct nouveau_ppp_chan base; 36 struct nouveau_engctx base;
37}; 37};
38 38
39/******************************************************************************* 39/*******************************************************************************
@@ -49,61 +49,16 @@ nv98_ppp_sclass[] = {
49 * PPPP context 49 * PPPP context
50 ******************************************************************************/ 50 ******************************************************************************/
51 51
52static int
53nv98_ppp_context_ctor(struct nouveau_object *parent,
54 struct nouveau_object *engine,
55 struct nouveau_oclass *oclass, void *data, u32 size,
56 struct nouveau_object **pobject)
57{
58 struct nv98_ppp_chan *priv;
59 int ret;
60
61 ret = nouveau_ppp_context_create(parent, engine, oclass, NULL,
62 0, 0, 0, &priv);
63 *pobject = nv_object(priv);
64 if (ret)
65 return ret;
66
67 return 0;
68}
69
70static void
71nv98_ppp_context_dtor(struct nouveau_object *object)
72{
73 struct nv98_ppp_chan *priv = (void *)object;
74 nouveau_ppp_context_destroy(&priv->base);
75}
76
77static int
78nv98_ppp_context_init(struct nouveau_object *object)
79{
80 struct nv98_ppp_chan *priv = (void *)object;
81 int ret;
82
83 ret = nouveau_ppp_context_init(&priv->base);
84 if (ret)
85 return ret;
86
87 return 0;
88}
89
90static int
91nv98_ppp_context_fini(struct nouveau_object *object, bool suspend)
92{
93 struct nv98_ppp_chan *priv = (void *)object;
94 return nouveau_ppp_context_fini(&priv->base, suspend);
95}
96
97static struct nouveau_oclass 52static struct nouveau_oclass
98nv98_ppp_cclass = { 53nv98_ppp_cclass = {
99 .handle = NV_ENGCTX(PPP, 0x98), 54 .handle = NV_ENGCTX(PPP, 0x98),
100 .ofuncs = &(struct nouveau_ofuncs) { 55 .ofuncs = &(struct nouveau_ofuncs) {
101 .ctor = nv98_ppp_context_ctor, 56 .ctor = _nouveau_engctx_ctor,
102 .dtor = nv98_ppp_context_dtor, 57 .dtor = _nouveau_engctx_dtor,
103 .init = nv98_ppp_context_init, 58 .init = _nouveau_engctx_init,
104 .fini = nv98_ppp_context_fini, 59 .fini = _nouveau_engctx_fini,
105 .rd32 = _nouveau_ppp_context_rd32, 60 .rd32 = _nouveau_engctx_rd32,
106 .wr32 = _nouveau_ppp_context_wr32, 61 .wr32 = _nouveau_engctx_wr32,
107 }, 62 },
108}; 63};
109 64
@@ -111,11 +66,6 @@ nv98_ppp_cclass = {
111 * PPPP engine/subdev functions 66 * PPPP engine/subdev functions
112 ******************************************************************************/ 67 ******************************************************************************/
113 68
114static void
115nv98_ppp_intr(struct nouveau_subdev *subdev)
116{
117}
118
119static int 69static int
120nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 70nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
121 struct nouveau_oclass *oclass, void *data, u32 size, 71 struct nouveau_oclass *oclass, void *data, u32 size,
@@ -124,52 +74,25 @@ nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
124 struct nv98_ppp_priv *priv; 74 struct nv98_ppp_priv *priv;
125 int ret; 75 int ret;
126 76
127 ret = nouveau_ppp_create(parent, engine, oclass, &priv); 77 ret = nouveau_engine_create(parent, engine, oclass, true,
78 "PPPP", "ppp", &priv);
128 *pobject = nv_object(priv); 79 *pobject = nv_object(priv);
129 if (ret) 80 if (ret)
130 return ret; 81 return ret;
131 82
132 nv_subdev(priv)->unit = 0x00400002; 83 nv_subdev(priv)->unit = 0x00400002;
133 nv_subdev(priv)->intr = nv98_ppp_intr;
134 nv_engine(priv)->cclass = &nv98_ppp_cclass; 84 nv_engine(priv)->cclass = &nv98_ppp_cclass;
135 nv_engine(priv)->sclass = nv98_ppp_sclass; 85 nv_engine(priv)->sclass = nv98_ppp_sclass;
136 return 0; 86 return 0;
137} 87}
138 88
139static void
140nv98_ppp_dtor(struct nouveau_object *object)
141{
142 struct nv98_ppp_priv *priv = (void *)object;
143 nouveau_ppp_destroy(&priv->base);
144}
145
146static int
147nv98_ppp_init(struct nouveau_object *object)
148{
149 struct nv98_ppp_priv *priv = (void *)object;
150 int ret;
151
152 ret = nouveau_ppp_init(&priv->base);
153 if (ret)
154 return ret;
155
156 return 0;
157}
158
159static int
160nv98_ppp_fini(struct nouveau_object *object, bool suspend)
161{
162 struct nv98_ppp_priv *priv = (void *)object;
163 return nouveau_ppp_fini(&priv->base, suspend);
164}
165
166struct nouveau_oclass 89struct nouveau_oclass
167nv98_ppp_oclass = { 90nv98_ppp_oclass = {
168 .handle = NV_ENGINE(PPP, 0x98), 91 .handle = NV_ENGINE(PPP, 0x98),
169 .ofuncs = &(struct nouveau_ofuncs) { 92 .ofuncs = &(struct nouveau_ofuncs) {
170 .ctor = nv98_ppp_ctor, 93 .ctor = nv98_ppp_ctor,
171 .dtor = nv98_ppp_dtor, 94 .dtor = _nouveau_engine_dtor,
172 .init = nv98_ppp_init, 95 .init = _nouveau_engine_init,
173 .fini = nv98_ppp_fini, 96 .fini = _nouveau_engine_fini,
174 }, 97 },
175}; 98};
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
new file mode 100644
index 000000000000..ebf0d860e2dd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
@@ -0,0 +1,110 @@
1/*
2 * Copyright 2012 Maarten Lankhorst
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Maarten Lankhorst
23 */
24
25#include <core/falcon.h>
26
27#include <engine/ppp.h>
28
29struct nvc0_ppp_priv {
30 struct nouveau_falcon base;
31};
32
33/*******************************************************************************
34 * PPP object classes
35 ******************************************************************************/
36
37static struct nouveau_oclass
38nvc0_ppp_sclass[] = {
39 { 0x90b3, &nouveau_object_ofuncs },
40 {},
41};
42
43/*******************************************************************************
44 * PPPP context
45 ******************************************************************************/
46
47static struct nouveau_oclass
48nvc0_ppp_cclass = {
49 .handle = NV_ENGCTX(PPP, 0xc0),
50 .ofuncs = &(struct nouveau_ofuncs) {
51 .ctor = _nouveau_falcon_context_ctor,
52 .dtor = _nouveau_falcon_context_dtor,
53 .init = _nouveau_falcon_context_init,
54 .fini = _nouveau_falcon_context_fini,
55 .rd32 = _nouveau_falcon_context_rd32,
56 .wr32 = _nouveau_falcon_context_wr32,
57 },
58};
59
60/*******************************************************************************
61 * PPPP engine/subdev functions
62 ******************************************************************************/
63
64static int
65nvc0_ppp_init(struct nouveau_object *object)
66{
67 struct nvc0_ppp_priv *priv = (void *)object;
68 int ret;
69
70 ret = nouveau_falcon_init(&priv->base);
71 if (ret)
72 return ret;
73
74 nv_wr32(priv, 0x086010, 0x0000fff2);
75 nv_wr32(priv, 0x08601c, 0x0000fff2);
76 return 0;
77}
78
79static int
80nvc0_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
81 struct nouveau_oclass *oclass, void *data, u32 size,
82 struct nouveau_object **pobject)
83{
84 struct nvc0_ppp_priv *priv;
85 int ret;
86
87 ret = nouveau_falcon_create(parent, engine, oclass, 0x086000, true,
88 "PPPP", "ppp", &priv);
89 *pobject = nv_object(priv);
90 if (ret)
91 return ret;
92
93 nv_subdev(priv)->unit = 0x00000002;
94 nv_engine(priv)->cclass = &nvc0_ppp_cclass;
95 nv_engine(priv)->sclass = nvc0_ppp_sclass;
96 return 0;
97}
98
99struct nouveau_oclass
100nvc0_ppp_oclass = {
101 .handle = NV_ENGINE(PPP, 0xc0),
102 .ofuncs = &(struct nouveau_ofuncs) {
103 .ctor = nvc0_ppp_ctor,
104 .dtor = _nouveau_falcon_dtor,
105 .init = nvc0_ppp_init,
106 .fini = _nouveau_falcon_fini,
107 .rd32 = _nouveau_falcon_rd32,
108 .wr32 = _nouveau_falcon_wr32,
109 },
110};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
index 3ca4c3aa90b7..2a859a31c30d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
@@ -63,8 +63,8 @@ nv04_software_flip(struct nouveau_object *object, u32 mthd,
63 63
64static struct nouveau_omthds 64static struct nouveau_omthds
65nv04_software_omthds[] = { 65nv04_software_omthds[] = {
66 { 0x0150, nv04_software_set_ref }, 66 { 0x0150, 0x0150, nv04_software_set_ref },
67 { 0x0500, nv04_software_flip }, 67 { 0x0500, 0x0500, nv04_software_flip },
68 {} 68 {}
69}; 69};
70 70
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
index 6e699afbfdb7..a019364b1e13 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
@@ -52,7 +52,7 @@ nv10_software_flip(struct nouveau_object *object, u32 mthd,
52 52
53static struct nouveau_omthds 53static struct nouveau_omthds
54nv10_software_omthds[] = { 54nv10_software_omthds[] = {
55 { 0x0500, nv10_software_flip }, 55 { 0x0500, 0x0500, nv10_software_flip },
56 {} 56 {}
57}; 57};
58 58
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
index a2edcd38544a..b0e7e1c01ce6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -117,11 +117,11 @@ nv50_software_mthd_flip(struct nouveau_object *object, u32 mthd,
117 117
118static struct nouveau_omthds 118static struct nouveau_omthds
119nv50_software_omthds[] = { 119nv50_software_omthds[] = {
120 { 0x018c, nv50_software_mthd_dma_vblsem }, 120 { 0x018c, 0x018c, nv50_software_mthd_dma_vblsem },
121 { 0x0400, nv50_software_mthd_vblsem_offset }, 121 { 0x0400, 0x0400, nv50_software_mthd_vblsem_offset },
122 { 0x0404, nv50_software_mthd_vblsem_value }, 122 { 0x0404, 0x0404, nv50_software_mthd_vblsem_value },
123 { 0x0408, nv50_software_mthd_vblsem_release }, 123 { 0x0408, 0x0408, nv50_software_mthd_vblsem_release },
124 { 0x0500, nv50_software_mthd_flip }, 124 { 0x0500, 0x0500, nv50_software_mthd_flip },
125 {} 125 {}
126}; 126};
127 127
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
index b7b0d7e330d6..282a1cd1bc2f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -99,11 +99,11 @@ nvc0_software_mthd_flip(struct nouveau_object *object, u32 mthd,
99 99
100static struct nouveau_omthds 100static struct nouveau_omthds
101nvc0_software_omthds[] = { 101nvc0_software_omthds[] = {
102 { 0x0400, nvc0_software_mthd_vblsem_offset }, 102 { 0x0400, 0x0400, nvc0_software_mthd_vblsem_offset },
103 { 0x0404, nvc0_software_mthd_vblsem_offset }, 103 { 0x0404, 0x0404, nvc0_software_mthd_vblsem_offset },
104 { 0x0408, nvc0_software_mthd_vblsem_value }, 104 { 0x0408, 0x0408, nvc0_software_mthd_vblsem_value },
105 { 0x040c, nvc0_software_mthd_vblsem_release }, 105 { 0x040c, 0x040c, nvc0_software_mthd_vblsem_release },
106 { 0x0500, nvc0_software_mthd_flip }, 106 { 0x0500, 0x0500, nvc0_software_mthd_flip },
107 {} 107 {}
108}; 108};
109 109
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
index dd23c80e5405..261cd96e6951 100644
--- a/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
@@ -22,18 +22,13 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h> 25#include <core/engctx.h>
26#include <core/class.h>
28 27
29#include <engine/vp.h> 28#include <engine/vp.h>
30 29
31struct nv84_vp_priv { 30struct nv84_vp_priv {
32 struct nouveau_vp base; 31 struct nouveau_engine base;
33};
34
35struct nv84_vp_chan {
36 struct nouveau_vp_chan base;
37}; 32};
38 33
39/******************************************************************************* 34/*******************************************************************************
@@ -49,61 +44,16 @@ nv84_vp_sclass[] = {
49 * PVP context 44 * PVP context
50 ******************************************************************************/ 45 ******************************************************************************/
51 46
52static int
53nv84_vp_context_ctor(struct nouveau_object *parent,
54 struct nouveau_object *engine,
55 struct nouveau_oclass *oclass, void *data, u32 size,
56 struct nouveau_object **pobject)
57{
58 struct nv84_vp_chan *priv;
59 int ret;
60
61 ret = nouveau_vp_context_create(parent, engine, oclass, NULL,
62 0, 0, 0, &priv);
63 *pobject = nv_object(priv);
64 if (ret)
65 return ret;
66
67 return 0;
68}
69
70static void
71nv84_vp_context_dtor(struct nouveau_object *object)
72{
73 struct nv84_vp_chan *priv = (void *)object;
74 nouveau_vp_context_destroy(&priv->base);
75}
76
77static int
78nv84_vp_context_init(struct nouveau_object *object)
79{
80 struct nv84_vp_chan *priv = (void *)object;
81 int ret;
82
83 ret = nouveau_vp_context_init(&priv->base);
84 if (ret)
85 return ret;
86
87 return 0;
88}
89
90static int
91nv84_vp_context_fini(struct nouveau_object *object, bool suspend)
92{
93 struct nv84_vp_chan *priv = (void *)object;
94 return nouveau_vp_context_fini(&priv->base, suspend);
95}
96
97static struct nouveau_oclass 47static struct nouveau_oclass
98nv84_vp_cclass = { 48nv84_vp_cclass = {
99 .handle = NV_ENGCTX(VP, 0x84), 49 .handle = NV_ENGCTX(VP, 0x84),
100 .ofuncs = &(struct nouveau_ofuncs) { 50 .ofuncs = &(struct nouveau_ofuncs) {
101 .ctor = nv84_vp_context_ctor, 51 .ctor = _nouveau_engctx_ctor,
102 .dtor = nv84_vp_context_dtor, 52 .dtor = _nouveau_engctx_dtor,
103 .init = nv84_vp_context_init, 53 .init = _nouveau_engctx_init,
104 .fini = nv84_vp_context_fini, 54 .fini = _nouveau_engctx_fini,
105 .rd32 = _nouveau_vp_context_rd32, 55 .rd32 = _nouveau_engctx_rd32,
106 .wr32 = _nouveau_vp_context_wr32, 56 .wr32 = _nouveau_engctx_wr32,
107 }, 57 },
108}; 58};
109 59
@@ -111,11 +61,6 @@ nv84_vp_cclass = {
111 * PVP engine/subdev functions 61 * PVP engine/subdev functions
112 ******************************************************************************/ 62 ******************************************************************************/
113 63
114static void
115nv84_vp_intr(struct nouveau_subdev *subdev)
116{
117}
118
119static int 64static int
120nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 65nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
121 struct nouveau_oclass *oclass, void *data, u32 size, 66 struct nouveau_oclass *oclass, void *data, u32 size,
@@ -124,52 +69,25 @@ nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
124 struct nv84_vp_priv *priv; 69 struct nv84_vp_priv *priv;
125 int ret; 70 int ret;
126 71
127 ret = nouveau_vp_create(parent, engine, oclass, &priv); 72 ret = nouveau_engine_create(parent, engine, oclass, true,
73 "PVP", "vp", &priv);
128 *pobject = nv_object(priv); 74 *pobject = nv_object(priv);
129 if (ret) 75 if (ret)
130 return ret; 76 return ret;
131 77
132 nv_subdev(priv)->unit = 0x01020000; 78 nv_subdev(priv)->unit = 0x01020000;
133 nv_subdev(priv)->intr = nv84_vp_intr;
134 nv_engine(priv)->cclass = &nv84_vp_cclass; 79 nv_engine(priv)->cclass = &nv84_vp_cclass;
135 nv_engine(priv)->sclass = nv84_vp_sclass; 80 nv_engine(priv)->sclass = nv84_vp_sclass;
136 return 0; 81 return 0;
137} 82}
138 83
139static void
140nv84_vp_dtor(struct nouveau_object *object)
141{
142 struct nv84_vp_priv *priv = (void *)object;
143 nouveau_vp_destroy(&priv->base);
144}
145
146static int
147nv84_vp_init(struct nouveau_object *object)
148{
149 struct nv84_vp_priv *priv = (void *)object;
150 int ret;
151
152 ret = nouveau_vp_init(&priv->base);
153 if (ret)
154 return ret;
155
156 return 0;
157}
158
159static int
160nv84_vp_fini(struct nouveau_object *object, bool suspend)
161{
162 struct nv84_vp_priv *priv = (void *)object;
163 return nouveau_vp_fini(&priv->base, suspend);
164}
165
166struct nouveau_oclass 84struct nouveau_oclass
167nv84_vp_oclass = { 85nv84_vp_oclass = {
168 .handle = NV_ENGINE(VP, 0x84), 86 .handle = NV_ENGINE(VP, 0x84),
169 .ofuncs = &(struct nouveau_ofuncs) { 87 .ofuncs = &(struct nouveau_ofuncs) {
170 .ctor = nv84_vp_ctor, 88 .ctor = nv84_vp_ctor,
171 .dtor = nv84_vp_dtor, 89 .dtor = _nouveau_engine_dtor,
172 .init = nv84_vp_init, 90 .init = _nouveau_engine_init,
173 .fini = nv84_vp_fini, 91 .fini = _nouveau_engine_fini,
174 }, 92 },
175}; 93};
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
new file mode 100644
index 000000000000..f761949d7039
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
@@ -0,0 +1,110 @@
1/*
2 * Copyright 2012 Maarten Lankhorst
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Maarten Lankhorst
23 */
24
25#include <core/falcon.h>
26
27#include <engine/vp.h>
28
29struct nvc0_vp_priv {
30 struct nouveau_falcon base;
31};
32
33/*******************************************************************************
34 * VP object classes
35 ******************************************************************************/
36
37static struct nouveau_oclass
38nvc0_vp_sclass[] = {
39 { 0x90b2, &nouveau_object_ofuncs },
40 {},
41};
42
43/*******************************************************************************
44 * PVP context
45 ******************************************************************************/
46
47static struct nouveau_oclass
48nvc0_vp_cclass = {
49 .handle = NV_ENGCTX(VP, 0xc0),
50 .ofuncs = &(struct nouveau_ofuncs) {
51 .ctor = _nouveau_falcon_context_ctor,
52 .dtor = _nouveau_falcon_context_dtor,
53 .init = _nouveau_falcon_context_init,
54 .fini = _nouveau_falcon_context_fini,
55 .rd32 = _nouveau_falcon_context_rd32,
56 .wr32 = _nouveau_falcon_context_wr32,
57 },
58};
59
60/*******************************************************************************
61 * PVP engine/subdev functions
62 ******************************************************************************/
63
64static int
65nvc0_vp_init(struct nouveau_object *object)
66{
67 struct nvc0_vp_priv *priv = (void *)object;
68 int ret;
69
70 ret = nouveau_falcon_init(&priv->base);
71 if (ret)
72 return ret;
73
74 nv_wr32(priv, 0x085010, 0x0000fff2);
75 nv_wr32(priv, 0x08501c, 0x0000fff2);
76 return 0;
77}
78
79static int
80nvc0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
81 struct nouveau_oclass *oclass, void *data, u32 size,
82 struct nouveau_object **pobject)
83{
84 struct nvc0_vp_priv *priv;
85 int ret;
86
87 ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true,
88 "PVP", "vp", &priv);
89 *pobject = nv_object(priv);
90 if (ret)
91 return ret;
92
93 nv_subdev(priv)->unit = 0x00020000;
94 nv_engine(priv)->cclass = &nvc0_vp_cclass;
95 nv_engine(priv)->sclass = nvc0_vp_sclass;
96 return 0;
97}
98
99struct nouveau_oclass
100nvc0_vp_oclass = {
101 .handle = NV_ENGINE(VP, 0xc0),
102 .ofuncs = &(struct nouveau_ofuncs) {
103 .ctor = nvc0_vp_ctor,
104 .dtor = _nouveau_falcon_dtor,
105 .init = nvc0_vp_init,
106 .fini = _nouveau_falcon_fini,
107 .rd32 = _nouveau_falcon_rd32,
108 .wr32 = _nouveau_falcon_wr32,
109 },
110};
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
new file mode 100644
index 000000000000..2384ce5dbe16
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
@@ -0,0 +1,110 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/falcon.h>
26
27#include <engine/vp.h>
28
29struct nve0_vp_priv {
30 struct nouveau_falcon base;
31};
32
33/*******************************************************************************
34 * VP object classes
35 ******************************************************************************/
36
37static struct nouveau_oclass
38nve0_vp_sclass[] = {
39 { 0x95b2, &nouveau_object_ofuncs },
40 {},
41};
42
43/*******************************************************************************
44 * PVP context
45 ******************************************************************************/
46
47static struct nouveau_oclass
48nve0_vp_cclass = {
49 .handle = NV_ENGCTX(VP, 0xe0),
50 .ofuncs = &(struct nouveau_ofuncs) {
51 .ctor = _nouveau_falcon_context_ctor,
52 .dtor = _nouveau_falcon_context_dtor,
53 .init = _nouveau_falcon_context_init,
54 .fini = _nouveau_falcon_context_fini,
55 .rd32 = _nouveau_falcon_context_rd32,
56 .wr32 = _nouveau_falcon_context_wr32,
57 },
58};
59
60/*******************************************************************************
61 * PVP engine/subdev functions
62 ******************************************************************************/
63
64static int
65nve0_vp_init(struct nouveau_object *object)
66{
67 struct nve0_vp_priv *priv = (void *)object;
68 int ret;
69
70 ret = nouveau_falcon_init(&priv->base);
71 if (ret)
72 return ret;
73
74 nv_wr32(priv, 0x085010, 0x0000fff2);
75 nv_wr32(priv, 0x08501c, 0x0000fff2);
76 return 0;
77}
78
79static int
80nve0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
81 struct nouveau_oclass *oclass, void *data, u32 size,
82 struct nouveau_object **pobject)
83{
84 struct nve0_vp_priv *priv;
85 int ret;
86
87 ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true,
88 "PVP", "vp", &priv);
89 *pobject = nv_object(priv);
90 if (ret)
91 return ret;
92
93 nv_subdev(priv)->unit = 0x00020000;
94 nv_engine(priv)->cclass = &nve0_vp_cclass;
95 nv_engine(priv)->sclass = nve0_vp_sclass;
96 return 0;
97}
98
99struct nouveau_oclass
100nve0_vp_oclass = {
101 .handle = NV_ENGINE(VP, 0xe0),
102 .ofuncs = &(struct nouveau_ofuncs) {
103 .ctor = nve0_vp_ctor,
104 .dtor = _nouveau_falcon_dtor,
105 .init = nve0_vp_init,
106 .fini = _nouveau_falcon_fini,
107 .rd32 = _nouveau_falcon_rd32,
108 .wr32 = _nouveau_falcon_wr32,
109 },
110};
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index 6180ae9800fc..47c4b3a5bd3a 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -23,6 +23,7 @@
23#define NV_DEVICE_DISABLE_COPY0 0x0000008000000000ULL 23#define NV_DEVICE_DISABLE_COPY0 0x0000008000000000ULL
24#define NV_DEVICE_DISABLE_COPY1 0x0000010000000000ULL 24#define NV_DEVICE_DISABLE_COPY1 0x0000010000000000ULL
25#define NV_DEVICE_DISABLE_UNK1C1 0x0000020000000000ULL 25#define NV_DEVICE_DISABLE_UNK1C1 0x0000020000000000ULL
26#define NV_DEVICE_DISABLE_VENC 0x0000040000000000ULL
26 27
27struct nv_device_class { 28struct nv_device_class {
28 u64 device; /* device identifier, ~0 for client default */ 29 u64 device; /* device identifier, ~0 for client default */
@@ -52,11 +53,49 @@ struct nv_device_class {
52#define NV_DMA_ACCESS_WR 0x00000200 53#define NV_DMA_ACCESS_WR 0x00000200
53#define NV_DMA_ACCESS_RDWR 0x00000300 54#define NV_DMA_ACCESS_RDWR 0x00000300
54 55
56/* NV50:NVC0 */
57#define NV50_DMA_CONF0_ENABLE 0x80000000
58#define NV50_DMA_CONF0_PRIV 0x00300000
59#define NV50_DMA_CONF0_PRIV_VM 0x00000000
60#define NV50_DMA_CONF0_PRIV_US 0x00100000
61#define NV50_DMA_CONF0_PRIV__S 0x00200000
62#define NV50_DMA_CONF0_PART 0x00030000
63#define NV50_DMA_CONF0_PART_VM 0x00000000
64#define NV50_DMA_CONF0_PART_256 0x00010000
65#define NV50_DMA_CONF0_PART_1KB 0x00020000
66#define NV50_DMA_CONF0_COMP 0x00000180
67#define NV50_DMA_CONF0_COMP_NONE 0x00000000
68#define NV50_DMA_CONF0_COMP_VM 0x00000180
69#define NV50_DMA_CONF0_TYPE 0x0000007f
70#define NV50_DMA_CONF0_TYPE_LINEAR 0x00000000
71#define NV50_DMA_CONF0_TYPE_VM 0x0000007f
72
73/* NVC0:NVD9 */
74#define NVC0_DMA_CONF0_ENABLE 0x80000000
75#define NVC0_DMA_CONF0_PRIV 0x00300000
76#define NVC0_DMA_CONF0_PRIV_VM 0x00000000
77#define NVC0_DMA_CONF0_PRIV_US 0x00100000
78#define NVC0_DMA_CONF0_PRIV__S 0x00200000
79#define NVC0_DMA_CONF0_UNKN /* PART? */ 0x00030000
80#define NVC0_DMA_CONF0_TYPE 0x000000ff
81#define NVC0_DMA_CONF0_TYPE_LINEAR 0x00000000
82#define NVC0_DMA_CONF0_TYPE_VM 0x000000ff
83
84/* NVD9- */
85#define NVD0_DMA_CONF0_ENABLE 0x80000000
86#define NVD0_DMA_CONF0_PAGE 0x00000400
87#define NVD0_DMA_CONF0_PAGE_LP 0x00000000
88#define NVD0_DMA_CONF0_PAGE_SP 0x00000400
89#define NVD0_DMA_CONF0_TYPE 0x000000ff
90#define NVD0_DMA_CONF0_TYPE_LINEAR 0x00000000
91#define NVD0_DMA_CONF0_TYPE_VM 0x000000ff
92
55struct nv_dma_class { 93struct nv_dma_class {
56 u32 flags; 94 u32 flags;
57 u32 pad0; 95 u32 pad0;
58 u64 start; 96 u64 start;
59 u64 limit; 97 u64 limit;
98 u32 conf0;
60}; 99};
61 100
62/* DMA FIFO channel classes 101/* DMA FIFO channel classes
@@ -115,4 +154,190 @@ struct nve0_channel_ind_class {
115 u32 engine; 154 u32 engine;
116}; 155};
117 156
157/* 5070: NV50_DISP
158 * 8270: NV84_DISP
159 * 8370: NVA0_DISP
160 * 8870: NV94_DISP
161 * 8570: NVA3_DISP
162 * 9070: NVD0_DISP
163 * 9170: NVE0_DISP
164 */
165
166#define NV50_DISP_CLASS 0x00005070
167#define NV84_DISP_CLASS 0x00008270
168#define NVA0_DISP_CLASS 0x00008370
169#define NV94_DISP_CLASS 0x00008870
170#define NVA3_DISP_CLASS 0x00008570
171#define NVD0_DISP_CLASS 0x00009070
172#define NVE0_DISP_CLASS 0x00009170
173
174#define NV50_DISP_SOR_MTHD 0x00010000
175#define NV50_DISP_SOR_MTHD_TYPE 0x0000f000
176#define NV50_DISP_SOR_MTHD_HEAD 0x00000018
177#define NV50_DISP_SOR_MTHD_LINK 0x00000004
178#define NV50_DISP_SOR_MTHD_OR 0x00000003
179
180#define NV50_DISP_SOR_PWR 0x00010000
181#define NV50_DISP_SOR_PWR_STATE 0x00000001
182#define NV50_DISP_SOR_PWR_STATE_ON 0x00000001
183#define NV50_DISP_SOR_PWR_STATE_OFF 0x00000000
184#define NVA3_DISP_SOR_HDA_ELD 0x00010100
185#define NV84_DISP_SOR_HDMI_PWR 0x00012000
186#define NV84_DISP_SOR_HDMI_PWR_STATE 0x40000000
187#define NV84_DISP_SOR_HDMI_PWR_STATE_OFF 0x00000000
188#define NV84_DISP_SOR_HDMI_PWR_STATE_ON 0x40000000
189#define NV84_DISP_SOR_HDMI_PWR_MAX_AC_PACKET 0x001f0000
190#define NV84_DISP_SOR_HDMI_PWR_REKEY 0x0000007f
191#define NV50_DISP_SOR_LVDS_SCRIPT 0x00013000
192#define NV50_DISP_SOR_LVDS_SCRIPT_ID 0x0000ffff
193#define NV94_DISP_SOR_DP_TRAIN 0x00016000
194#define NV94_DISP_SOR_DP_TRAIN_OP 0xf0000000
195#define NV94_DISP_SOR_DP_TRAIN_OP_PATTERN 0x00000000
196#define NV94_DISP_SOR_DP_TRAIN_OP_INIT 0x10000000
197#define NV94_DISP_SOR_DP_TRAIN_OP_FINI 0x20000000
198#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD 0x00000001
199#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF 0x00000000
200#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON 0x00000001
201#define NV94_DISP_SOR_DP_TRAIN_PATTERN 0x00000003
202#define NV94_DISP_SOR_DP_TRAIN_PATTERN_DISABLED 0x00000000
203#define NV94_DISP_SOR_DP_LNKCTL 0x00016040
204#define NV94_DISP_SOR_DP_LNKCTL_FRAME 0x80000000
205#define NV94_DISP_SOR_DP_LNKCTL_FRAME_STD 0x00000000
206#define NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH 0x80000000
207#define NV94_DISP_SOR_DP_LNKCTL_WIDTH 0x00001f00
208#define NV94_DISP_SOR_DP_LNKCTL_COUNT 0x00000007
209#define NV94_DISP_SOR_DP_DRVCTL(l) ((l) * 0x40 + 0x00016100)
210#define NV94_DISP_SOR_DP_DRVCTL_VS 0x00000300
211#define NV94_DISP_SOR_DP_DRVCTL_PE 0x00000003
212
213#define NV50_DISP_DAC_MTHD 0x00020000
214#define NV50_DISP_DAC_MTHD_TYPE 0x0000f000
215#define NV50_DISP_DAC_MTHD_OR 0x00000003
216
217#define NV50_DISP_DAC_PWR 0x00020000
218#define NV50_DISP_DAC_PWR_HSYNC 0x00000001
219#define NV50_DISP_DAC_PWR_HSYNC_ON 0x00000000
220#define NV50_DISP_DAC_PWR_HSYNC_LO 0x00000001
221#define NV50_DISP_DAC_PWR_VSYNC 0x00000004
222#define NV50_DISP_DAC_PWR_VSYNC_ON 0x00000000
223#define NV50_DISP_DAC_PWR_VSYNC_LO 0x00000004
224#define NV50_DISP_DAC_PWR_DATA 0x00000010
225#define NV50_DISP_DAC_PWR_DATA_ON 0x00000000
226#define NV50_DISP_DAC_PWR_DATA_LO 0x00000010
227#define NV50_DISP_DAC_PWR_STATE 0x00000040
228#define NV50_DISP_DAC_PWR_STATE_ON 0x00000000
229#define NV50_DISP_DAC_PWR_STATE_OFF 0x00000040
230#define NV50_DISP_DAC_LOAD 0x0002000c
231#define NV50_DISP_DAC_LOAD_VALUE 0x00000007
232
233struct nv50_display_class {
234};
235
236/* 507a: NV50_DISP_CURS
237 * 827a: NV84_DISP_CURS
238 * 837a: NVA0_DISP_CURS
239 * 887a: NV94_DISP_CURS
240 * 857a: NVA3_DISP_CURS
241 * 907a: NVD0_DISP_CURS
242 * 917a: NVE0_DISP_CURS
243 */
244
245#define NV50_DISP_CURS_CLASS 0x0000507a
246#define NV84_DISP_CURS_CLASS 0x0000827a
247#define NVA0_DISP_CURS_CLASS 0x0000837a
248#define NV94_DISP_CURS_CLASS 0x0000887a
249#define NVA3_DISP_CURS_CLASS 0x0000857a
250#define NVD0_DISP_CURS_CLASS 0x0000907a
251#define NVE0_DISP_CURS_CLASS 0x0000917a
252
253struct nv50_display_curs_class {
254 u32 head;
255};
256
257/* 507b: NV50_DISP_OIMM
258 * 827b: NV84_DISP_OIMM
259 * 837b: NVA0_DISP_OIMM
260 * 887b: NV94_DISP_OIMM
261 * 857b: NVA3_DISP_OIMM
262 * 907b: NVD0_DISP_OIMM
263 * 917b: NVE0_DISP_OIMM
264 */
265
266#define NV50_DISP_OIMM_CLASS 0x0000507b
267#define NV84_DISP_OIMM_CLASS 0x0000827b
268#define NVA0_DISP_OIMM_CLASS 0x0000837b
269#define NV94_DISP_OIMM_CLASS 0x0000887b
270#define NVA3_DISP_OIMM_CLASS 0x0000857b
271#define NVD0_DISP_OIMM_CLASS 0x0000907b
272#define NVE0_DISP_OIMM_CLASS 0x0000917b
273
274struct nv50_display_oimm_class {
275 u32 head;
276};
277
278/* 507c: NV50_DISP_SYNC
279 * 827c: NV84_DISP_SYNC
280 * 837c: NVA0_DISP_SYNC
281 * 887c: NV94_DISP_SYNC
282 * 857c: NVA3_DISP_SYNC
283 * 907c: NVD0_DISP_SYNC
284 * 917c: NVE0_DISP_SYNC
285 */
286
287#define NV50_DISP_SYNC_CLASS 0x0000507c
288#define NV84_DISP_SYNC_CLASS 0x0000827c
289#define NVA0_DISP_SYNC_CLASS 0x0000837c
290#define NV94_DISP_SYNC_CLASS 0x0000887c
291#define NVA3_DISP_SYNC_CLASS 0x0000857c
292#define NVD0_DISP_SYNC_CLASS 0x0000907c
293#define NVE0_DISP_SYNC_CLASS 0x0000917c
294
295struct nv50_display_sync_class {
296 u32 pushbuf;
297 u32 head;
298};
299
300/* 507d: NV50_DISP_MAST
301 * 827d: NV84_DISP_MAST
302 * 837d: NVA0_DISP_MAST
303 * 887d: NV94_DISP_MAST
304 * 857d: NVA3_DISP_MAST
305 * 907d: NVD0_DISP_MAST
306 * 917d: NVE0_DISP_MAST
307 */
308
309#define NV50_DISP_MAST_CLASS 0x0000507d
310#define NV84_DISP_MAST_CLASS 0x0000827d
311#define NVA0_DISP_MAST_CLASS 0x0000837d
312#define NV94_DISP_MAST_CLASS 0x0000887d
313#define NVA3_DISP_MAST_CLASS 0x0000857d
314#define NVD0_DISP_MAST_CLASS 0x0000907d
315#define NVE0_DISP_MAST_CLASS 0x0000917d
316
317struct nv50_display_mast_class {
318 u32 pushbuf;
319};
320
321/* 507e: NV50_DISP_OVLY
322 * 827e: NV84_DISP_OVLY
323 * 837e: NVA0_DISP_OVLY
324 * 887e: NV94_DISP_OVLY
325 * 857e: NVA3_DISP_OVLY
326 * 907e: NVD0_DISP_OVLY
327 * 917e: NVE0_DISP_OVLY
328 */
329
330#define NV50_DISP_OVLY_CLASS 0x0000507e
331#define NV84_DISP_OVLY_CLASS 0x0000827e
332#define NVA0_DISP_OVLY_CLASS 0x0000837e
333#define NV94_DISP_OVLY_CLASS 0x0000887e
334#define NVA3_DISP_OVLY_CLASS 0x0000857e
335#define NVD0_DISP_OVLY_CLASS 0x0000907e
336#define NVE0_DISP_OVLY_CLASS 0x0000917e
337
338struct nv50_display_ovly_class {
339 u32 pushbuf;
340 u32 head;
341};
342
118#endif 343#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/client.h b/drivers/gpu/drm/nouveau/core/include/core/client.h
index 0193532ceac9..63acc0346ff2 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/client.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/client.h
@@ -36,6 +36,9 @@ nouveau_client(void *obj)
36 36
37int nouveau_client_create_(const char *name, u64 device, const char *cfg, 37int nouveau_client_create_(const char *name, u64 device, const char *cfg,
38 const char *dbg, int, void **); 38 const char *dbg, int, void **);
39#define nouveau_client_destroy(p) \
40 nouveau_namedb_destroy(&(p)->base)
41
39int nouveau_client_init(struct nouveau_client *); 42int nouveau_client_init(struct nouveau_client *);
40int nouveau_client_fini(struct nouveau_client *, bool suspend); 43int nouveau_client_fini(struct nouveau_client *, bool suspend);
41 44
diff --git a/drivers/gpu/drm/nouveau/core/include/core/engctx.h b/drivers/gpu/drm/nouveau/core/include/core/engctx.h
index 8a947b6872eb..2fd48b564c7d 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/engctx.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/engctx.h
@@ -39,6 +39,9 @@ void nouveau_engctx_destroy(struct nouveau_engctx *);
39int nouveau_engctx_init(struct nouveau_engctx *); 39int nouveau_engctx_init(struct nouveau_engctx *);
40int nouveau_engctx_fini(struct nouveau_engctx *, bool suspend); 40int nouveau_engctx_fini(struct nouveau_engctx *, bool suspend);
41 41
42int _nouveau_engctx_ctor(struct nouveau_object *, struct nouveau_object *,
43 struct nouveau_oclass *, void *, u32,
44 struct nouveau_object **);
42void _nouveau_engctx_dtor(struct nouveau_object *); 45void _nouveau_engctx_dtor(struct nouveau_object *);
43int _nouveau_engctx_init(struct nouveau_object *); 46int _nouveau_engctx_init(struct nouveau_object *);
44int _nouveau_engctx_fini(struct nouveau_object *, bool suspend); 47int _nouveau_engctx_fini(struct nouveau_object *, bool suspend);
diff --git a/drivers/gpu/drm/nouveau/core/include/core/falcon.h b/drivers/gpu/drm/nouveau/core/include/core/falcon.h
new file mode 100644
index 000000000000..1edec386ab36
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/falcon.h
@@ -0,0 +1,81 @@
1#ifndef __NOUVEAU_FALCON_H__
2#define __NOUVEAU_FALCON_H__
3
4#include <core/engine.h>
5#include <core/engctx.h>
6#include <core/gpuobj.h>
7
8struct nouveau_falcon_chan {
9 struct nouveau_engctx base;
10};
11
12#define nouveau_falcon_context_create(p,e,c,g,s,a,f,d) \
13 nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
14#define nouveau_falcon_context_destroy(d) \
15 nouveau_engctx_destroy(&(d)->base)
16#define nouveau_falcon_context_init(d) \
17 nouveau_engctx_init(&(d)->base)
18#define nouveau_falcon_context_fini(d,s) \
19 nouveau_engctx_fini(&(d)->base, (s))
20
21#define _nouveau_falcon_context_ctor _nouveau_engctx_ctor
22#define _nouveau_falcon_context_dtor _nouveau_engctx_dtor
23#define _nouveau_falcon_context_init _nouveau_engctx_init
24#define _nouveau_falcon_context_fini _nouveau_engctx_fini
25#define _nouveau_falcon_context_rd32 _nouveau_engctx_rd32
26#define _nouveau_falcon_context_wr32 _nouveau_engctx_wr32
27
28struct nouveau_falcon_data {
29 bool external;
30};
31
32struct nouveau_falcon {
33 struct nouveau_engine base;
34
35 u32 addr;
36 u8 version;
37 u8 secret;
38
39 struct nouveau_gpuobj *core;
40 bool external;
41
42 struct {
43 u32 limit;
44 u32 *data;
45 u32 size;
46 } code;
47
48 struct {
49 u32 limit;
50 u32 *data;
51 u32 size;
52 } data;
53};
54
55#define nv_falcon(priv) (&(priv)->base)
56
57#define nouveau_falcon_create(p,e,c,b,d,i,f,r) \
58 nouveau_falcon_create_((p), (e), (c), (b), (d), (i), (f), \
59 sizeof(**r),(void **)r)
60#define nouveau_falcon_destroy(p) \
61 nouveau_engine_destroy(&(p)->base)
62#define nouveau_falcon_init(p) ({ \
63 struct nouveau_falcon *falcon = (p); \
64 _nouveau_falcon_init(nv_object(falcon)); \
65})
66#define nouveau_falcon_fini(p,s) ({ \
67 struct nouveau_falcon *falcon = (p); \
68 _nouveau_falcon_fini(nv_object(falcon), (s)); \
69})
70
71int nouveau_falcon_create_(struct nouveau_object *, struct nouveau_object *,
72 struct nouveau_oclass *, u32, bool, const char *,
73 const char *, int, void **);
74
75#define _nouveau_falcon_dtor _nouveau_engine_dtor
76int _nouveau_falcon_init(struct nouveau_object *);
77int _nouveau_falcon_fini(struct nouveau_object *, bool);
78u32 _nouveau_falcon_rd32(struct nouveau_object *, u64);
79void _nouveau_falcon_wr32(struct nouveau_object *, u64, u32);
80
81#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h b/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
index 6eaff79377ae..b3b9ce4e9d38 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
@@ -65,7 +65,7 @@ nouveau_gpuobj_ref(struct nouveau_gpuobj *obj, struct nouveau_gpuobj **ref)
65void _nouveau_gpuobj_dtor(struct nouveau_object *); 65void _nouveau_gpuobj_dtor(struct nouveau_object *);
66int _nouveau_gpuobj_init(struct nouveau_object *); 66int _nouveau_gpuobj_init(struct nouveau_object *);
67int _nouveau_gpuobj_fini(struct nouveau_object *, bool); 67int _nouveau_gpuobj_fini(struct nouveau_object *, bool);
68u32 _nouveau_gpuobj_rd32(struct nouveau_object *, u32); 68u32 _nouveau_gpuobj_rd32(struct nouveau_object *, u64);
69void _nouveau_gpuobj_wr32(struct nouveau_object *, u32, u32); 69void _nouveau_gpuobj_wr32(struct nouveau_object *, u64, u32);
70 70
71#endif 71#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/mm.h b/drivers/gpu/drm/nouveau/core/include/core/mm.h
index 975137ba34a6..2514e81ade02 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/mm.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/mm.h
@@ -21,6 +21,12 @@ struct nouveau_mm {
21 int heap_nodes; 21 int heap_nodes;
22}; 22};
23 23
24static inline bool
25nouveau_mm_initialised(struct nouveau_mm *mm)
26{
27 return mm->block_size != 0;
28}
29
24int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block); 30int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
25int nouveau_mm_fini(struct nouveau_mm *); 31int nouveau_mm_fini(struct nouveau_mm *);
26int nouveau_mm_head(struct nouveau_mm *, u8 type, u32 size_max, u32 size_min, 32int nouveau_mm_head(struct nouveau_mm *, u8 type, u32 size_max, u32 size_min,
diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h
index 818feabbf4a0..5982935ee23a 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/object.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/object.h
@@ -70,7 +70,8 @@ nv_pclass(struct nouveau_object *parent, u32 oclass)
70} 70}
71 71
72struct nouveau_omthds { 72struct nouveau_omthds {
73 u32 method; 73 u32 start;
74 u32 limit;
74 int (*call)(struct nouveau_object *, u32, void *, u32); 75 int (*call)(struct nouveau_object *, u32, void *, u32);
75}; 76};
76 77
@@ -81,12 +82,12 @@ struct nouveau_ofuncs {
81 void (*dtor)(struct nouveau_object *); 82 void (*dtor)(struct nouveau_object *);
82 int (*init)(struct nouveau_object *); 83 int (*init)(struct nouveau_object *);
83 int (*fini)(struct nouveau_object *, bool suspend); 84 int (*fini)(struct nouveau_object *, bool suspend);
84 u8 (*rd08)(struct nouveau_object *, u32 offset); 85 u8 (*rd08)(struct nouveau_object *, u64 offset);
85 u16 (*rd16)(struct nouveau_object *, u32 offset); 86 u16 (*rd16)(struct nouveau_object *, u64 offset);
86 u32 (*rd32)(struct nouveau_object *, u32 offset); 87 u32 (*rd32)(struct nouveau_object *, u64 offset);
87 void (*wr08)(struct nouveau_object *, u32 offset, u8 data); 88 void (*wr08)(struct nouveau_object *, u64 offset, u8 data);
88 void (*wr16)(struct nouveau_object *, u32 offset, u16 data); 89 void (*wr16)(struct nouveau_object *, u64 offset, u16 data);
89 void (*wr32)(struct nouveau_object *, u32 offset, u32 data); 90 void (*wr32)(struct nouveau_object *, u64 offset, u32 data);
90}; 91};
91 92
92static inline struct nouveau_ofuncs * 93static inline struct nouveau_ofuncs *
@@ -109,21 +110,27 @@ int nouveau_object_del(struct nouveau_object *, u32 parent, u32 handle);
109void nouveau_object_debug(void); 110void nouveau_object_debug(void);
110 111
111static inline int 112static inline int
112nv_call(void *obj, u32 mthd, u32 data) 113nv_exec(void *obj, u32 mthd, void *data, u32 size)
113{ 114{
114 struct nouveau_omthds *method = nv_oclass(obj)->omthds; 115 struct nouveau_omthds *method = nv_oclass(obj)->omthds;
115 116
116 while (method && method->call) { 117 while (method && method->call) {
117 if (method->method == mthd) 118 if (mthd >= method->start && mthd <= method->limit)
118 return method->call(obj, mthd, &data, sizeof(data)); 119 return method->call(obj, mthd, data, size);
119 method++; 120 method++;
120 } 121 }
121 122
122 return -EINVAL; 123 return -EINVAL;
123} 124}
124 125
126static inline int
127nv_call(void *obj, u32 mthd, u32 data)
128{
129 return nv_exec(obj, mthd, &data, sizeof(data));
130}
131
125static inline u8 132static inline u8
126nv_ro08(void *obj, u32 addr) 133nv_ro08(void *obj, u64 addr)
127{ 134{
128 u8 data = nv_ofuncs(obj)->rd08(obj, addr); 135 u8 data = nv_ofuncs(obj)->rd08(obj, addr);
129 nv_spam(obj, "nv_ro08 0x%08x 0x%02x\n", addr, data); 136 nv_spam(obj, "nv_ro08 0x%08x 0x%02x\n", addr, data);
@@ -131,7 +138,7 @@ nv_ro08(void *obj, u32 addr)
131} 138}
132 139
133static inline u16 140static inline u16
134nv_ro16(void *obj, u32 addr) 141nv_ro16(void *obj, u64 addr)
135{ 142{
136 u16 data = nv_ofuncs(obj)->rd16(obj, addr); 143 u16 data = nv_ofuncs(obj)->rd16(obj, addr);
137 nv_spam(obj, "nv_ro16 0x%08x 0x%04x\n", addr, data); 144 nv_spam(obj, "nv_ro16 0x%08x 0x%04x\n", addr, data);
@@ -139,7 +146,7 @@ nv_ro16(void *obj, u32 addr)
139} 146}
140 147
141static inline u32 148static inline u32
142nv_ro32(void *obj, u32 addr) 149nv_ro32(void *obj, u64 addr)
143{ 150{
144 u32 data = nv_ofuncs(obj)->rd32(obj, addr); 151 u32 data = nv_ofuncs(obj)->rd32(obj, addr);
145 nv_spam(obj, "nv_ro32 0x%08x 0x%08x\n", addr, data); 152 nv_spam(obj, "nv_ro32 0x%08x 0x%08x\n", addr, data);
@@ -147,42 +154,46 @@ nv_ro32(void *obj, u32 addr)
147} 154}
148 155
149static inline void 156static inline void
150nv_wo08(void *obj, u32 addr, u8 data) 157nv_wo08(void *obj, u64 addr, u8 data)
151{ 158{
152 nv_spam(obj, "nv_wo08 0x%08x 0x%02x\n", addr, data); 159 nv_spam(obj, "nv_wo08 0x%08x 0x%02x\n", addr, data);
153 nv_ofuncs(obj)->wr08(obj, addr, data); 160 nv_ofuncs(obj)->wr08(obj, addr, data);
154} 161}
155 162
156static inline void 163static inline void
157nv_wo16(void *obj, u32 addr, u16 data) 164nv_wo16(void *obj, u64 addr, u16 data)
158{ 165{
159 nv_spam(obj, "nv_wo16 0x%08x 0x%04x\n", addr, data); 166 nv_spam(obj, "nv_wo16 0x%08x 0x%04x\n", addr, data);
160 nv_ofuncs(obj)->wr16(obj, addr, data); 167 nv_ofuncs(obj)->wr16(obj, addr, data);
161} 168}
162 169
163static inline void 170static inline void
164nv_wo32(void *obj, u32 addr, u32 data) 171nv_wo32(void *obj, u64 addr, u32 data)
165{ 172{
166 nv_spam(obj, "nv_wo32 0x%08x 0x%08x\n", addr, data); 173 nv_spam(obj, "nv_wo32 0x%08x 0x%08x\n", addr, data);
167 nv_ofuncs(obj)->wr32(obj, addr, data); 174 nv_ofuncs(obj)->wr32(obj, addr, data);
168} 175}
169 176
170static inline u32 177static inline u32
171nv_mo32(void *obj, u32 addr, u32 mask, u32 data) 178nv_mo32(void *obj, u64 addr, u32 mask, u32 data)
172{ 179{
173 u32 temp = nv_ro32(obj, addr); 180 u32 temp = nv_ro32(obj, addr);
174 nv_wo32(obj, addr, (temp & ~mask) | data); 181 nv_wo32(obj, addr, (temp & ~mask) | data);
175 return temp; 182 return temp;
176} 183}
177 184
178static inline bool 185static inline int
179nv_strncmp(void *obj, u32 addr, u32 len, const char *str) 186nv_memcmp(void *obj, u32 addr, const char *str, u32 len)
180{ 187{
188 unsigned char c1, c2;
189
181 while (len--) { 190 while (len--) {
182 if (nv_ro08(obj, addr++) != *(str++)) 191 c1 = nv_ro08(obj, addr++);
183 return false; 192 c2 = *(str++);
193 if (c1 != c2)
194 return c1 - c2;
184 } 195 }
185 return true; 196 return 0;
186} 197}
187 198
188#endif 199#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/parent.h b/drivers/gpu/drm/nouveau/core/include/core/parent.h
index 3c2e940eb0f8..31cd852c96df 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/parent.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/parent.h
@@ -14,7 +14,7 @@ struct nouveau_parent {
14 struct nouveau_object base; 14 struct nouveau_object base;
15 15
16 struct nouveau_sclass *sclass; 16 struct nouveau_sclass *sclass;
17 u32 engine; 17 u64 engine;
18 18
19 int (*context_attach)(struct nouveau_object *, 19 int (*context_attach)(struct nouveau_object *,
20 struct nouveau_object *); 20 struct nouveau_object *);
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/bsp.h b/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
index 75d1ed5f85fd..13ccdf54dfad 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
@@ -1,45 +1,8 @@
1#ifndef __NOUVEAU_BSP_H__ 1#ifndef __NOUVEAU_BSP_H__
2#define __NOUVEAU_BSP_H__ 2#define __NOUVEAU_BSP_H__
3 3
4#include <core/engine.h>
5#include <core/engctx.h>
6
7struct nouveau_bsp_chan {
8 struct nouveau_engctx base;
9};
10
11#define nouveau_bsp_context_create(p,e,c,g,s,a,f,d) \
12 nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
13#define nouveau_bsp_context_destroy(d) \
14 nouveau_engctx_destroy(&(d)->base)
15#define nouveau_bsp_context_init(d) \
16 nouveau_engctx_init(&(d)->base)
17#define nouveau_bsp_context_fini(d,s) \
18 nouveau_engctx_fini(&(d)->base, (s))
19
20#define _nouveau_bsp_context_dtor _nouveau_engctx_dtor
21#define _nouveau_bsp_context_init _nouveau_engctx_init
22#define _nouveau_bsp_context_fini _nouveau_engctx_fini
23#define _nouveau_bsp_context_rd32 _nouveau_engctx_rd32
24#define _nouveau_bsp_context_wr32 _nouveau_engctx_wr32
25
26struct nouveau_bsp {
27 struct nouveau_engine base;
28};
29
30#define nouveau_bsp_create(p,e,c,d) \
31 nouveau_engine_create((p), (e), (c), true, "PBSP", "bsp", (d))
32#define nouveau_bsp_destroy(d) \
33 nouveau_engine_destroy(&(d)->base)
34#define nouveau_bsp_init(d) \
35 nouveau_engine_init(&(d)->base)
36#define nouveau_bsp_fini(d,s) \
37 nouveau_engine_fini(&(d)->base, (s))
38
39#define _nouveau_bsp_dtor _nouveau_engine_dtor
40#define _nouveau_bsp_init _nouveau_engine_init
41#define _nouveau_bsp_fini _nouveau_engine_fini
42
43extern struct nouveau_oclass nv84_bsp_oclass; 4extern struct nouveau_oclass nv84_bsp_oclass;
5extern struct nouveau_oclass nvc0_bsp_oclass;
6extern struct nouveau_oclass nve0_bsp_oclass;
44 7
45#endif 8#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/copy.h b/drivers/gpu/drm/nouveau/core/include/engine/copy.h
index 70b9d8c5fcf5..8cad2cf28cef 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/copy.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/copy.h
@@ -1,44 +1,7 @@
1#ifndef __NOUVEAU_COPY_H__ 1#ifndef __NOUVEAU_COPY_H__
2#define __NOUVEAU_COPY_H__ 2#define __NOUVEAU_COPY_H__
3 3
4#include <core/engine.h> 4void nva3_copy_intr(struct nouveau_subdev *);
5#include <core/engctx.h>
6
7struct nouveau_copy_chan {
8 struct nouveau_engctx base;
9};
10
11#define nouveau_copy_context_create(p,e,c,g,s,a,f,d) \
12 nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
13#define nouveau_copy_context_destroy(d) \
14 nouveau_engctx_destroy(&(d)->base)
15#define nouveau_copy_context_init(d) \
16 nouveau_engctx_init(&(d)->base)
17#define nouveau_copy_context_fini(d,s) \
18 nouveau_engctx_fini(&(d)->base, (s))
19
20#define _nouveau_copy_context_dtor _nouveau_engctx_dtor
21#define _nouveau_copy_context_init _nouveau_engctx_init
22#define _nouveau_copy_context_fini _nouveau_engctx_fini
23#define _nouveau_copy_context_rd32 _nouveau_engctx_rd32
24#define _nouveau_copy_context_wr32 _nouveau_engctx_wr32
25
26struct nouveau_copy {
27 struct nouveau_engine base;
28};
29
30#define nouveau_copy_create(p,e,c,y,i,d) \
31 nouveau_engine_create((p), (e), (c), (y), "PCE"#i, "copy"#i, (d))
32#define nouveau_copy_destroy(d) \
33 nouveau_engine_destroy(&(d)->base)
34#define nouveau_copy_init(d) \
35 nouveau_engine_init(&(d)->base)
36#define nouveau_copy_fini(d,s) \
37 nouveau_engine_fini(&(d)->base, (s))
38
39#define _nouveau_copy_dtor _nouveau_engine_dtor
40#define _nouveau_copy_init _nouveau_engine_init
41#define _nouveau_copy_fini _nouveau_engine_fini
42 5
43extern struct nouveau_oclass nva3_copy_oclass; 6extern struct nouveau_oclass nva3_copy_oclass;
44extern struct nouveau_oclass nvc0_copy0_oclass; 7extern struct nouveau_oclass nvc0_copy0_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/crypt.h b/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
index e3674743baaa..db975618e937 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
@@ -1,45 +1,6 @@
1#ifndef __NOUVEAU_CRYPT_H__ 1#ifndef __NOUVEAU_CRYPT_H__
2#define __NOUVEAU_CRYPT_H__ 2#define __NOUVEAU_CRYPT_H__
3 3
4#include <core/engine.h>
5#include <core/engctx.h>
6
7struct nouveau_crypt_chan {
8 struct nouveau_engctx base;
9};
10
11#define nouveau_crypt_context_create(p,e,c,g,s,a,f,d) \
12 nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
13#define nouveau_crypt_context_destroy(d) \
14 nouveau_engctx_destroy(&(d)->base)
15#define nouveau_crypt_context_init(d) \
16 nouveau_engctx_init(&(d)->base)
17#define nouveau_crypt_context_fini(d,s) \
18 nouveau_engctx_fini(&(d)->base, (s))
19
20#define _nouveau_crypt_context_dtor _nouveau_engctx_dtor
21#define _nouveau_crypt_context_init _nouveau_engctx_init
22#define _nouveau_crypt_context_fini _nouveau_engctx_fini
23#define _nouveau_crypt_context_rd32 _nouveau_engctx_rd32
24#define _nouveau_crypt_context_wr32 _nouveau_engctx_wr32
25
26struct nouveau_crypt {
27 struct nouveau_engine base;
28};
29
30#define nouveau_crypt_create(p,e,c,d) \
31 nouveau_engine_create((p), (e), (c), true, "PCRYPT", "crypt", (d))
32#define nouveau_crypt_destroy(d) \
33 nouveau_engine_destroy(&(d)->base)
34#define nouveau_crypt_init(d) \
35 nouveau_engine_init(&(d)->base)
36#define nouveau_crypt_fini(d,s) \
37 nouveau_engine_fini(&(d)->base, (s))
38
39#define _nouveau_crypt_dtor _nouveau_engine_dtor
40#define _nouveau_crypt_init _nouveau_engine_init
41#define _nouveau_crypt_fini _nouveau_engine_fini
42
43extern struct nouveau_oclass nv84_crypt_oclass; 4extern struct nouveau_oclass nv84_crypt_oclass;
44extern struct nouveau_oclass nv98_crypt_oclass; 5extern struct nouveau_oclass nv98_crypt_oclass;
45 6
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
index 38ec1252cbaa..46948285f3e7 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -39,6 +39,11 @@ nouveau_disp(void *obj)
39 39
40extern struct nouveau_oclass nv04_disp_oclass; 40extern struct nouveau_oclass nv04_disp_oclass;
41extern struct nouveau_oclass nv50_disp_oclass; 41extern struct nouveau_oclass nv50_disp_oclass;
42extern struct nouveau_oclass nv84_disp_oclass;
43extern struct nouveau_oclass nva0_disp_oclass;
44extern struct nouveau_oclass nv94_disp_oclass;
45extern struct nouveau_oclass nva3_disp_oclass;
42extern struct nouveau_oclass nvd0_disp_oclass; 46extern struct nouveau_oclass nvd0_disp_oclass;
47extern struct nouveau_oclass nve0_disp_oclass;
43 48
44#endif 49#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
index 700ccbb1941f..b28914ed1752 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
@@ -12,29 +12,17 @@ struct nouveau_dmaobj {
12 u32 access; 12 u32 access;
13 u64 start; 13 u64 start;
14 u64 limit; 14 u64 limit;
15 u32 conf0;
15}; 16};
16 17
17#define nouveau_dmaobj_create(p,e,c,a,s,d) \
18 nouveau_dmaobj_create_((p), (e), (c), (a), (s), sizeof(**d), (void **)d)
19#define nouveau_dmaobj_destroy(p) \
20 nouveau_object_destroy(&(p)->base)
21#define nouveau_dmaobj_init(p) \
22 nouveau_object_init(&(p)->base)
23#define nouveau_dmaobj_fini(p,s) \
24 nouveau_object_fini(&(p)->base, (s))
25
26int nouveau_dmaobj_create_(struct nouveau_object *, struct nouveau_object *,
27 struct nouveau_oclass *, void *data, u32 size,
28 int length, void **);
29
30#define _nouveau_dmaobj_dtor nouveau_object_destroy
31#define _nouveau_dmaobj_init nouveau_object_init
32#define _nouveau_dmaobj_fini nouveau_object_fini
33
34struct nouveau_dmaeng { 18struct nouveau_dmaeng {
35 struct nouveau_engine base; 19 struct nouveau_engine base;
36 int (*bind)(struct nouveau_dmaeng *, struct nouveau_object *parent, 20
37 struct nouveau_dmaobj *, struct nouveau_gpuobj **); 21 /* creates a "physical" dma object from a struct nouveau_dmaobj */
22 int (*bind)(struct nouveau_dmaeng *dmaeng,
23 struct nouveau_object *parent,
24 struct nouveau_dmaobj *dmaobj,
25 struct nouveau_gpuobj **);
38}; 26};
39 27
40#define nouveau_dmaeng_create(p,e,c,d) \ 28#define nouveau_dmaeng_create(p,e,c,d) \
@@ -53,5 +41,8 @@ struct nouveau_dmaeng {
53extern struct nouveau_oclass nv04_dmaeng_oclass; 41extern struct nouveau_oclass nv04_dmaeng_oclass;
54extern struct nouveau_oclass nv50_dmaeng_oclass; 42extern struct nouveau_oclass nv50_dmaeng_oclass;
55extern struct nouveau_oclass nvc0_dmaeng_oclass; 43extern struct nouveau_oclass nvc0_dmaeng_oclass;
44extern struct nouveau_oclass nvd0_dmaeng_oclass;
45
46extern struct nouveau_oclass nouveau_dmaobj_sclass[];
56 47
57#endif 48#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index d67fed1e3970..f18846c8c6fe 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -33,15 +33,15 @@ int nouveau_fifo_channel_create_(struct nouveau_object *,
33 struct nouveau_object *, 33 struct nouveau_object *,
34 struct nouveau_oclass *, 34 struct nouveau_oclass *,
35 int bar, u32 addr, u32 size, u32 push, 35 int bar, u32 addr, u32 size, u32 push,
36 u32 engmask, int len, void **); 36 u64 engmask, int len, void **);
37void nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *); 37void nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *);
38 38
39#define _nouveau_fifo_channel_init _nouveau_namedb_init 39#define _nouveau_fifo_channel_init _nouveau_namedb_init
40#define _nouveau_fifo_channel_fini _nouveau_namedb_fini 40#define _nouveau_fifo_channel_fini _nouveau_namedb_fini
41 41
42void _nouveau_fifo_channel_dtor(struct nouveau_object *); 42void _nouveau_fifo_channel_dtor(struct nouveau_object *);
43u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u32); 43u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u64);
44void _nouveau_fifo_channel_wr32(struct nouveau_object *, u32, u32); 44void _nouveau_fifo_channel_wr32(struct nouveau_object *, u64, u32);
45 45
46struct nouveau_fifo_base { 46struct nouveau_fifo_base {
47 struct nouveau_gpuobj base; 47 struct nouveau_gpuobj base;
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/ppp.h b/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
index 74d554fb3281..0a66781e8cf1 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
@@ -1,45 +1,7 @@
1#ifndef __NOUVEAU_PPP_H__ 1#ifndef __NOUVEAU_PPP_H__
2#define __NOUVEAU_PPP_H__ 2#define __NOUVEAU_PPP_H__
3 3
4#include <core/engine.h>
5#include <core/engctx.h>
6
7struct nouveau_ppp_chan {
8 struct nouveau_engctx base;
9};
10
11#define nouveau_ppp_context_create(p,e,c,g,s,a,f,d) \
12 nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
13#define nouveau_ppp_context_destroy(d) \
14 nouveau_engctx_destroy(&(d)->base)
15#define nouveau_ppp_context_init(d) \
16 nouveau_engctx_init(&(d)->base)
17#define nouveau_ppp_context_fini(d,s) \
18 nouveau_engctx_fini(&(d)->base, (s))
19
20#define _nouveau_ppp_context_dtor _nouveau_engctx_dtor
21#define _nouveau_ppp_context_init _nouveau_engctx_init
22#define _nouveau_ppp_context_fini _nouveau_engctx_fini
23#define _nouveau_ppp_context_rd32 _nouveau_engctx_rd32
24#define _nouveau_ppp_context_wr32 _nouveau_engctx_wr32
25
26struct nouveau_ppp {
27 struct nouveau_engine base;
28};
29
30#define nouveau_ppp_create(p,e,c,d) \
31 nouveau_engine_create((p), (e), (c), true, "PPPP", "ppp", (d))
32#define nouveau_ppp_destroy(d) \
33 nouveau_engine_destroy(&(d)->base)
34#define nouveau_ppp_init(d) \
35 nouveau_engine_init(&(d)->base)
36#define nouveau_ppp_fini(d,s) \
37 nouveau_engine_fini(&(d)->base, (s))
38
39#define _nouveau_ppp_dtor _nouveau_engine_dtor
40#define _nouveau_ppp_init _nouveau_engine_init
41#define _nouveau_ppp_fini _nouveau_engine_fini
42
43extern struct nouveau_oclass nv98_ppp_oclass; 4extern struct nouveau_oclass nv98_ppp_oclass;
5extern struct nouveau_oclass nvc0_ppp_oclass;
44 6
45#endif 7#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/vp.h b/drivers/gpu/drm/nouveau/core/include/engine/vp.h
index 05cd08fba377..d7b287b115bf 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/vp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/vp.h
@@ -1,45 +1,8 @@
1#ifndef __NOUVEAU_VP_H__ 1#ifndef __NOUVEAU_VP_H__
2#define __NOUVEAU_VP_H__ 2#define __NOUVEAU_VP_H__
3 3
4#include <core/engine.h>
5#include <core/engctx.h>
6
7struct nouveau_vp_chan {
8 struct nouveau_engctx base;
9};
10
11#define nouveau_vp_context_create(p,e,c,g,s,a,f,d) \
12 nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
13#define nouveau_vp_context_destroy(d) \
14 nouveau_engctx_destroy(&(d)->base)
15#define nouveau_vp_context_init(d) \
16 nouveau_engctx_init(&(d)->base)
17#define nouveau_vp_context_fini(d,s) \
18 nouveau_engctx_fini(&(d)->base, (s))
19
20#define _nouveau_vp_context_dtor _nouveau_engctx_dtor
21#define _nouveau_vp_context_init _nouveau_engctx_init
22#define _nouveau_vp_context_fini _nouveau_engctx_fini
23#define _nouveau_vp_context_rd32 _nouveau_engctx_rd32
24#define _nouveau_vp_context_wr32 _nouveau_engctx_wr32
25
26struct nouveau_vp {
27 struct nouveau_engine base;
28};
29
30#define nouveau_vp_create(p,e,c,d) \
31 nouveau_engine_create((p), (e), (c), true, "PVP", "vp", (d))
32#define nouveau_vp_destroy(d) \
33 nouveau_engine_destroy(&(d)->base)
34#define nouveau_vp_init(d) \
35 nouveau_engine_init(&(d)->base)
36#define nouveau_vp_fini(d,s) \
37 nouveau_engine_fini(&(d)->base, (s))
38
39#define _nouveau_vp_dtor _nouveau_engine_dtor
40#define _nouveau_vp_init _nouveau_engine_init
41#define _nouveau_vp_fini _nouveau_engine_fini
42
43extern struct nouveau_oclass nv84_vp_oclass; 4extern struct nouveau_oclass nv84_vp_oclass;
5extern struct nouveau_oclass nvc0_vp_oclass;
6extern struct nouveau_oclass nve0_vp_oclass;
44 7
45#endif 8#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios.h
index d145b25e6be4..5bd1ca8cd20d 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios.h
@@ -17,6 +17,7 @@ struct nouveau_bios {
17 u8 chip; 17 u8 chip;
18 u8 minor; 18 u8 minor;
19 u8 micro; 19 u8 micro;
20 u8 patch;
20 } version; 21 } version;
21}; 22};
22 23
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
index d682fb625833..b79025da581e 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
@@ -23,6 +23,7 @@ struct dcb_output {
23 uint8_t bus; 23 uint8_t bus;
24 uint8_t location; 24 uint8_t location;
25 uint8_t or; 25 uint8_t or;
26 uint8_t link;
26 bool duallink_possible; 27 bool duallink_possible;
27 union { 28 union {
28 struct sor_conf { 29 struct sor_conf {
@@ -55,36 +56,11 @@ struct dcb_output {
55 56
56u16 dcb_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *ent, u8 *len); 57u16 dcb_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *ent, u8 *len);
57u16 dcb_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len); 58u16 dcb_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len);
59u16 dcb_outp_parse(struct nouveau_bios *, u8 idx, u8 *, u8 *,
60 struct dcb_output *);
61u16 dcb_outp_match(struct nouveau_bios *, u16 type, u16 mask, u8 *, u8 *,
62 struct dcb_output *);
58int dcb_outp_foreach(struct nouveau_bios *, void *data, int (*exec) 63int dcb_outp_foreach(struct nouveau_bios *, void *data, int (*exec)
59 (struct nouveau_bios *, void *, int index, u16 entry)); 64 (struct nouveau_bios *, void *, int index, u16 entry));
60 65
61
62/* BIT 'U'/'d' table encoder subtables have hashes matching them to
63 * a particular set of encoders.
64 *
65 * This function returns true if a particular DCB entry matches.
66 */
67static inline bool
68dcb_hash_match(struct dcb_output *dcb, u32 hash)
69{
70 if ((hash & 0x000000f0) != (dcb->location << 4))
71 return false;
72 if ((hash & 0x0000000f) != dcb->type)
73 return false;
74 if (!(hash & (dcb->or << 16)))
75 return false;
76
77 switch (dcb->type) {
78 case DCB_OUTPUT_TMDS:
79 case DCB_OUTPUT_LVDS:
80 case DCB_OUTPUT_DP:
81 if (hash & 0x00c00000) {
82 if (!(hash & (dcb->sorconf.link << 22)))
83 return false;
84 }
85 default:
86 return true;
87 }
88}
89
90#endif 66#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h
new file mode 100644
index 000000000000..c35937e2f6a4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h
@@ -0,0 +1,48 @@
1#ifndef __NVBIOS_DISP_H__
2#define __NVBIOS_DISP_H__
3
4u16 nvbios_disp_table(struct nouveau_bios *,
5 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub);
6
7struct nvbios_disp {
8 u16 data;
9};
10
11u16 nvbios_disp_entry(struct nouveau_bios *, u8 idx,
12 u8 *ver, u8 *hdr__, u8 *sub);
13u16 nvbios_disp_parse(struct nouveau_bios *, u8 idx,
14 u8 *ver, u8 *hdr__, u8 *sub,
15 struct nvbios_disp *);
16
17struct nvbios_outp {
18 u16 type;
19 u16 mask;
20 u16 script[3];
21};
22
23u16 nvbios_outp_entry(struct nouveau_bios *, u8 idx,
24 u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
25u16 nvbios_outp_parse(struct nouveau_bios *, u8 idx,
26 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
27 struct nvbios_outp *);
28u16 nvbios_outp_match(struct nouveau_bios *, u16 type, u16 mask,
29 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
30 struct nvbios_outp *);
31
32
33struct nvbios_ocfg {
34 u16 match;
35 u16 clkcmp[2];
36};
37
38u16 nvbios_ocfg_entry(struct nouveau_bios *, u16 outp, u8 idx,
39 u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
40u16 nvbios_ocfg_parse(struct nouveau_bios *, u16 outp, u8 idx,
41 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
42 struct nvbios_ocfg *);
43u16 nvbios_ocfg_match(struct nouveau_bios *, u16 outp, u16 type,
44 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
45 struct nvbios_ocfg *);
46u16 nvbios_oclk_match(struct nouveau_bios *, u16 cmp, u32 khz);
47
48#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
index 73b5e5d3e75a..6e54218b55fc 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
@@ -1,8 +1,34 @@
1#ifndef __NVBIOS_DP_H__ 1#ifndef __NVBIOS_DP_H__
2#define __NVBIOS_DP_H__ 2#define __NVBIOS_DP_H__
3 3
4u16 dp_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len); 4struct nvbios_dpout {
5u16 dp_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len); 5 u16 type;
6u16 dp_outp_match(struct nouveau_bios *, struct dcb_output *, u8 *ver, u8 *len); 6 u16 mask;
7 u8 flags;
8 u32 script[5];
9 u32 lnkcmp;
10};
11
12u16 nvbios_dpout_parse(struct nouveau_bios *, u8 idx,
13 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
14 struct nvbios_dpout *);
15u16 nvbios_dpout_match(struct nouveau_bios *, u16 type, u16 mask,
16 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
17 struct nvbios_dpout *);
18
19struct nvbios_dpcfg {
20 u8 drv;
21 u8 pre;
22 u8 unk;
23};
24
25u16
26nvbios_dpcfg_parse(struct nouveau_bios *, u16 outp, u8 idx,
27 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
28 struct nvbios_dpcfg *);
29u16
30nvbios_dpcfg_match(struct nouveau_bios *, u16 outp, u8 un, u8 vs, u8 pe,
31 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
32 struct nvbios_dpcfg *);
7 33
8#endif 34#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
index 2bf178082a36..e6563b5cb08e 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/gpio.h
@@ -25,9 +25,11 @@ struct dcb_gpio_func {
25 u8 param; 25 u8 param;
26}; 26};
27 27
28u16 dcb_gpio_table(struct nouveau_bios *); 28u16 dcb_gpio_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
29u16 dcb_gpio_entry(struct nouveau_bios *, int idx, int ent, u8 *ver); 29u16 dcb_gpio_entry(struct nouveau_bios *, int idx, int ent, u8 *ver, u8 *len);
30int dcb_gpio_parse(struct nouveau_bios *, int idx, u8 func, u8 line, 30u16 dcb_gpio_parse(struct nouveau_bios *, int idx, int ent, u8 *ver, u8 *len,
31 struct dcb_gpio_func *); 31 struct dcb_gpio_func *);
32u16 dcb_gpio_match(struct nouveau_bios *, int idx, u8 func, u8 line,
33 u8 *ver, u8 *len, struct dcb_gpio_func *);
32 34
33#endif 35#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h
index e69a8bdc6e97..ca2f6bf37f46 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/init.h
@@ -13,6 +13,7 @@ struct nvbios_init {
13 u32 nested; 13 u32 nested;
14 u16 repeat; 14 u16 repeat;
15 u16 repend; 15 u16 repend;
16 u32 ramcfg;
16}; 17};
17 18
18int nvbios_exec(struct nvbios_init *); 19int nvbios_exec(struct nvbios_init *);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
index c345097592f2..b2f3d4d0aa49 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pll.h
@@ -38,6 +38,8 @@ enum nvbios_pll_type {
38 PLL_UNK42 = 0x42, 38 PLL_UNK42 = 0x42,
39 PLL_VPLL0 = 0x80, 39 PLL_VPLL0 = 0x80,
40 PLL_VPLL1 = 0x81, 40 PLL_VPLL1 = 0x81,
41 PLL_VPLL2 = 0x82,
42 PLL_VPLL3 = 0x83,
41 PLL_MAX = 0xff 43 PLL_MAX = 0xff
42}; 44};
43 45
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
index 39e73b91d360..41b7a6a76f19 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/clock.h
@@ -54,6 +54,7 @@ int nv04_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
54 int clk, struct nouveau_pll_vals *); 54 int clk, struct nouveau_pll_vals *);
55int nv04_clock_pll_prog(struct nouveau_clock *, u32 reg1, 55int nv04_clock_pll_prog(struct nouveau_clock *, u32 reg1,
56 struct nouveau_pll_vals *); 56 struct nouveau_pll_vals *);
57 57int nva3_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
58 int clk, struct nouveau_pll_vals *);
58 59
59#endif 60#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index 5c1b5e1904f9..da470e6851b1 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -69,8 +69,11 @@ struct nouveau_fb {
69 } type; 69 } type;
70 u64 stolen; 70 u64 stolen;
71 u64 size; 71 u64 size;
72
72 int ranks; 73 int ranks;
74 int parts;
73 75
76 int (*init)(struct nouveau_fb *);
74 int (*get)(struct nouveau_fb *, u64 size, u32 align, 77 int (*get)(struct nouveau_fb *, u64 size, u32 align,
75 u32 size_nc, u32 type, struct nouveau_mem **); 78 u32 size_nc, u32 type, struct nouveau_mem **);
76 void (*put)(struct nouveau_fb *, struct nouveau_mem **); 79 void (*put)(struct nouveau_fb *, struct nouveau_mem **);
@@ -84,6 +87,8 @@ struct nouveau_fb {
84 int regions; 87 int regions;
85 void (*init)(struct nouveau_fb *, int i, u32 addr, u32 size, 88 void (*init)(struct nouveau_fb *, int i, u32 addr, u32 size,
86 u32 pitch, u32 flags, struct nouveau_fb_tile *); 89 u32 pitch, u32 flags, struct nouveau_fb_tile *);
90 void (*comp)(struct nouveau_fb *, int i, u32 size, u32 flags,
91 struct nouveau_fb_tile *);
87 void (*fini)(struct nouveau_fb *, int i, 92 void (*fini)(struct nouveau_fb *, int i,
88 struct nouveau_fb_tile *); 93 struct nouveau_fb_tile *);
89 void (*prog)(struct nouveau_fb *, int i, 94 void (*prog)(struct nouveau_fb *, int i,
@@ -99,7 +104,7 @@ nouveau_fb(void *obj)
99 104
100#define nouveau_fb_create(p,e,c,d) \ 105#define nouveau_fb_create(p,e,c,d) \
101 nouveau_subdev_create((p), (e), (c), 0, "PFB", "fb", (d)) 106 nouveau_subdev_create((p), (e), (c), 0, "PFB", "fb", (d))
102int nouveau_fb_created(struct nouveau_fb *); 107int nouveau_fb_preinit(struct nouveau_fb *);
103void nouveau_fb_destroy(struct nouveau_fb *); 108void nouveau_fb_destroy(struct nouveau_fb *);
104int nouveau_fb_init(struct nouveau_fb *); 109int nouveau_fb_init(struct nouveau_fb *);
105#define nouveau_fb_fini(p,s) \ 110#define nouveau_fb_fini(p,s) \
@@ -111,9 +116,19 @@ int _nouveau_fb_init(struct nouveau_object *);
111 116
112extern struct nouveau_oclass nv04_fb_oclass; 117extern struct nouveau_oclass nv04_fb_oclass;
113extern struct nouveau_oclass nv10_fb_oclass; 118extern struct nouveau_oclass nv10_fb_oclass;
119extern struct nouveau_oclass nv1a_fb_oclass;
114extern struct nouveau_oclass nv20_fb_oclass; 120extern struct nouveau_oclass nv20_fb_oclass;
121extern struct nouveau_oclass nv25_fb_oclass;
115extern struct nouveau_oclass nv30_fb_oclass; 122extern struct nouveau_oclass nv30_fb_oclass;
123extern struct nouveau_oclass nv35_fb_oclass;
124extern struct nouveau_oclass nv36_fb_oclass;
116extern struct nouveau_oclass nv40_fb_oclass; 125extern struct nouveau_oclass nv40_fb_oclass;
126extern struct nouveau_oclass nv41_fb_oclass;
127extern struct nouveau_oclass nv44_fb_oclass;
128extern struct nouveau_oclass nv46_fb_oclass;
129extern struct nouveau_oclass nv47_fb_oclass;
130extern struct nouveau_oclass nv49_fb_oclass;
131extern struct nouveau_oclass nv4e_fb_oclass;
117extern struct nouveau_oclass nv50_fb_oclass; 132extern struct nouveau_oclass nv50_fb_oclass;
118extern struct nouveau_oclass nvc0_fb_oclass; 133extern struct nouveau_oclass nvc0_fb_oclass;
119 134
@@ -122,13 +137,35 @@ int nouveau_fb_bios_memtype(struct nouveau_bios *);
122 137
123bool nv04_fb_memtype_valid(struct nouveau_fb *, u32 memtype); 138bool nv04_fb_memtype_valid(struct nouveau_fb *, u32 memtype);
124 139
140void nv10_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
141 u32 pitch, u32 flags, struct nouveau_fb_tile *);
142void nv10_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
125void nv10_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *); 143void nv10_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
126 144
145int nv20_fb_vram_init(struct nouveau_fb *);
146void nv20_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
147 u32 pitch, u32 flags, struct nouveau_fb_tile *);
148void nv20_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
149void nv20_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
150
151int nv30_fb_init(struct nouveau_object *);
127void nv30_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size, 152void nv30_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
128 u32 pitch, u32 flags, struct nouveau_fb_tile *); 153 u32 pitch, u32 flags, struct nouveau_fb_tile *);
129void nv30_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *); 154
155void nv40_fb_tile_comp(struct nouveau_fb *, int i, u32 size, u32 flags,
156 struct nouveau_fb_tile *);
157
158int nv41_fb_vram_init(struct nouveau_fb *);
159int nv41_fb_init(struct nouveau_object *);
160void nv41_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
161
162int nv44_fb_vram_init(struct nouveau_fb *);
163int nv44_fb_init(struct nouveau_object *);
164void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
165
166void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
167 u32 pitch, u32 flags, struct nouveau_fb_tile *);
130 168
131void nv50_fb_vram_del(struct nouveau_fb *, struct nouveau_mem **); 169void nv50_fb_vram_del(struct nouveau_fb *, struct nouveau_mem **);
132void nv50_fb_trap(struct nouveau_fb *, int display);
133 170
134#endif 171#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
index 9ea2b12cc15d..b75e8f18e52c 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/gpio.h
@@ -11,7 +11,7 @@ struct nouveau_gpio {
11 struct nouveau_subdev base; 11 struct nouveau_subdev base;
12 12
13 /* hardware interfaces */ 13 /* hardware interfaces */
14 void (*reset)(struct nouveau_gpio *); 14 void (*reset)(struct nouveau_gpio *, u8 func);
15 int (*drive)(struct nouveau_gpio *, int line, int dir, int out); 15 int (*drive)(struct nouveau_gpio *, int line, int dir, int out);
16 int (*sense)(struct nouveau_gpio *, int line); 16 int (*sense)(struct nouveau_gpio *, int line);
17 void (*irq_enable)(struct nouveau_gpio *, int line, bool); 17 void (*irq_enable)(struct nouveau_gpio *, int line, bool);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
index cd01c533007a..d70ba342aa2e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
@@ -65,14 +65,14 @@ nouveau_barobj_dtor(struct nouveau_object *object)
65} 65}
66 66
67static u32 67static u32
68nouveau_barobj_rd32(struct nouveau_object *object, u32 addr) 68nouveau_barobj_rd32(struct nouveau_object *object, u64 addr)
69{ 69{
70 struct nouveau_barobj *barobj = (void *)object; 70 struct nouveau_barobj *barobj = (void *)object;
71 return ioread32_native(barobj->iomem + addr); 71 return ioread32_native(barobj->iomem + addr);
72} 72}
73 73
74static void 74static void
75nouveau_barobj_wr32(struct nouveau_object *object, u32 addr, u32 data) 75nouveau_barobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
76{ 76{
77 struct nouveau_barobj *barobj = (void *)object; 77 struct nouveau_barobj *barobj = (void *)object;
78 iowrite32_native(data, barobj->iomem + addr); 78 iowrite32_native(data, barobj->iomem + addr);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index 70ca7d5a1aa1..f621f69fa1a2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -63,7 +63,7 @@ nouveau_bios_shadow_of(struct nouveau_bios *bios)
63 struct pci_dev *pdev = nv_device(bios)->pdev; 63 struct pci_dev *pdev = nv_device(bios)->pdev;
64 struct device_node *dn; 64 struct device_node *dn;
65 const u32 *data; 65 const u32 *data;
66 int size, i; 66 int size;
67 67
68 dn = pci_device_to_OF_node(pdev); 68 dn = pci_device_to_OF_node(pdev);
69 if (!dn) { 69 if (!dn) {
@@ -210,11 +210,19 @@ nouveau_bios_shadow_acpi(struct nouveau_bios *bios)
210 return; 210 return;
211 211
212 bios->data = kmalloc(bios->size, GFP_KERNEL); 212 bios->data = kmalloc(bios->size, GFP_KERNEL);
213 for (i = 0; bios->data && i < bios->size; i += cnt) { 213 if (bios->data) {
214 cnt = min((bios->size - i), (u32)4096); 214 /* disobey the acpi spec - much faster on at least w530 ... */
215 ret = nouveau_acpi_get_bios_chunk(bios->data, i, cnt); 215 ret = nouveau_acpi_get_bios_chunk(bios->data, 0, bios->size);
216 if (ret != cnt) 216 if (ret != bios->size ||
217 break; 217 nvbios_checksum(bios->data, bios->size)) {
218 /* ... that didn't work, ok, i'll be good now */
219 for (i = 0; i < bios->size; i += cnt) {
220 cnt = min((bios->size - i), (u32)4096);
221 ret = nouveau_acpi_get_bios_chunk(bios->data, i, cnt);
222 if (ret != cnt)
223 break;
224 }
225 }
218 } 226 }
219} 227}
220 228
@@ -358,42 +366,42 @@ nouveau_bios_shadow(struct nouveau_bios *bios)
358} 366}
359 367
360static u8 368static u8
361nouveau_bios_rd08(struct nouveau_object *object, u32 addr) 369nouveau_bios_rd08(struct nouveau_object *object, u64 addr)
362{ 370{
363 struct nouveau_bios *bios = (void *)object; 371 struct nouveau_bios *bios = (void *)object;
364 return bios->data[addr]; 372 return bios->data[addr];
365} 373}
366 374
367static u16 375static u16
368nouveau_bios_rd16(struct nouveau_object *object, u32 addr) 376nouveau_bios_rd16(struct nouveau_object *object, u64 addr)
369{ 377{
370 struct nouveau_bios *bios = (void *)object; 378 struct nouveau_bios *bios = (void *)object;
371 return get_unaligned_le16(&bios->data[addr]); 379 return get_unaligned_le16(&bios->data[addr]);
372} 380}
373 381
374static u32 382static u32
375nouveau_bios_rd32(struct nouveau_object *object, u32 addr) 383nouveau_bios_rd32(struct nouveau_object *object, u64 addr)
376{ 384{
377 struct nouveau_bios *bios = (void *)object; 385 struct nouveau_bios *bios = (void *)object;
378 return get_unaligned_le32(&bios->data[addr]); 386 return get_unaligned_le32(&bios->data[addr]);
379} 387}
380 388
381static void 389static void
382nouveau_bios_wr08(struct nouveau_object *object, u32 addr, u8 data) 390nouveau_bios_wr08(struct nouveau_object *object, u64 addr, u8 data)
383{ 391{
384 struct nouveau_bios *bios = (void *)object; 392 struct nouveau_bios *bios = (void *)object;
385 bios->data[addr] = data; 393 bios->data[addr] = data;
386} 394}
387 395
388static void 396static void
389nouveau_bios_wr16(struct nouveau_object *object, u32 addr, u16 data) 397nouveau_bios_wr16(struct nouveau_object *object, u64 addr, u16 data)
390{ 398{
391 struct nouveau_bios *bios = (void *)object; 399 struct nouveau_bios *bios = (void *)object;
392 put_unaligned_le16(data, &bios->data[addr]); 400 put_unaligned_le16(data, &bios->data[addr]);
393} 401}
394 402
395static void 403static void
396nouveau_bios_wr32(struct nouveau_object *object, u32 addr, u32 data) 404nouveau_bios_wr32(struct nouveau_object *object, u64 addr, u32 data)
397{ 405{
398 struct nouveau_bios *bios = (void *)object; 406 struct nouveau_bios *bios = (void *)object;
399 put_unaligned_le32(data, &bios->data[addr]); 407 put_unaligned_le32(data, &bios->data[addr]);
@@ -439,6 +447,7 @@ nouveau_bios_ctor(struct nouveau_object *parent,
439 bios->version.chip = nv_ro08(bios, bit_i.offset + 2); 447 bios->version.chip = nv_ro08(bios, bit_i.offset + 2);
440 bios->version.minor = nv_ro08(bios, bit_i.offset + 1); 448 bios->version.minor = nv_ro08(bios, bit_i.offset + 1);
441 bios->version.micro = nv_ro08(bios, bit_i.offset + 0); 449 bios->version.micro = nv_ro08(bios, bit_i.offset + 0);
450 bios->version.patch = nv_ro08(bios, bit_i.offset + 4);
442 } else 451 } else
443 if (bmp_version(bios)) { 452 if (bmp_version(bios)) {
444 bios->version.major = nv_ro08(bios, bios->bmp_offset + 13); 453 bios->version.major = nv_ro08(bios, bios->bmp_offset + 13);
@@ -447,9 +456,9 @@ nouveau_bios_ctor(struct nouveau_object *parent,
447 bios->version.micro = nv_ro08(bios, bios->bmp_offset + 10); 456 bios->version.micro = nv_ro08(bios, bios->bmp_offset + 10);
448 } 457 }
449 458
450 nv_info(bios, "version %02x.%02x.%02x.%02x\n", 459 nv_info(bios, "version %02x.%02x.%02x.%02x.%02x\n",
451 bios->version.major, bios->version.chip, 460 bios->version.major, bios->version.chip,
452 bios->version.minor, bios->version.micro); 461 bios->version.minor, bios->version.micro, bios->version.patch);
453 462
454 return 0; 463 return 0;
455} 464}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
index 7d750382a833..0fd87df99dd6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
@@ -64,7 +64,7 @@ dcb_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
64 } 64 }
65 } else 65 } else
66 if (*ver >= 0x15) { 66 if (*ver >= 0x15) {
67 if (!nv_strncmp(bios, dcb - 7, 7, "DEV_REC")) { 67 if (!nv_memcmp(bios, dcb - 7, "DEV_REC", 7)) {
68 u16 i2c = nv_ro16(bios, dcb + 2); 68 u16 i2c = nv_ro16(bios, dcb + 2);
69 *hdr = 4; 69 *hdr = 4;
70 *cnt = (i2c - dcb) / 10; 70 *cnt = (i2c - dcb) / 10;
@@ -107,6 +107,69 @@ dcb_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
107 return 0x0000; 107 return 0x0000;
108} 108}
109 109
110u16
111dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
112 struct dcb_output *outp)
113{
114 u16 dcb = dcb_outp(bios, idx, ver, len);
115 if (dcb) {
116 if (*ver >= 0x20) {
117 u32 conn = nv_ro32(bios, dcb + 0x00);
118 outp->or = (conn & 0x0f000000) >> 24;
119 outp->location = (conn & 0x00300000) >> 20;
120 outp->bus = (conn & 0x000f0000) >> 16;
121 outp->connector = (conn & 0x0000f000) >> 12;
122 outp->heads = (conn & 0x00000f00) >> 8;
123 outp->i2c_index = (conn & 0x000000f0) >> 4;
124 outp->type = (conn & 0x0000000f);
125 outp->link = 0;
126 } else {
127 dcb = 0x0000;
128 }
129
130 if (*ver >= 0x40) {
131 u32 conf = nv_ro32(bios, dcb + 0x04);
132 switch (outp->type) {
133 case DCB_OUTPUT_TMDS:
134 case DCB_OUTPUT_LVDS:
135 case DCB_OUTPUT_DP:
136 outp->link = (conf & 0x00000030) >> 4;
137 outp->sorconf.link = outp->link; /*XXX*/
138 break;
139 default:
140 break;
141 }
142 }
143 }
144 return dcb;
145}
146
147static inline u16
148dcb_outp_hasht(struct dcb_output *outp)
149{
150 return outp->type;
151}
152
153static inline u16
154dcb_outp_hashm(struct dcb_output *outp)
155{
156 return (outp->heads << 8) | (outp->link << 6) | outp->or;
157}
158
159u16
160dcb_outp_match(struct nouveau_bios *bios, u16 type, u16 mask,
161 u8 *ver, u8 *len, struct dcb_output *outp)
162{
163 u16 dcb, idx = 0;
164 while ((dcb = dcb_outp_parse(bios, idx++, ver, len, outp))) {
165 if (dcb_outp_hasht(outp) == type) {
166 if ((dcb_outp_hashm(outp) & mask) == mask)
167 break;
168 }
169 }
170 return dcb;
171}
172
110int 173int
111dcb_outp_foreach(struct nouveau_bios *bios, void *data, 174dcb_outp_foreach(struct nouveau_bios *bios, void *data,
112 int (*exec)(struct nouveau_bios *, void *, int, u16)) 175 int (*exec)(struct nouveau_bios *, void *, int, u16))
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
new file mode 100644
index 000000000000..7f16e52d9bea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
@@ -0,0 +1,178 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/disp.h>
28
29u16
30nvbios_disp_table(struct nouveau_bios *bios,
31 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub)
32{
33 struct bit_entry U;
34
35 if (!bit_entry(bios, 'U', &U)) {
36 if (U.version == 1) {
37 u16 data = nv_ro16(bios, U.offset);
38 if (data) {
39 *ver = nv_ro08(bios, data + 0x00);
40 switch (*ver) {
41 case 0x20:
42 case 0x21:
43 *hdr = nv_ro08(bios, data + 0x01);
44 *len = nv_ro08(bios, data + 0x02);
45 *cnt = nv_ro08(bios, data + 0x03);
46 *sub = nv_ro08(bios, data + 0x04);
47 return data;
48 default:
49 break;
50 }
51 }
52 }
53 }
54
55 return 0x0000;
56}
57
58u16
59nvbios_disp_entry(struct nouveau_bios *bios, u8 idx,
60 u8 *ver, u8 *len, u8 *sub)
61{
62 u8 hdr, cnt;
63 u16 data = nvbios_disp_table(bios, ver, &hdr, &cnt, len, sub);
64 if (data && idx < cnt)
65 return data + hdr + (idx * *len);
66 *ver = 0x00;
67 return 0x0000;
68}
69
70u16
71nvbios_disp_parse(struct nouveau_bios *bios, u8 idx,
72 u8 *ver, u8 *len, u8 *sub,
73 struct nvbios_disp *info)
74{
75 u16 data = nvbios_disp_entry(bios, idx, ver, len, sub);
76 if (data && *len >= 2) {
77 info->data = nv_ro16(bios, data + 0);
78 return data;
79 }
80 return 0x0000;
81}
82
83u16
84nvbios_outp_entry(struct nouveau_bios *bios, u8 idx,
85 u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
86{
87 struct nvbios_disp info;
88 u16 data = nvbios_disp_parse(bios, idx, ver, len, hdr, &info);
89 if (data) {
90 *cnt = nv_ro08(bios, info.data + 0x05);
91 *len = 0x06;
92 data = info.data;
93 }
94 return data;
95}
96
97u16
98nvbios_outp_parse(struct nouveau_bios *bios, u8 idx,
99 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
100 struct nvbios_outp *info)
101{
102 u16 data = nvbios_outp_entry(bios, idx, ver, hdr, cnt, len);
103 if (data && *hdr >= 0x0a) {
104 info->type = nv_ro16(bios, data + 0x00);
105 info->mask = nv_ro32(bios, data + 0x02);
106 if (*ver <= 0x20) /* match any link */
107 info->mask |= 0x00c0;
108 info->script[0] = nv_ro16(bios, data + 0x06);
109 info->script[1] = nv_ro16(bios, data + 0x08);
110 info->script[2] = 0x0000;
111 if (*hdr >= 0x0c)
112 info->script[2] = nv_ro16(bios, data + 0x0a);
113 return data;
114 }
115 return 0x0000;
116}
117
118u16
119nvbios_outp_match(struct nouveau_bios *bios, u16 type, u16 mask,
120 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
121 struct nvbios_outp *info)
122{
123 u16 data, idx = 0;
124 while ((data = nvbios_outp_parse(bios, idx++, ver, hdr, cnt, len, info)) || *ver) {
125 if (data && info->type == type) {
126 if ((info->mask & mask) == mask)
127 break;
128 }
129 }
130 return data;
131}
132
133u16
134nvbios_ocfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx,
135 u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
136{
137 if (idx < *cnt)
138 return outp + *hdr + (idx * *len);
139 return 0x0000;
140}
141
142u16
143nvbios_ocfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
144 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
145 struct nvbios_ocfg *info)
146{
147 u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);
148 if (data) {
149 info->match = nv_ro16(bios, data + 0x00);
150 info->clkcmp[0] = nv_ro16(bios, data + 0x02);
151 info->clkcmp[1] = nv_ro16(bios, data + 0x04);
152 }
153 return data;
154}
155
156u16
157nvbios_ocfg_match(struct nouveau_bios *bios, u16 outp, u16 type,
158 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
159 struct nvbios_ocfg *info)
160{
161 u16 data, idx = 0;
162 while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) {
163 if (info->match == type)
164 break;
165 }
166 return data;
167}
168
169u16
170nvbios_oclk_match(struct nouveau_bios *bios, u16 cmp, u32 khz)
171{
172 while (cmp) {
173 if (khz / 10 >= nv_ro16(bios, cmp + 0x00))
174 return nv_ro16(bios, cmp + 0x02);
175 cmp += 0x04;
176 }
177 return 0x0000;
178}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
index 3cbc0f3e8d5e..663853bcca82 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
@@ -25,23 +25,29 @@
25 25
26#include "subdev/bios.h" 26#include "subdev/bios.h"
27#include "subdev/bios/bit.h" 27#include "subdev/bios/bit.h"
28#include "subdev/bios/dcb.h"
29#include "subdev/bios/dp.h" 28#include "subdev/bios/dp.h"
30 29
31u16 30static u16
32dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) 31nvbios_dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
33{ 32{
34 struct bit_entry bit_d; 33 struct bit_entry d;
35 34
36 if (!bit_entry(bios, 'd', &bit_d)) { 35 if (!bit_entry(bios, 'd', &d)) {
37 if (bit_d.version == 1) { 36 if (d.version == 1 && d.length >= 2) {
38 u16 data = nv_ro16(bios, bit_d.offset); 37 u16 data = nv_ro16(bios, d.offset);
39 if (data) { 38 if (data) {
40 *ver = nv_ro08(bios, data + 0); 39 *ver = nv_ro08(bios, data + 0x00);
41 *hdr = nv_ro08(bios, data + 1); 40 switch (*ver) {
42 *len = nv_ro08(bios, data + 2); 41 case 0x21:
43 *cnt = nv_ro08(bios, data + 3); 42 case 0x30:
44 return data; 43 case 0x40:
44 *hdr = nv_ro08(bios, data + 0x01);
45 *len = nv_ro08(bios, data + 0x02);
46 *cnt = nv_ro08(bios, data + 0x03);
47 return data;
48 default:
49 break;
50 }
45 } 51 }
46 } 52 }
47 } 53 }
@@ -49,28 +55,150 @@ dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
49 return 0x0000; 55 return 0x0000;
50} 56}
51 57
58static u16
59nvbios_dpout_entry(struct nouveau_bios *bios, u8 idx,
60 u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
61{
62 u16 data = nvbios_dp_table(bios, ver, hdr, cnt, len);
63 if (data && idx < *cnt) {
64 u16 outp = nv_ro16(bios, data + *hdr + idx * *len);
65 switch (*ver * !!outp) {
66 case 0x21:
67 case 0x30:
68 *hdr = nv_ro08(bios, data + 0x04);
69 *len = nv_ro08(bios, data + 0x05);
70 *cnt = nv_ro08(bios, outp + 0x04);
71 break;
72 case 0x40:
73 *hdr = nv_ro08(bios, data + 0x04);
74 *cnt = 0;
75 *len = 0;
76 break;
77 default:
78 break;
79 }
80 return outp;
81 }
82 *ver = 0x00;
83 return 0x0000;
84}
85
52u16 86u16
53dp_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len) 87nvbios_dpout_parse(struct nouveau_bios *bios, u8 idx,
88 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
89 struct nvbios_dpout *info)
54{ 90{
55 u8 hdr, cnt; 91 u16 data = nvbios_dpout_entry(bios, idx, ver, hdr, cnt, len);
56 u16 table = dp_table(bios, ver, &hdr, &cnt, len); 92 if (data && *ver) {
57 if (table && idx < cnt) 93 info->type = nv_ro16(bios, data + 0x00);
58 return nv_ro16(bios, table + hdr + (idx * *len)); 94 info->mask = nv_ro16(bios, data + 0x02);
59 return 0xffff; 95 switch (*ver) {
96 case 0x21:
97 case 0x30:
98 info->flags = nv_ro08(bios, data + 0x05);
99 info->script[0] = nv_ro16(bios, data + 0x06);
100 info->script[1] = nv_ro16(bios, data + 0x08);
101 info->lnkcmp = nv_ro16(bios, data + 0x0a);
102 info->script[2] = nv_ro16(bios, data + 0x0c);
103 info->script[3] = nv_ro16(bios, data + 0x0e);
104 info->script[4] = nv_ro16(bios, data + 0x10);
105 break;
106 case 0x40:
107 info->flags = nv_ro08(bios, data + 0x04);
108 info->script[0] = nv_ro16(bios, data + 0x05);
109 info->script[1] = nv_ro16(bios, data + 0x07);
110 info->lnkcmp = nv_ro16(bios, data + 0x09);
111 info->script[2] = nv_ro16(bios, data + 0x0b);
112 info->script[3] = nv_ro16(bios, data + 0x0d);
113 info->script[4] = nv_ro16(bios, data + 0x0f);
114 break;
115 default:
116 data = 0x0000;
117 break;
118 }
119 }
120 return data;
60} 121}
61 122
62u16 123u16
63dp_outp_match(struct nouveau_bios *bios, struct dcb_output *outp, 124nvbios_dpout_match(struct nouveau_bios *bios, u16 type, u16 mask,
64 u8 *ver, u8 *len) 125 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
126 struct nvbios_dpout *info)
65{ 127{
66 u8 idx = 0; 128 u16 data, idx = 0;
67 u16 data; 129 while ((data = nvbios_dpout_parse(bios, idx++, ver, hdr, cnt, len, info)) || *ver) {
68 while ((data = dp_outp(bios, idx++, ver, len)) != 0xffff) { 130 if (data && info->type == type) {
69 if (data) { 131 if ((info->mask & mask) == mask)
70 u32 hash = nv_ro32(bios, data); 132 break;
71 if (dcb_hash_match(outp, hash))
72 return data;
73 } 133 }
74 } 134 }
135 return data;
136}
137
138static u16
139nvbios_dpcfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx,
140 u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
141{
142 if (*ver >= 0x40) {
143 outp = nvbios_dp_table(bios, ver, hdr, cnt, len);
144 *hdr = *hdr + (*len * * cnt);
145 *len = nv_ro08(bios, outp + 0x06);
146 *cnt = nv_ro08(bios, outp + 0x07);
147 }
148
149 if (idx < *cnt)
150 return outp + *hdr + (idx * *len);
151
75 return 0x0000; 152 return 0x0000;
76} 153}
154
155u16
156nvbios_dpcfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
157 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
158 struct nvbios_dpcfg *info)
159{
160 u16 data = nvbios_dpcfg_entry(bios, outp, idx, ver, hdr, cnt, len);
161 if (data) {
162 switch (*ver) {
163 case 0x21:
164 info->drv = nv_ro08(bios, data + 0x02);
165 info->pre = nv_ro08(bios, data + 0x03);
166 info->unk = nv_ro08(bios, data + 0x04);
167 break;
168 case 0x30:
169 case 0x40:
170 info->drv = nv_ro08(bios, data + 0x01);
171 info->pre = nv_ro08(bios, data + 0x02);
172 info->unk = nv_ro08(bios, data + 0x03);
173 break;
174 default:
175 data = 0x0000;
176 break;
177 }
178 }
179 return data;
180}
181
182u16
183nvbios_dpcfg_match(struct nouveau_bios *bios, u16 outp, u8 un, u8 vs, u8 pe,
184 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
185 struct nvbios_dpcfg *info)
186{
187 u8 idx = 0xff;
188 u16 data;
189
190 if (*ver >= 0x30) {
191 const u8 vsoff[] = { 0, 4, 7, 9 };
192 idx = (un * 10) + vsoff[vs] + pe;
193 } else {
194 while ((data = nvbios_dpcfg_entry(bios, outp, idx,
195 ver, hdr, cnt, len))) {
196 if (nv_ro08(bios, data + 0x00) == vs &&
197 nv_ro08(bios, data + 0x01) == pe)
198 break;
199 idx++;
200 }
201 }
202
203 return nvbios_dpcfg_parse(bios, outp, pe, ver, hdr, cnt, len, info);
204}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
index 4c9f1e508165..c84e93fa6d95 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
@@ -27,84 +27,105 @@
27#include <subdev/bios/gpio.h> 27#include <subdev/bios/gpio.h>
28 28
29u16 29u16
30dcb_gpio_table(struct nouveau_bios *bios) 30dcb_gpio_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
31{ 31{
32 u8 ver, hdr, cnt, len; 32 u16 data = 0x0000;
33 u16 dcb = dcb_table(bios, &ver, &hdr, &cnt, &len); 33 u16 dcb = dcb_table(bios, ver, hdr, cnt, len);
34 if (dcb) { 34 if (dcb) {
35 if (ver >= 0x30 && hdr >= 0x0c) 35 if (*ver >= 0x30 && *hdr >= 0x0c)
36 return nv_ro16(bios, dcb + 0x0a); 36 data = nv_ro16(bios, dcb + 0x0a);
37 if (ver >= 0x22 && nv_ro08(bios, dcb - 1) >= 0x13) 37 else
38 return nv_ro16(bios, dcb - 0x0f); 38 if (*ver >= 0x22 && nv_ro08(bios, dcb - 1) >= 0x13)
39 data = nv_ro16(bios, dcb - 0x0f);
40
41 if (data) {
42 *ver = nv_ro08(bios, data + 0x00);
43 if (*ver < 0x30) {
44 *hdr = 3;
45 *cnt = nv_ro08(bios, data + 0x02);
46 *len = nv_ro08(bios, data + 0x01);
47 } else
48 if (*ver <= 0x41) {
49 *hdr = nv_ro08(bios, data + 0x01);
50 *cnt = nv_ro08(bios, data + 0x02);
51 *len = nv_ro08(bios, data + 0x03);
52 } else {
53 data = 0x0000;
54 }
55 }
39 } 56 }
40 return 0x0000; 57 return data;
41} 58}
42 59
43u16 60u16
44dcb_gpio_entry(struct nouveau_bios *bios, int idx, int ent, u8 *ver) 61dcb_gpio_entry(struct nouveau_bios *bios, int idx, int ent, u8 *ver, u8 *len)
45{ 62{
46 u16 gpio = dcb_gpio_table(bios); 63 u8 hdr, cnt;
47 if (gpio) { 64 u16 gpio = !idx ? dcb_gpio_table(bios, ver, &hdr, &cnt, len) : 0x0000;
48 *ver = nv_ro08(bios, gpio); 65 if (gpio && ent < cnt)
49 if (*ver < 0x30 && ent < nv_ro08(bios, gpio + 2)) 66 return gpio + hdr + (ent * *len);
50 return gpio + 3 + (ent * nv_ro08(bios, gpio + 1));
51 else if (ent < nv_ro08(bios, gpio + 2))
52 return gpio + nv_ro08(bios, gpio + 1) +
53 (ent * nv_ro08(bios, gpio + 3));
54 }
55 return 0x0000; 67 return 0x0000;
56} 68}
57 69
58int 70u16
59dcb_gpio_parse(struct nouveau_bios *bios, int idx, u8 func, u8 line, 71dcb_gpio_parse(struct nouveau_bios *bios, int idx, int ent, u8 *ver, u8 *len,
60 struct dcb_gpio_func *gpio) 72 struct dcb_gpio_func *gpio)
61{ 73{
62 u8 ver, hdr, cnt, len; 74 u16 data = dcb_gpio_entry(bios, idx, ent, ver, len);
63 u16 entry; 75 if (data) {
64 int i = -1; 76 if (*ver < 0x40) {
65 77 u16 info = nv_ro16(bios, data);
66 while ((entry = dcb_gpio_entry(bios, idx, ++i, &ver))) {
67 if (ver < 0x40) {
68 u16 data = nv_ro16(bios, entry);
69 *gpio = (struct dcb_gpio_func) { 78 *gpio = (struct dcb_gpio_func) {
70 .line = (data & 0x001f) >> 0, 79 .line = (info & 0x001f) >> 0,
71 .func = (data & 0x07e0) >> 5, 80 .func = (info & 0x07e0) >> 5,
72 .log[0] = (data & 0x1800) >> 11, 81 .log[0] = (info & 0x1800) >> 11,
73 .log[1] = (data & 0x6000) >> 13, 82 .log[1] = (info & 0x6000) >> 13,
74 .param = !!(data & 0x8000), 83 .param = !!(info & 0x8000),
75 }; 84 };
76 } else 85 } else
77 if (ver < 0x41) { 86 if (*ver < 0x41) {
78 u32 data = nv_ro32(bios, entry); 87 u32 info = nv_ro32(bios, data);
79 *gpio = (struct dcb_gpio_func) { 88 *gpio = (struct dcb_gpio_func) {
80 .line = (data & 0x0000001f) >> 0, 89 .line = (info & 0x0000001f) >> 0,
81 .func = (data & 0x0000ff00) >> 8, 90 .func = (info & 0x0000ff00) >> 8,
82 .log[0] = (data & 0x18000000) >> 27, 91 .log[0] = (info & 0x18000000) >> 27,
83 .log[1] = (data & 0x60000000) >> 29, 92 .log[1] = (info & 0x60000000) >> 29,
84 .param = !!(data & 0x80000000), 93 .param = !!(info & 0x80000000),
85 }; 94 };
86 } else { 95 } else {
87 u32 data = nv_ro32(bios, entry + 0); 96 u32 info = nv_ro32(bios, data + 0);
88 u8 data1 = nv_ro32(bios, entry + 4); 97 u8 info1 = nv_ro32(bios, data + 4);
89 *gpio = (struct dcb_gpio_func) { 98 *gpio = (struct dcb_gpio_func) {
90 .line = (data & 0x0000003f) >> 0, 99 .line = (info & 0x0000003f) >> 0,
91 .func = (data & 0x0000ff00) >> 8, 100 .func = (info & 0x0000ff00) >> 8,
92 .log[0] = (data1 & 0x30) >> 4, 101 .log[0] = (info1 & 0x30) >> 4,
93 .log[1] = (data1 & 0xc0) >> 6, 102 .log[1] = (info1 & 0xc0) >> 6,
94 .param = !!(data & 0x80000000), 103 .param = !!(info & 0x80000000),
95 }; 104 };
96 } 105 }
106 }
107
108 return data;
109}
97 110
111u16
112dcb_gpio_match(struct nouveau_bios *bios, int idx, u8 func, u8 line,
113 u8 *ver, u8 *len, struct dcb_gpio_func *gpio)
114{
115 u8 hdr, cnt, i = 0;
116 u16 data;
117
118 while ((data = dcb_gpio_parse(bios, idx, i++, ver, len, gpio))) {
98 if ((line == 0xff || line == gpio->line) && 119 if ((line == 0xff || line == gpio->line) &&
99 (func == 0xff || func == gpio->func)) 120 (func == 0xff || func == gpio->func))
100 return 0; 121 return data;
101 } 122 }
102 123
103 /* DCB 2.2, fixed TVDAC GPIO data */ 124 /* DCB 2.2, fixed TVDAC GPIO data */
104 if ((entry = dcb_table(bios, &ver, &hdr, &cnt, &len)) && ver >= 0x22) { 125 if ((data = dcb_table(bios, ver, &hdr, &cnt, len))) {
105 if (func == DCB_GPIO_TVDAC0) { 126 if (*ver >= 0x22 && *ver < 0x30 && func == DCB_GPIO_TVDAC0) {
106 u8 conf = nv_ro08(bios, entry - 5); 127 u8 conf = nv_ro08(bios, data - 5);
107 u8 addr = nv_ro08(bios, entry - 4); 128 u8 addr = nv_ro08(bios, data - 4);
108 if (conf & 0x01) { 129 if (conf & 0x01) {
109 *gpio = (struct dcb_gpio_func) { 130 *gpio = (struct dcb_gpio_func) {
110 .func = DCB_GPIO_TVDAC0, 131 .func = DCB_GPIO_TVDAC0,
@@ -112,10 +133,11 @@ dcb_gpio_parse(struct nouveau_bios *bios, int idx, u8 func, u8 line,
112 .log[0] = !!(conf & 0x02), 133 .log[0] = !!(conf & 0x02),
113 .log[1] = !(conf & 0x02), 134 .log[1] = !(conf & 0x02),
114 }; 135 };
115 return 0; 136 *ver = 0x00;
137 return data;
116 } 138 }
117 } 139 }
118 } 140 }
119 141
120 return -EINVAL; 142 return 0x0000;
121} 143}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 6be8c32f6e4c..690ed438b2ad 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -2,11 +2,12 @@
2#include <core/device.h> 2#include <core/device.h>
3 3
4#include <subdev/bios.h> 4#include <subdev/bios.h>
5#include <subdev/bios/conn.h>
6#include <subdev/bios/bmp.h> 5#include <subdev/bios/bmp.h>
7#include <subdev/bios/bit.h> 6#include <subdev/bios/bit.h>
7#include <subdev/bios/conn.h>
8#include <subdev/bios/dcb.h> 8#include <subdev/bios/dcb.h>
9#include <subdev/bios/dp.h> 9#include <subdev/bios/dp.h>
10#include <subdev/bios/gpio.h>
10#include <subdev/bios/init.h> 11#include <subdev/bios/init.h>
11#include <subdev/devinit.h> 12#include <subdev/devinit.h>
12#include <subdev/clock.h> 13#include <subdev/clock.h>
@@ -410,9 +411,25 @@ init_ram_restrict_group_count(struct nvbios_init *init)
410} 411}
411 412
412static u8 413static u8
414init_ram_restrict_strap(struct nvbios_init *init)
415{
416 /* This appears to be the behaviour of the VBIOS parser, and *is*
417 * important to cache the NV_PEXTDEV_BOOT0 on later chipsets to
418 * avoid fucking up the memory controller (somehow) by reading it
419 * on every INIT_RAM_RESTRICT_ZM_GROUP opcode.
420 *
421 * Preserving the non-caching behaviour on earlier chipsets just
422 * in case *not* re-reading the strap causes similar breakage.
423 */
424 if (!init->ramcfg || init->bios->version.major < 0x70)
425 init->ramcfg = init_rd32(init, 0x101000);
426 return (init->ramcfg & 0x00000003c) >> 2;
427}
428
429static u8
413init_ram_restrict(struct nvbios_init *init) 430init_ram_restrict(struct nvbios_init *init)
414{ 431{
415 u32 strap = (init_rd32(init, 0x101000) & 0x0000003c) >> 2; 432 u8 strap = init_ram_restrict_strap(init);
416 u16 table = init_ram_restrict_table(init); 433 u16 table = init_ram_restrict_table(init);
417 if (table) 434 if (table)
418 return nv_ro08(init->bios, table + strap); 435 return nv_ro08(init->bios, table + strap);
@@ -743,9 +760,10 @@ static void
743init_dp_condition(struct nvbios_init *init) 760init_dp_condition(struct nvbios_init *init)
744{ 761{
745 struct nouveau_bios *bios = init->bios; 762 struct nouveau_bios *bios = init->bios;
763 struct nvbios_dpout info;
746 u8 cond = nv_ro08(bios, init->offset + 1); 764 u8 cond = nv_ro08(bios, init->offset + 1);
747 u8 unkn = nv_ro08(bios, init->offset + 2); 765 u8 unkn = nv_ro08(bios, init->offset + 2);
748 u8 ver, len; 766 u8 ver, hdr, cnt, len;
749 u16 data; 767 u16 data;
750 768
751 trace("DP_CONDITION\t0x%02x 0x%02x\n", cond, unkn); 769 trace("DP_CONDITION\t0x%02x 0x%02x\n", cond, unkn);
@@ -759,10 +777,12 @@ init_dp_condition(struct nvbios_init *init)
759 case 1: 777 case 1:
760 case 2: 778 case 2:
761 if ( init->outp && 779 if ( init->outp &&
762 (data = dp_outp_match(bios, init->outp, &ver, &len))) { 780 (data = nvbios_dpout_match(bios, DCB_OUTPUT_DP,
763 if (ver <= 0x40 && !(nv_ro08(bios, data + 5) & cond)) 781 (init->outp->or << 0) |
764 init_exec_set(init, false); 782 (init->outp->sorconf.link << 6),
765 if (ver == 0x40 && !(nv_ro08(bios, data + 4) & cond)) 783 &ver, &hdr, &cnt, &len, &info)))
784 {
785 if (!(info.flags & cond))
766 init_exec_set(init, false); 786 init_exec_set(init, false);
767 break; 787 break;
768 } 788 }
@@ -1514,7 +1534,6 @@ init_io(struct nvbios_init *init)
1514 mdelay(10); 1534 mdelay(10);
1515 init_wr32(init, 0x614100, 0x10000018); 1535 init_wr32(init, 0x614100, 0x10000018);
1516 init_wr32(init, 0x614900, 0x10000018); 1536 init_wr32(init, 0x614900, 0x10000018);
1517 return;
1518 } 1537 }
1519 1538
1520 value = init_rdport(init, port) & mask; 1539 value = init_rdport(init, port) & mask;
@@ -1778,7 +1797,7 @@ init_gpio(struct nvbios_init *init)
1778 init->offset += 1; 1797 init->offset += 1;
1779 1798
1780 if (init_exec(init) && gpio && gpio->reset) 1799 if (init_exec(init) && gpio && gpio->reset)
1781 gpio->reset(gpio); 1800 gpio->reset(gpio, DCB_GPIO_UNUSED);
1782} 1801}
1783 1802
1784/** 1803/**
@@ -1992,6 +2011,47 @@ init_i2c_long_if(struct nvbios_init *init)
1992 init_exec_set(init, false); 2011 init_exec_set(init, false);
1993} 2012}
1994 2013
2014/**
2015 * INIT_GPIO_NE - opcode 0xa9
2016 *
2017 */
2018static void
2019init_gpio_ne(struct nvbios_init *init)
2020{
2021 struct nouveau_bios *bios = init->bios;
2022 struct nouveau_gpio *gpio = nouveau_gpio(bios);
2023 struct dcb_gpio_func func;
2024 u8 count = nv_ro08(bios, init->offset + 1);
2025 u8 idx = 0, ver, len;
2026 u16 data, i;
2027
2028 trace("GPIO_NE\t");
2029 init->offset += 2;
2030
2031 for (i = init->offset; i < init->offset + count; i++)
2032 cont("0x%02x ", nv_ro08(bios, i));
2033 cont("\n");
2034
2035 while ((data = dcb_gpio_parse(bios, 0, idx++, &ver, &len, &func))) {
2036 if (func.func != DCB_GPIO_UNUSED) {
2037 for (i = init->offset; i < init->offset + count; i++) {
2038 if (func.func == nv_ro08(bios, i))
2039 break;
2040 }
2041
2042 trace("\tFUNC[0x%02x]", func.func);
2043 if (i == (init->offset + count)) {
2044 cont(" *");
2045 if (init_exec(init) && gpio && gpio->reset)
2046 gpio->reset(gpio, func.func);
2047 }
2048 cont("\n");
2049 }
2050 }
2051
2052 init->offset += count;
2053}
2054
1995static struct nvbios_init_opcode { 2055static struct nvbios_init_opcode {
1996 void (*exec)(struct nvbios_init *); 2056 void (*exec)(struct nvbios_init *);
1997} init_opcode[] = { 2057} init_opcode[] = {
@@ -2056,6 +2116,7 @@ static struct nvbios_init_opcode {
2056 [0x98] = { init_auxch }, 2116 [0x98] = { init_auxch },
2057 [0x99] = { init_zm_auxch }, 2117 [0x99] = { init_zm_auxch },
2058 [0x9a] = { init_i2c_long_if }, 2118 [0x9a] = { init_i2c_long_if },
2119 [0xa9] = { init_gpio_ne },
2059}; 2120};
2060 2121
2061#define init_opcode_nr (sizeof(init_opcode) / sizeof(init_opcode[0])) 2122#define init_opcode_nr (sizeof(init_opcode) / sizeof(init_opcode[0]))
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
index cc8d7d162d7c..9068c98b96f6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
@@ -66,6 +66,24 @@ nva3_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
66 return ret; 66 return ret;
67} 67}
68 68
69int
70nva3_clock_pll_calc(struct nouveau_clock *clock, struct nvbios_pll *info,
71 int clk, struct nouveau_pll_vals *pv)
72{
73 int ret, N, M, P;
74
75 ret = nva3_pll_calc(clock, info, clk, &N, NULL, &M, &P);
76
77 if (ret > 0) {
78 pv->refclk = info->refclk;
79 pv->N1 = N;
80 pv->M1 = M;
81 pv->log2P = P;
82 }
83 return ret;
84}
85
86
69static int 87static int
70nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 88nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
71 struct nouveau_oclass *oclass, void *data, u32 size, 89 struct nouveau_oclass *oclass, void *data, u32 size,
@@ -80,6 +98,7 @@ nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
80 return ret; 98 return ret;
81 99
82 priv->base.pll_set = nva3_clock_pll_set; 100 priv->base.pll_set = nva3_clock_pll_set;
101 priv->base.pll_calc = nva3_clock_pll_calc;
83 return 0; 102 return 0;
84} 103}
85 104
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
index 5ccce0b17bf3..7c9626258a46 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
@@ -52,6 +52,8 @@ nvc0_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
52 switch (info.type) { 52 switch (info.type) {
53 case PLL_VPLL0: 53 case PLL_VPLL0:
54 case PLL_VPLL1: 54 case PLL_VPLL1:
55 case PLL_VPLL2:
56 case PLL_VPLL3:
55 nv_mask(priv, info.reg + 0x0c, 0x00000000, 0x00000100); 57 nv_mask(priv, info.reg + 0x0c, 0x00000000, 0x00000100);
56 nv_wr32(priv, info.reg + 0x04, (P << 16) | (N << 8) | M); 58 nv_wr32(priv, info.reg + 0x04, (P << 16) | (N << 8) | M);
57 nv_wr32(priv, info.reg + 0x10, fN << 16); 59 nv_wr32(priv, info.reg + 0x10, fN << 16);
@@ -79,6 +81,7 @@ nvc0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
79 return ret; 81 return ret;
80 82
81 priv->base.pll_set = nvc0_clock_pll_set; 83 priv->base.pll_set = nvc0_clock_pll_set;
84 priv->base.pll_calc = nva3_clock_pll_calc;
82 return 0; 85 return 0;
83} 86}
84 87
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/base.c b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
index ca9a4648bd8a..f8a7ed4166cf 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
@@ -25,7 +25,6 @@
25#include <core/object.h> 25#include <core/object.h>
26#include <core/device.h> 26#include <core/device.h>
27#include <core/client.h> 27#include <core/client.h>
28#include <core/device.h>
29#include <core/option.h> 28#include <core/option.h>
30 29
31#include <core/class.h> 30#include <core/class.h>
@@ -61,19 +60,24 @@ struct nouveau_devobj {
61 60
62static const u64 disable_map[] = { 61static const u64 disable_map[] = {
63 [NVDEV_SUBDEV_VBIOS] = NV_DEVICE_DISABLE_VBIOS, 62 [NVDEV_SUBDEV_VBIOS] = NV_DEVICE_DISABLE_VBIOS,
63 [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_DISABLE_CORE,
64 [NVDEV_SUBDEV_GPIO] = NV_DEVICE_DISABLE_CORE, 64 [NVDEV_SUBDEV_GPIO] = NV_DEVICE_DISABLE_CORE,
65 [NVDEV_SUBDEV_I2C] = NV_DEVICE_DISABLE_CORE, 65 [NVDEV_SUBDEV_I2C] = NV_DEVICE_DISABLE_CORE,
66 [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_DISABLE_CORE, 66 [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_DISABLE_CORE,
67 [NVDEV_SUBDEV_MXM] = NV_DEVICE_DISABLE_CORE,
67 [NVDEV_SUBDEV_MC] = NV_DEVICE_DISABLE_CORE, 68 [NVDEV_SUBDEV_MC] = NV_DEVICE_DISABLE_CORE,
68 [NVDEV_SUBDEV_TIMER] = NV_DEVICE_DISABLE_CORE, 69 [NVDEV_SUBDEV_TIMER] = NV_DEVICE_DISABLE_CORE,
69 [NVDEV_SUBDEV_FB] = NV_DEVICE_DISABLE_CORE, 70 [NVDEV_SUBDEV_FB] = NV_DEVICE_DISABLE_CORE,
70 [NVDEV_SUBDEV_VM] = NV_DEVICE_DISABLE_CORE, 71 [NVDEV_SUBDEV_LTCG] = NV_DEVICE_DISABLE_CORE,
72 [NVDEV_SUBDEV_IBUS] = NV_DEVICE_DISABLE_CORE,
71 [NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_DISABLE_CORE, 73 [NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_DISABLE_CORE,
74 [NVDEV_SUBDEV_VM] = NV_DEVICE_DISABLE_CORE,
72 [NVDEV_SUBDEV_BAR] = NV_DEVICE_DISABLE_CORE, 75 [NVDEV_SUBDEV_BAR] = NV_DEVICE_DISABLE_CORE,
73 [NVDEV_SUBDEV_VOLT] = NV_DEVICE_DISABLE_CORE, 76 [NVDEV_SUBDEV_VOLT] = NV_DEVICE_DISABLE_CORE,
74 [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_DISABLE_CORE,
75 [NVDEV_SUBDEV_THERM] = NV_DEVICE_DISABLE_CORE, 77 [NVDEV_SUBDEV_THERM] = NV_DEVICE_DISABLE_CORE,
76 [NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_DISABLE_CORE, 78 [NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_DISABLE_CORE,
79 [NVDEV_ENGINE_FIFO] = NV_DEVICE_DISABLE_FIFO,
80 [NVDEV_ENGINE_SW] = NV_DEVICE_DISABLE_FIFO,
77 [NVDEV_ENGINE_GR] = NV_DEVICE_DISABLE_GRAPH, 81 [NVDEV_ENGINE_GR] = NV_DEVICE_DISABLE_GRAPH,
78 [NVDEV_ENGINE_MPEG] = NV_DEVICE_DISABLE_MPEG, 82 [NVDEV_ENGINE_MPEG] = NV_DEVICE_DISABLE_MPEG,
79 [NVDEV_ENGINE_ME] = NV_DEVICE_DISABLE_ME, 83 [NVDEV_ENGINE_ME] = NV_DEVICE_DISABLE_ME,
@@ -84,7 +88,7 @@ static const u64 disable_map[] = {
84 [NVDEV_ENGINE_COPY0] = NV_DEVICE_DISABLE_COPY0, 88 [NVDEV_ENGINE_COPY0] = NV_DEVICE_DISABLE_COPY0,
85 [NVDEV_ENGINE_COPY1] = NV_DEVICE_DISABLE_COPY1, 89 [NVDEV_ENGINE_COPY1] = NV_DEVICE_DISABLE_COPY1,
86 [NVDEV_ENGINE_UNK1C1] = NV_DEVICE_DISABLE_UNK1C1, 90 [NVDEV_ENGINE_UNK1C1] = NV_DEVICE_DISABLE_UNK1C1,
87 [NVDEV_ENGINE_FIFO] = NV_DEVICE_DISABLE_FIFO, 91 [NVDEV_ENGINE_VENC] = NV_DEVICE_DISABLE_VENC,
88 [NVDEV_ENGINE_DISP] = NV_DEVICE_DISABLE_DISP, 92 [NVDEV_ENGINE_DISP] = NV_DEVICE_DISABLE_DISP,
89 [NVDEV_SUBDEV_NR] = 0, 93 [NVDEV_SUBDEV_NR] = 0,
90}; 94};
@@ -208,7 +212,7 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
208 212
209 /* determine frequency of timing crystal */ 213 /* determine frequency of timing crystal */
210 if ( device->chipset < 0x17 || 214 if ( device->chipset < 0x17 ||
211 (device->chipset >= 0x20 && device->chipset <= 0x25)) 215 (device->chipset >= 0x20 && device->chipset < 0x25))
212 strap &= 0x00000040; 216 strap &= 0x00000040;
213 else 217 else
214 strap &= 0x00400040; 218 strap &= 0x00400040;
@@ -356,37 +360,37 @@ fail:
356} 360}
357 361
358static u8 362static u8
359nouveau_devobj_rd08(struct nouveau_object *object, u32 addr) 363nouveau_devobj_rd08(struct nouveau_object *object, u64 addr)
360{ 364{
361 return nv_rd08(object->engine, addr); 365 return nv_rd08(object->engine, addr);
362} 366}
363 367
364static u16 368static u16
365nouveau_devobj_rd16(struct nouveau_object *object, u32 addr) 369nouveau_devobj_rd16(struct nouveau_object *object, u64 addr)
366{ 370{
367 return nv_rd16(object->engine, addr); 371 return nv_rd16(object->engine, addr);
368} 372}
369 373
370static u32 374static u32
371nouveau_devobj_rd32(struct nouveau_object *object, u32 addr) 375nouveau_devobj_rd32(struct nouveau_object *object, u64 addr)
372{ 376{
373 return nv_rd32(object->engine, addr); 377 return nv_rd32(object->engine, addr);
374} 378}
375 379
376static void 380static void
377nouveau_devobj_wr08(struct nouveau_object *object, u32 addr, u8 data) 381nouveau_devobj_wr08(struct nouveau_object *object, u64 addr, u8 data)
378{ 382{
379 nv_wr08(object->engine, addr, data); 383 nv_wr08(object->engine, addr, data);
380} 384}
381 385
382static void 386static void
383nouveau_devobj_wr16(struct nouveau_object *object, u32 addr, u16 data) 387nouveau_devobj_wr16(struct nouveau_object *object, u64 addr, u16 data)
384{ 388{
385 nv_wr16(object->engine, addr, data); 389 nv_wr16(object->engine, addr, data);
386} 390}
387 391
388static void 392static void
389nouveau_devobj_wr32(struct nouveau_object *object, u32 addr, u32 data) 393nouveau_devobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
390{ 394{
391 nv_wr32(object->engine, addr, data); 395 nv_wr32(object->engine, addr, data);
392} 396}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
index f09accfd0e31..9c40b0fb23f6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
@@ -105,7 +105,7 @@ nv10_identify(struct nouveau_device *device)
105 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 105 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
106 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 106 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
107 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 107 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
108 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; 108 device->oclass[NVDEV_SUBDEV_FB ] = &nv1a_fb_oclass;
109 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 109 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
110 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 110 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
111 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 111 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -159,7 +159,7 @@ nv10_identify(struct nouveau_device *device)
159 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 159 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
160 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 160 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
161 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 161 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
162 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; 162 device->oclass[NVDEV_SUBDEV_FB ] = &nv1a_fb_oclass;
163 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 163 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
164 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 164 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
165 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 165 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
index 5fa58b7369b5..74f88f48e1c2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
@@ -72,7 +72,7 @@ nv20_identify(struct nouveau_device *device)
72 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; 72 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
73 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 73 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
74 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 74 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
75 device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass; 75 device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
76 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 76 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
77 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 77 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
78 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 78 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -90,7 +90,7 @@ nv20_identify(struct nouveau_device *device)
90 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; 90 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
91 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 91 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
92 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 92 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
93 device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass; 93 device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
94 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 94 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
95 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 95 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
96 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 96 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -108,7 +108,7 @@ nv20_identify(struct nouveau_device *device)
108 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; 108 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
109 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 109 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
110 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 110 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
111 device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass; 111 device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
112 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 112 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
113 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 113 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
114 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 114 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
index 7f4b8fe6cccc..0ac1b2c4f61d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
@@ -72,7 +72,7 @@ nv30_identify(struct nouveau_device *device)
72 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; 72 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
73 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 73 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
74 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 74 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
75 device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass; 75 device->oclass[NVDEV_SUBDEV_FB ] = &nv35_fb_oclass;
76 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 76 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
77 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 77 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
78 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 78 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -109,7 +109,7 @@ nv30_identify(struct nouveau_device *device)
109 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; 109 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
110 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 110 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
111 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 111 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
112 device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass; 112 device->oclass[NVDEV_SUBDEV_FB ] = &nv36_fb_oclass;
113 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 113 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
114 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 114 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
115 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 115 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -128,7 +128,7 @@ nv30_identify(struct nouveau_device *device)
128 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; 128 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
129 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 129 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
130 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 130 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
131 device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass; 131 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
132 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 132 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
133 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 133 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
134 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 134 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
index 42deadca0f0a..41d59689a021 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
@@ -76,7 +76,7 @@ nv40_identify(struct nouveau_device *device)
76 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 76 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
77 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 77 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
78 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 78 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
79 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 79 device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
80 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 80 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
81 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; 81 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
82 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 82 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -96,7 +96,7 @@ nv40_identify(struct nouveau_device *device)
96 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 96 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
97 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 97 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
98 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 98 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
99 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 99 device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
100 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 100 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
101 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; 101 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
102 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 102 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -116,7 +116,7 @@ nv40_identify(struct nouveau_device *device)
116 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 116 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
117 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 117 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
118 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 118 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
119 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 119 device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
120 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 120 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
121 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; 121 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
122 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 122 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -156,7 +156,7 @@ nv40_identify(struct nouveau_device *device)
156 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 156 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
157 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 157 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
158 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 158 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
159 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 159 device->oclass[NVDEV_SUBDEV_FB ] = &nv47_fb_oclass;
160 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 160 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
161 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; 161 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
162 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 162 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -176,7 +176,7 @@ nv40_identify(struct nouveau_device *device)
176 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 176 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
177 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 177 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
178 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 178 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
179 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 179 device->oclass[NVDEV_SUBDEV_FB ] = &nv49_fb_oclass;
180 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 180 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
181 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; 181 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
182 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 182 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -196,7 +196,7 @@ nv40_identify(struct nouveau_device *device)
196 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 196 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
197 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 197 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
198 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 198 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
199 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 199 device->oclass[NVDEV_SUBDEV_FB ] = &nv49_fb_oclass;
200 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 200 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
201 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; 201 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
202 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 202 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -216,7 +216,7 @@ nv40_identify(struct nouveau_device *device)
216 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 216 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
217 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 217 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
218 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 218 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
219 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 219 device->oclass[NVDEV_SUBDEV_FB ] = &nv44_fb_oclass;
220 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 220 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
221 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 221 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
222 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 222 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -236,7 +236,7 @@ nv40_identify(struct nouveau_device *device)
236 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 236 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
237 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 237 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
238 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 238 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
239 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 239 device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
240 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 240 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
241 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 241 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
242 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 242 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -256,7 +256,7 @@ nv40_identify(struct nouveau_device *device)
256 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 256 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
257 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 257 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
258 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 258 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
259 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 259 device->oclass[NVDEV_SUBDEV_FB ] = &nv44_fb_oclass;
260 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 260 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
261 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 261 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
262 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 262 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -276,7 +276,7 @@ nv40_identify(struct nouveau_device *device)
276 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 276 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
277 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 277 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
278 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 278 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
279 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 279 device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
280 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 280 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
281 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 281 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
282 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 282 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -296,7 +296,7 @@ nv40_identify(struct nouveau_device *device)
296 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 296 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
297 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 297 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
298 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 298 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
299 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 299 device->oclass[NVDEV_SUBDEV_FB ] = &nv4e_fb_oclass;
300 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 300 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
301 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 301 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
302 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 302 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -316,7 +316,7 @@ nv40_identify(struct nouveau_device *device)
316 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 316 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
317 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 317 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
318 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 318 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
319 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 319 device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
320 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 320 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
321 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 321 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
322 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 322 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -336,7 +336,7 @@ nv40_identify(struct nouveau_device *device)
336 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 336 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
337 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 337 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
338 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 338 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
339 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 339 device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
340 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 340 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
341 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 341 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
342 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 342 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -356,7 +356,7 @@ nv40_identify(struct nouveau_device *device)
356 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 356 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
357 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 357 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
358 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 358 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
359 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 359 device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
360 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 360 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
361 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 361 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
362 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 362 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
index fec3bcc9a6fc..6ccfd8585ba2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
@@ -98,7 +98,7 @@ nv50_identify(struct nouveau_device *device)
98 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 98 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
99 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; 99 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
100 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 100 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
101 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 101 device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass;
102 break; 102 break;
103 case 0x86: 103 case 0x86:
104 device->cname = "G86"; 104 device->cname = "G86";
@@ -123,7 +123,7 @@ nv50_identify(struct nouveau_device *device)
123 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 123 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
124 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; 124 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
125 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 125 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
126 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 126 device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass;
127 break; 127 break;
128 case 0x92: 128 case 0x92:
129 device->cname = "G92"; 129 device->cname = "G92";
@@ -148,7 +148,7 @@ nv50_identify(struct nouveau_device *device)
148 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 148 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
149 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; 149 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
150 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 150 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
151 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 151 device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass;
152 break; 152 break;
153 case 0x94: 153 case 0x94:
154 device->cname = "G94"; 154 device->cname = "G94";
@@ -173,7 +173,7 @@ nv50_identify(struct nouveau_device *device)
173 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 173 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
174 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; 174 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
175 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 175 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
176 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 176 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
177 break; 177 break;
178 case 0x96: 178 case 0x96:
179 device->cname = "G96"; 179 device->cname = "G96";
@@ -198,7 +198,7 @@ nv50_identify(struct nouveau_device *device)
198 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 198 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
199 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; 199 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
200 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 200 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
201 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 201 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
202 break; 202 break;
203 case 0x98: 203 case 0x98:
204 device->cname = "G98"; 204 device->cname = "G98";
@@ -223,7 +223,7 @@ nv50_identify(struct nouveau_device *device)
223 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass; 223 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
224 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 224 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
225 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 225 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
226 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 226 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
227 break; 227 break;
228 case 0xa0: 228 case 0xa0:
229 device->cname = "G200"; 229 device->cname = "G200";
@@ -248,7 +248,7 @@ nv50_identify(struct nouveau_device *device)
248 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 248 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
249 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; 249 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
250 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 250 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
251 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 251 device->oclass[NVDEV_ENGINE_DISP ] = &nva0_disp_oclass;
252 break; 252 break;
253 case 0xaa: 253 case 0xaa:
254 device->cname = "MCP77/MCP78"; 254 device->cname = "MCP77/MCP78";
@@ -273,7 +273,7 @@ nv50_identify(struct nouveau_device *device)
273 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass; 273 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
274 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 274 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
275 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 275 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
276 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 276 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
277 break; 277 break;
278 case 0xac: 278 case 0xac:
279 device->cname = "MCP79/MCP7A"; 279 device->cname = "MCP79/MCP7A";
@@ -298,7 +298,7 @@ nv50_identify(struct nouveau_device *device)
298 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass; 298 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
299 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 299 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
300 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 300 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
301 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 301 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
302 break; 302 break;
303 case 0xa3: 303 case 0xa3:
304 device->cname = "GT215"; 304 device->cname = "GT215";
@@ -324,7 +324,7 @@ nv50_identify(struct nouveau_device *device)
324 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 324 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
325 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 325 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
326 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass; 326 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
327 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 327 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
328 break; 328 break;
329 case 0xa5: 329 case 0xa5:
330 device->cname = "GT216"; 330 device->cname = "GT216";
@@ -349,7 +349,7 @@ nv50_identify(struct nouveau_device *device)
349 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 349 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
350 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 350 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
351 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass; 351 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
352 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 352 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
353 break; 353 break;
354 case 0xa8: 354 case 0xa8:
355 device->cname = "GT218"; 355 device->cname = "GT218";
@@ -374,7 +374,7 @@ nv50_identify(struct nouveau_device *device)
374 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 374 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
375 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 375 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
376 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass; 376 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
377 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 377 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
378 break; 378 break;
379 case 0xaf: 379 case 0xaf:
380 device->cname = "MCP89"; 380 device->cname = "MCP89";
@@ -399,7 +399,7 @@ nv50_identify(struct nouveau_device *device)
399 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 399 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
400 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 400 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
401 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass; 401 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
402 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 402 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
403 break; 403 break;
404 default: 404 default:
405 nv_fatal(device, "unknown Tesla chipset\n"); 405 nv_fatal(device, "unknown Tesla chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
index 6697f0f9c293..f0461685a422 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
@@ -74,12 +74,12 @@ nvc0_identify(struct nouveau_device *device)
74 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; 74 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
75 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 75 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
76 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; 76 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
77 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 77 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
78 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 78 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
79 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 79 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
80 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 80 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
81 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; 81 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
82 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 82 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
83 break; 83 break;
84 case 0xc4: 84 case 0xc4:
85 device->cname = "GF104"; 85 device->cname = "GF104";
@@ -102,12 +102,12 @@ nvc0_identify(struct nouveau_device *device)
102 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; 102 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
103 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 103 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
104 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; 104 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
105 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 105 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
106 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 106 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
107 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 107 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
108 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 108 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
109 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; 109 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
110 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 110 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
111 break; 111 break;
112 case 0xc3: 112 case 0xc3:
113 device->cname = "GF106"; 113 device->cname = "GF106";
@@ -130,12 +130,12 @@ nvc0_identify(struct nouveau_device *device)
130 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; 130 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
131 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 131 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
132 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; 132 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
133 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 133 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
134 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 134 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
135 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 135 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
136 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 136 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
137 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; 137 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
138 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 138 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
139 break; 139 break;
140 case 0xce: 140 case 0xce:
141 device->cname = "GF114"; 141 device->cname = "GF114";
@@ -158,12 +158,12 @@ nvc0_identify(struct nouveau_device *device)
158 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; 158 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
159 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 159 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
160 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; 160 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
161 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 161 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
162 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 162 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
163 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 163 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
164 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 164 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
165 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; 165 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
166 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 166 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
167 break; 167 break;
168 case 0xcf: 168 case 0xcf:
169 device->cname = "GF116"; 169 device->cname = "GF116";
@@ -186,12 +186,12 @@ nvc0_identify(struct nouveau_device *device)
186 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; 186 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
187 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 187 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
188 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; 188 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
189 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 189 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
190 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 190 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
191 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 191 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
192 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 192 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
193 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; 193 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
194 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 194 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
195 break; 195 break;
196 case 0xc1: 196 case 0xc1:
197 device->cname = "GF108"; 197 device->cname = "GF108";
@@ -214,12 +214,12 @@ nvc0_identify(struct nouveau_device *device)
214 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; 214 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
215 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 215 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
216 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; 216 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
217 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 217 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
218 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 218 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
219 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 219 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
220 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 220 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
221 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; 221 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
222 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 222 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
223 break; 223 break;
224 case 0xc8: 224 case 0xc8:
225 device->cname = "GF110"; 225 device->cname = "GF110";
@@ -242,12 +242,12 @@ nvc0_identify(struct nouveau_device *device)
242 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; 242 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
243 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 243 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
244 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; 244 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
245 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 245 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
246 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 246 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
247 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 247 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
248 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 248 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
249 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; 249 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
250 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 250 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
251 break; 251 break;
252 case 0xd9: 252 case 0xd9:
253 device->cname = "GF119"; 253 device->cname = "GF119";
@@ -266,13 +266,13 @@ nvc0_identify(struct nouveau_device *device)
266 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 266 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
267 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 267 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
268 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 268 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
269 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; 269 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
270 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; 270 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
271 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 271 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
272 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; 272 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
273 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 273 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
274 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 274 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
275 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 275 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
276 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 276 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
277 device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass; 277 device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
278 break; 278 break;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
index 4a280b7ab853..03a652876e73 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
@@ -45,6 +45,9 @@
45#include <engine/graph.h> 45#include <engine/graph.h>
46#include <engine/disp.h> 46#include <engine/disp.h>
47#include <engine/copy.h> 47#include <engine/copy.h>
48#include <engine/bsp.h>
49#include <engine/vp.h>
50#include <engine/ppp.h>
48 51
49int 52int
50nve0_identify(struct nouveau_device *device) 53nve0_identify(struct nouveau_device *device)
@@ -67,13 +70,16 @@ nve0_identify(struct nouveau_device *device)
67 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 70 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
68 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 71 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
69 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 72 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
70 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; 73 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
71 device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass; 74 device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
72 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 75 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
73 device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass; 76 device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
74 device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass; 77 device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass;
75 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass; 78 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
76 device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass; 79 device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
80 device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
81 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
82 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
77 break; 83 break;
78 case 0xe7: 84 case 0xe7:
79 device->cname = "GK107"; 85 device->cname = "GK107";
@@ -92,13 +98,44 @@ nve0_identify(struct nouveau_device *device)
92 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass; 98 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
93 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; 99 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
94 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; 100 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
95 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass; 101 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
96 device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass; 102 device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
97 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 103 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
98 device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass; 104 device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
99 device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass; 105 device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass;
100 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass; 106 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
101 device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass; 107 device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
108 device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
109 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
110 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
111 break;
112 case 0xe6:
113 device->cname = "GK106";
114 device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
115 device->oclass[NVDEV_SUBDEV_GPIO ] = &nvd0_gpio_oclass;
116 device->oclass[NVDEV_SUBDEV_I2C ] = &nouveau_i2c_oclass;
117 device->oclass[NVDEV_SUBDEV_CLOCK ] = &nvc0_clock_oclass;
118 device->oclass[NVDEV_SUBDEV_THERM ] = &nv50_therm_oclass;
119 device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
120 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv50_devinit_oclass;
121 device->oclass[NVDEV_SUBDEV_MC ] = &nvc0_mc_oclass;
122 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
123 device->oclass[NVDEV_SUBDEV_FB ] = &nvc0_fb_oclass;
124 device->oclass[NVDEV_SUBDEV_LTCG ] = &nvc0_ltcg_oclass;
125 device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
126 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
127 device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
128 device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
129 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
130 device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
131 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
132 device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
133 device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass;
134 device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
135 device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
136 device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
137 device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
138 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
102 break; 139 break;
103 default: 140 default:
104 nv_fatal(device, "unknown Kepler chipset\n"); 141 nv_fatal(device, "unknown Kepler chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
index 61becfa732e9..ae7249b09797 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
@@ -22,6 +22,10 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/bios.h>
26#include <subdev/bios/dcb.h>
27#include <subdev/bios/disp.h>
28#include <subdev/bios/init.h>
25#include <subdev/devinit.h> 29#include <subdev/devinit.h>
26#include <subdev/vga.h> 30#include <subdev/vga.h>
27 31
@@ -55,7 +59,12 @@ nv50_devinit_dtor(struct nouveau_object *object)
55static int 59static int
56nv50_devinit_init(struct nouveau_object *object) 60nv50_devinit_init(struct nouveau_object *object)
57{ 61{
62 struct nouveau_bios *bios = nouveau_bios(object);
58 struct nv50_devinit_priv *priv = (void *)object; 63 struct nv50_devinit_priv *priv = (void *)object;
64 struct nvbios_outp info;
65 struct dcb_output outp;
66 u8 ver = 0xff, hdr, cnt, len;
67 int ret, i = 0;
59 68
60 if (!priv->base.post) { 69 if (!priv->base.post) {
61 if (!nv_rdvgac(priv, 0, 0x00) && 70 if (!nv_rdvgac(priv, 0, 0x00) &&
@@ -65,7 +74,30 @@ nv50_devinit_init(struct nouveau_object *object)
65 } 74 }
66 } 75 }
67 76
68 return nouveau_devinit_init(&priv->base); 77 ret = nouveau_devinit_init(&priv->base);
78 if (ret)
79 return ret;
80
81 /* if we ran the init tables, execute first script pointer for each
82 * display table output entry that has a matching dcb entry.
83 */
84 while (priv->base.post && ver) {
85 u16 data = nvbios_outp_parse(bios, i++, &ver, &hdr, &cnt, &len, &info);
86 if (data && dcb_outp_match(bios, info.type, info.mask, &ver, &len, &outp)) {
87 struct nvbios_init init = {
88 .subdev = nv_subdev(priv),
89 .bios = bios,
90 .offset = info.script[0],
91 .outp = &outp,
92 .crtc = -1,
93 .execute = 1,
94 };
95
96 nvbios_exec(&init);
97 }
98 };
99
100 return 0;
69} 101}
70 102
71static int 103static int
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
index f0086de8af31..d6d16007ec1a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
@@ -57,25 +57,45 @@ nouveau_fb_bios_memtype(struct nouveau_bios *bios)
57} 57}
58 58
59int 59int
60nouveau_fb_init(struct nouveau_fb *pfb) 60nouveau_fb_preinit(struct nouveau_fb *pfb)
61{ 61{
62 int ret, i; 62 static const char *name[] = {
63 [NV_MEM_TYPE_UNKNOWN] = "unknown",
64 [NV_MEM_TYPE_STOLEN ] = "stolen system memory",
65 [NV_MEM_TYPE_SGRAM ] = "SGRAM",
66 [NV_MEM_TYPE_SDRAM ] = "SDRAM",
67 [NV_MEM_TYPE_DDR1 ] = "DDR1",
68 [NV_MEM_TYPE_DDR2 ] = "DDR2",
69 [NV_MEM_TYPE_DDR3 ] = "DDR3",
70 [NV_MEM_TYPE_GDDR2 ] = "GDDR2",
71 [NV_MEM_TYPE_GDDR3 ] = "GDDR3",
72 [NV_MEM_TYPE_GDDR4 ] = "GDDR4",
73 [NV_MEM_TYPE_GDDR5 ] = "GDDR5",
74 };
75 int ret, tags;
63 76
64 ret = nouveau_subdev_init(&pfb->base); 77 tags = pfb->ram.init(pfb);
65 if (ret) 78 if (tags < 0 || !pfb->ram.size) {
66 return ret; 79 nv_fatal(pfb, "error detecting memory configuration!!\n");
80 return (tags < 0) ? tags : -ERANGE;
81 }
67 82
68 for (i = 0; i < pfb->tile.regions; i++) 83 if (!nouveau_mm_initialised(&pfb->vram)) {
69 pfb->tile.prog(pfb, i, &pfb->tile.region[i]); 84 ret = nouveau_mm_init(&pfb->vram, 0, pfb->ram.size >> 12, 1);
85 if (ret)
86 return ret;
87 }
70 88
71 return 0; 89 if (!nouveau_mm_initialised(&pfb->tags) && tags) {
72} 90 ret = nouveau_mm_init(&pfb->tags, 0, ++tags, 1);
91 if (ret)
92 return ret;
93 }
73 94
74int 95 nv_info(pfb, "RAM type: %s\n", name[pfb->ram.type]);
75_nouveau_fb_init(struct nouveau_object *object) 96 nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram.size >> 20));
76{ 97 nv_info(pfb, " ZCOMP: %d tags\n", tags);
77 struct nouveau_fb *pfb = (void *)object; 98 return 0;
78 return nouveau_fb_init(pfb);
79} 99}
80 100
81void 101void
@@ -85,12 +105,8 @@ nouveau_fb_destroy(struct nouveau_fb *pfb)
85 105
86 for (i = 0; i < pfb->tile.regions; i++) 106 for (i = 0; i < pfb->tile.regions; i++)
87 pfb->tile.fini(pfb, i, &pfb->tile.region[i]); 107 pfb->tile.fini(pfb, i, &pfb->tile.region[i]);
88 108 nouveau_mm_fini(&pfb->tags);
89 if (pfb->tags.block_size) 109 nouveau_mm_fini(&pfb->vram);
90 nouveau_mm_fini(&pfb->tags);
91
92 if (pfb->vram.block_size)
93 nouveau_mm_fini(&pfb->vram);
94 110
95 nouveau_subdev_destroy(&pfb->base); 111 nouveau_subdev_destroy(&pfb->base);
96} 112}
@@ -101,30 +117,24 @@ _nouveau_fb_dtor(struct nouveau_object *object)
101 struct nouveau_fb *pfb = (void *)object; 117 struct nouveau_fb *pfb = (void *)object;
102 nouveau_fb_destroy(pfb); 118 nouveau_fb_destroy(pfb);
103} 119}
104
105int 120int
106nouveau_fb_created(struct nouveau_fb *pfb) 121nouveau_fb_init(struct nouveau_fb *pfb)
107{ 122{
108 static const char *name[] = { 123 int ret, i;
109 [NV_MEM_TYPE_UNKNOWN] = "unknown",
110 [NV_MEM_TYPE_STOLEN ] = "stolen system memory",
111 [NV_MEM_TYPE_SGRAM ] = "SGRAM",
112 [NV_MEM_TYPE_SDRAM ] = "SDRAM",
113 [NV_MEM_TYPE_DDR1 ] = "DDR1",
114 [NV_MEM_TYPE_DDR2 ] = "DDR2",
115 [NV_MEM_TYPE_DDR3 ] = "DDR3",
116 [NV_MEM_TYPE_GDDR2 ] = "GDDR2",
117 [NV_MEM_TYPE_GDDR3 ] = "GDDR3",
118 [NV_MEM_TYPE_GDDR4 ] = "GDDR4",
119 [NV_MEM_TYPE_GDDR5 ] = "GDDR5",
120 };
121 124
122 if (pfb->ram.size == 0) { 125 ret = nouveau_subdev_init(&pfb->base);
123 nv_fatal(pfb, "no vram detected!!\n"); 126 if (ret)
124 return -ERANGE; 127 return ret;
125 } 128
129 for (i = 0; i < pfb->tile.regions; i++)
130 pfb->tile.prog(pfb, i, &pfb->tile.region[i]);
126 131
127 nv_info(pfb, "RAM type: %s\n", name[pfb->ram.type]);
128 nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram.size >> 20));
129 return 0; 132 return 0;
130} 133}
134
135int
136_nouveau_fb_init(struct nouveau_object *object)
137{
138 struct nouveau_fb *pfb = (void *)object;
139 return nouveau_fb_init(pfb);
140}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
index eb06836b69f7..6e369f85361e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
@@ -56,6 +56,37 @@ nv04_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
56} 56}
57 57
58static int 58static int
59nv04_fb_vram_init(struct nouveau_fb *pfb)
60{
61 u32 boot0 = nv_rd32(pfb, NV04_PFB_BOOT_0);
62 if (boot0 & 0x00000100) {
63 pfb->ram.size = ((boot0 >> 12) & 0xf) * 2 + 2;
64 pfb->ram.size *= 1024 * 1024;
65 } else {
66 switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
67 case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
68 pfb->ram.size = 32 * 1024 * 1024;
69 break;
70 case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
71 pfb->ram.size = 16 * 1024 * 1024;
72 break;
73 case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
74 pfb->ram.size = 8 * 1024 * 1024;
75 break;
76 case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
77 pfb->ram.size = 4 * 1024 * 1024;
78 break;
79 }
80 }
81
82 if ((boot0 & 0x00000038) <= 0x10)
83 pfb->ram.type = NV_MEM_TYPE_SGRAM;
84 else
85 pfb->ram.type = NV_MEM_TYPE_SDRAM;
86 return 0;
87}
88
89static int
59nv04_fb_init(struct nouveau_object *object) 90nv04_fb_init(struct nouveau_object *object)
60{ 91{
61 struct nv04_fb_priv *priv = (void *)object; 92 struct nv04_fb_priv *priv = (void *)object;
@@ -79,7 +110,6 @@ nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
79 struct nouveau_object **pobject) 110 struct nouveau_object **pobject)
80{ 111{
81 struct nv04_fb_priv *priv; 112 struct nv04_fb_priv *priv;
82 u32 boot0;
83 int ret; 113 int ret;
84 114
85 ret = nouveau_fb_create(parent, engine, oclass, &priv); 115 ret = nouveau_fb_create(parent, engine, oclass, &priv);
@@ -87,35 +117,9 @@ nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
87 if (ret) 117 if (ret)
88 return ret; 118 return ret;
89 119
90 boot0 = nv_rd32(priv, NV04_PFB_BOOT_0);
91 if (boot0 & 0x00000100) {
92 priv->base.ram.size = ((boot0 >> 12) & 0xf) * 2 + 2;
93 priv->base.ram.size *= 1024 * 1024;
94 } else {
95 switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
96 case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
97 priv->base.ram.size = 32 * 1024 * 1024;
98 break;
99 case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
100 priv->base.ram.size = 16 * 1024 * 1024;
101 break;
102 case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
103 priv->base.ram.size = 8 * 1024 * 1024;
104 break;
105 case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
106 priv->base.ram.size = 4 * 1024 * 1024;
107 break;
108 }
109 }
110
111 if ((boot0 & 0x00000038) <= 0x10)
112 priv->base.ram.type = NV_MEM_TYPE_SGRAM;
113 else
114 priv->base.ram.type = NV_MEM_TYPE_SDRAM;
115
116
117 priv->base.memtype_valid = nv04_fb_memtype_valid; 120 priv->base.memtype_valid = nv04_fb_memtype_valid;
118 return nouveau_fb_created(&priv->base); 121 priv->base.ram.init = nv04_fb_vram_init;
122 return nouveau_fb_preinit(&priv->base);
119} 123}
120 124
121struct nouveau_oclass 125struct nouveau_oclass
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
index f037a422d2f4..edbbe26e858d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
@@ -30,7 +30,20 @@ struct nv10_fb_priv {
30 struct nouveau_fb base; 30 struct nouveau_fb base;
31}; 31};
32 32
33static void 33static int
34nv10_fb_vram_init(struct nouveau_fb *pfb)
35{
36 u32 cfg0 = nv_rd32(pfb, 0x100200);
37 if (cfg0 & 0x00000001)
38 pfb->ram.type = NV_MEM_TYPE_DDR1;
39 else
40 pfb->ram.type = NV_MEM_TYPE_SDRAM;
41
42 pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
43 return 0;
44}
45
46void
34nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch, 47nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
35 u32 flags, struct nouveau_fb_tile *tile) 48 u32 flags, struct nouveau_fb_tile *tile)
36{ 49{
@@ -39,7 +52,7 @@ nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
39 tile->pitch = pitch; 52 tile->pitch = pitch;
40} 53}
41 54
42static void 55void
43nv10_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) 56nv10_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
44{ 57{
45 tile->addr = 0; 58 tile->addr = 0;
@@ -54,6 +67,7 @@ nv10_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
54 nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit); 67 nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
55 nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch); 68 nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
56 nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr); 69 nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
70 nv_rd32(pfb, 0x100240 + (i * 0x10));
57} 71}
58 72
59static int 73static int
@@ -61,7 +75,6 @@ nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
61 struct nouveau_oclass *oclass, void *data, u32 size, 75 struct nouveau_oclass *oclass, void *data, u32 size,
62 struct nouveau_object **pobject) 76 struct nouveau_object **pobject)
63{ 77{
64 struct nouveau_device *device = nv_device(parent);
65 struct nv10_fb_priv *priv; 78 struct nv10_fb_priv *priv;
66 int ret; 79 int ret;
67 80
@@ -70,42 +83,13 @@ nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
70 if (ret) 83 if (ret)
71 return ret; 84 return ret;
72 85
73 if (device->chipset == 0x1a || device->chipset == 0x1f) {
74 struct pci_dev *bridge;
75 u32 mem, mib;
76
77 bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
78 if (!bridge) {
79 nv_fatal(device, "no bridge device\n");
80 return 0;
81 }
82
83 if (device->chipset == 0x1a) {
84 pci_read_config_dword(bridge, 0x7c, &mem);
85 mib = ((mem >> 6) & 31) + 1;
86 } else {
87 pci_read_config_dword(bridge, 0x84, &mem);
88 mib = ((mem >> 4) & 127) + 1;
89 }
90
91 priv->base.ram.type = NV_MEM_TYPE_STOLEN;
92 priv->base.ram.size = mib * 1024 * 1024;
93 } else {
94 u32 cfg0 = nv_rd32(priv, 0x100200);
95 if (cfg0 & 0x00000001)
96 priv->base.ram.type = NV_MEM_TYPE_DDR1;
97 else
98 priv->base.ram.type = NV_MEM_TYPE_SDRAM;
99
100 priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
101 }
102
103 priv->base.memtype_valid = nv04_fb_memtype_valid; 86 priv->base.memtype_valid = nv04_fb_memtype_valid;
87 priv->base.ram.init = nv10_fb_vram_init;
104 priv->base.tile.regions = 8; 88 priv->base.tile.regions = 8;
105 priv->base.tile.init = nv10_fb_tile_init; 89 priv->base.tile.init = nv10_fb_tile_init;
106 priv->base.tile.fini = nv10_fb_tile_fini; 90 priv->base.tile.fini = nv10_fb_tile_fini;
107 priv->base.tile.prog = nv10_fb_tile_prog; 91 priv->base.tile.prog = nv10_fb_tile_prog;
108 return nouveau_fb_created(&priv->base); 92 return nouveau_fb_preinit(&priv->base);
109} 93}
110 94
111struct nouveau_oclass 95struct nouveau_oclass
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
new file mode 100644
index 000000000000..48366841db4a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
@@ -0,0 +1,89 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/fb.h>
28
29struct nv1a_fb_priv {
30 struct nouveau_fb base;
31};
32
33static int
34nv1a_fb_vram_init(struct nouveau_fb *pfb)
35{
36 struct pci_dev *bridge;
37 u32 mem, mib;
38
39 bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
40 if (!bridge) {
41 nv_fatal(pfb, "no bridge device\n");
42 return -ENODEV;
43 }
44
45 if (nv_device(pfb)->chipset == 0x1a) {
46 pci_read_config_dword(bridge, 0x7c, &mem);
47 mib = ((mem >> 6) & 31) + 1;
48 } else {
49 pci_read_config_dword(bridge, 0x84, &mem);
50 mib = ((mem >> 4) & 127) + 1;
51 }
52
53 pfb->ram.type = NV_MEM_TYPE_STOLEN;
54 pfb->ram.size = mib * 1024 * 1024;
55 return 0;
56}
57
58static int
59nv1a_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
60 struct nouveau_oclass *oclass, void *data, u32 size,
61 struct nouveau_object **pobject)
62{
63 struct nv1a_fb_priv *priv;
64 int ret;
65
66 ret = nouveau_fb_create(parent, engine, oclass, &priv);
67 *pobject = nv_object(priv);
68 if (ret)
69 return ret;
70
71 priv->base.memtype_valid = nv04_fb_memtype_valid;
72 priv->base.ram.init = nv1a_fb_vram_init;
73 priv->base.tile.regions = 8;
74 priv->base.tile.init = nv10_fb_tile_init;
75 priv->base.tile.fini = nv10_fb_tile_fini;
76 priv->base.tile.prog = nv10_fb_tile_prog;
77 return nouveau_fb_preinit(&priv->base);
78}
79
80struct nouveau_oclass
81nv1a_fb_oclass = {
82 .handle = NV_SUBDEV(FB, 0x1a),
83 .ofuncs = &(struct nouveau_ofuncs) {
84 .ctor = nv1a_fb_ctor,
85 .dtor = _nouveau_fb_dtor,
86 .init = _nouveau_fb_init,
87 .fini = _nouveau_fb_fini,
88 },
89};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
index 4b3578fcb7fb..5d14612a2c8e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
@@ -30,43 +30,54 @@ struct nv20_fb_priv {
30 struct nouveau_fb base; 30 struct nouveau_fb base;
31}; 31};
32 32
33static void 33int
34nv20_fb_vram_init(struct nouveau_fb *pfb)
35{
36 u32 pbus1218 = nv_rd32(pfb, 0x001218);
37
38 switch (pbus1218 & 0x00000300) {
39 case 0x00000000: pfb->ram.type = NV_MEM_TYPE_SDRAM; break;
40 case 0x00000100: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
41 case 0x00000200: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
42 case 0x00000300: pfb->ram.type = NV_MEM_TYPE_GDDR2; break;
43 }
44 pfb->ram.size = (nv_rd32(pfb, 0x10020c) & 0xff000000);
45 pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
46
47 return nv_rd32(pfb, 0x100320);
48}
49
50void
34nv20_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch, 51nv20_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
35 u32 flags, struct nouveau_fb_tile *tile) 52 u32 flags, struct nouveau_fb_tile *tile)
36{ 53{
37 struct nouveau_device *device = nv_device(pfb);
38 int bpp = (flags & 2) ? 32 : 16;
39
40 tile->addr = 0x00000001 | addr; 54 tile->addr = 0x00000001 | addr;
41 tile->limit = max(1u, addr + size) - 1; 55 tile->limit = max(1u, addr + size) - 1;
42 tile->pitch = pitch; 56 tile->pitch = pitch;
43
44 /* Allocate some of the on-die tag memory, used to store Z
45 * compression meta-data (most likely just a bitmap determining
46 * if a given tile is compressed or not).
47 */
48 size /= 256;
49 if (flags & 4) { 57 if (flags & 4) {
50 if (!nouveau_mm_head(&pfb->tags, 1, size, size, 1, &tile->tag)) { 58 pfb->tile.comp(pfb, i, size, flags, tile);
51 /* Enable Z compression */
52 tile->zcomp = tile->tag->offset;
53 if (device->chipset >= 0x25) {
54 if (bpp == 16)
55 tile->zcomp |= 0x00100000;
56 else
57 tile->zcomp |= 0x00200000;
58 } else {
59 tile->zcomp |= 0x80000000;
60 if (bpp != 16)
61 tile->zcomp |= 0x04000000;
62 }
63 }
64
65 tile->addr |= 2; 59 tile->addr |= 2;
66 } 60 }
67} 61}
68 62
69static void 63static void
64nv20_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
65 struct nouveau_fb_tile *tile)
66{
67 u32 tiles = DIV_ROUND_UP(size, 0x40);
68 u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
69 if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
70 if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */
71 else tile->zcomp = 0x04000000; /* Z24S8 */
72 tile->zcomp |= tile->tag->offset;
73 tile->zcomp |= 0x80000000; /* enable */
74#ifdef __BIG_ENDIAN
75 tile->zcomp |= 0x08000000;
76#endif
77 }
78}
79
80void
70nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) 81nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
71{ 82{
72 tile->addr = 0; 83 tile->addr = 0;
@@ -76,12 +87,13 @@ nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
76 nouveau_mm_free(&pfb->tags, &tile->tag); 87 nouveau_mm_free(&pfb->tags, &tile->tag);
77} 88}
78 89
79static void 90void
80nv20_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) 91nv20_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
81{ 92{
82 nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit); 93 nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
83 nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch); 94 nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
84 nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr); 95 nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
96 nv_rd32(pfb, 0x100240 + (i * 0x10));
85 nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp); 97 nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp);
86} 98}
87 99
@@ -90,9 +102,7 @@ nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
90 struct nouveau_oclass *oclass, void *data, u32 size, 102 struct nouveau_oclass *oclass, void *data, u32 size,
91 struct nouveau_object **pobject) 103 struct nouveau_object **pobject)
92{ 104{
93 struct nouveau_device *device = nv_device(parent);
94 struct nv20_fb_priv *priv; 105 struct nv20_fb_priv *priv;
95 u32 pbus1218;
96 int ret; 106 int ret;
97 107
98 ret = nouveau_fb_create(parent, engine, oclass, &priv); 108 ret = nouveau_fb_create(parent, engine, oclass, &priv);
@@ -100,28 +110,14 @@ nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
100 if (ret) 110 if (ret)
101 return ret; 111 return ret;
102 112
103 pbus1218 = nv_rd32(priv, 0x001218);
104 switch (pbus1218 & 0x00000300) {
105 case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
106 case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
107 case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
108 case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_GDDR2; break;
109 }
110 priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
111
112 if (device->chipset >= 0x25)
113 ret = nouveau_mm_init(&priv->base.tags, 0, 64 * 1024, 1);
114 else
115 ret = nouveau_mm_init(&priv->base.tags, 0, 32 * 1024, 1);
116 if (ret)
117 return ret;
118
119 priv->base.memtype_valid = nv04_fb_memtype_valid; 113 priv->base.memtype_valid = nv04_fb_memtype_valid;
114 priv->base.ram.init = nv20_fb_vram_init;
120 priv->base.tile.regions = 8; 115 priv->base.tile.regions = 8;
121 priv->base.tile.init = nv20_fb_tile_init; 116 priv->base.tile.init = nv20_fb_tile_init;
117 priv->base.tile.comp = nv20_fb_tile_comp;
122 priv->base.tile.fini = nv20_fb_tile_fini; 118 priv->base.tile.fini = nv20_fb_tile_fini;
123 priv->base.tile.prog = nv20_fb_tile_prog; 119 priv->base.tile.prog = nv20_fb_tile_prog;
124 return nouveau_fb_created(&priv->base); 120 return nouveau_fb_preinit(&priv->base);
125} 121}
126 122
127struct nouveau_oclass 123struct nouveau_oclass
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
new file mode 100644
index 000000000000..0042ace6bef9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
@@ -0,0 +1,81 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/fb.h>
28
29struct nv25_fb_priv {
30 struct nouveau_fb base;
31};
32
33static void
34nv25_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
35 struct nouveau_fb_tile *tile)
36{
37 u32 tiles = DIV_ROUND_UP(size, 0x40);
38 u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
39 if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
40 if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */
41 else tile->zcomp = 0x00200000; /* Z24S8 */
42 tile->zcomp |= tile->tag->offset;
43#ifdef __BIG_ENDIAN
44 tile->zcomp |= 0x01000000;
45#endif
46 }
47}
48
49static int
50nv25_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
51 struct nouveau_oclass *oclass, void *data, u32 size,
52 struct nouveau_object **pobject)
53{
54 struct nv25_fb_priv *priv;
55 int ret;
56
57 ret = nouveau_fb_create(parent, engine, oclass, &priv);
58 *pobject = nv_object(priv);
59 if (ret)
60 return ret;
61
62 priv->base.memtype_valid = nv04_fb_memtype_valid;
63 priv->base.ram.init = nv20_fb_vram_init;
64 priv->base.tile.regions = 8;
65 priv->base.tile.init = nv20_fb_tile_init;
66 priv->base.tile.comp = nv25_fb_tile_comp;
67 priv->base.tile.fini = nv20_fb_tile_fini;
68 priv->base.tile.prog = nv20_fb_tile_prog;
69 return nouveau_fb_preinit(&priv->base);
70}
71
72struct nouveau_oclass
73nv25_fb_oclass = {
74 .handle = NV_SUBDEV(FB, 0x25),
75 .ofuncs = &(struct nouveau_ofuncs) {
76 .ctor = nv25_fb_ctor,
77 .dtor = _nouveau_fb_dtor,
78 .init = _nouveau_fb_init,
79 .fini = _nouveau_fb_fini,
80 },
81};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
index cba67bc91390..a7ba0d048aec 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
@@ -34,17 +34,36 @@ void
34nv30_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch, 34nv30_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
35 u32 flags, struct nouveau_fb_tile *tile) 35 u32 flags, struct nouveau_fb_tile *tile)
36{ 36{
37 tile->addr = addr | 1; 37 /* for performance, select alternate bank offset for zeta */
38 if (!(flags & 4)) {
39 tile->addr = (0 << 4);
40 } else {
41 if (pfb->tile.comp) /* z compression */
42 pfb->tile.comp(pfb, i, size, flags, tile);
43 tile->addr = (1 << 4);
44 }
45
46 tile->addr |= 0x00000001; /* enable */
47 tile->addr |= addr;
38 tile->limit = max(1u, addr + size) - 1; 48 tile->limit = max(1u, addr + size) - 1;
39 tile->pitch = pitch; 49 tile->pitch = pitch;
40} 50}
41 51
42void 52static void
43nv30_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile) 53nv30_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
54 struct nouveau_fb_tile *tile)
44{ 55{
45 tile->addr = 0; 56 u32 tiles = DIV_ROUND_UP(size, 0x40);
46 tile->limit = 0; 57 u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
47 tile->pitch = 0; 58 if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
59 if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */
60 else tile->zcomp |= 0x02000000; /* Z24S8 */
61 tile->zcomp |= ((tile->tag->offset ) >> 6);
62 tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 12;
63#ifdef __BIG_ENDIAN
64 tile->zcomp |= 0x10000000;
65#endif
66 }
48} 67}
49 68
50static int 69static int
@@ -72,7 +91,7 @@ calc_ref(struct nv30_fb_priv *priv, int l, int k, int i)
72 return x; 91 return x;
73} 92}
74 93
75static int 94int
76nv30_fb_init(struct nouveau_object *object) 95nv30_fb_init(struct nouveau_object *object)
77{ 96{
78 struct nouveau_device *device = nv_device(object); 97 struct nouveau_device *device = nv_device(object);
@@ -111,7 +130,6 @@ nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
111 struct nouveau_object **pobject) 130 struct nouveau_object **pobject)
112{ 131{
113 struct nv30_fb_priv *priv; 132 struct nv30_fb_priv *priv;
114 u32 pbus1218;
115 int ret; 133 int ret;
116 134
117 ret = nouveau_fb_create(parent, engine, oclass, &priv); 135 ret = nouveau_fb_create(parent, engine, oclass, &priv);
@@ -119,21 +137,14 @@ nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
119 if (ret) 137 if (ret)
120 return ret; 138 return ret;
121 139
122 pbus1218 = nv_rd32(priv, 0x001218);
123 switch (pbus1218 & 0x00000300) {
124 case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
125 case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
126 case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
127 case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_GDDR2; break;
128 }
129 priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
130
131 priv->base.memtype_valid = nv04_fb_memtype_valid; 140 priv->base.memtype_valid = nv04_fb_memtype_valid;
141 priv->base.ram.init = nv20_fb_vram_init;
132 priv->base.tile.regions = 8; 142 priv->base.tile.regions = 8;
133 priv->base.tile.init = nv30_fb_tile_init; 143 priv->base.tile.init = nv30_fb_tile_init;
134 priv->base.tile.fini = nv30_fb_tile_fini; 144 priv->base.tile.comp = nv30_fb_tile_comp;
135 priv->base.tile.prog = nv10_fb_tile_prog; 145 priv->base.tile.fini = nv20_fb_tile_fini;
136 return nouveau_fb_created(&priv->base); 146 priv->base.tile.prog = nv20_fb_tile_prog;
147 return nouveau_fb_preinit(&priv->base);
137} 148}
138 149
139struct nouveau_oclass 150struct nouveau_oclass
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
new file mode 100644
index 000000000000..092f6f4f3521
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
@@ -0,0 +1,82 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/fb.h>
28
29struct nv35_fb_priv {
30 struct nouveau_fb base;
31};
32
33static void
34nv35_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
35 struct nouveau_fb_tile *tile)
36{
37 u32 tiles = DIV_ROUND_UP(size, 0x40);
38 u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
39 if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
40 if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */
41 else tile->zcomp |= 0x08000000; /* Z24S8 */
42 tile->zcomp |= ((tile->tag->offset ) >> 6);
43 tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 13;
44#ifdef __BIG_ENDIAN
45 tile->zcomp |= 0x40000000;
46#endif
47 }
48}
49
50static int
51nv35_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
52 struct nouveau_oclass *oclass, void *data, u32 size,
53 struct nouveau_object **pobject)
54{
55 struct nv35_fb_priv *priv;
56 int ret;
57
58 ret = nouveau_fb_create(parent, engine, oclass, &priv);
59 *pobject = nv_object(priv);
60 if (ret)
61 return ret;
62
63 priv->base.memtype_valid = nv04_fb_memtype_valid;
64 priv->base.ram.init = nv20_fb_vram_init;
65 priv->base.tile.regions = 8;
66 priv->base.tile.init = nv30_fb_tile_init;
67 priv->base.tile.comp = nv35_fb_tile_comp;
68 priv->base.tile.fini = nv20_fb_tile_fini;
69 priv->base.tile.prog = nv20_fb_tile_prog;
70 return nouveau_fb_preinit(&priv->base);
71}
72
73struct nouveau_oclass
74nv35_fb_oclass = {
75 .handle = NV_SUBDEV(FB, 0x35),
76 .ofuncs = &(struct nouveau_ofuncs) {
77 .ctor = nv35_fb_ctor,
78 .dtor = _nouveau_fb_dtor,
79 .init = nv30_fb_init,
80 .fini = _nouveau_fb_fini,
81 },
82};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
new file mode 100644
index 000000000000..797ab3b821b9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
@@ -0,0 +1,82 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/fb.h>
28
29struct nv36_fb_priv {
30 struct nouveau_fb base;
31};
32
33static void
34nv36_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
35 struct nouveau_fb_tile *tile)
36{
37 u32 tiles = DIV_ROUND_UP(size, 0x40);
38 u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
39 if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
40 if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */
41 else tile->zcomp |= 0x20000000; /* Z24S8 */
42 tile->zcomp |= ((tile->tag->offset ) >> 6);
43 tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 14;
44#ifdef __BIG_ENDIAN
45 tile->zcomp |= 0x80000000;
46#endif
47 }
48}
49
50static int
51nv36_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
52 struct nouveau_oclass *oclass, void *data, u32 size,
53 struct nouveau_object **pobject)
54{
55 struct nv36_fb_priv *priv;
56 int ret;
57
58 ret = nouveau_fb_create(parent, engine, oclass, &priv);
59 *pobject = nv_object(priv);
60 if (ret)
61 return ret;
62
63 priv->base.memtype_valid = nv04_fb_memtype_valid;
64 priv->base.ram.init = nv20_fb_vram_init;
65 priv->base.tile.regions = 8;
66 priv->base.tile.init = nv30_fb_tile_init;
67 priv->base.tile.comp = nv36_fb_tile_comp;
68 priv->base.tile.fini = nv20_fb_tile_fini;
69 priv->base.tile.prog = nv20_fb_tile_prog;
70 return nouveau_fb_preinit(&priv->base);
71}
72
73struct nouveau_oclass
74nv36_fb_oclass = {
75 .handle = NV_SUBDEV(FB, 0x36),
76 .ofuncs = &(struct nouveau_ofuncs) {
77 .ctor = nv36_fb_ctor,
78 .dtor = _nouveau_fb_dtor,
79 .init = nv30_fb_init,
80 .fini = _nouveau_fb_fini,
81 },
82};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
index 347a496fcad8..65e131b90f37 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
@@ -30,34 +30,37 @@ struct nv40_fb_priv {
30 struct nouveau_fb base; 30 struct nouveau_fb base;
31}; 31};
32 32
33static inline int 33static int
34nv44_graph_class(struct nouveau_device *device) 34nv40_fb_vram_init(struct nouveau_fb *pfb)
35{
36 if ((device->chipset & 0xf0) == 0x60)
37 return 1;
38
39 return !(0x0baf & (1 << (device->chipset & 0x0f)));
40}
41
42static void
43nv40_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
44{ 35{
45 nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit); 36 u32 pbus1218 = nv_rd32(pfb, 0x001218);
46 nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch); 37 switch (pbus1218 & 0x00000300) {
47 nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr); 38 case 0x00000000: pfb->ram.type = NV_MEM_TYPE_SDRAM; break;
48} 39 case 0x00000100: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
40 case 0x00000200: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
41 case 0x00000300: pfb->ram.type = NV_MEM_TYPE_DDR2; break;
42 }
49 43
50static void 44 pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
51nv40_fb_init_gart(struct nv40_fb_priv *priv) 45 pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
52{ 46 return nv_rd32(pfb, 0x100320);
53 nv_wr32(priv, 0x100800, 0x00000001);
54} 47}
55 48
56static void 49void
57nv44_fb_init_gart(struct nv40_fb_priv *priv) 50nv40_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
51 struct nouveau_fb_tile *tile)
58{ 52{
59 nv_wr32(priv, 0x100850, 0x80000000); 53 u32 tiles = DIV_ROUND_UP(size, 0x80);
60 nv_wr32(priv, 0x100800, 0x00000001); 54 u32 tags = round_up(tiles / pfb->ram.parts, 0x100);
55 if ( (flags & 2) &&
56 !nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
57 tile->zcomp = 0x28000000; /* Z24S8_SPLIT_GRAD */
58 tile->zcomp |= ((tile->tag->offset ) >> 8);
59 tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13;
60#ifdef __BIG_ENDIAN
61 tile->zcomp |= 0x40000000;
62#endif
63 }
61} 64}
62 65
63static int 66static int
@@ -70,19 +73,7 @@ nv40_fb_init(struct nouveau_object *object)
70 if (ret) 73 if (ret)
71 return ret; 74 return ret;
72 75
73 switch (nv_device(priv)->chipset) { 76 nv_mask(priv, 0x10033c, 0x00008000, 0x00000000);
74 case 0x40:
75 case 0x45:
76 nv_mask(priv, 0x10033c, 0x00008000, 0x00000000);
77 break;
78 default:
79 if (nv44_graph_class(nv_device(priv)))
80 nv44_fb_init_gart(priv);
81 else
82 nv40_fb_init_gart(priv);
83 break;
84 }
85
86 return 0; 77 return 0;
87} 78}
88 79
@@ -91,7 +82,6 @@ nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
91 struct nouveau_oclass *oclass, void *data, u32 size, 82 struct nouveau_oclass *oclass, void *data, u32 size,
92 struct nouveau_object **pobject) 83 struct nouveau_object **pobject)
93{ 84{
94 struct nouveau_device *device = nv_device(parent);
95 struct nv40_fb_priv *priv; 85 struct nv40_fb_priv *priv;
96 int ret; 86 int ret;
97 87
@@ -100,69 +90,14 @@ nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
100 if (ret) 90 if (ret)
101 return ret; 91 return ret;
102 92
103 /* 0x001218 is actually present on a few other NV4X I looked at,
104 * and even contains sane values matching 0x100474. From looking
105 * at various vbios images however, this isn't the case everywhere.
106 * So, I chose to use the same regs I've seen NVIDIA reading around
107 * the memory detection, hopefully that'll get us the right numbers
108 */
109 if (device->chipset == 0x40) {
110 u32 pbus1218 = nv_rd32(priv, 0x001218);
111 switch (pbus1218 & 0x00000300) {
112 case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
113 case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
114 case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
115 case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_DDR2; break;
116 }
117 } else
118 if (device->chipset == 0x49 || device->chipset == 0x4b) {
119 u32 pfb914 = nv_rd32(priv, 0x100914);
120 switch (pfb914 & 0x00000003) {
121 case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
122 case 0x00000001: priv->base.ram.type = NV_MEM_TYPE_DDR2; break;
123 case 0x00000002: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
124 case 0x00000003: break;
125 }
126 } else
127 if (device->chipset != 0x4e) {
128 u32 pfb474 = nv_rd32(priv, 0x100474);
129 if (pfb474 & 0x00000004)
130 priv->base.ram.type = NV_MEM_TYPE_GDDR3;
131 if (pfb474 & 0x00000002)
132 priv->base.ram.type = NV_MEM_TYPE_DDR2;
133 if (pfb474 & 0x00000001)
134 priv->base.ram.type = NV_MEM_TYPE_DDR1;
135 } else {
136 priv->base.ram.type = NV_MEM_TYPE_STOLEN;
137 }
138
139 priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
140
141 priv->base.memtype_valid = nv04_fb_memtype_valid; 93 priv->base.memtype_valid = nv04_fb_memtype_valid;
142 switch (device->chipset) { 94 priv->base.ram.init = nv40_fb_vram_init;
143 case 0x40: 95 priv->base.tile.regions = 8;
144 case 0x45:
145 priv->base.tile.regions = 8;
146 break;
147 case 0x46:
148 case 0x47:
149 case 0x49:
150 case 0x4b:
151 case 0x4c:
152 priv->base.tile.regions = 15;
153 break;
154 default:
155 priv->base.tile.regions = 12;
156 break;
157 }
158 priv->base.tile.init = nv30_fb_tile_init; 96 priv->base.tile.init = nv30_fb_tile_init;
159 priv->base.tile.fini = nv30_fb_tile_fini; 97 priv->base.tile.comp = nv40_fb_tile_comp;
160 if (device->chipset == 0x40) 98 priv->base.tile.fini = nv20_fb_tile_fini;
161 priv->base.tile.prog = nv10_fb_tile_prog; 99 priv->base.tile.prog = nv20_fb_tile_prog;
162 else 100 return nouveau_fb_preinit(&priv->base);
163 priv->base.tile.prog = nv40_fb_tile_prog;
164
165 return nouveau_fb_created(&priv->base);
166} 101}
167 102
168 103
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
new file mode 100644
index 000000000000..e9e5a08c41a1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
@@ -0,0 +1,106 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/fb.h>
28
29struct nv41_fb_priv {
30 struct nouveau_fb base;
31};
32
33int
34nv41_fb_vram_init(struct nouveau_fb *pfb)
35{
36 u32 pfb474 = nv_rd32(pfb, 0x100474);
37 if (pfb474 & 0x00000004)
38 pfb->ram.type = NV_MEM_TYPE_GDDR3;
39 if (pfb474 & 0x00000002)
40 pfb->ram.type = NV_MEM_TYPE_DDR2;
41 if (pfb474 & 0x00000001)
42 pfb->ram.type = NV_MEM_TYPE_DDR1;
43
44 pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
45 pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
46 return nv_rd32(pfb, 0x100320);
47}
48
49void
50nv41_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
51{
52 nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
53 nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
54 nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
55 nv_rd32(pfb, 0x100600 + (i * 0x10));
56 nv_wr32(pfb, 0x100700 + (i * 0x04), tile->zcomp);
57}
58
59int
60nv41_fb_init(struct nouveau_object *object)
61{
62 struct nv41_fb_priv *priv = (void *)object;
63 int ret;
64
65 ret = nouveau_fb_init(&priv->base);
66 if (ret)
67 return ret;
68
69 nv_wr32(priv, 0x100800, 0x00000001);
70 return 0;
71}
72
73static int
74nv41_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
75 struct nouveau_oclass *oclass, void *data, u32 size,
76 struct nouveau_object **pobject)
77{
78 struct nv41_fb_priv *priv;
79 int ret;
80
81 ret = nouveau_fb_create(parent, engine, oclass, &priv);
82 *pobject = nv_object(priv);
83 if (ret)
84 return ret;
85
86 priv->base.memtype_valid = nv04_fb_memtype_valid;
87 priv->base.ram.init = nv41_fb_vram_init;
88 priv->base.tile.regions = 12;
89 priv->base.tile.init = nv30_fb_tile_init;
90 priv->base.tile.comp = nv40_fb_tile_comp;
91 priv->base.tile.fini = nv20_fb_tile_fini;
92 priv->base.tile.prog = nv41_fb_tile_prog;
93 return nouveau_fb_preinit(&priv->base);
94}
95
96
97struct nouveau_oclass
98nv41_fb_oclass = {
99 .handle = NV_SUBDEV(FB, 0x41),
100 .ofuncs = &(struct nouveau_ofuncs) {
101 .ctor = nv41_fb_ctor,
102 .dtor = _nouveau_fb_dtor,
103 .init = nv41_fb_init,
104 .fini = _nouveau_fb_fini,
105 },
106};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
new file mode 100644
index 000000000000..ae89b5006f7a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
@@ -0,0 +1,114 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/fb.h>
28
29struct nv44_fb_priv {
30 struct nouveau_fb base;
31};
32
33int
34nv44_fb_vram_init(struct nouveau_fb *pfb)
35{
36 u32 pfb474 = nv_rd32(pfb, 0x100474);
37 if (pfb474 & 0x00000004)
38 pfb->ram.type = NV_MEM_TYPE_GDDR3;
39 if (pfb474 & 0x00000002)
40 pfb->ram.type = NV_MEM_TYPE_DDR2;
41 if (pfb474 & 0x00000001)
42 pfb->ram.type = NV_MEM_TYPE_DDR1;
43
44 pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
45 return 0;
46}
47
48static void
49nv44_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
50 u32 flags, struct nouveau_fb_tile *tile)
51{
52 tile->addr = 0x00000001; /* mode = vram */
53 tile->addr |= addr;
54 tile->limit = max(1u, addr + size) - 1;
55 tile->pitch = pitch;
56}
57
58void
59nv44_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
60{
61 nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
62 nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
63 nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
64 nv_rd32(pfb, 0x100600 + (i * 0x10));
65}
66
67int
68nv44_fb_init(struct nouveau_object *object)
69{
70 struct nv44_fb_priv *priv = (void *)object;
71 int ret;
72
73 ret = nouveau_fb_init(&priv->base);
74 if (ret)
75 return ret;
76
77 nv_wr32(priv, 0x100850, 0x80000000);
78 nv_wr32(priv, 0x100800, 0x00000001);
79 return 0;
80}
81
82static int
83nv44_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
84 struct nouveau_oclass *oclass, void *data, u32 size,
85 struct nouveau_object **pobject)
86{
87 struct nv44_fb_priv *priv;
88 int ret;
89
90 ret = nouveau_fb_create(parent, engine, oclass, &priv);
91 *pobject = nv_object(priv);
92 if (ret)
93 return ret;
94
95 priv->base.memtype_valid = nv04_fb_memtype_valid;
96 priv->base.ram.init = nv44_fb_vram_init;
97 priv->base.tile.regions = 12;
98 priv->base.tile.init = nv44_fb_tile_init;
99 priv->base.tile.fini = nv20_fb_tile_fini;
100 priv->base.tile.prog = nv44_fb_tile_prog;
101 return nouveau_fb_preinit(&priv->base);
102}
103
104
105struct nouveau_oclass
106nv44_fb_oclass = {
107 .handle = NV_SUBDEV(FB, 0x44),
108 .ofuncs = &(struct nouveau_ofuncs) {
109 .ctor = nv44_fb_ctor,
110 .dtor = _nouveau_fb_dtor,
111 .init = nv44_fb_init,
112 .fini = _nouveau_fb_fini,
113 },
114};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
new file mode 100644
index 000000000000..589b93ea2994
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
@@ -0,0 +1,79 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/fb.h>
28
29struct nv46_fb_priv {
30 struct nouveau_fb base;
31};
32
33void
34nv46_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
35 u32 flags, struct nouveau_fb_tile *tile)
36{
37 /* for performance, select alternate bank offset for zeta */
38 if (!(flags & 4)) tile->addr = (0 << 3);
39 else tile->addr = (1 << 3);
40
41 tile->addr |= 0x00000001; /* mode = vram */
42 tile->addr |= addr;
43 tile->limit = max(1u, addr + size) - 1;
44 tile->pitch = pitch;
45}
46
47static int
48nv46_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
49 struct nouveau_oclass *oclass, void *data, u32 size,
50 struct nouveau_object **pobject)
51{
52 struct nv46_fb_priv *priv;
53 int ret;
54
55 ret = nouveau_fb_create(parent, engine, oclass, &priv);
56 *pobject = nv_object(priv);
57 if (ret)
58 return ret;
59
60 priv->base.memtype_valid = nv04_fb_memtype_valid;
61 priv->base.ram.init = nv44_fb_vram_init;
62 priv->base.tile.regions = 15;
63 priv->base.tile.init = nv46_fb_tile_init;
64 priv->base.tile.fini = nv20_fb_tile_fini;
65 priv->base.tile.prog = nv44_fb_tile_prog;
66 return nouveau_fb_preinit(&priv->base);
67}
68
69
70struct nouveau_oclass
71nv46_fb_oclass = {
72 .handle = NV_SUBDEV(FB, 0x46),
73 .ofuncs = &(struct nouveau_ofuncs) {
74 .ctor = nv46_fb_ctor,
75 .dtor = _nouveau_fb_dtor,
76 .init = nv44_fb_init,
77 .fini = _nouveau_fb_fini,
78 },
79};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
new file mode 100644
index 000000000000..818bba35b368
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
@@ -0,0 +1,66 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/fb.h>
28
29struct nv47_fb_priv {
30 struct nouveau_fb base;
31};
32
33static int
34nv47_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
35 struct nouveau_oclass *oclass, void *data, u32 size,
36 struct nouveau_object **pobject)
37{
38 struct nv47_fb_priv *priv;
39 int ret;
40
41 ret = nouveau_fb_create(parent, engine, oclass, &priv);
42 *pobject = nv_object(priv);
43 if (ret)
44 return ret;
45
46 priv->base.memtype_valid = nv04_fb_memtype_valid;
47 priv->base.ram.init = nv41_fb_vram_init;
48 priv->base.tile.regions = 15;
49 priv->base.tile.init = nv30_fb_tile_init;
50 priv->base.tile.comp = nv40_fb_tile_comp;
51 priv->base.tile.fini = nv20_fb_tile_fini;
52 priv->base.tile.prog = nv41_fb_tile_prog;
53 return nouveau_fb_preinit(&priv->base);
54}
55
56
57struct nouveau_oclass
58nv47_fb_oclass = {
59 .handle = NV_SUBDEV(FB, 0x47),
60 .ofuncs = &(struct nouveau_ofuncs) {
61 .ctor = nv47_fb_ctor,
62 .dtor = _nouveau_fb_dtor,
63 .init = nv41_fb_init,
64 .fini = _nouveau_fb_fini,
65 },
66};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
new file mode 100644
index 000000000000..84a31af16ab4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
@@ -0,0 +1,84 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/fb.h>
28
29struct nv49_fb_priv {
30 struct nouveau_fb base;
31};
32
33static int
34nv49_fb_vram_init(struct nouveau_fb *pfb)
35{
36 u32 pfb914 = nv_rd32(pfb, 0x100914);
37
38 switch (pfb914 & 0x00000003) {
39 case 0x00000000: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
40 case 0x00000001: pfb->ram.type = NV_MEM_TYPE_DDR2; break;
41 case 0x00000002: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
42 case 0x00000003: break;
43 }
44
45 pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
46 pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
47 return nv_rd32(pfb, 0x100320);
48}
49
50static int
51nv49_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
52 struct nouveau_oclass *oclass, void *data, u32 size,
53 struct nouveau_object **pobject)
54{
55 struct nv49_fb_priv *priv;
56 int ret;
57
58 ret = nouveau_fb_create(parent, engine, oclass, &priv);
59 *pobject = nv_object(priv);
60 if (ret)
61 return ret;
62
63 priv->base.memtype_valid = nv04_fb_memtype_valid;
64 priv->base.ram.init = nv49_fb_vram_init;
65 priv->base.tile.regions = 15;
66 priv->base.tile.init = nv30_fb_tile_init;
67 priv->base.tile.comp = nv40_fb_tile_comp;
68 priv->base.tile.fini = nv20_fb_tile_fini;
69 priv->base.tile.prog = nv41_fb_tile_prog;
70
71 return nouveau_fb_preinit(&priv->base);
72}
73
74
75struct nouveau_oclass
76nv49_fb_oclass = {
77 .handle = NV_SUBDEV(FB, 0x49),
78 .ofuncs = &(struct nouveau_ofuncs) {
79 .ctor = nv49_fb_ctor,
80 .dtor = _nouveau_fb_dtor,
81 .init = nv41_fb_init,
82 .fini = _nouveau_fb_fini,
83 },
84};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
new file mode 100644
index 000000000000..797fd558170b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
@@ -0,0 +1,72 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/fb.h>
28
29struct nv4e_fb_priv {
30 struct nouveau_fb base;
31};
32
33static int
34nv4e_fb_vram_init(struct nouveau_fb *pfb)
35{
36 pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
37 pfb->ram.type = NV_MEM_TYPE_STOLEN;
38 return 0;
39}
40
41static int
42nv4e_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
43 struct nouveau_oclass *oclass, void *data, u32 size,
44 struct nouveau_object **pobject)
45{
46 struct nv4e_fb_priv *priv;
47 int ret;
48
49 ret = nouveau_fb_create(parent, engine, oclass, &priv);
50 *pobject = nv_object(priv);
51 if (ret)
52 return ret;
53
54 priv->base.memtype_valid = nv04_fb_memtype_valid;
55 priv->base.ram.init = nv4e_fb_vram_init;
56 priv->base.tile.regions = 12;
57 priv->base.tile.init = nv46_fb_tile_init;
58 priv->base.tile.fini = nv20_fb_tile_fini;
59 priv->base.tile.prog = nv44_fb_tile_prog;
60 return nouveau_fb_preinit(&priv->base);
61}
62
63struct nouveau_oclass
64nv4e_fb_oclass = {
65 .handle = NV_SUBDEV(FB, 0x4e),
66 .ofuncs = &(struct nouveau_ofuncs) {
67 .ctor = nv4e_fb_ctor,
68 .dtor = _nouveau_fb_dtor,
69 .init = nv44_fb_init,
70 .fini = _nouveau_fb_fini,
71 },
72};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
index 5f570806143a..487cb8c6c204 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -51,6 +51,101 @@ nv50_fb_memtype_valid(struct nouveau_fb *pfb, u32 memtype)
51 return types[(memtype & 0xff00) >> 8] != 0; 51 return types[(memtype & 0xff00) >> 8] != 0;
52} 52}
53 53
54static u32
55nv50_fb_vram_rblock(struct nouveau_fb *pfb)
56{
57 int i, parts, colbits, rowbitsa, rowbitsb, banks;
58 u64 rowsize, predicted;
59 u32 r0, r4, rt, ru, rblock_size;
60
61 r0 = nv_rd32(pfb, 0x100200);
62 r4 = nv_rd32(pfb, 0x100204);
63 rt = nv_rd32(pfb, 0x100250);
64 ru = nv_rd32(pfb, 0x001540);
65 nv_debug(pfb, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
66
67 for (i = 0, parts = 0; i < 8; i++) {
68 if (ru & (0x00010000 << i))
69 parts++;
70 }
71
72 colbits = (r4 & 0x0000f000) >> 12;
73 rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
74 rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
75 banks = 1 << (((r4 & 0x03000000) >> 24) + 2);
76
77 rowsize = parts * banks * (1 << colbits) * 8;
78 predicted = rowsize << rowbitsa;
79 if (r0 & 0x00000004)
80 predicted += rowsize << rowbitsb;
81
82 if (predicted != pfb->ram.size) {
83 nv_warn(pfb, "memory controller reports %d MiB VRAM\n",
84 (u32)(pfb->ram.size >> 20));
85 }
86
87 rblock_size = rowsize;
88 if (rt & 1)
89 rblock_size *= 3;
90
91 nv_debug(pfb, "rblock %d bytes\n", rblock_size);
92 return rblock_size;
93}
94
95static int
96nv50_fb_vram_init(struct nouveau_fb *pfb)
97{
98 struct nouveau_device *device = nv_device(pfb);
99 struct nouveau_bios *bios = nouveau_bios(device);
100 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
101 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
102 u32 size;
103 int ret;
104
105 pfb->ram.size = nv_rd32(pfb, 0x10020c);
106 pfb->ram.size = (pfb->ram.size & 0xffffff00) |
107 ((pfb->ram.size & 0x000000ff) << 32);
108
109 size = (pfb->ram.size >> 12) - rsvd_head - rsvd_tail;
110 switch (device->chipset) {
111 case 0xaa:
112 case 0xac:
113 case 0xaf: /* IGPs, no reordering, no real VRAM */
114 ret = nouveau_mm_init(&pfb->vram, rsvd_head, size, 1);
115 if (ret)
116 return ret;
117
118 pfb->ram.type = NV_MEM_TYPE_STOLEN;
119 pfb->ram.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
120 break;
121 default:
122 switch (nv_rd32(pfb, 0x100714) & 0x00000007) {
123 case 0: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
124 case 1:
125 if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
126 pfb->ram.type = NV_MEM_TYPE_DDR3;
127 else
128 pfb->ram.type = NV_MEM_TYPE_DDR2;
129 break;
130 case 2: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
131 case 3: pfb->ram.type = NV_MEM_TYPE_GDDR4; break;
132 case 4: pfb->ram.type = NV_MEM_TYPE_GDDR5; break;
133 default:
134 break;
135 }
136
137 ret = nouveau_mm_init(&pfb->vram, rsvd_head, size,
138 nv50_fb_vram_rblock(pfb) >> 12);
139 if (ret)
140 return ret;
141
142 pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
143 break;
144 }
145
146 return nv_rd32(pfb, 0x100320);
147}
148
54static int 149static int
55nv50_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin, 150nv50_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
56 u32 memtype, struct nouveau_mem **pmem) 151 u32 memtype, struct nouveau_mem **pmem)
@@ -140,195 +235,6 @@ nv50_fb_vram_del(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
140 kfree(mem); 235 kfree(mem);
141} 236}
142 237
143static u32
144nv50_vram_rblock(struct nv50_fb_priv *priv)
145{
146 int i, parts, colbits, rowbitsa, rowbitsb, banks;
147 u64 rowsize, predicted;
148 u32 r0, r4, rt, ru, rblock_size;
149
150 r0 = nv_rd32(priv, 0x100200);
151 r4 = nv_rd32(priv, 0x100204);
152 rt = nv_rd32(priv, 0x100250);
153 ru = nv_rd32(priv, 0x001540);
154 nv_debug(priv, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
155
156 for (i = 0, parts = 0; i < 8; i++) {
157 if (ru & (0x00010000 << i))
158 parts++;
159 }
160
161 colbits = (r4 & 0x0000f000) >> 12;
162 rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
163 rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
164 banks = 1 << (((r4 & 0x03000000) >> 24) + 2);
165
166 rowsize = parts * banks * (1 << colbits) * 8;
167 predicted = rowsize << rowbitsa;
168 if (r0 & 0x00000004)
169 predicted += rowsize << rowbitsb;
170
171 if (predicted != priv->base.ram.size) {
172 nv_warn(priv, "memory controller reports %d MiB VRAM\n",
173 (u32)(priv->base.ram.size >> 20));
174 }
175
176 rblock_size = rowsize;
177 if (rt & 1)
178 rblock_size *= 3;
179
180 nv_debug(priv, "rblock %d bytes\n", rblock_size);
181 return rblock_size;
182}
183
184static int
185nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
186 struct nouveau_oclass *oclass, void *data, u32 size,
187 struct nouveau_object **pobject)
188{
189 struct nouveau_device *device = nv_device(parent);
190 struct nouveau_bios *bios = nouveau_bios(device);
191 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
192 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
193 struct nv50_fb_priv *priv;
194 u32 tags;
195 int ret;
196
197 ret = nouveau_fb_create(parent, engine, oclass, &priv);
198 *pobject = nv_object(priv);
199 if (ret)
200 return ret;
201
202 switch (nv_rd32(priv, 0x100714) & 0x00000007) {
203 case 0: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
204 case 1:
205 if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
206 priv->base.ram.type = NV_MEM_TYPE_DDR3;
207 else
208 priv->base.ram.type = NV_MEM_TYPE_DDR2;
209 break;
210 case 2: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
211 case 3: priv->base.ram.type = NV_MEM_TYPE_GDDR4; break;
212 case 4: priv->base.ram.type = NV_MEM_TYPE_GDDR5; break;
213 default:
214 break;
215 }
216
217 priv->base.ram.size = nv_rd32(priv, 0x10020c);
218 priv->base.ram.size = (priv->base.ram.size & 0xffffff00) |
219 ((priv->base.ram.size & 0x000000ff) << 32);
220
221 tags = nv_rd32(priv, 0x100320);
222 ret = nouveau_mm_init(&priv->base.tags, 0, tags, 1);
223 if (ret)
224 return ret;
225
226 nv_debug(priv, "%d compression tags\n", tags);
227
228 size = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail;
229 switch (device->chipset) {
230 case 0xaa:
231 case 0xac:
232 case 0xaf: /* IGPs, no reordering, no real VRAM */
233 ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size, 1);
234 if (ret)
235 return ret;
236
237 priv->base.ram.stolen = (u64)nv_rd32(priv, 0x100e10) << 12;
238 priv->base.ram.type = NV_MEM_TYPE_STOLEN;
239 break;
240 default:
241 ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size,
242 nv50_vram_rblock(priv) >> 12);
243 if (ret)
244 return ret;
245
246 priv->base.ram.ranks = (nv_rd32(priv, 0x100200) & 0x4) ? 2 : 1;
247 break;
248 }
249
250 priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
251 if (priv->r100c08_page) {
252 priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page,
253 0, PAGE_SIZE,
254 PCI_DMA_BIDIRECTIONAL);
255 if (pci_dma_mapping_error(device->pdev, priv->r100c08))
256 nv_warn(priv, "failed 0x100c08 page map\n");
257 } else {
258 nv_warn(priv, "failed 0x100c08 page alloc\n");
259 }
260
261 priv->base.memtype_valid = nv50_fb_memtype_valid;
262 priv->base.ram.get = nv50_fb_vram_new;
263 priv->base.ram.put = nv50_fb_vram_del;
264 return nouveau_fb_created(&priv->base);
265}
266
267static void
268nv50_fb_dtor(struct nouveau_object *object)
269{
270 struct nouveau_device *device = nv_device(object);
271 struct nv50_fb_priv *priv = (void *)object;
272
273 if (priv->r100c08_page) {
274 pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE,
275 PCI_DMA_BIDIRECTIONAL);
276 __free_page(priv->r100c08_page);
277 }
278
279 nouveau_fb_destroy(&priv->base);
280}
281
282static int
283nv50_fb_init(struct nouveau_object *object)
284{
285 struct nouveau_device *device = nv_device(object);
286 struct nv50_fb_priv *priv = (void *)object;
287 int ret;
288
289 ret = nouveau_fb_init(&priv->base);
290 if (ret)
291 return ret;
292
293 /* Not a clue what this is exactly. Without pointing it at a
294 * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
295 * cause IOMMU "read from address 0" errors (rh#561267)
296 */
297 nv_wr32(priv, 0x100c08, priv->r100c08 >> 8);
298
299 /* This is needed to get meaningful information from 100c90
300 * on traps. No idea what these values mean exactly. */
301 switch (device->chipset) {
302 case 0x50:
303 nv_wr32(priv, 0x100c90, 0x000707ff);
304 break;
305 case 0xa3:
306 case 0xa5:
307 case 0xa8:
308 nv_wr32(priv, 0x100c90, 0x000d0fff);
309 break;
310 case 0xaf:
311 nv_wr32(priv, 0x100c90, 0x089d1fff);
312 break;
313 default:
314 nv_wr32(priv, 0x100c90, 0x001d07ff);
315 break;
316 }
317
318 return 0;
319}
320
321struct nouveau_oclass
322nv50_fb_oclass = {
323 .handle = NV_SUBDEV(FB, 0x50),
324 .ofuncs = &(struct nouveau_ofuncs) {
325 .ctor = nv50_fb_ctor,
326 .dtor = nv50_fb_dtor,
327 .init = nv50_fb_init,
328 .fini = _nouveau_fb_fini,
329 },
330};
331
332static const struct nouveau_enum vm_dispatch_subclients[] = { 238static const struct nouveau_enum vm_dispatch_subclients[] = {
333 { 0x00000000, "GRCTX", NULL }, 239 { 0x00000000, "GRCTX", NULL },
334 { 0x00000001, "NOTIFY", NULL }, 240 { 0x00000001, "NOTIFY", NULL },
@@ -424,11 +330,11 @@ static const struct nouveau_enum vm_fault[] = {
424 {} 330 {}
425}; 331};
426 332
427void 333static void
428nv50_fb_trap(struct nouveau_fb *pfb, int display) 334nv50_fb_intr(struct nouveau_subdev *subdev)
429{ 335{
430 struct nouveau_device *device = nv_device(pfb); 336 struct nouveau_device *device = nv_device(subdev);
431 struct nv50_fb_priv *priv = (void *)pfb; 337 struct nv50_fb_priv *priv = (void *)subdev;
432 const struct nouveau_enum *en, *cl; 338 const struct nouveau_enum *en, *cl;
433 u32 trap[6], idx, chan; 339 u32 trap[6], idx, chan;
434 u8 st0, st1, st2, st3; 340 u8 st0, st1, st2, st3;
@@ -445,9 +351,6 @@ nv50_fb_trap(struct nouveau_fb *pfb, int display)
445 } 351 }
446 nv_wr32(priv, 0x100c90, idx | 0x80000000); 352 nv_wr32(priv, 0x100c90, idx | 0x80000000);
447 353
448 if (!display)
449 return;
450
451 /* decode status bits into something more useful */ 354 /* decode status bits into something more useful */
452 if (device->chipset < 0xa3 || 355 if (device->chipset < 0xa3 ||
453 device->chipset == 0xaa || device->chipset == 0xac) { 356 device->chipset == 0xaa || device->chipset == 0xac) {
@@ -494,3 +397,101 @@ nv50_fb_trap(struct nouveau_fb *pfb, int display)
494 else 397 else
495 printk("0x%08x\n", st1); 398 printk("0x%08x\n", st1);
496} 399}
400
401static int
402nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
403 struct nouveau_oclass *oclass, void *data, u32 size,
404 struct nouveau_object **pobject)
405{
406 struct nouveau_device *device = nv_device(parent);
407 struct nv50_fb_priv *priv;
408 int ret;
409
410 ret = nouveau_fb_create(parent, engine, oclass, &priv);
411 *pobject = nv_object(priv);
412 if (ret)
413 return ret;
414
415 priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
416 if (priv->r100c08_page) {
417 priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page,
418 0, PAGE_SIZE,
419 PCI_DMA_BIDIRECTIONAL);
420 if (pci_dma_mapping_error(device->pdev, priv->r100c08))
421 nv_warn(priv, "failed 0x100c08 page map\n");
422 } else {
423 nv_warn(priv, "failed 0x100c08 page alloc\n");
424 }
425
426 priv->base.memtype_valid = nv50_fb_memtype_valid;
427 priv->base.ram.init = nv50_fb_vram_init;
428 priv->base.ram.get = nv50_fb_vram_new;
429 priv->base.ram.put = nv50_fb_vram_del;
430 nv_subdev(priv)->intr = nv50_fb_intr;
431 return nouveau_fb_preinit(&priv->base);
432}
433
434static void
435nv50_fb_dtor(struct nouveau_object *object)
436{
437 struct nouveau_device *device = nv_device(object);
438 struct nv50_fb_priv *priv = (void *)object;
439
440 if (priv->r100c08_page) {
441 pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE,
442 PCI_DMA_BIDIRECTIONAL);
443 __free_page(priv->r100c08_page);
444 }
445
446 nouveau_fb_destroy(&priv->base);
447}
448
449static int
450nv50_fb_init(struct nouveau_object *object)
451{
452 struct nouveau_device *device = nv_device(object);
453 struct nv50_fb_priv *priv = (void *)object;
454 int ret;
455
456 ret = nouveau_fb_init(&priv->base);
457 if (ret)
458 return ret;
459
460 /* Not a clue what this is exactly. Without pointing it at a
461 * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
462 * cause IOMMU "read from address 0" errors (rh#561267)
463 */
464 nv_wr32(priv, 0x100c08, priv->r100c08 >> 8);
465
466 /* This is needed to get meaningful information from 100c90
467 * on traps. No idea what these values mean exactly. */
468 switch (device->chipset) {
469 case 0x50:
470 nv_wr32(priv, 0x100c90, 0x000707ff);
471 break;
472 case 0xa3:
473 case 0xa5:
474 case 0xa8:
475 nv_wr32(priv, 0x100c90, 0x000d0fff);
476 break;
477 case 0xaf:
478 nv_wr32(priv, 0x100c90, 0x089d1fff);
479 break;
480 default:
481 nv_wr32(priv, 0x100c90, 0x001d07ff);
482 break;
483 }
484
485 return 0;
486}
487
488struct nouveau_oclass
489nv50_fb_oclass = {
490 .handle = NV_SUBDEV(FB, 0x50),
491 .ofuncs = &(struct nouveau_ofuncs) {
492 .ctor = nv50_fb_ctor,
493 .dtor = nv50_fb_dtor,
494 .init = nv50_fb_init,
495 .fini = _nouveau_fb_fini,
496 },
497};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index 9f59f2bf0079..7606ed15b6fa 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -62,6 +62,65 @@ nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
62} 62}
63 63
64static int 64static int
65nvc0_fb_vram_init(struct nouveau_fb *pfb)
66{
67 struct nouveau_bios *bios = nouveau_bios(pfb);
68 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
69 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
70 u32 parts = nv_rd32(pfb, 0x022438);
71 u32 pmask = nv_rd32(pfb, 0x022554);
72 u32 bsize = nv_rd32(pfb, 0x10f20c);
73 u32 offset, length;
74 bool uniform = true;
75 int ret, part;
76
77 nv_debug(pfb, "0x100800: 0x%08x\n", nv_rd32(pfb, 0x100800));
78 nv_debug(pfb, "parts 0x%08x mask 0x%08x\n", parts, pmask);
79
80 pfb->ram.type = nouveau_fb_bios_memtype(bios);
81 pfb->ram.ranks = (nv_rd32(pfb, 0x10f200) & 0x00000004) ? 2 : 1;
82
83 /* read amount of vram attached to each memory controller */
84 for (part = 0; part < parts; part++) {
85 if (!(pmask & (1 << part))) {
86 u32 psize = nv_rd32(pfb, 0x11020c + (part * 0x1000));
87 if (psize != bsize) {
88 if (psize < bsize)
89 bsize = psize;
90 uniform = false;
91 }
92
93 nv_debug(pfb, "%d: mem_amount 0x%08x\n", part, psize);
94 pfb->ram.size += (u64)psize << 20;
95 }
96 }
97
98 /* if all controllers have the same amount attached, there's no holes */
99 if (uniform) {
100 offset = rsvd_head;
101 length = (pfb->ram.size >> 12) - rsvd_head - rsvd_tail;
102 return nouveau_mm_init(&pfb->vram, offset, length, 1);
103 }
104
105 /* otherwise, address lowest common amount from 0GiB */
106 ret = nouveau_mm_init(&pfb->vram, rsvd_head, (bsize << 8) * parts, 1);
107 if (ret)
108 return ret;
109
110 /* and the rest starting from (8GiB + common_size) */
111 offset = (0x0200000000ULL >> 12) + (bsize << 8);
112 length = (pfb->ram.size >> 12) - (bsize << 8) - rsvd_tail;
113
114 ret = nouveau_mm_init(&pfb->vram, offset, length, 0);
115 if (ret) {
116 nouveau_mm_fini(&pfb->vram);
117 return ret;
118 }
119
120 return 0;
121}
122
123static int
65nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin, 124nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
66 u32 memtype, struct nouveau_mem **pmem) 125 u32 memtype, struct nouveau_mem **pmem)
67{ 126{
@@ -86,14 +145,14 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
86 mem->memtype = type; 145 mem->memtype = type;
87 mem->size = size; 146 mem->size = size;
88 147
89 mutex_lock(&mm->mutex); 148 mutex_lock(&pfb->base.mutex);
90 do { 149 do {
91 if (back) 150 if (back)
92 ret = nouveau_mm_tail(mm, 1, size, ncmin, align, &r); 151 ret = nouveau_mm_tail(mm, 1, size, ncmin, align, &r);
93 else 152 else
94 ret = nouveau_mm_head(mm, 1, size, ncmin, align, &r); 153 ret = nouveau_mm_head(mm, 1, size, ncmin, align, &r);
95 if (ret) { 154 if (ret) {
96 mutex_unlock(&mm->mutex); 155 mutex_unlock(&pfb->base.mutex);
97 pfb->ram.put(pfb, &mem); 156 pfb->ram.put(pfb, &mem);
98 return ret; 157 return ret;
99 } 158 }
@@ -101,7 +160,7 @@ nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
101 list_add_tail(&r->rl_entry, &mem->regions); 160 list_add_tail(&r->rl_entry, &mem->regions);
102 size -= r->length; 161 size -= r->length;
103 } while (size); 162 } while (size);
104 mutex_unlock(&mm->mutex); 163 mutex_unlock(&pfb->base.mutex);
105 164
106 r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry); 165 r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
107 mem->offset = (u64)r->offset << 12; 166 mem->offset = (u64)r->offset << 12;
@@ -139,66 +198,6 @@ nvc0_fb_dtor(struct nouveau_object *object)
139} 198}
140 199
141static int 200static int
142nvc0_vram_detect(struct nvc0_fb_priv *priv)
143{
144 struct nouveau_bios *bios = nouveau_bios(priv);
145 struct nouveau_fb *pfb = &priv->base;
146 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
147 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
148 u32 parts = nv_rd32(priv, 0x022438);
149 u32 pmask = nv_rd32(priv, 0x022554);
150 u32 bsize = nv_rd32(priv, 0x10f20c);
151 u32 offset, length;
152 bool uniform = true;
153 int ret, part;
154
155 nv_debug(priv, "0x100800: 0x%08x\n", nv_rd32(priv, 0x100800));
156 nv_debug(priv, "parts 0x%08x mask 0x%08x\n", parts, pmask);
157
158 priv->base.ram.type = nouveau_fb_bios_memtype(bios);
159 priv->base.ram.ranks = (nv_rd32(priv, 0x10f200) & 0x00000004) ? 2 : 1;
160
161 /* read amount of vram attached to each memory controller */
162 for (part = 0; part < parts; part++) {
163 if (!(pmask & (1 << part))) {
164 u32 psize = nv_rd32(priv, 0x11020c + (part * 0x1000));
165 if (psize != bsize) {
166 if (psize < bsize)
167 bsize = psize;
168 uniform = false;
169 }
170
171 nv_debug(priv, "%d: mem_amount 0x%08x\n", part, psize);
172 priv->base.ram.size += (u64)psize << 20;
173 }
174 }
175
176 /* if all controllers have the same amount attached, there's no holes */
177 if (uniform) {
178 offset = rsvd_head;
179 length = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail;
180 return nouveau_mm_init(&pfb->vram, offset, length, 1);
181 }
182
183 /* otherwise, address lowest common amount from 0GiB */
184 ret = nouveau_mm_init(&pfb->vram, rsvd_head, (bsize << 8) * parts, 1);
185 if (ret)
186 return ret;
187
188 /* and the rest starting from (8GiB + common_size) */
189 offset = (0x0200000000ULL >> 12) + (bsize << 8);
190 length = (priv->base.ram.size >> 12) - (bsize << 8) - rsvd_tail;
191
192 ret = nouveau_mm_init(&pfb->vram, offset, length, 0);
193 if (ret) {
194 nouveau_mm_fini(&pfb->vram);
195 return ret;
196 }
197
198 return 0;
199}
200
201static int
202nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 201nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
203 struct nouveau_oclass *oclass, void *data, u32 size, 202 struct nouveau_oclass *oclass, void *data, u32 size,
204 struct nouveau_object **pobject) 203 struct nouveau_object **pobject)
@@ -213,13 +212,10 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
213 return ret; 212 return ret;
214 213
215 priv->base.memtype_valid = nvc0_fb_memtype_valid; 214 priv->base.memtype_valid = nvc0_fb_memtype_valid;
215 priv->base.ram.init = nvc0_fb_vram_init;
216 priv->base.ram.get = nvc0_fb_vram_new; 216 priv->base.ram.get = nvc0_fb_vram_new;
217 priv->base.ram.put = nv50_fb_vram_del; 217 priv->base.ram.put = nv50_fb_vram_del;
218 218
219 ret = nvc0_vram_detect(priv);
220 if (ret)
221 return ret;
222
223 priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO); 219 priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
224 if (!priv->r100c10_page) 220 if (!priv->r100c10_page)
225 return -ENOMEM; 221 return -ENOMEM;
@@ -229,7 +225,7 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
229 if (pci_dma_mapping_error(device->pdev, priv->r100c10)) 225 if (pci_dma_mapping_error(device->pdev, priv->r100c10))
230 return -EFAULT; 226 return -EFAULT;
231 227
232 return nouveau_fb_created(&priv->base); 228 return nouveau_fb_preinit(&priv->base);
233} 229}
234 230
235 231
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
index acf818c58bf0..9fb0f9b92d49 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/base.c
@@ -43,10 +43,15 @@ static int
43nouveau_gpio_find(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line, 43nouveau_gpio_find(struct nouveau_gpio *gpio, int idx, u8 tag, u8 line,
44 struct dcb_gpio_func *func) 44 struct dcb_gpio_func *func)
45{ 45{
46 struct nouveau_bios *bios = nouveau_bios(gpio);
47 u8 ver, len;
48 u16 data;
49
46 if (line == 0xff && tag == 0xff) 50 if (line == 0xff && tag == 0xff)
47 return -EINVAL; 51 return -EINVAL;
48 52
49 if (!dcb_gpio_parse(nouveau_bios(gpio), idx, tag, line, func)) 53 data = dcb_gpio_match(bios, idx, tag, line, &ver, &len, func);
54 if (data)
50 return 0; 55 return 0;
51 56
52 /* Apple iMac G4 NV18 */ 57 /* Apple iMac G4 NV18 */
@@ -265,7 +270,7 @@ nouveau_gpio_init(struct nouveau_gpio *gpio)
265 int ret = nouveau_subdev_init(&gpio->base); 270 int ret = nouveau_subdev_init(&gpio->base);
266 if (ret == 0 && gpio->reset) { 271 if (ret == 0 && gpio->reset) {
267 if (dmi_check_system(gpio_reset_ids)) 272 if (dmi_check_system(gpio_reset_ids))
268 gpio->reset(gpio); 273 gpio->reset(gpio, DCB_GPIO_UNUSED);
269 } 274 }
270 return ret; 275 return ret;
271} 276}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
index f3502c961cd9..bf13a1200f26 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
@@ -29,15 +29,15 @@ struct nv50_gpio_priv {
29}; 29};
30 30
31static void 31static void
32nv50_gpio_reset(struct nouveau_gpio *gpio) 32nv50_gpio_reset(struct nouveau_gpio *gpio, u8 match)
33{ 33{
34 struct nouveau_bios *bios = nouveau_bios(gpio); 34 struct nouveau_bios *bios = nouveau_bios(gpio);
35 struct nv50_gpio_priv *priv = (void *)gpio; 35 struct nv50_gpio_priv *priv = (void *)gpio;
36 u8 ver, len;
36 u16 entry; 37 u16 entry;
37 u8 ver;
38 int ent = -1; 38 int ent = -1;
39 39
40 while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver))) { 40 while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
41 static const u32 regs[] = { 0xe100, 0xe28c }; 41 static const u32 regs[] = { 0xe100, 0xe28c };
42 u32 data = nv_ro32(bios, entry); 42 u32 data = nv_ro32(bios, entry);
43 u8 line = (data & 0x0000001f); 43 u8 line = (data & 0x0000001f);
@@ -48,7 +48,8 @@ nv50_gpio_reset(struct nouveau_gpio *gpio)
48 u32 val = (unk1 << 16) | unk0; 48 u32 val = (unk1 << 16) | unk0;
49 u32 reg = regs[line >> 4]; line &= 0x0f; 49 u32 reg = regs[line >> 4]; line &= 0x0f;
50 50
51 if (func == 0xff) 51 if ( func == DCB_GPIO_UNUSED ||
52 (match != DCB_GPIO_UNUSED && match != func))
52 continue; 53 continue;
53 54
54 gpio->set(gpio, 0, func, line, defs); 55 gpio->set(gpio, 0, func, line, defs);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
index 8d18fcad26e0..83e8b8f16e6a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nvd0.c
@@ -29,15 +29,15 @@ struct nvd0_gpio_priv {
29}; 29};
30 30
31static void 31static void
32nvd0_gpio_reset(struct nouveau_gpio *gpio) 32nvd0_gpio_reset(struct nouveau_gpio *gpio, u8 match)
33{ 33{
34 struct nouveau_bios *bios = nouveau_bios(gpio); 34 struct nouveau_bios *bios = nouveau_bios(gpio);
35 struct nvd0_gpio_priv *priv = (void *)gpio; 35 struct nvd0_gpio_priv *priv = (void *)gpio;
36 u8 ver, len;
36 u16 entry; 37 u16 entry;
37 u8 ver;
38 int ent = -1; 38 int ent = -1;
39 39
40 while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver))) { 40 while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
41 u32 data = nv_ro32(bios, entry); 41 u32 data = nv_ro32(bios, entry);
42 u8 line = (data & 0x0000003f); 42 u8 line = (data & 0x0000003f);
43 u8 defs = !!(data & 0x00000080); 43 u8 defs = !!(data & 0x00000080);
@@ -45,7 +45,8 @@ nvd0_gpio_reset(struct nouveau_gpio *gpio)
45 u8 unk0 = (data & 0x00ff0000) >> 16; 45 u8 unk0 = (data & 0x00ff0000) >> 16;
46 u8 unk1 = (data & 0x1f000000) >> 24; 46 u8 unk1 = (data & 0x1f000000) >> 24;
47 47
48 if (func == 0xff) 48 if ( func == DCB_GPIO_UNUSED ||
49 (match != DCB_GPIO_UNUSED && match != func))
49 continue; 50 continue;
50 51
51 gpio->set(gpio, 0, func, line, defs); 52 gpio->set(gpio, 0, func, line, defs);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
index fe1ebf199ba9..dc27e794a851 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
@@ -50,7 +50,7 @@ auxch_init(struct nouveau_i2c *aux, int ch)
50 ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50)); 50 ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
51 udelay(1); 51 udelay(1);
52 if (!timeout--) { 52 if (!timeout--) {
53 AUX_ERR("begin idle timeout 0x%08x", ctrl); 53 AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
54 return -EBUSY; 54 return -EBUSY;
55 } 55 }
56 } while (ctrl & 0x03010000); 56 } while (ctrl & 0x03010000);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
index 1188227ca6aa..6565f3dbbe04 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/base.c
@@ -40,15 +40,21 @@ nouveau_instobj_create_(struct nouveau_object *parent,
40 if (ret) 40 if (ret)
41 return ret; 41 return ret;
42 42
43 mutex_lock(&imem->base.mutex);
43 list_add(&iobj->head, &imem->list); 44 list_add(&iobj->head, &imem->list);
45 mutex_unlock(&imem->base.mutex);
44 return 0; 46 return 0;
45} 47}
46 48
47void 49void
48nouveau_instobj_destroy(struct nouveau_instobj *iobj) 50nouveau_instobj_destroy(struct nouveau_instobj *iobj)
49{ 51{
50 if (iobj->head.prev) 52 struct nouveau_subdev *subdev = nv_subdev(iobj->base.engine);
51 list_del(&iobj->head); 53
54 mutex_lock(&subdev->mutex);
55 list_del(&iobj->head);
56 mutex_unlock(&subdev->mutex);
57
52 return nouveau_object_destroy(&iobj->base); 58 return nouveau_object_destroy(&iobj->base);
53} 59}
54 60
@@ -88,6 +94,8 @@ nouveau_instmem_init(struct nouveau_instmem *imem)
88 if (ret) 94 if (ret)
89 return ret; 95 return ret;
90 96
97 mutex_lock(&imem->base.mutex);
98
91 list_for_each_entry(iobj, &imem->list, head) { 99 list_for_each_entry(iobj, &imem->list, head) {
92 if (iobj->suspend) { 100 if (iobj->suspend) {
93 for (i = 0; i < iobj->size; i += 4) 101 for (i = 0; i < iobj->size; i += 4)
@@ -97,6 +105,8 @@ nouveau_instmem_init(struct nouveau_instmem *imem)
97 } 105 }
98 } 106 }
99 107
108 mutex_unlock(&imem->base.mutex);
109
100 return 0; 110 return 0;
101} 111}
102 112
@@ -104,17 +114,26 @@ int
104nouveau_instmem_fini(struct nouveau_instmem *imem, bool suspend) 114nouveau_instmem_fini(struct nouveau_instmem *imem, bool suspend)
105{ 115{
106 struct nouveau_instobj *iobj; 116 struct nouveau_instobj *iobj;
107 int i; 117 int i, ret = 0;
108 118
109 if (suspend) { 119 if (suspend) {
120 mutex_lock(&imem->base.mutex);
121
110 list_for_each_entry(iobj, &imem->list, head) { 122 list_for_each_entry(iobj, &imem->list, head) {
111 iobj->suspend = vmalloc(iobj->size); 123 iobj->suspend = vmalloc(iobj->size);
112 if (iobj->suspend) { 124 if (!iobj->suspend) {
113 for (i = 0; i < iobj->size; i += 4) 125 ret = -ENOMEM;
114 iobj->suspend[i / 4] = nv_ro32(iobj, i); 126 break;
115 } else 127 }
116 return -ENOMEM; 128
129 for (i = 0; i < iobj->size; i += 4)
130 iobj->suspend[i / 4] = nv_ro32(iobj, i);
117 } 131 }
132
133 mutex_unlock(&imem->base.mutex);
134
135 if (ret)
136 return ret;
118 } 137 }
119 138
120 return nouveau_subdev_fini(&imem->base, suspend); 139 return nouveau_subdev_fini(&imem->base, suspend);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
index ba4d28b50368..f5bbd3834116 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
@@ -63,14 +63,14 @@ nv04_instobj_dtor(struct nouveau_object *object)
63} 63}
64 64
65static u32 65static u32
66nv04_instobj_rd32(struct nouveau_object *object, u32 addr) 66nv04_instobj_rd32(struct nouveau_object *object, u64 addr)
67{ 67{
68 struct nv04_instobj_priv *node = (void *)object; 68 struct nv04_instobj_priv *node = (void *)object;
69 return nv_ro32(object->engine, node->mem->offset + addr); 69 return nv_ro32(object->engine, node->mem->offset + addr);
70} 70}
71 71
72static void 72static void
73nv04_instobj_wr32(struct nouveau_object *object, u32 addr, u32 data) 73nv04_instobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
74{ 74{
75 struct nv04_instobj_priv *node = (void *)object; 75 struct nv04_instobj_priv *node = (void *)object;
76 nv_wo32(object->engine, node->mem->offset + addr, data); 76 nv_wo32(object->engine, node->mem->offset + addr, data);
@@ -173,13 +173,13 @@ nv04_instmem_dtor(struct nouveau_object *object)
173} 173}
174 174
175static u32 175static u32
176nv04_instmem_rd32(struct nouveau_object *object, u32 addr) 176nv04_instmem_rd32(struct nouveau_object *object, u64 addr)
177{ 177{
178 return nv_rd32(object, 0x700000 + addr); 178 return nv_rd32(object, 0x700000 + addr);
179} 179}
180 180
181static void 181static void
182nv04_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data) 182nv04_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
183{ 183{
184 return nv_wr32(object, 0x700000 + addr, data); 184 return nv_wr32(object, 0x700000 + addr, data);
185} 185}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
index 73c52ebd5932..da64253201ef 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
@@ -111,14 +111,14 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
111} 111}
112 112
113static u32 113static u32
114nv40_instmem_rd32(struct nouveau_object *object, u32 addr) 114nv40_instmem_rd32(struct nouveau_object *object, u64 addr)
115{ 115{
116 struct nv04_instmem_priv *priv = (void *)object; 116 struct nv04_instmem_priv *priv = (void *)object;
117 return ioread32_native(priv->iomem + addr); 117 return ioread32_native(priv->iomem + addr);
118} 118}
119 119
120static void 120static void
121nv40_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data) 121nv40_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
122{ 122{
123 struct nv04_instmem_priv *priv = (void *)object; 123 struct nv04_instmem_priv *priv = (void *)object;
124 iowrite32_native(data, priv->iomem + addr); 124 iowrite32_native(data, priv->iomem + addr);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
index 27ef0891d10b..cfc7e31461de 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
@@ -76,7 +76,7 @@ nv50_instobj_dtor(struct nouveau_object *object)
76} 76}
77 77
78static u32 78static u32
79nv50_instobj_rd32(struct nouveau_object *object, u32 offset) 79nv50_instobj_rd32(struct nouveau_object *object, u64 offset)
80{ 80{
81 struct nv50_instmem_priv *priv = (void *)object->engine; 81 struct nv50_instmem_priv *priv = (void *)object->engine;
82 struct nv50_instobj_priv *node = (void *)object; 82 struct nv50_instobj_priv *node = (void *)object;
@@ -96,7 +96,7 @@ nv50_instobj_rd32(struct nouveau_object *object, u32 offset)
96} 96}
97 97
98static void 98static void
99nv50_instobj_wr32(struct nouveau_object *object, u32 offset, u32 data) 99nv50_instobj_wr32(struct nouveau_object *object, u64 offset, u32 data)
100{ 100{
101 struct nv50_instmem_priv *priv = (void *)object->engine; 101 struct nv50_instmem_priv *priv = (void *)object->engine;
102 struct nv50_instobj_priv *node = (void *)object; 102 struct nv50_instobj_priv *node = (void *)object;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index de5721cfc4c2..8379aafa6e1b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -30,20 +30,20 @@ nouveau_mc_intr(struct nouveau_subdev *subdev)
30 struct nouveau_mc *pmc = nouveau_mc(subdev); 30 struct nouveau_mc *pmc = nouveau_mc(subdev);
31 const struct nouveau_mc_intr *map = pmc->intr_map; 31 const struct nouveau_mc_intr *map = pmc->intr_map;
32 struct nouveau_subdev *unit; 32 struct nouveau_subdev *unit;
33 u32 stat; 33 u32 stat, intr;
34 34
35 stat = nv_rd32(pmc, 0x000100); 35 intr = stat = nv_rd32(pmc, 0x000100);
36 while (stat && map->stat) { 36 while (stat && map->stat) {
37 if (stat & map->stat) { 37 if (stat & map->stat) {
38 unit = nouveau_subdev(subdev, map->unit); 38 unit = nouveau_subdev(subdev, map->unit);
39 if (unit && unit->intr) 39 if (unit && unit->intr)
40 unit->intr(unit); 40 unit->intr(unit);
41 stat &= ~map->stat; 41 intr &= ~map->stat;
42 } 42 }
43 map++; 43 map++;
44 } 44 }
45 45
46 if (stat) { 46 if (intr) {
47 nv_error(pmc, "unknown intr 0x%08x\n", stat); 47 nv_error(pmc, "unknown intr 0x%08x\n", stat);
48 } 48 }
49} 49}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index cedf33b02977..8d759f830323 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -39,6 +39,7 @@ nv50_mc_intr[] = {
39 { 0x00200000, NVDEV_SUBDEV_GPIO }, 39 { 0x00200000, NVDEV_SUBDEV_GPIO },
40 { 0x04000000, NVDEV_ENGINE_DISP }, 40 { 0x04000000, NVDEV_ENGINE_DISP },
41 { 0x80000000, NVDEV_ENGINE_SW }, 41 { 0x80000000, NVDEV_ENGINE_SW },
42 { 0x0000d101, NVDEV_SUBDEV_FB },
42 {}, 43 {},
43}; 44};
44 45
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index a001e4c4d38d..ceb5c83f9459 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -40,6 +40,7 @@ nv98_mc_intr[] = {
40 { 0x00400000, NVDEV_ENGINE_COPY0 }, /* NVA3- */ 40 { 0x00400000, NVDEV_ENGINE_COPY0 }, /* NVA3- */
41 { 0x04000000, NVDEV_ENGINE_DISP }, 41 { 0x04000000, NVDEV_ENGINE_DISP },
42 { 0x80000000, NVDEV_ENGINE_SW }, 42 { 0x80000000, NVDEV_ENGINE_SW },
43 { 0x0040d101, NVDEV_SUBDEV_FB },
43 {}, 44 {},
44}; 45};
45 46
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index c2b81e30a17d..92796682722d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -36,6 +36,7 @@ nvc0_mc_intr[] = {
36 { 0x00000100, NVDEV_ENGINE_FIFO }, 36 { 0x00000100, NVDEV_ENGINE_FIFO },
37 { 0x00001000, NVDEV_ENGINE_GR }, 37 { 0x00001000, NVDEV_ENGINE_GR },
38 { 0x00008000, NVDEV_ENGINE_BSP }, 38 { 0x00008000, NVDEV_ENGINE_BSP },
39 { 0x00020000, NVDEV_ENGINE_VP },
39 { 0x00100000, NVDEV_SUBDEV_TIMER }, 40 { 0x00100000, NVDEV_SUBDEV_TIMER },
40 { 0x00200000, NVDEV_SUBDEV_GPIO }, 41 { 0x00200000, NVDEV_SUBDEV_GPIO },
41 { 0x02000000, NVDEV_SUBDEV_LTCG }, 42 { 0x02000000, NVDEV_SUBDEV_LTCG },
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
index 93e3ddf7303a..e286e132c7e7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mxm/base.c
@@ -260,7 +260,7 @@ nouveau_mxm_create_(struct nouveau_object *parent,
260 260
261 data = mxm_table(bios, &ver, &len); 261 data = mxm_table(bios, &ver, &len);
262 if (!data || !(ver = nv_ro08(bios, data))) { 262 if (!data || !(ver = nv_ro08(bios, data))) {
263 nv_info(mxm, "no VBIOS data, nothing to do\n"); 263 nv_debug(mxm, "no VBIOS data, nothing to do\n");
264 return 0; 264 return 0;
265 } 265 }
266 266
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
index 082c11b75acb..77c67fc970e6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
@@ -352,7 +352,7 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
352 u64 mm_length = (offset + length) - mm_offset; 352 u64 mm_length = (offset + length) - mm_offset;
353 int ret; 353 int ret;
354 354
355 vm = *pvm = kzalloc(sizeof(*vm), GFP_KERNEL); 355 vm = kzalloc(sizeof(*vm), GFP_KERNEL);
356 if (!vm) 356 if (!vm)
357 return -ENOMEM; 357 return -ENOMEM;
358 358
@@ -376,6 +376,8 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
376 return ret; 376 return ret;
377 } 377 }
378 378
379 *pvm = vm;
380
379 return 0; 381 return 0;
380} 382}
381 383
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
index 49050d991e75..9474cfca6e4c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv41.c
@@ -67,7 +67,7 @@ nv41_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
67static void 67static void
68nv41_vm_flush(struct nouveau_vm *vm) 68nv41_vm_flush(struct nouveau_vm *vm)
69{ 69{
70 struct nv04_vm_priv *priv = (void *)vm->vmm; 70 struct nv04_vmmgr_priv *priv = (void *)vm->vmm;
71 71
72 mutex_lock(&nv_subdev(priv)->mutex); 72 mutex_lock(&nv_subdev(priv)->mutex);
73 nv_wr32(priv, 0x100810, 0x00000022); 73 nv_wr32(priv, 0x100810, 0x00000022);
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index cc79c796afee..41241922263f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -241,15 +241,31 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
241 241
242 if (unlikely(!abi16)) 242 if (unlikely(!abi16))
243 return -ENOMEM; 243 return -ENOMEM;
244 client = nv_client(abi16->client);
245 244
246 if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0) 245 if (!drm->channel)
247 return nouveau_abi16_put(abi16, -EINVAL); 246 return nouveau_abi16_put(abi16, -ENODEV);
248 247
248 client = nv_client(abi16->client);
249 device = nv_device(abi16->device); 249 device = nv_device(abi16->device);
250 imem = nouveau_instmem(device); 250 imem = nouveau_instmem(device);
251 pfb = nouveau_fb(device); 251 pfb = nouveau_fb(device);
252 252
253 /* hack to allow channel engine type specification on kepler */
254 if (device->card_type >= NV_E0) {
255 if (init->fb_ctxdma_handle != ~0)
256 init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR;
257 else
258 init->fb_ctxdma_handle = init->tt_ctxdma_handle;
259
260 /* allow flips to be executed if this is a graphics channel */
261 init->tt_ctxdma_handle = 0;
262 if (init->fb_ctxdma_handle == NVE0_CHANNEL_IND_ENGINE_GR)
263 init->tt_ctxdma_handle = 1;
264 }
265
266 if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
267 return nouveau_abi16_put(abi16, -EINVAL);
268
253 /* allocate "abi16 channel" data and make up a handle for it */ 269 /* allocate "abi16 channel" data and make up a handle for it */
254 init->channel = ffsll(~abi16->handles); 270 init->channel = ffsll(~abi16->handles);
255 if (!init->channel--) 271 if (!init->channel--)
@@ -264,11 +280,6 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
264 abi16->handles |= (1 << init->channel); 280 abi16->handles |= (1 << init->channel);
265 281
266 /* create channel object and initialise dma and fence management */ 282 /* create channel object and initialise dma and fence management */
267 if (device->card_type >= NV_E0) {
268 init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR;
269 init->tt_ctxdma_handle = 0;
270 }
271
272 ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN | 283 ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN |
273 init->channel, init->fb_ctxdma_handle, 284 init->channel, init->fb_ctxdma_handle,
274 init->tt_ctxdma_handle, &chan->chan); 285 init->tt_ctxdma_handle, &chan->chan);
@@ -378,7 +389,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
378 struct nouveau_abi16_chan *chan, *temp; 389 struct nouveau_abi16_chan *chan, *temp;
379 struct nouveau_abi16_ntfy *ntfy; 390 struct nouveau_abi16_ntfy *ntfy;
380 struct nouveau_object *object; 391 struct nouveau_object *object;
381 struct nv_dma_class args; 392 struct nv_dma_class args = {};
382 int ret; 393 int ret;
383 394
384 if (unlikely(!abi16)) 395 if (unlikely(!abi16))
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 48783e14114c..d97f20069d3e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -35,6 +35,14 @@ static struct nouveau_dsm_priv {
35 acpi_handle rom_handle; 35 acpi_handle rom_handle;
36} nouveau_dsm_priv; 36} nouveau_dsm_priv;
37 37
38bool nouveau_is_optimus(void) {
39 return nouveau_dsm_priv.optimus_detected;
40}
41
42bool nouveau_is_v1_dsm(void) {
43 return nouveau_dsm_priv.dsm_detected;
44}
45
38#define NOUVEAU_DSM_HAS_MUX 0x1 46#define NOUVEAU_DSM_HAS_MUX 0x1
39#define NOUVEAU_DSM_HAS_OPT 0x2 47#define NOUVEAU_DSM_HAS_OPT 0x2
40 48
@@ -183,9 +191,7 @@ static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switchero
183 191
184static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id) 192static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
185{ 193{
186 /* perhaps the _DSM functions are mutually exclusive, but prepare for 194 if (!nouveau_dsm_priv.dsm_detected)
187 * the future */
188 if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected)
189 return 0; 195 return 0;
190 if (id == VGA_SWITCHEROO_IGD) 196 if (id == VGA_SWITCHEROO_IGD)
191 return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA); 197 return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA);
@@ -201,7 +207,7 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
201 207
202 /* Optimus laptops have the card already disabled in 208 /* Optimus laptops have the card already disabled in
203 * nouveau_switcheroo_set_state */ 209 * nouveau_switcheroo_set_state */
204 if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected) 210 if (!nouveau_dsm_priv.dsm_detected)
205 return 0; 211 return 0;
206 212
207 return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state); 213 return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
@@ -283,24 +289,24 @@ static bool nouveau_dsm_detect(void)
283 has_optimus = 1; 289 has_optimus = 1;
284 } 290 }
285 291
286 if (vga_count == 2 && has_dsm && guid_valid) { 292 /* find the optimus DSM or the old v1 DSM */
293 if (has_optimus == 1) {
287 acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, 294 acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
288 &buffer); 295 &buffer);
289 printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", 296 printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n",
290 acpi_method_name); 297 acpi_method_name);
291 nouveau_dsm_priv.dsm_detected = true; 298 nouveau_dsm_priv.optimus_detected = true;
292 ret = true; 299 ret = true;
293 } 300 } else if (vga_count == 2 && has_dsm && guid_valid) {
294
295 if (has_optimus == 1) {
296 acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, 301 acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
297 &buffer); 302 &buffer);
298 printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n", 303 printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
299 acpi_method_name); 304 acpi_method_name);
300 nouveau_dsm_priv.optimus_detected = true; 305 nouveau_dsm_priv.dsm_detected = true;
301 ret = true; 306 ret = true;
302 } 307 }
303 308
309
304 return ret; 310 return ret;
305} 311}
306 312
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.h b/drivers/gpu/drm/nouveau/nouveau_acpi.h
index 08af67722b57..d0da230d7706 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.h
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.h
@@ -4,6 +4,8 @@
4#define ROM_BIOS_PAGE 4096 4#define ROM_BIOS_PAGE 4096
5 5
6#if defined(CONFIG_ACPI) 6#if defined(CONFIG_ACPI)
7bool nouveau_is_optimus(void);
8bool nouveau_is_v1_dsm(void);
7void nouveau_register_dsm_handler(void); 9void nouveau_register_dsm_handler(void);
8void nouveau_unregister_dsm_handler(void); 10void nouveau_unregister_dsm_handler(void);
9void nouveau_switcheroo_optimus_dsm(void); 11void nouveau_switcheroo_optimus_dsm(void);
@@ -11,6 +13,8 @@ int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
11bool nouveau_acpi_rom_supported(struct pci_dev *pdev); 13bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
12void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *); 14void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
13#else 15#else
16static inline bool nouveau_is_optimus(void) { return false; };
17static inline bool nouveau_is_v1_dsm(void) { return false; };
14static inline void nouveau_register_dsm_handler(void) {} 18static inline void nouveau_register_dsm_handler(void) {}
15static inline void nouveau_unregister_dsm_handler(void) {} 19static inline void nouveau_unregister_dsm_handler(void) {}
16static inline void nouveau_switcheroo_optimus_dsm(void) {} 20static inline void nouveau_switcheroo_optimus_dsm(void) {}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 09fdef235882..865eddfa30a7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -624,206 +624,6 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
624 return 0; 624 return 0;
625} 625}
626 626
627/* BIT 'U'/'d' table encoder subtables have hashes matching them to
628 * a particular set of encoders.
629 *
630 * This function returns true if a particular DCB entry matches.
631 */
632bool
633bios_encoder_match(struct dcb_output *dcb, u32 hash)
634{
635 if ((hash & 0x000000f0) != (dcb->location << 4))
636 return false;
637 if ((hash & 0x0000000f) != dcb->type)
638 return false;
639 if (!(hash & (dcb->or << 16)))
640 return false;
641
642 switch (dcb->type) {
643 case DCB_OUTPUT_TMDS:
644 case DCB_OUTPUT_LVDS:
645 case DCB_OUTPUT_DP:
646 if (hash & 0x00c00000) {
647 if (!(hash & (dcb->sorconf.link << 22)))
648 return false;
649 }
650 default:
651 return true;
652 }
653}
654
655int
656nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
657 struct dcb_output *dcbent, int crtc)
658{
659 /*
660 * The display script table is located by the BIT 'U' table.
661 *
662 * It contains an array of pointers to various tables describing
663 * a particular output type. The first 32-bits of the output
664 * tables contains similar information to a DCB entry, and is
665 * used to decide whether that particular table is suitable for
666 * the output you want to access.
667 *
668 * The "record header length" field here seems to indicate the
669 * offset of the first configuration entry in the output tables.
670 * This is 10 on most cards I've seen, but 12 has been witnessed
671 * on DP cards, and there's another script pointer within the
672 * header.
673 *
674 * offset + 0 ( 8 bits): version
675 * offset + 1 ( 8 bits): header length
676 * offset + 2 ( 8 bits): record length
677 * offset + 3 ( 8 bits): number of records
678 * offset + 4 ( 8 bits): record header length
679 * offset + 5 (16 bits): pointer to first output script table
680 */
681
682 struct nouveau_drm *drm = nouveau_drm(dev);
683 struct nvbios *bios = &drm->vbios;
684 uint8_t *table = &bios->data[bios->display.script_table_ptr];
685 uint8_t *otable = NULL;
686 uint16_t script;
687 int i;
688
689 if (!bios->display.script_table_ptr) {
690 NV_ERROR(drm, "No pointer to output script table\n");
691 return 1;
692 }
693
694 /*
695 * Nothing useful has been in any of the pre-2.0 tables I've seen,
696 * so until they are, we really don't need to care.
697 */
698 if (table[0] < 0x20)
699 return 1;
700
701 if (table[0] != 0x20 && table[0] != 0x21) {
702 NV_ERROR(drm, "Output script table version 0x%02x unknown\n",
703 table[0]);
704 return 1;
705 }
706
707 /*
708 * The output script tables describing a particular output type
709 * look as follows:
710 *
711 * offset + 0 (32 bits): output this table matches (hash of DCB)
712 * offset + 4 ( 8 bits): unknown
713 * offset + 5 ( 8 bits): number of configurations
714 * offset + 6 (16 bits): pointer to some script
715 * offset + 8 (16 bits): pointer to some script
716 *
717 * headerlen == 10
718 * offset + 10 : configuration 0
719 *
720 * headerlen == 12
721 * offset + 10 : pointer to some script
722 * offset + 12 : configuration 0
723 *
724 * Each config entry is as follows:
725 *
726 * offset + 0 (16 bits): unknown, assumed to be a match value
727 * offset + 2 (16 bits): pointer to script table (clock set?)
728 * offset + 4 (16 bits): pointer to script table (reset?)
729 *
730 * There doesn't appear to be a count value to say how many
731 * entries exist in each script table, instead, a 0 value in
732 * the first 16-bit word seems to indicate both the end of the
733 * list and the default entry. The second 16-bit word in the
734 * script tables is a pointer to the script to execute.
735 */
736
737 NV_DEBUG(drm, "Searching for output entry for %d %d %d\n",
738 dcbent->type, dcbent->location, dcbent->or);
739 for (i = 0; i < table[3]; i++) {
740 otable = ROMPTR(dev, table[table[1] + (i * table[2])]);
741 if (otable && bios_encoder_match(dcbent, ROM32(otable[0])))
742 break;
743 }
744
745 if (!otable) {
746 NV_DEBUG(drm, "failed to match any output table\n");
747 return 1;
748 }
749
750 if (pclk < -2 || pclk > 0) {
751 /* Try to find matching script table entry */
752 for (i = 0; i < otable[5]; i++) {
753 if (ROM16(otable[table[4] + i*6]) == type)
754 break;
755 }
756
757 if (i == otable[5]) {
758 NV_ERROR(drm, "Table 0x%04x not found for %d/%d, "
759 "using first\n",
760 type, dcbent->type, dcbent->or);
761 i = 0;
762 }
763 }
764
765 if (pclk == 0) {
766 script = ROM16(otable[6]);
767 if (!script) {
768 NV_DEBUG(drm, "output script 0 not found\n");
769 return 1;
770 }
771
772 NV_DEBUG(drm, "0x%04X: parsing output script 0\n", script);
773 nouveau_bios_run_init_table(dev, script, dcbent, crtc);
774 } else
775 if (pclk == -1) {
776 script = ROM16(otable[8]);
777 if (!script) {
778 NV_DEBUG(drm, "output script 1 not found\n");
779 return 1;
780 }
781
782 NV_DEBUG(drm, "0x%04X: parsing output script 1\n", script);
783 nouveau_bios_run_init_table(dev, script, dcbent, crtc);
784 } else
785 if (pclk == -2) {
786 if (table[4] >= 12)
787 script = ROM16(otable[10]);
788 else
789 script = 0;
790 if (!script) {
791 NV_DEBUG(drm, "output script 2 not found\n");
792 return 1;
793 }
794
795 NV_DEBUG(drm, "0x%04X: parsing output script 2\n", script);
796 nouveau_bios_run_init_table(dev, script, dcbent, crtc);
797 } else
798 if (pclk > 0) {
799 script = ROM16(otable[table[4] + i*6 + 2]);
800 if (script)
801 script = clkcmptable(bios, script, pclk);
802 if (!script) {
803 NV_DEBUG(drm, "clock script 0 not found\n");
804 return 1;
805 }
806
807 NV_DEBUG(drm, "0x%04X: parsing clock script 0\n", script);
808 nouveau_bios_run_init_table(dev, script, dcbent, crtc);
809 } else
810 if (pclk < 0) {
811 script = ROM16(otable[table[4] + i*6 + 4]);
812 if (script)
813 script = clkcmptable(bios, script, -pclk);
814 if (!script) {
815 NV_DEBUG(drm, "clock script 1 not found\n");
816 return 1;
817 }
818
819 NV_DEBUG(drm, "0x%04X: parsing clock script 1\n", script);
820 nouveau_bios_run_init_table(dev, script, dcbent, crtc);
821 }
822
823 return 0;
824}
825
826
827int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, int pxclk) 627int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, int pxclk)
828{ 628{
829 /* 629 /*
@@ -1212,31 +1012,6 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
1212 return 0; 1012 return 0;
1213} 1013}
1214 1014
1215static int
1216parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
1217 struct bit_entry *bitentry)
1218{
1219 /*
1220 * Parses the pointer to the G80 output script tables
1221 *
1222 * Starting at bitentry->offset:
1223 *
1224 * offset + 0 (16 bits): output script table pointer
1225 */
1226
1227 struct nouveau_drm *drm = nouveau_drm(dev);
1228 uint16_t outputscripttableptr;
1229
1230 if (bitentry->length != 3) {
1231 NV_ERROR(drm, "Do not understand BIT U table\n");
1232 return -EINVAL;
1233 }
1234
1235 outputscripttableptr = ROM16(bios->data[bitentry->offset]);
1236 bios->display.script_table_ptr = outputscripttableptr;
1237 return 0;
1238}
1239
1240struct bit_table { 1015struct bit_table {
1241 const char id; 1016 const char id;
1242 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *); 1017 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
@@ -1313,7 +1088,6 @@ parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset)
1313 parse_bit_table(bios, bitoffset, &BIT_TABLE('M', M)); /* memory? */ 1088 parse_bit_table(bios, bitoffset, &BIT_TABLE('M', M)); /* memory? */
1314 parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds)); 1089 parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds));
1315 parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds)); 1090 parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds));
1316 parse_bit_table(bios, bitoffset, &BIT_TABLE('U', U));
1317 1091
1318 return 0; 1092 return 0;
1319} 1093}
@@ -2324,7 +2098,7 @@ nouveau_run_vbios_init(struct drm_device *dev)
2324{ 2098{
2325 struct nouveau_drm *drm = nouveau_drm(dev); 2099 struct nouveau_drm *drm = nouveau_drm(dev);
2326 struct nvbios *bios = &drm->vbios; 2100 struct nvbios *bios = &drm->vbios;
2327 int i, ret = 0; 2101 int ret = 0;
2328 2102
2329 /* Reset the BIOS head to 0. */ 2103 /* Reset the BIOS head to 0. */
2330 bios->state.crtchead = 0; 2104 bios->state.crtchead = 0;
@@ -2337,13 +2111,6 @@ nouveau_run_vbios_init(struct drm_device *dev)
2337 bios->fp.lvds_init_run = false; 2111 bios->fp.lvds_init_run = false;
2338 } 2112 }
2339 2113
2340 if (nv_device(drm->device)->card_type >= NV_50) {
2341 for (i = 0; bios->execute && i < bios->dcb.entries; i++) {
2342 nouveau_bios_run_display_table(dev, 0, 0,
2343 &bios->dcb.entry[i], -1);
2344 }
2345 }
2346
2347 return ret; 2114 return ret;
2348} 2115}
2349 2116
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 3befbb821a56..f68c54ca422f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -128,12 +128,6 @@ struct nvbios {
128 } state; 128 } state;
129 129
130 struct { 130 struct {
131 struct dcb_output *output;
132 int crtc;
133 uint16_t script_table_ptr;
134 } display;
135
136 struct {
137 uint16_t fptablepointer; /* also used by tmds */ 131 uint16_t fptablepointer; /* also used by tmds */
138 uint16_t fpxlatetableptr; 132 uint16_t fpxlatetableptr;
139 int xlatwidth; 133 int xlatwidth;
@@ -185,8 +179,6 @@ void nouveau_bios_takedown(struct drm_device *dev);
185int nouveau_run_vbios_init(struct drm_device *); 179int nouveau_run_vbios_init(struct drm_device *);
186struct dcb_connector_table_entry * 180struct dcb_connector_table_entry *
187nouveau_bios_connector_entry(struct drm_device *, int index); 181nouveau_bios_connector_entry(struct drm_device *, int index);
188int nouveau_bios_run_display_table(struct drm_device *, u16 id, int clk,
189 struct dcb_output *, int crtc);
190bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *); 182bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *);
191uint8_t *nouveau_bios_embedded_edid(struct drm_device *); 183uint8_t *nouveau_bios_embedded_edid(struct drm_device *);
192int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk, 184int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk,
@@ -195,6 +187,5 @@ int run_tmds_table(struct drm_device *, struct dcb_output *,
195 int head, int pxclk); 187 int head, int pxclk);
196int call_lvds_script(struct drm_device *, struct dcb_output *, int head, 188int call_lvds_script(struct drm_device *, struct dcb_output *, int head,
197 enum LVDS_script, int pxclk); 189 enum LVDS_script, int pxclk);
198bool bios_encoder_match(struct dcb_output *, u32 hash);
199 190
200#endif 191#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 35ac57f0aab6..69d7b1d0b9d6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -225,7 +225,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
225 225
226 ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size, 226 ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
227 type, &nvbo->placement, 227 type, &nvbo->placement,
228 align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg, 228 align >> PAGE_SHIFT, false, NULL, acc_size, sg,
229 nouveau_bo_del_ttm); 229 nouveau_bo_del_ttm);
230 if (ret) { 230 if (ret) {
231 /* ttm will call nouveau_bo_del_ttm if it fails.. */ 231 /* ttm will call nouveau_bo_del_ttm if it fails.. */
@@ -315,7 +315,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
315 315
316 nouveau_bo_placement_set(nvbo, memtype, 0); 316 nouveau_bo_placement_set(nvbo, memtype, 0);
317 317
318 ret = nouveau_bo_validate(nvbo, false, false, false); 318 ret = nouveau_bo_validate(nvbo, false, false);
319 if (ret == 0) { 319 if (ret == 0) {
320 switch (bo->mem.mem_type) { 320 switch (bo->mem.mem_type) {
321 case TTM_PL_VRAM: 321 case TTM_PL_VRAM:
@@ -351,7 +351,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
351 351
352 nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); 352 nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
353 353
354 ret = nouveau_bo_validate(nvbo, false, false, false); 354 ret = nouveau_bo_validate(nvbo, false, false);
355 if (ret == 0) { 355 if (ret == 0) {
356 switch (bo->mem.mem_type) { 356 switch (bo->mem.mem_type) {
357 case TTM_PL_VRAM: 357 case TTM_PL_VRAM:
@@ -392,12 +392,12 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo)
392 392
393int 393int
394nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible, 394nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
395 bool no_wait_reserve, bool no_wait_gpu) 395 bool no_wait_gpu)
396{ 396{
397 int ret; 397 int ret;
398 398
399 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible, 399 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
400 no_wait_reserve, no_wait_gpu); 400 interruptible, no_wait_gpu);
401 if (ret) 401 if (ret)
402 return ret; 402 return ret;
403 403
@@ -556,8 +556,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
556static int 556static int
557nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, 557nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
558 struct nouveau_bo *nvbo, bool evict, 558 struct nouveau_bo *nvbo, bool evict,
559 bool no_wait_reserve, bool no_wait_gpu, 559 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
560 struct ttm_mem_reg *new_mem)
561{ 560{
562 struct nouveau_fence *fence = NULL; 561 struct nouveau_fence *fence = NULL;
563 int ret; 562 int ret;
@@ -566,8 +565,8 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
566 if (ret) 565 if (ret)
567 return ret; 566 return ret;
568 567
569 ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict, 568 ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict,
570 no_wait_reserve, no_wait_gpu, new_mem); 569 no_wait_gpu, new_mem);
571 nouveau_fence_unref(&fence); 570 nouveau_fence_unref(&fence);
572 return ret; 571 return ret;
573} 572}
@@ -965,8 +964,7 @@ nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
965 964
966static int 965static int
967nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, 966nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
968 bool no_wait_reserve, bool no_wait_gpu, 967 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
969 struct ttm_mem_reg *new_mem)
970{ 968{
971 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 969 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
972 struct nouveau_channel *chan = chan = drm->channel; 970 struct nouveau_channel *chan = chan = drm->channel;
@@ -995,7 +993,6 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
995 ret = drm->ttm.move(chan, bo, &bo->mem, new_mem); 993 ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
996 if (ret == 0) { 994 if (ret == 0) {
997 ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict, 995 ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
998 no_wait_reserve,
999 no_wait_gpu, new_mem); 996 no_wait_gpu, new_mem);
1000 } 997 }
1001 998
@@ -1064,8 +1061,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
1064 1061
1065static int 1062static int
1066nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, 1063nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
1067 bool no_wait_reserve, bool no_wait_gpu, 1064 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
1068 struct ttm_mem_reg *new_mem)
1069{ 1065{
1070 u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; 1066 u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
1071 struct ttm_placement placement; 1067 struct ttm_placement placement;
@@ -1078,7 +1074,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
1078 1074
1079 tmp_mem = *new_mem; 1075 tmp_mem = *new_mem;
1080 tmp_mem.mm_node = NULL; 1076 tmp_mem.mm_node = NULL;
1081 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu); 1077 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
1082 if (ret) 1078 if (ret)
1083 return ret; 1079 return ret;
1084 1080
@@ -1086,11 +1082,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
1086 if (ret) 1082 if (ret)
1087 goto out; 1083 goto out;
1088 1084
1089 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem); 1085 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
1090 if (ret) 1086 if (ret)
1091 goto out; 1087 goto out;
1092 1088
1093 ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem); 1089 ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
1094out: 1090out:
1095 ttm_bo_mem_put(bo, &tmp_mem); 1091 ttm_bo_mem_put(bo, &tmp_mem);
1096 return ret; 1092 return ret;
@@ -1098,8 +1094,7 @@ out:
1098 1094
1099static int 1095static int
1100nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, 1096nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
1101 bool no_wait_reserve, bool no_wait_gpu, 1097 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
1102 struct ttm_mem_reg *new_mem)
1103{ 1098{
1104 u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; 1099 u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
1105 struct ttm_placement placement; 1100 struct ttm_placement placement;
@@ -1112,15 +1107,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
1112 1107
1113 tmp_mem = *new_mem; 1108 tmp_mem = *new_mem;
1114 tmp_mem.mm_node = NULL; 1109 tmp_mem.mm_node = NULL;
1115 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu); 1110 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
1116 if (ret) 1111 if (ret)
1117 return ret; 1112 return ret;
1118 1113
1119 ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem); 1114 ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
1120 if (ret) 1115 if (ret)
1121 goto out; 1116 goto out;
1122 1117
1123 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem); 1118 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
1124 if (ret) 1119 if (ret)
1125 goto out; 1120 goto out;
1126 1121
@@ -1195,8 +1190,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
1195 1190
1196static int 1191static int
1197nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, 1192nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1198 bool no_wait_reserve, bool no_wait_gpu, 1193 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
1199 struct ttm_mem_reg *new_mem)
1200{ 1194{
1201 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 1195 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1202 struct nouveau_bo *nvbo = nouveau_bo(bo); 1196 struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -1220,23 +1214,26 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1220 1214
1221 /* CPU copy if we have no accelerated method available */ 1215 /* CPU copy if we have no accelerated method available */
1222 if (!drm->ttm.move) { 1216 if (!drm->ttm.move) {
1223 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 1217 ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
1224 goto out; 1218 goto out;
1225 } 1219 }
1226 1220
1227 /* Hardware assisted copy. */ 1221 /* Hardware assisted copy. */
1228 if (new_mem->mem_type == TTM_PL_SYSTEM) 1222 if (new_mem->mem_type == TTM_PL_SYSTEM)
1229 ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); 1223 ret = nouveau_bo_move_flipd(bo, evict, intr,
1224 no_wait_gpu, new_mem);
1230 else if (old_mem->mem_type == TTM_PL_SYSTEM) 1225 else if (old_mem->mem_type == TTM_PL_SYSTEM)
1231 ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); 1226 ret = nouveau_bo_move_flips(bo, evict, intr,
1227 no_wait_gpu, new_mem);
1232 else 1228 else
1233 ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); 1229 ret = nouveau_bo_move_m2mf(bo, evict, intr,
1230 no_wait_gpu, new_mem);
1234 1231
1235 if (!ret) 1232 if (!ret)
1236 goto out; 1233 goto out;
1237 1234
1238 /* Fallback to software copy. */ 1235 /* Fallback to software copy. */
1239 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 1236 ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
1240 1237
1241out: 1238out:
1242 if (nv_device(drm->device)->card_type < NV_50) { 1239 if (nv_device(drm->device)->card_type < NV_50) {
@@ -1279,7 +1276,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1279 if (drm->agp.stat == ENABLED) { 1276 if (drm->agp.stat == ENABLED) {
1280 mem->bus.offset = mem->start << PAGE_SHIFT; 1277 mem->bus.offset = mem->start << PAGE_SHIFT;
1281 mem->bus.base = drm->agp.base; 1278 mem->bus.base = drm->agp.base;
1282 mem->bus.is_iomem = true; 1279 mem->bus.is_iomem = !dev->agp->cant_use_aperture;
1283 } 1280 }
1284#endif 1281#endif
1285 break; 1282 break;
@@ -1343,7 +1340,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1343 nvbo->placement.fpfn = 0; 1340 nvbo->placement.fpfn = 0;
1344 nvbo->placement.lpfn = mappable; 1341 nvbo->placement.lpfn = mappable;
1345 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0); 1342 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
1346 return nouveau_bo_validate(nvbo, false, true, false); 1343 return nouveau_bo_validate(nvbo, false, false);
1347} 1344}
1348 1345
1349static int 1346static int
@@ -1472,19 +1469,19 @@ nouveau_bo_fence_ref(void *sync_obj)
1472} 1469}
1473 1470
1474static bool 1471static bool
1475nouveau_bo_fence_signalled(void *sync_obj, void *sync_arg) 1472nouveau_bo_fence_signalled(void *sync_obj)
1476{ 1473{
1477 return nouveau_fence_done(sync_obj); 1474 return nouveau_fence_done(sync_obj);
1478} 1475}
1479 1476
1480static int 1477static int
1481nouveau_bo_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr) 1478nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr)
1482{ 1479{
1483 return nouveau_fence_wait(sync_obj, lazy, intr); 1480 return nouveau_fence_wait(sync_obj, lazy, intr);
1484} 1481}
1485 1482
1486static int 1483static int
1487nouveau_bo_fence_flush(void *sync_obj, void *sync_arg) 1484nouveau_bo_fence_flush(void *sync_obj)
1488{ 1485{
1489 return 0; 1486 return 0;
1490} 1487}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index dec51b1098fe..25ca37989d2c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -76,7 +76,7 @@ u32 nouveau_bo_rd32(struct nouveau_bo *, unsigned index);
76void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val); 76void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
77void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *); 77void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
78int nouveau_bo_validate(struct nouveau_bo *, bool interruptible, 78int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
79 bool no_wait_reserve, bool no_wait_gpu); 79 bool no_wait_gpu);
80 80
81struct nouveau_vma * 81struct nouveau_vma *
82nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *); 82nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index c1d7301c0e9c..174300b6a02e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -76,6 +76,8 @@ nouveau_channel_del(struct nouveau_channel **pchan)
76 nouveau_object_del(client, NVDRM_DEVICE, chan->push.handle); 76 nouveau_object_del(client, NVDRM_DEVICE, chan->push.handle);
77 nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma); 77 nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
78 nouveau_bo_unmap(chan->push.buffer); 78 nouveau_bo_unmap(chan->push.buffer);
79 if (chan->push.buffer && chan->push.buffer->pin_refcnt)
80 nouveau_bo_unpin(chan->push.buffer);
79 nouveau_bo_ref(NULL, &chan->push.buffer); 81 nouveau_bo_ref(NULL, &chan->push.buffer);
80 kfree(chan); 82 kfree(chan);
81 } 83 }
@@ -267,7 +269,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
267 struct nouveau_fb *pfb = nouveau_fb(device); 269 struct nouveau_fb *pfb = nouveau_fb(device);
268 struct nouveau_software_chan *swch; 270 struct nouveau_software_chan *swch;
269 struct nouveau_object *object; 271 struct nouveau_object *object;
270 struct nv_dma_class args; 272 struct nv_dma_class args = {};
271 int ret, i; 273 int ret, i;
272 274
273 /* allocate dma objects to cover all allowed vram, and gart */ 275 /* allocate dma objects to cover all allowed vram, and gart */
@@ -346,7 +348,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
346 /* allocate software object class (used for fences on <= nv05, and 348 /* allocate software object class (used for fences on <= nv05, and
347 * to signal flip completion), bind it to a subchannel. 349 * to signal flip completion), bind it to a subchannel.
348 */ 350 */
349 if (chan != chan->drm->cechan) { 351 if ((device->card_type < NV_E0) || gart /* nve0: want_nvsw */) {
350 ret = nouveau_object_new(nv_object(client), chan->handle, 352 ret = nouveau_object_new(nv_object(client), chan->handle,
351 NvSw, nouveau_abi16_swclass(chan->drm), 353 NvSw, nouveau_abi16_swclass(chan->drm),
352 NULL, 0, &object); 354 NULL, 0, &object);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 9a6e2cb282dc..e620ba8271b4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -110,7 +110,6 @@ nouveau_connector_destroy(struct drm_connector *connector)
110 dev = nv_connector->base.dev; 110 dev = nv_connector->base.dev;
111 drm = nouveau_drm(dev); 111 drm = nouveau_drm(dev);
112 gpio = nouveau_gpio(drm->device); 112 gpio = nouveau_gpio(drm->device);
113 NV_DEBUG(drm, "\n");
114 113
115 if (gpio && nv_connector->hpd != DCB_GPIO_UNUSED) { 114 if (gpio && nv_connector->hpd != DCB_GPIO_UNUSED) {
116 gpio->isr_del(gpio, 0, nv_connector->hpd, 0xff, 115 gpio->isr_del(gpio, 0, nv_connector->hpd, 0xff,
@@ -128,12 +127,26 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
128 struct nouveau_encoder **pnv_encoder) 127 struct nouveau_encoder **pnv_encoder)
129{ 128{
130 struct drm_device *dev = connector->dev; 129 struct drm_device *dev = connector->dev;
130 struct nouveau_connector *nv_connector = nouveau_connector(connector);
131 struct nouveau_drm *drm = nouveau_drm(dev); 131 struct nouveau_drm *drm = nouveau_drm(dev);
132 struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
132 struct nouveau_i2c *i2c = nouveau_i2c(drm->device); 133 struct nouveau_i2c *i2c = nouveau_i2c(drm->device);
133 int i; 134 struct nouveau_i2c_port *port = NULL;
135 int i, panel = -ENODEV;
136
137 /* eDP panels need powering on by us (if the VBIOS doesn't default it
138 * to on) before doing any AUX channel transactions. LVDS panel power
139 * is handled by the SOR itself, and not required for LVDS DDC.
140 */
141 if (nv_connector->type == DCB_CONNECTOR_eDP) {
142 panel = gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
143 if (panel == 0) {
144 gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
145 msleep(300);
146 }
147 }
134 148
135 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { 149 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
136 struct nouveau_i2c_port *port = NULL;
137 struct nouveau_encoder *nv_encoder; 150 struct nouveau_encoder *nv_encoder;
138 struct drm_mode_object *obj; 151 struct drm_mode_object *obj;
139 int id; 152 int id;
@@ -151,11 +164,19 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
151 port = i2c->find(i2c, nv_encoder->dcb->i2c_index); 164 port = i2c->find(i2c, nv_encoder->dcb->i2c_index);
152 if (port && nv_probe_i2c(port, 0x50)) { 165 if (port && nv_probe_i2c(port, 0x50)) {
153 *pnv_encoder = nv_encoder; 166 *pnv_encoder = nv_encoder;
154 return port; 167 break;
155 } 168 }
169
170 port = NULL;
156 } 171 }
157 172
158 return NULL; 173 /* eDP panel not detected, restore panel power GPIO to previous
174 * state to avoid confusing the SOR for other output types.
175 */
176 if (!port && panel == 0)
177 gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
178
179 return port;
159} 180}
160 181
161static struct nouveau_encoder * 182static struct nouveau_encoder *
@@ -221,7 +242,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
221 } 242 }
222 243
223 if (nv_connector->type == DCB_CONNECTOR_DVI_I) { 244 if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
224 drm_connector_property_set_value(connector, 245 drm_object_property_set_value(&connector->base,
225 dev->mode_config.dvi_i_subconnector_property, 246 dev->mode_config.dvi_i_subconnector_property,
226 nv_encoder->dcb->type == DCB_OUTPUT_TMDS ? 247 nv_encoder->dcb->type == DCB_OUTPUT_TMDS ?
227 DRM_MODE_SUBCONNECTOR_DVID : 248 DRM_MODE_SUBCONNECTOR_DVID :
@@ -355,7 +376,7 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
355 * valid - it's not (rh#613284) 376 * valid - it's not (rh#613284)
356 */ 377 */
357 if (nv_encoder->dcb->lvdsconf.use_acpi_for_edid) { 378 if (nv_encoder->dcb->lvdsconf.use_acpi_for_edid) {
358 if (!(nv_connector->edid = nouveau_acpi_edid(dev, connector))) { 379 if ((nv_connector->edid = nouveau_acpi_edid(dev, connector))) {
359 status = connector_status_connected; 380 status = connector_status_connected;
360 goto out; 381 goto out;
361 } 382 }
@@ -929,8 +950,6 @@ nouveau_connector_create(struct drm_device *dev, int index)
929 int type, ret = 0; 950 int type, ret = 0;
930 bool dummy; 951 bool dummy;
931 952
932 NV_DEBUG(drm, "\n");
933
934 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 953 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
935 nv_connector = nouveau_connector(connector); 954 nv_connector = nouveau_connector(connector);
936 if (nv_connector->index == index) 955 if (nv_connector->index == index)
@@ -1043,7 +1062,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
1043 1062
1044 /* Init DVI-I specific properties */ 1063 /* Init DVI-I specific properties */
1045 if (nv_connector->type == DCB_CONNECTOR_DVI_I) 1064 if (nv_connector->type == DCB_CONNECTOR_DVI_I)
1046 drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0); 1065 drm_object_attach_property(&connector->base, dev->mode_config.dvi_i_subconnector_property, 0);
1047 1066
1048 /* Add overscan compensation options to digital outputs */ 1067 /* Add overscan compensation options to digital outputs */
1049 if (disp->underscan_property && 1068 if (disp->underscan_property &&
@@ -1051,31 +1070,31 @@ nouveau_connector_create(struct drm_device *dev, int index)
1051 type == DRM_MODE_CONNECTOR_DVII || 1070 type == DRM_MODE_CONNECTOR_DVII ||
1052 type == DRM_MODE_CONNECTOR_HDMIA || 1071 type == DRM_MODE_CONNECTOR_HDMIA ||
1053 type == DRM_MODE_CONNECTOR_DisplayPort)) { 1072 type == DRM_MODE_CONNECTOR_DisplayPort)) {
1054 drm_connector_attach_property(connector, 1073 drm_object_attach_property(&connector->base,
1055 disp->underscan_property, 1074 disp->underscan_property,
1056 UNDERSCAN_OFF); 1075 UNDERSCAN_OFF);
1057 drm_connector_attach_property(connector, 1076 drm_object_attach_property(&connector->base,
1058 disp->underscan_hborder_property, 1077 disp->underscan_hborder_property,
1059 0); 1078 0);
1060 drm_connector_attach_property(connector, 1079 drm_object_attach_property(&connector->base,
1061 disp->underscan_vborder_property, 1080 disp->underscan_vborder_property,
1062 0); 1081 0);
1063 } 1082 }
1064 1083
1065 /* Add hue and saturation options */ 1084 /* Add hue and saturation options */
1066 if (disp->vibrant_hue_property) 1085 if (disp->vibrant_hue_property)
1067 drm_connector_attach_property(connector, 1086 drm_object_attach_property(&connector->base,
1068 disp->vibrant_hue_property, 1087 disp->vibrant_hue_property,
1069 90); 1088 90);
1070 if (disp->color_vibrance_property) 1089 if (disp->color_vibrance_property)
1071 drm_connector_attach_property(connector, 1090 drm_object_attach_property(&connector->base,
1072 disp->color_vibrance_property, 1091 disp->color_vibrance_property,
1073 150); 1092 150);
1074 1093
1075 switch (nv_connector->type) { 1094 switch (nv_connector->type) {
1076 case DCB_CONNECTOR_VGA: 1095 case DCB_CONNECTOR_VGA:
1077 if (nv_device(drm->device)->card_type >= NV_50) { 1096 if (nv_device(drm->device)->card_type >= NV_50) {
1078 drm_connector_attach_property(connector, 1097 drm_object_attach_property(&connector->base,
1079 dev->mode_config.scaling_mode_property, 1098 dev->mode_config.scaling_mode_property,
1080 nv_connector->scaling_mode); 1099 nv_connector->scaling_mode);
1081 } 1100 }
@@ -1088,18 +1107,18 @@ nouveau_connector_create(struct drm_device *dev, int index)
1088 default: 1107 default:
1089 nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN; 1108 nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
1090 1109
1091 drm_connector_attach_property(connector, 1110 drm_object_attach_property(&connector->base,
1092 dev->mode_config.scaling_mode_property, 1111 dev->mode_config.scaling_mode_property,
1093 nv_connector->scaling_mode); 1112 nv_connector->scaling_mode);
1094 if (disp->dithering_mode) { 1113 if (disp->dithering_mode) {
1095 nv_connector->dithering_mode = DITHERING_MODE_AUTO; 1114 nv_connector->dithering_mode = DITHERING_MODE_AUTO;
1096 drm_connector_attach_property(connector, 1115 drm_object_attach_property(&connector->base,
1097 disp->dithering_mode, 1116 disp->dithering_mode,
1098 nv_connector->dithering_mode); 1117 nv_connector->dithering_mode);
1099 } 1118 }
1100 if (disp->dithering_depth) { 1119 if (disp->dithering_depth) {
1101 nv_connector->dithering_depth = DITHERING_DEPTH_AUTO; 1120 nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
1102 drm_connector_attach_property(connector, 1121 drm_object_attach_property(&connector->base,
1103 disp->dithering_depth, 1122 disp->dithering_depth,
1104 nv_connector->dithering_depth); 1123 nv_connector->dithering_depth);
1105 } 1124 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index ebdb87670a8f..20eb84cce9e6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -28,6 +28,7 @@
28#define __NOUVEAU_CONNECTOR_H__ 28#define __NOUVEAU_CONNECTOR_H__
29 29
30#include <drm/drm_edid.h> 30#include <drm/drm_edid.h>
31#include "nouveau_crtc.h"
31 32
32struct nouveau_i2c_port; 33struct nouveau_i2c_port;
33 34
@@ -80,6 +81,21 @@ static inline struct nouveau_connector *nouveau_connector(
80 return container_of(con, struct nouveau_connector, base); 81 return container_of(con, struct nouveau_connector, base);
81} 82}
82 83
84static inline struct nouveau_connector *
85nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
86{
87 struct drm_device *dev = nv_crtc->base.dev;
88 struct drm_connector *connector;
89 struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
90
91 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
92 if (connector->encoder && connector->encoder->crtc == crtc)
93 return nouveau_connector(connector);
94 }
95
96 return NULL;
97}
98
83struct drm_connector * 99struct drm_connector *
84nouveau_connector_create(struct drm_device *, int index); 100nouveau_connector_create(struct drm_device *, int index);
85 101
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index e6d0d1eb0133..d1e5890784d7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -82,16 +82,6 @@ static inline struct drm_crtc *to_drm_crtc(struct nouveau_crtc *crtc)
82 return &crtc->base; 82 return &crtc->base;
83} 83}
84 84
85int nv50_crtc_create(struct drm_device *dev, int index);
86int nv50_crtc_cursor_set(struct drm_crtc *drm_crtc, struct drm_file *file_priv,
87 uint32_t buffer_handle, uint32_t width,
88 uint32_t height);
89int nv50_crtc_cursor_move(struct drm_crtc *drm_crtc, int x, int y);
90
91int nv04_cursor_init(struct nouveau_crtc *); 85int nv04_cursor_init(struct nouveau_crtc *);
92int nv50_cursor_init(struct nouveau_crtc *);
93
94struct nouveau_connector *
95nouveau_crtc_connector_get(struct nouveau_crtc *crtc);
96 86
97#endif /* __NOUVEAU_CRTC_H__ */ 87#endif /* __NOUVEAU_CRTC_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 86124b131f4f..508b00a2ce0d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -98,12 +98,12 @@ nouveau_framebuffer_init(struct drm_device *dev,
98 nv_fb->r_dma = NvEvoVRAM_LP; 98 nv_fb->r_dma = NvEvoVRAM_LP;
99 99
100 switch (fb->depth) { 100 switch (fb->depth) {
101 case 8: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_8; break; 101 case 8: nv_fb->r_format = 0x1e00; break;
102 case 15: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_15; break; 102 case 15: nv_fb->r_format = 0xe900; break;
103 case 16: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_16; break; 103 case 16: nv_fb->r_format = 0xe800; break;
104 case 24: 104 case 24:
105 case 32: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_24; break; 105 case 32: nv_fb->r_format = 0xcf00; break;
106 case 30: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_30; break; 106 case 30: nv_fb->r_format = 0xd100; break;
107 default: 107 default:
108 NV_ERROR(drm, "unknown depth %d\n", fb->depth); 108 NV_ERROR(drm, "unknown depth %d\n", fb->depth);
109 return -EINVAL; 109 return -EINVAL;
@@ -225,15 +225,6 @@ nouveau_display_init(struct drm_device *dev)
225 if (ret) 225 if (ret)
226 return ret; 226 return ret;
227 227
228 /* power on internal panel if it's not already. the init tables of
229 * some vbios default this to off for some reason, causing the
230 * panel to not work after resume
231 */
232 if (gpio && gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff) == 0) {
233 gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
234 msleep(300);
235 }
236
237 /* enable polling for external displays */ 228 /* enable polling for external displays */
238 drm_kms_helper_poll_enable(dev); 229 drm_kms_helper_poll_enable(dev);
239 230
@@ -324,7 +315,7 @@ nouveau_display_create(struct drm_device *dev)
324 disp->underscan_vborder_property = 315 disp->underscan_vborder_property =
325 drm_property_create_range(dev, 0, "underscan vborder", 0, 128); 316 drm_property_create_range(dev, 0, "underscan vborder", 0, 128);
326 317
327 if (gen == 1) { 318 if (gen >= 1) {
328 disp->vibrant_hue_property = 319 disp->vibrant_hue_property =
329 drm_property_create(dev, DRM_MODE_PROP_RANGE, 320 drm_property_create(dev, DRM_MODE_PROP_RANGE,
330 "vibrant hue", 2); 321 "vibrant hue", 2);
@@ -366,10 +357,7 @@ nouveau_display_create(struct drm_device *dev)
366 if (nv_device(drm->device)->card_type < NV_50) 357 if (nv_device(drm->device)->card_type < NV_50)
367 ret = nv04_display_create(dev); 358 ret = nv04_display_create(dev);
368 else 359 else
369 if (nv_device(drm->device)->card_type < NV_D0)
370 ret = nv50_display_create(dev); 360 ret = nv50_display_create(dev);
371 else
372 ret = nvd0_display_create(dev);
373 if (ret) 361 if (ret)
374 goto disp_create_err; 362 goto disp_create_err;
375 363
@@ -400,11 +388,12 @@ nouveau_display_destroy(struct drm_device *dev)
400 nouveau_backlight_exit(dev); 388 nouveau_backlight_exit(dev);
401 drm_vblank_cleanup(dev); 389 drm_vblank_cleanup(dev);
402 390
391 drm_kms_helper_poll_fini(dev);
392 drm_mode_config_cleanup(dev);
393
403 if (disp->dtor) 394 if (disp->dtor)
404 disp->dtor(dev); 395 disp->dtor(dev);
405 396
406 drm_kms_helper_poll_fini(dev);
407 drm_mode_config_cleanup(dev);
408 nouveau_drm(dev)->display = NULL; 397 nouveau_drm(dev)->display = NULL;
409 kfree(disp); 398 kfree(disp);
410} 399}
@@ -659,10 +648,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
659 648
660 /* Emit a page flip */ 649 /* Emit a page flip */
661 if (nv_device(drm->device)->card_type >= NV_50) { 650 if (nv_device(drm->device)->card_type >= NV_50) {
662 if (nv_device(drm->device)->card_type >= NV_D0) 651 ret = nv50_display_flip_next(crtc, fb, chan, 0);
663 ret = nvd0_display_flip_next(crtc, fb, chan, 0);
664 else
665 ret = nv50_display_flip_next(crtc, fb, chan);
666 if (ret) { 652 if (ret) {
667 mutex_unlock(&chan->cli->mutex); 653 mutex_unlock(&chan->cli->mutex);
668 goto fail_unreserve; 654 goto fail_unreserve;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 978a108ba7a1..59838651ee8f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -30,60 +30,17 @@
30#include "nouveau_encoder.h" 30#include "nouveau_encoder.h"
31#include "nouveau_crtc.h" 31#include "nouveau_crtc.h"
32 32
33#include <core/class.h>
34
33#include <subdev/gpio.h> 35#include <subdev/gpio.h>
34#include <subdev/i2c.h> 36#include <subdev/i2c.h>
35 37
36u8 *
37nouveau_dp_bios_data(struct drm_device *dev, struct dcb_output *dcb, u8 **entry)
38{
39 struct nouveau_drm *drm = nouveau_drm(dev);
40 struct bit_entry d;
41 u8 *table;
42 int i;
43
44 if (bit_table(dev, 'd', &d)) {
45 NV_ERROR(drm, "BIT 'd' table not found\n");
46 return NULL;
47 }
48
49 if (d.version != 1) {
50 NV_ERROR(drm, "BIT 'd' table version %d unknown\n", d.version);
51 return NULL;
52 }
53
54 table = ROMPTR(dev, d.data[0]);
55 if (!table) {
56 NV_ERROR(drm, "displayport table pointer invalid\n");
57 return NULL;
58 }
59
60 switch (table[0]) {
61 case 0x20:
62 case 0x21:
63 case 0x30:
64 case 0x40:
65 break;
66 default:
67 NV_ERROR(drm, "displayport table 0x%02x unknown\n", table[0]);
68 return NULL;
69 }
70
71 for (i = 0; i < table[3]; i++) {
72 *entry = ROMPTR(dev, table[table[1] + (i * table[2])]);
73 if (*entry && bios_encoder_match(dcb, ROM32((*entry)[0])))
74 return table;
75 }
76
77 NV_ERROR(drm, "displayport encoder table not found\n");
78 return NULL;
79}
80
81/****************************************************************************** 38/******************************************************************************
82 * link training 39 * link training
83 *****************************************************************************/ 40 *****************************************************************************/
84struct dp_state { 41struct dp_state {
85 struct nouveau_i2c_port *auxch; 42 struct nouveau_i2c_port *auxch;
86 struct dp_train_func *func; 43 struct nouveau_object *core;
87 struct dcb_output *dcb; 44 struct dcb_output *dcb;
88 int crtc; 45 int crtc;
89 u8 *dpcd; 46 u8 *dpcd;
@@ -97,13 +54,20 @@ static void
97dp_set_link_config(struct drm_device *dev, struct dp_state *dp) 54dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
98{ 55{
99 struct nouveau_drm *drm = nouveau_drm(dev); 56 struct nouveau_drm *drm = nouveau_drm(dev);
57 struct dcb_output *dcb = dp->dcb;
58 const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
59 const u32 moff = (dp->crtc << 3) | (link << 2) | or;
100 u8 sink[2]; 60 u8 sink[2];
61 u32 data;
101 62
102 NV_DEBUG(drm, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw); 63 NV_DEBUG(drm, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
103 64
104 /* set desired link configuration on the source */ 65 /* set desired link configuration on the source */
105 dp->func->link_set(dev, dp->dcb, dp->crtc, dp->link_nr, dp->link_bw, 66 data = ((dp->link_bw / 27000) << 8) | dp->link_nr;
106 dp->dpcd[2] & DP_ENHANCED_FRAME_CAP); 67 if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)
68 data |= NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH;
69
70 nv_call(dp->core, NV94_DISP_SOR_DP_LNKCTL + moff, data);
107 71
108 /* inform the sink of the new configuration */ 72 /* inform the sink of the new configuration */
109 sink[0] = dp->link_bw / 27000; 73 sink[0] = dp->link_bw / 27000;
@@ -118,11 +82,14 @@ static void
118dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 pattern) 82dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 pattern)
119{ 83{
120 struct nouveau_drm *drm = nouveau_drm(dev); 84 struct nouveau_drm *drm = nouveau_drm(dev);
85 struct dcb_output *dcb = dp->dcb;
86 const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
87 const u32 moff = (dp->crtc << 3) | (link << 2) | or;
121 u8 sink_tp; 88 u8 sink_tp;
122 89
123 NV_DEBUG(drm, "training pattern %d\n", pattern); 90 NV_DEBUG(drm, "training pattern %d\n", pattern);
124 91
125 dp->func->train_set(dev, dp->dcb, pattern); 92 nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, pattern);
126 93
127 nv_rdaux(dp->auxch, DP_TRAINING_PATTERN_SET, &sink_tp, 1); 94 nv_rdaux(dp->auxch, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
128 sink_tp &= ~DP_TRAINING_PATTERN_MASK; 95 sink_tp &= ~DP_TRAINING_PATTERN_MASK;
@@ -134,6 +101,9 @@ static int
134dp_link_train_commit(struct drm_device *dev, struct dp_state *dp) 101dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
135{ 102{
136 struct nouveau_drm *drm = nouveau_drm(dev); 103 struct nouveau_drm *drm = nouveau_drm(dev);
104 struct dcb_output *dcb = dp->dcb;
105 const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
106 const u32 moff = (dp->crtc << 3) | (link << 2) | or;
137 int i; 107 int i;
138 108
139 for (i = 0; i < dp->link_nr; i++) { 109 for (i = 0; i < dp->link_nr; i++) {
@@ -148,7 +118,8 @@ dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
148 dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; 118 dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
149 119
150 NV_DEBUG(drm, "config lane %d %02x\n", i, dp->conf[i]); 120 NV_DEBUG(drm, "config lane %d %02x\n", i, dp->conf[i]);
151 dp->func->train_adj(dev, dp->dcb, i, lvsw, lpre); 121
122 nv_call(dp->core, NV94_DISP_SOR_DP_DRVCTL(i) + moff, (lvsw << 8) | lpre);
152 } 123 }
153 124
154 return nv_wraux(dp->auxch, DP_TRAINING_LANE0_SET, dp->conf, 4); 125 return nv_wraux(dp->auxch, DP_TRAINING_LANE0_SET, dp->conf, 4);
@@ -234,59 +205,32 @@ dp_link_train_eq(struct drm_device *dev, struct dp_state *dp)
234} 205}
235 206
236static void 207static void
237dp_set_downspread(struct drm_device *dev, struct dp_state *dp, bool enable) 208dp_link_train_init(struct drm_device *dev, struct dp_state *dp, bool spread)
238{ 209{
239 u16 script = 0x0000; 210 struct dcb_output *dcb = dp->dcb;
240 u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry); 211 const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
241 if (table) { 212 const u32 moff = (dp->crtc << 3) | (link << 2) | or;
242 if (table[0] >= 0x20 && table[0] <= 0x30) { 213
243 if (enable) script = ROM16(entry[12]); 214 nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, (spread ?
244 else script = ROM16(entry[14]); 215 NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON :
245 } else 216 NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF) |
246 if (table[0] == 0x40) { 217 NV94_DISP_SOR_DP_TRAIN_OP_INIT);
247 if (enable) script = ROM16(entry[11]);
248 else script = ROM16(entry[13]);
249 }
250 }
251
252 nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
253}
254
255static void
256dp_link_train_init(struct drm_device *dev, struct dp_state *dp)
257{
258 u16 script = 0x0000;
259 u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
260 if (table) {
261 if (table[0] >= 0x20 && table[0] <= 0x30)
262 script = ROM16(entry[6]);
263 else
264 if (table[0] == 0x40)
265 script = ROM16(entry[5]);
266 }
267
268 nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
269} 218}
270 219
271static void 220static void
272dp_link_train_fini(struct drm_device *dev, struct dp_state *dp) 221dp_link_train_fini(struct drm_device *dev, struct dp_state *dp)
273{ 222{
274 u16 script = 0x0000; 223 struct dcb_output *dcb = dp->dcb;
275 u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry); 224 const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
276 if (table) { 225 const u32 moff = (dp->crtc << 3) | (link << 2) | or;
277 if (table[0] >= 0x20 && table[0] <= 0x30)
278 script = ROM16(entry[8]);
279 else
280 if (table[0] == 0x40)
281 script = ROM16(entry[7]);
282 }
283 226
284 nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc); 227 nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff,
228 NV94_DISP_SOR_DP_TRAIN_OP_FINI);
285} 229}
286 230
287static bool 231static bool
288nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate, 232nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
289 struct dp_train_func *func) 233 struct nouveau_object *core)
290{ 234{
291 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 235 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
292 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); 236 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
@@ -304,7 +248,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
304 if (!dp.auxch) 248 if (!dp.auxch)
305 return false; 249 return false;
306 250
307 dp.func = func; 251 dp.core = core;
308 dp.dcb = nv_encoder->dcb; 252 dp.dcb = nv_encoder->dcb;
309 dp.crtc = nv_crtc->index; 253 dp.crtc = nv_crtc->index;
310 dp.dpcd = nv_encoder->dp.dpcd; 254 dp.dpcd = nv_encoder->dp.dpcd;
@@ -318,11 +262,8 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
318 */ 262 */
319 gpio->irq(gpio, 0, nv_connector->hpd, 0xff, false); 263 gpio->irq(gpio, 0, nv_connector->hpd, 0xff, false);
320 264
321 /* enable down-spreading, if possible */ 265 /* enable down-spreading and execute pre-train script from vbios */
322 dp_set_downspread(dev, &dp, nv_encoder->dp.dpcd[3] & 1); 266 dp_link_train_init(dev, &dp, nv_encoder->dp.dpcd[3] & 1);
323
324 /* execute pre-train script from vbios */
325 dp_link_train_init(dev, &dp);
326 267
327 /* start off at highest link rate supported by encoder and display */ 268 /* start off at highest link rate supported by encoder and display */
328 while (*link_bw > nv_encoder->dp.link_bw) 269 while (*link_bw > nv_encoder->dp.link_bw)
@@ -365,7 +306,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
365 306
366void 307void
367nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate, 308nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
368 struct dp_train_func *func) 309 struct nouveau_object *core)
369{ 310{
370 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 311 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
371 struct nouveau_drm *drm = nouveau_drm(encoder->dev); 312 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
@@ -385,7 +326,7 @@ nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
385 nv_wraux(auxch, DP_SET_POWER, &status, 1); 326 nv_wraux(auxch, DP_SET_POWER, &status, 1);
386 327
387 if (mode == DRM_MODE_DPMS_ON) 328 if (mode == DRM_MODE_DPMS_ON)
388 nouveau_dp_link_train(encoder, datarate, func); 329 nouveau_dp_link_train(encoder, datarate, core);
389} 330}
390 331
391static void 332static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 0910125cbbc3..8b090f1eb51d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -49,8 +49,6 @@
49#include "nouveau_fbcon.h" 49#include "nouveau_fbcon.h"
50#include "nouveau_fence.h" 50#include "nouveau_fence.h"
51 51
52#include "nouveau_ttm.h"
53
54MODULE_PARM_DESC(config, "option string to pass to driver core"); 52MODULE_PARM_DESC(config, "option string to pass to driver core");
55static char *nouveau_config; 53static char *nouveau_config;
56module_param_named(config, nouveau_config, charp, 0400); 54module_param_named(config, nouveau_config, charp, 0400);
@@ -86,11 +84,16 @@ nouveau_cli_create(struct pci_dev *pdev, const char *name,
86 struct nouveau_cli *cli; 84 struct nouveau_cli *cli;
87 int ret; 85 int ret;
88 86
87 *pcli = NULL;
89 ret = nouveau_client_create_(name, nouveau_name(pdev), nouveau_config, 88 ret = nouveau_client_create_(name, nouveau_name(pdev), nouveau_config,
90 nouveau_debug, size, pcli); 89 nouveau_debug, size, pcli);
91 cli = *pcli; 90 cli = *pcli;
92 if (ret) 91 if (ret) {
92 if (cli)
93 nouveau_client_destroy(&cli->base);
94 *pcli = NULL;
93 return ret; 95 return ret;
96 }
94 97
95 mutex_init(&cli->mutex); 98 mutex_init(&cli->mutex);
96 return 0; 99 return 0;
@@ -129,7 +132,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
129 132
130 /* initialise synchronisation routines */ 133 /* initialise synchronisation routines */
131 if (device->card_type < NV_10) ret = nv04_fence_create(drm); 134 if (device->card_type < NV_10) ret = nv04_fence_create(drm);
132 else if (device->chipset < 0x84) ret = nv10_fence_create(drm); 135 else if (device->card_type < NV_50) ret = nv10_fence_create(drm);
136 else if (device->chipset < 0x84) ret = nv50_fence_create(drm);
133 else if (device->card_type < NV_C0) ret = nv84_fence_create(drm); 137 else if (device->card_type < NV_C0) ret = nv84_fence_create(drm);
134 else ret = nvc0_fence_create(drm); 138 else ret = nvc0_fence_create(drm);
135 if (ret) { 139 if (ret) {
@@ -148,7 +152,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
148 NV_ERROR(drm, "failed to create ce channel, %d\n", ret); 152 NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
149 153
150 arg0 = NVE0_CHANNEL_IND_ENGINE_GR; 154 arg0 = NVE0_CHANNEL_IND_ENGINE_GR;
151 arg1 = 0; 155 arg1 = 1;
152 } else { 156 } else {
153 arg0 = NvDmaFB; 157 arg0 = NvDmaFB;
154 arg1 = NvDmaTT; 158 arg1 = NvDmaTT;
@@ -190,8 +194,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
190 nouveau_bo_move_init(drm); 194 nouveau_bo_move_init(drm);
191} 195}
192 196
193static int __devinit 197static int nouveau_drm_probe(struct pci_dev *pdev,
194nouveau_drm_probe(struct pci_dev *pdev, const struct pci_device_id *pent) 198 const struct pci_device_id *pent)
195{ 199{
196 struct nouveau_device *device; 200 struct nouveau_device *device;
197 struct apertures_struct *aper; 201 struct apertures_struct *aper;
@@ -223,6 +227,7 @@ nouveau_drm_probe(struct pci_dev *pdev, const struct pci_device_id *pent)
223 boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; 227 boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
224#endif 228#endif
225 remove_conflicting_framebuffers(aper, "nouveaufb", boot); 229 remove_conflicting_framebuffers(aper, "nouveaufb", boot);
230 kfree(aper);
226 231
227 ret = nouveau_device_create(pdev, nouveau_name(pdev), pci_name(pdev), 232 ret = nouveau_device_create(pdev, nouveau_name(pdev), pci_name(pdev),
228 nouveau_config, nouveau_debug, &device); 233 nouveau_config, nouveau_debug, &device);
@@ -394,17 +399,12 @@ nouveau_drm_remove(struct pci_dev *pdev)
394} 399}
395 400
396int 401int
397nouveau_drm_suspend(struct pci_dev *pdev, pm_message_t pm_state) 402nouveau_do_suspend(struct drm_device *dev)
398{ 403{
399 struct drm_device *dev = pci_get_drvdata(pdev);
400 struct nouveau_drm *drm = nouveau_drm(dev); 404 struct nouveau_drm *drm = nouveau_drm(dev);
401 struct nouveau_cli *cli; 405 struct nouveau_cli *cli;
402 int ret; 406 int ret;
403 407
404 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
405 pm_state.event == PM_EVENT_PRETHAW)
406 return 0;
407
408 if (dev->mode_config.num_crtc) { 408 if (dev->mode_config.num_crtc) {
409 NV_INFO(drm, "suspending fbcon...\n"); 409 NV_INFO(drm, "suspending fbcon...\n");
410 nouveau_fbcon_set_suspend(dev, 1); 410 nouveau_fbcon_set_suspend(dev, 1);
@@ -435,13 +435,6 @@ nouveau_drm_suspend(struct pci_dev *pdev, pm_message_t pm_state)
435 goto fail_client; 435 goto fail_client;
436 436
437 nouveau_agp_fini(drm); 437 nouveau_agp_fini(drm);
438
439 pci_save_state(pdev);
440 if (pm_state.event == PM_EVENT_SUSPEND) {
441 pci_disable_device(pdev);
442 pci_set_power_state(pdev, PCI_D3hot);
443 }
444
445 return 0; 438 return 0;
446 439
447fail_client: 440fail_client:
@@ -456,24 +449,33 @@ fail_client:
456 return ret; 449 return ret;
457} 450}
458 451
459int 452int nouveau_pmops_suspend(struct device *dev)
460nouveau_drm_resume(struct pci_dev *pdev)
461{ 453{
462 struct drm_device *dev = pci_get_drvdata(pdev); 454 struct pci_dev *pdev = to_pci_dev(dev);
463 struct nouveau_drm *drm = nouveau_drm(dev); 455 struct drm_device *drm_dev = pci_get_drvdata(pdev);
464 struct nouveau_cli *cli;
465 int ret; 456 int ret;
466 457
467 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 458 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
468 return 0; 459 return 0;
469 460
470 NV_INFO(drm, "re-enabling device...\n"); 461 ret = nouveau_do_suspend(drm_dev);
471 pci_set_power_state(pdev, PCI_D0);
472 pci_restore_state(pdev);
473 ret = pci_enable_device(pdev);
474 if (ret) 462 if (ret)
475 return ret; 463 return ret;
476 pci_set_master(pdev); 464
465 pci_save_state(pdev);
466 pci_disable_device(pdev);
467 pci_set_power_state(pdev, PCI_D3hot);
468
469 return 0;
470}
471
472int
473nouveau_do_resume(struct drm_device *dev)
474{
475 struct nouveau_drm *drm = nouveau_drm(dev);
476 struct nouveau_cli *cli;
477
478 NV_INFO(drm, "re-enabling device...\n");
477 479
478 nouveau_agp_reset(drm); 480 nouveau_agp_reset(drm);
479 481
@@ -499,6 +501,42 @@ nouveau_drm_resume(struct pci_dev *pdev)
499 return 0; 501 return 0;
500} 502}
501 503
504int nouveau_pmops_resume(struct device *dev)
505{
506 struct pci_dev *pdev = to_pci_dev(dev);
507 struct drm_device *drm_dev = pci_get_drvdata(pdev);
508 int ret;
509
510 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
511 return 0;
512
513 pci_set_power_state(pdev, PCI_D0);
514 pci_restore_state(pdev);
515 ret = pci_enable_device(pdev);
516 if (ret)
517 return ret;
518 pci_set_master(pdev);
519
520 return nouveau_do_resume(drm_dev);
521}
522
523static int nouveau_pmops_freeze(struct device *dev)
524{
525 struct pci_dev *pdev = to_pci_dev(dev);
526 struct drm_device *drm_dev = pci_get_drvdata(pdev);
527
528 return nouveau_do_suspend(drm_dev);
529}
530
531static int nouveau_pmops_thaw(struct device *dev)
532{
533 struct pci_dev *pdev = to_pci_dev(dev);
534 struct drm_device *drm_dev = pci_get_drvdata(pdev);
535
536 return nouveau_do_resume(drm_dev);
537}
538
539
502static int 540static int
503nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) 541nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
504{ 542{
@@ -651,14 +689,22 @@ nouveau_drm_pci_table[] = {
651 {} 689 {}
652}; 690};
653 691
692static const struct dev_pm_ops nouveau_pm_ops = {
693 .suspend = nouveau_pmops_suspend,
694 .resume = nouveau_pmops_resume,
695 .freeze = nouveau_pmops_freeze,
696 .thaw = nouveau_pmops_thaw,
697 .poweroff = nouveau_pmops_freeze,
698 .restore = nouveau_pmops_resume,
699};
700
654static struct pci_driver 701static struct pci_driver
655nouveau_drm_pci_driver = { 702nouveau_drm_pci_driver = {
656 .name = "nouveau", 703 .name = "nouveau",
657 .id_table = nouveau_drm_pci_table, 704 .id_table = nouveau_drm_pci_table,
658 .probe = nouveau_drm_probe, 705 .probe = nouveau_drm_probe,
659 .remove = nouveau_drm_remove, 706 .remove = nouveau_drm_remove,
660 .suspend = nouveau_drm_suspend, 707 .driver.pm = &nouveau_pm_ops,
661 .resume = nouveau_drm_resume,
662}; 708};
663 709
664static int __init 710static int __init
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index a10169927086..aa89eb938b47 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -129,8 +129,8 @@ nouveau_dev(struct drm_device *dev)
129 return nv_device(nouveau_drm(dev)->device); 129 return nv_device(nouveau_drm(dev)->device);
130} 130}
131 131
132int nouveau_drm_suspend(struct pci_dev *, pm_message_t); 132int nouveau_pmops_suspend(struct device *);
133int nouveau_drm_resume(struct pci_dev *); 133int nouveau_pmops_resume(struct device *);
134 134
135#define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args) 135#define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args)
136#define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args) 136#define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args)
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 6a17bf2ba9a4..d0d95bd511ab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -93,14 +93,9 @@ get_slave_funcs(struct drm_encoder *enc)
93/* nouveau_dp.c */ 93/* nouveau_dp.c */
94bool nouveau_dp_detect(struct drm_encoder *); 94bool nouveau_dp_detect(struct drm_encoder *);
95void nouveau_dp_dpms(struct drm_encoder *, int mode, u32 datarate, 95void nouveau_dp_dpms(struct drm_encoder *, int mode, u32 datarate,
96 struct dp_train_func *); 96 struct nouveau_object *);
97u8 *nouveau_dp_bios_data(struct drm_device *, struct dcb_output *, u8 **);
98 97
99struct nouveau_connector * 98struct nouveau_connector *
100nouveau_encoder_connector_get(struct nouveau_encoder *encoder); 99nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
101int nv50_sor_create(struct drm_connector *, struct dcb_output *);
102void nv50_sor_dp_calc_tu(struct drm_device *, int, int, u32, u32);
103int nv50_dac_create(struct drm_connector *, struct dcb_output *);
104
105 100
106#endif /* __NOUVEAU_ENCODER_H__ */ 101#endif /* __NOUVEAU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index bedafd1c9539..cdb83acdffe2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -60,6 +60,7 @@ u32 nv10_fence_read(struct nouveau_channel *);
60void nv10_fence_context_del(struct nouveau_channel *); 60void nv10_fence_context_del(struct nouveau_channel *);
61void nv10_fence_destroy(struct nouveau_drm *); 61void nv10_fence_destroy(struct nouveau_drm *);
62int nv10_fence_create(struct nouveau_drm *); 62int nv10_fence_create(struct nouveau_drm *);
63void nv17_fence_resume(struct nouveau_drm *drm);
63 64
64int nv50_fence_create(struct nouveau_drm *); 65int nv50_fence_create(struct nouveau_drm *);
65int nv84_fence_create(struct nouveau_drm *); 66int nv84_fence_create(struct nouveau_drm *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 5e2f52158f19..8bf695c52f95 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -433,7 +433,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
433 return ret; 433 return ret;
434 } 434 }
435 435
436 ret = nouveau_bo_validate(nvbo, true, false, false); 436 ret = nouveau_bo_validate(nvbo, true, false);
437 if (unlikely(ret)) { 437 if (unlikely(ret)) {
438 if (ret != -ERESTARTSYS) 438 if (ret != -ERESTARTSYS)
439 NV_ERROR(drm, "fail ttm_validate\n"); 439 NV_ERROR(drm, "fail ttm_validate\n");
diff --git a/drivers/gpu/drm/nouveau/nouveau_hdmi.c b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
deleted file mode 100644
index 2c672cebc889..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_hdmi.c
+++ /dev/null
@@ -1,261 +0,0 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26#include "nouveau_drm.h"
27#include "nouveau_connector.h"
28#include "nouveau_encoder.h"
29#include "nouveau_crtc.h"
30
31static bool
32hdmi_sor(struct drm_encoder *encoder)
33{
34 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
35 if (nv_device(drm->device)->chipset < 0xa3 ||
36 nv_device(drm->device)->chipset == 0xaa ||
37 nv_device(drm->device)->chipset == 0xac)
38 return false;
39 return true;
40}
41
42static inline u32
43hdmi_base(struct drm_encoder *encoder)
44{
45 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
46 struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
47 if (!hdmi_sor(encoder))
48 return 0x616500 + (nv_crtc->index * 0x800);
49 return 0x61c500 + (nv_encoder->or * 0x800);
50}
51
52static void
53hdmi_wr32(struct drm_encoder *encoder, u32 reg, u32 val)
54{
55 struct nouveau_device *device = nouveau_dev(encoder->dev);
56 nv_wr32(device, hdmi_base(encoder) + reg, val);
57}
58
59static u32
60hdmi_rd32(struct drm_encoder *encoder, u32 reg)
61{
62 struct nouveau_device *device = nouveau_dev(encoder->dev);
63 return nv_rd32(device, hdmi_base(encoder) + reg);
64}
65
66static u32
67hdmi_mask(struct drm_encoder *encoder, u32 reg, u32 mask, u32 val)
68{
69 u32 tmp = hdmi_rd32(encoder, reg);
70 hdmi_wr32(encoder, reg, (tmp & ~mask) | val);
71 return tmp;
72}
73
74static void
75nouveau_audio_disconnect(struct drm_encoder *encoder)
76{
77 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
78 struct nouveau_device *device = nouveau_dev(encoder->dev);
79 u32 or = nv_encoder->or * 0x800;
80
81 if (hdmi_sor(encoder))
82 nv_mask(device, 0x61c448 + or, 0x00000003, 0x00000000);
83}
84
85static void
86nouveau_audio_mode_set(struct drm_encoder *encoder,
87 struct drm_display_mode *mode)
88{
89 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
90 struct nouveau_device *device = nouveau_dev(encoder->dev);
91 struct nouveau_connector *nv_connector;
92 u32 or = nv_encoder->or * 0x800;
93 int i;
94
95 nv_connector = nouveau_encoder_connector_get(nv_encoder);
96 if (!drm_detect_monitor_audio(nv_connector->edid)) {
97 nouveau_audio_disconnect(encoder);
98 return;
99 }
100
101 if (hdmi_sor(encoder)) {
102 nv_mask(device, 0x61c448 + or, 0x00000001, 0x00000001);
103
104 drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
105 if (nv_connector->base.eld[0]) {
106 u8 *eld = nv_connector->base.eld;
107 for (i = 0; i < eld[2] * 4; i++)
108 nv_wr32(device, 0x61c440 + or, (i << 8) | eld[i]);
109 for (i = eld[2] * 4; i < 0x60; i++)
110 nv_wr32(device, 0x61c440 + or, (i << 8) | 0x00);
111 nv_mask(device, 0x61c448 + or, 0x00000002, 0x00000002);
112 }
113 }
114}
115
116static void
117nouveau_hdmi_infoframe(struct drm_encoder *encoder, u32 ctrl, u8 *frame)
118{
119 /* calculate checksum for the infoframe */
120 u8 sum = 0, i;
121 for (i = 0; i < frame[2]; i++)
122 sum += frame[i];
123 frame[3] = 256 - sum;
124
125 /* disable infoframe, and write header */
126 hdmi_mask(encoder, ctrl + 0x00, 0x00000001, 0x00000000);
127 hdmi_wr32(encoder, ctrl + 0x08, *(u32 *)frame & 0xffffff);
128
129 /* register scans tell me the audio infoframe has only one set of
130 * subpack regs, according to tegra (gee nvidia, it'd be nice if we
131 * could get those docs too!), the hdmi block pads out the rest of
132 * the packet on its own.
133 */
134 if (ctrl == 0x020)
135 frame[2] = 6;
136
137 /* write out checksum and data, weird weird 7 byte register pairs */
138 for (i = 0; i < frame[2] + 1; i += 7) {
139 u32 rsubpack = ctrl + 0x0c + ((i / 7) * 8);
140 u32 *subpack = (u32 *)&frame[3 + i];
141 hdmi_wr32(encoder, rsubpack + 0, subpack[0]);
142 hdmi_wr32(encoder, rsubpack + 4, subpack[1] & 0xffffff);
143 }
144
145 /* enable the infoframe */
146 hdmi_mask(encoder, ctrl, 0x00000001, 0x00000001);
147}
148
149static void
150nouveau_hdmi_video_infoframe(struct drm_encoder *encoder,
151 struct drm_display_mode *mode)
152{
153 const u8 Y = 0, A = 0, B = 0, S = 0, C = 0, M = 0, R = 0;
154 const u8 ITC = 0, EC = 0, Q = 0, SC = 0, VIC = 0, PR = 0;
155 const u8 bar_top = 0, bar_bottom = 0, bar_left = 0, bar_right = 0;
156 u8 frame[20];
157
158 frame[0x00] = 0x82; /* AVI infoframe */
159 frame[0x01] = 0x02; /* version */
160 frame[0x02] = 0x0d; /* length */
161 frame[0x03] = 0x00;
162 frame[0x04] = (Y << 5) | (A << 4) | (B << 2) | S;
163 frame[0x05] = (C << 6) | (M << 4) | R;
164 frame[0x06] = (ITC << 7) | (EC << 4) | (Q << 2) | SC;
165 frame[0x07] = VIC;
166 frame[0x08] = PR;
167 frame[0x09] = bar_top & 0xff;
168 frame[0x0a] = bar_top >> 8;
169 frame[0x0b] = bar_bottom & 0xff;
170 frame[0x0c] = bar_bottom >> 8;
171 frame[0x0d] = bar_left & 0xff;
172 frame[0x0e] = bar_left >> 8;
173 frame[0x0f] = bar_right & 0xff;
174 frame[0x10] = bar_right >> 8;
175 frame[0x11] = 0x00;
176 frame[0x12] = 0x00;
177 frame[0x13] = 0x00;
178
179 nouveau_hdmi_infoframe(encoder, 0x020, frame);
180}
181
182static void
183nouveau_hdmi_audio_infoframe(struct drm_encoder *encoder,
184 struct drm_display_mode *mode)
185{
186 const u8 CT = 0x00, CC = 0x01, ceaSS = 0x00, SF = 0x00, FMT = 0x00;
187 const u8 CA = 0x00, DM_INH = 0, LSV = 0x00;
188 u8 frame[12];
189
190 frame[0x00] = 0x84; /* Audio infoframe */
191 frame[0x01] = 0x01; /* version */
192 frame[0x02] = 0x0a; /* length */
193 frame[0x03] = 0x00;
194 frame[0x04] = (CT << 4) | CC;
195 frame[0x05] = (SF << 2) | ceaSS;
196 frame[0x06] = FMT;
197 frame[0x07] = CA;
198 frame[0x08] = (DM_INH << 7) | (LSV << 3);
199 frame[0x09] = 0x00;
200 frame[0x0a] = 0x00;
201 frame[0x0b] = 0x00;
202
203 nouveau_hdmi_infoframe(encoder, 0x000, frame);
204}
205
206static void
207nouveau_hdmi_disconnect(struct drm_encoder *encoder)
208{
209 nouveau_audio_disconnect(encoder);
210
211 /* disable audio and avi infoframes */
212 hdmi_mask(encoder, 0x000, 0x00000001, 0x00000000);
213 hdmi_mask(encoder, 0x020, 0x00000001, 0x00000000);
214
215 /* disable hdmi */
216 hdmi_mask(encoder, 0x0a4, 0x40000000, 0x00000000);
217}
218
219void
220nouveau_hdmi_mode_set(struct drm_encoder *encoder,
221 struct drm_display_mode *mode)
222{
223 struct nouveau_device *device = nouveau_dev(encoder->dev);
224 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
225 struct nouveau_connector *nv_connector;
226 u32 max_ac_packet, rekey;
227
228 nv_connector = nouveau_encoder_connector_get(nv_encoder);
229 if (!mode || !nv_connector || !nv_connector->edid ||
230 !drm_detect_hdmi_monitor(nv_connector->edid)) {
231 nouveau_hdmi_disconnect(encoder);
232 return;
233 }
234
235 nouveau_hdmi_video_infoframe(encoder, mode);
236 nouveau_hdmi_audio_infoframe(encoder, mode);
237
238 hdmi_mask(encoder, 0x0d0, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
239 hdmi_mask(encoder, 0x068, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
240 hdmi_mask(encoder, 0x078, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
241
242 nv_mask(device, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
243 nv_mask(device, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
244 nv_mask(device, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
245
246 /* value matches nvidia binary driver, and tegra constant */
247 rekey = 56;
248
249 max_ac_packet = mode->htotal - mode->hdisplay;
250 max_ac_packet -= rekey;
251 max_ac_packet -= 18; /* constant from tegra */
252 max_ac_packet /= 32;
253
254 /* enable hdmi */
255 hdmi_mask(encoder, 0x0a4, 0x5f1f003f, 0x40000000 | /* enable */
256 0x1f000000 | /* unknown */
257 max_ac_packet << 16 |
258 rekey);
259
260 nouveau_audio_mode_set(encoder, mode);
261}
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 1d8cb506a28a..1303680affd3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -60,18 +60,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
60 return IRQ_NONE; 60 return IRQ_NONE;
61 61
62 nv_subdev(pmc)->intr(nv_subdev(pmc)); 62 nv_subdev(pmc)->intr(nv_subdev(pmc));
63
64 if (dev->mode_config.num_crtc) {
65 if (device->card_type >= NV_D0) {
66 if (nv_rd32(device, 0x000100) & 0x04000000)
67 nvd0_display_intr(dev);
68 } else
69 if (device->card_type >= NV_50) {
70 if (nv_rd32(device, 0x000100) & 0x04000000)
71 nv50_display_intr(dev);
72 }
73 }
74
75 return IRQ_HANDLED; 63 return IRQ_HANDLED;
76} 64}
77 65
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index 5566172774df..a701ff5ffa5b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -698,10 +698,10 @@ static int
698nouveau_hwmon_init(struct drm_device *dev) 698nouveau_hwmon_init(struct drm_device *dev)
699{ 699{
700 struct nouveau_pm *pm = nouveau_pm(dev); 700 struct nouveau_pm *pm = nouveau_pm(dev);
701 struct nouveau_drm *drm = nouveau_drm(dev);
702 struct nouveau_therm *therm = nouveau_therm(drm->device);
703 701
704#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE)) 702#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
703 struct nouveau_drm *drm = nouveau_drm(dev);
704 struct nouveau_therm *therm = nouveau_therm(drm->device);
705 struct device *hwmon_dev; 705 struct device *hwmon_dev;
706 int ret = 0; 706 int ret = 0;
707 707
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 366462cf8a2c..b8e05ae38212 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -155,10 +155,6 @@ nouveau_prime_new(struct drm_device *dev,
155 return ret; 155 return ret;
156 nvbo = *pnvbo; 156 nvbo = *pnvbo;
157 157
158 /* we restrict allowed domains on nv50+ to only the types
159 * that were requested at creation time. not possibly on
160 * earlier chips without busting the ABI.
161 */
162 nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART; 158 nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
163 nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size); 159 nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
164 if (!nvbo->gem) { 160 if (!nvbo->gem) {
@@ -197,6 +193,7 @@ struct drm_gem_object *nouveau_gem_prime_import(struct drm_device *dev,
197 if (nvbo->gem) { 193 if (nvbo->gem) {
198 if (nvbo->gem->dev == dev) { 194 if (nvbo->gem->dev == dev) {
199 drm_gem_object_reference(nvbo->gem); 195 drm_gem_object_reference(nvbo->gem);
196 dma_buf_put(dma_buf);
200 return nvbo->gem; 197 return nvbo->gem;
201 } 198 }
202 } 199 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 6f0ac64873df..25d3495725eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -31,12 +31,11 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
31 enum vga_switcheroo_state state) 31 enum vga_switcheroo_state state)
32{ 32{
33 struct drm_device *dev = pci_get_drvdata(pdev); 33 struct drm_device *dev = pci_get_drvdata(pdev);
34 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
35 34
36 if (state == VGA_SWITCHEROO_ON) { 35 if (state == VGA_SWITCHEROO_ON) {
37 printk(KERN_ERR "VGA switcheroo: switched nouveau on\n"); 36 printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
38 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 37 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
39 nouveau_drm_resume(pdev); 38 nouveau_pmops_resume(&pdev->dev);
40 drm_kms_helper_poll_enable(dev); 39 drm_kms_helper_poll_enable(dev);
41 dev->switch_power_state = DRM_SWITCH_POWER_ON; 40 dev->switch_power_state = DRM_SWITCH_POWER_ON;
42 } else { 41 } else {
@@ -44,7 +43,7 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
44 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 43 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
45 drm_kms_helper_poll_disable(dev); 44 drm_kms_helper_poll_disable(dev);
46 nouveau_switcheroo_optimus_dsm(); 45 nouveau_switcheroo_optimus_dsm();
47 nouveau_drm_suspend(pdev, pmm); 46 nouveau_pmops_suspend(&pdev->dev);
48 dev->switch_power_state = DRM_SWITCH_POWER_OFF; 47 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
49 } 48 }
50} 49}
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 82a0d9c6cda3..6578cd28c556 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -730,6 +730,7 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
730 drm_crtc_cleanup(crtc); 730 drm_crtc_cleanup(crtc);
731 731
732 nouveau_bo_unmap(nv_crtc->cursor.nvbo); 732 nouveau_bo_unmap(nv_crtc->cursor.nvbo);
733 nouveau_bo_unpin(nv_crtc->cursor.nvbo);
733 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); 734 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
734 kfree(nv_crtc); 735 kfree(nv_crtc);
735} 736}
@@ -1056,8 +1057,11 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
1056 0, 0x0000, NULL, &nv_crtc->cursor.nvbo); 1057 0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
1057 if (!ret) { 1058 if (!ret) {
1058 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); 1059 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
1059 if (!ret) 1060 if (!ret) {
1060 ret = nouveau_bo_map(nv_crtc->cursor.nvbo); 1061 ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
1062 if (ret)
1063 nouveau_bo_unpin(nv_crtc->cursor.nvbo);
1064 }
1061 if (ret) 1065 if (ret)
1062 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); 1066 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
1063 } 1067 }
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 184cdf806761..39ffc07f906b 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -505,7 +505,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
505 505
506static inline bool is_powersaving_dpms(int mode) 506static inline bool is_powersaving_dpms(int mode)
507{ 507{
508 return (mode != DRM_MODE_DPMS_ON); 508 return mode != DRM_MODE_DPMS_ON && mode != NV_DPMS_CLEARED;
509} 509}
510 510
511static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode) 511static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 846050f04c23..2cd6fb8c548e 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -60,8 +60,6 @@ nv04_display_create(struct drm_device *dev)
60 struct nv04_display *disp; 60 struct nv04_display *disp;
61 int i, ret; 61 int i, ret;
62 62
63 NV_DEBUG(drm, "\n");
64
65 disp = kzalloc(sizeof(*disp), GFP_KERNEL); 63 disp = kzalloc(sizeof(*disp), GFP_KERNEL);
66 if (!disp) 64 if (!disp)
67 return -ENOMEM; 65 return -ENOMEM;
@@ -132,13 +130,10 @@ nv04_display_create(struct drm_device *dev)
132void 130void
133nv04_display_destroy(struct drm_device *dev) 131nv04_display_destroy(struct drm_device *dev)
134{ 132{
135 struct nouveau_drm *drm = nouveau_drm(dev);
136 struct nv04_display *disp = nv04_display(dev); 133 struct nv04_display *disp = nv04_display(dev);
137 struct drm_encoder *encoder; 134 struct drm_encoder *encoder;
138 struct drm_crtc *crtc; 135 struct drm_crtc *crtc;
139 136
140 NV_DEBUG(drm, "\n");
141
142 /* Turn every CRTC off. */ 137 /* Turn every CRTC off. */
143 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 138 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
144 struct drm_mode_set modeset = { 139 struct drm_mode_set modeset = {
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index ce752bf5cc4e..03017f24d593 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -155,11 +155,20 @@ nv10_fence_destroy(struct nouveau_drm *drm)
155{ 155{
156 struct nv10_fence_priv *priv = drm->fence; 156 struct nv10_fence_priv *priv = drm->fence;
157 nouveau_bo_unmap(priv->bo); 157 nouveau_bo_unmap(priv->bo);
158 if (priv->bo)
159 nouveau_bo_unpin(priv->bo);
158 nouveau_bo_ref(NULL, &priv->bo); 160 nouveau_bo_ref(NULL, &priv->bo);
159 drm->fence = NULL; 161 drm->fence = NULL;
160 kfree(priv); 162 kfree(priv);
161} 163}
162 164
165void nv17_fence_resume(struct nouveau_drm *drm)
166{
167 struct nv10_fence_priv *priv = drm->fence;
168
169 nouveau_bo_wr32(priv->bo, 0, priv->sequence);
170}
171
163int 172int
164nv10_fence_create(struct nouveau_drm *drm) 173nv10_fence_create(struct nouveau_drm *drm)
165{ 174{
@@ -183,8 +192,11 @@ nv10_fence_create(struct nouveau_drm *drm)
183 0, 0x0000, NULL, &priv->bo); 192 0, 0x0000, NULL, &priv->bo);
184 if (!ret) { 193 if (!ret) {
185 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM); 194 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
186 if (!ret) 195 if (!ret) {
187 ret = nouveau_bo_map(priv->bo); 196 ret = nouveau_bo_map(priv->bo);
197 if (ret)
198 nouveau_bo_unpin(priv->bo);
199 }
188 if (ret) 200 if (ret)
189 nouveau_bo_ref(NULL, &priv->bo); 201 nouveau_bo_ref(NULL, &priv->bo);
190 } 202 }
@@ -192,6 +204,7 @@ nv10_fence_create(struct nouveau_drm *drm)
192 if (ret == 0) { 204 if (ret == 0) {
193 nouveau_bo_wr32(priv->bo, 0x000, 0x00000000); 205 nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
194 priv->base.sync = nv17_fence_sync; 206 priv->base.sync = nv17_fence_sync;
207 priv->base.resume = nv17_fence_resume;
195 } 208 }
196 } 209 }
197 210
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 897b63621e2d..2ca276ada507 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -195,7 +195,7 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
195 break; 195 break;
196 } 196 }
197 197
198 drm_connector_property_set_value(connector, 198 drm_object_property_set_value(&connector->base,
199 conf->tv_subconnector_property, 199 conf->tv_subconnector_property,
200 tv_enc->subconnector); 200 tv_enc->subconnector);
201 201
@@ -672,25 +672,25 @@ static int nv17_tv_create_resources(struct drm_encoder *encoder,
672 672
673 drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names); 673 drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names);
674 674
675 drm_connector_attach_property(connector, 675 drm_object_attach_property(&connector->base,
676 conf->tv_select_subconnector_property, 676 conf->tv_select_subconnector_property,
677 tv_enc->select_subconnector); 677 tv_enc->select_subconnector);
678 drm_connector_attach_property(connector, 678 drm_object_attach_property(&connector->base,
679 conf->tv_subconnector_property, 679 conf->tv_subconnector_property,
680 tv_enc->subconnector); 680 tv_enc->subconnector);
681 drm_connector_attach_property(connector, 681 drm_object_attach_property(&connector->base,
682 conf->tv_mode_property, 682 conf->tv_mode_property,
683 tv_enc->tv_norm); 683 tv_enc->tv_norm);
684 drm_connector_attach_property(connector, 684 drm_object_attach_property(&connector->base,
685 conf->tv_flicker_reduction_property, 685 conf->tv_flicker_reduction_property,
686 tv_enc->flicker); 686 tv_enc->flicker);
687 drm_connector_attach_property(connector, 687 drm_object_attach_property(&connector->base,
688 conf->tv_saturation_property, 688 conf->tv_saturation_property,
689 tv_enc->saturation); 689 tv_enc->saturation);
690 drm_connector_attach_property(connector, 690 drm_object_attach_property(&connector->base,
691 conf->tv_hue_property, 691 conf->tv_hue_property,
692 tv_enc->hue); 692 tv_enc->hue);
693 drm_connector_attach_property(connector, 693 drm_object_attach_property(&connector->base,
694 conf->tv_overscan_property, 694 conf->tv_overscan_property,
695 tv_enc->overscan); 695 tv_enc->overscan);
696 696
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
deleted file mode 100644
index 222de77d6269..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ /dev/null
@@ -1,764 +0,0 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <drm/drmP.h>
28#include <drm/drm_crtc_helper.h>
29
30#include "nouveau_reg.h"
31#include "nouveau_drm.h"
32#include "nouveau_dma.h"
33#include "nouveau_gem.h"
34#include "nouveau_hw.h"
35#include "nouveau_encoder.h"
36#include "nouveau_crtc.h"
37#include "nouveau_connector.h"
38#include "nv50_display.h"
39
40#include <subdev/clock.h>
41
42static void
43nv50_crtc_lut_load(struct drm_crtc *crtc)
44{
45 struct nouveau_drm *drm = nouveau_drm(crtc->dev);
46 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
47 void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
48 int i;
49
50 NV_DEBUG(drm, "\n");
51
52 for (i = 0; i < 256; i++) {
53 writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0);
54 writew(nv_crtc->lut.g[i] >> 2, lut + 8*i + 2);
55 writew(nv_crtc->lut.b[i] >> 2, lut + 8*i + 4);
56 }
57
58 if (nv_crtc->lut.depth == 30) {
59 writew(nv_crtc->lut.r[i - 1] >> 2, lut + 8*i + 0);
60 writew(nv_crtc->lut.g[i - 1] >> 2, lut + 8*i + 2);
61 writew(nv_crtc->lut.b[i - 1] >> 2, lut + 8*i + 4);
62 }
63}
64
65int
66nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
67{
68 struct drm_device *dev = nv_crtc->base.dev;
69 struct nouveau_drm *drm = nouveau_drm(dev);
70 struct nouveau_channel *evo = nv50_display(dev)->master;
71 int index = nv_crtc->index, ret;
72
73 NV_DEBUG(drm, "index %d\n", nv_crtc->index);
74 NV_DEBUG(drm, "%s\n", blanked ? "blanked" : "unblanked");
75
76 if (blanked) {
77 nv_crtc->cursor.hide(nv_crtc, false);
78
79 ret = RING_SPACE(evo, nv_device(drm->device)->chipset != 0x50 ? 7 : 5);
80 if (ret) {
81 NV_ERROR(drm, "no space while blanking crtc\n");
82 return ret;
83 }
84 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
85 OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK);
86 OUT_RING(evo, 0);
87 if (nv_device(drm->device)->chipset != 0x50) {
88 BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
89 OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE);
90 }
91
92 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
93 OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
94 } else {
95 if (nv_crtc->cursor.visible)
96 nv_crtc->cursor.show(nv_crtc, false);
97 else
98 nv_crtc->cursor.hide(nv_crtc, false);
99
100 ret = RING_SPACE(evo, nv_device(drm->device)->chipset != 0x50 ? 10 : 8);
101 if (ret) {
102 NV_ERROR(drm, "no space while unblanking crtc\n");
103 return ret;
104 }
105 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
106 OUT_RING(evo, nv_crtc->lut.depth == 8 ?
107 NV50_EVO_CRTC_CLUT_MODE_OFF :
108 NV50_EVO_CRTC_CLUT_MODE_ON);
109 OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8);
110 if (nv_device(drm->device)->chipset != 0x50) {
111 BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
112 OUT_RING(evo, NvEvoVRAM);
113 }
114
115 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2);
116 OUT_RING(evo, nv_crtc->fb.offset >> 8);
117 OUT_RING(evo, 0);
118 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
119 if (nv_device(drm->device)->chipset != 0x50)
120 if (nv_crtc->fb.tile_flags == 0x7a00 ||
121 nv_crtc->fb.tile_flags == 0xfe00)
122 OUT_RING(evo, NvEvoFB32);
123 else
124 if (nv_crtc->fb.tile_flags == 0x7000)
125 OUT_RING(evo, NvEvoFB16);
126 else
127 OUT_RING(evo, NvEvoVRAM_LP);
128 else
129 OUT_RING(evo, NvEvoVRAM_LP);
130 }
131
132 nv_crtc->fb.blanked = blanked;
133 return 0;
134}
135
136static int
137nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
138{
139 struct nouveau_channel *evo = nv50_display(nv_crtc->base.dev)->master;
140 struct nouveau_connector *nv_connector;
141 struct drm_connector *connector;
142 int head = nv_crtc->index, ret;
143 u32 mode = 0x00;
144
145 nv_connector = nouveau_crtc_connector_get(nv_crtc);
146 connector = &nv_connector->base;
147 if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
148 if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
149 mode = DITHERING_MODE_DYNAMIC2X2;
150 } else {
151 mode = nv_connector->dithering_mode;
152 }
153
154 if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
155 if (connector->display_info.bpc >= 8)
156 mode |= DITHERING_DEPTH_8BPC;
157 } else {
158 mode |= nv_connector->dithering_depth;
159 }
160
161 ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
162 if (ret == 0) {
163 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1);
164 OUT_RING (evo, mode);
165 if (update) {
166 BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
167 OUT_RING (evo, 0);
168 FIRE_RING (evo);
169 }
170 }
171
172 return ret;
173}
174
175static int
176nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
177{
178 struct drm_device *dev = nv_crtc->base.dev;
179 struct nouveau_drm *drm = nouveau_drm(dev);
180 struct nouveau_channel *evo = nv50_display(dev)->master;
181 int ret;
182 int adj;
183 u32 hue, vib;
184
185 NV_DEBUG(drm, "vibrance = %i, hue = %i\n",
186 nv_crtc->color_vibrance, nv_crtc->vibrant_hue);
187
188 ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
189 if (ret) {
190 NV_ERROR(drm, "no space while setting color vibrance\n");
191 return ret;
192 }
193
194 adj = (nv_crtc->color_vibrance > 0) ? 50 : 0;
195 vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff;
196
197 hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
198
199 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
200 OUT_RING (evo, (hue << 20) | (vib << 8));
201
202 if (update) {
203 BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
204 OUT_RING (evo, 0);
205 FIRE_RING (evo);
206 }
207
208 return 0;
209}
210
211struct nouveau_connector *
212nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
213{
214 struct drm_device *dev = nv_crtc->base.dev;
215 struct drm_connector *connector;
216 struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
217
218 /* The safest approach is to find an encoder with the right crtc, that
219 * is also linked to a connector. */
220 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
221 if (connector->encoder)
222 if (connector->encoder->crtc == crtc)
223 return nouveau_connector(connector);
224 }
225
226 return NULL;
227}
228
229static int
230nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
231{
232 struct nouveau_connector *nv_connector;
233 struct drm_crtc *crtc = &nv_crtc->base;
234 struct drm_device *dev = crtc->dev;
235 struct nouveau_drm *drm = nouveau_drm(dev);
236 struct nouveau_channel *evo = nv50_display(dev)->master;
237 struct drm_display_mode *umode = &crtc->mode;
238 struct drm_display_mode *omode;
239 int scaling_mode, ret;
240 u32 ctrl = 0, oX, oY;
241
242 NV_DEBUG(drm, "\n");
243
244 nv_connector = nouveau_crtc_connector_get(nv_crtc);
245 if (!nv_connector || !nv_connector->native_mode) {
246 NV_ERROR(drm, "no native mode, forcing panel scaling\n");
247 scaling_mode = DRM_MODE_SCALE_NONE;
248 } else {
249 scaling_mode = nv_connector->scaling_mode;
250 }
251
252 /* start off at the resolution we programmed the crtc for, this
253 * effectively handles NONE/FULL scaling
254 */
255 if (scaling_mode != DRM_MODE_SCALE_NONE)
256 omode = nv_connector->native_mode;
257 else
258 omode = umode;
259
260 oX = omode->hdisplay;
261 oY = omode->vdisplay;
262 if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
263 oY *= 2;
264
265 /* add overscan compensation if necessary, will keep the aspect
266 * ratio the same as the backend mode unless overridden by the
267 * user setting both hborder and vborder properties.
268 */
269 if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
270 (nv_connector->underscan == UNDERSCAN_AUTO &&
271 nv_connector->edid &&
272 drm_detect_hdmi_monitor(nv_connector->edid)))) {
273 u32 bX = nv_connector->underscan_hborder;
274 u32 bY = nv_connector->underscan_vborder;
275 u32 aspect = (oY << 19) / oX;
276
277 if (bX) {
278 oX -= (bX * 2);
279 if (bY) oY -= (bY * 2);
280 else oY = ((oX * aspect) + (aspect / 2)) >> 19;
281 } else {
282 oX -= (oX >> 4) + 32;
283 if (bY) oY -= (bY * 2);
284 else oY = ((oX * aspect) + (aspect / 2)) >> 19;
285 }
286 }
287
288 /* handle CENTER/ASPECT scaling, taking into account the areas
289 * removed already for overscan compensation
290 */
291 switch (scaling_mode) {
292 case DRM_MODE_SCALE_CENTER:
293 oX = min((u32)umode->hdisplay, oX);
294 oY = min((u32)umode->vdisplay, oY);
295 /* fall-through */
296 case DRM_MODE_SCALE_ASPECT:
297 if (oY < oX) {
298 u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
299 oX = ((oY * aspect) + (aspect / 2)) >> 19;
300 } else {
301 u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
302 oY = ((oX * aspect) + (aspect / 2)) >> 19;
303 }
304 break;
305 default:
306 break;
307 }
308
309 if (umode->hdisplay != oX || umode->vdisplay != oY ||
310 umode->flags & DRM_MODE_FLAG_INTERLACE ||
311 umode->flags & DRM_MODE_FLAG_DBLSCAN)
312 ctrl |= NV50_EVO_CRTC_SCALE_CTRL_ACTIVE;
313
314 ret = RING_SPACE(evo, 5);
315 if (ret)
316 return ret;
317
318 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
319 OUT_RING (evo, ctrl);
320 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
321 OUT_RING (evo, oY << 16 | oX);
322 OUT_RING (evo, oY << 16 | oX);
323
324 if (update) {
325 nv50_display_flip_stop(crtc);
326 nv50_display_sync(dev);
327 nv50_display_flip_next(crtc, crtc->fb, NULL);
328 }
329
330 return 0;
331}
332
333int
334nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
335{
336 struct nouveau_device *device = nouveau_dev(dev);
337 struct nouveau_clock *clk = nouveau_clock(device);
338
339 return clk->pll_set(clk, PLL_VPLL0 + head, pclk);
340}
341
342static void
343nv50_crtc_destroy(struct drm_crtc *crtc)
344{
345 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
346 struct nouveau_drm *drm = nouveau_drm(crtc->dev);
347
348 NV_DEBUG(drm, "\n");
349
350 nouveau_bo_unmap(nv_crtc->lut.nvbo);
351 nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
352 nouveau_bo_unmap(nv_crtc->cursor.nvbo);
353 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
354 drm_crtc_cleanup(&nv_crtc->base);
355 kfree(nv_crtc);
356}
357
/* DRM cursor_set entry point: upload a 64x64 ARGB cursor image from the
 * userspace GEM object into the CRTC's private cursor BO and show it,
 * or hide the cursor when buffer_handle is 0.
 *
 * Returns 0 on success, -EINVAL for unsupported dimensions, -ENOENT if
 * the handle does not resolve, or the error from nouveau_bo_map().
 */
int
nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
		     uint32_t buffer_handle, uint32_t width, uint32_t height)
{
	struct drm_device *dev = crtc->dev;
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct nouveau_bo *cursor = NULL;
	struct drm_gem_object *gem;
	int ret = 0, i;

	/* handle == 0 means "disable the cursor" */
	if (!buffer_handle) {
		nv_crtc->cursor.hide(nv_crtc, true);
		return 0;
	}

	/* hardware cursor is fixed at 64x64 here */
	if (width != 64 || height != 64)
		return -EINVAL;

	gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
	if (!gem)
		return -ENOENT;
	cursor = nouveau_gem_object(gem);

	ret = nouveau_bo_map(cursor);
	if (ret)
		goto out;

	/* The simple will do for now. */
	/* word-by-word copy of the 64*64 pixel image into our cursor BO */
	for (i = 0; i < 64 * 64; i++)
		nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, nouveau_bo_rd32(cursor, i));

	nouveau_bo_unmap(cursor);

	/* point the hardware at our copy and make it visible */
	nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset);
	nv_crtc->cursor.show(nv_crtc, true);

out:
	/* drop the lookup reference in every case */
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}
398
/* DRM cursor_move entry point: delegate to the per-CRTC cursor
 * implementation (nv50_cursor_set_pos).  Always succeeds.
 */
int
nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);

	nv_crtc->cursor.set_pos(nv_crtc, x, y);
	return 0;
}
407
408static void
409nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
410 uint32_t start, uint32_t size)
411{
412 int end = (start + size > 256) ? 256 : start + size, i;
413 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
414
415 for (i = start; i < end; i++) {
416 nv_crtc->lut.r[i] = r[i];
417 nv_crtc->lut.g[i] = g[i];
418 nv_crtc->lut.b[i] = b[i];
419 }
420
421 /* We need to know the depth before we upload, but it's possible to
422 * get called before a framebuffer is bound. If this is the case,
423 * mark the lut values as dirty by setting depth==0, and it'll be
424 * uploaded on the first mode_set_base()
425 */
426 if (!nv_crtc->base.fb) {
427 nv_crtc->lut.depth = 0;
428 return;
429 }
430
431 nv50_crtc_lut_load(crtc);
432}
433
/* .save hook — not implemented for nv50; the "!!" error log flags any
 * unexpected call.
 */
static void
nv50_crtc_save(struct drm_crtc *crtc)
{
	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
	NV_ERROR(drm, "!!\n");
}
440
/* .restore hook — not implemented for nv50; the "!!" error log flags
 * any unexpected call.
 */
static void
nv50_crtc_restore(struct drm_crtc *crtc)
{
	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
	NV_ERROR(drm, "!!\n");
}
447
/* CRTC function table registered with the DRM core via drm_crtc_init(). */
static const struct drm_crtc_funcs nv50_crtc_funcs = {
	.save = nv50_crtc_save,
	.restore = nv50_crtc_restore,
	.cursor_set = nv50_crtc_cursor_set,
	.cursor_move = nv50_crtc_cursor_move,
	.gamma_set = nv50_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.page_flip = nouveau_crtc_page_flip,
	.destroy = nv50_crtc_destroy,
};
458
/* Intentional no-op: CRTC power state is handled via prepare()/commit()
 * blanking on nv50 rather than an explicit DPMS hook.
 */
static void
nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
{
}
463
/* Helper .prepare hook: quiesce the CRTC before a modeset — stop any
 * pending page flips, notify the vblank core, and blank the head.
 */
static void
nv50_crtc_prepare(struct drm_crtc *crtc)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct nouveau_drm *drm = nouveau_drm(dev);

	NV_DEBUG(drm, "index %d\n", nv_crtc->index);

	nv50_display_flip_stop(crtc);
	drm_vblank_pre_modeset(dev, nv_crtc->index);
	nv50_crtc_blank(nv_crtc, true);
}
477
/* Helper .commit hook: re-enable the CRTC after a modeset — unblank,
 * notify the vblank core, push the new state to hardware, and re-arm
 * page flipping on the current framebuffer.
 */
static void
nv50_crtc_commit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);

	NV_DEBUG(drm, "index %d\n", nv_crtc->index);

	nv50_crtc_blank(nv_crtc, false);
	drm_vblank_post_modeset(dev, nv_crtc->index);
	nv50_display_sync(dev);
	nv50_display_flip_next(crtc, crtc->fb, NULL);
}
492
/* Helper .mode_fixup hook: no per-CRTC mode adjustment is needed on
 * nv50 (scaling is handled elsewhere), so accept every mode as-is.
 */
static bool
nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
		     struct drm_display_mode *adjusted_mode)
{
	return true;
}
499
/* Common scanout-surface programming for both the normal and atomic
 * mode_set_base paths.
 *
 * @passed_fb: in the atomic case, the fb to switch to; otherwise the
 *             previous fb to unpin after pinning the new one.
 * @x, @y:     panning offset within the framebuffer.
 * @atomic:    true for the kgdb/atomic path — no pinning is done there,
 *             the fb is assumed to be pinned already.
 *
 * Emits the EVO methods that set the DMA object, offset, pitch, format,
 * CLUT mode and pan position for the head.  Returns 0 or a negative
 * errno from pinning / ring-space allocation.
 */
static int
nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
			   struct drm_framebuffer *passed_fb,
			   int x, int y, bool atomic)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct drm_device *dev = nv_crtc->base.dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_channel *evo = nv50_display(dev)->master;
	struct drm_framebuffer *drm_fb;
	struct nouveau_framebuffer *fb;
	int ret;

	NV_DEBUG(drm, "index %d\n", nv_crtc->index);

	/* no fb bound */
	if (!atomic && !crtc->fb) {
		NV_DEBUG(drm, "No FB bound\n");
		return 0;
	}

	/* If atomic, we want to switch to the fb we were passed, so
	 * now we update pointers to do that.  (We don't pin; just
	 * assume we're already pinned and update the base address.)
	 */
	if (atomic) {
		drm_fb = passed_fb;
		fb = nouveau_framebuffer(passed_fb);
	} else {
		drm_fb = crtc->fb;
		fb = nouveau_framebuffer(crtc->fb);
		/* If not atomic, we can go ahead and pin, and unpin the
		 * old fb we were passed.
		 */
		ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
		if (ret)
			return ret;

		if (passed_fb) {
			struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb);
			nouveau_bo_unpin(ofb->nvbo);
		}
	}

	/* cache the new scanout parameters on the CRTC */
	nv_crtc->fb.offset = fb->nvbo->bo.offset;
	nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
	nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
	/* NV84+ only: (re)bind the framebuffer DMA object while unblanked */
	if (!nv_crtc->fb.blanked && nv_device(drm->device)->chipset != 0x50) {
		ret = RING_SPACE(evo, 2);
		if (ret)
			return ret;

		BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
		OUT_RING  (evo, fb->r_dma);
	}

	ret = RING_SPACE(evo, 12);
	if (ret)
		return ret;

	/* offset, size, pitch and format of the scanout surface */
	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
	OUT_RING  (evo, nv_crtc->fb.offset >> 8);
	OUT_RING  (evo, 0);
	OUT_RING  (evo, (drm_fb->height << 16) | drm_fb->width);
	OUT_RING  (evo, fb->r_pitch);
	OUT_RING  (evo, fb->r_format);

	/* palette lookup only makes sense for 8bpp surfaces */
	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
	OUT_RING  (evo, fb->base.depth == 8 ?
		   NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);

	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
	OUT_RING  (evo, (y << 16) | x);

	/* gamma LUT was marked dirty (depth 0) or depth changed: reload */
	if (nv_crtc->lut.depth != fb->base.depth) {
		nv_crtc->lut.depth = fb->base.depth;
		nv50_crtc_lut_load(crtc);
	}

	return 0;
}
581
/* Helper .mode_set hook: translate the DRM mode timings into the EVO
 * channel's sync/blank representation, program the head, then bind the
 * framebuffer via nv50_crtc_do_mode_set_base().
 *
 * @umode: the user-requested mode (used for the real display size).
 * @mode:  the adjusted mode actually programmed.
 */
static int
nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
		   struct drm_display_mode *mode, int x, int y,
		   struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct nouveau_channel *evo = nv50_display(dev)->master;
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	u32 head = nv_crtc->index * 0x400;	/* per-head register stride */
	u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
	u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
	u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
	u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
	u32 vblan2e = 0, vblan2s = 1;
	int ret;

	/* hw timing description looks like this:
	 *
	 * <sync> <back porch> <---------display---------> <front porch>
	 * ______
	 *       |____________|---------------------------|____________|
	 *
	 * ^ synce      ^ blanke                     ^ blanks     ^ active
	 *
	 * interlaced modes also have 2 additional values pointing at the end
	 * and start of the next field's blanking period.
	 */

	hactive = mode->htotal;
	hsynce  = mode->hsync_end - mode->hsync_start - 1;
	hbackp  = mode->htotal - mode->hsync_end;
	hblanke = hsynce + hbackp;
	hfrontp = mode->hsync_start - mode->hdisplay;
	hblanks = mode->htotal - hfrontp - 1;

	/* vertical values are per-field: scaled by vscan, divided by ilace */
	vactive = mode->vtotal * vscan / ilace;
	vsynce  = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
	vbackp  = (mode->vtotal - mode->vsync_end) * vscan / ilace;
	vblanke = vsynce + vbackp;
	vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
	vblanks = vactive - vfrontp - 1;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* second field's blanking window */
		vblan2e = vactive + vsynce + vbackp;
		vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
		vactive = (vactive * 2) + 1;
	}

	ret = RING_SPACE(evo, 18);
	if (ret == 0) {
		BEGIN_NV04(evo, 0, 0x0804 + head, 2);
		OUT_RING  (evo, 0x00800000 | mode->clock);
		OUT_RING  (evo, (ilace == 2) ? 2 : 0);
		BEGIN_NV04(evo, 0, 0x0810 + head, 6);
		OUT_RING  (evo, 0x00000000); /* border colour */
		OUT_RING  (evo, (vactive << 16) | hactive);
		OUT_RING  (evo, ( vsynce << 16) | hsynce);
		OUT_RING  (evo, (vblanke << 16) | hblanke);
		OUT_RING  (evo, (vblanks << 16) | hblanks);
		OUT_RING  (evo, (vblan2e << 16) | vblan2s);
		BEGIN_NV04(evo, 0, 0x082c + head, 1);
		OUT_RING  (evo, 0x00000000);
		BEGIN_NV04(evo, 0, 0x0900 + head, 1);
		OUT_RING  (evo, 0x00000311); /* makes sync channel work */
		BEGIN_NV04(evo, 0, 0x08c8 + head, 1);
		OUT_RING  (evo, (umode->vdisplay << 16) | umode->hdisplay);
		BEGIN_NV04(evo, 0, 0x08d4 + head, 1);
		OUT_RING  (evo, 0x00000000); /* screen position */
	}

	/* refresh dither/scale/vibrance state without an immediate update */
	nv_crtc->set_dither(nv_crtc, false);
	nv_crtc->set_scale(nv_crtc, false);
	nv_crtc->set_color_vibrance(nv_crtc, false);

	return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
}
657
658static int
659nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
660 struct drm_framebuffer *old_fb)
661{
662 int ret;
663
664 nv50_display_flip_stop(crtc);
665 ret = nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
666 if (ret)
667 return ret;
668
669 ret = nv50_display_sync(crtc->dev);
670 if (ret)
671 return ret;
672
673 return nv50_display_flip_next(crtc, crtc->fb, NULL);
674}
675
676static int
677nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
678 struct drm_framebuffer *fb,
679 int x, int y, enum mode_set_atomic state)
680{
681 int ret;
682
683 nv50_display_flip_stop(crtc);
684 ret = nv50_crtc_do_mode_set_base(crtc, fb, x, y, true);
685 if (ret)
686 return ret;
687
688 return nv50_display_sync(crtc->dev);
689}
690
/* CRTC helper callbacks registered via drm_crtc_helper_add(). */
static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
	.dpms = nv50_crtc_dpms,
	.prepare = nv50_crtc_prepare,
	.commit = nv50_crtc_commit,
	.mode_fixup = nv50_crtc_mode_fixup,
	.mode_set = nv50_crtc_mode_set,
	.mode_set_base = nv50_crtc_mode_set_base,
	.mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
	.load_lut = nv50_crtc_lut_load,
};
701
702int
703nv50_crtc_create(struct drm_device *dev, int index)
704{
705 struct nouveau_drm *drm = nouveau_drm(dev);
706 struct nouveau_crtc *nv_crtc = NULL;
707 int ret, i;
708
709 NV_DEBUG(drm, "\n");
710
711 nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
712 if (!nv_crtc)
713 return -ENOMEM;
714
715 nv_crtc->index = index;
716 nv_crtc->set_dither = nv50_crtc_set_dither;
717 nv_crtc->set_scale = nv50_crtc_set_scale;
718 nv_crtc->set_color_vibrance = nv50_crtc_set_color_vibrance;
719 nv_crtc->color_vibrance = 50;
720 nv_crtc->vibrant_hue = 0;
721 nv_crtc->lut.depth = 0;
722 for (i = 0; i < 256; i++) {
723 nv_crtc->lut.r[i] = i << 8;
724 nv_crtc->lut.g[i] = i << 8;
725 nv_crtc->lut.b[i] = i << 8;
726 }
727
728 drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
729 drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
730 drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
731
732 ret = nouveau_bo_new(dev, 4096, 0x100, TTM_PL_FLAG_VRAM,
733 0, 0x0000, NULL, &nv_crtc->lut.nvbo);
734 if (!ret) {
735 ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
736 if (!ret)
737 ret = nouveau_bo_map(nv_crtc->lut.nvbo);
738 if (ret)
739 nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
740 }
741
742 if (ret)
743 goto out;
744
745
746 ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
747 0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
748 if (!ret) {
749 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
750 if (!ret)
751 ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
752 if (ret)
753 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
754 }
755
756 if (ret)
757 goto out;
758
759 nv50_cursor_init(nv_crtc);
760out:
761 if (ret)
762 nv50_crtc_destroy(&nv_crtc->base);
763 return ret;
764}
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
deleted file mode 100644
index 223da113ceee..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ /dev/null
@@ -1,136 +0,0 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <drm/drmP.h>
28
29#include "nouveau_drm.h"
30#include "nouveau_dma.h"
31#include "nouveau_crtc.h"
32#include "nv50_display.h"
33
/* Make the hardware cursor visible on this CRTC.
 *
 * @update: when true, also fire an EVO UPDATE so the change takes
 *          effect immediately and the cached visibility state is set;
 *          when the cursor is already visible in that case, nothing is
 *          done.
 *
 * NV84+ additionally needs the cursor DMA object bound (extra 2 words
 * of ring space), hence the chipset check.
 */
static void
nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
{
	struct drm_device *dev = nv_crtc->base.dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_channel *evo = nv50_display(dev)->master;
	int ret;

	NV_DEBUG(drm, "\n");

	if (update && nv_crtc->cursor.visible)
		return;

	/* 3 words base, +2 on NV84+, +2 more if we fire an update */
	ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 5 : 3) + update * 2);
	if (ret) {
		NV_ERROR(drm, "no space while unhiding cursor\n");
		return;
	}

	if (nv_device(drm->device)->chipset != 0x50) {
		BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
		OUT_RING(evo, NvEvoVRAM);
	}
	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
	OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_SHOW);
	OUT_RING(evo, nv_crtc->cursor.offset >> 8);

	if (update) {
		BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
		OUT_RING(evo, 0);
		FIRE_RING(evo);
		nv_crtc->cursor.visible = true;
	}
}
68
/* Hide the hardware cursor on this CRTC; mirror image of
 * nv50_cursor_show().  @update has the same immediate-commit semantics,
 * and NV84+ also unbinds the cursor DMA object.
 */
static void
nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
{
	struct drm_device *dev = nv_crtc->base.dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_channel *evo = nv50_display(dev)->master;
	int ret;

	NV_DEBUG(drm, "\n");

	if (update && !nv_crtc->cursor.visible)
		return;

	/* 3 words base, +2 on NV84+, +2 more if we fire an update */
	ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 5 : 3) + update * 2);
	if (ret) {
		NV_ERROR(drm, "no space while hiding cursor\n");
		return;
	}
	BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
	OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE);
	OUT_RING(evo, 0);
	if (nv_device(drm->device)->chipset != 0x50) {
		BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
		OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE);
	}

	if (update) {
		BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
		OUT_RING(evo, 0);
		FIRE_RING(evo);
		nv_crtc->cursor.visible = false;
	}
}
102
103static void
104nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
105{
106 struct nouveau_device *device = nouveau_dev(nv_crtc->base.dev);
107
108 nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y;
109 nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index),
110 ((y & 0xFFFF) << 16) | (x & 0xFFFF));
111 /* Needed to make the cursor move. */
112 nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS_CTRL(nv_crtc->index), 0);
113}
114
115static void
116nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
117{
118 if (offset == nv_crtc->cursor.offset)
119 return;
120
121 nv_crtc->cursor.offset = offset;
122 if (nv_crtc->cursor.visible) {
123 nv_crtc->cursor.visible = false;
124 nv_crtc->cursor.show(nv_crtc, true);
125 }
126}
127
128int
129nv50_cursor_init(struct nouveau_crtc *nv_crtc)
130{
131 nv_crtc->cursor.set_offset = nv50_cursor_set_offset;
132 nv_crtc->cursor.set_pos = nv50_cursor_set_pos;
133 nv_crtc->cursor.hide = nv50_cursor_hide;
134 nv_crtc->cursor.show = nv50_cursor_show;
135 return 0;
136}
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
deleted file mode 100644
index 6a30a1748573..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ /dev/null
@@ -1,321 +0,0 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <drm/drmP.h>
28#include <drm/drm_crtc_helper.h>
29
30#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
31#include "nouveau_reg.h"
32#include "nouveau_drm.h"
33#include "nouveau_dma.h"
34#include "nouveau_encoder.h"
35#include "nouveau_connector.h"
36#include "nouveau_crtc.h"
37#include "nv50_display.h"
38
39#include <subdev/timer.h>
40
/* Detach the DAC from its CRTC: blank the head, then emit EVO methods
 * clearing the DAC mode control followed by an UPDATE.  No-op if the
 * encoder is not currently bound to a CRTC.
 */
static void
nv50_dac_disconnect(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_channel *evo = nv50_display(dev)->master;
	int ret;

	if (!nv_encoder->crtc)
		return;
	nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);

	NV_DEBUG(drm, "Disconnecting DAC %d\n", nv_encoder->or);

	ret = RING_SPACE(evo, 4);
	if (ret) {
		NV_ERROR(drm, "no space while disconnecting DAC\n");
		return;
	}
	BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
	OUT_RING  (evo, 0);
	BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
	OUT_RING  (evo, 0);

	/* mark the encoder as unbound */
	nv_encoder->crtc = NULL;
}
68
/* Analog load-detect: drive a test voltage out of the DAC and sense
 * whether a monitor load is present.  The previous DPMS state is saved
 * and restored around the probe.
 *
 * Returns connector_status_connected if all three channels report a
 * load, otherwise connector_status_disconnected (also on timeout).
 */
static enum drm_connector_status
nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_device *device = nouveau_dev(encoder->dev);
	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
	enum drm_connector_status status = connector_status_disconnected;
	uint32_t dpms_state, load_pattern, load_state;
	int or = nv_encoder->or;

	nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL1(or), 0x00000001);
	dpms_state = nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or));

	/* power the DAC up for the probe and wait for it to settle */
	nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
		0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
	if (!nv_wait(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
		     NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
		NV_ERROR(drm, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
		NV_ERROR(drm, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
			  nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
		return status;
	}

	/* Use bios provided value if possible. */
	if (drm->vbios.dactestval) {
		load_pattern = drm->vbios.dactestval;
		NV_DEBUG(drm, "Using bios provided load_pattern of %d\n",
			  load_pattern);
	} else {
		load_pattern = 340;
		NV_DEBUG(drm, "Using default load_pattern of %d\n",
			 load_pattern);
	}

	nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or),
		NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern);
	mdelay(45); /* give it some time to process */
	load_state = nv_rd32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or));

	/* stop driving the test pattern and restore the old DPMS state */
	nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0);
	nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), dpms_state |
		NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);

	if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) ==
			  NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT)
		status = connector_status_connected;

	if (status == connector_status_connected)
		NV_DEBUG(drm, "Load was detected on output with or %d\n", or);
	else
		NV_DEBUG(drm, "Load was not detected on output with or %d\n", or);

	return status;
}
123
/* Helper .dpms hook: translate the DRM DPMS mode into the DAC's
 * blank / hsync-off / vsync-off / power-off control bits and write them,
 * waiting first for any previous pending update to complete.
 */
static void
nv50_dac_dpms(struct drm_encoder *encoder, int mode)
{
	struct nouveau_device *device = nouveau_dev(encoder->dev);
	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	uint32_t val;
	int or = nv_encoder->or;

	NV_DEBUG(drm, "or %d mode %d\n", or, mode);

	/* wait for it to be done */
	if (!nv_wait(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
		     NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
		NV_ERROR(drm, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
		NV_ERROR(drm, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
			 nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
		return;
	}

	/* keep everything except the low control bits we set below */
	val = nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)) & ~0x7F;

	if (mode != DRM_MODE_DPMS_ON)
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED;

	switch (mode) {
	case DRM_MODE_DPMS_STANDBY:
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
		break;
	case DRM_MODE_DPMS_SUSPEND:
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
		break;
	case DRM_MODE_DPMS_OFF:
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_OFF;
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
		val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
		break;
	default:
		break;
	}

	nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), val |
		NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
}
168
/* .save hook — not implemented for nv50 DACs; the "!!" error log flags
 * any unexpected call.
 */
static void
nv50_dac_save(struct drm_encoder *encoder)
{
	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
	NV_ERROR(drm, "!!\n");
}
175
/* .restore hook — not implemented for nv50 DACs; the "!!" error log
 * flags any unexpected call.
 */
static void
nv50_dac_restore(struct drm_encoder *encoder)
{
	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
	NV_ERROR(drm, "!!\n");
}
182
183static bool
184nv50_dac_mode_fixup(struct drm_encoder *encoder,
185 const struct drm_display_mode *mode,
186 struct drm_display_mode *adjusted_mode)
187{
188 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
189 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
190 struct nouveau_connector *connector;
191
192 NV_DEBUG(drm, "or %d\n", nv_encoder->or);
193
194 connector = nouveau_encoder_connector_get(nv_encoder);
195 if (!connector) {
196 NV_ERROR(drm, "Encoder has no connector\n");
197 return false;
198 }
199
200 if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
201 connector->native_mode)
202 drm_mode_copy(adjusted_mode, connector->native_mode);
203
204 return true;
205}
206
/* Intentional no-op: all DAC programming happens in mode_set(), so
 * there is nothing left to do at commit time.
 */
static void
nv50_dac_commit(struct drm_encoder *encoder)
{
}
211
/* Helper .mode_set hook: power the DAC on, then emit the EVO MODE_CTRL
 * methods binding it to the encoder's CRTC with the appropriate output
 * type and sync-polarity flags.
 */
static void
nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_drm *drm = nouveau_drm(encoder->dev);
	struct drm_device *dev = encoder->dev;
	struct nouveau_channel *evo = nv50_display(dev)->master;
	struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
	uint32_t mode_ctl = 0, mode_ctl2 = 0;
	int ret;

	NV_DEBUG(drm, "or %d type %d crtc %d\n",
		 nv_encoder->or, nv_encoder->dcb->type, crtc->index);

	nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);

	/* route the DAC to the head driving it */
	if (crtc->index == 1)
		mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC1;
	else
		mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC0;

	/* Lacking a working tv-out, this is not a 100% sure. */
	if (nv_encoder->dcb->type == DCB_OUTPUT_ANALOG)
		mode_ctl |= 0x40;
	else
	if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
		mode_ctl |= 0x100;

	/* negative sync polarities requested by the adjusted mode */
	if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
		mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NHSYNC;

	if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
		mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NVSYNC;

	ret = RING_SPACE(evo, 3);
	if (ret) {
		NV_ERROR(drm, "no space while connecting DAC\n");
		return;
	}
	BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
	OUT_RING(evo, mode_ctl);
	OUT_RING(evo, mode_ctl2);

	/* remember which CRTC we are bound to (used by disconnect) */
	nv_encoder->crtc = encoder->crtc;
}
258
259static struct drm_crtc *
260nv50_dac_crtc_get(struct drm_encoder *encoder)
261{
262 return nouveau_encoder(encoder)->crtc;
263}
264
/* Encoder helper callbacks registered via drm_encoder_helper_add(). */
static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
	.dpms = nv50_dac_dpms,
	.save = nv50_dac_save,
	.restore = nv50_dac_restore,
	.mode_fixup = nv50_dac_mode_fixup,
	.prepare = nv50_dac_disconnect,
	.commit = nv50_dac_commit,
	.mode_set = nv50_dac_mode_set,
	.get_crtc = nv50_dac_crtc_get,
	.detect = nv50_dac_detect,
	.disable = nv50_dac_disconnect
};
277
278static void
279nv50_dac_destroy(struct drm_encoder *encoder)
280{
281 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
282 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
283
284 if (!encoder)
285 return;
286
287 NV_DEBUG(drm, "\n");
288
289 drm_encoder_cleanup(encoder);
290 kfree(nv_encoder);
291}
292
/* Encoder function table passed to drm_encoder_init(). */
static const struct drm_encoder_funcs nv50_dac_encoder_funcs = {
	.destroy = nv50_dac_destroy,
};
296
297int
298nv50_dac_create(struct drm_connector *connector, struct dcb_output *entry)
299{
300 struct nouveau_encoder *nv_encoder;
301 struct drm_encoder *encoder;
302
303 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
304 if (!nv_encoder)
305 return -ENOMEM;
306 encoder = to_drm_encoder(nv_encoder);
307
308 nv_encoder->dcb = entry;
309 nv_encoder->or = ffs(entry->or) - 1;
310
311 drm_encoder_init(connector->dev, encoder, &nv50_dac_encoder_funcs,
312 DRM_MODE_ENCODER_DAC);
313 drm_encoder_helper_add(encoder, &nv50_dac_helper_funcs);
314
315 encoder->possible_crtcs = entry->heads;
316 encoder->possible_clones = 0;
317
318 drm_mode_connector_attach_encoder(connector, encoder);
319 return 0;
320}
321
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index f97b42cbb6bb..35874085a61e 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1,969 +1,2058 @@
1/* 1 /*
2 * Copyright (C) 2008 Maarten Maathuis. 2 * Copyright 2011 Red Hat Inc.
3 * All Rights Reserved.
4 * 3 *
5 * Permission is hereby granted, free of charge, to any person obtaining 4 * Permission is hereby granted, free of charge, to any person obtaining a
6 * a copy of this software and associated documentation files (the 5 * copy of this software and associated documentation files (the "Software"),
7 * "Software"), to deal in the Software without restriction, including 6 * to deal in the Software without restriction, including without limitation
8 * without limitation the rights to use, copy, modify, merge, publish, 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * distribute, sublicense, and/or sell copies of the Software, and to 8 * and/or sell copies of the Software, and to permit persons to whom the
10 * permit persons to whom the Software is furnished to do so, subject to 9 * Software is furnished to do so, subject to the following conditions:
11 * the following conditions:
12 * 10 *
13 * The above copyright notice and this permission notice (including the 11 * The above copyright notice and this permission notice shall be included in
14 * next paragraph) shall be included in all copies or substantial 12 * all copies or substantial portions of the Software.
15 * portions of the Software.
16 * 13 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
24 * 21 *
22 * Authors: Ben Skeggs
25 */ 23 */
26 24
25#include <linux/dma-mapping.h>
26
27#include <drm/drmP.h>
28#include <drm/drm_crtc_helper.h>
29
27#include "nouveau_drm.h" 30#include "nouveau_drm.h"
28#include "nouveau_dma.h" 31#include "nouveau_dma.h"
29 32#include "nouveau_gem.h"
30#include "nv50_display.h"
31#include "nouveau_crtc.h"
32#include "nouveau_encoder.h"
33#include "nouveau_connector.h" 33#include "nouveau_connector.h"
34#include "nouveau_fbcon.h" 34#include "nouveau_encoder.h"
35#include <drm/drm_crtc_helper.h> 35#include "nouveau_crtc.h"
36#include "nouveau_fence.h" 36#include "nouveau_fence.h"
37#include "nv50_display.h"
37 38
39#include <core/client.h>
38#include <core/gpuobj.h> 40#include <core/gpuobj.h>
39#include <subdev/timer.h> 41#include <core/class.h>
40 42
41static void nv50_display_bh(unsigned long); 43#include <subdev/timer.h>
42 44#include <subdev/bar.h>
43static inline int 45#include <subdev/fb.h>
44nv50_sor_nr(struct drm_device *dev) 46
47#define EVO_DMA_NR 9
48
49#define EVO_MASTER (0x00)
50#define EVO_FLIP(c) (0x01 + (c))
51#define EVO_OVLY(c) (0x05 + (c))
52#define EVO_OIMM(c) (0x09 + (c))
53#define EVO_CURS(c) (0x0d + (c))
54
55/* offsets in shared sync bo of various structures */
56#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
57#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
58#define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00)
59#define EVO_FLIP_SEM1(c) EVO_SYNC((c), 0x10)
60
61#define EVO_CORE_HANDLE (0xd1500000)
62#define EVO_CHAN_HANDLE(t,i) (0xd15c0000 | (((t) & 0x00ff) << 8) | (i))
63#define EVO_CHAN_OCLASS(t,c) ((nv_hclass(c) & 0xff00) | ((t) & 0x00ff))
64#define EVO_PUSH_HANDLE(t,i) (0xd15b0000 | (i) | \
65 (((NV50_DISP_##t##_CLASS) & 0x00ff) << 8))
66
67/******************************************************************************
68 * EVO channel
69 *****************************************************************************/
70
71struct nv50_chan {
72 struct nouveau_object *user;
73 u32 handle;
74};
75
76static int
77nv50_chan_create(struct nouveau_object *core, u32 bclass, u8 head,
78 void *data, u32 size, struct nv50_chan *chan)
45{ 79{
46 struct nouveau_device *device = nouveau_dev(dev); 80 struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
81 const u32 oclass = EVO_CHAN_OCLASS(bclass, core);
82 const u32 handle = EVO_CHAN_HANDLE(bclass, head);
83 int ret;
47 84
48 if (device->chipset < 0x90 || 85 ret = nouveau_object_new(client, EVO_CORE_HANDLE, handle,
49 device->chipset == 0x92 || 86 oclass, data, size, &chan->user);
50 device->chipset == 0xa0) 87 if (ret)
51 return 2; 88 return ret;
52 89
53 return 4; 90 chan->handle = handle;
91 return 0;
54} 92}
55 93
56u32 94static void
57nv50_display_active_crtcs(struct drm_device *dev) 95nv50_chan_destroy(struct nouveau_object *core, struct nv50_chan *chan)
58{ 96{
59 struct nouveau_device *device = nouveau_dev(dev); 97 struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
60 u32 mask = 0; 98 if (chan->handle)
61 int i; 99 nouveau_object_del(client, EVO_CORE_HANDLE, chan->handle);
62 100}
63 if (device->chipset < 0x90 ||
64 device->chipset == 0x92 ||
65 device->chipset == 0xa0) {
66 for (i = 0; i < 2; i++)
67 mask |= nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
68 } else {
69 for (i = 0; i < 4; i++)
70 mask |= nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
71 }
72 101
73 for (i = 0; i < 3; i++) 102/******************************************************************************
74 mask |= nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_C(i)); 103 * PIO EVO channel
104 *****************************************************************************/
75 105
76 return mask & 3; 106struct nv50_pioc {
77} 107 struct nv50_chan base;
108};
78 109
79int 110static void
80nv50_display_early_init(struct drm_device *dev) 111nv50_pioc_destroy(struct nouveau_object *core, struct nv50_pioc *pioc)
81{ 112{
82 return 0; 113 nv50_chan_destroy(core, &pioc->base);
83} 114}
84 115
85void 116static int
86nv50_display_late_takedown(struct drm_device *dev) 117nv50_pioc_create(struct nouveau_object *core, u32 bclass, u8 head,
118 void *data, u32 size, struct nv50_pioc *pioc)
87{ 119{
120 return nv50_chan_create(core, bclass, head, data, size, &pioc->base);
88} 121}
89 122
90int 123/******************************************************************************
91nv50_display_sync(struct drm_device *dev) 124 * DMA EVO channel
92{ 125 *****************************************************************************/
93 struct nv50_display *disp = nv50_display(dev);
94 struct nouveau_channel *evo = disp->master;
95 int ret;
96 126
97 ret = RING_SPACE(evo, 6); 127struct nv50_dmac {
98 if (ret == 0) { 128 struct nv50_chan base;
99 BEGIN_NV04(evo, 0, 0x0084, 1); 129 dma_addr_t handle;
100 OUT_RING (evo, 0x80000000); 130 u32 *ptr;
101 BEGIN_NV04(evo, 0, 0x0080, 1); 131};
102 OUT_RING (evo, 0);
103 BEGIN_NV04(evo, 0, 0x0084, 1);
104 OUT_RING (evo, 0x00000000);
105 132
106 nv_wo32(disp->ramin, 0x2000, 0x00000000); 133static void
107 FIRE_RING (evo); 134nv50_dmac_destroy(struct nouveau_object *core, struct nv50_dmac *dmac)
108 135{
109 if (nv_wait_ne(disp->ramin, 0x2000, 0xffffffff, 0x00000000)) 136 if (dmac->ptr) {
110 return 0; 137 struct pci_dev *pdev = nv_device(core)->pdev;
138 pci_free_consistent(pdev, PAGE_SIZE, dmac->ptr, dmac->handle);
111 } 139 }
112 140
113 return 0; 141 nv50_chan_destroy(core, &dmac->base);
114} 142}
115 143
116int 144static int
117nv50_display_init(struct drm_device *dev) 145nv50_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
118{ 146{
119 struct nouveau_drm *drm = nouveau_drm(dev); 147 struct nouveau_fb *pfb = nouveau_fb(core);
120 struct nouveau_device *device = nouveau_dev(dev); 148 struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
121 struct nouveau_channel *evo; 149 struct nouveau_object *object;
122 int ret, i; 150 int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
123 u32 val; 151 NV_DMA_IN_MEMORY_CLASS,
124 152 &(struct nv_dma_class) {
125 NV_DEBUG(drm, "\n"); 153 .flags = NV_DMA_TARGET_VRAM |
126 154 NV_DMA_ACCESS_RDWR,
127 nv_wr32(device, 0x00610184, nv_rd32(device, 0x00614004)); 155 .start = 0,
128 156 .limit = pfb->ram.size - 1,
129 /* 157 .conf0 = NV50_DMA_CONF0_ENABLE |
130 * I think the 0x006101XX range is some kind of main control area 158 NV50_DMA_CONF0_PART_256,
131 * that enables things. 159 }, sizeof(struct nv_dma_class), &object);
132 */
133 /* CRTC? */
134 for (i = 0; i < 2; i++) {
135 val = nv_rd32(device, 0x00616100 + (i * 0x800));
136 nv_wr32(device, 0x00610190 + (i * 0x10), val);
137 val = nv_rd32(device, 0x00616104 + (i * 0x800));
138 nv_wr32(device, 0x00610194 + (i * 0x10), val);
139 val = nv_rd32(device, 0x00616108 + (i * 0x800));
140 nv_wr32(device, 0x00610198 + (i * 0x10), val);
141 val = nv_rd32(device, 0x0061610c + (i * 0x800));
142 nv_wr32(device, 0x0061019c + (i * 0x10), val);
143 }
144
145 /* DAC */
146 for (i = 0; i < 3; i++) {
147 val = nv_rd32(device, 0x0061a000 + (i * 0x800));
148 nv_wr32(device, 0x006101d0 + (i * 0x04), val);
149 }
150
151 /* SOR */
152 for (i = 0; i < nv50_sor_nr(dev); i++) {
153 val = nv_rd32(device, 0x0061c000 + (i * 0x800));
154 nv_wr32(device, 0x006101e0 + (i * 0x04), val);
155 }
156
157 /* EXT */
158 for (i = 0; i < 3; i++) {
159 val = nv_rd32(device, 0x0061e000 + (i * 0x800));
160 nv_wr32(device, 0x006101f0 + (i * 0x04), val);
161 }
162
163 for (i = 0; i < 3; i++) {
164 nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(i), 0x00550000 |
165 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
166 nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
167 }
168
169 /* The precise purpose is unknown, i suspect it has something to do
170 * with text mode.
171 */
172 if (nv_rd32(device, NV50_PDISPLAY_INTR_1) & 0x100) {
173 nv_wr32(device, NV50_PDISPLAY_INTR_1, 0x100);
174 nv_wr32(device, 0x006194e8, nv_rd32(device, 0x006194e8) & ~1);
175 if (!nv_wait(device, 0x006194e8, 2, 0)) {
176 NV_ERROR(drm, "timeout: (0x6194e8 & 2) != 0\n");
177 NV_ERROR(drm, "0x6194e8 = 0x%08x\n",
178 nv_rd32(device, 0x6194e8));
179 return -EBUSY;
180 }
181 }
182
183 for (i = 0; i < 2; i++) {
184 nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
185 if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
186 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
187 NV_ERROR(drm, "timeout: CURSOR_CTRL2_STATUS == 0\n");
188 NV_ERROR(drm, "CURSOR_CTRL2 = 0x%08x\n",
189 nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
190 return -EBUSY;
191 }
192
193 nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
194 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON);
195 if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
196 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS,
197 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) {
198 NV_ERROR(drm, "timeout: "
199 "CURSOR_CTRL2_STATUS_ACTIVE(%d)\n", i);
200 NV_ERROR(drm, "CURSOR_CTRL2(%d) = 0x%08x\n", i,
201 nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
202 return -EBUSY;
203 }
204 }
205
206 nv_wr32(device, NV50_PDISPLAY_PIO_CTRL, 0x00000000);
207 nv_mask(device, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000);
208 nv_wr32(device, NV50_PDISPLAY_INTR_EN_0, 0x00000000);
209 nv_mask(device, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000);
210 nv_wr32(device, NV50_PDISPLAY_INTR_EN_1,
211 NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 |
212 NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
213 NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);
214
215 ret = nv50_evo_init(dev);
216 if (ret) 160 if (ret)
217 return ret; 161 return ret;
218 evo = nv50_display(dev)->master;
219
220 nv_wr32(device, NV50_PDISPLAY_OBJECTS, (nv50_display(dev)->ramin->addr >> 8) | 9);
221 162
222 ret = RING_SPACE(evo, 3); 163 ret = nouveau_object_new(client, parent, NvEvoFB16,
164 NV_DMA_IN_MEMORY_CLASS,
165 &(struct nv_dma_class) {
166 .flags = NV_DMA_TARGET_VRAM |
167 NV_DMA_ACCESS_RDWR,
168 .start = 0,
169 .limit = pfb->ram.size - 1,
170 .conf0 = NV50_DMA_CONF0_ENABLE | 0x70 |
171 NV50_DMA_CONF0_PART_256,
172 }, sizeof(struct nv_dma_class), &object);
223 if (ret) 173 if (ret)
224 return ret; 174 return ret;
225 BEGIN_NV04(evo, 0, NV50_EVO_UNK84, 2);
226 OUT_RING (evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
227 OUT_RING (evo, NvEvoSync);
228 175
229 return nv50_display_sync(dev); 176 ret = nouveau_object_new(client, parent, NvEvoFB32,
177 NV_DMA_IN_MEMORY_CLASS,
178 &(struct nv_dma_class) {
179 .flags = NV_DMA_TARGET_VRAM |
180 NV_DMA_ACCESS_RDWR,
181 .start = 0,
182 .limit = pfb->ram.size - 1,
183 .conf0 = NV50_DMA_CONF0_ENABLE | 0x7a |
184 NV50_DMA_CONF0_PART_256,
185 }, sizeof(struct nv_dma_class), &object);
186 return ret;
230} 187}
231 188
232void 189static int
233nv50_display_fini(struct drm_device *dev) 190nvc0_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
234{ 191{
235 struct nouveau_drm *drm = nouveau_drm(dev); 192 struct nouveau_fb *pfb = nouveau_fb(core);
236 struct nouveau_device *device = nouveau_dev(dev); 193 struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
237 struct nv50_display *disp = nv50_display(dev); 194 struct nouveau_object *object;
238 struct nouveau_channel *evo = disp->master; 195 int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
239 struct drm_crtc *drm_crtc; 196 NV_DMA_IN_MEMORY_CLASS,
240 int ret, i; 197 &(struct nv_dma_class) {
198 .flags = NV_DMA_TARGET_VRAM |
199 NV_DMA_ACCESS_RDWR,
200 .start = 0,
201 .limit = pfb->ram.size - 1,
202 .conf0 = NVC0_DMA_CONF0_ENABLE,
203 }, sizeof(struct nv_dma_class), &object);
204 if (ret)
205 return ret;
241 206
242 NV_DEBUG(drm, "\n"); 207 ret = nouveau_object_new(client, parent, NvEvoFB16,
208 NV_DMA_IN_MEMORY_CLASS,
209 &(struct nv_dma_class) {
210 .flags = NV_DMA_TARGET_VRAM |
211 NV_DMA_ACCESS_RDWR,
212 .start = 0,
213 .limit = pfb->ram.size - 1,
214 .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe,
215 }, sizeof(struct nv_dma_class), &object);
216 if (ret)
217 return ret;
243 218
244 list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) { 219 ret = nouveau_object_new(client, parent, NvEvoFB32,
245 struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc); 220 NV_DMA_IN_MEMORY_CLASS,
221 &(struct nv_dma_class) {
222 .flags = NV_DMA_TARGET_VRAM |
223 NV_DMA_ACCESS_RDWR,
224 .start = 0,
225 .limit = pfb->ram.size - 1,
226 .conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe,
227 }, sizeof(struct nv_dma_class), &object);
228 return ret;
229}
246 230
247 nv50_crtc_blank(crtc, true); 231static int
248 } 232nvd0_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
233{
234 struct nouveau_fb *pfb = nouveau_fb(core);
235 struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
236 struct nouveau_object *object;
237 int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
238 NV_DMA_IN_MEMORY_CLASS,
239 &(struct nv_dma_class) {
240 .flags = NV_DMA_TARGET_VRAM |
241 NV_DMA_ACCESS_RDWR,
242 .start = 0,
243 .limit = pfb->ram.size - 1,
244 .conf0 = NVD0_DMA_CONF0_ENABLE |
245 NVD0_DMA_CONF0_PAGE_LP,
246 }, sizeof(struct nv_dma_class), &object);
247 if (ret)
248 return ret;
249 249
250 ret = RING_SPACE(evo, 2); 250 ret = nouveau_object_new(client, parent, NvEvoFB32,
251 if (ret == 0) { 251 NV_DMA_IN_MEMORY_CLASS,
252 BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1); 252 &(struct nv_dma_class) {
253 OUT_RING(evo, 0); 253 .flags = NV_DMA_TARGET_VRAM |
254 } 254 NV_DMA_ACCESS_RDWR,
255 FIRE_RING(evo); 255 .start = 0,
256 .limit = pfb->ram.size - 1,
257 .conf0 = NVD0_DMA_CONF0_ENABLE | 0xfe |
258 NVD0_DMA_CONF0_PAGE_LP,
259 }, sizeof(struct nv_dma_class), &object);
260 return ret;
261}
256 262
257 /* Almost like ack'ing a vblank interrupt, maybe in the spirit of 263static int
258 * cleaning up? 264nv50_dmac_create(struct nouveau_object *core, u32 bclass, u8 head,
259 */ 265 void *data, u32 size, u64 syncbuf,
260 list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) { 266 struct nv50_dmac *dmac)
261 struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc); 267{
262 uint32_t mask = NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(crtc->index); 268 struct nouveau_fb *pfb = nouveau_fb(core);
269 struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
270 struct nouveau_object *object;
271 u32 pushbuf = *(u32 *)data;
272 int ret;
263 273
264 if (!crtc->base.enabled) 274 dmac->ptr = pci_alloc_consistent(nv_device(core)->pdev, PAGE_SIZE,
265 continue; 275 &dmac->handle);
276 if (!dmac->ptr)
277 return -ENOMEM;
266 278
267 nv_wr32(device, NV50_PDISPLAY_INTR_1, mask); 279 ret = nouveau_object_new(client, NVDRM_DEVICE, pushbuf,
268 if (!nv_wait(device, NV50_PDISPLAY_INTR_1, mask, mask)) { 280 NV_DMA_FROM_MEMORY_CLASS,
269 NV_ERROR(drm, "timeout: (0x610024 & 0x%08x) == " 281 &(struct nv_dma_class) {
270 "0x%08x\n", mask, mask); 282 .flags = NV_DMA_TARGET_PCI_US |
271 NV_ERROR(drm, "0x610024 = 0x%08x\n", 283 NV_DMA_ACCESS_RD,
272 nv_rd32(device, NV50_PDISPLAY_INTR_1)); 284 .start = dmac->handle + 0x0000,
273 } 285 .limit = dmac->handle + 0x0fff,
274 } 286 }, sizeof(struct nv_dma_class), &object);
287 if (ret)
288 return ret;
275 289
276 for (i = 0; i < 2; i++) { 290 ret = nv50_chan_create(core, bclass, head, data, size, &dmac->base);
277 nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0); 291 if (ret)
278 if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 292 return ret;
279 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
280 NV_ERROR(drm, "timeout: CURSOR_CTRL2_STATUS == 0\n");
281 NV_ERROR(drm, "CURSOR_CTRL2 = 0x%08x\n",
282 nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
283 }
284 }
285 293
286 nv50_evo_fini(dev); 294 ret = nouveau_object_new(client, dmac->base.handle, NvEvoSync,
295 NV_DMA_IN_MEMORY_CLASS,
296 &(struct nv_dma_class) {
297 .flags = NV_DMA_TARGET_VRAM |
298 NV_DMA_ACCESS_RDWR,
299 .start = syncbuf + 0x0000,
300 .limit = syncbuf + 0x0fff,
301 }, sizeof(struct nv_dma_class), &object);
302 if (ret)
303 return ret;
287 304
288 for (i = 0; i < 3; i++) { 305 ret = nouveau_object_new(client, dmac->base.handle, NvEvoVRAM,
289 if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_STATE(i), 306 NV_DMA_IN_MEMORY_CLASS,
290 NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) { 307 &(struct nv_dma_class) {
291 NV_ERROR(drm, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i); 308 .flags = NV_DMA_TARGET_VRAM |
292 NV_ERROR(drm, "SOR_DPMS_STATE(%d) = 0x%08x\n", i, 309 NV_DMA_ACCESS_RDWR,
293 nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_STATE(i))); 310 .start = 0,
294 } 311 .limit = pfb->ram.size - 1,
295 } 312 }, sizeof(struct nv_dma_class), &object);
313 if (ret)
314 return ret;
296 315
297 /* disable interrupts. */ 316 if (nv_device(core)->card_type < NV_C0)
298 nv_wr32(device, NV50_PDISPLAY_INTR_EN_1, 0x00000000); 317 ret = nv50_dmac_create_fbdma(core, dmac->base.handle);
318 else
319 if (nv_device(core)->card_type < NV_D0)
320 ret = nvc0_dmac_create_fbdma(core, dmac->base.handle);
321 else
322 ret = nvd0_dmac_create_fbdma(core, dmac->base.handle);
323 return ret;
299} 324}
300 325
301int 326struct nv50_mast {
302nv50_display_create(struct drm_device *dev) 327 struct nv50_dmac base;
328};
329
330struct nv50_curs {
331 struct nv50_pioc base;
332};
333
334struct nv50_sync {
335 struct nv50_dmac base;
336 struct {
337 u32 offset;
338 u16 value;
339 } sem;
340};
341
342struct nv50_ovly {
343 struct nv50_dmac base;
344};
345
346struct nv50_oimm {
347 struct nv50_pioc base;
348};
349
350struct nv50_head {
351 struct nouveau_crtc base;
352 struct nv50_curs curs;
353 struct nv50_sync sync;
354 struct nv50_ovly ovly;
355 struct nv50_oimm oimm;
356};
357
358#define nv50_head(c) ((struct nv50_head *)nouveau_crtc(c))
359#define nv50_curs(c) (&nv50_head(c)->curs)
360#define nv50_sync(c) (&nv50_head(c)->sync)
361#define nv50_ovly(c) (&nv50_head(c)->ovly)
362#define nv50_oimm(c) (&nv50_head(c)->oimm)
363#define nv50_chan(c) (&(c)->base.base)
364#define nv50_vers(c) nv_mclass(nv50_chan(c)->user)
365
366struct nv50_disp {
367 struct nouveau_object *core;
368 struct nv50_mast mast;
369
370 u32 modeset;
371
372 struct nouveau_bo *sync;
373};
374
375static struct nv50_disp *
376nv50_disp(struct drm_device *dev)
303{ 377{
304 struct nouveau_drm *drm = nouveau_drm(dev); 378 return nouveau_display(dev)->priv;
305 struct dcb_table *dcb = &drm->vbios.dcb; 379}
306 struct drm_connector *connector, *ct;
307 struct nv50_display *priv;
308 int ret, i;
309 380
310 NV_DEBUG(drm, "\n"); 381#define nv50_mast(d) (&nv50_disp(d)->mast)
311 382
312 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 383static struct drm_crtc *
313 if (!priv) 384nv50_display_crtc_get(struct drm_encoder *encoder)
314 return -ENOMEM; 385{
315 386 return nouveau_encoder(encoder)->crtc;
316 nouveau_display(dev)->priv = priv; 387}
317 nouveau_display(dev)->dtor = nv50_display_destroy;
318 nouveau_display(dev)->init = nv50_display_init;
319 nouveau_display(dev)->fini = nv50_display_fini;
320 388
321 /* Create CRTC objects */ 389/******************************************************************************
322 for (i = 0; i < 2; i++) { 390 * EVO channel helpers
323 ret = nv50_crtc_create(dev, i); 391 *****************************************************************************/
324 if (ret) 392static u32 *
325 return ret; 393evo_wait(void *evoc, int nr)
326 } 394{
395 struct nv50_dmac *dmac = evoc;
396 u32 put = nv_ro32(dmac->base.user, 0x0000) / 4;
327 397
328 /* We setup the encoders from the BIOS table */ 398 if (put + nr >= (PAGE_SIZE / 4) - 8) {
329 for (i = 0 ; i < dcb->entries; i++) { 399 dmac->ptr[put] = 0x20000000;
330 struct dcb_output *entry = &dcb->entry[i];
331 400
332 if (entry->location != DCB_LOC_ON_CHIP) { 401 nv_wo32(dmac->base.user, 0x0000, 0x00000000);
333 NV_WARN(drm, "Off-chip encoder %d/%d unsupported\n", 402 if (!nv_wait(dmac->base.user, 0x0004, ~0, 0x00000000)) {
334 entry->type, ffs(entry->or) - 1); 403 NV_ERROR(dmac->base.user, "channel stalled\n");
335 continue; 404 return NULL;
336 } 405 }
337 406
338 connector = nouveau_connector_create(dev, entry->connector); 407 put = 0;
339 if (IS_ERR(connector))
340 continue;
341
342 switch (entry->type) {
343 case DCB_OUTPUT_TMDS:
344 case DCB_OUTPUT_LVDS:
345 case DCB_OUTPUT_DP:
346 nv50_sor_create(connector, entry);
347 break;
348 case DCB_OUTPUT_ANALOG:
349 nv50_dac_create(connector, entry);
350 break;
351 default:
352 NV_WARN(drm, "DCB encoder %d unknown\n", entry->type);
353 continue;
354 }
355 } 408 }
356 409
357 list_for_each_entry_safe(connector, ct, 410 return dmac->ptr + put;
358 &dev->mode_config.connector_list, head) { 411}
359 if (!connector->encoder_ids[0]) {
360 NV_WARN(drm, "%s has no encoders, removing\n",
361 drm_get_connector_name(connector));
362 connector->funcs->destroy(connector);
363 }
364 }
365 412
366 tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev); 413static void
414evo_kick(u32 *push, void *evoc)
415{
416 struct nv50_dmac *dmac = evoc;
417 nv_wo32(dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
418}
367 419
368 ret = nv50_evo_create(dev); 420#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
369 if (ret) { 421#define evo_data(p,d) *((p)++) = (d)
370 nv50_display_destroy(dev);
371 return ret;
372 }
373 422
374 return 0; 423static bool
424evo_sync_wait(void *data)
425{
426 return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000;
375} 427}
376 428
377void 429static int
378nv50_display_destroy(struct drm_device *dev) 430evo_sync(struct drm_device *dev)
379{ 431{
380 struct nv50_display *disp = nv50_display(dev); 432 struct nouveau_device *device = nouveau_dev(dev);
433 struct nv50_disp *disp = nv50_disp(dev);
434 struct nv50_mast *mast = nv50_mast(dev);
435 u32 *push = evo_wait(mast, 8);
436 if (push) {
437 nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
438 evo_mthd(push, 0x0084, 1);
439 evo_data(push, 0x80000000 | EVO_MAST_NTFY);
440 evo_mthd(push, 0x0080, 2);
441 evo_data(push, 0x00000000);
442 evo_data(push, 0x00000000);
443 evo_kick(push, mast);
444 if (nv_wait_cb(device, evo_sync_wait, disp->sync))
445 return 0;
446 }
381 447
382 nv50_evo_destroy(dev); 448 return -EBUSY;
383 kfree(disp);
384} 449}
385 450
451/******************************************************************************
452 * Page flipping channel
453 *****************************************************************************/
386struct nouveau_bo * 454struct nouveau_bo *
387nv50_display_crtc_sema(struct drm_device *dev, int crtc) 455nv50_display_crtc_sema(struct drm_device *dev, int crtc)
388{ 456{
389 return nv50_display(dev)->crtc[crtc].sem.bo; 457 return nv50_disp(dev)->sync;
390} 458}
391 459
392void 460void
393nv50_display_flip_stop(struct drm_crtc *crtc) 461nv50_display_flip_stop(struct drm_crtc *crtc)
394{ 462{
395 struct nv50_display *disp = nv50_display(crtc->dev); 463 struct nv50_sync *sync = nv50_sync(crtc);
396 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 464 u32 *push;
397 struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index]; 465
398 struct nouveau_channel *evo = dispc->sync; 466 push = evo_wait(sync, 8);
399 int ret; 467 if (push) {
400 468 evo_mthd(push, 0x0084, 1);
401 ret = RING_SPACE(evo, 8); 469 evo_data(push, 0x00000000);
402 if (ret) { 470 evo_mthd(push, 0x0094, 1);
403 WARN_ON(1); 471 evo_data(push, 0x00000000);
404 return; 472 evo_mthd(push, 0x00c0, 1);
473 evo_data(push, 0x00000000);
474 evo_mthd(push, 0x0080, 1);
475 evo_data(push, 0x00000000);
476 evo_kick(push, sync);
405 } 477 }
406
407 BEGIN_NV04(evo, 0, 0x0084, 1);
408 OUT_RING (evo, 0x00000000);
409 BEGIN_NV04(evo, 0, 0x0094, 1);
410 OUT_RING (evo, 0x00000000);
411 BEGIN_NV04(evo, 0, 0x00c0, 1);
412 OUT_RING (evo, 0x00000000);
413 BEGIN_NV04(evo, 0, 0x0080, 1);
414 OUT_RING (evo, 0x00000000);
415 FIRE_RING (evo);
416} 478}
417 479
418int 480int
419nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, 481nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
420 struct nouveau_channel *chan) 482 struct nouveau_channel *chan, u32 swap_interval)
421{ 483{
422 struct nouveau_drm *drm = nouveau_drm(crtc->dev);
423 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); 484 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
424 struct nv50_display *disp = nv50_display(crtc->dev); 485 struct nv50_disp *disp = nv50_disp(crtc->dev);
425 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 486 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
426 struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index]; 487 struct nv50_sync *sync = nv50_sync(crtc);
427 struct nouveau_channel *evo = dispc->sync; 488 u32 *push;
428 int ret; 489 int ret;
429 490
430 ret = RING_SPACE(evo, chan ? 25 : 27); 491 swap_interval <<= 4;
431 if (unlikely(ret)) 492 if (swap_interval == 0)
432 return ret; 493 swap_interval |= 0x100;
494
495 push = evo_wait(sync, 128);
496 if (unlikely(push == NULL))
497 return -EBUSY;
433 498
434 /* synchronise with the rendering channel, if necessary */ 499 /* synchronise with the rendering channel, if necessary */
435 if (likely(chan)) { 500 if (likely(chan)) {
436 ret = RING_SPACE(chan, 10); 501 ret = RING_SPACE(chan, 10);
437 if (ret) { 502 if (ret)
438 WIND_RING(evo);
439 return ret; 503 return ret;
440 }
441 504
442 if (nv_device(drm->device)->chipset < 0xc0) { 505 if (nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
443 BEGIN_NV04(chan, 0, 0x0060, 2); 506 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
444 OUT_RING (chan, NvEvoSema0 + nv_crtc->index); 507 OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
445 OUT_RING (chan, dispc->sem.offset); 508 OUT_RING (chan, sync->sem.offset);
446 BEGIN_NV04(chan, 0, 0x006c, 1); 509 BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
447 OUT_RING (chan, 0xf00d0000 | dispc->sem.value); 510 OUT_RING (chan, 0xf00d0000 | sync->sem.value);
448 BEGIN_NV04(chan, 0, 0x0064, 2); 511 BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2);
449 OUT_RING (chan, dispc->sem.offset ^ 0x10); 512 OUT_RING (chan, sync->sem.offset ^ 0x10);
450 OUT_RING (chan, 0x74b1e000); 513 OUT_RING (chan, 0x74b1e000);
451 BEGIN_NV04(chan, 0, 0x0060, 1); 514 BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
452 if (nv_device(drm->device)->chipset < 0x84) 515 if (nv_mclass(chan->object) < NV84_CHANNEL_DMA_CLASS)
453 OUT_RING (chan, NvSema); 516 OUT_RING (chan, NvSema);
454 else 517 else
455 OUT_RING (chan, chan->vram); 518 OUT_RING (chan, chan->vram);
456 } else { 519 } else {
457 u64 offset = nvc0_fence_crtc(chan, nv_crtc->index); 520 u64 offset = nvc0_fence_crtc(chan, nv_crtc->index);
458 offset += dispc->sem.offset; 521 offset += sync->sem.offset;
459 BEGIN_NVC0(chan, 0, 0x0010, 4); 522
523 BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
460 OUT_RING (chan, upper_32_bits(offset)); 524 OUT_RING (chan, upper_32_bits(offset));
461 OUT_RING (chan, lower_32_bits(offset)); 525 OUT_RING (chan, lower_32_bits(offset));
462 OUT_RING (chan, 0xf00d0000 | dispc->sem.value); 526 OUT_RING (chan, 0xf00d0000 | sync->sem.value);
463 OUT_RING (chan, 0x1002); 527 OUT_RING (chan, 0x1002);
464 BEGIN_NVC0(chan, 0, 0x0010, 4); 528 BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
465 OUT_RING (chan, upper_32_bits(offset)); 529 OUT_RING (chan, upper_32_bits(offset));
466 OUT_RING (chan, lower_32_bits(offset ^ 0x10)); 530 OUT_RING (chan, lower_32_bits(offset ^ 0x10));
467 OUT_RING (chan, 0x74b1e000); 531 OUT_RING (chan, 0x74b1e000);
468 OUT_RING (chan, 0x1001); 532 OUT_RING (chan, 0x1001);
469 } 533 }
534
470 FIRE_RING (chan); 535 FIRE_RING (chan);
471 } else { 536 } else {
472 nouveau_bo_wr32(dispc->sem.bo, dispc->sem.offset / 4, 537 nouveau_bo_wr32(disp->sync, sync->sem.offset / 4,
473 0xf00d0000 | dispc->sem.value); 538 0xf00d0000 | sync->sem.value);
539 evo_sync(crtc->dev);
474 } 540 }
475 541
476 /* queue the flip on the crtc's "display sync" channel */ 542 /* queue the flip */
477 BEGIN_NV04(evo, 0, 0x0100, 1); 543 evo_mthd(push, 0x0100, 1);
478 OUT_RING (evo, 0xfffe0000); 544 evo_data(push, 0xfffe0000);
479 if (chan) { 545 evo_mthd(push, 0x0084, 1);
480 BEGIN_NV04(evo, 0, 0x0084, 1); 546 evo_data(push, swap_interval);
481 OUT_RING (evo, 0x00000100); 547 if (!(swap_interval & 0x00000100)) {
548 evo_mthd(push, 0x00e0, 1);
549 evo_data(push, 0x40000000);
550 }
551 evo_mthd(push, 0x0088, 4);
552 evo_data(push, sync->sem.offset);
553 evo_data(push, 0xf00d0000 | sync->sem.value);
554 evo_data(push, 0x74b1e000);
555 evo_data(push, NvEvoSync);
556 evo_mthd(push, 0x00a0, 2);
557 evo_data(push, 0x00000000);
558 evo_data(push, 0x00000000);
559 evo_mthd(push, 0x00c0, 1);
560 evo_data(push, nv_fb->r_dma);
561 evo_mthd(push, 0x0110, 2);
562 evo_data(push, 0x00000000);
563 evo_data(push, 0x00000000);
564 if (nv50_vers(sync) < NVD0_DISP_SYNC_CLASS) {
565 evo_mthd(push, 0x0800, 5);
566 evo_data(push, nv_fb->nvbo->bo.offset >> 8);
567 evo_data(push, 0);
568 evo_data(push, (fb->height << 16) | fb->width);
569 evo_data(push, nv_fb->r_pitch);
570 evo_data(push, nv_fb->r_format);
482 } else { 571 } else {
483 BEGIN_NV04(evo, 0, 0x0084, 1); 572 evo_mthd(push, 0x0400, 5);
484 OUT_RING (evo, 0x00000010); 573 evo_data(push, nv_fb->nvbo->bo.offset >> 8);
485 /* allows gamma somehow, PDISP will bitch at you if 574 evo_data(push, 0);
486 * you don't wait for vblank before changing this.. 575 evo_data(push, (fb->height << 16) | fb->width);
487 */ 576 evo_data(push, nv_fb->r_pitch);
488 BEGIN_NV04(evo, 0, 0x00e0, 1); 577 evo_data(push, nv_fb->r_format);
489 OUT_RING (evo, 0x40000000); 578 }
490 } 579 evo_mthd(push, 0x0080, 1);
491 BEGIN_NV04(evo, 0, 0x0088, 4); 580 evo_data(push, 0x00000000);
492 OUT_RING (evo, dispc->sem.offset); 581 evo_kick(push, sync);
493 OUT_RING (evo, 0xf00d0000 | dispc->sem.value); 582
494 OUT_RING (evo, 0x74b1e000); 583 sync->sem.offset ^= 0x10;
495 OUT_RING (evo, NvEvoSync); 584 sync->sem.value++;
496 BEGIN_NV04(evo, 0, 0x00a0, 2);
497 OUT_RING (evo, 0x00000000);
498 OUT_RING (evo, 0x00000000);
499 BEGIN_NV04(evo, 0, 0x00c0, 1);
500 OUT_RING (evo, nv_fb->r_dma);
501 BEGIN_NV04(evo, 0, 0x0110, 2);
502 OUT_RING (evo, 0x00000000);
503 OUT_RING (evo, 0x00000000);
504 BEGIN_NV04(evo, 0, 0x0800, 5);
505 OUT_RING (evo, nv_fb->nvbo->bo.offset >> 8);
506 OUT_RING (evo, 0);
507 OUT_RING (evo, (fb->height << 16) | fb->width);
508 OUT_RING (evo, nv_fb->r_pitch);
509 OUT_RING (evo, nv_fb->r_format);
510 BEGIN_NV04(evo, 0, 0x0080, 1);
511 OUT_RING (evo, 0x00000000);
512 FIRE_RING (evo);
513
514 dispc->sem.offset ^= 0x10;
515 dispc->sem.value++;
516 return 0; 585 return 0;
517} 586}
518 587
519static u16 588/******************************************************************************
520nv50_display_script_select(struct drm_device *dev, struct dcb_output *dcb, 589 * CRTC
521 u32 mc, int pxclk) 590 *****************************************************************************/
591static int
592nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
522{ 593{
523 struct nouveau_drm *drm = nouveau_drm(dev); 594 struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
524 struct nouveau_connector *nv_connector = NULL; 595 struct nouveau_connector *nv_connector;
525 struct drm_encoder *encoder; 596 struct drm_connector *connector;
526 struct nvbios *bios = &drm->vbios; 597 u32 *push, mode = 0x00;
527 u32 script = 0, or; 598
599 nv_connector = nouveau_crtc_connector_get(nv_crtc);
600 connector = &nv_connector->base;
601 if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
602 if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
603 mode = DITHERING_MODE_DYNAMIC2X2;
604 } else {
605 mode = nv_connector->dithering_mode;
606 }
528 607
529 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 608 if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
530 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 609 if (connector->display_info.bpc >= 8)
610 mode |= DITHERING_DEPTH_8BPC;
611 } else {
612 mode |= nv_connector->dithering_depth;
613 }
531 614
532 if (nv_encoder->dcb != dcb) 615 push = evo_wait(mast, 4);
533 continue; 616 if (push) {
617 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
618 evo_mthd(push, 0x08a0 + (nv_crtc->index * 0x0400), 1);
619 evo_data(push, mode);
620 } else
621 if (nv50_vers(mast) < NVE0_DISP_MAST_CLASS) {
622 evo_mthd(push, 0x0490 + (nv_crtc->index * 0x0300), 1);
623 evo_data(push, mode);
624 } else {
625 evo_mthd(push, 0x04a0 + (nv_crtc->index * 0x0300), 1);
626 evo_data(push, mode);
627 }
534 628
535 nv_connector = nouveau_encoder_connector_get(nv_encoder); 629 if (update) {
536 break; 630 evo_mthd(push, 0x0080, 1);
631 evo_data(push, 0x00000000);
632 }
633 evo_kick(push, mast);
537 } 634 }
538 635
539 or = ffs(dcb->or) - 1; 636 return 0;
540 switch (dcb->type) { 637}
541 case DCB_OUTPUT_LVDS:
542 script = (mc >> 8) & 0xf;
543 if (bios->fp_no_ddc) {
544 if (bios->fp.dual_link)
545 script |= 0x0100;
546 if (bios->fp.if_is_24bit)
547 script |= 0x0200;
548 } else {
549 /* determine number of lvds links */
550 if (nv_connector && nv_connector->edid &&
551 nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
552 /* http://www.spwg.org */
553 if (((u8 *)nv_connector->edid)[121] == 2)
554 script |= 0x0100;
555 } else
556 if (pxclk >= bios->fp.duallink_transition_clk) {
557 script |= 0x0100;
558 }
559 638
560 /* determine panel depth */ 639static int
561 if (script & 0x0100) { 640nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
562 if (bios->fp.strapless_is_24bit & 2) 641{
563 script |= 0x0200; 642 struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
564 } else { 643 struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
565 if (bios->fp.strapless_is_24bit & 1) 644 struct drm_crtc *crtc = &nv_crtc->base;
566 script |= 0x0200; 645 struct nouveau_connector *nv_connector;
567 } 646 int mode = DRM_MODE_SCALE_NONE;
647 u32 oX, oY, *push;
648
649 /* start off at the resolution we programmed the crtc for, this
650 * effectively handles NONE/FULL scaling
651 */
652 nv_connector = nouveau_crtc_connector_get(nv_crtc);
653 if (nv_connector && nv_connector->native_mode)
654 mode = nv_connector->scaling_mode;
655
656 if (mode != DRM_MODE_SCALE_NONE)
657 omode = nv_connector->native_mode;
658 else
659 omode = umode;
660
661 oX = omode->hdisplay;
662 oY = omode->vdisplay;
663 if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
664 oY *= 2;
665
666 /* add overscan compensation if necessary, will keep the aspect
667 * ratio the same as the backend mode unless overridden by the
668 * user setting both hborder and vborder properties.
669 */
670 if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
671 (nv_connector->underscan == UNDERSCAN_AUTO &&
672 nv_connector->edid &&
673 drm_detect_hdmi_monitor(nv_connector->edid)))) {
674 u32 bX = nv_connector->underscan_hborder;
675 u32 bY = nv_connector->underscan_vborder;
676 u32 aspect = (oY << 19) / oX;
677
678 if (bX) {
679 oX -= (bX * 2);
680 if (bY) oY -= (bY * 2);
681 else oY = ((oX * aspect) + (aspect / 2)) >> 19;
682 } else {
683 oX -= (oX >> 4) + 32;
684 if (bY) oY -= (bY * 2);
685 else oY = ((oX * aspect) + (aspect / 2)) >> 19;
686 }
687 }
568 688
569 if (nv_connector && nv_connector->edid && 689 /* handle CENTER/ASPECT scaling, taking into account the areas
570 (nv_connector->edid->revision >= 4) && 690 * removed already for overscan compensation
571 (nv_connector->edid->input & 0x70) >= 0x20) 691 */
572 script |= 0x0200; 692 switch (mode) {
693 case DRM_MODE_SCALE_CENTER:
694 oX = min((u32)umode->hdisplay, oX);
695 oY = min((u32)umode->vdisplay, oY);
696 /* fall-through */
697 case DRM_MODE_SCALE_ASPECT:
698 if (oY < oX) {
699 u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
700 oX = ((oY * aspect) + (aspect / 2)) >> 19;
701 } else {
702 u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
703 oY = ((oX * aspect) + (aspect / 2)) >> 19;
573 } 704 }
574 break; 705 break;
575 case DCB_OUTPUT_TMDS:
576 script = (mc >> 8) & 0xf;
577 if (pxclk >= 165000)
578 script |= 0x0100;
579 break;
580 case DCB_OUTPUT_DP:
581 script = (mc >> 8) & 0xf;
582 break;
583 case DCB_OUTPUT_ANALOG:
584 script = 0xff;
585 break;
586 default: 706 default:
587 NV_ERROR(drm, "modeset on unsupported output type!\n");
588 break; 707 break;
589 } 708 }
590 709
591 return script; 710 push = evo_wait(mast, 8);
711 if (push) {
712 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
713 /*XXX: SCALE_CTRL_ACTIVE??? */
714 evo_mthd(push, 0x08d8 + (nv_crtc->index * 0x400), 2);
715 evo_data(push, (oY << 16) | oX);
716 evo_data(push, (oY << 16) | oX);
717 evo_mthd(push, 0x08a4 + (nv_crtc->index * 0x400), 1);
718 evo_data(push, 0x00000000);
719 evo_mthd(push, 0x08c8 + (nv_crtc->index * 0x400), 1);
720 evo_data(push, umode->vdisplay << 16 | umode->hdisplay);
721 } else {
722 evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
723 evo_data(push, (oY << 16) | oX);
724 evo_data(push, (oY << 16) | oX);
725 evo_data(push, (oY << 16) | oX);
726 evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
727 evo_data(push, 0x00000000);
728 evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
729 evo_data(push, umode->vdisplay << 16 | umode->hdisplay);
730 }
731
732 evo_kick(push, mast);
733
734 if (update) {
735 nv50_display_flip_stop(crtc);
736 nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
737 }
738 }
739
740 return 0;
592} 741}
593 742
594static void 743static int
595nv50_display_unk10_handler(struct drm_device *dev) 744nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
596{ 745{
597 struct nouveau_device *device = nouveau_dev(dev); 746 struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
598 struct nouveau_drm *drm = nouveau_drm(dev); 747 u32 *push, hue, vib;
599 struct nv50_display *disp = nv50_display(dev); 748 int adj;
600 u32 unk30 = nv_rd32(device, 0x610030), mc; 749
601 int i, crtc, or = 0, type = DCB_OUTPUT_ANY; 750 adj = (nv_crtc->color_vibrance > 0) ? 50 : 0;
751 vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff;
752 hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
753
754 push = evo_wait(mast, 16);
755 if (push) {
756 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
757 evo_mthd(push, 0x08a8 + (nv_crtc->index * 0x400), 1);
758 evo_data(push, (hue << 20) | (vib << 8));
759 } else {
760 evo_mthd(push, 0x0498 + (nv_crtc->index * 0x300), 1);
761 evo_data(push, (hue << 20) | (vib << 8));
762 }
602 763
603 NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30); 764 if (update) {
604 disp->irq.dcb = NULL; 765 evo_mthd(push, 0x0080, 1);
766 evo_data(push, 0x00000000);
767 }
768 evo_kick(push, mast);
769 }
605 770
606 nv_wr32(device, 0x619494, nv_rd32(device, 0x619494) & ~8); 771 return 0;
772}
607 773
608 /* Determine which CRTC we're dealing with, only 1 ever will be 774static int
609 * signalled at the same time with the current nouveau code. 775nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
610 */ 776 int x, int y, bool update)
611 crtc = ffs((unk30 & 0x00000060) >> 5) - 1; 777{
612 if (crtc < 0) 778 struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
613 goto ack; 779 struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
614 780 u32 *push;
615 /* Nothing needs to be done for the encoder */ 781
616 crtc = ffs((unk30 & 0x00000180) >> 7) - 1; 782 push = evo_wait(mast, 16);
617 if (crtc < 0) 783 if (push) {
618 goto ack; 784 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
619 785 evo_mthd(push, 0x0860 + (nv_crtc->index * 0x400), 1);
620 /* Find which encoder was connected to the CRTC */ 786 evo_data(push, nvfb->nvbo->bo.offset >> 8);
621 for (i = 0; type == DCB_OUTPUT_ANY && i < 3; i++) { 787 evo_mthd(push, 0x0868 + (nv_crtc->index * 0x400), 3);
622 mc = nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_C(i)); 788 evo_data(push, (fb->height << 16) | fb->width);
623 NV_DEBUG(drm, "DAC-%d mc: 0x%08x\n", i, mc); 789 evo_data(push, nvfb->r_pitch);
624 if (!(mc & (1 << crtc))) 790 evo_data(push, nvfb->r_format);
625 continue; 791 evo_mthd(push, 0x08c0 + (nv_crtc->index * 0x400), 1);
792 evo_data(push, (y << 16) | x);
793 if (nv50_vers(mast) > NV50_DISP_MAST_CLASS) {
794 evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
795 evo_data(push, nvfb->r_dma);
796 }
797 } else {
798 evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
799 evo_data(push, nvfb->nvbo->bo.offset >> 8);
800 evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4);
801 evo_data(push, (fb->height << 16) | fb->width);
802 evo_data(push, nvfb->r_pitch);
803 evo_data(push, nvfb->r_format);
804 evo_data(push, nvfb->r_dma);
805 evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
806 evo_data(push, (y << 16) | x);
807 }
626 808
627 switch ((mc & 0x00000f00) >> 8) { 809 if (update) {
628 case 0: type = DCB_OUTPUT_ANALOG; break; 810 evo_mthd(push, 0x0080, 1);
629 case 1: type = DCB_OUTPUT_TV; break; 811 evo_data(push, 0x00000000);
630 default:
631 NV_ERROR(drm, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
632 goto ack;
633 } 812 }
813 evo_kick(push, mast);
814 }
634 815
635 or = i; 816 nv_crtc->fb.tile_flags = nvfb->r_dma;
817 return 0;
818}
819
820static void
821nv50_crtc_cursor_show(struct nouveau_crtc *nv_crtc)
822{
823 struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
824 u32 *push = evo_wait(mast, 16);
825 if (push) {
826 if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
827 evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
828 evo_data(push, 0x85000000);
829 evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
830 } else
831 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
832 evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
833 evo_data(push, 0x85000000);
834 evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
835 evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
836 evo_data(push, NvEvoVRAM);
837 } else {
838 evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
839 evo_data(push, 0x85000000);
840 evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
841 evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
842 evo_data(push, NvEvoVRAM);
843 }
844 evo_kick(push, mast);
636 } 845 }
846}
637 847
638 for (i = 0; type == DCB_OUTPUT_ANY && i < nv50_sor_nr(dev); i++) { 848static void
639 if (nv_device(drm->device)->chipset < 0x90 || 849nv50_crtc_cursor_hide(struct nouveau_crtc *nv_crtc)
640 nv_device(drm->device)->chipset == 0x92 || 850{
641 nv_device(drm->device)->chipset == 0xa0) 851 struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
642 mc = nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_C(i)); 852 u32 *push = evo_wait(mast, 16);
643 else 853 if (push) {
644 mc = nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_C(i)); 854 if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
855 evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
856 evo_data(push, 0x05000000);
857 } else
858 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
859 evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
860 evo_data(push, 0x05000000);
861 evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
862 evo_data(push, 0x00000000);
863 } else {
864 evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1);
865 evo_data(push, 0x05000000);
866 evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
867 evo_data(push, 0x00000000);
868 }
869 evo_kick(push, mast);
870 }
871}
645 872
646 NV_DEBUG(drm, "SOR-%d mc: 0x%08x\n", i, mc); 873static void
647 if (!(mc & (1 << crtc))) 874nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
648 continue; 875{
876 struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
877
878 if (show)
879 nv50_crtc_cursor_show(nv_crtc);
880 else
881 nv50_crtc_cursor_hide(nv_crtc);
882
883 if (update) {
884 u32 *push = evo_wait(mast, 2);
885 if (push) {
886 evo_mthd(push, 0x0080, 1);
887 evo_data(push, 0x00000000);
888 evo_kick(push, mast);
889 }
890 }
891}
649 892
650 switch ((mc & 0x00000f00) >> 8) { 893static void
651 case 0: type = DCB_OUTPUT_LVDS; break; 894nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
652 case 1: type = DCB_OUTPUT_TMDS; break; 895{
653 case 2: type = DCB_OUTPUT_TMDS; break; 896}
654 case 5: type = DCB_OUTPUT_TMDS; break; 897
655 case 8: type = DCB_OUTPUT_DP; break; 898static void
656 case 9: type = DCB_OUTPUT_DP; break; 899nv50_crtc_prepare(struct drm_crtc *crtc)
657 default: 900{
658 NV_ERROR(drm, "invalid mc, SOR-%d: 0x%08x\n", i, mc); 901 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
659 goto ack; 902 struct nv50_mast *mast = nv50_mast(crtc->dev);
903 u32 *push;
904
905 nv50_display_flip_stop(crtc);
906
907 push = evo_wait(mast, 2);
908 if (push) {
909 if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
910 evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
911 evo_data(push, 0x00000000);
912 evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
913 evo_data(push, 0x40000000);
914 } else
915 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
916 evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
917 evo_data(push, 0x00000000);
918 evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
919 evo_data(push, 0x40000000);
920 evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
921 evo_data(push, 0x00000000);
922 } else {
923 evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
924 evo_data(push, 0x00000000);
925 evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1);
926 evo_data(push, 0x03000000);
927 evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
928 evo_data(push, 0x00000000);
929 }
930
931 evo_kick(push, mast);
932 }
933
934 nv50_crtc_cursor_show_hide(nv_crtc, false, false);
935}
936
937static void
938nv50_crtc_commit(struct drm_crtc *crtc)
939{
940 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
941 struct nv50_mast *mast = nv50_mast(crtc->dev);
942 u32 *push;
943
944 push = evo_wait(mast, 32);
945 if (push) {
946 if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
947 evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
948 evo_data(push, NvEvoVRAM_LP);
949 evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
950 evo_data(push, 0xc0000000);
951 evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
952 } else
953 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
954 evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
955 evo_data(push, nv_crtc->fb.tile_flags);
956 evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
957 evo_data(push, 0xc0000000);
958 evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
959 evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
960 evo_data(push, NvEvoVRAM);
961 } else {
962 evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
963 evo_data(push, nv_crtc->fb.tile_flags);
964 evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
965 evo_data(push, 0x83000000);
966 evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
967 evo_data(push, 0x00000000);
968 evo_data(push, 0x00000000);
969 evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
970 evo_data(push, NvEvoVRAM);
971 evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
972 evo_data(push, 0xffffff00);
660 } 973 }
661 974
662 or = i; 975 evo_kick(push, mast);
976 }
977
978 nv50_crtc_cursor_show_hide(nv_crtc, nv_crtc->cursor.visible, true);
979 nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
980}
981
982static bool
983nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
984 struct drm_display_mode *adjusted_mode)
985{
986 return true;
987}
988
989static int
990nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
991{
992 struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
993 int ret;
994
995 ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
996 if (ret)
997 return ret;
998
999 if (old_fb) {
1000 nvfb = nouveau_framebuffer(old_fb);
1001 nouveau_bo_unpin(nvfb->nvbo);
663 } 1002 }
664 1003
665 /* There was no encoder to disable */ 1004 return 0;
666 if (type == DCB_OUTPUT_ANY) 1005}
667 goto ack; 1006
1007static int
1008nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
1009 struct drm_display_mode *mode, int x, int y,
1010 struct drm_framebuffer *old_fb)
1011{
1012 struct nv50_mast *mast = nv50_mast(crtc->dev);
1013 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
1014 struct nouveau_connector *nv_connector;
1015 u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
1016 u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
1017 u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
1018 u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
1019 u32 vblan2e = 0, vblan2s = 1;
1020 u32 *push;
1021 int ret;
1022
1023 hactive = mode->htotal;
1024 hsynce = mode->hsync_end - mode->hsync_start - 1;
1025 hbackp = mode->htotal - mode->hsync_end;
1026 hblanke = hsynce + hbackp;
1027 hfrontp = mode->hsync_start - mode->hdisplay;
1028 hblanks = mode->htotal - hfrontp - 1;
1029
1030 vactive = mode->vtotal * vscan / ilace;
1031 vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
1032 vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
1033 vblanke = vsynce + vbackp;
1034 vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
1035 vblanks = vactive - vfrontp - 1;
1036 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
1037 vblan2e = vactive + vsynce + vbackp;
1038 vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
1039 vactive = (vactive * 2) + 1;
1040 }
668 1041
669 /* Disable the encoder */ 1042 ret = nv50_crtc_swap_fbs(crtc, old_fb);
670 for (i = 0; i < drm->vbios.dcb.entries; i++) { 1043 if (ret)
671 struct dcb_output *dcb = &drm->vbios.dcb.entry[i]; 1044 return ret;
672 1045
673 if (dcb->type == type && (dcb->or & (1 << or))) { 1046 push = evo_wait(mast, 64);
674 nouveau_bios_run_display_table(dev, 0, -1, dcb, -1); 1047 if (push) {
675 disp->irq.dcb = dcb; 1048 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
676 goto ack; 1049 evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2);
1050 evo_data(push, 0x00800000 | mode->clock);
1051 evo_data(push, (ilace == 2) ? 2 : 0);
1052 evo_mthd(push, 0x0810 + (nv_crtc->index * 0x400), 6);
1053 evo_data(push, 0x00000000);
1054 evo_data(push, (vactive << 16) | hactive);
1055 evo_data(push, ( vsynce << 16) | hsynce);
1056 evo_data(push, (vblanke << 16) | hblanke);
1057 evo_data(push, (vblanks << 16) | hblanks);
1058 evo_data(push, (vblan2e << 16) | vblan2s);
1059 evo_mthd(push, 0x082c + (nv_crtc->index * 0x400), 1);
1060 evo_data(push, 0x00000000);
1061 evo_mthd(push, 0x0900 + (nv_crtc->index * 0x400), 2);
1062 evo_data(push, 0x00000311);
1063 evo_data(push, 0x00000100);
1064 } else {
1065 evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
1066 evo_data(push, 0x00000000);
1067 evo_data(push, (vactive << 16) | hactive);
1068 evo_data(push, ( vsynce << 16) | hsynce);
1069 evo_data(push, (vblanke << 16) | hblanke);
1070 evo_data(push, (vblanks << 16) | hblanks);
1071 evo_data(push, (vblan2e << 16) | vblan2s);
1072 evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
1073 evo_data(push, 0x00000000); /* ??? */
1074 evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
1075 evo_data(push, mode->clock * 1000);
1076 evo_data(push, 0x00200000); /* ??? */
1077 evo_data(push, mode->clock * 1000);
1078 evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
1079 evo_data(push, 0x00000311);
1080 evo_data(push, 0x00000100);
677 } 1081 }
1082
1083 evo_kick(push, mast);
678 } 1084 }
679 1085
680 NV_ERROR(drm, "no dcb for %d %d 0x%08x\n", or, type, mc); 1086 nv_connector = nouveau_crtc_connector_get(nv_crtc);
681ack: 1087 nv50_crtc_set_dither(nv_crtc, false);
682 nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10); 1088 nv50_crtc_set_scale(nv_crtc, false);
683 nv_wr32(device, 0x610030, 0x80000000); 1089 nv50_crtc_set_color_vibrance(nv_crtc, false);
1090 nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
1091 return 0;
1092}
1093
1094static int
1095nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
1096 struct drm_framebuffer *old_fb)
1097{
1098 struct nouveau_drm *drm = nouveau_drm(crtc->dev);
1099 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
1100 int ret;
1101
1102 if (!crtc->fb) {
1103 NV_DEBUG(drm, "No FB bound\n");
1104 return 0;
1105 }
1106
1107 ret = nv50_crtc_swap_fbs(crtc, old_fb);
1108 if (ret)
1109 return ret;
1110
1111 nv50_display_flip_stop(crtc);
1112 nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
1113 nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
1114 return 0;
1115}
1116
1117static int
1118nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
1119 struct drm_framebuffer *fb, int x, int y,
1120 enum mode_set_atomic state)
1121{
1122 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
1123 nv50_display_flip_stop(crtc);
1124 nv50_crtc_set_image(nv_crtc, fb, x, y, true);
1125 return 0;
684} 1126}
685 1127
686static void 1128static void
687nv50_display_unk20_handler(struct drm_device *dev) 1129nv50_crtc_lut_load(struct drm_crtc *crtc)
688{ 1130{
689 struct nouveau_device *device = nouveau_dev(dev); 1131 struct nv50_disp *disp = nv50_disp(crtc->dev);
690 struct nouveau_drm *drm = nouveau_drm(dev); 1132 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
691 struct nv50_display *disp = nv50_display(dev); 1133 void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
692 u32 unk30 = nv_rd32(device, 0x610030), tmp, pclk, script, mc = 0; 1134 int i;
693 struct dcb_output *dcb;
694 int i, crtc, or = 0, type = DCB_OUTPUT_ANY;
695
696 NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
697 dcb = disp->irq.dcb;
698 if (dcb) {
699 nouveau_bios_run_display_table(dev, 0, -2, dcb, -1);
700 disp->irq.dcb = NULL;
701 }
702
703 /* CRTC clock change requested? */
704 crtc = ffs((unk30 & 0x00000600) >> 9) - 1;
705 if (crtc >= 0) {
706 pclk = nv_rd32(device, NV50_PDISPLAY_CRTC_P(crtc, CLOCK));
707 pclk &= 0x003fffff;
708 if (pclk)
709 nv50_crtc_set_clock(dev, crtc, pclk);
710
711 tmp = nv_rd32(device, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc));
712 tmp &= ~0x000000f;
713 nv_wr32(device, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc), tmp);
714 }
715
716 /* Nothing needs to be done for the encoder */
717 crtc = ffs((unk30 & 0x00000180) >> 7) - 1;
718 if (crtc < 0)
719 goto ack;
720 pclk = nv_rd32(device, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)) & 0x003fffff;
721
722 /* Find which encoder is connected to the CRTC */
723 for (i = 0; type == DCB_OUTPUT_ANY && i < 3; i++) {
724 mc = nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_P(i));
725 NV_DEBUG(drm, "DAC-%d mc: 0x%08x\n", i, mc);
726 if (!(mc & (1 << crtc)))
727 continue;
728 1135
729 switch ((mc & 0x00000f00) >> 8) { 1136 for (i = 0; i < 256; i++) {
730 case 0: type = DCB_OUTPUT_ANALOG; break; 1137 u16 r = nv_crtc->lut.r[i] >> 2;
731 case 1: type = DCB_OUTPUT_TV; break; 1138 u16 g = nv_crtc->lut.g[i] >> 2;
732 default: 1139 u16 b = nv_crtc->lut.b[i] >> 2;
733 NV_ERROR(drm, "invalid mc, DAC-%d: 0x%08x\n", i, mc); 1140
734 goto ack; 1141 if (nv_mclass(disp->core) < NVD0_DISP_CLASS) {
1142 writew(r + 0x0000, lut + (i * 0x08) + 0);
1143 writew(g + 0x0000, lut + (i * 0x08) + 2);
1144 writew(b + 0x0000, lut + (i * 0x08) + 4);
1145 } else {
1146 writew(r + 0x6000, lut + (i * 0x20) + 0);
1147 writew(g + 0x6000, lut + (i * 0x20) + 2);
1148 writew(b + 0x6000, lut + (i * 0x20) + 4);
1149 }
1150 }
1151}
1152
1153static int
1154nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
1155 uint32_t handle, uint32_t width, uint32_t height)
1156{
1157 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
1158 struct drm_device *dev = crtc->dev;
1159 struct drm_gem_object *gem;
1160 struct nouveau_bo *nvbo;
1161 bool visible = (handle != 0);
1162 int i, ret = 0;
1163
1164 if (visible) {
1165 if (width != 64 || height != 64)
1166 return -EINVAL;
1167
1168 gem = drm_gem_object_lookup(dev, file_priv, handle);
1169 if (unlikely(!gem))
1170 return -ENOENT;
1171 nvbo = nouveau_gem_object(gem);
1172
1173 ret = nouveau_bo_map(nvbo);
1174 if (ret == 0) {
1175 for (i = 0; i < 64 * 64; i++) {
1176 u32 v = nouveau_bo_rd32(nvbo, i);
1177 nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v);
1178 }
1179 nouveau_bo_unmap(nvbo);
735 } 1180 }
736 1181
737 or = i; 1182 drm_gem_object_unreference_unlocked(gem);
738 } 1183 }
739 1184
740 for (i = 0; type == DCB_OUTPUT_ANY && i < nv50_sor_nr(dev); i++) { 1185 if (visible != nv_crtc->cursor.visible) {
741 if (nv_device(drm->device)->chipset < 0x90 || 1186 nv50_crtc_cursor_show_hide(nv_crtc, visible, true);
742 nv_device(drm->device)->chipset == 0x92 || 1187 nv_crtc->cursor.visible = visible;
743 nv_device(drm->device)->chipset == 0xa0) 1188 }
744 mc = nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_P(i));
745 else
746 mc = nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_P(i));
747 1189
748 NV_DEBUG(drm, "SOR-%d mc: 0x%08x\n", i, mc); 1190 return ret;
749 if (!(mc & (1 << crtc))) 1191}
750 continue;
751 1192
752 switch ((mc & 0x00000f00) >> 8) { 1193static int
753 case 0: type = DCB_OUTPUT_LVDS; break; 1194nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
754 case 1: type = DCB_OUTPUT_TMDS; break; 1195{
755 case 2: type = DCB_OUTPUT_TMDS; break; 1196 struct nv50_curs *curs = nv50_curs(crtc);
756 case 5: type = DCB_OUTPUT_TMDS; break; 1197 struct nv50_chan *chan = nv50_chan(curs);
757 case 8: type = DCB_OUTPUT_DP; break; 1198 nv_wo32(chan->user, 0x0084, (y << 16) | (x & 0xffff));
758 case 9: type = DCB_OUTPUT_DP; break; 1199 nv_wo32(chan->user, 0x0080, 0x00000000);
759 default: 1200 return 0;
760 NV_ERROR(drm, "invalid mc, SOR-%d: 0x%08x\n", i, mc); 1201}
761 goto ack;
762 }
763 1202
764 or = i; 1203static void
1204nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
1205 uint32_t start, uint32_t size)
1206{
1207 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
1208 u32 end = max(start + size, (u32)256);
1209 u32 i;
1210
1211 for (i = start; i < end; i++) {
1212 nv_crtc->lut.r[i] = r[i];
1213 nv_crtc->lut.g[i] = g[i];
1214 nv_crtc->lut.b[i] = b[i];
765 } 1215 }
766 1216
767 if (type == DCB_OUTPUT_ANY) 1217 nv50_crtc_lut_load(crtc);
768 goto ack; 1218}
769 1219
770 /* Enable the encoder */ 1220static void
771 for (i = 0; i < drm->vbios.dcb.entries; i++) { 1221nv50_crtc_destroy(struct drm_crtc *crtc)
772 dcb = &drm->vbios.dcb.entry[i]; 1222{
773 if (dcb->type == type && (dcb->or & (1 << or))) 1223 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
774 break; 1224 struct nv50_disp *disp = nv50_disp(crtc->dev);
1225 struct nv50_head *head = nv50_head(crtc);
1226 nv50_dmac_destroy(disp->core, &head->ovly.base);
1227 nv50_pioc_destroy(disp->core, &head->oimm.base);
1228 nv50_dmac_destroy(disp->core, &head->sync.base);
1229 nv50_pioc_destroy(disp->core, &head->curs.base);
1230 nouveau_bo_unmap(nv_crtc->cursor.nvbo);
1231 if (nv_crtc->cursor.nvbo)
1232 nouveau_bo_unpin(nv_crtc->cursor.nvbo);
1233 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
1234 nouveau_bo_unmap(nv_crtc->lut.nvbo);
1235 if (nv_crtc->lut.nvbo)
1236 nouveau_bo_unpin(nv_crtc->lut.nvbo);
1237 nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
1238 drm_crtc_cleanup(crtc);
1239 kfree(crtc);
1240}
1241
1242static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = {
1243 .dpms = nv50_crtc_dpms,
1244 .prepare = nv50_crtc_prepare,
1245 .commit = nv50_crtc_commit,
1246 .mode_fixup = nv50_crtc_mode_fixup,
1247 .mode_set = nv50_crtc_mode_set,
1248 .mode_set_base = nv50_crtc_mode_set_base,
1249 .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
1250 .load_lut = nv50_crtc_lut_load,
1251};
1252
1253static const struct drm_crtc_funcs nv50_crtc_func = {
1254 .cursor_set = nv50_crtc_cursor_set,
1255 .cursor_move = nv50_crtc_cursor_move,
1256 .gamma_set = nv50_crtc_gamma_set,
1257 .set_config = drm_crtc_helper_set_config,
1258 .destroy = nv50_crtc_destroy,
1259 .page_flip = nouveau_crtc_page_flip,
1260};
1261
1262static void
1263nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
1264{
1265}
1266
1267static void
1268nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
1269{
1270}
1271
1272static int
1273nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index)
1274{
1275 struct nv50_disp *disp = nv50_disp(dev);
1276 struct nv50_head *head;
1277 struct drm_crtc *crtc;
1278 int ret, i;
1279
1280 head = kzalloc(sizeof(*head), GFP_KERNEL);
1281 if (!head)
1282 return -ENOMEM;
1283
1284 head->base.index = index;
1285 head->base.set_dither = nv50_crtc_set_dither;
1286 head->base.set_scale = nv50_crtc_set_scale;
1287 head->base.set_color_vibrance = nv50_crtc_set_color_vibrance;
1288 head->base.color_vibrance = 50;
1289 head->base.vibrant_hue = 0;
1290 head->base.cursor.set_offset = nv50_cursor_set_offset;
1291 head->base.cursor.set_pos = nv50_cursor_set_pos;
1292 for (i = 0; i < 256; i++) {
1293 head->base.lut.r[i] = i << 8;
1294 head->base.lut.g[i] = i << 8;
1295 head->base.lut.b[i] = i << 8;
775 } 1296 }
776 1297
777 if (i == drm->vbios.dcb.entries) { 1298 crtc = &head->base.base;
778 NV_ERROR(drm, "no dcb for %d %d 0x%08x\n", or, type, mc); 1299 drm_crtc_init(dev, crtc, &nv50_crtc_func);
779 goto ack; 1300 drm_crtc_helper_add(crtc, &nv50_crtc_hfunc);
1301 drm_mode_crtc_set_gamma_size(crtc, 256);
1302
1303 ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
1304 0, 0x0000, NULL, &head->base.lut.nvbo);
1305 if (!ret) {
1306 ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM);
1307 if (!ret) {
1308 ret = nouveau_bo_map(head->base.lut.nvbo);
1309 if (ret)
1310 nouveau_bo_unpin(head->base.lut.nvbo);
1311 }
1312 if (ret)
1313 nouveau_bo_ref(NULL, &head->base.lut.nvbo);
780 } 1314 }
781 1315
782 script = nv50_display_script_select(dev, dcb, mc, pclk); 1316 if (ret)
783 nouveau_bios_run_display_table(dev, script, pclk, dcb, -1); 1317 goto out;
784 1318
785 if (type == DCB_OUTPUT_DP) { 1319 nv50_crtc_lut_load(crtc);
786 int link = !(dcb->dpconf.sor.link & 1); 1320
787 if ((mc & 0x000f0000) == 0x00020000) 1321 /* allocate cursor resources */
788 nv50_sor_dp_calc_tu(dev, or, link, pclk, 18); 1322 ret = nv50_pioc_create(disp->core, NV50_DISP_CURS_CLASS, index,
789 else 1323 &(struct nv50_display_curs_class) {
790 nv50_sor_dp_calc_tu(dev, or, link, pclk, 24); 1324 .head = index,
1325 }, sizeof(struct nv50_display_curs_class),
1326 &head->curs.base);
1327 if (ret)
1328 goto out;
1329
1330 ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
1331 0, 0x0000, NULL, &head->base.cursor.nvbo);
1332 if (!ret) {
1333 ret = nouveau_bo_pin(head->base.cursor.nvbo, TTM_PL_FLAG_VRAM);
1334 if (!ret) {
1335 ret = nouveau_bo_map(head->base.cursor.nvbo);
1336 if (ret)
1337 nouveau_bo_unpin(head->base.lut.nvbo);
1338 }
1339 if (ret)
1340 nouveau_bo_ref(NULL, &head->base.cursor.nvbo);
791 } 1341 }
792 1342
793 if (dcb->type != DCB_OUTPUT_ANALOG) { 1343 if (ret)
794 tmp = nv_rd32(device, NV50_PDISPLAY_SOR_CLK_CTRL2(or)); 1344 goto out;
795 tmp &= ~0x00000f0f; 1345
796 if (script & 0x0100) 1346 /* allocate page flip / sync resources */
797 tmp |= 0x00000101; 1347 ret = nv50_dmac_create(disp->core, NV50_DISP_SYNC_CLASS, index,
798 nv_wr32(device, NV50_PDISPLAY_SOR_CLK_CTRL2(or), tmp); 1348 &(struct nv50_display_sync_class) {
799 } else { 1349 .pushbuf = EVO_PUSH_HANDLE(SYNC, index),
800 nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0); 1350 .head = index,
1351 }, sizeof(struct nv50_display_sync_class),
1352 disp->sync->bo.offset, &head->sync.base);
1353 if (ret)
1354 goto out;
1355
1356 head->sync.sem.offset = EVO_SYNC(1 + index, 0x00);
1357
1358 /* allocate overlay resources */
1359 ret = nv50_pioc_create(disp->core, NV50_DISP_OIMM_CLASS, index,
1360 &(struct nv50_display_oimm_class) {
1361 .head = index,
1362 }, sizeof(struct nv50_display_oimm_class),
1363 &head->oimm.base);
1364 if (ret)
1365 goto out;
1366
1367 ret = nv50_dmac_create(disp->core, NV50_DISP_OVLY_CLASS, index,
1368 &(struct nv50_display_ovly_class) {
1369 .pushbuf = EVO_PUSH_HANDLE(OVLY, index),
1370 .head = index,
1371 }, sizeof(struct nv50_display_ovly_class),
1372 disp->sync->bo.offset, &head->ovly.base);
1373 if (ret)
1374 goto out;
1375
1376out:
1377 if (ret)
1378 nv50_crtc_destroy(crtc);
1379 return ret;
1380}
1381
1382/******************************************************************************
1383 * DAC
1384 *****************************************************************************/
1385static void
1386nv50_dac_dpms(struct drm_encoder *encoder, int mode)
1387{
1388 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1389 struct nv50_disp *disp = nv50_disp(encoder->dev);
1390 int or = nv_encoder->or;
1391 u32 dpms_ctrl;
1392
1393 dpms_ctrl = 0x00000000;
1394 if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF)
1395 dpms_ctrl |= 0x00000001;
1396 if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF)
1397 dpms_ctrl |= 0x00000004;
1398
1399 nv_call(disp->core, NV50_DISP_DAC_PWR + or, dpms_ctrl);
1400}
1401
1402static bool
1403nv50_dac_mode_fixup(struct drm_encoder *encoder,
1404 const struct drm_display_mode *mode,
1405 struct drm_display_mode *adjusted_mode)
1406{
1407 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1408 struct nouveau_connector *nv_connector;
1409
1410 nv_connector = nouveau_encoder_connector_get(nv_encoder);
1411 if (nv_connector && nv_connector->native_mode) {
1412 if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
1413 int id = adjusted_mode->base.id;
1414 *adjusted_mode = *nv_connector->native_mode;
1415 adjusted_mode->base.id = id;
1416 }
801 } 1417 }
802 1418
803 disp->irq.dcb = dcb; 1419 return true;
804 disp->irq.pclk = pclk; 1420}
805 disp->irq.script = script; 1421
1422static void
1423nv50_dac_commit(struct drm_encoder *encoder)
1424{
1425}
1426
1427static void
1428nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1429 struct drm_display_mode *adjusted_mode)
1430{
1431 struct nv50_mast *mast = nv50_mast(encoder->dev);
1432 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1433 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
1434 u32 *push;
1435
1436 nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
1437
1438 push = evo_wait(mast, 8);
1439 if (push) {
1440 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
1441 u32 syncs = 0x00000000;
1442
1443 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
1444 syncs |= 0x00000001;
1445 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
1446 syncs |= 0x00000002;
1447
1448 evo_mthd(push, 0x0400 + (nv_encoder->or * 0x080), 2);
1449 evo_data(push, 1 << nv_crtc->index);
1450 evo_data(push, syncs);
1451 } else {
1452 u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
1453 u32 syncs = 0x00000001;
1454
1455 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
1456 syncs |= 0x00000008;
1457 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
1458 syncs |= 0x00000010;
1459
1460 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1461 magic |= 0x00000001;
1462
1463 evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
1464 evo_data(push, syncs);
1465 evo_data(push, magic);
1466 evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 1);
1467 evo_data(push, 1 << nv_crtc->index);
1468 }
1469
1470 evo_kick(push, mast);
1471 }
806 1472
807ack: 1473 nv_encoder->crtc = encoder->crtc;
808 nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
809 nv_wr32(device, 0x610030, 0x80000000);
810} 1474}
811 1475
812/* If programming a TMDS output on a SOR that can also be configured for
813 * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
814 *
815 * It looks like the VBIOS TMDS scripts make an attempt at this, however,
816 * the VBIOS scripts on at least one board I have only switch it off on
817 * link 0, causing a blank display if the output has previously been
818 * programmed for DisplayPort.
819 */
820static void 1476static void
821nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_output *dcb) 1477nv50_dac_disconnect(struct drm_encoder *encoder)
822{ 1478{
823 struct nouveau_device *device = nouveau_dev(dev); 1479 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
824 int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1); 1480 struct nv50_mast *mast = nv50_mast(encoder->dev);
1481 const int or = nv_encoder->or;
1482 u32 *push;
1483
1484 if (nv_encoder->crtc) {
1485 nv50_crtc_prepare(nv_encoder->crtc);
1486
1487 push = evo_wait(mast, 4);
1488 if (push) {
1489 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
1490 evo_mthd(push, 0x0400 + (or * 0x080), 1);
1491 evo_data(push, 0x00000000);
1492 } else {
1493 evo_mthd(push, 0x0180 + (or * 0x020), 1);
1494 evo_data(push, 0x00000000);
1495 }
1496
1497 evo_mthd(push, 0x0080, 1);
1498 evo_data(push, 0x00000000);
1499 evo_kick(push, mast);
1500 }
1501 }
1502
1503 nv_encoder->crtc = NULL;
1504}
1505
1506static enum drm_connector_status
1507nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
1508{
1509 struct nv50_disp *disp = nv50_disp(encoder->dev);
1510 int ret, or = nouveau_encoder(encoder)->or;
1511 u32 load = 0;
1512
1513 ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load));
1514 if (ret || load != 7)
1515 return connector_status_disconnected;
1516
1517 return connector_status_connected;
1518}
1519
1520static void
1521nv50_dac_destroy(struct drm_encoder *encoder)
1522{
1523 drm_encoder_cleanup(encoder);
1524 kfree(encoder);
1525}
1526
1527static const struct drm_encoder_helper_funcs nv50_dac_hfunc = {
1528 .dpms = nv50_dac_dpms,
1529 .mode_fixup = nv50_dac_mode_fixup,
1530 .prepare = nv50_dac_disconnect,
1531 .commit = nv50_dac_commit,
1532 .mode_set = nv50_dac_mode_set,
1533 .disable = nv50_dac_disconnect,
1534 .get_crtc = nv50_display_crtc_get,
1535 .detect = nv50_dac_detect
1536};
1537
1538static const struct drm_encoder_funcs nv50_dac_func = {
1539 .destroy = nv50_dac_destroy,
1540};
1541
1542static int
1543nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
1544{
1545 struct drm_device *dev = connector->dev;
1546 struct nouveau_encoder *nv_encoder;
825 struct drm_encoder *encoder; 1547 struct drm_encoder *encoder;
826 u32 tmp;
827 1548
828 if (dcb->type != DCB_OUTPUT_TMDS) 1549 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
1550 if (!nv_encoder)
1551 return -ENOMEM;
1552 nv_encoder->dcb = dcbe;
1553 nv_encoder->or = ffs(dcbe->or) - 1;
1554
1555 encoder = to_drm_encoder(nv_encoder);
1556 encoder->possible_crtcs = dcbe->heads;
1557 encoder->possible_clones = 0;
1558 drm_encoder_init(dev, encoder, &nv50_dac_func, DRM_MODE_ENCODER_DAC);
1559 drm_encoder_helper_add(encoder, &nv50_dac_hfunc);
1560
1561 drm_mode_connector_attach_encoder(connector, encoder);
1562 return 0;
1563}
1564
1565/******************************************************************************
1566 * Audio
1567 *****************************************************************************/
1568static void
1569nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
1570{
1571 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1572 struct nouveau_connector *nv_connector;
1573 struct nv50_disp *disp = nv50_disp(encoder->dev);
1574
1575 nv_connector = nouveau_encoder_connector_get(nv_encoder);
1576 if (!drm_detect_monitor_audio(nv_connector->edid))
1577 return;
1578
1579 drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
1580
1581 nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or,
1582 nv_connector->base.eld,
1583 nv_connector->base.eld[2] * 4);
1584}
1585
1586static void
1587nv50_audio_disconnect(struct drm_encoder *encoder)
1588{
1589 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1590 struct nv50_disp *disp = nv50_disp(encoder->dev);
1591
1592 nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or, NULL, 0);
1593}
1594
1595/******************************************************************************
1596 * HDMI
1597 *****************************************************************************/
1598static void
1599nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
1600{
1601 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1602 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
1603 struct nouveau_connector *nv_connector;
1604 struct nv50_disp *disp = nv50_disp(encoder->dev);
1605 const u32 moff = (nv_crtc->index << 3) | nv_encoder->or;
1606 u32 rekey = 56; /* binary driver, and tegra constant */
1607 u32 max_ac_packet;
1608
1609 nv_connector = nouveau_encoder_connector_get(nv_encoder);
1610 if (!drm_detect_hdmi_monitor(nv_connector->edid))
829 return; 1611 return;
830 1612
831 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 1613 max_ac_packet = mode->htotal - mode->hdisplay;
832 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 1614 max_ac_packet -= rekey;
1615 max_ac_packet -= 18; /* constant from tegra */
1616 max_ac_packet /= 32;
1617
1618 nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff,
1619 NV84_DISP_SOR_HDMI_PWR_STATE_ON |
1620 (max_ac_packet << 16) | rekey);
833 1621
834 if (nv_encoder->dcb->type == DCB_OUTPUT_DP && 1622 nv50_audio_mode_set(encoder, mode);
835 nv_encoder->dcb->or & (1 << or)) { 1623}
836 tmp = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)); 1624
837 tmp &= ~NV50_SOR_DP_CTRL_ENABLED; 1625static void
838 nv_wr32(device, NV50_SOR_DP_CTRL(or, link), tmp); 1626nv50_hdmi_disconnect(struct drm_encoder *encoder)
1627{
1628 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1629 struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
1630 struct nv50_disp *disp = nv50_disp(encoder->dev);
1631 const u32 moff = (nv_crtc->index << 3) | nv_encoder->or;
1632
1633 nv50_audio_disconnect(encoder);
1634
1635 nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff, 0x00000000);
1636}
1637
1638/******************************************************************************
1639 * SOR
1640 *****************************************************************************/
1641static void
1642nv50_sor_dpms(struct drm_encoder *encoder, int mode)
1643{
1644 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1645 struct drm_device *dev = encoder->dev;
1646 struct nv50_disp *disp = nv50_disp(dev);
1647 struct drm_encoder *partner;
1648 int or = nv_encoder->or;
1649
1650 nv_encoder->last_dpms = mode;
1651
1652 list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
1653 struct nouveau_encoder *nv_partner = nouveau_encoder(partner);
1654
1655 if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
1656 continue;
1657
1658 if (nv_partner != nv_encoder &&
1659 nv_partner->dcb->or == nv_encoder->dcb->or) {
1660 if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
1661 return;
839 break; 1662 break;
840 } 1663 }
841 } 1664 }
1665
1666 nv_call(disp->core, NV50_DISP_SOR_PWR + or, (mode == DRM_MODE_DPMS_ON));
1667
1668 if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
1669 nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, disp->core);
1670}
1671
1672static bool
1673nv50_sor_mode_fixup(struct drm_encoder *encoder,
1674 const struct drm_display_mode *mode,
1675 struct drm_display_mode *adjusted_mode)
1676{
1677 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1678 struct nouveau_connector *nv_connector;
1679
1680 nv_connector = nouveau_encoder_connector_get(nv_encoder);
1681 if (nv_connector && nv_connector->native_mode) {
1682 if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
1683 int id = adjusted_mode->base.id;
1684 *adjusted_mode = *nv_connector->native_mode;
1685 adjusted_mode->base.id = id;
1686 }
1687 }
1688
1689 return true;
842} 1690}
843 1691
844static void 1692static void
845nv50_display_unk40_handler(struct drm_device *dev) 1693nv50_sor_disconnect(struct drm_encoder *encoder)
846{ 1694{
847 struct nouveau_device *device = nouveau_dev(dev); 1695 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
848 struct nouveau_drm *drm = nouveau_drm(dev); 1696 struct nv50_mast *mast = nv50_mast(encoder->dev);
849 struct nv50_display *disp = nv50_display(dev); 1697 const int or = nv_encoder->or;
850 struct dcb_output *dcb = disp->irq.dcb; 1698 u32 *push;
851 u16 script = disp->irq.script; 1699
852 u32 unk30 = nv_rd32(device, 0x610030), pclk = disp->irq.pclk; 1700 if (nv_encoder->crtc) {
1701 nv50_crtc_prepare(nv_encoder->crtc);
1702
1703 push = evo_wait(mast, 4);
1704 if (push) {
1705 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
1706 evo_mthd(push, 0x0600 + (or * 0x40), 1);
1707 evo_data(push, 0x00000000);
1708 } else {
1709 evo_mthd(push, 0x0200 + (or * 0x20), 1);
1710 evo_data(push, 0x00000000);
1711 }
1712
1713 evo_mthd(push, 0x0080, 1);
1714 evo_data(push, 0x00000000);
1715 evo_kick(push, mast);
1716 }
853 1717
854 NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30); 1718 nv50_hdmi_disconnect(encoder);
855 disp->irq.dcb = NULL; 1719 }
856 if (!dcb)
857 goto ack;
858 1720
859 nouveau_bios_run_display_table(dev, script, -pclk, dcb, -1); 1721 nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
860 nv50_display_unk40_dp_set_tmds(dev, dcb); 1722 nv_encoder->crtc = NULL;
1723}
861 1724
862ack: 1725static void
863 nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40); 1726nv50_sor_prepare(struct drm_encoder *encoder)
864 nv_wr32(device, 0x610030, 0x80000000); 1727{
865 nv_wr32(device, 0x619494, nv_rd32(device, 0x619494) | 8); 1728 nv50_sor_disconnect(encoder);
1729 if (nouveau_encoder(encoder)->dcb->type == DCB_OUTPUT_DP)
1730 evo_sync(encoder->dev);
866} 1731}
867 1732
868static void 1733static void
869nv50_display_bh(unsigned long data) 1734nv50_sor_commit(struct drm_encoder *encoder)
870{ 1735{
871 struct drm_device *dev = (struct drm_device *)data; 1736}
872 struct nouveau_device *device = nouveau_dev(dev); 1737
1738static void
1739nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
1740 struct drm_display_mode *mode)
1741{
1742 struct nv50_disp *disp = nv50_disp(encoder->dev);
1743 struct nv50_mast *mast = nv50_mast(encoder->dev);
1744 struct drm_device *dev = encoder->dev;
873 struct nouveau_drm *drm = nouveau_drm(dev); 1745 struct nouveau_drm *drm = nouveau_drm(dev);
1746 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1747 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
1748 struct nouveau_connector *nv_connector;
1749 struct nvbios *bios = &drm->vbios;
1750 u32 *push, lvds = 0;
1751 u8 owner = 1 << nv_crtc->index;
1752 u8 proto = 0xf;
1753 u8 depth = 0x0;
874 1754
875 for (;;) { 1755 nv_connector = nouveau_encoder_connector_get(nv_encoder);
876 uint32_t intr0 = nv_rd32(device, NV50_PDISPLAY_INTR_0); 1756 switch (nv_encoder->dcb->type) {
877 uint32_t intr1 = nv_rd32(device, NV50_PDISPLAY_INTR_1); 1757 case DCB_OUTPUT_TMDS:
1758 if (nv_encoder->dcb->sorconf.link & 1) {
1759 if (mode->clock < 165000)
1760 proto = 0x1;
1761 else
1762 proto = 0x5;
1763 } else {
1764 proto = 0x2;
1765 }
878 1766
879 NV_DEBUG(drm, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1); 1767 nv50_hdmi_mode_set(encoder, mode);
1768 break;
1769 case DCB_OUTPUT_LVDS:
1770 proto = 0x0;
880 1771
881 if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10) 1772 if (bios->fp_no_ddc) {
882 nv50_display_unk10_handler(dev); 1773 if (bios->fp.dual_link)
883 else 1774 lvds |= 0x0100;
884 if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK20) 1775 if (bios->fp.if_is_24bit)
885 nv50_display_unk20_handler(dev); 1776 lvds |= 0x0200;
886 else 1777 } else {
887 if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK40) 1778 if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
888 nv50_display_unk40_handler(dev); 1779 if (((u8 *)nv_connector->edid)[121] == 2)
1780 lvds |= 0x0100;
1781 } else
1782 if (mode->clock >= bios->fp.duallink_transition_clk) {
1783 lvds |= 0x0100;
1784 }
1785
1786 if (lvds & 0x0100) {
1787 if (bios->fp.strapless_is_24bit & 2)
1788 lvds |= 0x0200;
1789 } else {
1790 if (bios->fp.strapless_is_24bit & 1)
1791 lvds |= 0x0200;
1792 }
1793
1794 if (nv_connector->base.display_info.bpc == 8)
1795 lvds |= 0x0200;
1796 }
1797
1798 nv_call(disp->core, NV50_DISP_SOR_LVDS_SCRIPT + nv_encoder->or, lvds);
1799 break;
1800 case DCB_OUTPUT_DP:
1801 if (nv_connector->base.display_info.bpc == 6) {
1802 nv_encoder->dp.datarate = mode->clock * 18 / 8;
1803 depth = 0x2;
1804 } else
1805 if (nv_connector->base.display_info.bpc == 8) {
1806 nv_encoder->dp.datarate = mode->clock * 24 / 8;
1807 depth = 0x5;
1808 } else {
1809 nv_encoder->dp.datarate = mode->clock * 30 / 8;
1810 depth = 0x6;
1811 }
1812
1813 if (nv_encoder->dcb->sorconf.link & 1)
1814 proto = 0x8;
889 else 1815 else
890 break; 1816 proto = 0x9;
1817 break;
1818 default:
1819 BUG_ON(1);
1820 break;
891 } 1821 }
892 1822
893 nv_wr32(device, NV03_PMC_INTR_EN_0, 1); 1823 nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
1824
1825 push = evo_wait(nv50_mast(dev), 8);
1826 if (push) {
1827 if (nv50_vers(mast) < NVD0_DISP_CLASS) {
1828 evo_mthd(push, 0x0600 + (nv_encoder->or * 0x040), 1);
1829 evo_data(push, (depth << 16) | (proto << 8) | owner);
1830 } else {
1831 u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
1832 u32 syncs = 0x00000001;
1833
1834 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
1835 syncs |= 0x00000008;
1836 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
1837 syncs |= 0x00000010;
1838
1839 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1840 magic |= 0x00000001;
1841
1842 evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
1843 evo_data(push, syncs | (depth << 6));
1844 evo_data(push, magic);
1845 evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 1);
1846 evo_data(push, owner | (proto << 8));
1847 }
1848
1849 evo_kick(push, mast);
1850 }
1851
1852 nv_encoder->crtc = encoder->crtc;
894} 1853}
895 1854
896static void 1855static void
897nv50_display_error_handler(struct drm_device *dev) 1856nv50_sor_destroy(struct drm_encoder *encoder)
898{ 1857{
899 struct nouveau_device *device = nouveau_dev(dev); 1858 drm_encoder_cleanup(encoder);
900 struct nouveau_drm *drm = nouveau_drm(dev); 1859 kfree(encoder);
901 u32 channels = (nv_rd32(device, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16; 1860}
902 u32 addr, data;
903 int chid;
904 1861
905 for (chid = 0; chid < 5; chid++) { 1862static const struct drm_encoder_helper_funcs nv50_sor_hfunc = {
906 if (!(channels & (1 << chid))) 1863 .dpms = nv50_sor_dpms,
907 continue; 1864 .mode_fixup = nv50_sor_mode_fixup,
1865 .prepare = nv50_sor_prepare,
1866 .commit = nv50_sor_commit,
1867 .mode_set = nv50_sor_mode_set,
1868 .disable = nv50_sor_disconnect,
1869 .get_crtc = nv50_display_crtc_get,
1870};
1871
1872static const struct drm_encoder_funcs nv50_sor_func = {
1873 .destroy = nv50_sor_destroy,
1874};
1875
1876static int
1877nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
1878{
1879 struct drm_device *dev = connector->dev;
1880 struct nouveau_encoder *nv_encoder;
1881 struct drm_encoder *encoder;
1882
1883 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
1884 if (!nv_encoder)
1885 return -ENOMEM;
1886 nv_encoder->dcb = dcbe;
1887 nv_encoder->or = ffs(dcbe->or) - 1;
1888 nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
908 1889
909 nv_wr32(device, NV50_PDISPLAY_INTR_0, 0x00010000 << chid); 1890 encoder = to_drm_encoder(nv_encoder);
910 addr = nv_rd32(device, NV50_PDISPLAY_TRAPPED_ADDR(chid)); 1891 encoder->possible_crtcs = dcbe->heads;
911 data = nv_rd32(device, NV50_PDISPLAY_TRAPPED_DATA(chid)); 1892 encoder->possible_clones = 0;
912 NV_ERROR(drm, "EvoCh %d Mthd 0x%04x Data 0x%08x " 1893 drm_encoder_init(dev, encoder, &nv50_sor_func, DRM_MODE_ENCODER_TMDS);
913 "(0x%04x 0x%02x)\n", chid, 1894 drm_encoder_helper_add(encoder, &nv50_sor_hfunc);
914 addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf); 1895
1896 drm_mode_connector_attach_encoder(connector, encoder);
1897 return 0;
1898}
1899
1900/******************************************************************************
1901 * Init
1902 *****************************************************************************/
1903void
1904nv50_display_fini(struct drm_device *dev)
1905{
1906}
915 1907
916 nv_wr32(device, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000); 1908int
1909nv50_display_init(struct drm_device *dev)
1910{
1911 u32 *push = evo_wait(nv50_mast(dev), 32);
1912 if (push) {
1913 evo_mthd(push, 0x0088, 1);
1914 evo_data(push, NvEvoSync);
1915 evo_kick(push, nv50_mast(dev));
1916 return evo_sync(dev);
917 } 1917 }
1918
1919 return -EBUSY;
918} 1920}
919 1921
920void 1922void
921nv50_display_intr(struct drm_device *dev) 1923nv50_display_destroy(struct drm_device *dev)
1924{
1925 struct nv50_disp *disp = nv50_disp(dev);
1926
1927 nv50_dmac_destroy(disp->core, &disp->mast.base);
1928
1929 nouveau_bo_unmap(disp->sync);
1930 if (disp->sync)
1931 nouveau_bo_unpin(disp->sync);
1932 nouveau_bo_ref(NULL, &disp->sync);
1933
1934 nouveau_display(dev)->priv = NULL;
1935 kfree(disp);
1936}
1937
1938int
1939nv50_display_create(struct drm_device *dev)
922{ 1940{
1941 static const u16 oclass[] = {
1942 NVE0_DISP_CLASS,
1943 NVD0_DISP_CLASS,
1944 NVA3_DISP_CLASS,
1945 NV94_DISP_CLASS,
1946 NVA0_DISP_CLASS,
1947 NV84_DISP_CLASS,
1948 NV50_DISP_CLASS,
1949 };
923 struct nouveau_device *device = nouveau_dev(dev); 1950 struct nouveau_device *device = nouveau_dev(dev);
924 struct nouveau_drm *drm = nouveau_drm(dev); 1951 struct nouveau_drm *drm = nouveau_drm(dev);
925 struct nv50_display *disp = nv50_display(dev); 1952 struct dcb_table *dcb = &drm->vbios.dcb;
926 uint32_t delayed = 0; 1953 struct drm_connector *connector, *tmp;
927 1954 struct nv50_disp *disp;
928 while (nv_rd32(device, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) { 1955 struct dcb_output *dcbe;
929 uint32_t intr0 = nv_rd32(device, NV50_PDISPLAY_INTR_0); 1956 int crtcs, ret, i;
930 uint32_t intr1 = nv_rd32(device, NV50_PDISPLAY_INTR_1);
931 uint32_t clock;
932 1957
933 NV_DEBUG(drm, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1); 1958 disp = kzalloc(sizeof(*disp), GFP_KERNEL);
1959 if (!disp)
1960 return -ENOMEM;
934 1961
935 if (!intr0 && !(intr1 & ~delayed)) 1962 nouveau_display(dev)->priv = disp;
936 break; 1963 nouveau_display(dev)->dtor = nv50_display_destroy;
1964 nouveau_display(dev)->init = nv50_display_init;
1965 nouveau_display(dev)->fini = nv50_display_fini;
937 1966
938 if (intr0 & 0x001f0000) { 1967 /* small shared memory area we use for notifiers and semaphores */
939 nv50_display_error_handler(dev); 1968 ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
940 intr0 &= ~0x001f0000; 1969 0, 0x0000, NULL, &disp->sync);
1970 if (!ret) {
1971 ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
1972 if (!ret) {
1973 ret = nouveau_bo_map(disp->sync);
1974 if (ret)
1975 nouveau_bo_unpin(disp->sync);
941 } 1976 }
1977 if (ret)
1978 nouveau_bo_ref(NULL, &disp->sync);
1979 }
942 1980
943 if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) { 1981 if (ret)
944 intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC; 1982 goto out;
945 delayed |= NV50_PDISPLAY_INTR_1_VBLANK_CRTC; 1983
946 } 1984 /* attempt to allocate a supported evo display class */
1985 ret = -ENODEV;
1986 for (i = 0; ret && i < ARRAY_SIZE(oclass); i++) {
1987 ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE,
1988 0xd1500000, oclass[i], NULL, 0,
1989 &disp->core);
1990 }
947 1991
948 clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 | 1992 if (ret)
949 NV50_PDISPLAY_INTR_1_CLK_UNK20 | 1993 goto out;
950 NV50_PDISPLAY_INTR_1_CLK_UNK40)); 1994
951 if (clock) { 1995 /* allocate master evo channel */
952 nv_wr32(device, NV03_PMC_INTR_EN_0, 0); 1996 ret = nv50_dmac_create(disp->core, NV50_DISP_MAST_CLASS, 0,
953 tasklet_schedule(&disp->tasklet); 1997 &(struct nv50_display_mast_class) {
954 delayed |= clock; 1998 .pushbuf = EVO_PUSH_HANDLE(MAST, 0),
955 intr1 &= ~clock; 1999 }, sizeof(struct nv50_display_mast_class),
956 } 2000 disp->sync->bo.offset, &disp->mast.base);
2001 if (ret)
2002 goto out;
2003
2004 /* create crtc objects to represent the hw heads */
2005 if (nv_mclass(disp->core) >= NVD0_DISP_CLASS)
2006 crtcs = nv_rd32(device, 0x022448);
2007 else
2008 crtcs = 2;
2009
2010 for (i = 0; i < crtcs; i++) {
2011 ret = nv50_crtc_create(dev, disp->core, i);
2012 if (ret)
2013 goto out;
2014 }
2015
2016 /* create encoder/connector objects based on VBIOS DCB table */
2017 for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
2018 connector = nouveau_connector_create(dev, dcbe->connector);
2019 if (IS_ERR(connector))
2020 continue;
957 2021
958 if (intr0) { 2022 if (dcbe->location != DCB_LOC_ON_CHIP) {
959 NV_ERROR(drm, "unknown PDISPLAY_INTR_0: 0x%08x\n", intr0); 2023 NV_WARN(drm, "skipping off-chip encoder %d/%d\n",
960 nv_wr32(device, NV50_PDISPLAY_INTR_0, intr0); 2024 dcbe->type, ffs(dcbe->or) - 1);
2025 continue;
961 } 2026 }
962 2027
963 if (intr1) { 2028 switch (dcbe->type) {
964 NV_ERROR(drm, 2029 case DCB_OUTPUT_TMDS:
965 "unknown PDISPLAY_INTR_1: 0x%08x\n", intr1); 2030 case DCB_OUTPUT_LVDS:
966 nv_wr32(device, NV50_PDISPLAY_INTR_1, intr1); 2031 case DCB_OUTPUT_DP:
2032 nv50_sor_create(connector, dcbe);
2033 break;
2034 case DCB_OUTPUT_ANALOG:
2035 nv50_dac_create(connector, dcbe);
2036 break;
2037 default:
2038 NV_WARN(drm, "skipping unsupported encoder %d/%d\n",
2039 dcbe->type, ffs(dcbe->or) - 1);
2040 continue;
967 } 2041 }
968 } 2042 }
2043
2044 /* cull any connectors we created that don't have an encoder */
2045 list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
2046 if (connector->encoder_ids[0])
2047 continue;
2048
2049 NV_WARN(drm, "%s has no encoders, removing\n",
2050 drm_get_connector_name(connector));
2051 connector->funcs->destroy(connector);
2052 }
2053
2054out:
2055 if (ret)
2056 nv50_display_destroy(dev);
2057 return ret;
969} 2058}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 973554d8a7a6..70da347aa8c5 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -30,77 +30,16 @@
30#include "nouveau_display.h" 30#include "nouveau_display.h"
31#include "nouveau_crtc.h" 31#include "nouveau_crtc.h"
32#include "nouveau_reg.h" 32#include "nouveau_reg.h"
33#include "nv50_evo.h"
34 33
35struct nv50_display_crtc { 34int nv50_display_create(struct drm_device *);
36 struct nouveau_channel *sync; 35void nv50_display_destroy(struct drm_device *);
37 struct { 36int nv50_display_init(struct drm_device *);
38 struct nouveau_bo *bo; 37void nv50_display_fini(struct drm_device *);
39 u32 offset;
40 u16 value;
41 } sem;
42};
43 38
44struct nv50_display {
45 struct nouveau_channel *master;
46
47 struct nouveau_gpuobj *ramin;
48 u32 dmao;
49 u32 hash;
50
51 struct nv50_display_crtc crtc[2];
52
53 struct tasklet_struct tasklet;
54 struct {
55 struct dcb_output *dcb;
56 u16 script;
57 u32 pclk;
58 } irq;
59};
60
61static inline struct nv50_display *
62nv50_display(struct drm_device *dev)
63{
64 return nouveau_display(dev)->priv;
65}
66
67int nv50_display_early_init(struct drm_device *dev);
68void nv50_display_late_takedown(struct drm_device *dev);
69int nv50_display_create(struct drm_device *dev);
70int nv50_display_init(struct drm_device *dev);
71void nv50_display_fini(struct drm_device *dev);
72void nv50_display_destroy(struct drm_device *dev);
73void nv50_display_intr(struct drm_device *);
74int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
75int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
76
77u32 nv50_display_active_crtcs(struct drm_device *);
78
79int nv50_display_sync(struct drm_device *);
80int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
81 struct nouveau_channel *chan);
82void nv50_display_flip_stop(struct drm_crtc *); 39void nv50_display_flip_stop(struct drm_crtc *);
83 40int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
84int nv50_evo_create(struct drm_device *dev);
85void nv50_evo_destroy(struct drm_device *dev);
86int nv50_evo_init(struct drm_device *dev);
87void nv50_evo_fini(struct drm_device *dev);
88void nv50_evo_dmaobj_init(struct nouveau_gpuobj *, u32 memtype, u64 base,
89 u64 size);
90int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 handle, u32 memtype,
91 u64 base, u64 size, struct nouveau_gpuobj **);
92
93int nvd0_display_create(struct drm_device *);
94void nvd0_display_destroy(struct drm_device *);
95int nvd0_display_init(struct drm_device *);
96void nvd0_display_fini(struct drm_device *);
97void nvd0_display_intr(struct drm_device *);
98
99void nvd0_display_flip_stop(struct drm_crtc *);
100int nvd0_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
101 struct nouveau_channel *, u32 swap_interval); 41 struct nouveau_channel *, u32 swap_interval);
102 42
103struct nouveau_bo *nv50_display_crtc_sema(struct drm_device *, int head); 43struct nouveau_bo *nv50_display_crtc_sema(struct drm_device *, int head);
104struct nouveau_bo *nvd0_display_crtc_sema(struct drm_device *, int head);
105 44
106#endif /* __NV50_DISPLAY_H__ */ 45#endif /* __NV50_DISPLAY_H__ */
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
deleted file mode 100644
index 9f6f55cdfa77..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ /dev/null
@@ -1,403 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26
27#include "nouveau_drm.h"
28#include "nouveau_dma.h"
29#include "nv50_display.h"
30
31#include <core/gpuobj.h>
32
33#include <subdev/timer.h>
34#include <subdev/fb.h>
35
36static u32
37nv50_evo_rd32(struct nouveau_object *object, u32 addr)
38{
39 void __iomem *iomem = object->oclass->ofuncs->rd08;
40 return ioread32_native(iomem + addr);
41}
42
43static void
44nv50_evo_wr32(struct nouveau_object *object, u32 addr, u32 data)
45{
46 void __iomem *iomem = object->oclass->ofuncs->rd08;
47 iowrite32_native(data, iomem + addr);
48}
49
50static void
51nv50_evo_channel_del(struct nouveau_channel **pevo)
52{
53 struct nouveau_channel *evo = *pevo;
54
55 if (!evo)
56 return;
57 *pevo = NULL;
58
59 nouveau_bo_unmap(evo->push.buffer);
60 nouveau_bo_ref(NULL, &evo->push.buffer);
61
62 if (evo->object)
63 iounmap(evo->object->oclass->ofuncs);
64
65 kfree(evo);
66}
67
68int
69nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype,
70 u64 base, u64 size, struct nouveau_gpuobj **pobj)
71{
72 struct drm_device *dev = evo->fence;
73 struct nouveau_drm *drm = nouveau_drm(dev);
74 struct nv50_display *disp = nv50_display(dev);
75 u32 dmao = disp->dmao;
76 u32 hash = disp->hash;
77 u32 flags5;
78
79 if (nv_device(drm->device)->chipset < 0xc0) {
80 /* not supported on 0x50, specified in format mthd */
81 if (nv_device(drm->device)->chipset == 0x50)
82 memtype = 0;
83 flags5 = 0x00010000;
84 } else {
85 if (memtype & 0x80000000)
86 flags5 = 0x00000000; /* large pages */
87 else
88 flags5 = 0x00020000;
89 }
90
91 nv_wo32(disp->ramin, dmao + 0x00, 0x0019003d | (memtype << 22));
92 nv_wo32(disp->ramin, dmao + 0x04, lower_32_bits(base + size - 1));
93 nv_wo32(disp->ramin, dmao + 0x08, lower_32_bits(base));
94 nv_wo32(disp->ramin, dmao + 0x0c, upper_32_bits(base + size - 1) << 24 |
95 upper_32_bits(base));
96 nv_wo32(disp->ramin, dmao + 0x10, 0x00000000);
97 nv_wo32(disp->ramin, dmao + 0x14, flags5);
98
99 nv_wo32(disp->ramin, hash + 0x00, handle);
100 nv_wo32(disp->ramin, hash + 0x04, (evo->handle << 28) | (dmao << 10) |
101 evo->handle);
102
103 disp->dmao += 0x20;
104 disp->hash += 0x08;
105 return 0;
106}
107
108static int
109nv50_evo_channel_new(struct drm_device *dev, int chid,
110 struct nouveau_channel **pevo)
111{
112 struct nouveau_drm *drm = nouveau_drm(dev);
113 struct nv50_display *disp = nv50_display(dev);
114 struct nouveau_channel *evo;
115 int ret;
116
117 evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
118 if (!evo)
119 return -ENOMEM;
120 *pevo = evo;
121
122 evo->drm = drm;
123 evo->handle = chid;
124 evo->fence = dev;
125 evo->user_get = 4;
126 evo->user_put = 0;
127
128 ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, NULL,
129 &evo->push.buffer);
130 if (ret == 0)
131 ret = nouveau_bo_pin(evo->push.buffer, TTM_PL_FLAG_VRAM);
132 if (ret) {
133 NV_ERROR(drm, "Error creating EVO DMA push buffer: %d\n", ret);
134 nv50_evo_channel_del(pevo);
135 return ret;
136 }
137
138 ret = nouveau_bo_map(evo->push.buffer);
139 if (ret) {
140 NV_ERROR(drm, "Error mapping EVO DMA push buffer: %d\n", ret);
141 nv50_evo_channel_del(pevo);
142 return ret;
143 }
144
145 evo->object = kzalloc(sizeof(*evo->object), GFP_KERNEL);
146#ifdef NOUVEAU_OBJECT_MAGIC
147 evo->object->_magic = NOUVEAU_OBJECT_MAGIC;
148#endif
149 evo->object->parent = nv_object(disp->ramin)->parent;
150 evo->object->engine = nv_object(disp->ramin)->engine;
151 evo->object->oclass =
152 kzalloc(sizeof(*evo->object->oclass), GFP_KERNEL);
153 evo->object->oclass->ofuncs =
154 kzalloc(sizeof(*evo->object->oclass->ofuncs), GFP_KERNEL);
155 evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32;
156 evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32;
157 evo->object->oclass->ofuncs->rd08 =
158 ioremap(pci_resource_start(dev->pdev, 0) +
159 NV50_PDISPLAY_USER(evo->handle), PAGE_SIZE);
160 return 0;
161}
162
163static int
164nv50_evo_channel_init(struct nouveau_channel *evo)
165{
166 struct nouveau_drm *drm = evo->drm;
167 struct nouveau_device *device = nv_device(drm->device);
168 int id = evo->handle, ret, i;
169 u64 pushbuf = evo->push.buffer->bo.offset;
170 u32 tmp;
171
172 tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id));
173 if ((tmp & 0x009f0000) == 0x00020000)
174 nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000);
175
176 tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id));
177 if ((tmp & 0x003f0000) == 0x00030000)
178 nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000);
179
180 /* initialise fifo */
181 nv_wr32(device, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 |
182 NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM |
183 NV50_PDISPLAY_EVO_DMA_CB_VALID);
184 nv_wr32(device, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000);
185 nv_wr32(device, NV50_PDISPLAY_EVO_HASH_TAG(id), id);
186 nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA,
187 NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
188
189 nv_wr32(device, NV50_PDISPLAY_USER_PUT(id), 0x00000000);
190 nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 |
191 NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
192 if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) {
193 NV_ERROR(drm, "EvoCh %d init timeout: 0x%08x\n", id,
194 nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)));
195 return -EBUSY;
196 }
197
198 /* enable error reporting on the channel */
199 nv_mask(device, 0x610028, 0x00000000, 0x00010001 << id);
200
201 evo->dma.max = (4096/4) - 2;
202 evo->dma.max &= ~7;
203 evo->dma.put = 0;
204 evo->dma.cur = evo->dma.put;
205 evo->dma.free = evo->dma.max - evo->dma.cur;
206
207 ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
208 if (ret)
209 return ret;
210
211 for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
212 OUT_RING(evo, 0);
213
214 return 0;
215}
216
217static void
218nv50_evo_channel_fini(struct nouveau_channel *evo)
219{
220 struct nouveau_drm *drm = evo->drm;
221 struct nouveau_device *device = nv_device(drm->device);
222 int id = evo->handle;
223
224 nv_mask(device, 0x610028, 0x00010001 << id, 0x00000000);
225 nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
226 nv_wr32(device, NV50_PDISPLAY_INTR_0, (1 << id));
227 nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000);
228 if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) {
229 NV_ERROR(drm, "EvoCh %d takedown timeout: 0x%08x\n", id,
230 nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)));
231 }
232}
233
234void
235nv50_evo_destroy(struct drm_device *dev)
236{
237 struct nv50_display *disp = nv50_display(dev);
238 int i;
239
240 for (i = 0; i < 2; i++) {
241 if (disp->crtc[i].sem.bo) {
242 nouveau_bo_unmap(disp->crtc[i].sem.bo);
243 nouveau_bo_ref(NULL, &disp->crtc[i].sem.bo);
244 }
245 nv50_evo_channel_del(&disp->crtc[i].sync);
246 }
247 nv50_evo_channel_del(&disp->master);
248 nouveau_gpuobj_ref(NULL, &disp->ramin);
249}
250
251int
252nv50_evo_create(struct drm_device *dev)
253{
254 struct nouveau_drm *drm = nouveau_drm(dev);
255 struct nouveau_fb *pfb = nouveau_fb(drm->device);
256 struct nv50_display *disp = nv50_display(dev);
257 struct nouveau_channel *evo;
258 int ret, i, j;
259
260 /* setup object management on it, any other evo channel will
261 * use this also as there's no per-channel support on the
262 * hardware
263 */
264 ret = nouveau_gpuobj_new(drm->device, NULL, 32768, 65536,
265 NVOBJ_FLAG_ZERO_ALLOC, &disp->ramin);
266 if (ret) {
267 NV_ERROR(drm, "Error allocating EVO channel memory: %d\n", ret);
268 goto err;
269 }
270
271 disp->hash = 0x0000;
272 disp->dmao = 0x1000;
273
274 /* create primary evo channel, the one we use for modesetting
275 * purporses
276 */
277 ret = nv50_evo_channel_new(dev, 0, &disp->master);
278 if (ret)
279 return ret;
280 evo = disp->master;
281
282 ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000,
283 disp->ramin->addr + 0x2000, 0x1000, NULL);
284 if (ret)
285 goto err;
286
287 /* create some default objects for the scanout memtypes we support */
288 ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000,
289 0, pfb->ram.size, NULL);
290 if (ret)
291 goto err;
292
293 ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000,
294 0, pfb->ram.size, NULL);
295 if (ret)
296 goto err;
297
298 ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 |
299 (nv_device(drm->device)->chipset < 0xc0 ? 0x7a : 0xfe),
300 0, pfb->ram.size, NULL);
301 if (ret)
302 goto err;
303
304 ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 |
305 (nv_device(drm->device)->chipset < 0xc0 ? 0x70 : 0xfe),
306 0, pfb->ram.size, NULL);
307 if (ret)
308 goto err;
309
310 /* create "display sync" channels and other structures we need
311 * to implement page flipping
312 */
313 for (i = 0; i < 2; i++) {
314 struct nv50_display_crtc *dispc = &disp->crtc[i];
315 u64 offset;
316
317 ret = nv50_evo_channel_new(dev, 1 + i, &dispc->sync);
318 if (ret)
319 goto err;
320
321 ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
322 0, 0x0000, NULL, &dispc->sem.bo);
323 if (!ret) {
324 ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
325 if (!ret)
326 ret = nouveau_bo_map(dispc->sem.bo);
327 if (ret)
328 nouveau_bo_ref(NULL, &dispc->sem.bo);
329 offset = dispc->sem.bo->bo.offset;
330 }
331
332 if (ret)
333 goto err;
334
335 ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoSync, 0x0000,
336 offset, 4096, NULL);
337 if (ret)
338 goto err;
339
340 ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000,
341 0, pfb->ram.size, NULL);
342 if (ret)
343 goto err;
344
345 ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 |
346 (nv_device(drm->device)->chipset < 0xc0 ?
347 0x7a : 0xfe),
348 0, pfb->ram.size, NULL);
349 if (ret)
350 goto err;
351
352 ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 |
353 (nv_device(drm->device)->chipset < 0xc0 ?
354 0x70 : 0xfe),
355 0, pfb->ram.size, NULL);
356 if (ret)
357 goto err;
358
359 for (j = 0; j < 4096; j += 4)
360 nouveau_bo_wr32(dispc->sem.bo, j / 4, 0x74b1e000);
361 dispc->sem.offset = 0;
362 }
363
364 return 0;
365
366err:
367 nv50_evo_destroy(dev);
368 return ret;
369}
370
371int
372nv50_evo_init(struct drm_device *dev)
373{
374 struct nv50_display *disp = nv50_display(dev);
375 int ret, i;
376
377 ret = nv50_evo_channel_init(disp->master);
378 if (ret)
379 return ret;
380
381 for (i = 0; i < 2; i++) {
382 ret = nv50_evo_channel_init(disp->crtc[i].sync);
383 if (ret)
384 return ret;
385 }
386
387 return 0;
388}
389
390void
391nv50_evo_fini(struct drm_device *dev)
392{
393 struct nv50_display *disp = nv50_display(dev);
394 int i;
395
396 for (i = 0; i < 2; i++) {
397 if (disp->crtc[i].sync)
398 nv50_evo_channel_fini(disp->crtc[i].sync);
399 }
400
401 if (disp->master)
402 nv50_evo_channel_fini(disp->master);
403}
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
deleted file mode 100644
index 771d879bc834..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_evo.h
+++ /dev/null
@@ -1,120 +0,0 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __NV50_EVO_H__
28#define __NV50_EVO_H__
29
30#define NV50_EVO_UPDATE 0x00000080
31#define NV50_EVO_UNK84 0x00000084
32#define NV50_EVO_UNK84_NOTIFY 0x40000000
33#define NV50_EVO_UNK84_NOTIFY_DISABLED 0x00000000
34#define NV50_EVO_UNK84_NOTIFY_ENABLED 0x40000000
35#define NV50_EVO_DMA_NOTIFY 0x00000088
36#define NV50_EVO_DMA_NOTIFY_HANDLE 0xffffffff
37#define NV50_EVO_DMA_NOTIFY_HANDLE_NONE 0x00000000
38#define NV50_EVO_UNK8C 0x0000008C
39
40#define NV50_EVO_DAC(n, r) ((n) * 0x80 + NV50_EVO_DAC_##r)
41#define NV50_EVO_DAC_MODE_CTRL 0x00000400
42#define NV50_EVO_DAC_MODE_CTRL_CRTC0 0x00000001
43#define NV50_EVO_DAC_MODE_CTRL_CRTC1 0x00000002
44#define NV50_EVO_DAC_MODE_CTRL2 0x00000404
45#define NV50_EVO_DAC_MODE_CTRL2_NHSYNC 0x00000001
46#define NV50_EVO_DAC_MODE_CTRL2_NVSYNC 0x00000002
47
48#define NV50_EVO_SOR(n, r) ((n) * 0x40 + NV50_EVO_SOR_##r)
49#define NV50_EVO_SOR_MODE_CTRL 0x00000600
50#define NV50_EVO_SOR_MODE_CTRL_CRTC0 0x00000001
51#define NV50_EVO_SOR_MODE_CTRL_CRTC1 0x00000002
52#define NV50_EVO_SOR_MODE_CTRL_TMDS 0x00000100
53#define NV50_EVO_SOR_MODE_CTRL_TMDS_DUAL_LINK 0x00000400
54#define NV50_EVO_SOR_MODE_CTRL_NHSYNC 0x00001000
55#define NV50_EVO_SOR_MODE_CTRL_NVSYNC 0x00002000
56
57#define NV50_EVO_CRTC(n, r) ((n) * 0x400 + NV50_EVO_CRTC_##r)
58#define NV84_EVO_CRTC(n, r) ((n) * 0x400 + NV84_EVO_CRTC_##r)
59#define NV50_EVO_CRTC_UNK0800 0x00000800
60#define NV50_EVO_CRTC_CLOCK 0x00000804
61#define NV50_EVO_CRTC_INTERLACE 0x00000808
62#define NV50_EVO_CRTC_DISPLAY_START 0x00000810
63#define NV50_EVO_CRTC_DISPLAY_TOTAL 0x00000814
64#define NV50_EVO_CRTC_SYNC_DURATION 0x00000818
65#define NV50_EVO_CRTC_SYNC_START_TO_BLANK_END 0x0000081c
66#define NV50_EVO_CRTC_UNK0820 0x00000820
67#define NV50_EVO_CRTC_UNK0824 0x00000824
68#define NV50_EVO_CRTC_UNK082C 0x0000082c
69#define NV50_EVO_CRTC_CLUT_MODE 0x00000840
70/* You can't have a palette in 8 bit mode (=OFF) */
71#define NV50_EVO_CRTC_CLUT_MODE_BLANK 0x00000000
72#define NV50_EVO_CRTC_CLUT_MODE_OFF 0x80000000
73#define NV50_EVO_CRTC_CLUT_MODE_ON 0xC0000000
74#define NV50_EVO_CRTC_CLUT_OFFSET 0x00000844
75#define NV84_EVO_CRTC_CLUT_DMA 0x0000085C
76#define NV84_EVO_CRTC_CLUT_DMA_HANDLE 0xffffffff
77#define NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE 0x00000000
78#define NV50_EVO_CRTC_FB_OFFSET 0x00000860
79#define NV50_EVO_CRTC_FB_SIZE 0x00000868
80#define NV50_EVO_CRTC_FB_CONFIG 0x0000086c
81#define NV50_EVO_CRTC_FB_CONFIG_MODE 0x00100000
82#define NV50_EVO_CRTC_FB_CONFIG_MODE_TILE 0x00000000
83#define NV50_EVO_CRTC_FB_CONFIG_MODE_PITCH 0x00100000
84#define NV50_EVO_CRTC_FB_DEPTH 0x00000870
85#define NV50_EVO_CRTC_FB_DEPTH_8 0x00001e00
86#define NV50_EVO_CRTC_FB_DEPTH_15 0x0000e900
87#define NV50_EVO_CRTC_FB_DEPTH_16 0x0000e800
88#define NV50_EVO_CRTC_FB_DEPTH_24 0x0000cf00
89#define NV50_EVO_CRTC_FB_DEPTH_30 0x0000d100
90#define NV50_EVO_CRTC_FB_DMA 0x00000874
91#define NV50_EVO_CRTC_FB_DMA_HANDLE 0xffffffff
92#define NV50_EVO_CRTC_FB_DMA_HANDLE_NONE 0x00000000
93#define NV50_EVO_CRTC_CURSOR_CTRL 0x00000880
94#define NV50_EVO_CRTC_CURSOR_CTRL_HIDE 0x05000000
95#define NV50_EVO_CRTC_CURSOR_CTRL_SHOW 0x85000000
96#define NV50_EVO_CRTC_CURSOR_OFFSET 0x00000884
97#define NV84_EVO_CRTC_CURSOR_DMA 0x0000089c
98#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE 0xffffffff
99#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE 0x00000000
100#define NV50_EVO_CRTC_DITHER_CTRL 0x000008a0
101#define NV50_EVO_CRTC_DITHER_CTRL_OFF 0x00000000
102#define NV50_EVO_CRTC_DITHER_CTRL_ON 0x00000011
103#define NV50_EVO_CRTC_SCALE_CTRL 0x000008a4
104#define NV50_EVO_CRTC_SCALE_CTRL_INACTIVE 0x00000000
105#define NV50_EVO_CRTC_SCALE_CTRL_ACTIVE 0x00000009
106#define NV50_EVO_CRTC_COLOR_CTRL 0x000008a8
107#define NV50_EVO_CRTC_COLOR_CTRL_VIBRANCE 0x000fff00
108#define NV50_EVO_CRTC_COLOR_CTRL_HUE 0xfff00000
109#define NV50_EVO_CRTC_FB_POS 0x000008c0
110#define NV50_EVO_CRTC_REAL_RES 0x000008c8
111#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET 0x000008d4
112#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(x, y) \
113 ((((unsigned)y << 16) & 0xFFFF0000) | (((unsigned)x) & 0x0000FFFF))
114/* Both of these are needed, otherwise nothing happens. */
115#define NV50_EVO_CRTC_SCALE_RES1 0x000008d8
116#define NV50_EVO_CRTC_SCALE_RES2 0x000008dc
117#define NV50_EVO_CRTC_UNK900 0x00000900
118#define NV50_EVO_CRTC_UNK904 0x00000904
119
120#endif
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index e0763ea88ee2..d889f3ac0d41 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -110,8 +110,11 @@ nv50_fence_create(struct nouveau_drm *drm)
110 0, 0x0000, NULL, &priv->bo); 110 0, 0x0000, NULL, &priv->bo);
111 if (!ret) { 111 if (!ret) {
112 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM); 112 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
113 if (!ret) 113 if (!ret) {
114 ret = nouveau_bo_map(priv->bo); 114 ret = nouveau_bo_map(priv->bo);
115 if (ret)
116 nouveau_bo_unpin(priv->bo);
117 }
115 if (ret) 118 if (ret)
116 nouveau_bo_ref(NULL, &priv->bo); 119 nouveau_bo_ref(NULL, &priv->bo);
117 } 120 }
@@ -119,6 +122,7 @@ nv50_fence_create(struct nouveau_drm *drm)
119 if (ret == 0) { 122 if (ret == 0) {
120 nouveau_bo_wr32(priv->bo, 0x000, 0x00000000); 123 nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
121 priv->base.sync = nv17_fence_sync; 124 priv->base.sync = nv17_fence_sync;
125 priv->base.resume = nv17_fence_resume;
122 } 126 }
123 127
124 if (ret) 128 if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
index c4a65039b1ca..8bd5d2781baf 100644
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -546,7 +546,7 @@ calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
546{ 546{
547 struct nouveau_drm *drm = nouveau_drm(dev); 547 struct nouveau_drm *drm = nouveau_drm(dev);
548 struct nouveau_device *device = nouveau_dev(dev); 548 struct nouveau_device *device = nouveau_dev(dev);
549 u32 crtc_mask = nv50_display_active_crtcs(dev); 549 u32 crtc_mask = 0; /*XXX: nv50_display_active_crtcs(dev); */
550 struct nouveau_mem_exec_func exec = { 550 struct nouveau_mem_exec_func exec = {
551 .dev = dev, 551 .dev = dev,
552 .precharge = mclk_precharge, 552 .precharge = mclk_precharge,
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
deleted file mode 100644
index b562b59e1326..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ /dev/null
@@ -1,530 +0,0 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <drm/drmP.h>
28#include <drm/drm_crtc_helper.h>
29
30#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
31#include "nouveau_reg.h"
32#include "nouveau_drm.h"
33#include "nouveau_dma.h"
34#include "nouveau_encoder.h"
35#include "nouveau_connector.h"
36#include "nouveau_crtc.h"
37#include "nv50_display.h"
38
39#include <subdev/timer.h>
40
41static u32
42nv50_sor_dp_lane_map(struct drm_device *dev, struct dcb_output *dcb, u8 lane)
43{
44 struct nouveau_drm *drm = nouveau_drm(dev);
45 static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
46 static const u8 nv50[] = { 16, 8, 0, 24 };
47 if (nv_device(drm->device)->chipset == 0xaf)
48 return nvaf[lane];
49 return nv50[lane];
50}
51
52static void
53nv50_sor_dp_train_set(struct drm_device *dev, struct dcb_output *dcb, u8 pattern)
54{
55 struct nouveau_device *device = nouveau_dev(dev);
56 u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
57 nv_mask(device, NV50_SOR_DP_CTRL(or, link), 0x0f000000, pattern << 24);
58}
59
60static void
61nv50_sor_dp_train_adj(struct drm_device *dev, struct dcb_output *dcb,
62 u8 lane, u8 swing, u8 preem)
63{
64 struct nouveau_device *device = nouveau_dev(dev);
65 struct nouveau_drm *drm = nouveau_drm(dev);
66 u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
67 u32 shift = nv50_sor_dp_lane_map(dev, dcb, lane);
68 u32 mask = 0x000000ff << shift;
69 u8 *table, *entry, *config;
70
71 table = nouveau_dp_bios_data(dev, dcb, &entry);
72 if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
73 NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
74 return;
75 }
76
77 config = entry + table[4];
78 while (config[0] != swing || config[1] != preem) {
79 config += table[5];
80 if (config >= entry + table[4] + entry[4] * table[5])
81 return;
82 }
83
84 nv_mask(device, NV50_SOR_DP_UNK118(or, link), mask, config[2] << shift);
85 nv_mask(device, NV50_SOR_DP_UNK120(or, link), mask, config[3] << shift);
86 nv_mask(device, NV50_SOR_DP_UNK130(or, link), 0x0000ff00, config[4] << 8);
87}
88
89static void
90nv50_sor_dp_link_set(struct drm_device *dev, struct dcb_output *dcb, int crtc,
91 int link_nr, u32 link_bw, bool enhframe)
92{
93 struct nouveau_device *device = nouveau_dev(dev);
94 struct nouveau_drm *drm = nouveau_drm(dev);
95 u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
96 u32 dpctrl = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)) & ~0x001f4000;
97 u32 clksor = nv_rd32(device, 0x614300 + (or * 0x800)) & ~0x000c0000;
98 u8 *table, *entry, mask;
99 int i;
100
101 table = nouveau_dp_bios_data(dev, dcb, &entry);
102 if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
103 NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
104 return;
105 }
106
107 entry = ROMPTR(dev, entry[10]);
108 if (entry) {
109 while (link_bw < ROM16(entry[0]) * 10)
110 entry += 4;
111
112 nouveau_bios_run_init_table(dev, ROM16(entry[2]), dcb, crtc);
113 }
114
115 dpctrl |= ((1 << link_nr) - 1) << 16;
116 if (enhframe)
117 dpctrl |= 0x00004000;
118
119 if (link_bw > 162000)
120 clksor |= 0x00040000;
121
122 nv_wr32(device, 0x614300 + (or * 0x800), clksor);
123 nv_wr32(device, NV50_SOR_DP_CTRL(or, link), dpctrl);
124
125 mask = 0;
126 for (i = 0; i < link_nr; i++)
127 mask |= 1 << (nv50_sor_dp_lane_map(dev, dcb, i) >> 3);
128 nv_mask(device, NV50_SOR_DP_UNK130(or, link), 0x0000000f, mask);
129}
130
131static void
132nv50_sor_dp_link_get(struct drm_device *dev, u32 or, u32 link, u32 *nr, u32 *bw)
133{
134 struct nouveau_device *device = nouveau_dev(dev);
135 u32 dpctrl = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)) & 0x000f0000;
136 u32 clksor = nv_rd32(device, 0x614300 + (or * 0x800));
137 if (clksor & 0x000c0000)
138 *bw = 270000;
139 else
140 *bw = 162000;
141
142 if (dpctrl > 0x00030000) *nr = 4;
143 else if (dpctrl > 0x00010000) *nr = 2;
144 else *nr = 1;
145}
146
147void
148nv50_sor_dp_calc_tu(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
149{
150 struct nouveau_device *device = nouveau_dev(dev);
151 struct nouveau_drm *drm = nouveau_drm(dev);
152 const u32 symbol = 100000;
153 int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
154 int TU, VTUi, VTUf, VTUa;
155 u64 link_data_rate, link_ratio, unk;
156 u32 best_diff = 64 * symbol;
157 u32 link_nr, link_bw, r;
158
159 /* calculate packed data rate for each lane */
160 nv50_sor_dp_link_get(dev, or, link, &link_nr, &link_bw);
161 link_data_rate = (clk * bpp / 8) / link_nr;
162
163 /* calculate ratio of packed data rate to link symbol rate */
164 link_ratio = link_data_rate * symbol;
165 r = do_div(link_ratio, link_bw);
166
167 for (TU = 64; TU >= 32; TU--) {
168 /* calculate average number of valid symbols in each TU */
169 u32 tu_valid = link_ratio * TU;
170 u32 calc, diff;
171
172 /* find a hw representation for the fraction.. */
173 VTUi = tu_valid / symbol;
174 calc = VTUi * symbol;
175 diff = tu_valid - calc;
176 if (diff) {
177 if (diff >= (symbol / 2)) {
178 VTUf = symbol / (symbol - diff);
179 if (symbol - (VTUf * diff))
180 VTUf++;
181
182 if (VTUf <= 15) {
183 VTUa = 1;
184 calc += symbol - (symbol / VTUf);
185 } else {
186 VTUa = 0;
187 VTUf = 1;
188 calc += symbol;
189 }
190 } else {
191 VTUa = 0;
192 VTUf = min((int)(symbol / diff), 15);
193 calc += symbol / VTUf;
194 }
195
196 diff = calc - tu_valid;
197 } else {
198 /* no remainder, but the hw doesn't like the fractional
199 * part to be zero. decrement the integer part and
200 * have the fraction add a whole symbol back
201 */
202 VTUa = 0;
203 VTUf = 1;
204 VTUi--;
205 }
206
207 if (diff < best_diff) {
208 best_diff = diff;
209 bestTU = TU;
210 bestVTUa = VTUa;
211 bestVTUf = VTUf;
212 bestVTUi = VTUi;
213 if (diff == 0)
214 break;
215 }
216 }
217
218 if (!bestTU) {
219 NV_ERROR(drm, "DP: unable to find suitable config\n");
220 return;
221 }
222
223 /* XXX close to vbios numbers, but not right */
224 unk = (symbol - link_ratio) * bestTU;
225 unk *= link_ratio;
226 r = do_div(unk, symbol);
227 r = do_div(unk, symbol);
228 unk += 6;
229
230 nv_mask(device, NV50_SOR_DP_CTRL(or, link), 0x000001fc, bestTU << 2);
231 nv_mask(device, NV50_SOR_DP_SCFG(or, link), 0x010f7f3f, bestVTUa << 24 |
232 bestVTUf << 16 |
233 bestVTUi << 8 |
234 unk);
235}
236static void
237nv50_sor_disconnect(struct drm_encoder *encoder)
238{
239 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
240 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
241 struct drm_device *dev = encoder->dev;
242 struct nouveau_channel *evo = nv50_display(dev)->master;
243 int ret;
244
245 if (!nv_encoder->crtc)
246 return;
247 nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);
248
249 NV_DEBUG(drm, "Disconnecting SOR %d\n", nv_encoder->or);
250
251 ret = RING_SPACE(evo, 4);
252 if (ret) {
253 NV_ERROR(drm, "no space while disconnecting SOR\n");
254 return;
255 }
256 BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
257 OUT_RING (evo, 0);
258 BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
259 OUT_RING (evo, 0);
260
261 nouveau_hdmi_mode_set(encoder, NULL);
262
263 nv_encoder->crtc = NULL;
264 nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
265}
266
267static void
268nv50_sor_dpms(struct drm_encoder *encoder, int mode)
269{
270 struct nouveau_device *device = nouveau_dev(encoder->dev);
271 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
272 struct drm_device *dev = encoder->dev;
273 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
274 struct drm_encoder *enc;
275 uint32_t val;
276 int or = nv_encoder->or;
277
278 NV_DEBUG(drm, "or %d type %d mode %d\n", or, nv_encoder->dcb->type, mode);
279
280 nv_encoder->last_dpms = mode;
281 list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
282 struct nouveau_encoder *nvenc = nouveau_encoder(enc);
283
284 if (nvenc == nv_encoder ||
285 (nvenc->dcb->type != DCB_OUTPUT_TMDS &&
286 nvenc->dcb->type != DCB_OUTPUT_LVDS &&
287 nvenc->dcb->type != DCB_OUTPUT_DP) ||
288 nvenc->dcb->or != nv_encoder->dcb->or)
289 continue;
290
291 if (nvenc->last_dpms == DRM_MODE_DPMS_ON)
292 return;
293 }
294
295 /* wait for it to be done */
296 if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or),
297 NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
298 NV_ERROR(drm, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or);
299 NV_ERROR(drm, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or,
300 nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or)));
301 }
302
303 val = nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or));
304
305 if (mode == DRM_MODE_DPMS_ON)
306 val |= NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
307 else
308 val &= ~NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
309
310 nv_wr32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val |
311 NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING);
312 if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_STATE(or),
313 NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
314 NV_ERROR(drm, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or);
315 NV_ERROR(drm, "SOR_DPMS_STATE(%d) = 0x%08x\n", or,
316 nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_STATE(or)));
317 }
318
319 if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
320 struct dp_train_func func = {
321 .link_set = nv50_sor_dp_link_set,
322 .train_set = nv50_sor_dp_train_set,
323 .train_adj = nv50_sor_dp_train_adj
324 };
325
326 nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func);
327 }
328}
329
330static void
331nv50_sor_save(struct drm_encoder *encoder)
332{
333 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
334 NV_ERROR(drm, "!!\n");
335}
336
337static void
338nv50_sor_restore(struct drm_encoder *encoder)
339{
340 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
341 NV_ERROR(drm, "!!\n");
342}
343
344static bool
345nv50_sor_mode_fixup(struct drm_encoder *encoder,
346 const struct drm_display_mode *mode,
347 struct drm_display_mode *adjusted_mode)
348{
349 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
350 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
351 struct nouveau_connector *connector;
352
353 NV_DEBUG(drm, "or %d\n", nv_encoder->or);
354
355 connector = nouveau_encoder_connector_get(nv_encoder);
356 if (!connector) {
357 NV_ERROR(drm, "Encoder has no connector\n");
358 return false;
359 }
360
361 if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
362 connector->native_mode)
363 drm_mode_copy(adjusted_mode, connector->native_mode);
364
365 return true;
366}
367
368static void
369nv50_sor_prepare(struct drm_encoder *encoder)
370{
371 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
372 nv50_sor_disconnect(encoder);
373 if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
374 /* avoid race between link training and supervisor intr */
375 nv50_display_sync(encoder->dev);
376 }
377}
378
379static void
380nv50_sor_commit(struct drm_encoder *encoder)
381{
382}
383
/*
 * Program the SOR for a new mode: build the MODE_CTRL word from the DCB
 * output type (TMDS vs DP), link configuration, head index and sync
 * polarity, power the output on, then emit the method on the master EVO
 * channel.  The 0x0100/0x0200/... constants are raw MODE_CTRL field
 * values -- meanings are not documented here, only their selection logic.
 */
384static void
385nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
386 struct drm_display_mode *mode)
387{
388 struct nouveau_channel *evo = nv50_display(encoder->dev)->master;
389 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
390 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
391 struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
392 struct nouveau_connector *nv_connector;
393 uint32_t mode_ctl = 0;
394 int ret;
395
396 NV_DEBUG(drm, "or %d type %d -> crtc %d\n",
397 nv_encoder->or, nv_encoder->dcb->type, crtc->index);
398 nv_encoder->crtc = encoder->crtc;
399
400 switch (nv_encoder->dcb->type) {
401 case DCB_OUTPUT_TMDS:
	/* link A: pick single- vs dual-link by the 165MHz TMDS limit */
402 if (nv_encoder->dcb->sorconf.link & 1) {
403 if (mode->clock < 165000)
404 mode_ctl = 0x0100;
405 else
406 mode_ctl = 0x0500;
407 } else
408 mode_ctl = 0x0200;
409
410 nouveau_hdmi_mode_set(encoder, mode);
411 break;
412 case DCB_OUTPUT_DP:
	/* datarate = clock * bpp / 8; 6bpc panels get the 18bpp rate */
413 nv_connector = nouveau_encoder_connector_get(nv_encoder);
414 if (nv_connector && nv_connector->base.display_info.bpc == 6) {
415 nv_encoder->dp.datarate = mode->clock * 18 / 8;
416 mode_ctl |= 0x00020000;
417 } else {
418 nv_encoder->dp.datarate = mode->clock * 24 / 8;
419 mode_ctl |= 0x00050000;
420 }
421
422 if (nv_encoder->dcb->sorconf.link & 1)
423 mode_ctl |= 0x00000800;
424 else
425 mode_ctl |= 0x00000900;
426 break;
427 default:
428 break;
429 }
430
431 if (crtc->index == 1)
432 mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC1;
433 else
434 mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0;
435
436 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
437 mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC;
438
439 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
440 mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC;
441
	/* output must be powered before MODE_CTRL is pushed */
442 nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
443
444 ret = RING_SPACE(evo, 2);
445 if (ret) {
446 NV_ERROR(drm, "no space while connecting SOR\n");
447 nv_encoder->crtc = NULL;
448 return;
449 }
450 BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
451 OUT_RING(evo, mode_ctl);
452}
453
454static struct drm_crtc *
455nv50_sor_crtc_get(struct drm_encoder *encoder)
456{
457 return nouveau_encoder(encoder)->crtc;
458}
459
/* drm_crtc_helper glue for the SOR encoder; .detect is unused for SORs. */
460static const struct drm_encoder_helper_funcs nv50_sor_helper_funcs = {
461 .dpms = nv50_sor_dpms,
462 .save = nv50_sor_save,
463 .restore = nv50_sor_restore,
464 .mode_fixup = nv50_sor_mode_fixup,
465 .prepare = nv50_sor_prepare,
466 .commit = nv50_sor_commit,
467 .mode_set = nv50_sor_mode_set,
468 .get_crtc = nv50_sor_crtc_get,
469 .detect = NULL,
470 .disable = nv50_sor_disconnect
471};
472
473static void
474nv50_sor_destroy(struct drm_encoder *encoder)
475{
476 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
477 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
478
479 NV_DEBUG(drm, "\n");
480
481 drm_encoder_cleanup(encoder);
482
483 kfree(nv_encoder);
484}
485
/* Core encoder vtable; only destruction needs a driver hook here. */
486static const struct drm_encoder_funcs nv50_sor_encoder_funcs = {
487 .destroy = nv50_sor_destroy,
488};
489
490int
491nv50_sor_create(struct drm_connector *connector, struct dcb_output *entry)
492{
493 struct nouveau_encoder *nv_encoder = NULL;
494 struct drm_device *dev = connector->dev;
495 struct nouveau_drm *drm = nouveau_drm(dev);
496 struct drm_encoder *encoder;
497 int type;
498
499 NV_DEBUG(drm, "\n");
500
501 switch (entry->type) {
502 case DCB_OUTPUT_TMDS:
503 case DCB_OUTPUT_DP:
504 type = DRM_MODE_ENCODER_TMDS;
505 break;
506 case DCB_OUTPUT_LVDS:
507 type = DRM_MODE_ENCODER_LVDS;
508 break;
509 default:
510 return -EINVAL;
511 }
512
513 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
514 if (!nv_encoder)
515 return -ENOMEM;
516 encoder = to_drm_encoder(nv_encoder);
517
518 nv_encoder->dcb = entry;
519 nv_encoder->or = ffs(entry->or) - 1;
520 nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
521
522 drm_encoder_init(dev, encoder, &nv50_sor_encoder_funcs, type);
523 drm_encoder_helper_add(encoder, &nv50_sor_helper_funcs);
524
525 encoder->possible_crtcs = entry->heads;
526 encoder->possible_clones = 0;
527
528 drm_mode_connector_attach_encoder(connector, encoder);
529 return 0;
530}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
index 53299eac9676..2a56b1b551cb 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fence.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -114,17 +114,9 @@ nvc0_fence_context_del(struct nouveau_channel *chan)
114 struct nvc0_fence_chan *fctx = chan->fence; 114 struct nvc0_fence_chan *fctx = chan->fence;
115 int i; 115 int i;
116 116
117 if (nv_device(chan->drm->device)->card_type >= NV_D0) { 117 for (i = 0; i < dev->mode_config.num_crtc; i++) {
118 for (i = 0; i < dev->mode_config.num_crtc; i++) { 118 struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
119 struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i); 119 nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
120 nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
121 }
122 } else
123 if (nv_device(chan->drm->device)->card_type >= NV_50) {
124 for (i = 0; i < dev->mode_config.num_crtc; i++) {
125 struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
126 nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
127 }
128 } 120 }
129 121
130 nouveau_bo_vma_del(priv->bo, &fctx->vma); 122 nouveau_bo_vma_del(priv->bo, &fctx->vma);
@@ -154,12 +146,7 @@ nvc0_fence_context_new(struct nouveau_channel *chan)
154 146
155 /* map display semaphore buffers into channel's vm */ 147 /* map display semaphore buffers into channel's vm */
156 for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) { 148 for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
157 struct nouveau_bo *bo; 149 struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
158 if (nv_device(chan->drm->device)->card_type >= NV_D0)
159 bo = nvd0_display_crtc_sema(chan->drm->dev, i);
160 else
161 bo = nv50_display_crtc_sema(chan->drm->dev, i);
162
163 ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]); 150 ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
164 } 151 }
165 152
@@ -203,6 +190,8 @@ nvc0_fence_destroy(struct nouveau_drm *drm)
203{ 190{
204 struct nvc0_fence_priv *priv = drm->fence; 191 struct nvc0_fence_priv *priv = drm->fence;
205 nouveau_bo_unmap(priv->bo); 192 nouveau_bo_unmap(priv->bo);
193 if (priv->bo)
194 nouveau_bo_unpin(priv->bo);
206 nouveau_bo_ref(NULL, &priv->bo); 195 nouveau_bo_ref(NULL, &priv->bo);
207 drm->fence = NULL; 196 drm->fence = NULL;
208 kfree(priv); 197 kfree(priv);
@@ -232,8 +221,11 @@ nvc0_fence_create(struct nouveau_drm *drm)
232 TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo); 221 TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
233 if (ret == 0) { 222 if (ret == 0) {
234 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM); 223 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
235 if (ret == 0) 224 if (ret == 0) {
236 ret = nouveau_bo_map(priv->bo); 225 ret = nouveau_bo_map(priv->bo);
226 if (ret)
227 nouveau_bo_unpin(priv->bo);
228 }
237 if (ret) 229 if (ret)
238 nouveau_bo_ref(NULL, &priv->bo); 230 nouveau_bo_ref(NULL, &priv->bo);
239 } 231 }
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
deleted file mode 100644
index c402fca2b2b8..000000000000
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ /dev/null
@@ -1,2141 +0,0 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <linux/dma-mapping.h>
26
27#include <drm/drmP.h>
28#include <drm/drm_crtc_helper.h>
29
30#include "nouveau_drm.h"
31#include "nouveau_dma.h"
32#include "nouveau_gem.h"
33#include "nouveau_connector.h"
34#include "nouveau_encoder.h"
35#include "nouveau_crtc.h"
36#include "nouveau_fence.h"
37#include "nv50_display.h"
38
39#include <core/gpuobj.h>
40
41#include <subdev/timer.h>
42#include <subdev/bar.h>
43#include <subdev/fb.h>
44
/*
 * EVO channel index layout: one master channel plus, per head (c),
 * flip, overlay, overlay-immediate and cursor channels.
 */
45#define EVO_DMA_NR 9
46
47#define EVO_MASTER (0x00)
48#define EVO_FLIP(c) (0x01 + (c))
49#define EVO_OVLY(c) (0x05 + (c))
50#define EVO_OIMM(c) (0x09 + (c))
51#define EVO_CURS(c) (0x0d + (c))
52
53/* offsets in shared sync bo of various structures */
54#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
55#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
56#define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00)
57#define EVO_FLIP_SEM1(c) EVO_SYNC((c), 0x10)
/* Per-EVO-channel software state. */
59struct evo {
	/* channel index (see EVO_* defines above) */
60 int idx;
	/* DMA address of the push buffer (programmed in evo_init_dma) */
61 dma_addr_t handle;
	/* CPU mapping of the push buffer */
62 u32 *ptr;
	/* flip-completion semaphore slot in the shared sync bo */
63 struct {
64 u32 offset;
65 u16 value;
66 } sem;
67};
68
/* Top-level display state for NVD0-class hardware. */
69struct nvd0_display {
	/* GPU object backing the EVO channels */
70 struct nouveau_gpuobj *mem;
	/* shared sync buffer (notifies + flip semaphores) */
71 struct nouveau_bo *sync;
72 struct evo evo[9];
73
	/* bottom half for display interrupts; 'modeset' carries pending work */
74 struct tasklet_struct tasklet;
75 u32 modeset;
76};
77
78static struct nvd0_display *
79nvd0_display(struct drm_device *dev)
80{
81 return nouveau_display(dev)->priv;
82}
83
84static struct drm_crtc *
85nvd0_display_crtc_get(struct drm_encoder *encoder)
86{
87 return nouveau_encoder(encoder)->crtc;
88}
89
90/******************************************************************************
91 * EVO channel helpers
92 *****************************************************************************/
/*
 * Submit a single "immediate" method to EVO channel @id via the debug
 * registers at 0x6107xx, polling for completion.  Returns 0 on success
 * or -EBUSY if the method never completes.
 */
93static inline int
94evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data)
95{
96 struct nouveau_device *device = nouveau_dev(dev);
97 int ret = 0;
	/* enable debug-method mode, write data then method+trigger bit */
98 nv_mask(device, 0x610700 + (id * 0x10), 0x00000001, 0x00000001);
99 nv_wr32(device, 0x610704 + (id * 0x10), data);
100 nv_mask(device, 0x610704 + (id * 0x10), 0x80000ffc, 0x80000000 | mthd);
101 if (!nv_wait(device, 0x610704 + (id * 0x10), 0x80000000, 0x00000000))
102 ret = -EBUSY;
103 nv_mask(device, 0x610700 + (id * 0x10), 0x00000001, 0x00000000);
104 return ret;
105}
106
/*
 * Reserve space for @nr words in EVO channel @id's push buffer and
 * return a CPU pointer to the current PUT position.  When the request
 * would run off the end of the (one page) buffer, a jump-to-start
 * command (0x20000000) is emitted and the routine waits for the channel
 * to drain before wrapping.  Returns NULL if the DMA engine stalls.
 */
107static u32 *
108evo_wait(struct drm_device *dev, int id, int nr)
109{
110 struct nouveau_device *device = nouveau_dev(dev);
111 struct nouveau_drm *drm = nouveau_drm(dev);
112 struct nvd0_display *disp = nvd0_display(dev);
	/* 0x640000+id*0x1000 is the channel PUT register, in bytes */
113 u32 put = nv_rd32(device, 0x640000 + (id * 0x1000)) / 4;
114
115 if (put + nr >= (PAGE_SIZE / 4)) {
116 disp->evo[id].ptr[put] = 0x20000000;
117
118 nv_wr32(device, 0x640000 + (id * 0x1000), 0x00000000);
119 if (!nv_wait(device, 0x640004 + (id * 0x1000), ~0, 0x00000000)) {
120 NV_ERROR(drm, "evo %d dma stalled\n", id);
121 return NULL;
122 }
123
124 put = 0;
125 }
126
127 return disp->evo[id].ptr + put;
128}
129
130static void
131evo_kick(u32 *push, struct drm_device *dev, int id)
132{
133 struct nouveau_device *device = nouveau_dev(dev);
134 struct nvd0_display *disp = nvd0_display(dev);
135
136 nv_wr32(device, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
137}
138
/*
 * Push-buffer emit helpers: evo_mthd() writes a method header (word
 * count in bits 18+, method offset in the low bits), evo_data() appends
 * one data word.  Both advance the cursor pointer.
 */
139#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
140#define evo_data(p,d) *((p)++) = (d)
141
/*
 * Bring up EVO channel @ch in DMA mode: point it at its push buffer,
 * reset PUT, enable the channel and unmask its interrupt bits.
 * The master channel gets an extra flag (0x01000000).  Returns -EBUSY
 * if the channel never reports ready.
 */
142static int
143evo_init_dma(struct drm_device *dev, int ch)
144{
145 struct nouveau_device *device = nouveau_dev(dev);
146 struct nouveau_drm *drm = nouveau_drm(dev);
147 struct nvd0_display *disp = nvd0_display(dev);
148 u32 flags;
149
150 flags = 0x00000000;
151 if (ch == EVO_MASTER)
152 flags |= 0x01000000;
153
	/* push-buffer address (>>8) with a target/valid tag in the low bits */
154 nv_wr32(device, 0x610494 + (ch * 0x0010), (disp->evo[ch].handle >> 8) | 3);
155 nv_wr32(device, 0x610498 + (ch * 0x0010), 0x00010000);
156 nv_wr32(device, 0x61049c + (ch * 0x0010), 0x00000001);
157 nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
158 nv_wr32(device, 0x640000 + (ch * 0x1000), 0x00000000);
159 nv_wr32(device, 0x610490 + (ch * 0x0010), 0x00000013 | flags);
160 if (!nv_wait(device, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000)) {
161 NV_ERROR(drm, "PDISP: ch%d 0x%08x\n", ch,
162 nv_rd32(device, 0x610490 + (ch * 0x0010)));
163 return -EBUSY;
164 }
165
	/* unmask this channel's interrupt bits */
166 nv_mask(device, 0x610090, (1 << ch), (1 << ch));
167 nv_mask(device, 0x6100a0, (1 << ch), (1 << ch));
168 return 0;
169}
170
/*
 * Shut down a DMA-mode EVO channel (no-op if it was never enabled):
 * clear its enable bits, wait for idle and mask its interrupts.
 */
171static void
172evo_fini_dma(struct drm_device *dev, int ch)
173{
174 struct nouveau_device *device = nouveau_dev(dev);
175
	/* bit 4 set means the channel was brought up by evo_init_dma() */
176 if (!(nv_rd32(device, 0x610490 + (ch * 0x0010)) & 0x00000010))
177 return;
178
179 nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000000);
180 nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000003, 0x00000000);
181 nv_wait(device, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000);
182 nv_mask(device, 0x610090, (1 << ch), 0x00000000);
183 nv_mask(device, 0x6100a0, (1 << ch), 0x00000000);
184}
185
186static inline void
187evo_piow(struct drm_device *dev, int ch, u16 mthd, u32 data)
188{
189 struct nouveau_device *device = nouveau_dev(dev);
190 nv_wr32(device, 0x640000 + (ch * 0x1000) + mthd, data);
191}
192
/*
 * Bring up EVO channel @ch in PIO mode and unmask its interrupts.
 * Returns -EBUSY if the channel never signals ready (bit 16).
 */
193static int
194evo_init_pio(struct drm_device *dev, int ch)
195{
196 struct nouveau_device *device = nouveau_dev(dev);
197 struct nouveau_drm *drm = nouveau_drm(dev);
198
199 nv_wr32(device, 0x610490 + (ch * 0x0010), 0x00000001);
200 if (!nv_wait(device, 0x610490 + (ch * 0x0010), 0x00010000, 0x00010000)) {
201 NV_ERROR(drm, "PDISP: ch%d 0x%08x\n", ch,
202 nv_rd32(device, 0x610490 + (ch * 0x0010)));
203 return -EBUSY;
204 }
205
206 nv_mask(device, 0x610090, (1 << ch), (1 << ch));
207 nv_mask(device, 0x6100a0, (1 << ch), (1 << ch));
208 return 0;
209}
210
/*
 * Shut down a PIO-mode EVO channel (no-op if it was never enabled),
 * waiting for it to go idle and masking its interrupts.
 */
211static void
212evo_fini_pio(struct drm_device *dev, int ch)
213{
214 struct nouveau_device *device = nouveau_dev(dev);
215
216 if (!(nv_rd32(device, 0x610490 + (ch * 0x0010)) & 0x00000001))
217 return;
218
219 nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
220 nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000001, 0x00000000);
221 nv_wait(device, 0x610490 + (ch * 0x0010), 0x00010000, 0x00000000);
222 nv_mask(device, 0x610090, (1 << ch), 0x00000000);
223 nv_mask(device, 0x6100a0, (1 << ch), 0x00000000);
224}
225
226static bool
227evo_sync_wait(void *data)
228{
229 return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000;
230}
231
/*
 * Flush EVO channel @ch and wait until the hardware has processed
 * everything queued so far, using a notifier write into the shared
 * sync bo.  Returns 0 on completion, -EBUSY on push-buffer stall or
 * notifier timeout.
 */
232static int
233evo_sync(struct drm_device *dev, int ch)
234{
235 struct nouveau_device *device = nouveau_dev(dev);
236 struct nvd0_display *disp = nvd0_display(dev);
237 u32 *push = evo_wait(dev, ch, 8);
238 if (push) {
239 nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
	/* 0x0084 = notifier method; 0x80000000 requests a write */
240 evo_mthd(push, 0x0084, 1);
241 evo_data(push, 0x80000000 | EVO_MAST_NTFY);
242 evo_mthd(push, 0x0080, 2);
243 evo_data(push, 0x00000000);
244 evo_data(push, 0x00000000);
245 evo_kick(push, dev, ch);
246 if (nv_wait_cb(device, evo_sync_wait, disp->sync))
247 return 0;
248 }
249
250 return -EBUSY;
251}
252
253/******************************************************************************
254 * Page flipping channel
255 *****************************************************************************/
256struct nouveau_bo *
257nvd0_display_crtc_sema(struct drm_device *dev, int crtc)
258{
259 return nvd0_display(dev)->sync;
260}
261
/*
 * Cancel pending page-flip activity on @crtc's flip channel by clearing
 * the notifier, semaphore-acquire and image methods, then kicking an
 * update.
 */
262void
263nvd0_display_flip_stop(struct drm_crtc *crtc)
264{
265 struct nvd0_display *disp = nvd0_display(crtc->dev);
266 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
267 struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
268 u32 *push;
269
270 push = evo_wait(crtc->dev, evo->idx, 8);
271 if (push) {
272 evo_mthd(push, 0x0084, 1);
273 evo_data(push, 0x00000000);
274 evo_mthd(push, 0x0094, 1);
275 evo_data(push, 0x00000000);
276 evo_mthd(push, 0x00c0, 1);
277 evo_data(push, 0x00000000);
278 evo_mthd(push, 0x0080, 1);
279 evo_data(push, 0x00000000);
280 evo_kick(push, crtc->dev, evo->idx);
281 }
282}
283
/*
 * Queue a page flip to @fb on @crtc's flip channel.  When a rendering
 * channel is given, a two-stage semaphore handshake is emitted on it so
 * the flip waits for rendering and rendering waits for flip completion;
 * otherwise the semaphore is released by the CPU and the display is
 * synced directly.  @swap_interval of 0 requests an immediate (non-
 * vblank-synchronised) flip.  Returns 0 on success, -EBUSY if the flip
 * channel's push buffer stalls, or a negative error from RING_SPACE.
 */
284int
285nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
286 struct nouveau_channel *chan, u32 swap_interval)
287{
288 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
289 struct nvd0_display *disp = nvd0_display(crtc->dev);
290 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
291 struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
292 u64 offset;
293 u32 *push;
294 int ret;
295
296 swap_interval <<= 4;
297 if (swap_interval == 0)
298 swap_interval |= 0x100;
299
300 push = evo_wait(crtc->dev, evo->idx, 128);
301 if (unlikely(push == NULL))
302 return -EBUSY;
303
304 /* synchronise with the rendering channel, if necessary */
305 if (likely(chan)) {
306 ret = RING_SPACE(chan, 10);
307 if (ret)
308 return ret;
309
310
311 offset = nvc0_fence_crtc(chan, nv_crtc->index);
312 offset += evo->sem.offset;
313
	/* release 0xf00d#### at offset, then acquire 0x74b1e000 at offset^0x10 */
314 BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
315 OUT_RING (chan, upper_32_bits(offset));
316 OUT_RING (chan, lower_32_bits(offset));
317 OUT_RING (chan, 0xf00d0000 | evo->sem.value);
318 OUT_RING (chan, 0x1002);
319 BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
320 OUT_RING (chan, upper_32_bits(offset));
321 OUT_RING (chan, lower_32_bits(offset ^ 0x10));
322 OUT_RING (chan, 0x74b1e000);
323 OUT_RING (chan, 0x1001);
324 FIRE_RING (chan);
325 } else {
	/* no GPU channel: release the semaphore from the CPU */
326 nouveau_bo_wr32(disp->sync, evo->sem.offset / 4,
327 0xf00d0000 | evo->sem.value);
328 evo_sync(crtc->dev, EVO_MASTER);
329 }
330
331 /* queue the flip */
332 evo_mthd(push, 0x0100, 1);
333 evo_data(push, 0xfffe0000);
334 evo_mthd(push, 0x0084, 1);
335 evo_data(push, swap_interval);
336 if (!(swap_interval & 0x00000100)) {
337 evo_mthd(push, 0x00e0, 1);
338 evo_data(push, 0x40000000);
339 }
340 evo_mthd(push, 0x0088, 4);
341 evo_data(push, evo->sem.offset);
342 evo_data(push, 0xf00d0000 | evo->sem.value);
343 evo_data(push, 0x74b1e000);
344 evo_data(push, NvEvoSync);
345 evo_mthd(push, 0x00a0, 2);
346 evo_data(push, 0x00000000);
347 evo_data(push, 0x00000000);
348 evo_mthd(push, 0x00c0, 1);
349 evo_data(push, nv_fb->r_dma);
350 evo_mthd(push, 0x0110, 2);
351 evo_data(push, 0x00000000);
352 evo_data(push, 0x00000000);
353 evo_mthd(push, 0x0400, 5);
354 evo_data(push, nv_fb->nvbo->bo.offset >> 8);
355 evo_data(push, 0);
356 evo_data(push, (fb->height << 16) | fb->width);
357 evo_data(push, nv_fb->r_pitch);
358 evo_data(push, nv_fb->r_format);
359 evo_mthd(push, 0x0080, 1);
360 evo_data(push, 0x00000000);
361 evo_kick(push, crtc->dev, evo->idx);
362
	/* ping-pong between the two semaphore slots for the next flip */
363 evo->sem.offset ^= 0x10;
364 evo->sem.value++;
365 return 0;
366}
367
368/******************************************************************************
369 * CRTC
370 *****************************************************************************/
/*
 * Program dithering for @nv_crtc from the connector's dithering mode
 * and depth properties.  AUTO mode enables dynamic 2x2 dithering only
 * when the framebuffer depth exceeds what the sink can display.
 * @update pushes an immediate UPDATE method as well.  Always returns 0.
 */
371static int
372nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
373{
374 struct nouveau_drm *drm = nouveau_drm(nv_crtc->base.dev);
375 struct drm_device *dev = nv_crtc->base.dev;
376 struct nouveau_connector *nv_connector;
377 struct drm_connector *connector;
378 u32 *push, mode = 0x00;
379 u32 mthd;
380
381 nv_connector = nouveau_crtc_connector_get(nv_crtc);
382 connector = &nv_connector->base;
383 if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
384 if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
385 mode = DITHERING_MODE_DYNAMIC2X2;
386 } else {
387 mode = nv_connector->dithering_mode;
388 }
389
390 if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
391 if (connector->display_info.bpc >= 8)
392 mode |= DITHERING_DEPTH_8BPC;
393 } else {
394 mode |= nv_connector->dithering_depth;
395 }
396
	/* the dither method moved between NVD0 and NVE0 */
397 if (nv_device(drm->device)->card_type < NV_E0)
398 mthd = 0x0490 + (nv_crtc->index * 0x0300);
399 else
400 mthd = 0x04a0 + (nv_crtc->index * 0x0300);
401
402 push = evo_wait(dev, EVO_MASTER, 4);
403 if (push) {
404 evo_mthd(push, mthd, 1);
405 evo_data(push, mode);
406 if (update) {
407 evo_mthd(push, 0x0080, 1);
408 evo_data(push, 0x00000000);
409 }
410 evo_kick(push, dev, EVO_MASTER);
411 }
412
413 return 0;
414}
415
/*
 * Program the head's scaler output size from the connector's scaling
 * mode, applying underscan border compensation first and CENTER/ASPECT
 * adjustment second.  When @update is set, pending flips are stopped
 * and re-queued so the change takes effect.  Always returns 0.
 */
416static int
417nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
418{
419 struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
420 struct drm_device *dev = nv_crtc->base.dev;
421 struct drm_crtc *crtc = &nv_crtc->base;
422 struct nouveau_connector *nv_connector;
423 int mode = DRM_MODE_SCALE_NONE;
424 u32 oX, oY, *push;
425
426 /* start off at the resolution we programmed the crtc for, this
427 * effectively handles NONE/FULL scaling
428 */
429 nv_connector = nouveau_crtc_connector_get(nv_crtc);
430 if (nv_connector && nv_connector->native_mode)
431 mode = nv_connector->scaling_mode;
432
433 if (mode != DRM_MODE_SCALE_NONE)
434 omode = nv_connector->native_mode;
435 else
436 omode = umode;
437
438 oX = omode->hdisplay;
439 oY = omode->vdisplay;
440 if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
441 oY *= 2;
442
443 /* add overscan compensation if necessary, will keep the aspect
444 * ratio the same as the backend mode unless overridden by the
445 * user setting both hborder and vborder properties.
446 */
447 if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
448 (nv_connector->underscan == UNDERSCAN_AUTO &&
449 nv_connector->edid &&
450 drm_detect_hdmi_monitor(nv_connector->edid)))) {
451 u32 bX = nv_connector->underscan_hborder;
452 u32 bY = nv_connector->underscan_vborder;
	/* 19-bit fixed-point aspect ratio used for border derivation */
453 u32 aspect = (oY << 19) / oX;
454
455 if (bX) {
456 oX -= (bX * 2);
457 if (bY) oY -= (bY * 2);
458 else oY = ((oX * aspect) + (aspect / 2)) >> 19;
459 } else {
	/* no explicit border: shave ~6% + 32 pixels horizontally */
460 oX -= (oX >> 4) + 32;
461 if (bY) oY -= (bY * 2);
462 else oY = ((oX * aspect) + (aspect / 2)) >> 19;
463 }
464 }
465
466 /* handle CENTER/ASPECT scaling, taking into account the areas
467 * removed already for overscan compensation
468 */
469 switch (mode) {
470 case DRM_MODE_SCALE_CENTER:
471 oX = min((u32)umode->hdisplay, oX);
472 oY = min((u32)umode->vdisplay, oY);
473 /* fall-through */
474 case DRM_MODE_SCALE_ASPECT:
475 if (oY < oX) {
476 u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
477 oX = ((oY * aspect) + (aspect / 2)) >> 19;
478 } else {
479 u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
480 oY = ((oX * aspect) + (aspect / 2)) >> 19;
481 }
482 break;
483 default:
484 break;
485 }
486
487 push = evo_wait(dev, EVO_MASTER, 8);
488 if (push) {
489 evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
490 evo_data(push, (oY << 16) | oX);
491 evo_data(push, (oY << 16) | oX);
492 evo_data(push, (oY << 16) | oX);
493 evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
494 evo_data(push, 0x00000000);
495 evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
496 evo_data(push, (umode->vdisplay << 16) | umode->hdisplay);
497 evo_kick(push, dev, EVO_MASTER);
498 if (update) {
499 nvd0_display_flip_stop(crtc);
500 nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
501 }
502 }
503
504 return 0;
505}
506
/*
 * Point @nv_crtc's scanout at @fb with panning offset (@x, @y).
 * Programs surface offset, size, pitch, format and DMA handle on the
 * master channel; @update additionally kicks an UPDATE method.
 * Caches the fb's tiling/DMA word for nvd0_crtc_commit().  Returns 0.
 */
507static int
508nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
509 int x, int y, bool update)
510{
511 struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
512 u32 *push;
513
514 push = evo_wait(fb->dev, EVO_MASTER, 16);
515 if (push) {
516 evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
517 evo_data(push, nvfb->nvbo->bo.offset >> 8);
518 evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4);
519 evo_data(push, (fb->height << 16) | fb->width);
520 evo_data(push, nvfb->r_pitch);
521 evo_data(push, nvfb->r_format);
522 evo_data(push, nvfb->r_dma);
523 evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
524 evo_data(push, (y << 16) | x);
525 if (update) {
526 evo_mthd(push, 0x0080, 1);
527 evo_data(push, 0x00000000);
528 }
529 evo_kick(push, fb->dev, EVO_MASTER);
530 }
531
532 nv_crtc->fb.tile_flags = nvfb->r_dma;
533 return 0;
534}
535
/*
 * Show or hide the hardware cursor on @nv_crtc.  Showing points the
 * cursor at the pre-allocated cursor bo; hiding clears the enable and
 * DMA methods.  @update pushes an UPDATE method afterwards.
 */
536static void
537nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update)
538{
539 struct drm_device *dev = nv_crtc->base.dev;
540 u32 *push = evo_wait(dev, EVO_MASTER, 16);
541 if (push) {
542 if (show) {
543 evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
544 evo_data(push, 0x85000000);
545 evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
546 evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
547 evo_data(push, NvEvoVRAM);
548 } else {
549 evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1);
550 evo_data(push, 0x05000000);
551 evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
552 evo_data(push, 0x00000000);
553 }
554
555 if (update) {
556 evo_mthd(push, 0x0080, 1);
557 evo_data(push, 0x00000000);
558 }
559
560 evo_kick(push, dev, EVO_MASTER);
561 }
562}
563
/*
 * CRTC .dpms hook -- intentionally empty; head power management is not
 * handled here on this hardware (prepare/commit do the work instead).
 */
564static void
565nvd0_crtc_dpms(struct drm_crtc *crtc, int mode)
566{
567}
568
/*
 * CRTC .prepare hook: quiesce the head before a modeset -- stop flips,
 * blank the core surface (0x03000000) and clear its DMA context, then
 * hide the cursor.
 */
569static void
570nvd0_crtc_prepare(struct drm_crtc *crtc)
571{
572 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
573 u32 *push;
574
575 nvd0_display_flip_stop(crtc);
576
577 push = evo_wait(crtc->dev, EVO_MASTER, 2);
578 if (push) {
579 evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
580 evo_data(push, 0x00000000);
581 evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1);
582 evo_data(push, 0x03000000);
583 evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
584 evo_data(push, 0x00000000);
585 evo_kick(push, crtc->dev, EVO_MASTER);
586 }
587
588 nvd0_crtc_cursor_show(nv_crtc, false, false);
589}
590
/*
 * CRTC .commit hook: re-enable the head after a modeset -- restore the
 * surface tiling flags cached by nvd0_crtc_set_image(), re-bind the LUT
 * buffer, restore cursor visibility and restart page flipping.
 */
591static void
592nvd0_crtc_commit(struct drm_crtc *crtc)
593{
594 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
595 u32 *push;
596
597 push = evo_wait(crtc->dev, EVO_MASTER, 32);
598 if (push) {
599 evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
600 evo_data(push, nv_crtc->fb.tile_flags);
601 evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
602 evo_data(push, 0x83000000);
603 evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
604 evo_data(push, 0x00000000);
605 evo_data(push, 0x00000000);
606 evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
607 evo_data(push, NvEvoVRAM);
608 evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
609 evo_data(push, 0xffffff00);
610 evo_kick(push, crtc->dev, EVO_MASTER);
611 }
612
613 nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, true);
614 nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
615}
616
/*
 * CRTC .mode_fixup hook: no hardware-mandated adjustments are needed,
 * accept every mode unchanged.
 */
617static bool
618nvd0_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
619 struct drm_display_mode *adjusted_mode)
620{
621 return true;
622}
623
624static int
625nvd0_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
626{
627 struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
628 int ret;
629
630 ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
631 if (ret)
632 return ret;
633
634 if (old_fb) {
635 nvfb = nouveau_framebuffer(old_fb);
636 nouveau_bo_unpin(nvfb->nvbo);
637 }
638
639 return 0;
640}
641
/*
 * CRTC .mode_set hook: derive the raw timing words (active, sync,
 * blanking, with interlace/doublescan scaling) from @mode, pin the new
 * framebuffer, and program timings, pixel clock, dither, scaler and
 * scanout surface on the master channel.  Returns 0 or the framebuffer
 * pin error.
 */
642static int
643nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
644 struct drm_display_mode *mode, int x, int y,
645 struct drm_framebuffer *old_fb)
646{
647 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
648 struct nouveau_connector *nv_connector;
649 u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
650 u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
651 u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
652 u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
653 u32 vblan2e = 0, vblan2s = 1;
654 u32 *push;
655 int ret;
656
657 hactive = mode->htotal;
658 hsynce = mode->hsync_end - mode->hsync_start - 1;
659 hbackp = mode->htotal - mode->hsync_end;
660 hblanke = hsynce + hbackp;
661 hfrontp = mode->hsync_start - mode->hdisplay;
662 hblanks = mode->htotal - hfrontp - 1;
663
664 vactive = mode->vtotal * vscan / ilace;
665 vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
666 vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
667 vblanke = vsynce + vbackp;
668 vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
669 vblanks = vactive - vfrontp - 1;
	/* interlace needs a second vblank window for the odd field */
670 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
671 vblan2e = vactive + vsynce + vbackp;
672 vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
673 vactive = (vactive * 2) + 1;
674 }
675
676 ret = nvd0_crtc_swap_fbs(crtc, old_fb);
677 if (ret)
678 return ret;
679
680 push = evo_wait(crtc->dev, EVO_MASTER, 64);
681 if (push) {
682 evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
683 evo_data(push, 0x00000000);
684 evo_data(push, (vactive << 16) | hactive);
685 evo_data(push, ( vsynce << 16) | hsynce);
686 evo_data(push, (vblanke << 16) | hblanke);
687 evo_data(push, (vblanks << 16) | hblanks);
688 evo_data(push, (vblan2e << 16) | vblan2s);
689 evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
690 evo_data(push, 0x00000000); /* ??? */
691 evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
692 evo_data(push, mode->clock * 1000);
693 evo_data(push, 0x00200000); /* ??? */
694 evo_data(push, mode->clock * 1000);
695 evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
696 evo_data(push, 0x00000311);
697 evo_data(push, 0x00000100);
698 evo_kick(push, crtc->dev, EVO_MASTER);
699 }
700
701 nv_connector = nouveau_crtc_connector_get(nv_crtc);
702 nvd0_crtc_set_dither(nv_crtc, false);
703 nvd0_crtc_set_scale(nv_crtc, false);
704 nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
705 return 0;
706}
707
708static int
709nvd0_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
710 struct drm_framebuffer *old_fb)
711{
712 struct nouveau_drm *drm = nouveau_drm(crtc->dev);
713 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
714 int ret;
715
716 if (!crtc->fb) {
717 NV_DEBUG(drm, "No FB bound\n");
718 return 0;
719 }
720
721 ret = nvd0_crtc_swap_fbs(crtc, old_fb);
722 if (ret)
723 return ret;
724
725 nvd0_display_flip_stop(crtc);
726 nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
727 nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
728 return 0;
729}
730
731static int
732nvd0_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
733 struct drm_framebuffer *fb, int x, int y,
734 enum mode_set_atomic state)
735{
736 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
737 nvd0_display_flip_stop(crtc);
738 nvd0_crtc_set_image(nv_crtc, fb, x, y, true);
739 return 0;
740}
741
/*
 * Copy the software gamma table into the head's LUT buffer object.
 * Each of the 256 entries occupies 0x20 bytes; the stored 16-bit
 * values are scaled to 14 bits and biased by 0x6000 as the hardware
 * format requires.
 */
742static void
743nvd0_crtc_lut_load(struct drm_crtc *crtc)
744{
745 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
746 void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
747 int i;
748
749 for (i = 0; i < 256; i++) {
750 writew(0x6000 + (nv_crtc->lut.r[i] >> 2), lut + (i * 0x20) + 0);
751 writew(0x6000 + (nv_crtc->lut.g[i] >> 2), lut + (i * 0x20) + 2);
752 writew(0x6000 + (nv_crtc->lut.b[i] >> 2), lut + (i * 0x20) + 4);
753 }
754}
755
/*
 * CRTC .cursor_set hook: @handle == 0 hides the cursor; otherwise the
 * referenced GEM object (must be 64x64) is copied word-by-word into the
 * head's private cursor bo and the cursor is shown.  Returns 0, -EINVAL
 * for bad dimensions, -ENOENT for a bad handle, or a map error.
 */
756static int
757nvd0_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
758 uint32_t handle, uint32_t width, uint32_t height)
759{
760 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
761 struct drm_device *dev = crtc->dev;
762 struct drm_gem_object *gem;
763 struct nouveau_bo *nvbo;
764 bool visible = (handle != 0);
765 int i, ret = 0;
766
767 if (visible) {
768 if (width != 64 || height != 64)
769 return -EINVAL;
770
771 gem = drm_gem_object_lookup(dev, file_priv, handle);
772 if (unlikely(!gem))
773 return -ENOENT;
774 nvbo = nouveau_gem_object(gem);
775
776 ret = nouveau_bo_map(nvbo);
777 if (ret == 0) {
778 for (i = 0; i < 64 * 64; i++) {
779 u32 v = nouveau_bo_rd32(nvbo, i);
780 nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v);
781 }
782 nouveau_bo_unmap(nvbo);
783 }
784
785 drm_gem_object_unreference_unlocked(gem);
786 }
787
	/* only touch the hardware when visibility actually changes */
788 if (visible != nv_crtc->cursor.visible) {
789 nvd0_crtc_cursor_show(nv_crtc, visible, true);
790 nv_crtc->cursor.visible = visible;
791 }
792
793 return ret;
794}
795
796static int
797nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
798{
799 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
800 int ch = EVO_CURS(nv_crtc->index);
801
802 evo_piow(crtc->dev, ch, 0x0084, (y << 16) | (x & 0xffff));
803 evo_piow(crtc->dev, ch, 0x0080, 0x00000000);
804 return 0;
805}
806
807static void
808nvd0_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
809 uint32_t start, uint32_t size)
810{
811 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
812 u32 end = max(start + size, (u32)256);
813 u32 i;
814
815 for (i = start; i < end; i++) {
816 nv_crtc->lut.r[i] = r[i];
817 nv_crtc->lut.g[i] = g[i];
818 nv_crtc->lut.b[i] = b[i];
819 }
820
821 nvd0_crtc_lut_load(crtc);
822}
823
/* Tear down a CRTC: release the cursor and gamma-LUT buffer objects
 * (each unmapped before its last reference is dropped), then unregister
 * from DRM and free the containing nouveau_crtc. */
static void
nvd0_crtc_destroy(struct drm_crtc *crtc)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	nouveau_bo_unmap(nv_crtc->cursor.nvbo);
	nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
	nouveau_bo_unmap(nv_crtc->lut.nvbo);
	nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
	drm_crtc_cleanup(crtc);
	/* kfree the drm_crtc pointer: it is the first member of
	 * nouveau_crtc (see &nv_crtc->base in nvd0_crtc_create()). */
	kfree(crtc);
}
835
/* CRTC helper vtable: modeset plumbing invoked by the DRM CRTC helpers. */
static const struct drm_crtc_helper_funcs nvd0_crtc_hfunc = {
	.dpms = nvd0_crtc_dpms,
	.prepare = nvd0_crtc_prepare,
	.commit = nvd0_crtc_commit,
	.mode_fixup = nvd0_crtc_mode_fixup,
	.mode_set = nvd0_crtc_mode_set,
	.mode_set_base = nvd0_crtc_mode_set_base,
	.mode_set_base_atomic = nvd0_crtc_mode_set_base_atomic,
	.load_lut = nvd0_crtc_lut_load,
};
846
/* Core CRTC operations exposed to the DRM core (ioctl entry points). */
static const struct drm_crtc_funcs nvd0_crtc_func = {
	.cursor_set = nvd0_crtc_cursor_set,
	.cursor_move = nvd0_crtc_cursor_move,
	.gamma_set = nvd0_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.destroy = nvd0_crtc_destroy,
	.page_flip = nouveau_crtc_page_flip,
};
855
/* No-op on NVD0: cursor positioning goes through nvd0_crtc_cursor_move();
 * this hook only satisfies the nouveau_crtc cursor interface. */
static void
nvd0_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
{
}
860
/* No-op on NVD0: the cursor image is copied into a dedicated buffer in
 * nvd0_crtc_cursor_set(), so no separate offset programming happens here;
 * the hook exists to satisfy the nouveau_crtc cursor interface. */
static void
nvd0_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
{
}
865
/* Allocate and register CRTC @index, including its 64x64 cursor image
 * buffer and gamma-LUT backing store in VRAM.  On any failure after
 * drm_crtc_init(), nvd0_crtc_destroy() tears down whatever was created.
 * Returns 0 on success or a negative errno. */
static int
nvd0_crtc_create(struct drm_device *dev, int index)
{
	struct nouveau_crtc *nv_crtc;
	struct drm_crtc *crtc;
	int ret, i;

	nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
	if (!nv_crtc)
		return -ENOMEM;

	nv_crtc->index = index;
	nv_crtc->set_dither = nvd0_crtc_set_dither;
	nv_crtc->set_scale = nvd0_crtc_set_scale;
	nv_crtc->cursor.set_offset = nvd0_cursor_set_offset;
	nv_crtc->cursor.set_pos = nvd0_cursor_set_pos;
	/* Start with an identity gamma ramp (8-bit index scaled to 16 bits). */
	for (i = 0; i < 256; i++) {
		nv_crtc->lut.r[i] = i << 8;
		nv_crtc->lut.g[i] = i << 8;
		nv_crtc->lut.b[i] = i << 8;
	}

	crtc = &nv_crtc->base;
	drm_crtc_init(dev, crtc, &nvd0_crtc_func);
	drm_crtc_helper_add(crtc, &nvd0_crtc_hfunc);
	drm_mode_crtc_set_gamma_size(crtc, 256);

	/* 64x64x32bpp cursor image, pinned and CPU-mapped in VRAM.
	 * NOTE(review): on pin/map failure only the reference is dropped
	 * (no explicit unpin) — mirrors the LUT path below; confirm
	 * nouveau_bo_ref() handles the pinned case. */
	ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
			     0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
	if (!ret) {
		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
		if (!ret)
			ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
		if (ret)
			nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
	}

	if (ret)
		goto out;

	/* Backing store for the hardware gamma LUT (see nvd0_crtc_lut_load,
	 * which writes 256 entries of 0x20 bytes = 8192 bytes). */
	ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
			     0, 0x0000, NULL, &nv_crtc->lut.nvbo);
	if (!ret) {
		ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
		if (!ret)
			ret = nouveau_bo_map(nv_crtc->lut.nvbo);
		if (ret)
			nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
	}

	if (ret)
		goto out;

	/* Push the initial (identity) ramp into the hardware LUT. */
	nvd0_crtc_lut_load(crtc);

out:
	if (ret)
		nvd0_crtc_destroy(crtc);
	return ret;
}
926
927/******************************************************************************
928 * DAC
929 *****************************************************************************/
/* Program the DAC power state.  STANDBY and OFF set bit 0, SUSPEND and
 * OFF set bit 2, so DPMS_ON clears both.  The control register is
 * polled idle (bit 31 clear) before and after the update. */
static void
nvd0_dac_dpms(struct drm_encoder *encoder, int mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct nouveau_device *device = nouveau_dev(dev);
	int or = nv_encoder->or;
	u32 dpms_ctrl;

	dpms_ctrl = 0x80000000;		/* update-pending trigger bit */
	if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF)
		dpms_ctrl |= 0x00000001;
	if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF)
		dpms_ctrl |= 0x00000004;

	nv_wait(device, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
	nv_mask(device, 0x61a004 + (or * 0x0800), 0xc000007f, dpms_ctrl);
	nv_wait(device, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
}
949
950static bool
951nvd0_dac_mode_fixup(struct drm_encoder *encoder,
952 const struct drm_display_mode *mode,
953 struct drm_display_mode *adjusted_mode)
954{
955 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
956 struct nouveau_connector *nv_connector;
957
958 nv_connector = nouveau_encoder_connector_get(nv_encoder);
959 if (nv_connector && nv_connector->native_mode) {
960 if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
961 int id = adjusted_mode->base.id;
962 *adjusted_mode = *nv_connector->native_mode;
963 adjusted_mode->base.id = id;
964 }
965 }
966
967 return true;
968}
969
/* No-op: nvd0_dac_mode_set() already powers the output on and pushes
 * the full configuration, leaving nothing for the commit stage. */
static void
nvd0_dac_commit(struct drm_encoder *encoder)
{
}
974
/* Program the DAC for @mode: build the sync-polarity and per-head
 * "magic" words, power the output on, then push the CRTC association
 * through the EVO master channel. */
static void
nvd0_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	u32 syncs, magic, *push;

	/* Sync word: bit 3 = negative hsync, bit 4 = negative vsync. */
	syncs = 0x00000001;
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		syncs |= 0x00000008;
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		syncs |= 0x00000010;

	/* Per-head control word; bit 0 flags interlaced scanout. */
	magic = 0x31ec6000 | (nv_crtc->index << 25);
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		magic |= 0x00000001;

	nvd0_dac_dpms(encoder, DRM_MODE_DPMS_ON);

	/* Submit head config + DAC/CRTC binding via the EVO push buffer. */
	push = evo_wait(encoder->dev, EVO_MASTER, 8);
	if (push) {
		evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
		evo_data(push, syncs);
		evo_data(push, magic);
		evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 2);
		evo_data(push, 1 << nv_crtc->index);
		evo_data(push, 0x00ff);
		evo_kick(push, encoder->dev, EVO_MASTER);
	}

	/* Remember which CRTC drives us, for disconnect/dpms. */
	nv_encoder->crtc = encoder->crtc;
}
1008
/* Detach the DAC from its CRTC: blank the head, clear the DAC's
 * mode-control method, and flush with an UPDATE (0x0080).  Safe to call
 * when already disconnected (nv_encoder->crtc == NULL). */
static void
nvd0_dac_disconnect(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	u32 *push;

	if (nv_encoder->crtc) {
		nvd0_crtc_prepare(nv_encoder->crtc);

		push = evo_wait(dev, EVO_MASTER, 4);
		if (push) {
			evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 1);
			evo_data(push, 0x00000000);
			evo_mthd(push, 0x0080, 1);
			evo_data(push, 0x00000000);
			evo_kick(push, dev, EVO_MASTER);
		}

		nv_encoder->crtc = NULL;
	}
}
1031
/* Analog load detection: drive the DAC's sense circuit, wait for it to
 * settle, and report connected when all three RGB load bits
 * (0x38000000) read back set.  The sense register is cleared on exit. */
static enum drm_connector_status
nvd0_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
	enum drm_connector_status status = connector_status_disconnected;
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct nouveau_device *device = nouveau_dev(dev);
	int or = nv_encoder->or;
	u32 load;

	nv_wr32(device, 0x61a00c + (or * 0x800), 0x00100000);
	udelay(9500);	/* settle time before triggering the sense read */
	nv_wr32(device, 0x61a00c + (or * 0x800), 0x80000000);

	load = nv_rd32(device, 0x61a00c + (or * 0x800));
	if ((load & 0x38000000) == 0x38000000)
		status = connector_status_connected;

	nv_wr32(device, 0x61a00c + (or * 0x800), 0x00000000);
	return status;
}
1053
/* Unregister the encoder from DRM and free the enclosing
 * nouveau_encoder (the drm_encoder is embedded in it). */
static void
nvd0_dac_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
1060
/* DAC encoder helper vtable.  Note .prepare and .disable both point at
 * nvd0_dac_disconnect — disconnection is the whole teardown. */
static const struct drm_encoder_helper_funcs nvd0_dac_hfunc = {
	.dpms = nvd0_dac_dpms,
	.mode_fixup = nvd0_dac_mode_fixup,
	.prepare = nvd0_dac_disconnect,
	.commit = nvd0_dac_commit,
	.mode_set = nvd0_dac_mode_set,
	.disable = nvd0_dac_disconnect,
	.get_crtc = nvd0_display_crtc_get,
	.detect = nvd0_dac_detect
};
1071
/* Core DAC encoder ops: only teardown is needed. */
static const struct drm_encoder_funcs nvd0_dac_func = {
	.destroy = nvd0_dac_destroy,
};
1075
/* Create a DAC encoder for DCB entry @dcbe and attach it to
 * @connector.  Returns 0 on success or -ENOMEM. */
static int
nvd0_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
	struct drm_device *dev = connector->dev;
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	nv_encoder->dcb = dcbe;
	/* DCB 'or' is a bitmask; the OR index is its lowest set bit. */
	nv_encoder->or = ffs(dcbe->or) - 1;

	encoder = to_drm_encoder(nv_encoder);
	encoder->possible_crtcs = dcbe->heads;
	encoder->possible_clones = 0;
	drm_encoder_init(dev, encoder, &nvd0_dac_func, DRM_MODE_ENCODER_DAC);
	drm_encoder_helper_add(encoder, &nvd0_dac_hfunc);

	drm_mode_connector_attach_encoder(connector, encoder);
	return 0;
}
1098
1099/******************************************************************************
1100 * Audio
1101 *****************************************************************************/
/* Enable HDA audio on this output if the monitor advertises audio
 * support, and upload the ELD (EDID-Like Data) block to the hardware.
 * NOTE(review): nouveau_encoder_connector_get() is NULL-checked in the
 * mode_fixup paths but not here — assumed non-NULL at mode_set time;
 * confirm against the callers. */
static void
nvd0_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_connector *nv_connector;
	struct drm_device *dev = encoder->dev;
	struct nouveau_device *device = nouveau_dev(dev);
	int i, or = nv_encoder->or * 0x30;

	nv_connector = nouveau_encoder_connector_get(nv_encoder);
	if (!drm_detect_monitor_audio(nv_connector->edid))
		return;

	nv_mask(device, 0x10ec10 + or, 0x80000003, 0x80000001);

	drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
	if (nv_connector->base.eld[0]) {
		u8 *eld = nv_connector->base.eld;

		/* Write the ELD bytes (length in eld[2], units of 4),
		 * then zero-pad the remainder of the 0x60-byte window. */
		for (i = 0; i < eld[2] * 4; i++)
			nv_wr32(device, 0x10ec00 + or, (i << 8) | eld[i]);
		for (i = eld[2] * 4; i < 0x60; i++)
			nv_wr32(device, 0x10ec00 + or, (i << 8) | 0x00);

		nv_mask(device, 0x10ec10 + or, 0x80000002, 0x80000002);
	}
}
1129
/* Disable HDA audio on this output (clears the low enable bits set by
 * nvd0_audio_mode_set() while keeping bit 31 asserted). */
static void
nvd0_audio_disconnect(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct nouveau_device *device = nouveau_dev(dev);
	int or = nv_encoder->or * 0x30;

	nv_mask(device, 0x10ec10 + or, 0x80000003, 0x80000000);
}
1140
1141/******************************************************************************
1142 * HDMI
1143 *****************************************************************************/
/* Configure the per-head HDMI machinery for @mode when the attached
 * monitor is HDMI-capable: AVI InfoFrame, a second (unidentified)
 * InfoFrame, HDMI control with rekey/max_ac_packet, and finally audio.
 * NOTE(review): nv_connector is dereferenced without a NULL check —
 * assumed non-NULL at mode_set time; confirm against the callers. */
static void
nvd0_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nouveau_connector *nv_connector;
	struct drm_device *dev = encoder->dev;
	struct nouveau_device *device = nouveau_dev(dev);
	int head = nv_crtc->index * 0x800;
	u32 rekey = 56; /* binary driver, and tegra constant */
	u32 max_ac_packet;

	nv_connector = nouveau_encoder_connector_get(nv_encoder);
	if (!drm_detect_hdmi_monitor(nv_connector->edid))
		return;

	/* Audio-packet budget derived from the horizontal blanking span. */
	max_ac_packet = mode->htotal - mode->hdisplay;
	max_ac_packet -= rekey;
	max_ac_packet -= 18; /* constant from tegra */
	max_ac_packet /= 32;

	/* AVI InfoFrame: disable, load payload, re-enable. */
	nv_mask(device, 0x616714 + head, 0x00000001, 0x00000000);
	nv_wr32(device, 0x61671c + head, 0x000d0282);
	nv_wr32(device, 0x616720 + head, 0x0000006f);
	nv_wr32(device, 0x616724 + head, 0x00000000);
	nv_wr32(device, 0x616728 + head, 0x00000000);
	nv_wr32(device, 0x61672c + head, 0x00000000);
	nv_mask(device, 0x616714 + head, 0x00000001, 0x00000001);

	/* ??? InfoFrame? */
	nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000000);
	nv_wr32(device, 0x6167ac + head, 0x00000010);
	nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000001);

	/* HDMI_CTRL */
	nv_mask(device, 0x616798 + head, 0x401f007f, 0x40000000 | rekey |
						     max_ac_packet << 16);

	/* NFI, audio doesn't work without it though.. */
	nv_mask(device, 0x616548 + head, 0x00000070, 0x00000000);

	nvd0_audio_mode_set(encoder, mode);
}
1188
/* Undo nvd0_hdmi_mode_set(): tear down audio first, then disable
 * HDMI control and both InfoFrames for this head. */
static void
nvd0_hdmi_disconnect(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
	struct drm_device *dev = encoder->dev;
	struct nouveau_device *device = nouveau_dev(dev);
	int head = nv_crtc->index * 0x800;

	nvd0_audio_disconnect(encoder);

	nv_mask(device, 0x616798 + head, 0x40000000, 0x00000000);
	nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000000);
	nv_mask(device, 0x616714 + head, 0x00000001, 0x00000000);
}
1204
1205/******************************************************************************
1206 * SOR
1207 *****************************************************************************/
1208static inline u32
1209nvd0_sor_dp_lane_map(struct drm_device *dev, struct dcb_output *dcb, u8 lane)
1210{
1211 static const u8 nvd0[] = { 16, 8, 0, 24 };
1212 return nvd0[lane];
1213}
1214
/* Broadcast the DP link-training @pattern to all four lanes of the
 * SOR link selected by @dcb (one nibble per lane). */
static void
nvd0_sor_dp_train_set(struct drm_device *dev, struct dcb_output *dcb, u8 pattern)
{
	struct nouveau_device *device = nouveau_dev(dev);
	const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
	const u32 loff = (or * 0x800) + (link * 0x80);
	nv_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
}
1223
/* Apply voltage-swing/pre-emphasis drive settings for one DP lane
 * during link training, using per-board values from the VBIOS DP table
 * (versions 0x30 and 0x40 are laid out differently). */
static void
nvd0_sor_dp_train_adj(struct drm_device *dev, struct dcb_output *dcb,
		      u8 lane, u8 swing, u8 preem)
{
	struct nouveau_device *device = nouveau_dev(dev);
	struct nouveau_drm *drm = nouveau_drm(dev);
	const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
	const u32 loff = (or * 0x800) + (link * 0x80);
	u32 shift = nvd0_sor_dp_lane_map(dev, dcb, lane);
	u32 mask = 0x000000ff << shift;
	u8 *table, *entry, *config = NULL;

	/* Fold swing level into a combined swing/preem row index. */
	switch (swing) {
	case 0: preem += 0; break;
	case 1: preem += 4; break;
	case 2: preem += 7; break;
	case 3: preem += 9; break;
	}

	table = nouveau_dp_bios_data(dev, dcb, &entry);
	if (table) {
		if (table[0] == 0x30) {
			config = entry + table[4];
			config += table[5] * preem;
		} else
		if (table[0] == 0x40) {
			config = table + table[1];
			config += table[2] * table[3];
			config += table[6] * preem;
		}
	}

	if (!config) {
		NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
		return;
	}

	/* config[1..3]: drive, preemphasis and a shared tuning value. */
	nv_mask(device, 0x61c118 + loff, mask, config[1] << shift);
	nv_mask(device, 0x61c120 + loff, mask, config[2] << shift);
	nv_mask(device, 0x61c130 + loff, 0x0000ff00, config[3] << 8);
	nv_mask(device, 0x61c13c + loff, 0x00000000, 0x00000000);
}
1266
/* Configure the DP link: run the VBIOS link-rate init script, then
 * program the clock divider, lane-enable mask and (optionally)
 * enhanced framing.
 * NOTE(review): 'script' is initialised to 0 and never updated from
 * the matched table entry before nouveau_bios_run_init_table() —
 * verify whether the entry's script pointer should be used here.
 * NOTE(review): the 'while (entry)' walk relies on the table
 * containing an entry with entry[0] >= link_bw; it cannot terminate
 * on its own otherwise. */
static void
nvd0_sor_dp_link_set(struct drm_device *dev, struct dcb_output *dcb, int crtc,
		     int link_nr, u32 link_bw, bool enhframe)
{
	struct nouveau_device *device = nouveau_dev(dev);
	const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
	const u32 loff = (or * 0x800) + (link * 0x80);
	const u32 soff = (or * 0x800);
	u32 dpctrl = nv_rd32(device, 0x61c10c + loff) & ~0x001f4000;
	u32 clksor = nv_rd32(device, 0x612300 + soff) & ~0x007c0000;
	u32 script = 0x0000, lane_mask = 0;
	u8 *table, *entry;
	int i;

	/* Convert to the divider units used by the VBIOS table. */
	link_bw /= 27000;

	table = nouveau_dp_bios_data(dev, dcb, &entry);
	if (table) {
		if (table[0] == 0x30) entry = ROMPTR(dev, entry[10]);
		else if (table[0] == 0x40) entry = ROMPTR(dev, entry[9]);
		else entry = NULL;

		while (entry) {
			if (entry[0] >= link_bw)
				break;
			entry += 3;
		}

		nouveau_bios_run_init_table(dev, script, dcb, crtc);
	}

	clksor |= link_bw << 18;
	dpctrl |= ((1 << link_nr) - 1) << 16;
	if (enhframe)
		dpctrl |= 0x00004000;

	/* Collect the physical-lane enable bits for the active lanes. */
	for (i = 0; i < link_nr; i++)
		lane_mask |= 1 << (nvd0_sor_dp_lane_map(dev, dcb, i) >> 3);

	nv_wr32(device, 0x612300 + soff, clksor);
	nv_wr32(device, 0x61c10c + loff, dpctrl);
	nv_mask(device, 0x61c130 + loff, 0x0000000f, lane_mask);
}
1310
1311static void
1312nvd0_sor_dp_link_get(struct drm_device *dev, struct dcb_output *dcb,
1313 u32 *link_nr, u32 *link_bw)
1314{
1315 struct nouveau_device *device = nouveau_dev(dev);
1316 const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
1317 const u32 loff = (or * 0x800) + (link * 0x80);
1318 const u32 soff = (or * 0x800);
1319 u32 dpctrl = nv_rd32(device, 0x61c10c + loff) & 0x000f0000;
1320 u32 clksor = nv_rd32(device, 0x612300 + soff);
1321
1322 if (dpctrl > 0x00030000) *link_nr = 4;
1323 else if (dpctrl > 0x00010000) *link_nr = 2;
1324 else *link_nr = 1;
1325
1326 *link_bw = (clksor & 0x007c0000) >> 18;
1327 *link_bw *= 27000;
1328}
1329
/* Compute and program the DP transfer-unit (TU) fill value for a head,
 * from the stream datarate and the active link configuration.  The
 * 64-bit do_div() arithmetic scales the data/link bandwidth ratio by
 * the 'symbol' fixed-point factor to avoid overflow. */
static void
nvd0_sor_dp_calc_tu(struct drm_device *dev, struct dcb_output *dcb,
		    u32 crtc, u32 datarate)
{
	struct nouveau_device *device = nouveau_dev(dev);
	const u32 symbol = 100000;	/* fixed-point scale factor */
	const u32 TU = 64;		/* transfer-unit size in symbols */
	u32 link_nr, link_bw;
	u64 ratio, value;

	nvd0_sor_dp_link_get(dev, dcb, &link_nr, &link_bw);

	/* ratio = datarate / (lanes * lane bandwidth), scaled by 'symbol'. */
	ratio = datarate;
	ratio *= symbol;
	do_div(ratio, link_nr * link_bw);

	value = (symbol - ratio) * TU;
	value *= ratio;
	do_div(value, symbol);
	do_div(value, symbol);

	value += 5;		/* rounding/margin */
	value |= 0x08000000;	/* enable bit */

	nv_wr32(device, 0x616610 + (crtc * 0x800), value);
}
1356
/* SOR DPMS: power the output up or down, and for DP outputs run link
 * training (or power-down) through the shared nouveau_dp_dpms() path.
 * Two TMDS encoders can share one OR; if the partner encoder on the
 * same OR is still ON, leave the hardware untouched. */
static void
nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct nouveau_device *device = nouveau_dev(dev);
	struct drm_encoder *partner;
	int or = nv_encoder->or;
	u32 dpms_ctrl;

	nv_encoder->last_dpms = mode;

	list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
		struct nouveau_encoder *nv_partner = nouveau_encoder(partner);

		if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
			continue;

		if (nv_partner != nv_encoder &&
		    nv_partner->dcb->or == nv_encoder->dcb->or) {
			/* Shared OR still in use: do not power it down. */
			if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
				return;
			break;
		}
	}

	/* Bit 0 = powered on, bit 31 = update trigger. */
	dpms_ctrl = (mode == DRM_MODE_DPMS_ON);
	dpms_ctrl |= 0x80000000;

	nv_wait(device, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
	nv_mask(device, 0x61c004 + (or * 0x0800), 0x80000001, dpms_ctrl);
	nv_wait(device, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
	nv_wait(device, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);

	if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
		struct dp_train_func func = {
			.link_set = nvd0_sor_dp_link_set,
			.train_set = nvd0_sor_dp_train_set,
			.train_adj = nvd0_sor_dp_train_adj
		};

		nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func);
	}
}
1401
1402static bool
1403nvd0_sor_mode_fixup(struct drm_encoder *encoder,
1404 const struct drm_display_mode *mode,
1405 struct drm_display_mode *adjusted_mode)
1406{
1407 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1408 struct nouveau_connector *nv_connector;
1409
1410 nv_connector = nouveau_encoder_connector_get(nv_encoder);
1411 if (nv_connector && nv_connector->native_mode) {
1412 if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
1413 int id = adjusted_mode->base.id;
1414 *adjusted_mode = *nv_connector->native_mode;
1415 adjusted_mode->base.id = id;
1416 }
1417 }
1418
1419 return true;
1420}
1421
/* Detach the SOR from its CRTC: blank the head, clear the SOR's
 * mode-control method, flush with an UPDATE (0x0080), then tear down
 * any HDMI state.  Safe to call when already disconnected. */
static void
nvd0_sor_disconnect(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	u32 *push;

	if (nv_encoder->crtc) {
		nvd0_crtc_prepare(nv_encoder->crtc);

		push = evo_wait(dev, EVO_MASTER, 4);
		if (push) {
			evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
			evo_data(push, 0x00000000);
			evo_mthd(push, 0x0080, 1);
			evo_data(push, 0x00000000);
			evo_kick(push, dev, EVO_MASTER);
		}

		nvd0_hdmi_disconnect(encoder);

		nv_encoder->crtc = NULL;
		nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
	}
}
1447
/* Modeset prepare: disconnect the output, and for DP additionally wait
 * for the EVO master channel to drain before retraining begins. */
static void
nvd0_sor_prepare(struct drm_encoder *encoder)
{
	nvd0_sor_disconnect(encoder);
	if (nouveau_encoder(encoder)->dcb->type == DCB_OUTPUT_DP)
		evo_sync(encoder->dev, EVO_MASTER);
}
1455
/* No-op: nvd0_sor_mode_set() already powers the output on and pushes
 * the full configuration, leaving nothing for the commit stage. */
static void
nvd0_sor_commit(struct drm_encoder *encoder)
{
}
1460
/* Program the SOR for @mode.  Builds the sync/magic head words, then a
 * protocol-specific mode-control and OR-config word for TMDS, LVDS or
 * DP, powers the output on (training DP links), and pushes the final
 * configuration through the EVO master channel. */
static void
nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
		  struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nouveau_connector *nv_connector;
	struct nvbios *bios = &drm->vbios;
	u32 mode_ctrl = (1 << nv_crtc->index);
	u32 syncs, magic, *push;
	u32 or_config;

	/* Sync word: bit 3 = negative hsync, bit 4 = negative vsync. */
	syncs = 0x00000001;
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		syncs |= 0x00000008;
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		syncs |= 0x00000010;

	/* Per-head control word; bit 0 flags interlaced scanout. */
	magic = 0x31ec6000 | (nv_crtc->index << 25);
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		magic |= 0x00000001;

	nv_connector = nouveau_encoder_connector_get(nv_encoder);
	switch (nv_encoder->dcb->type) {
	case DCB_OUTPUT_TMDS:
		/* Link A single-link below 165MHz, dual above; else link B. */
		if (nv_encoder->dcb->sorconf.link & 1) {
			if (mode->clock < 165000)
				mode_ctrl |= 0x00000100;
			else
				mode_ctrl |= 0x00000500;
		} else {
			mode_ctrl |= 0x00000200;
		}

		or_config = (mode_ctrl & 0x00000f00) >> 8;
		if (mode->clock >= 165000)
			or_config |= 0x0100;

		nvd0_hdmi_mode_set(encoder, mode);
		break;
	case DCB_OUTPUT_LVDS:
		or_config = (mode_ctrl & 0x00000f00) >> 8;
		if (bios->fp_no_ddc) {
			/* No DDC: trust the VBIOS panel straps entirely. */
			if (bios->fp.dual_link)
				or_config |= 0x0100;
			if (bios->fp.if_is_24bit)
				or_config |= 0x0200;
		} else {
			/* SPWG panels encode dual-link in EDID byte 121;
			 * NOTE(review): assumes the EDID is present and at
			 * least 122 bytes for this connector type — confirm. */
			if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
				if (((u8 *)nv_connector->edid)[121] == 2)
					or_config |= 0x0100;
			} else
			if (mode->clock >= bios->fp.duallink_transition_clk) {
				or_config |= 0x0100;
			}

			/* Pick the 24-bit strap matching the link width. */
			if (or_config & 0x0100) {
				if (bios->fp.strapless_is_24bit & 2)
					or_config |= 0x0200;
			} else {
				if (bios->fp.strapless_is_24bit & 1)
					or_config |= 0x0200;
			}

			if (nv_connector->base.display_info.bpc == 8)
				or_config |= 0x0200;

		}
		break;
	case DCB_OUTPUT_DP:
		/* Datarate = pixel clock * bits-per-pixel / 8 (18bpp for
		 * 6bpc panels, 24bpp otherwise); syncs gains a depth field. */
		if (nv_connector->base.display_info.bpc == 6) {
			nv_encoder->dp.datarate = mode->clock * 18 / 8;
			syncs |= 0x00000002 << 6;
		} else {
			nv_encoder->dp.datarate = mode->clock * 24 / 8;
			syncs |= 0x00000005 << 6;
		}

		if (nv_encoder->dcb->sorconf.link & 1)
			mode_ctrl |= 0x00000800;
		else
			mode_ctrl |= 0x00000900;

		or_config = (mode_ctrl & 0x00000f00) >> 8;
		break;
	default:
		BUG_ON(1);
		break;
	}

	nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON);

	if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
		nvd0_sor_dp_calc_tu(dev, nv_encoder->dcb, nv_crtc->index,
				    nv_encoder->dp.datarate);
	}

	/* Submit head config + SOR mode control via the EVO push buffer. */
	push = evo_wait(dev, EVO_MASTER, 8);
	if (push) {
		evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
		evo_data(push, syncs);
		evo_data(push, magic);
		evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 2);
		evo_data(push, mode_ctrl);
		evo_data(push, or_config);
		evo_kick(push, dev, EVO_MASTER);
	}

	/* Remember which CRTC drives us, for disconnect/dpms. */
	nv_encoder->crtc = encoder->crtc;
}
1573
/* Unregister the encoder from DRM and free the enclosing
 * nouveau_encoder (the drm_encoder is embedded in it). */
static void
nvd0_sor_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
1580
/* SOR encoder helper vtable.  .disable reuses nvd0_sor_disconnect;
 * .prepare additionally syncs the EVO channel for DP. */
static const struct drm_encoder_helper_funcs nvd0_sor_hfunc = {
	.dpms = nvd0_sor_dpms,
	.mode_fixup = nvd0_sor_mode_fixup,
	.prepare = nvd0_sor_prepare,
	.commit = nvd0_sor_commit,
	.mode_set = nvd0_sor_mode_set,
	.disable = nvd0_sor_disconnect,
	.get_crtc = nvd0_display_crtc_get,
};
1590
/* Core SOR encoder ops: only teardown is needed. */
static const struct drm_encoder_funcs nvd0_sor_func = {
	.destroy = nvd0_sor_destroy,
};
1594
/* Create a SOR encoder (registered as TMDS) for DCB entry @dcbe and
 * attach it to @connector.  Returns 0 on success or -ENOMEM. */
static int
nvd0_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
	struct drm_device *dev = connector->dev;
	struct nouveau_encoder *nv_encoder;
	struct drm_encoder *encoder;

	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
	if (!nv_encoder)
		return -ENOMEM;
	nv_encoder->dcb = dcbe;
	/* DCB 'or' is a bitmask; the OR index is its lowest set bit. */
	nv_encoder->or = ffs(dcbe->or) - 1;
	nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;

	encoder = to_drm_encoder(nv_encoder);
	encoder->possible_crtcs = dcbe->heads;
	encoder->possible_clones = 0;
	drm_encoder_init(dev, encoder, &nvd0_sor_func, DRM_MODE_ENCODER_TMDS);
	drm_encoder_helper_add(encoder, &nvd0_sor_hfunc);

	drm_mode_connector_attach_encoder(connector, encoder);
	return 0;
}
1618
1619/******************************************************************************
1620 * IRQ
1621 *****************************************************************************/
/* Find the DCB entry matching interrupt source @id and its mode-control
 * word @mc.  Ids 0..3 are DACs (analog, mapped one-to-one); ids 4+ are
 * SORs, whose output type and sublink are decoded from mc bits 8..11.
 * Returns NULL (with an error logged) if nothing matches. */
static struct dcb_output *
lookup_dcb(struct drm_device *dev, int id, u32 mc)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int type, or, i, link = -1;

	if (id < 4) {
		type = DCB_OUTPUT_ANALOG;
		or = id;
	} else {
		switch (mc & 0x00000f00) {
		case 0x00000000: link = 0; type = DCB_OUTPUT_LVDS; break;
		case 0x00000100: link = 0; type = DCB_OUTPUT_TMDS; break;
		case 0x00000200: link = 1; type = DCB_OUTPUT_TMDS; break;
		case 0x00000500: link = 0; type = DCB_OUTPUT_TMDS; break;
		case 0x00000800: link = 0; type = DCB_OUTPUT_DP; break;
		case 0x00000900: link = 1; type = DCB_OUTPUT_DP; break;
		default:
			NV_ERROR(drm, "PDISP: unknown SOR mc 0x%08x\n", mc);
			return NULL;
		}

		or = id - 4;
	}

	for (i = 0; i < drm->vbios.dcb.entries; i++) {
		struct dcb_output *dcb = &drm->vbios.dcb.entry[i];
		/* Match type, OR bit, and (for SORs) the sublink. */
		if (dcb->type == type && (dcb->or & (1 << or)) &&
		    (link < 0 || link == !(dcb->sorconf.link & 1)))
			return dcb;
	}

	NV_ERROR(drm, "PDISP: DCB for %d/0x%08x not found\n", id, mc);
	return NULL;
}
1657
/* Modeset interrupt, phase 1: run the VBIOS display script (id 0x0000,
 * head -1) for every output bound to @crtc, then acknowledge the
 * supervisor interrupt so the hardware proceeds to the next phase. */
static void
nvd0_display_unk1_handler(struct drm_device *dev, u32 crtc, u32 mask)
{
	struct nouveau_device *device = nouveau_dev(dev);
	struct dcb_output *dcb;
	int i;

	for (i = 0; mask && i < 8; i++) {
		u32 mcc = nv_rd32(device, 0x640180 + (i * 0x20));
		if (!(mcc & (1 << crtc)))
			continue;

		dcb = lookup_dcb(dev, i, mcc);
		if (!dcb)
			continue;

		nouveau_bios_run_display_table(dev, 0x0000, -1, dcb, crtc);
	}

	/* Acknowledge and re-arm the supervisor interrupt. */
	nv_wr32(device, 0x6101d4, 0x00000000);
	nv_wr32(device, 0x6109d4, 0x00000000);
	nv_wr32(device, 0x6101d0, 0x80000000);
}
1681
/* Modeset interrupt, phase 2: run pre-clock scripts for the outputs on
 * @crtc, set the new pixel clock, then run each output's config script
 * and program the per-OR routing registers.  Finishes by acknowledging
 * the supervisor interrupt. */
static void
nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask)
{
	struct nouveau_device *device = nouveau_dev(dev);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct dcb_output *dcb;
	u32 or, tmp, pclk;
	int i;

	/* Pre-clock-change scripts (head -2) for current bindings. */
	for (i = 0; mask && i < 8; i++) {
		u32 mcc = nv_rd32(device, 0x640180 + (i * 0x20));
		if (!(mcc & (1 << crtc)))
			continue;

		dcb = lookup_dcb(dev, i, mcc);
		if (!dcb)
			continue;

		nouveau_bios_run_display_table(dev, 0x0000, -2, dcb, crtc);
	}

	/* New pixel clock requested by the armed state, in kHz. */
	pclk = nv_rd32(device, 0x660450 + (crtc * 0x300)) / 1000;
	NV_DEBUG(drm, "PDISP: crtc %d pclk %d mask 0x%08x\n",
			 crtc, pclk, mask);
	if (pclk && (mask & 0x00010000)) {
		nv50_crtc_set_clock(dev, crtc, pclk);
	}

	/* Post-clock configuration for the newly-armed bindings. */
	for (i = 0; mask && i < 8; i++) {
		u32 mcp = nv_rd32(device, 0x660180 + (i * 0x20));
		u32 cfg = nv_rd32(device, 0x660184 + (i * 0x20));
		if (!(mcp & (1 << crtc)))
			continue;

		dcb = lookup_dcb(dev, i, mcp);
		if (!dcb)
			continue;
		or = ffs(dcb->or) - 1;

		nouveau_bios_run_display_table(dev, cfg, pclk, dcb, crtc);

		nv_wr32(device, 0x612200 + (crtc * 0x800), 0x00000000);
		switch (dcb->type) {
		case DCB_OUTPUT_ANALOG:
			nv_wr32(device, 0x612280 + (or * 0x800), 0x00000000);
			break;
		case DCB_OUTPUT_TMDS:
		case DCB_OUTPUT_LVDS:
		case DCB_OUTPUT_DP:
			/* cfg bit 8 selects the alternate link setup. */
			if (cfg & 0x00000100)
				tmp = 0x00000101;
			else
				tmp = 0x00000000;

			nv_mask(device, 0x612300 + (or * 0x800), 0x00000707, tmp);
			break;
		default:
			break;
		}

		/* Only one output per head is expected here. */
		break;
	}

	/* Acknowledge and re-arm the supervisor interrupt. */
	nv_wr32(device, 0x6101d4, 0x00000000);
	nv_wr32(device, 0x6109d4, 0x00000000);
	nv_wr32(device, 0x6101d0, 0x80000000);
}
1749
/* Modeset interrupt, phase 4: run the post-modeset scripts (negative
 * pclk signals "after clock change") for the outputs on @crtc, then
 * acknowledge the supervisor interrupt. */
static void
nvd0_display_unk4_handler(struct drm_device *dev, u32 crtc, u32 mask)
{
	struct nouveau_device *device = nouveau_dev(dev);
	struct dcb_output *dcb;
	int pclk, i;

	pclk = nv_rd32(device, 0x660450 + (crtc * 0x300)) / 1000;

	for (i = 0; mask && i < 8; i++) {
		u32 mcp = nv_rd32(device, 0x660180 + (i * 0x20));
		u32 cfg = nv_rd32(device, 0x660184 + (i * 0x20));
		if (!(mcp & (1 << crtc)))
			continue;

		dcb = lookup_dcb(dev, i, mcp);
		if (!dcb)
			continue;

		nouveau_bios_run_display_table(dev, cfg, -pclk, dcb, crtc);
	}

	/* Acknowledge and re-arm the supervisor interrupt. */
	nv_wr32(device, 0x6101d4, 0x00000000);
	nv_wr32(device, 0x6109d4, 0x00000000);
	nv_wr32(device, 0x6101d0, 0x80000000);
}
1776
/* Tasklet bottom half for the modeset supervisor interrupt.  Finds the
 * first CRTC with pending status and dispatches to the phase handler(s)
 * recorded in disp->modeset by nvd0_display_intr(). */
static void
nvd0_display_bh(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	struct nouveau_device *device = nouveau_dev(dev);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvd0_display *disp = nvd0_display(dev);
	u32 mask = 0, crtc = ~0;
	int i;

	if (drm_debug & (DRM_UT_DRIVER | DRM_UT_KMS)) {
		NV_INFO(drm, "PDISP: modeset req %d\n", disp->modeset);
		NV_INFO(drm, " STAT: 0x%08x 0x%08x 0x%08x\n",
			 nv_rd32(device, 0x6101d0),
			 nv_rd32(device, 0x6101d4), nv_rd32(device, 0x6109d4));
		for (i = 0; i < 8; i++) {
			NV_INFO(drm, " %s%d: 0x%08x 0x%08x\n",
				i < 4 ? "DAC" : "SOR", i,
				nv_rd32(device, 0x640180 + (i * 0x20)),
				nv_rd32(device, 0x660180 + (i * 0x20)));
		}
	}

	/* crtc starts at ~0 so the pre-increment begins the scan at 0. */
	while (!mask && ++crtc < dev->mode_config.num_crtc)
		mask = nv_rd32(device, 0x6101d4 + (crtc * 0x800));

	if (disp->modeset & 0x00000001)
		nvd0_display_unk1_handler(dev, crtc, mask);
	if (disp->modeset & 0x00000002)
		nvd0_display_unk2_handler(dev, crtc, mask);
	if (disp->modeset & 0x00000004)
		nvd0_display_unk4_handler(dev, crtc, mask);
}
1810
/* Top-half display interrupt handler.  Acknowledges/handles, in order:
 * bit 0 (unknown status, ack only), bit 1 (EVO channel method errors,
 * logged and cleared per channel), bit 20 (modeset supervisor — state
 * is stashed in disp->modeset and the tasklet scheduled), and masks
 * off the vblank bits which the core handles elsewhere. */
void
nvd0_display_intr(struct drm_device *dev)
{
	struct nvd0_display *disp = nvd0_display(dev);
	struct nouveau_device *device = nouveau_dev(dev);
	struct nouveau_drm *drm = nouveau_drm(dev);
	u32 intr = nv_rd32(device, 0x610088);

	if (intr & 0x00000001) {
		u32 stat = nv_rd32(device, 0x61008c);
		nv_wr32(device, 0x61008c, stat);	/* ack only */
		intr &= ~0x00000001;
	}

	if (intr & 0x00000002) {
		u32 stat = nv_rd32(device, 0x61009c);
		int chid = ffs(stat) - 1;
		if (chid >= 0) {
			/* Log the faulting method, then clear the channel's
			 * error state so it can continue. */
			u32 mthd = nv_rd32(device, 0x6101f0 + (chid * 12));
			u32 data = nv_rd32(device, 0x6101f4 + (chid * 12));
			u32 unkn = nv_rd32(device, 0x6101f8 + (chid * 12));

			NV_INFO(drm, "EvoCh: chid %d mthd 0x%04x data 0x%08x "
				     "0x%08x 0x%08x\n",
				chid, (mthd & 0x0000ffc), data, mthd, unkn);
			nv_wr32(device, 0x61009c, (1 << chid));
			nv_wr32(device, 0x6101f0 + (chid * 12), 0x90000000);
		}

		intr &= ~0x00000002;
	}

	if (intr & 0x00100000) {
		u32 stat = nv_rd32(device, 0x6100ac);

		/* Low three bits: modeset supervisor phases 1/2/4. */
		if (stat & 0x00000007) {
			disp->modeset = stat;
			tasklet_schedule(&disp->tasklet);

			nv_wr32(device, 0x6100ac, (stat & 0x00000007));
			stat &= ~0x00000007;
		}

		if (stat) {
			NV_INFO(drm, "PDISP: unknown intr24 0x%08x\n", stat);
			nv_wr32(device, 0x6100ac, stat);
		}

		intr &= ~0x00100000;
	}

	intr &= ~0x0f000000; /* vblank, handled in core */
	if (intr)
		NV_INFO(drm, "PDISP: unknown intr 0x%08x\n", intr);
}
1866
1867/******************************************************************************
1868 * Init
1869 *****************************************************************************/
1870void
1871nvd0_display_fini(struct drm_device *dev)
1872{
1873 int i;
1874
1875 /* fini cursors + overlays + flips */
1876 for (i = 1; i >= 0; i--) {
1877 evo_fini_pio(dev, EVO_CURS(i));
1878 evo_fini_pio(dev, EVO_OIMM(i));
1879 evo_fini_dma(dev, EVO_OVLY(i));
1880 evo_fini_dma(dev, EVO_FLIP(i));
1881 }
1882
1883 /* fini master */
1884 evo_fini_dma(dev, EVO_MASTER);
1885}
1886
/* Bring the display engine up: clear any pending supervisor state,
 * seed the display core's shadow copies of the DAC/SOR/CRTC state,
 * point the hardware at our hash table, enable interrupts, and start
 * every EVO channel.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * started so far is torn down again via nvd0_display_fini().
 */
int
nvd0_display_init(struct drm_device *dev)
{
	struct nvd0_display *disp = nvd0_display(dev);
	struct nouveau_device *device = nouveau_dev(dev);
	struct nouveau_drm *drm = nouveau_drm(dev);
	int ret, i;
	u32 *push;

	/* If a supervisor interrupt is still pending (0x6100ac bit 8),
	 * ack it and wait for 0x6194e8 to settle before touching
	 * anything else; bail with -EBUSY if it never does.
	 */
	if (nv_rd32(device, 0x6100ac) & 0x00000100) {
		nv_wr32(device, 0x6100ac, 0x00000100);
		nv_mask(device, 0x6194e8, 0x00000001, 0x00000000);
		if (!nv_wait(device, 0x6194e8, 0x00000002, 0x00000000)) {
			NV_ERROR(drm, "PDISP: 0x6194e8 0x%08x\n",
				 nv_rd32(device, 0x6194e8));
			return -EBUSY;
		}
	}

	/* nfi what these are exactly, i do know that SOR_MODE_CTRL won't
	 * work at all unless you do the SOR part below.
	 */
	/* Copy each DAC's state register into the display core. */
	for (i = 0; i < 3; i++) {
		u32 dac = nv_rd32(device, 0x61a000 + (i * 0x800));
		nv_wr32(device, 0x6101c0 + (i * 0x800), dac);
	}

	/* Same for the four SORs. */
	for (i = 0; i < 4; i++) {
		u32 sor = nv_rd32(device, 0x61c000 + (i * 0x800));
		nv_wr32(device, 0x6301c4 + (i * 0x800), sor);
	}

	/* And three per-CRTC state words for every head. */
	for (i = 0; i < dev->mode_config.num_crtc; i++) {
		u32 crtc0 = nv_rd32(device, 0x616104 + (i * 0x800));
		u32 crtc1 = nv_rd32(device, 0x616108 + (i * 0x800));
		u32 crtc2 = nv_rd32(device, 0x61610c + (i * 0x800));
		nv_wr32(device, 0x6101b4 + (i * 0x800), crtc0);
		nv_wr32(device, 0x6101b8 + (i * 0x800), crtc1);
		nv_wr32(device, 0x6101bc + (i * 0x800), crtc2);
	}

	/* point at our hash table / objects, enable interrupts */
	nv_wr32(device, 0x610010, (disp->mem->addr >> 8) | 9);
	nv_mask(device, 0x6100b0, 0x00000307, 0x00000307);

	/* init master */
	ret = evo_init_dma(dev, EVO_MASTER);
	if (ret)
		goto error;

	/* init flips + overlays + cursors */
	for (i = 0; i < dev->mode_config.num_crtc; i++) {
		if ((ret = evo_init_dma(dev, EVO_FLIP(i))) ||
		    (ret = evo_init_dma(dev, EVO_OVLY(i))) ||
		    (ret = evo_init_pio(dev, EVO_OIMM(i))) ||
		    (ret = evo_init_pio(dev, EVO_CURS(i))))
			goto error;
	}

	/* Kick an initial method sequence at the master channel:
	 * bind the sync DMA object (0x0088) and poke 0x0084/0x008c -
	 * presumably notifier/update setup; TODO confirm the exact
	 * method semantics against the EVO class docs.
	 */
	push = evo_wait(dev, EVO_MASTER, 32);
	if (!push) {
		ret = -EBUSY;
		goto error;
	}
	evo_mthd(push, 0x0088, 1);
	evo_data(push, NvEvoSync);
	evo_mthd(push, 0x0084, 1);
	evo_data(push, 0x00000000);
	evo_mthd(push, 0x0084, 1);
	evo_data(push, 0x80000000);
	evo_mthd(push, 0x008c, 1);
	evo_data(push, 0x00000000);
	evo_kick(push, dev, EVO_MASTER);

error:
	/* Fallthrough on success: ret is 0 here, so fini is skipped. */
	if (ret)
		nvd0_display_fini(dev);
	return ret;
}
1966
1967void
1968nvd0_display_destroy(struct drm_device *dev)
1969{
1970 struct nvd0_display *disp = nvd0_display(dev);
1971 struct pci_dev *pdev = dev->pdev;
1972 int i;
1973
1974 for (i = 0; i < EVO_DMA_NR; i++) {
1975 struct evo *evo = &disp->evo[i];
1976 pci_free_consistent(pdev, PAGE_SIZE, evo->ptr, evo->handle);
1977 }
1978
1979 nouveau_gpuobj_ref(NULL, &disp->mem);
1980 nouveau_bo_unmap(disp->sync);
1981 nouveau_bo_ref(NULL, &disp->sync);
1982
1983 nouveau_display(dev)->priv = NULL;
1984 kfree(disp);
1985}
1986
/* Construct the nvd0 display state: CRTCs for each hardware head,
 * encoders/connectors from the VBIOS DCB table, the shared sync buffer,
 * and the hash table + DMA objects every EVO channel needs.
 *
 * Returns 0 on success or a negative errno; any failure after the
 * initial kzalloc unwinds through nvd0_display_destroy().
 */
int
nvd0_display_create(struct drm_device *dev)
{
	struct nouveau_device *device = nouveau_dev(dev);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bar *bar = nouveau_bar(device);
	struct nouveau_fb *pfb = nouveau_fb(device);
	struct dcb_table *dcb = &drm->vbios.dcb;
	struct drm_connector *connector, *tmp;
	struct pci_dev *pdev = dev->pdev;
	struct nvd0_display *disp;
	struct dcb_output *dcbe;
	int crtcs, ret, i;

	/* kzalloc matters: destroy() relies on unset fields being zero. */
	disp = kzalloc(sizeof(*disp), GFP_KERNEL);
	if (!disp)
		return -ENOMEM;

	/* Hook this implementation into the generic display layer. */
	nouveau_display(dev)->priv = disp;
	nouveau_display(dev)->dtor = nvd0_display_destroy;
	nouveau_display(dev)->init = nvd0_display_init;
	nouveau_display(dev)->fini = nvd0_display_fini;

	/* create crtc objects to represent the hw heads */
	/* 0x022448 reports the head count - presumably a straight
	 * integer, not a bitmask; TODO confirm.
	 */
	crtcs = nv_rd32(device, 0x022448);
	for (i = 0; i < crtcs; i++) {
		ret = nvd0_crtc_create(dev, i);
		if (ret)
			goto out;
	}

	/* create encoder/connector objects based on VBIOS DCB table */
	for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
		connector = nouveau_connector_create(dev, dcbe->connector);
		if (IS_ERR(connector))
			continue;

		/* Only on-chip encoders are supported here. */
		if (dcbe->location != DCB_LOC_ON_CHIP) {
			NV_WARN(drm, "skipping off-chip encoder %d/%d\n",
				dcbe->type, ffs(dcbe->or) - 1);
			continue;
		}

		switch (dcbe->type) {
		case DCB_OUTPUT_TMDS:
		case DCB_OUTPUT_LVDS:
		case DCB_OUTPUT_DP:
			nvd0_sor_create(connector, dcbe);
			break;
		case DCB_OUTPUT_ANALOG:
			nvd0_dac_create(connector, dcbe);
			break;
		default:
			NV_WARN(drm, "skipping unsupported encoder %d/%d\n",
				dcbe->type, ffs(dcbe->or) - 1);
			continue;
		}
	}

	/* cull any connectors we created that don't have an encoder */
	list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
		if (connector->encoder_ids[0])
			continue;

		NV_WARN(drm, "%s has no encoders, removing\n",
			drm_get_connector_name(connector));
		connector->funcs->destroy(connector);
	}

	/* setup interrupt handling */
	tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev);

	/* small shared memory area we use for notifiers and semaphores */
	ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
			     0, 0x0000, NULL, &disp->sync);
	if (!ret) {
		ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
		if (!ret)
			ret = nouveau_bo_map(disp->sync);
		/* On pin/map failure drop the BO here so destroy() sees
		 * a clean NULL.
		 */
		if (ret)
			nouveau_bo_ref(NULL, &disp->sync);
	}

	if (ret)
		goto out;

	/* hash table and dma objects for the memory areas we care about */
	ret = nouveau_gpuobj_new(nv_object(device), NULL, 0x4000, 0x10000,
				 NVOBJ_FLAG_ZERO_ALLOC, &disp->mem);
	if (ret)
		goto out;

	/* create evo dma channels */
	for (i = 0; i < EVO_DMA_NR; i++) {
		struct evo *evo = &disp->evo[i];
		u64 offset = disp->sync->bo.offset;
		/* Per-channel layout inside disp->mem: DMA objects at
		 * 0x1000 + i*0x100 (6 words each), hash entries at
		 * i*0x40 (2 words per object: handle, then context
		 * encoding channel id and object offset).
		 */
		u32 dmao = 0x1000 + (i * 0x100);
		u32 hash = 0x0000 + (i * 0x040);

		evo->idx = i;
		evo->sem.offset = EVO_SYNC(evo->idx, 0x00);
		/* PAGE_SIZE coherent push buffer for this channel;
		 * evo->handle is its bus address.
		 */
		evo->ptr = pci_alloc_consistent(pdev, PAGE_SIZE, &evo->handle);
		if (!evo->ptr) {
			ret = -ENOMEM;
			goto out;
		}

		/* NvEvoSync: window over the 4KiB sync buffer (base..base+0xfff). */
		nv_wo32(disp->mem, dmao + 0x00, 0x00000049);
		nv_wo32(disp->mem, dmao + 0x04, (offset + 0x0000) >> 8);
		nv_wo32(disp->mem, dmao + 0x08, (offset + 0x0fff) >> 8);
		nv_wo32(disp->mem, dmao + 0x0c, 0x00000000);
		nv_wo32(disp->mem, dmao + 0x10, 0x00000000);
		nv_wo32(disp->mem, dmao + 0x14, 0x00000000);
		nv_wo32(disp->mem, hash + 0x00, NvEvoSync);
		nv_wo32(disp->mem, hash + 0x04, 0x00000001 | (i << 27) |
						((dmao + 0x00) << 9));

		/* NvEvoVRAM: all of VRAM. */
		nv_wo32(disp->mem, dmao + 0x20, 0x00000049);
		nv_wo32(disp->mem, dmao + 0x24, 0x00000000);
		nv_wo32(disp->mem, dmao + 0x28, (pfb->ram.size - 1) >> 8);
		nv_wo32(disp->mem, dmao + 0x2c, 0x00000000);
		nv_wo32(disp->mem, dmao + 0x30, 0x00000000);
		nv_wo32(disp->mem, dmao + 0x34, 0x00000000);
		nv_wo32(disp->mem, hash + 0x08, NvEvoVRAM);
		nv_wo32(disp->mem, hash + 0x0c, 0x00000001 | (i << 27) |
						((dmao + 0x20) << 9));

		/* NvEvoVRAM_LP: same range, different flags word (0x09
		 * vs 0x49) - presumably a tiling/paging variant; TODO
		 * confirm.
		 */
		nv_wo32(disp->mem, dmao + 0x40, 0x00000009);
		nv_wo32(disp->mem, dmao + 0x44, 0x00000000);
		nv_wo32(disp->mem, dmao + 0x48, (pfb->ram.size - 1) >> 8);
		nv_wo32(disp->mem, dmao + 0x4c, 0x00000000);
		nv_wo32(disp->mem, dmao + 0x50, 0x00000000);
		nv_wo32(disp->mem, dmao + 0x54, 0x00000000);
		nv_wo32(disp->mem, hash + 0x10, NvEvoVRAM_LP);
		nv_wo32(disp->mem, hash + 0x14, 0x00000001 | (i << 27) |
						((dmao + 0x40) << 9));

		/* NvEvoFB32: same range again with flags 0x0fe00009. */
		nv_wo32(disp->mem, dmao + 0x60, 0x0fe00009);
		nv_wo32(disp->mem, dmao + 0x64, 0x00000000);
		nv_wo32(disp->mem, dmao + 0x68, (pfb->ram.size - 1) >> 8);
		nv_wo32(disp->mem, dmao + 0x6c, 0x00000000);
		nv_wo32(disp->mem, dmao + 0x70, 0x00000000);
		nv_wo32(disp->mem, dmao + 0x74, 0x00000000);
		nv_wo32(disp->mem, hash + 0x18, NvEvoFB32);
		nv_wo32(disp->mem, hash + 0x1c, 0x00000001 | (i << 27) |
						((dmao + 0x60) << 9));
	}

	/* Make sure the nv_wo32 writes above are visible to the GPU. */
	bar->flush(bar);

out:
	if (ret)
		nvd0_display_destroy(dev);
	return ret;
}
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 2e566e123e9e..9175615bbd8a 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -561,6 +561,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
561 /* use frac fb div on APUs */ 561 /* use frac fb div on APUs */
562 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) 562 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
563 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; 563 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
564 if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
565 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
564 } else { 566 } else {
565 radeon_crtc->pll_flags |= RADEON_PLL_LEGACY; 567 radeon_crtc->pll_flags |= RADEON_PLL_LEGACY;
566 568
@@ -1697,34 +1699,22 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
1697 DRM_ERROR("unable to allocate a PPLL\n"); 1699 DRM_ERROR("unable to allocate a PPLL\n");
1698 return ATOM_PPLL_INVALID; 1700 return ATOM_PPLL_INVALID;
1699 } else { 1701 } else {
1700 if (ASIC_IS_AVIVO(rdev)) { 1702 /* on pre-R5xx asics, the crtc to pll mapping is hardcoded */
1701 /* in DP mode, the DP ref clock can come from either PPLL 1703 /* some atombios (observed in some DCE2/DCE3) code have a bug,
1702 * depending on the asic: 1704 * the matching btw pll and crtc is done through
1703 * DCE3: PPLL1 or PPLL2 1705 * PCLK_CRTC[1|2]_CNTL (0x480/0x484) but atombios code use the
1704 */ 1706 * pll (1 or 2) to select which register to write. ie if using
1705 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) { 1707 * pll1 it will use PCLK_CRTC1_CNTL (0x480) and if using pll2
1706 /* use the same PPLL for all DP monitors */ 1708 * it will use PCLK_CRTC2_CNTL (0x484), it then use crtc id to
1707 pll = radeon_get_shared_dp_ppll(crtc); 1709 * choose which value to write. Which is reverse order from
1708 if (pll != ATOM_PPLL_INVALID) 1710 * register logic. So only case that works is when pllid is
1709 return pll; 1711 * same as crtcid or when both pll and crtc are enabled and
1710 } else { 1712 * both use same clock.
1711 /* use the same PPLL for all monitors with the same clock */ 1713 *
1712 pll = radeon_get_shared_nondp_ppll(crtc); 1714 * So just return crtc id as if crtc and pll were hard linked
1713 if (pll != ATOM_PPLL_INVALID) 1715 * together even if they aren't
1714 return pll; 1716 */
1715 } 1717 return radeon_crtc->crtc_id;
1716 /* all other cases */
1717 pll_in_use = radeon_get_pll_use_mask(crtc);
1718 if (!(pll_in_use & (1 << ATOM_PPLL1)))
1719 return ATOM_PPLL1;
1720 if (!(pll_in_use & (1 << ATOM_PPLL2)))
1721 return ATOM_PPLL2;
1722 DRM_ERROR("unable to allocate a PPLL\n");
1723 return ATOM_PPLL_INVALID;
1724 } else {
1725 /* on pre-R5xx asics, the crtc to pll mapping is hardcoded */
1726 return radeon_crtc->crtc_id;
1727 }
1728 } 1718 }
1729} 1719}
1730 1720
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index d5699fe4f1e8..064023bed480 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -34,8 +34,7 @@
34 34
35/* move these to drm_dp_helper.c/h */ 35/* move these to drm_dp_helper.c/h */
36#define DP_LINK_CONFIGURATION_SIZE 9 36#define DP_LINK_CONFIGURATION_SIZE 9
37#define DP_LINK_STATUS_SIZE 6 37#define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE
38#define DP_DPCD_SIZE 8
39 38
40static char *voltage_names[] = { 39static char *voltage_names[] = {
41 "0.4V", "0.6V", "0.8V", "1.2V" 40 "0.4V", "0.6V", "0.8V", "1.2V"
@@ -290,78 +289,6 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
290 289
291/***** general DP utility functions *****/ 290/***** general DP utility functions *****/
292 291
293static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
294{
295 return link_status[r - DP_LANE0_1_STATUS];
296}
297
298static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
299 int lane)
300{
301 int i = DP_LANE0_1_STATUS + (lane >> 1);
302 int s = (lane & 1) * 4;
303 u8 l = dp_link_status(link_status, i);
304 return (l >> s) & 0xf;
305}
306
307static bool dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
308 int lane_count)
309{
310 int lane;
311 u8 lane_status;
312
313 for (lane = 0; lane < lane_count; lane++) {
314 lane_status = dp_get_lane_status(link_status, lane);
315 if ((lane_status & DP_LANE_CR_DONE) == 0)
316 return false;
317 }
318 return true;
319}
320
321static bool dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
322 int lane_count)
323{
324 u8 lane_align;
325 u8 lane_status;
326 int lane;
327
328 lane_align = dp_link_status(link_status,
329 DP_LANE_ALIGN_STATUS_UPDATED);
330 if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
331 return false;
332 for (lane = 0; lane < lane_count; lane++) {
333 lane_status = dp_get_lane_status(link_status, lane);
334 if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
335 return false;
336 }
337 return true;
338}
339
340static u8 dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
341 int lane)
342
343{
344 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
345 int s = ((lane & 1) ?
346 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
347 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
348 u8 l = dp_link_status(link_status, i);
349
350 return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
351}
352
353static u8 dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
354 int lane)
355{
356 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
357 int s = ((lane & 1) ?
358 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
359 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
360 u8 l = dp_link_status(link_status, i);
361
362 return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
363}
364
365#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200 292#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200
366#define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPHASIS_9_5 293#define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPHASIS_9_5
367 294
@@ -374,8 +301,8 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
374 int lane; 301 int lane;
375 302
376 for (lane = 0; lane < lane_count; lane++) { 303 for (lane = 0; lane < lane_count; lane++) {
377 u8 this_v = dp_get_adjust_request_voltage(link_status, lane); 304 u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
378 u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane); 305 u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
379 306
380 DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n", 307 DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n",
381 lane, 308 lane,
@@ -420,37 +347,6 @@ static int dp_get_max_dp_pix_clock(int link_rate,
420 return (link_rate * lane_num * 8) / bpp; 347 return (link_rate * lane_num * 8) / bpp;
421} 348}
422 349
423static int dp_get_max_link_rate(u8 dpcd[DP_DPCD_SIZE])
424{
425 switch (dpcd[DP_MAX_LINK_RATE]) {
426 case DP_LINK_BW_1_62:
427 default:
428 return 162000;
429 case DP_LINK_BW_2_7:
430 return 270000;
431 case DP_LINK_BW_5_4:
432 return 540000;
433 }
434}
435
436static u8 dp_get_max_lane_number(u8 dpcd[DP_DPCD_SIZE])
437{
438 return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
439}
440
441static u8 dp_get_dp_link_rate_coded(int link_rate)
442{
443 switch (link_rate) {
444 case 162000:
445 default:
446 return DP_LINK_BW_1_62;
447 case 270000:
448 return DP_LINK_BW_2_7;
449 case 540000:
450 return DP_LINK_BW_5_4;
451 }
452}
453
454/***** radeon specific DP functions *****/ 350/***** radeon specific DP functions *****/
455 351
456/* First get the min lane# when low rate is used according to pixel clock 352/* First get the min lane# when low rate is used according to pixel clock
@@ -462,8 +358,8 @@ static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
462 int pix_clock) 358 int pix_clock)
463{ 359{
464 int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector)); 360 int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
465 int max_link_rate = dp_get_max_link_rate(dpcd); 361 int max_link_rate = drm_dp_max_link_rate(dpcd);
466 int max_lane_num = dp_get_max_lane_number(dpcd); 362 int max_lane_num = drm_dp_max_lane_count(dpcd);
467 int lane_num; 363 int lane_num;
468 int max_dp_pix_clock; 364 int max_dp_pix_clock;
469 365
@@ -500,7 +396,7 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
500 return 540000; 396 return 540000;
501 } 397 }
502 398
503 return dp_get_max_link_rate(dpcd); 399 return drm_dp_max_link_rate(dpcd);
504} 400}
505 401
506static u8 radeon_dp_encoder_service(struct radeon_device *rdev, 402static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
@@ -551,14 +447,15 @@ static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
551bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector) 447bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
552{ 448{
553 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; 449 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
554 u8 msg[25]; 450 u8 msg[DP_DPCD_SIZE];
555 int ret, i; 451 int ret, i;
556 452
557 ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg, 8, 0); 453 ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg,
454 DP_DPCD_SIZE, 0);
558 if (ret > 0) { 455 if (ret > 0) {
559 memcpy(dig_connector->dpcd, msg, 8); 456 memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
560 DRM_DEBUG_KMS("DPCD: "); 457 DRM_DEBUG_KMS("DPCD: ");
561 for (i = 0; i < 8; i++) 458 for (i = 0; i < DP_DPCD_SIZE; i++)
562 DRM_DEBUG_KMS("%02x ", msg[i]); 459 DRM_DEBUG_KMS("%02x ", msg[i]);
563 DRM_DEBUG_KMS("\n"); 460 DRM_DEBUG_KMS("\n");
564 461
@@ -664,7 +561,7 @@ bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
664 561
665 if (!radeon_dp_get_link_status(radeon_connector, link_status)) 562 if (!radeon_dp_get_link_status(radeon_connector, link_status))
666 return false; 563 return false;
667 if (dp_channel_eq_ok(link_status, dig->dp_lane_count)) 564 if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
668 return false; 565 return false;
669 return true; 566 return true;
670} 567}
@@ -677,9 +574,8 @@ struct radeon_dp_link_train_info {
677 int enc_id; 574 int enc_id;
678 int dp_clock; 575 int dp_clock;
679 int dp_lane_count; 576 int dp_lane_count;
680 int rd_interval;
681 bool tp3_supported; 577 bool tp3_supported;
682 u8 dpcd[8]; 578 u8 dpcd[DP_RECEIVER_CAP_SIZE];
683 u8 train_set[4]; 579 u8 train_set[4];
684 u8 link_status[DP_LINK_STATUS_SIZE]; 580 u8 link_status[DP_LINK_STATUS_SIZE];
685 u8 tries; 581 u8 tries;
@@ -765,7 +661,7 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
765 radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp); 661 radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
766 662
767 /* set the link rate on the sink */ 663 /* set the link rate on the sink */
768 tmp = dp_get_dp_link_rate_coded(dp_info->dp_clock); 664 tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock);
769 radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp); 665 radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp);
770 666
771 /* start training on the source */ 667 /* start training on the source */
@@ -821,17 +717,14 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
821 dp_info->tries = 0; 717 dp_info->tries = 0;
822 voltage = 0xff; 718 voltage = 0xff;
823 while (1) { 719 while (1) {
824 if (dp_info->rd_interval == 0) 720 drm_dp_link_train_clock_recovery_delay(dp_info->dpcd);
825 udelay(100);
826 else
827 mdelay(dp_info->rd_interval * 4);
828 721
829 if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) { 722 if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
830 DRM_ERROR("displayport link status failed\n"); 723 DRM_ERROR("displayport link status failed\n");
831 break; 724 break;
832 } 725 }
833 726
834 if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) { 727 if (drm_dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
835 clock_recovery = true; 728 clock_recovery = true;
836 break; 729 break;
837 } 730 }
@@ -886,17 +779,14 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
886 dp_info->tries = 0; 779 dp_info->tries = 0;
887 channel_eq = false; 780 channel_eq = false;
888 while (1) { 781 while (1) {
889 if (dp_info->rd_interval == 0) 782 drm_dp_link_train_channel_eq_delay(dp_info->dpcd);
890 udelay(400);
891 else
892 mdelay(dp_info->rd_interval * 4);
893 783
894 if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) { 784 if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
895 DRM_ERROR("displayport link status failed\n"); 785 DRM_ERROR("displayport link status failed\n");
896 break; 786 break;
897 } 787 }
898 788
899 if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) { 789 if (drm_dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
900 channel_eq = true; 790 channel_eq = true;
901 break; 791 break;
902 } 792 }
@@ -974,14 +864,13 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
974 else 864 else
975 dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A; 865 dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A;
976 866
977 dp_info.rd_interval = radeon_read_dpcd_reg(radeon_connector, DP_TRAINING_AUX_RD_INTERVAL);
978 tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT); 867 tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT);
979 if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED)) 868 if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
980 dp_info.tp3_supported = true; 869 dp_info.tp3_supported = true;
981 else 870 else
982 dp_info.tp3_supported = false; 871 dp_info.tp3_supported = false;
983 872
984 memcpy(dp_info.dpcd, dig_connector->dpcd, 8); 873 memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE);
985 dp_info.rdev = rdev; 874 dp_info.rdev = rdev;
986 dp_info.encoder = encoder; 875 dp_info.encoder = encoder;
987 dp_info.connector = connector; 876 dp_info.connector = connector;
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index ba498f8e47a2..4552d4aff317 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -340,7 +340,7 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
340 ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || 340 ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
341 (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) { 341 (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) {
342 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 342 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
343 radeon_dp_set_link_config(connector, mode); 343 radeon_dp_set_link_config(connector, adjusted_mode);
344 } 344 }
345 345
346 return true; 346 return true;
@@ -1625,7 +1625,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
1625 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); 1625 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
1626 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); 1626 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1627 /* some early dce3.2 boards have a bug in their transmitter control table */ 1627 /* some early dce3.2 boards have a bug in their transmitter control table */
1628 if ((rdev->family != CHIP_RV710) || (rdev->family != CHIP_RV730)) 1628 if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730))
1629 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); 1629 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
1630 } 1630 }
1631 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { 1631 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 14313ad43b76..061fa0a28900 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1330,6 +1330,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
1330 break; 1330 break;
1331 udelay(1); 1331 udelay(1);
1332 } 1332 }
1333 } else {
1334 save->crtc_enabled[i] = false;
1333 } 1335 }
1334 } 1336 }
1335 1337
@@ -1372,7 +1374,7 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
1372 WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN); 1374 WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
1373 1375
1374 for (i = 0; i < rdev->num_crtc; i++) { 1376 for (i = 0; i < rdev->num_crtc; i++) {
1375 if (save->crtc_enabled) { 1377 if (save->crtc_enabled[i]) {
1376 if (ASIC_IS_DCE6(rdev)) { 1378 if (ASIC_IS_DCE6(rdev)) {
1377 tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]); 1379 tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
1378 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; 1380 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
@@ -1648,7 +1650,7 @@ static int evergreen_cp_resume(struct radeon_device *rdev)
1648 ring->wptr = 0; 1650 ring->wptr = 0;
1649 WREG32(CP_RB_WPTR, ring->wptr); 1651 WREG32(CP_RB_WPTR, ring->wptr);
1650 1652
1651 /* set the wb address wether it's enabled or not */ 1653 /* set the wb address whether it's enabled or not */
1652 WREG32(CP_RB_RPTR_ADDR, 1654 WREG32(CP_RB_RPTR_ADDR,
1653 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC)); 1655 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
1654 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); 1656 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
@@ -1819,7 +1821,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1819 case CHIP_SUMO: 1821 case CHIP_SUMO:
1820 rdev->config.evergreen.num_ses = 1; 1822 rdev->config.evergreen.num_ses = 1;
1821 rdev->config.evergreen.max_pipes = 4; 1823 rdev->config.evergreen.max_pipes = 4;
1822 rdev->config.evergreen.max_tile_pipes = 2; 1824 rdev->config.evergreen.max_tile_pipes = 4;
1823 if (rdev->pdev->device == 0x9648) 1825 if (rdev->pdev->device == 0x9648)
1824 rdev->config.evergreen.max_simds = 3; 1826 rdev->config.evergreen.max_simds = 3;
1825 else if ((rdev->pdev->device == 0x9647) || 1827 else if ((rdev->pdev->device == 0x9647) ||
@@ -1842,7 +1844,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1842 rdev->config.evergreen.sc_prim_fifo_size = 0x40; 1844 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1843 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1845 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1844 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1846 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1845 gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN; 1847 gb_addr_config = SUMO_GB_ADDR_CONFIG_GOLDEN;
1846 break; 1848 break;
1847 case CHIP_SUMO2: 1849 case CHIP_SUMO2:
1848 rdev->config.evergreen.num_ses = 1; 1850 rdev->config.evergreen.num_ses = 1;
@@ -1864,7 +1866,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1864 rdev->config.evergreen.sc_prim_fifo_size = 0x40; 1866 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1865 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1867 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1866 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1868 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1867 gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN; 1869 gb_addr_config = SUMO2_GB_ADDR_CONFIG_GOLDEN;
1868 break; 1870 break;
1869 case CHIP_BARTS: 1871 case CHIP_BARTS:
1870 rdev->config.evergreen.num_ses = 2; 1872 rdev->config.evergreen.num_ses = 2;
@@ -1912,7 +1914,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1912 break; 1914 break;
1913 case CHIP_CAICOS: 1915 case CHIP_CAICOS:
1914 rdev->config.evergreen.num_ses = 1; 1916 rdev->config.evergreen.num_ses = 1;
1915 rdev->config.evergreen.max_pipes = 4; 1917 rdev->config.evergreen.max_pipes = 2;
1916 rdev->config.evergreen.max_tile_pipes = 2; 1918 rdev->config.evergreen.max_tile_pipes = 2;
1917 rdev->config.evergreen.max_simds = 2; 1919 rdev->config.evergreen.max_simds = 2;
1918 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses; 1920 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
@@ -2032,6 +2034,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2032 WREG32(GB_ADDR_CONFIG, gb_addr_config); 2034 WREG32(GB_ADDR_CONFIG, gb_addr_config);
2033 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 2035 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
2034 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 2036 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
2037 WREG32(DMA_TILING_CONFIG, gb_addr_config);
2035 2038
2036 tmp = gb_addr_config & NUM_PIPES_MASK; 2039 tmp = gb_addr_config & NUM_PIPES_MASK;
2037 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends, 2040 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
@@ -2303,22 +2306,20 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin
2303 return radeon_ring_test_lockup(rdev, ring); 2306 return radeon_ring_test_lockup(rdev, ring);
2304} 2307}
2305 2308
2306static int evergreen_gpu_soft_reset(struct radeon_device *rdev) 2309static void evergreen_gpu_soft_reset_gfx(struct radeon_device *rdev)
2307{ 2310{
2308 struct evergreen_mc_save save;
2309 u32 grbm_reset = 0; 2311 u32 grbm_reset = 0;
2310 2312
2311 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) 2313 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
2312 return 0; 2314 return;
2313 2315
2314 dev_info(rdev->dev, "GPU softreset \n"); 2316 dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
2315 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
2316 RREG32(GRBM_STATUS)); 2317 RREG32(GRBM_STATUS));
2317 dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n", 2318 dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
2318 RREG32(GRBM_STATUS_SE0)); 2319 RREG32(GRBM_STATUS_SE0));
2319 dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n", 2320 dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
2320 RREG32(GRBM_STATUS_SE1)); 2321 RREG32(GRBM_STATUS_SE1));
2321 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", 2322 dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
2322 RREG32(SRBM_STATUS)); 2323 RREG32(SRBM_STATUS));
2323 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", 2324 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
2324 RREG32(CP_STALLED_STAT1)); 2325 RREG32(CP_STALLED_STAT1));
@@ -2328,10 +2329,7 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
2328 RREG32(CP_BUSY_STAT)); 2329 RREG32(CP_BUSY_STAT));
2329 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", 2330 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
2330 RREG32(CP_STAT)); 2331 RREG32(CP_STAT));
2331 evergreen_mc_stop(rdev, &save); 2332
2332 if (evergreen_mc_wait_for_idle(rdev)) {
2333 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
2334 }
2335 /* Disable CP parsing/prefetching */ 2333 /* Disable CP parsing/prefetching */
2336 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT); 2334 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
2337 2335
@@ -2355,15 +2353,14 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
2355 udelay(50); 2353 udelay(50);
2356 WREG32(GRBM_SOFT_RESET, 0); 2354 WREG32(GRBM_SOFT_RESET, 0);
2357 (void)RREG32(GRBM_SOFT_RESET); 2355 (void)RREG32(GRBM_SOFT_RESET);
2358 /* Wait a little for things to settle down */ 2356
2359 udelay(50); 2357 dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
2360 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
2361 RREG32(GRBM_STATUS)); 2358 RREG32(GRBM_STATUS));
2362 dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n", 2359 dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
2363 RREG32(GRBM_STATUS_SE0)); 2360 RREG32(GRBM_STATUS_SE0));
2364 dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n", 2361 dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
2365 RREG32(GRBM_STATUS_SE1)); 2362 RREG32(GRBM_STATUS_SE1));
2366 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", 2363 dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
2367 RREG32(SRBM_STATUS)); 2364 RREG32(SRBM_STATUS));
2368 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", 2365 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
2369 RREG32(CP_STALLED_STAT1)); 2366 RREG32(CP_STALLED_STAT1));
@@ -2373,13 +2370,65 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
2373 RREG32(CP_BUSY_STAT)); 2370 RREG32(CP_BUSY_STAT));
2374 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", 2371 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
2375 RREG32(CP_STAT)); 2372 RREG32(CP_STAT));
2373}
2374
2375static void evergreen_gpu_soft_reset_dma(struct radeon_device *rdev)
2376{
2377 u32 tmp;
2378
2379 if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
2380 return;
2381
2382 dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
2383 RREG32(DMA_STATUS_REG));
2384
2385 /* Disable DMA */
2386 tmp = RREG32(DMA_RB_CNTL);
2387 tmp &= ~DMA_RB_ENABLE;
2388 WREG32(DMA_RB_CNTL, tmp);
2389
2390 /* Reset dma */
2391 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
2392 RREG32(SRBM_SOFT_RESET);
2393 udelay(50);
2394 WREG32(SRBM_SOFT_RESET, 0);
2395
2396 dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
2397 RREG32(DMA_STATUS_REG));
2398}
2399
2400static int evergreen_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
2401{
2402 struct evergreen_mc_save save;
2403
2404 if (reset_mask == 0)
2405 return 0;
2406
2407 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
2408
2409 evergreen_mc_stop(rdev, &save);
2410 if (evergreen_mc_wait_for_idle(rdev)) {
2411 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
2412 }
2413
2414 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
2415 evergreen_gpu_soft_reset_gfx(rdev);
2416
2417 if (reset_mask & RADEON_RESET_DMA)
2418 evergreen_gpu_soft_reset_dma(rdev);
2419
2420 /* Wait a little for things to settle down */
2421 udelay(50);
2422
2376 evergreen_mc_resume(rdev, &save); 2423 evergreen_mc_resume(rdev, &save);
2377 return 0; 2424 return 0;
2378} 2425}
2379 2426
2380int evergreen_asic_reset(struct radeon_device *rdev) 2427int evergreen_asic_reset(struct radeon_device *rdev)
2381{ 2428{
2382 return evergreen_gpu_soft_reset(rdev); 2429 return evergreen_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
2430 RADEON_RESET_COMPUTE |
2431 RADEON_RESET_DMA));
2383} 2432}
2384 2433
2385/* Interrupts */ 2434/* Interrupts */
@@ -2401,8 +2450,12 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
2401 CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 2450 CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
2402 cayman_cp_int_cntl_setup(rdev, 1, 0); 2451 cayman_cp_int_cntl_setup(rdev, 1, 0);
2403 cayman_cp_int_cntl_setup(rdev, 2, 0); 2452 cayman_cp_int_cntl_setup(rdev, 2, 0);
2453 tmp = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
2454 WREG32(CAYMAN_DMA1_CNTL, tmp);
2404 } else 2455 } else
2405 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 2456 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
2457 tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
2458 WREG32(DMA_CNTL, tmp);
2406 WREG32(GRBM_INT_CNTL, 0); 2459 WREG32(GRBM_INT_CNTL, 0);
2407 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); 2460 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
2408 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); 2461 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -2455,6 +2508,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
2455 u32 grbm_int_cntl = 0; 2508 u32 grbm_int_cntl = 0;
2456 u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; 2509 u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
2457 u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0; 2510 u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
2511 u32 dma_cntl, dma_cntl1 = 0;
2458 2512
2459 if (!rdev->irq.installed) { 2513 if (!rdev->irq.installed) {
2460 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); 2514 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -2482,6 +2536,8 @@ int evergreen_irq_set(struct radeon_device *rdev)
2482 afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK; 2536 afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
2483 afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK; 2537 afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
2484 2538
2539 dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
2540
2485 if (rdev->family >= CHIP_CAYMAN) { 2541 if (rdev->family >= CHIP_CAYMAN) {
2486 /* enable CP interrupts on all rings */ 2542 /* enable CP interrupts on all rings */
2487 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { 2543 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
@@ -2504,6 +2560,19 @@ int evergreen_irq_set(struct radeon_device *rdev)
2504 } 2560 }
2505 } 2561 }
2506 2562
2563 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
2564 DRM_DEBUG("r600_irq_set: sw int dma\n");
2565 dma_cntl |= TRAP_ENABLE;
2566 }
2567
2568 if (rdev->family >= CHIP_CAYMAN) {
2569 dma_cntl1 = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
2570 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
2571 DRM_DEBUG("r600_irq_set: sw int dma1\n");
2572 dma_cntl1 |= TRAP_ENABLE;
2573 }
2574 }
2575
2507 if (rdev->irq.crtc_vblank_int[0] || 2576 if (rdev->irq.crtc_vblank_int[0] ||
2508 atomic_read(&rdev->irq.pflip[0])) { 2577 atomic_read(&rdev->irq.pflip[0])) {
2509 DRM_DEBUG("evergreen_irq_set: vblank 0\n"); 2578 DRM_DEBUG("evergreen_irq_set: vblank 0\n");
@@ -2589,6 +2658,12 @@ int evergreen_irq_set(struct radeon_device *rdev)
2589 cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2); 2658 cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
2590 } else 2659 } else
2591 WREG32(CP_INT_CNTL, cp_int_cntl); 2660 WREG32(CP_INT_CNTL, cp_int_cntl);
2661
2662 WREG32(DMA_CNTL, dma_cntl);
2663
2664 if (rdev->family >= CHIP_CAYMAN)
2665 WREG32(CAYMAN_DMA1_CNTL, dma_cntl1);
2666
2592 WREG32(GRBM_INT_CNTL, grbm_int_cntl); 2667 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
2593 2668
2594 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); 2669 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -3091,6 +3166,16 @@ restart_ih:
3091 break; 3166 break;
3092 } 3167 }
3093 break; 3168 break;
3169 case 146:
3170 case 147:
3171 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
3172 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
3173 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
3174 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
3175 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
3176 /* reset addr and status */
3177 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
3178 break;
3094 case 176: /* CP_INT in ring buffer */ 3179 case 176: /* CP_INT in ring buffer */
3095 case 177: /* CP_INT in IB1 */ 3180 case 177: /* CP_INT in IB1 */
3096 case 178: /* CP_INT in IB2 */ 3181 case 178: /* CP_INT in IB2 */
@@ -3114,9 +3199,19 @@ restart_ih:
3114 } else 3199 } else
3115 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); 3200 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3116 break; 3201 break;
3202 case 224: /* DMA trap event */
3203 DRM_DEBUG("IH: DMA trap\n");
3204 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
3205 break;
3117 case 233: /* GUI IDLE */ 3206 case 233: /* GUI IDLE */
3118 DRM_DEBUG("IH: GUI idle\n"); 3207 DRM_DEBUG("IH: GUI idle\n");
3119 break; 3208 break;
3209 case 244: /* DMA trap event */
3210 if (rdev->family >= CHIP_CAYMAN) {
3211 DRM_DEBUG("IH: DMA1 trap\n");
3212 radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
3213 }
3214 break;
3120 default: 3215 default:
3121 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 3216 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3122 break; 3217 break;
@@ -3142,6 +3237,143 @@ restart_ih:
3142 return IRQ_HANDLED; 3237 return IRQ_HANDLED;
3143} 3238}
3144 3239
3240/**
3241 * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
3242 *
3243 * @rdev: radeon_device pointer
3244 * @fence: radeon fence object
3245 *
3246 * Add a DMA fence packet to the ring to write
3247 * the fence seq number and DMA trap packet to generate
3248 * an interrupt if needed (evergreen-SI).
3249 */
3250void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
3251 struct radeon_fence *fence)
3252{
3253 struct radeon_ring *ring = &rdev->ring[fence->ring];
3254 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
3255 /* write the fence */
3256 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
3257 radeon_ring_write(ring, addr & 0xfffffffc);
3258 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
3259 radeon_ring_write(ring, fence->seq);
3260 /* generate an interrupt */
3261 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
3262 /* flush HDP */
3263 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
3264 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
3265 radeon_ring_write(ring, 1);
3266}
3267
3268/**
3269 * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
3270 *
3271 * @rdev: radeon_device pointer
3272 * @ib: IB object to schedule
3273 *
3274 * Schedule an IB in the DMA ring (evergreen).
3275 */
3276void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
3277 struct radeon_ib *ib)
3278{
3279 struct radeon_ring *ring = &rdev->ring[ib->ring];
3280
3281 if (rdev->wb.enabled) {
3282 u32 next_rptr = ring->wptr + 4;
3283 while ((next_rptr & 7) != 5)
3284 next_rptr++;
3285 next_rptr += 3;
3286 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
3287 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3288 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
3289 radeon_ring_write(ring, next_rptr);
3290 }
3291
3292 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
3293 * Pad as necessary with NOPs.
3294 */
3295 while ((ring->wptr & 7) != 5)
3296 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
3297 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
3298 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
3299 radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
3300
3301}
3302
3303/**
3304 * evergreen_copy_dma - copy pages using the DMA engine
3305 *
3306 * @rdev: radeon_device pointer
3307 * @src_offset: src GPU address
3308 * @dst_offset: dst GPU address
3309 * @num_gpu_pages: number of GPU pages to xfer
3310 * @fence: radeon fence object
3311 *
3312 * Copy GPU paging using the DMA engine (evergreen-cayman).
3313 * Used by the radeon ttm implementation to move pages if
3314 * registered as the asic copy callback.
3315 */
3316int evergreen_copy_dma(struct radeon_device *rdev,
3317 uint64_t src_offset, uint64_t dst_offset,
3318 unsigned num_gpu_pages,
3319 struct radeon_fence **fence)
3320{
3321 struct radeon_semaphore *sem = NULL;
3322 int ring_index = rdev->asic->copy.dma_ring_index;
3323 struct radeon_ring *ring = &rdev->ring[ring_index];
3324 u32 size_in_dw, cur_size_in_dw;
3325 int i, num_loops;
3326 int r = 0;
3327
3328 r = radeon_semaphore_create(rdev, &sem);
3329 if (r) {
3330 DRM_ERROR("radeon: moving bo (%d).\n", r);
3331 return r;
3332 }
3333
3334 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
3335 num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
3336 r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
3337 if (r) {
3338 DRM_ERROR("radeon: moving bo (%d).\n", r);
3339 radeon_semaphore_free(rdev, &sem, NULL);
3340 return r;
3341 }
3342
3343 if (radeon_fence_need_sync(*fence, ring->idx)) {
3344 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
3345 ring->idx);
3346 radeon_fence_note_sync(*fence, ring->idx);
3347 } else {
3348 radeon_semaphore_free(rdev, &sem, NULL);
3349 }
3350
3351 for (i = 0; i < num_loops; i++) {
3352 cur_size_in_dw = size_in_dw;
3353 if (cur_size_in_dw > 0xFFFFF)
3354 cur_size_in_dw = 0xFFFFF;
3355 size_in_dw -= cur_size_in_dw;
3356 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
3357 radeon_ring_write(ring, dst_offset & 0xfffffffc);
3358 radeon_ring_write(ring, src_offset & 0xfffffffc);
3359 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
3360 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
3361 src_offset += cur_size_in_dw * 4;
3362 dst_offset += cur_size_in_dw * 4;
3363 }
3364
3365 r = radeon_fence_emit(rdev, fence, ring->idx);
3366 if (r) {
3367 radeon_ring_unlock_undo(rdev, ring);
3368 return r;
3369 }
3370
3371 radeon_ring_unlock_commit(rdev, ring);
3372 radeon_semaphore_free(rdev, &sem, *fence);
3373
3374 return r;
3375}
3376
3145static int evergreen_startup(struct radeon_device *rdev) 3377static int evergreen_startup(struct radeon_device *rdev)
3146{ 3378{
3147 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 3379 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
@@ -3205,6 +3437,12 @@ static int evergreen_startup(struct radeon_device *rdev)
3205 return r; 3437 return r;
3206 } 3438 }
3207 3439
3440 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
3441 if (r) {
3442 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
3443 return r;
3444 }
3445
3208 /* Enable IRQ */ 3446 /* Enable IRQ */
3209 r = r600_irq_init(rdev); 3447 r = r600_irq_init(rdev);
3210 if (r) { 3448 if (r) {
@@ -3219,12 +3457,23 @@ static int evergreen_startup(struct radeon_device *rdev)
3219 0, 0xfffff, RADEON_CP_PACKET2); 3457 0, 0xfffff, RADEON_CP_PACKET2);
3220 if (r) 3458 if (r)
3221 return r; 3459 return r;
3460
3461 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
3462 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
3463 DMA_RB_RPTR, DMA_RB_WPTR,
3464 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
3465 if (r)
3466 return r;
3467
3222 r = evergreen_cp_load_microcode(rdev); 3468 r = evergreen_cp_load_microcode(rdev);
3223 if (r) 3469 if (r)
3224 return r; 3470 return r;
3225 r = evergreen_cp_resume(rdev); 3471 r = evergreen_cp_resume(rdev);
3226 if (r) 3472 if (r)
3227 return r; 3473 return r;
3474 r = r600_dma_resume(rdev);
3475 if (r)
3476 return r;
3228 3477
3229 r = radeon_ib_pool_init(rdev); 3478 r = radeon_ib_pool_init(rdev);
3230 if (r) { 3479 if (r) {
@@ -3271,11 +3520,9 @@ int evergreen_resume(struct radeon_device *rdev)
3271 3520
3272int evergreen_suspend(struct radeon_device *rdev) 3521int evergreen_suspend(struct radeon_device *rdev)
3273{ 3522{
3274 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3275
3276 r600_audio_fini(rdev); 3523 r600_audio_fini(rdev);
3277 r700_cp_stop(rdev); 3524 r700_cp_stop(rdev);
3278 ring->ready = false; 3525 r600_dma_stop(rdev);
3279 evergreen_irq_suspend(rdev); 3526 evergreen_irq_suspend(rdev);
3280 radeon_wb_disable(rdev); 3527 radeon_wb_disable(rdev);
3281 evergreen_pcie_gart_disable(rdev); 3528 evergreen_pcie_gart_disable(rdev);
@@ -3352,6 +3599,9 @@ int evergreen_init(struct radeon_device *rdev)
3352 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; 3599 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
3353 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); 3600 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
3354 3601
3602 rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
3603 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
3604
3355 rdev->ih.ring_obj = NULL; 3605 rdev->ih.ring_obj = NULL;
3356 r600_ih_ring_init(rdev, 64 * 1024); 3606 r600_ih_ring_init(rdev, 64 * 1024);
3357 3607
@@ -3364,6 +3614,7 @@ int evergreen_init(struct radeon_device *rdev)
3364 if (r) { 3614 if (r) {
3365 dev_err(rdev->dev, "disabling GPU acceleration\n"); 3615 dev_err(rdev->dev, "disabling GPU acceleration\n");
3366 r700_cp_fini(rdev); 3616 r700_cp_fini(rdev);
3617 r600_dma_fini(rdev);
3367 r600_irq_fini(rdev); 3618 r600_irq_fini(rdev);
3368 radeon_wb_fini(rdev); 3619 radeon_wb_fini(rdev);
3369 radeon_ib_pool_fini(rdev); 3620 radeon_ib_pool_fini(rdev);
@@ -3391,6 +3642,7 @@ void evergreen_fini(struct radeon_device *rdev)
3391 r600_audio_fini(rdev); 3642 r600_audio_fini(rdev);
3392 r600_blit_fini(rdev); 3643 r600_blit_fini(rdev);
3393 r700_cp_fini(rdev); 3644 r700_cp_fini(rdev);
3645 r600_dma_fini(rdev);
3394 r600_irq_fini(rdev); 3646 r600_irq_fini(rdev);
3395 radeon_wb_fini(rdev); 3647 radeon_wb_fini(rdev);
3396 radeon_ib_pool_fini(rdev); 3648 radeon_ib_pool_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 30271b641913..7a445666e71f 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -34,6 +34,8 @@
34#define MAX(a,b) (((a)>(b))?(a):(b)) 34#define MAX(a,b) (((a)>(b))?(a):(b))
35#define MIN(a,b) (((a)<(b))?(a):(b)) 35#define MIN(a,b) (((a)<(b))?(a):(b))
36 36
37int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
38 struct radeon_cs_reloc **cs_reloc);
37static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p, 39static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
38 struct radeon_cs_reloc **cs_reloc); 40 struct radeon_cs_reloc **cs_reloc);
39 41
@@ -264,7 +266,7 @@ static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
264 /* macro tile width & height */ 266 /* macro tile width & height */
265 palign = (8 * surf->bankw * track->npipes) * surf->mtilea; 267 palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
266 halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea; 268 halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
267 mtileb = (palign / 8) * (halign / 8) * tileb;; 269 mtileb = (palign / 8) * (halign / 8) * tileb;
268 mtile_pr = surf->nbx / palign; 270 mtile_pr = surf->nbx / palign;
269 mtile_ps = (mtile_pr * surf->nby) / halign; 271 mtile_ps = (mtile_pr * surf->nby) / halign;
270 surf->layer_size = mtile_ps * mtileb * slice_pt; 272 surf->layer_size = mtile_ps * mtileb * slice_pt;
@@ -507,20 +509,28 @@ static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
507 /* height is npipes htiles aligned == npipes * 8 pixel aligned */ 509 /* height is npipes htiles aligned == npipes * 8 pixel aligned */
508 nby = round_up(nby, track->npipes * 8); 510 nby = round_up(nby, track->npipes * 8);
509 } else { 511 } else {
512 /* always assume 8x8 htile */
513 /* align is htile align * 8, htile align vary according to
514 * number of pipe and tile width and nby
515 */
510 switch (track->npipes) { 516 switch (track->npipes) {
511 case 8: 517 case 8:
518 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
512 nbx = round_up(nbx, 64 * 8); 519 nbx = round_up(nbx, 64 * 8);
513 nby = round_up(nby, 64 * 8); 520 nby = round_up(nby, 64 * 8);
514 break; 521 break;
515 case 4: 522 case 4:
523 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
516 nbx = round_up(nbx, 64 * 8); 524 nbx = round_up(nbx, 64 * 8);
517 nby = round_up(nby, 32 * 8); 525 nby = round_up(nby, 32 * 8);
518 break; 526 break;
519 case 2: 527 case 2:
528 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
520 nbx = round_up(nbx, 32 * 8); 529 nbx = round_up(nbx, 32 * 8);
521 nby = round_up(nby, 32 * 8); 530 nby = round_up(nby, 32 * 8);
522 break; 531 break;
523 case 1: 532 case 1:
533 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
524 nbx = round_up(nbx, 32 * 8); 534 nbx = round_up(nbx, 32 * 8);
525 nby = round_up(nby, 16 * 8); 535 nby = round_up(nby, 16 * 8);
526 break; 536 break;
@@ -531,9 +541,10 @@ static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
531 } 541 }
532 } 542 }
533 /* compute number of htile */ 543 /* compute number of htile */
534 nbx = nbx / 8; 544 nbx = nbx >> 3;
535 nby = nby / 8; 545 nby = nby >> 3;
536 size = nbx * nby * 4; 546 /* size must be aligned on npipes * 2K boundary */
547 size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
537 size += track->htile_offset; 548 size += track->htile_offset;
538 549
539 if (size > radeon_bo_size(track->htile_bo)) { 550 if (size > radeon_bo_size(track->htile_bo)) {
@@ -1790,6 +1801,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1790 case DB_HTILE_SURFACE: 1801 case DB_HTILE_SURFACE:
1791 /* 8x8 only */ 1802 /* 8x8 only */
1792 track->htile_surface = radeon_get_ib_value(p, idx); 1803 track->htile_surface = radeon_get_ib_value(p, idx);
1804 /* force 8x8 htile width and height */
1805 ib[idx] |= 3;
1793 track->db_dirty = true; 1806 track->db_dirty = true;
1794 break; 1807 break;
1795 case CB_IMMED0_BASE: 1808 case CB_IMMED0_BASE:
@@ -2232,6 +2245,107 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
2232 ib[idx+2] = upper_32_bits(offset) & 0xff; 2245 ib[idx+2] = upper_32_bits(offset) & 0xff;
2233 } 2246 }
2234 break; 2247 break;
2248 case PACKET3_CP_DMA:
2249 {
2250 u32 command, size, info;
2251 u64 offset, tmp;
2252 if (pkt->count != 4) {
2253 DRM_ERROR("bad CP DMA\n");
2254 return -EINVAL;
2255 }
2256 command = radeon_get_ib_value(p, idx+4);
2257 size = command & 0x1fffff;
2258 info = radeon_get_ib_value(p, idx+1);
2259 if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
2260 (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
2261 ((((info & 0x00300000) >> 20) == 0) &&
2262 (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
2263 ((((info & 0x60000000) >> 29) == 0) &&
2264 (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
2265 /* non mem to mem copies requires dw aligned count */
2266 if (size % 4) {
2267 DRM_ERROR("CP DMA command requires dw count alignment\n");
2268 return -EINVAL;
2269 }
2270 }
2271 if (command & PACKET3_CP_DMA_CMD_SAS) {
2272 /* src address space is register */
2273 /* GDS is ok */
2274 if (((info & 0x60000000) >> 29) != 1) {
2275 DRM_ERROR("CP DMA SAS not supported\n");
2276 return -EINVAL;
2277 }
2278 } else {
2279 if (command & PACKET3_CP_DMA_CMD_SAIC) {
2280 DRM_ERROR("CP DMA SAIC only supported for registers\n");
2281 return -EINVAL;
2282 }
2283 /* src address space is memory */
2284 if (((info & 0x60000000) >> 29) == 0) {
2285 r = evergreen_cs_packet_next_reloc(p, &reloc);
2286 if (r) {
2287 DRM_ERROR("bad CP DMA SRC\n");
2288 return -EINVAL;
2289 }
2290
2291 tmp = radeon_get_ib_value(p, idx) +
2292 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
2293
2294 offset = reloc->lobj.gpu_offset + tmp;
2295
2296 if ((tmp + size) > radeon_bo_size(reloc->robj)) {
2297 dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
2298 tmp + size, radeon_bo_size(reloc->robj));
2299 return -EINVAL;
2300 }
2301
2302 ib[idx] = offset;
2303 ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
2304 } else if (((info & 0x60000000) >> 29) != 2) {
2305 DRM_ERROR("bad CP DMA SRC_SEL\n");
2306 return -EINVAL;
2307 }
2308 }
2309 if (command & PACKET3_CP_DMA_CMD_DAS) {
2310 /* dst address space is register */
2311 /* GDS is ok */
2312 if (((info & 0x00300000) >> 20) != 1) {
2313 DRM_ERROR("CP DMA DAS not supported\n");
2314 return -EINVAL;
2315 }
2316 } else {
2317 /* dst address space is memory */
2318 if (command & PACKET3_CP_DMA_CMD_DAIC) {
2319 DRM_ERROR("CP DMA DAIC only supported for registers\n");
2320 return -EINVAL;
2321 }
2322 if (((info & 0x00300000) >> 20) == 0) {
2323 r = evergreen_cs_packet_next_reloc(p, &reloc);
2324 if (r) {
2325 DRM_ERROR("bad CP DMA DST\n");
2326 return -EINVAL;
2327 }
2328
2329 tmp = radeon_get_ib_value(p, idx+2) +
2330 ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
2331
2332 offset = reloc->lobj.gpu_offset + tmp;
2333
2334 if ((tmp + size) > radeon_bo_size(reloc->robj)) {
2335 dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
2336 tmp + size, radeon_bo_size(reloc->robj));
2337 return -EINVAL;
2338 }
2339
2340 ib[idx+2] = offset;
2341 ib[idx+3] = upper_32_bits(offset) & 0xff;
2342 } else {
2343 DRM_ERROR("bad CP DMA DST_SEL\n");
2344 return -EINVAL;
2345 }
2346 }
2347 break;
2348 }
2235 case PACKET3_SURFACE_SYNC: 2349 case PACKET3_SURFACE_SYNC:
2236 if (pkt->count != 3) { 2350 if (pkt->count != 3) {
2237 DRM_ERROR("bad SURFACE_SYNC\n"); 2351 DRM_ERROR("bad SURFACE_SYNC\n");
@@ -2540,6 +2654,35 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
2540 ib[idx+4] = upper_32_bits(offset) & 0xff; 2654 ib[idx+4] = upper_32_bits(offset) & 0xff;
2541 } 2655 }
2542 break; 2656 break;
2657 case PACKET3_MEM_WRITE:
2658 {
2659 u64 offset;
2660
2661 if (pkt->count != 3) {
2662 DRM_ERROR("bad MEM_WRITE (invalid count)\n");
2663 return -EINVAL;
2664 }
2665 r = evergreen_cs_packet_next_reloc(p, &reloc);
2666 if (r) {
2667 DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
2668 return -EINVAL;
2669 }
2670 offset = radeon_get_ib_value(p, idx+0);
2671 offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
2672 if (offset & 0x7) {
2673 DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
2674 return -EINVAL;
2675 }
2676 if ((offset + 8) > radeon_bo_size(reloc->robj)) {
2677 DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
2678 offset + 8, radeon_bo_size(reloc->robj));
2679 return -EINVAL;
2680 }
2681 offset += reloc->lobj.gpu_offset;
2682 ib[idx+0] = offset;
2683 ib[idx+1] = upper_32_bits(offset) & 0xff;
2684 break;
2685 }
2543 case PACKET3_COPY_DW: 2686 case PACKET3_COPY_DW:
2544 if (pkt->count != 4) { 2687 if (pkt->count != 4) {
2545 DRM_ERROR("bad COPY_DW (invalid count)\n"); 2688 DRM_ERROR("bad COPY_DW (invalid count)\n");
@@ -2715,6 +2858,455 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
2715 return 0; 2858 return 0;
2716} 2859}
2717 2860
2861/*
2862 * DMA
2863 */
2864
2865#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
2866#define GET_DMA_COUNT(h) ((h) & 0x000fffff)
2867#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
2868#define GET_DMA_NEW(h) (((h) & 0x04000000) >> 26)
2869#define GET_DMA_MISC(h) (((h) & 0x0700000) >> 20)
2870
2871/**
2872 * evergreen_dma_cs_parse() - parse the DMA IB
2873 * @p: parser structure holding parsing context.
2874 *
2875 * Parses the DMA IB from the CS ioctl and updates
2876 * the GPU addresses based on the reloc information and
2877 * checks for errors. (Evergreen-Cayman)
2878 * Returns 0 for success and an error on failure.
2879 **/
2880int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
2881{
2882 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
2883 struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc;
2884 u32 header, cmd, count, tiled, new_cmd, misc;
2885 volatile u32 *ib = p->ib.ptr;
2886 u32 idx, idx_value;
2887 u64 src_offset, dst_offset, dst2_offset;
2888 int r;
2889
2890 do {
2891 if (p->idx >= ib_chunk->length_dw) {
2892 DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
2893 p->idx, ib_chunk->length_dw);
2894 return -EINVAL;
2895 }
2896 idx = p->idx;
2897 header = radeon_get_ib_value(p, idx);
2898 cmd = GET_DMA_CMD(header);
2899 count = GET_DMA_COUNT(header);
2900 tiled = GET_DMA_T(header);
2901 new_cmd = GET_DMA_NEW(header);
2902 misc = GET_DMA_MISC(header);
2903
2904 switch (cmd) {
2905 case DMA_PACKET_WRITE:
2906 r = r600_dma_cs_next_reloc(p, &dst_reloc);
2907 if (r) {
2908 DRM_ERROR("bad DMA_PACKET_WRITE\n");
2909 return -EINVAL;
2910 }
2911 if (tiled) {
2912 dst_offset = ib[idx+1];
2913 dst_offset <<= 8;
2914
2915 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2916 p->idx += count + 7;
2917 } else {
2918 dst_offset = ib[idx+1];
2919 dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
2920
2921 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2922 ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2923 p->idx += count + 3;
2924 }
2925 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2926 dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
2927 dst_offset, radeon_bo_size(dst_reloc->robj));
2928 return -EINVAL;
2929 }
2930 break;
2931 case DMA_PACKET_COPY:
2932 r = r600_dma_cs_next_reloc(p, &src_reloc);
2933 if (r) {
2934 DRM_ERROR("bad DMA_PACKET_COPY\n");
2935 return -EINVAL;
2936 }
2937 r = r600_dma_cs_next_reloc(p, &dst_reloc);
2938 if (r) {
2939 DRM_ERROR("bad DMA_PACKET_COPY\n");
2940 return -EINVAL;
2941 }
2942 if (tiled) {
2943 idx_value = radeon_get_ib_value(p, idx + 2);
2944 if (new_cmd) {
2945 switch (misc) {
2946 case 0:
2947 /* L2T, frame to fields */
2948 if (idx_value & (1 << 31)) {
2949 DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
2950 return -EINVAL;
2951 }
2952 r = r600_dma_cs_next_reloc(p, &dst2_reloc);
2953 if (r) {
2954 DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
2955 return -EINVAL;
2956 }
2957 dst_offset = ib[idx+1];
2958 dst_offset <<= 8;
2959 dst2_offset = ib[idx+2];
2960 dst2_offset <<= 8;
2961 src_offset = ib[idx+8];
2962 src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
2963 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
2964 dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
2965 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
2966 return -EINVAL;
2967 }
2968 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2969 dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
2970 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2971 return -EINVAL;
2972 }
2973 if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
2974 dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
2975 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
2976 return -EINVAL;
2977 }
2978 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2979 ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
2980 ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2981 ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2982 p->idx += 10;
2983 break;
2984 case 1:
2985 /* L2T, T2L partial */
2986 if (p->family < CHIP_CAYMAN) {
2987 DRM_ERROR("L2T, T2L Partial is cayman only !\n");
2988 return -EINVAL;
2989 }
2990 /* detile bit */
2991 if (idx_value & (1 << 31)) {
2992 /* tiled src, linear dst */
2993 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
2994
2995 ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2996 ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2997 } else {
2998 /* linear src, tiled dst */
2999 ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
3000 ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3001
3002 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
3003 }
3004 p->idx += 12;
3005 break;
3006 case 3:
3007 /* L2T, broadcast */
3008 if (idx_value & (1 << 31)) {
3009 DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
3010 return -EINVAL;
3011 }
3012 r = r600_dma_cs_next_reloc(p, &dst2_reloc);
3013 if (r) {
3014 DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
3015 return -EINVAL;
3016 }
3017 dst_offset = ib[idx+1];
3018 dst_offset <<= 8;
3019 dst2_offset = ib[idx+2];
3020 dst2_offset <<= 8;
3021 src_offset = ib[idx+8];
3022 src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
3023 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
3024 dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
3025 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
3026 return -EINVAL;
3027 }
3028 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
3029 dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
3030 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
3031 return -EINVAL;
3032 }
3033 if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
3034 dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
3035 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
3036 return -EINVAL;
3037 }
3038 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
3039 ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
3040 ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
3041 ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3042 p->idx += 10;
3043 break;
3044 case 4:
3045 /* L2T, T2L */
3046 /* detile bit */
3047 if (idx_value & (1 << 31)) {
3048 /* tiled src, linear dst */
3049 src_offset = ib[idx+1];
3050 src_offset <<= 8;
3051 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
3052
3053 dst_offset = ib[idx+7];
3054 dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
3055 ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
3056 ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
3057 } else {
3058 /* linear src, tiled dst */
3059 src_offset = ib[idx+7];
3060 src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
3061 ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
3062 ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3063
3064 dst_offset = ib[idx+1];
3065 dst_offset <<= 8;
3066 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
3067 }
3068 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
3069 dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
3070 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
3071 return -EINVAL;
3072 }
3073 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
3074 dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
3075 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
3076 return -EINVAL;
3077 }
3078 p->idx += 9;
3079 break;
3080 case 5:
3081 /* T2T partial */
3082 if (p->family < CHIP_CAYMAN) {
3083 DRM_ERROR("L2T, T2L Partial is cayman only !\n");
3084 return -EINVAL;
3085 }
3086 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
3087 ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
3088 p->idx += 13;
3089 break;
3090 case 7:
3091 /* L2T, broadcast */
3092 if (idx_value & (1 << 31)) {
3093 DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
3094 return -EINVAL;
3095 }
3096 r = r600_dma_cs_next_reloc(p, &dst2_reloc);
3097 if (r) {
3098 DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
3099 return -EINVAL;
3100 }
3101 dst_offset = ib[idx+1];
3102 dst_offset <<= 8;
3103 dst2_offset = ib[idx+2];
3104 dst2_offset <<= 8;
3105 src_offset = ib[idx+8];
3106 src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
3107 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
3108 dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
3109 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
3110 return -EINVAL;
3111 }
3112 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
3113 dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
3114 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
3115 return -EINVAL;
3116 }
3117 if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
3118 dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
3119 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
3120 return -EINVAL;
3121 }
3122 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
3123 ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
3124 ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
3125 ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3126 p->idx += 10;
3127 break;
3128 default:
3129 DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
3130 return -EINVAL;
3131 }
3132 } else {
3133 switch (misc) {
3134 case 0:
3135 /* detile bit */
3136 if (idx_value & (1 << 31)) {
3137 /* tiled src, linear dst */
3138 src_offset = ib[idx+1];
3139 src_offset <<= 8;
3140 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
3141
3142 dst_offset = ib[idx+7];
3143 dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
3144 ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
3145 ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
3146 } else {
3147 /* linear src, tiled dst */
3148 src_offset = ib[idx+7];
3149 src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
3150 ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
3151 ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3152
3153 dst_offset = ib[idx+1];
3154 dst_offset <<= 8;
3155 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
3156 }
3157 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
3158 dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
3159 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
3160 return -EINVAL;
3161 }
3162 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
3163 dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
3164 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
3165 return -EINVAL;
3166 }
3167 p->idx += 9;
3168 break;
3169 default:
3170 DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
3171 return -EINVAL;
3172 }
3173 }
3174 } else {
3175 if (new_cmd) {
3176 switch (misc) {
3177 case 0:
3178 /* L2L, byte */
3179 src_offset = ib[idx+2];
3180 src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
3181 dst_offset = ib[idx+1];
3182 dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
3183 if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
3184 dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
3185 src_offset + count, radeon_bo_size(src_reloc->robj));
3186 return -EINVAL;
3187 }
3188 if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
3189 dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
3190 dst_offset + count, radeon_bo_size(dst_reloc->robj));
3191 return -EINVAL;
3192 }
3193 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
3194 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
3195 ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
3196 ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3197 p->idx += 5;
3198 break;
3199 case 1:
3200 /* L2L, partial */
3201 if (p->family < CHIP_CAYMAN) {
3202 DRM_ERROR("L2L Partial is cayman only !\n");
3203 return -EINVAL;
3204 }
3205 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
3206 ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3207 ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
3208 ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
3209
3210 p->idx += 9;
3211 break;
3212 case 4:
3213 /* L2L, dw, broadcast */
3214 r = r600_dma_cs_next_reloc(p, &dst2_reloc);
3215 if (r) {
3216 DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
3217 return -EINVAL;
3218 }
3219 dst_offset = ib[idx+1];
3220 dst_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
3221 dst2_offset = ib[idx+2];
3222 dst2_offset |= ((u64)(ib[idx+5] & 0xff)) << 32;
3223 src_offset = ib[idx+3];
3224 src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
3225 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
3226 dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
3227 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
3228 return -EINVAL;
3229 }
3230 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
3231 dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
3232 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
3233 return -EINVAL;
3234 }
3235 if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
3236 dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
3237 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
3238 return -EINVAL;
3239 }
3240 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
3241 ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
3242 ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
3243 ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
3244 ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
3245 ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3246 p->idx += 7;
3247 break;
3248 default:
3249 DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
3250 return -EINVAL;
3251 }
3252 } else {
3253 /* L2L, dw */
3254 src_offset = ib[idx+2];
3255 src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
3256 dst_offset = ib[idx+1];
3257 dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
3258 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
3259 dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
3260 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
3261 return -EINVAL;
3262 }
3263 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
3264 dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
3265 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
3266 return -EINVAL;
3267 }
3268 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
3269 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
3270 ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
3271 ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3272 p->idx += 5;
3273 }
3274 }
3275 break;
3276 case DMA_PACKET_CONSTANT_FILL:
3277 r = r600_dma_cs_next_reloc(p, &dst_reloc);
3278 if (r) {
3279 DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
3280 return -EINVAL;
3281 }
3282 dst_offset = ib[idx+1];
3283 dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
3284 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
3285 dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
3286 dst_offset, radeon_bo_size(dst_reloc->robj));
3287 return -EINVAL;
3288 }
3289 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
3290 ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
3291 p->idx += 4;
3292 break;
3293 case DMA_PACKET_NOP:
3294 p->idx += 1;
3295 break;
3296 default:
3297 DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
3298 return -EINVAL;
3299 }
3300 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
3301#if 0
3302 for (r = 0; r < p->ib->length_dw; r++) {
3303 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
3304 mdelay(1);
3305 }
3306#endif
3307 return 0;
3308}
3309
2718/* vm parser */ 3310/* vm parser */
2719static bool evergreen_vm_reg_valid(u32 reg) 3311static bool evergreen_vm_reg_valid(u32 reg)
2720{ 3312{
@@ -2724,7 +3316,11 @@ static bool evergreen_vm_reg_valid(u32 reg)
2724 3316
2725 /* check config regs */ 3317 /* check config regs */
2726 switch (reg) { 3318 switch (reg) {
3319 case WAIT_UNTIL:
2727 case GRBM_GFX_INDEX: 3320 case GRBM_GFX_INDEX:
3321 case CP_STRMOUT_CNTL:
3322 case CP_COHER_CNTL:
3323 case CP_COHER_SIZE:
2728 case VGT_VTX_VECT_EJECT_REG: 3324 case VGT_VTX_VECT_EJECT_REG:
2729 case VGT_CACHE_INVALIDATION: 3325 case VGT_CACHE_INVALIDATION:
2730 case VGT_GS_VERTEX_REUSE: 3326 case VGT_GS_VERTEX_REUSE:
@@ -2840,6 +3436,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
2840 u32 idx = pkt->idx + 1; 3436 u32 idx = pkt->idx + 1;
2841 u32 idx_value = ib[idx]; 3437 u32 idx_value = ib[idx];
2842 u32 start_reg, end_reg, reg, i; 3438 u32 start_reg, end_reg, reg, i;
3439 u32 command, info;
2843 3440
2844 switch (pkt->opcode) { 3441 switch (pkt->opcode) {
2845 case PACKET3_NOP: 3442 case PACKET3_NOP:
@@ -2914,6 +3511,64 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
2914 return -EINVAL; 3511 return -EINVAL;
2915 } 3512 }
2916 break; 3513 break;
3514 case PACKET3_CP_DMA:
3515 command = ib[idx + 4];
3516 info = ib[idx + 1];
3517 if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
3518 (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
3519 ((((info & 0x00300000) >> 20) == 0) &&
3520 (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
3521 ((((info & 0x60000000) >> 29) == 0) &&
3522 (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
3523 /* non mem to mem copies requires dw aligned count */
3524 if ((command & 0x1fffff) % 4) {
3525 DRM_ERROR("CP DMA command requires dw count alignment\n");
3526 return -EINVAL;
3527 }
3528 }
3529 if (command & PACKET3_CP_DMA_CMD_SAS) {
3530 /* src address space is register */
3531 if (((info & 0x60000000) >> 29) == 0) {
3532 start_reg = idx_value << 2;
3533 if (command & PACKET3_CP_DMA_CMD_SAIC) {
3534 reg = start_reg;
3535 if (!evergreen_vm_reg_valid(reg)) {
3536 DRM_ERROR("CP DMA Bad SRC register\n");
3537 return -EINVAL;
3538 }
3539 } else {
3540 for (i = 0; i < (command & 0x1fffff); i++) {
3541 reg = start_reg + (4 * i);
3542 if (!evergreen_vm_reg_valid(reg)) {
3543 DRM_ERROR("CP DMA Bad SRC register\n");
3544 return -EINVAL;
3545 }
3546 }
3547 }
3548 }
3549 }
3550 if (command & PACKET3_CP_DMA_CMD_DAS) {
3551 /* dst address space is register */
3552 if (((info & 0x00300000) >> 20) == 0) {
3553 start_reg = ib[idx + 2];
3554 if (command & PACKET3_CP_DMA_CMD_DAIC) {
3555 reg = start_reg;
3556 if (!evergreen_vm_reg_valid(reg)) {
3557 DRM_ERROR("CP DMA Bad DST register\n");
3558 return -EINVAL;
3559 }
3560 } else {
3561 for (i = 0; i < (command & 0x1fffff); i++) {
3562 reg = start_reg + (4 * i);
3563 if (!evergreen_vm_reg_valid(reg)) {
3564 DRM_ERROR("CP DMA Bad DST register\n");
3565 return -EINVAL;
3566 }
3567 }
3568 }
3569 }
3570 }
3571 break;
2917 default: 3572 default:
2918 return -EINVAL; 3573 return -EINVAL;
2919 } 3574 }
@@ -2955,3 +3610,114 @@ int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
2955 3610
2956 return ret; 3611 return ret;
2957} 3612}
3613
3614/**
3615 * evergreen_dma_ib_parse() - parse the DMA IB for VM
3616 * @rdev: radeon_device pointer
3617 * @ib: radeon_ib pointer
3618 *
3619 * Parses the DMA IB from the VM CS ioctl
3620 * checks for errors. (Cayman-SI)
3621 * Returns 0 for success and an error on failure.
3622 **/
3623int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
3624{
3625 u32 idx = 0;
3626 u32 header, cmd, count, tiled, new_cmd, misc;
3627
3628 do {
3629 header = ib->ptr[idx];
3630 cmd = GET_DMA_CMD(header);
3631 count = GET_DMA_COUNT(header);
3632 tiled = GET_DMA_T(header);
3633 new_cmd = GET_DMA_NEW(header);
3634 misc = GET_DMA_MISC(header);
3635
3636 switch (cmd) {
3637 case DMA_PACKET_WRITE:
3638 if (tiled)
3639 idx += count + 7;
3640 else
3641 idx += count + 3;
3642 break;
3643 case DMA_PACKET_COPY:
3644 if (tiled) {
3645 if (new_cmd) {
3646 switch (misc) {
3647 case 0:
3648 /* L2T, frame to fields */
3649 idx += 10;
3650 break;
3651 case 1:
3652 /* L2T, T2L partial */
3653 idx += 12;
3654 break;
3655 case 3:
3656 /* L2T, broadcast */
3657 idx += 10;
3658 break;
3659 case 4:
3660 /* L2T, T2L */
3661 idx += 9;
3662 break;
3663 case 5:
3664 /* T2T partial */
3665 idx += 13;
3666 break;
3667 case 7:
3668 /* L2T, broadcast */
3669 idx += 10;
3670 break;
3671 default:
3672 DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
3673 return -EINVAL;
3674 }
3675 } else {
3676 switch (misc) {
3677 case 0:
3678 idx += 9;
3679 break;
3680 default:
3681 DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
3682 return -EINVAL;
3683 }
3684 }
3685 } else {
3686 if (new_cmd) {
3687 switch (misc) {
3688 case 0:
3689 /* L2L, byte */
3690 idx += 5;
3691 break;
3692 case 1:
3693 /* L2L, partial */
3694 idx += 9;
3695 break;
3696 case 4:
3697 /* L2L, dw, broadcast */
3698 idx += 7;
3699 break;
3700 default:
3701 DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
3702 return -EINVAL;
3703 }
3704 } else {
3705 /* L2L, dw */
3706 idx += 5;
3707 }
3708 }
3709 break;
3710 case DMA_PACKET_CONSTANT_FILL:
3711 idx += 4;
3712 break;
3713 case DMA_PACKET_NOP:
3714 idx += 1;
3715 break;
3716 default:
3717 DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
3718 return -EINVAL;
3719 }
3720 } while (idx < ib->length_dw);
3721
3722 return 0;
3723}
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index df542f1a5dfb..0bfd0e9e469b 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -45,6 +45,8 @@
45#define TURKS_GB_ADDR_CONFIG_GOLDEN 0x02010002 45#define TURKS_GB_ADDR_CONFIG_GOLDEN 0x02010002
46#define CEDAR_GB_ADDR_CONFIG_GOLDEN 0x02010001 46#define CEDAR_GB_ADDR_CONFIG_GOLDEN 0x02010001
47#define CAICOS_GB_ADDR_CONFIG_GOLDEN 0x02010001 47#define CAICOS_GB_ADDR_CONFIG_GOLDEN 0x02010001
48#define SUMO_GB_ADDR_CONFIG_GOLDEN 0x02010002
49#define SUMO2_GB_ADDR_CONFIG_GOLDEN 0x02010002
48 50
49/* Registers */ 51/* Registers */
50 52
@@ -91,6 +93,10 @@
91#define FB_READ_EN (1 << 0) 93#define FB_READ_EN (1 << 0)
92#define FB_WRITE_EN (1 << 1) 94#define FB_WRITE_EN (1 << 1)
93 95
96#define CP_STRMOUT_CNTL 0x84FC
97
98#define CP_COHER_CNTL 0x85F0
99#define CP_COHER_SIZE 0x85F4
94#define CP_COHER_BASE 0x85F8 100#define CP_COHER_BASE 0x85F8
95#define CP_STALLED_STAT1 0x8674 101#define CP_STALLED_STAT1 0x8674
96#define CP_STALLED_STAT2 0x8678 102#define CP_STALLED_STAT2 0x8678
@@ -351,6 +357,54 @@
351# define AFMT_MPEG_INFO_UPDATE (1 << 10) 357# define AFMT_MPEG_INFO_UPDATE (1 << 10)
352#define AFMT_GENERIC0_7 0x7138 358#define AFMT_GENERIC0_7 0x7138
353 359
360/* DCE4/5 ELD audio interface */
361#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x5f84 /* LPCM */
362#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x5f88 /* AC3 */
363#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x5f8c /* MPEG1 */
364#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3 0x5f90 /* MP3 */
365#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4 0x5f94 /* MPEG2 */
366#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5 0x5f98 /* AAC */
367#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6 0x5f9c /* DTS */
368#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7 0x5fa0 /* ATRAC */
369#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8 0x5fa4 /* one bit audio - leave at 0 (default) */
370#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9 0x5fa8 /* Dolby Digital */
371#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10 0x5fac /* DTS-HD */
372#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11 0x5fb0 /* MAT-MLP */
373#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12 0x5fb4 /* DTS */
374#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13 0x5fb8 /* WMA Pro */
375# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
376/* max channels minus one. 7 = 8 channels */
377# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
378# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
379# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
380/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
381 * bit0 = 32 kHz
382 * bit1 = 44.1 kHz
383 * bit2 = 48 kHz
384 * bit3 = 88.2 kHz
385 * bit4 = 96 kHz
386 * bit5 = 176.4 kHz
387 * bit6 = 192 kHz
388 */
389
390#define AZ_HOT_PLUG_CONTROL 0x5e78
391# define AZ_FORCE_CODEC_WAKE (1 << 0)
392# define PIN0_JACK_DETECTION_ENABLE (1 << 4)
393# define PIN1_JACK_DETECTION_ENABLE (1 << 5)
394# define PIN2_JACK_DETECTION_ENABLE (1 << 6)
395# define PIN3_JACK_DETECTION_ENABLE (1 << 7)
396# define PIN0_UNSOLICITED_RESPONSE_ENABLE (1 << 8)
397# define PIN1_UNSOLICITED_RESPONSE_ENABLE (1 << 9)
398# define PIN2_UNSOLICITED_RESPONSE_ENABLE (1 << 10)
399# define PIN3_UNSOLICITED_RESPONSE_ENABLE (1 << 11)
400# define CODEC_HOT_PLUG_ENABLE (1 << 12)
401# define PIN0_AUDIO_ENABLED (1 << 24)
402# define PIN1_AUDIO_ENABLED (1 << 25)
403# define PIN2_AUDIO_ENABLED (1 << 26)
404# define PIN3_AUDIO_ENABLED (1 << 27)
405# define AUDIO_ENABLED (1 << 31)
406
407
354#define GC_USER_SHADER_PIPE_CONFIG 0x8954 408#define GC_USER_SHADER_PIPE_CONFIG 0x8954
355#define INACTIVE_QD_PIPES(x) ((x) << 8) 409#define INACTIVE_QD_PIPES(x) ((x) << 8)
356#define INACTIVE_QD_PIPES_MASK 0x0000FF00 410#define INACTIVE_QD_PIPES_MASK 0x0000FF00
@@ -647,6 +701,7 @@
647#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1) 701#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
648#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4) 702#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
649#define VM_CONTEXT1_CNTL 0x1414 703#define VM_CONTEXT1_CNTL 0x1414
704#define VM_CONTEXT1_CNTL2 0x1434
650#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C 705#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C
651#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C 706#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
652#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C 707#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C
@@ -668,6 +723,8 @@
668#define CACHE_UPDATE_MODE(x) ((x) << 6) 723#define CACHE_UPDATE_MODE(x) ((x) << 6)
669#define VM_L2_STATUS 0x140C 724#define VM_L2_STATUS 0x140C
670#define L2_BUSY (1 << 0) 725#define L2_BUSY (1 << 0)
726#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
727#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
671 728
672#define WAIT_UNTIL 0x8040 729#define WAIT_UNTIL 0x8040
673 730
@@ -685,8 +742,9 @@
685#define SOFT_RESET_ROM (1 << 14) 742#define SOFT_RESET_ROM (1 << 14)
686#define SOFT_RESET_SEM (1 << 15) 743#define SOFT_RESET_SEM (1 << 15)
687#define SOFT_RESET_VMC (1 << 17) 744#define SOFT_RESET_VMC (1 << 17)
745#define SOFT_RESET_DMA (1 << 20)
688#define SOFT_RESET_TST (1 << 21) 746#define SOFT_RESET_TST (1 << 21)
689#define SOFT_RESET_REGBB (1 << 22) 747#define SOFT_RESET_REGBB (1 << 22)
690#define SOFT_RESET_ORB (1 << 23) 748#define SOFT_RESET_ORB (1 << 23)
691 749
692/* display watermarks */ 750/* display watermarks */
@@ -850,6 +908,37 @@
850# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) 908# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
851# define DC_HPDx_EN (1 << 28) 909# define DC_HPDx_EN (1 << 28)
852 910
911/* ASYNC DMA */
912#define DMA_RB_RPTR 0xd008
913#define DMA_RB_WPTR 0xd00c
914
915#define DMA_CNTL 0xd02c
916# define TRAP_ENABLE (1 << 0)
917# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
918# define SEM_WAIT_INT_ENABLE (1 << 2)
919# define DATA_SWAP_ENABLE (1 << 3)
920# define FENCE_SWAP_ENABLE (1 << 4)
921# define CTXEMPTY_INT_ENABLE (1 << 28)
922#define DMA_TILING_CONFIG 0xD0B8
923
924#define CAYMAN_DMA1_CNTL 0xd82c
925
926/* async DMA packets */
927#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
928 (((t) & 0x1) << 23) | \
929 (((s) & 0x1) << 22) | \
930 (((n) & 0xFFFFF) << 0))
931/* async DMA Packet types */
932#define DMA_PACKET_WRITE 0x2
933#define DMA_PACKET_COPY 0x3
934#define DMA_PACKET_INDIRECT_BUFFER 0x4
935#define DMA_PACKET_SEMAPHORE 0x5
936#define DMA_PACKET_FENCE 0x6
937#define DMA_PACKET_TRAP 0x7
938#define DMA_PACKET_SRBM_WRITE 0x9
939#define DMA_PACKET_CONSTANT_FILL 0xd
940#define DMA_PACKET_NOP 0xf
941
853/* PCIE link stuff */ 942/* PCIE link stuff */
854#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */ 943#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
855#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */ 944#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
@@ -947,6 +1036,53 @@
947#define PACKET3_WAIT_REG_MEM 0x3C 1036#define PACKET3_WAIT_REG_MEM 0x3C
948#define PACKET3_MEM_WRITE 0x3D 1037#define PACKET3_MEM_WRITE 0x3D
949#define PACKET3_INDIRECT_BUFFER 0x32 1038#define PACKET3_INDIRECT_BUFFER 0x32
1039#define PACKET3_CP_DMA 0x41
1040/* 1. header
1041 * 2. SRC_ADDR_LO or DATA [31:0]
1042 * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
1043 * SRC_ADDR_HI [7:0]
1044 * 4. DST_ADDR_LO [31:0]
1045 * 5. DST_ADDR_HI [7:0]
1046 * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
1047 */
1048# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
1049 /* 0 - SRC_ADDR
1050 * 1 - GDS
1051 */
1052# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
1053 /* 0 - ME
1054 * 1 - PFP
1055 */
1056# define PACKET3_CP_DMA_SRC_SEL(x) ((x) << 29)
1057 /* 0 - SRC_ADDR
1058 * 1 - GDS
1059 * 2 - DATA
1060 */
1061# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
1062/* COMMAND */
1063# define PACKET3_CP_DMA_DIS_WC (1 << 21)
1064# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
1065 /* 0 - none
1066 * 1 - 8 in 16
1067 * 2 - 8 in 32
1068 * 3 - 8 in 64
1069 */
1070# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
1071 /* 0 - none
1072 * 1 - 8 in 16
1073 * 2 - 8 in 32
1074 * 3 - 8 in 64
1075 */
1076# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
1077 /* 0 - memory
1078 * 1 - register
1079 */
1080# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
1081 /* 0 - memory
1082 * 1 - register
1083 */
1084# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
1085# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
950#define PACKET3_SURFACE_SYNC 0x43 1086#define PACKET3_SURFACE_SYNC 0x43
951# define PACKET3_CB0_DEST_BASE_ENA (1 << 6) 1087# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
952# define PACKET3_CB1_DEST_BASE_ENA (1 << 7) 1088# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
@@ -1892,4 +2028,15 @@
1892/* cayman packet3 addition */ 2028/* cayman packet3 addition */
1893#define CAYMAN_PACKET3_DEALLOC_STATE 0x14 2029#define CAYMAN_PACKET3_DEALLOC_STATE 0x14
1894 2030
2031/* DMA regs common on r6xx/r7xx/evergreen/ni */
2032#define DMA_RB_CNTL 0xd000
2033# define DMA_RB_ENABLE (1 << 0)
2034# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
2035# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
2036# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
2037# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
2038# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
2039#define DMA_STATUS_REG 0xd034
2040# define DMA_IDLE (1 << 0)
2041
1895#endif 2042#endif
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 81e6a568c29d..896f1cbc58a5 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -611,6 +611,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
611 WREG32(GB_ADDR_CONFIG, gb_addr_config); 611 WREG32(GB_ADDR_CONFIG, gb_addr_config);
612 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 612 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
613 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 613 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
614 WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
615 WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
614 616
615 tmp = gb_addr_config & NUM_PIPES_MASK; 617 tmp = gb_addr_config & NUM_PIPES_MASK;
616 tmp = r6xx_remap_render_backend(rdev, tmp, 618 tmp = r6xx_remap_render_backend(rdev, tmp,
@@ -784,10 +786,20 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
784 /* enable context1-7 */ 786 /* enable context1-7 */
785 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR, 787 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
786 (u32)(rdev->dummy_page.addr >> 12)); 788 (u32)(rdev->dummy_page.addr >> 12));
787 WREG32(VM_CONTEXT1_CNTL2, 0); 789 WREG32(VM_CONTEXT1_CNTL2, 4);
788 WREG32(VM_CONTEXT1_CNTL, 0);
789 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) | 790 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
790 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); 791 RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
792 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
793 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
794 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
795 PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
796 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
797 VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
798 VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
799 READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
800 READ_PROTECTION_FAULT_ENABLE_DEFAULT |
801 WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
802 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
791 803
792 cayman_pcie_gart_tlb_flush(rdev); 804 cayman_pcie_gart_tlb_flush(rdev);
793 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", 805 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -905,6 +917,7 @@ static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
905 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 917 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
906 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); 918 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
907 WREG32(SCRATCH_UMSK, 0); 919 WREG32(SCRATCH_UMSK, 0);
920 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
908 } 921 }
909} 922}
910 923
@@ -1059,7 +1072,7 @@ static int cayman_cp_resume(struct radeon_device *rdev)
1059 1072
1060 WREG32(CP_DEBUG, (1 << 27)); 1073 WREG32(CP_DEBUG, (1 << 27));
1061 1074
1062 /* set the wb address wether it's enabled or not */ 1075 /* set the wb address whether it's enabled or not */
1063 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); 1076 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
1064 WREG32(SCRATCH_UMSK, 0xff); 1077 WREG32(SCRATCH_UMSK, 0xff);
1065 1078
@@ -1076,7 +1089,7 @@ static int cayman_cp_resume(struct radeon_device *rdev)
1076#endif 1089#endif
1077 WREG32(cp_rb_cntl[i], rb_cntl); 1090 WREG32(cp_rb_cntl[i], rb_cntl);
1078 1091
1079 /* set the wb address wether it's enabled or not */ 1092 /* set the wb address whether it's enabled or not */
1080 addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET; 1093 addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET;
1081 WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC); 1094 WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC);
1082 WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF); 1095 WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF);
@@ -1118,22 +1131,195 @@ static int cayman_cp_resume(struct radeon_device *rdev)
1118 return 0; 1131 return 0;
1119} 1132}
1120 1133
1121static int cayman_gpu_soft_reset(struct radeon_device *rdev) 1134/*
1135 * DMA
1136 * Starting with R600, the GPU has an asynchronous
1137 * DMA engine. The programming model is very similar
1138 * to the 3D engine (ring buffer, IBs, etc.), but the
1139 * DMA controller has it's own packet format that is
1140 * different form the PM4 format used by the 3D engine.
1141 * It supports copying data, writing embedded data,
1142 * solid fills, and a number of other things. It also
1143 * has support for tiling/detiling of buffers.
1144 * Cayman and newer support two asynchronous DMA engines.
1145 */
1146/**
1147 * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
1148 *
1149 * @rdev: radeon_device pointer
1150 * @ib: IB object to schedule
1151 *
1152 * Schedule an IB in the DMA ring (cayman-SI).
1153 */
1154void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
1155 struct radeon_ib *ib)
1156{
1157 struct radeon_ring *ring = &rdev->ring[ib->ring];
1158
1159 if (rdev->wb.enabled) {
1160 u32 next_rptr = ring->wptr + 4;
1161 while ((next_rptr & 7) != 5)
1162 next_rptr++;
1163 next_rptr += 3;
1164 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
1165 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
1166 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
1167 radeon_ring_write(ring, next_rptr);
1168 }
1169
1170 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
1171 * Pad as necessary with NOPs.
1172 */
1173 while ((ring->wptr & 7) != 5)
1174 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
1175 radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
1176 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
1177 radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
1178
1179}
1180
1181/**
1182 * cayman_dma_stop - stop the async dma engines
1183 *
1184 * @rdev: radeon_device pointer
1185 *
1186 * Stop the async dma engines (cayman-SI).
1187 */
1188void cayman_dma_stop(struct radeon_device *rdev)
1189{
1190 u32 rb_cntl;
1191
1192 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1193
1194 /* dma0 */
1195 rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
1196 rb_cntl &= ~DMA_RB_ENABLE;
1197 WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
1198
1199 /* dma1 */
1200 rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
1201 rb_cntl &= ~DMA_RB_ENABLE;
1202 WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
1203
1204 rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
1205 rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
1206}
1207
1208/**
1209 * cayman_dma_resume - setup and start the async dma engines
1210 *
1211 * @rdev: radeon_device pointer
1212 *
1213 * Set up the DMA ring buffers and enable them. (cayman-SI).
1214 * Returns 0 for success, error for failure.
1215 */
1216int cayman_dma_resume(struct radeon_device *rdev)
1217{
1218 struct radeon_ring *ring;
1219 u32 rb_cntl, dma_cntl;
1220 u32 rb_bufsz;
1221 u32 reg_offset, wb_offset;
1222 int i, r;
1223
1224 /* Reset dma */
1225 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
1226 RREG32(SRBM_SOFT_RESET);
1227 udelay(50);
1228 WREG32(SRBM_SOFT_RESET, 0);
1229
1230 for (i = 0; i < 2; i++) {
1231 if (i == 0) {
1232 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
1233 reg_offset = DMA0_REGISTER_OFFSET;
1234 wb_offset = R600_WB_DMA_RPTR_OFFSET;
1235 } else {
1236 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
1237 reg_offset = DMA1_REGISTER_OFFSET;
1238 wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
1239 }
1240
1241 WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
1242 WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
1243
1244 /* Set ring buffer size in dwords */
1245 rb_bufsz = drm_order(ring->ring_size / 4);
1246 rb_cntl = rb_bufsz << 1;
1247#ifdef __BIG_ENDIAN
1248 rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
1249#endif
1250 WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
1251
1252 /* Initialize the ring buffer's read and write pointers */
1253 WREG32(DMA_RB_RPTR + reg_offset, 0);
1254 WREG32(DMA_RB_WPTR + reg_offset, 0);
1255
1256 /* set the wb address whether it's enabled or not */
1257 WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
1258 upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
1259 WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
1260 ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
1261
1262 if (rdev->wb.enabled)
1263 rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
1264
1265 WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
1266
1267 /* enable DMA IBs */
1268 WREG32(DMA_IB_CNTL + reg_offset, DMA_IB_ENABLE | CMD_VMID_FORCE);
1269
1270 dma_cntl = RREG32(DMA_CNTL + reg_offset);
1271 dma_cntl &= ~CTXEMPTY_INT_ENABLE;
1272 WREG32(DMA_CNTL + reg_offset, dma_cntl);
1273
1274 ring->wptr = 0;
1275 WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
1276
1277 ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
1278
1279 WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
1280
1281 ring->ready = true;
1282
1283 r = radeon_ring_test(rdev, ring->idx, ring);
1284 if (r) {
1285 ring->ready = false;
1286 return r;
1287 }
1288 }
1289
1290 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
1291
1292 return 0;
1293}
1294
1295/**
1296 * cayman_dma_fini - tear down the async dma engines
1297 *
1298 * @rdev: radeon_device pointer
1299 *
1300 * Stop the async dma engines and free the rings (cayman-SI).
1301 */
1302void cayman_dma_fini(struct radeon_device *rdev)
1303{
1304 cayman_dma_stop(rdev);
1305 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
1306 radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
1307}
1308
1309static void cayman_gpu_soft_reset_gfx(struct radeon_device *rdev)
1122{ 1310{
1123 struct evergreen_mc_save save;
1124 u32 grbm_reset = 0; 1311 u32 grbm_reset = 0;
1125 1312
1126 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) 1313 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
1127 return 0; 1314 return;
1128 1315
1129 dev_info(rdev->dev, "GPU softreset \n"); 1316 dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
1130 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
1131 RREG32(GRBM_STATUS)); 1317 RREG32(GRBM_STATUS));
1132 dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n", 1318 dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
1133 RREG32(GRBM_STATUS_SE0)); 1319 RREG32(GRBM_STATUS_SE0));
1134 dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n", 1320 dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
1135 RREG32(GRBM_STATUS_SE1)); 1321 RREG32(GRBM_STATUS_SE1));
1136 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", 1322 dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
1137 RREG32(SRBM_STATUS)); 1323 RREG32(SRBM_STATUS));
1138 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", 1324 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
1139 RREG32(CP_STALLED_STAT1)); 1325 RREG32(CP_STALLED_STAT1));
@@ -1143,19 +1329,7 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
1143 RREG32(CP_BUSY_STAT)); 1329 RREG32(CP_BUSY_STAT));
1144 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", 1330 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
1145 RREG32(CP_STAT)); 1331 RREG32(CP_STAT));
1146 dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
1147 RREG32(0x14F8));
1148 dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
1149 RREG32(0x14D8));
1150 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
1151 RREG32(0x14FC));
1152 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1153 RREG32(0x14DC));
1154 1332
1155 evergreen_mc_stop(rdev, &save);
1156 if (evergreen_mc_wait_for_idle(rdev)) {
1157 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
1158 }
1159 /* Disable CP parsing/prefetching */ 1333 /* Disable CP parsing/prefetching */
1160 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT); 1334 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT);
1161 1335
@@ -1180,16 +1354,14 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
1180 udelay(50); 1354 udelay(50);
1181 WREG32(GRBM_SOFT_RESET, 0); 1355 WREG32(GRBM_SOFT_RESET, 0);
1182 (void)RREG32(GRBM_SOFT_RESET); 1356 (void)RREG32(GRBM_SOFT_RESET);
1183 /* Wait a little for things to settle down */
1184 udelay(50);
1185 1357
1186 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", 1358 dev_info(rdev->dev, " GRBM_STATUS = 0x%08X\n",
1187 RREG32(GRBM_STATUS)); 1359 RREG32(GRBM_STATUS));
1188 dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n", 1360 dev_info(rdev->dev, " GRBM_STATUS_SE0 = 0x%08X\n",
1189 RREG32(GRBM_STATUS_SE0)); 1361 RREG32(GRBM_STATUS_SE0));
1190 dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n", 1362 dev_info(rdev->dev, " GRBM_STATUS_SE1 = 0x%08X\n",
1191 RREG32(GRBM_STATUS_SE1)); 1363 RREG32(GRBM_STATUS_SE1));
1192 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", 1364 dev_info(rdev->dev, " SRBM_STATUS = 0x%08X\n",
1193 RREG32(SRBM_STATUS)); 1365 RREG32(SRBM_STATUS));
1194 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", 1366 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
1195 RREG32(CP_STALLED_STAT1)); 1367 RREG32(CP_STALLED_STAT1));
@@ -1199,13 +1371,107 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
1199 RREG32(CP_BUSY_STAT)); 1371 RREG32(CP_BUSY_STAT));
1200 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", 1372 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
1201 RREG32(CP_STAT)); 1373 RREG32(CP_STAT));
1374
1375}
1376
1377static void cayman_gpu_soft_reset_dma(struct radeon_device *rdev)
1378{
1379 u32 tmp;
1380
1381 if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
1382 return;
1383
1384 dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
1385 RREG32(DMA_STATUS_REG));
1386
1387 /* dma0 */
1388 tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
1389 tmp &= ~DMA_RB_ENABLE;
1390 WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
1391
1392 /* dma1 */
1393 tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
1394 tmp &= ~DMA_RB_ENABLE;
1395 WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
1396
1397 /* Reset dma */
1398 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
1399 RREG32(SRBM_SOFT_RESET);
1400 udelay(50);
1401 WREG32(SRBM_SOFT_RESET, 0);
1402
1403 dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
1404 RREG32(DMA_STATUS_REG));
1405
1406}
1407
1408static int cayman_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
1409{
1410 struct evergreen_mc_save save;
1411
1412 if (reset_mask == 0)
1413 return 0;
1414
1415 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
1416
1417 dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
1418 RREG32(0x14F8));
1419 dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
1420 RREG32(0x14D8));
1421 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
1422 RREG32(0x14FC));
1423 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
1424 RREG32(0x14DC));
1425
1426 evergreen_mc_stop(rdev, &save);
1427 if (evergreen_mc_wait_for_idle(rdev)) {
1428 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
1429 }
1430
1431 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
1432 cayman_gpu_soft_reset_gfx(rdev);
1433
1434 if (reset_mask & RADEON_RESET_DMA)
1435 cayman_gpu_soft_reset_dma(rdev);
1436
1437 /* Wait a little for things to settle down */
1438 udelay(50);
1439
1202 evergreen_mc_resume(rdev, &save); 1440 evergreen_mc_resume(rdev, &save);
1203 return 0; 1441 return 0;
1204} 1442}
1205 1443
1206int cayman_asic_reset(struct radeon_device *rdev) 1444int cayman_asic_reset(struct radeon_device *rdev)
1207{ 1445{
1208 return cayman_gpu_soft_reset(rdev); 1446 return cayman_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
1447 RADEON_RESET_COMPUTE |
1448 RADEON_RESET_DMA));
1449}
1450
1451/**
1452 * cayman_dma_is_lockup - Check if the DMA engine is locked up
1453 *
1454 * @rdev: radeon_device pointer
1455 * @ring: radeon_ring structure holding ring information
1456 *
1457 * Check if the async DMA engine is locked up (cayman-SI).
1458 * Returns true if the engine appears to be locked up, false if not.
1459 */
1460bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1461{
1462 u32 dma_status_reg;
1463
1464 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
1465 dma_status_reg = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
1466 else
1467 dma_status_reg = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
1468 if (dma_status_reg & DMA_IDLE) {
1469 radeon_ring_lockup_update(ring);
1470 return false;
1471 }
1472 /* force ring activities */
1473 radeon_ring_force_activity(rdev, ring);
1474 return radeon_ring_test_lockup(rdev, ring);
1209} 1475}
1210 1476
1211static int cayman_startup(struct radeon_device *rdev) 1477static int cayman_startup(struct radeon_device *rdev)
@@ -1289,6 +1555,18 @@ static int cayman_startup(struct radeon_device *rdev)
1289 return r; 1555 return r;
1290 } 1556 }
1291 1557
1558 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
1559 if (r) {
1560 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
1561 return r;
1562 }
1563
1564 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
1565 if (r) {
1566 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
1567 return r;
1568 }
1569
1292 /* Enable IRQ */ 1570 /* Enable IRQ */
1293 r = r600_irq_init(rdev); 1571 r = r600_irq_init(rdev);
1294 if (r) { 1572 if (r) {
@@ -1303,6 +1581,23 @@ static int cayman_startup(struct radeon_device *rdev)
1303 0, 0xfffff, RADEON_CP_PACKET2); 1581 0, 0xfffff, RADEON_CP_PACKET2);
1304 if (r) 1582 if (r)
1305 return r; 1583 return r;
1584
1585 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
1586 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
1587 DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
1588 DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
1589 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
1590 if (r)
1591 return r;
1592
1593 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
1594 r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
1595 DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
1596 DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
1597 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
1598 if (r)
1599 return r;
1600
1306 r = cayman_cp_load_microcode(rdev); 1601 r = cayman_cp_load_microcode(rdev);
1307 if (r) 1602 if (r)
1308 return r; 1603 return r;
@@ -1310,6 +1605,10 @@ static int cayman_startup(struct radeon_device *rdev)
1310 if (r) 1605 if (r)
1311 return r; 1606 return r;
1312 1607
1608 r = cayman_dma_resume(rdev);
1609 if (r)
1610 return r;
1611
1313 r = radeon_ib_pool_init(rdev); 1612 r = radeon_ib_pool_init(rdev);
1314 if (r) { 1613 if (r) {
1315 dev_err(rdev->dev, "IB initialization failed (%d).\n", r); 1614 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -1354,7 +1653,7 @@ int cayman_suspend(struct radeon_device *rdev)
1354{ 1653{
1355 r600_audio_fini(rdev); 1654 r600_audio_fini(rdev);
1356 cayman_cp_enable(rdev, false); 1655 cayman_cp_enable(rdev, false);
1357 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; 1656 cayman_dma_stop(rdev);
1358 evergreen_irq_suspend(rdev); 1657 evergreen_irq_suspend(rdev);
1359 radeon_wb_disable(rdev); 1658 radeon_wb_disable(rdev);
1360 cayman_pcie_gart_disable(rdev); 1659 cayman_pcie_gart_disable(rdev);
@@ -1421,6 +1720,14 @@ int cayman_init(struct radeon_device *rdev)
1421 ring->ring_obj = NULL; 1720 ring->ring_obj = NULL;
1422 r600_ring_init(rdev, ring, 1024 * 1024); 1721 r600_ring_init(rdev, ring, 1024 * 1024);
1423 1722
1723 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
1724 ring->ring_obj = NULL;
1725 r600_ring_init(rdev, ring, 64 * 1024);
1726
1727 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
1728 ring->ring_obj = NULL;
1729 r600_ring_init(rdev, ring, 64 * 1024);
1730
1424 rdev->ih.ring_obj = NULL; 1731 rdev->ih.ring_obj = NULL;
1425 r600_ih_ring_init(rdev, 64 * 1024); 1732 r600_ih_ring_init(rdev, 64 * 1024);
1426 1733
@@ -1433,6 +1740,7 @@ int cayman_init(struct radeon_device *rdev)
1433 if (r) { 1740 if (r) {
1434 dev_err(rdev->dev, "disabling GPU acceleration\n"); 1741 dev_err(rdev->dev, "disabling GPU acceleration\n");
1435 cayman_cp_fini(rdev); 1742 cayman_cp_fini(rdev);
1743 cayman_dma_fini(rdev);
1436 r600_irq_fini(rdev); 1744 r600_irq_fini(rdev);
1437 if (rdev->flags & RADEON_IS_IGP) 1745 if (rdev->flags & RADEON_IS_IGP)
1438 si_rlc_fini(rdev); 1746 si_rlc_fini(rdev);
@@ -1463,6 +1771,7 @@ void cayman_fini(struct radeon_device *rdev)
1463{ 1771{
1464 r600_blit_fini(rdev); 1772 r600_blit_fini(rdev);
1465 cayman_cp_fini(rdev); 1773 cayman_cp_fini(rdev);
1774 cayman_dma_fini(rdev);
1466 r600_irq_fini(rdev); 1775 r600_irq_fini(rdev);
1467 if (rdev->flags & RADEON_IS_IGP) 1776 if (rdev->flags & RADEON_IS_IGP)
1468 si_rlc_fini(rdev); 1777 si_rlc_fini(rdev);
@@ -1538,30 +1847,57 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
1538{ 1847{
1539 struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index]; 1848 struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
1540 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); 1849 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
1541 1850 uint64_t value;
1542 while (count) { 1851 unsigned ndw;
1543 unsigned ndw = 1 + count * 2; 1852
1544 if (ndw > 0x3FFF) 1853 if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
1545 ndw = 0x3FFF; 1854 while (count) {
1546 1855 ndw = 1 + count * 2;
1547 radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw)); 1856 if (ndw > 0x3FFF)
1548 radeon_ring_write(ring, pe); 1857 ndw = 0x3FFF;
1549 radeon_ring_write(ring, upper_32_bits(pe) & 0xff); 1858
1550 for (; ndw > 1; ndw -= 2, --count, pe += 8) { 1859 radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
1551 uint64_t value = 0; 1860 radeon_ring_write(ring, pe);
1552 if (flags & RADEON_VM_PAGE_SYSTEM) { 1861 radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
1553 value = radeon_vm_map_gart(rdev, addr); 1862 for (; ndw > 1; ndw -= 2, --count, pe += 8) {
1554 value &= 0xFFFFFFFFFFFFF000ULL; 1863 if (flags & RADEON_VM_PAGE_SYSTEM) {
1864 value = radeon_vm_map_gart(rdev, addr);
1865 value &= 0xFFFFFFFFFFFFF000ULL;
1866 } else if (flags & RADEON_VM_PAGE_VALID) {
1867 value = addr;
1868 } else {
1869 value = 0;
1870 }
1555 addr += incr; 1871 addr += incr;
1556 1872 value |= r600_flags;
1557 } else if (flags & RADEON_VM_PAGE_VALID) { 1873 radeon_ring_write(ring, value);
1558 value = addr; 1874 radeon_ring_write(ring, upper_32_bits(value));
1875 }
1876 }
1877 } else {
1878 while (count) {
1879 ndw = count * 2;
1880 if (ndw > 0xFFFFE)
1881 ndw = 0xFFFFE;
1882
1883 /* for non-physically contiguous pages (system) */
1884 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw));
1885 radeon_ring_write(ring, pe);
1886 radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
1887 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
1888 if (flags & RADEON_VM_PAGE_SYSTEM) {
1889 value = radeon_vm_map_gart(rdev, addr);
1890 value &= 0xFFFFFFFFFFFFF000ULL;
1891 } else if (flags & RADEON_VM_PAGE_VALID) {
1892 value = addr;
1893 } else {
1894 value = 0;
1895 }
1559 addr += incr; 1896 addr += incr;
1897 value |= r600_flags;
1898 radeon_ring_write(ring, value);
1899 radeon_ring_write(ring, upper_32_bits(value));
1560 } 1900 }
1561
1562 value |= r600_flags;
1563 radeon_ring_write(ring, value);
1564 radeon_ring_write(ring, upper_32_bits(value));
1565 } 1901 }
1566 } 1902 }
1567} 1903}
@@ -1596,3 +1932,26 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
1596 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 1932 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
1597 radeon_ring_write(ring, 0x0); 1933 radeon_ring_write(ring, 0x0);
1598} 1934}
1935
1936void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
1937{
1938 struct radeon_ring *ring = &rdev->ring[ridx];
1939
1940 if (vm == NULL)
1941 return;
1942
1943 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
1944 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
1945 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
1946
1947 /* flush hdp cache */
1948 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
1949 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
1950 radeon_ring_write(ring, 1);
1951
1952 /* bits 0-7 are the VM contexts0-7 */
1953 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
1954 radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
1955 radeon_ring_write(ring, 1 << vm->id);
1956}
1957
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index cbef6815907a..48e5022ee921 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -50,6 +50,24 @@
50#define VMID(x) (((x) & 0x7) << 0) 50#define VMID(x) (((x) & 0x7) << 0)
51#define SRBM_STATUS 0x0E50 51#define SRBM_STATUS 0x0E50
52 52
53#define SRBM_SOFT_RESET 0x0E60
54#define SOFT_RESET_BIF (1 << 1)
55#define SOFT_RESET_CG (1 << 2)
56#define SOFT_RESET_DC (1 << 5)
57#define SOFT_RESET_DMA1 (1 << 6)
58#define SOFT_RESET_GRBM (1 << 8)
59#define SOFT_RESET_HDP (1 << 9)
60#define SOFT_RESET_IH (1 << 10)
61#define SOFT_RESET_MC (1 << 11)
62#define SOFT_RESET_RLC (1 << 13)
63#define SOFT_RESET_ROM (1 << 14)
64#define SOFT_RESET_SEM (1 << 15)
65#define SOFT_RESET_VMC (1 << 17)
66#define SOFT_RESET_DMA (1 << 20)
67#define SOFT_RESET_TST (1 << 21)
68#define SOFT_RESET_REGBB (1 << 22)
69#define SOFT_RESET_ORB (1 << 23)
70
53#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470 71#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
54#define REQUEST_TYPE(x) (((x) & 0xf) << 0) 72#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
55#define RESPONSE_TYPE_MASK 0x000000F0 73#define RESPONSE_TYPE_MASK 0x000000F0
@@ -80,7 +98,18 @@
80#define VM_CONTEXT0_CNTL 0x1410 98#define VM_CONTEXT0_CNTL 0x1410
81#define ENABLE_CONTEXT (1 << 0) 99#define ENABLE_CONTEXT (1 << 0)
82#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1) 100#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
101#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3)
83#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4) 102#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
103#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6)
104#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7)
105#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9)
106#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10)
107#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12)
108#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13)
109#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15)
110#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
111#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
112#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
84#define VM_CONTEXT1_CNTL 0x1414 113#define VM_CONTEXT1_CNTL 0x1414
85#define VM_CONTEXT0_CNTL2 0x1430 114#define VM_CONTEXT0_CNTL2 0x1430
86#define VM_CONTEXT1_CNTL2 0x1434 115#define VM_CONTEXT1_CNTL2 0x1434
@@ -588,5 +617,61 @@
588#define PACKET3_SET_APPEND_CNT 0x75 617#define PACKET3_SET_APPEND_CNT 0x75
589#define PACKET3_ME_WRITE 0x7A 618#define PACKET3_ME_WRITE 0x7A
590 619
591#endif 620/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
621#define DMA0_REGISTER_OFFSET 0x0 /* not a register */
622#define DMA1_REGISTER_OFFSET 0x800 /* not a register */
623
624#define DMA_RB_CNTL 0xd000
625# define DMA_RB_ENABLE (1 << 0)
626# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
627# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
628# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
629# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
630# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
631#define DMA_RB_BASE 0xd004
632#define DMA_RB_RPTR 0xd008
633#define DMA_RB_WPTR 0xd00c
634
635#define DMA_RB_RPTR_ADDR_HI 0xd01c
636#define DMA_RB_RPTR_ADDR_LO 0xd020
637
638#define DMA_IB_CNTL 0xd024
639# define DMA_IB_ENABLE (1 << 0)
640# define DMA_IB_SWAP_ENABLE (1 << 4)
641# define CMD_VMID_FORCE (1 << 31)
642#define DMA_IB_RPTR 0xd028
643#define DMA_CNTL 0xd02c
644# define TRAP_ENABLE (1 << 0)
645# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
646# define SEM_WAIT_INT_ENABLE (1 << 2)
647# define DATA_SWAP_ENABLE (1 << 3)
648# define FENCE_SWAP_ENABLE (1 << 4)
649# define CTXEMPTY_INT_ENABLE (1 << 28)
650#define DMA_STATUS_REG 0xd034
651# define DMA_IDLE (1 << 0)
652#define DMA_SEM_INCOMPLETE_TIMER_CNTL 0xd044
653#define DMA_SEM_WAIT_FAIL_TIMER_CNTL 0xd048
654#define DMA_TILING_CONFIG 0xd0b8
655#define DMA_MODE 0xd0bc
656
657#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
658 (((t) & 0x1) << 23) | \
659 (((s) & 0x1) << 22) | \
660 (((n) & 0xFFFFF) << 0))
661
662#define DMA_IB_PACKET(cmd, vmid, n) ((((cmd) & 0xF) << 28) | \
663 (((vmid) & 0xF) << 20) | \
664 (((n) & 0xFFFFF) << 0))
665
666/* async DMA Packet types */
667#define DMA_PACKET_WRITE 0x2
668#define DMA_PACKET_COPY 0x3
669#define DMA_PACKET_INDIRECT_BUFFER 0x4
670#define DMA_PACKET_SEMAPHORE 0x5
671#define DMA_PACKET_FENCE 0x6
672#define DMA_PACKET_TRAP 0x7
673#define DMA_PACKET_SRBM_WRITE 0x9
674#define DMA_PACKET_CONSTANT_FILL 0xd
675#define DMA_PACKET_NOP 0xf
592 676
677#endif
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 376884f1bcd2..8ff7cac222dc 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -4135,23 +4135,36 @@ int r100_init(struct radeon_device *rdev)
4135 return 0; 4135 return 0;
4136} 4136}
4137 4137
4138uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) 4138uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
4139 bool always_indirect)
4139{ 4140{
4140 if (reg < rdev->rmmio_size) 4141 if (reg < rdev->rmmio_size && !always_indirect)
4141 return readl(((void __iomem *)rdev->rmmio) + reg); 4142 return readl(((void __iomem *)rdev->rmmio) + reg);
4142 else { 4143 else {
4144 unsigned long flags;
4145 uint32_t ret;
4146
4147 spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
4143 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); 4148 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
4144 return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); 4149 ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
4150 spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
4151
4152 return ret;
4145 } 4153 }
4146} 4154}
4147 4155
4148void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 4156void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
4157 bool always_indirect)
4149{ 4158{
4150 if (reg < rdev->rmmio_size) 4159 if (reg < rdev->rmmio_size && !always_indirect)
4151 writel(v, ((void __iomem *)rdev->rmmio) + reg); 4160 writel(v, ((void __iomem *)rdev->rmmio) + reg);
4152 else { 4161 else {
4162 unsigned long flags;
4163
4164 spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
4153 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); 4165 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
4154 writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); 4166 writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
4167 spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
4155 } 4168 }
4156} 4169}
4157 4170
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index cda280d157da..537e259b3837 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1258,9 +1258,8 @@ void r600_vram_scratch_fini(struct radeon_device *rdev)
1258 * reset, it's up to the caller to determine if the GPU needs one. We 1258 * reset, it's up to the caller to determine if the GPU needs one. We
1259 * might add an helper function to check that. 1259 * might add an helper function to check that.
1260 */ 1260 */
1261static int r600_gpu_soft_reset(struct radeon_device *rdev) 1261static void r600_gpu_soft_reset_gfx(struct radeon_device *rdev)
1262{ 1262{
1263 struct rv515_mc_save save;
1264 u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) | 1263 u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
1265 S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) | 1264 S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
1266 S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) | 1265 S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
@@ -1280,14 +1279,13 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev)
1280 u32 tmp; 1279 u32 tmp;
1281 1280
1282 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) 1281 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
1283 return 0; 1282 return;
1284 1283
1285 dev_info(rdev->dev, "GPU softreset \n"); 1284 dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
1286 dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
1287 RREG32(R_008010_GRBM_STATUS)); 1285 RREG32(R_008010_GRBM_STATUS));
1288 dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n", 1286 dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
1289 RREG32(R_008014_GRBM_STATUS2)); 1287 RREG32(R_008014_GRBM_STATUS2));
1290 dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n", 1288 dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
1291 RREG32(R_000E50_SRBM_STATUS)); 1289 RREG32(R_000E50_SRBM_STATUS));
1292 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", 1290 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
1293 RREG32(CP_STALLED_STAT1)); 1291 RREG32(CP_STALLED_STAT1));
@@ -1297,12 +1295,10 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev)
1297 RREG32(CP_BUSY_STAT)); 1295 RREG32(CP_BUSY_STAT));
1298 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", 1296 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
1299 RREG32(CP_STAT)); 1297 RREG32(CP_STAT));
1300 rv515_mc_stop(rdev, &save); 1298
1301 if (r600_mc_wait_for_idle(rdev)) {
1302 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
1303 }
1304 /* Disable CP parsing/prefetching */ 1299 /* Disable CP parsing/prefetching */
1305 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); 1300 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1301
1306 /* Check if any of the rendering block is busy and reset it */ 1302 /* Check if any of the rendering block is busy and reset it */
1307 if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) || 1303 if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
1308 (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) { 1304 (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
@@ -1332,13 +1328,12 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev)
1332 RREG32(R_008020_GRBM_SOFT_RESET); 1328 RREG32(R_008020_GRBM_SOFT_RESET);
1333 mdelay(15); 1329 mdelay(15);
1334 WREG32(R_008020_GRBM_SOFT_RESET, 0); 1330 WREG32(R_008020_GRBM_SOFT_RESET, 0);
1335 /* Wait a little for things to settle down */ 1331
1336 mdelay(1); 1332 dev_info(rdev->dev, " R_008010_GRBM_STATUS = 0x%08X\n",
1337 dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n",
1338 RREG32(R_008010_GRBM_STATUS)); 1333 RREG32(R_008010_GRBM_STATUS));
1339 dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n", 1334 dev_info(rdev->dev, " R_008014_GRBM_STATUS2 = 0x%08X\n",
1340 RREG32(R_008014_GRBM_STATUS2)); 1335 RREG32(R_008014_GRBM_STATUS2));
1341 dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n", 1336 dev_info(rdev->dev, " R_000E50_SRBM_STATUS = 0x%08X\n",
1342 RREG32(R_000E50_SRBM_STATUS)); 1337 RREG32(R_000E50_SRBM_STATUS));
1343 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n", 1338 dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
1344 RREG32(CP_STALLED_STAT1)); 1339 RREG32(CP_STALLED_STAT1));
@@ -1348,6 +1343,60 @@ static int r600_gpu_soft_reset(struct radeon_device *rdev)
1348 RREG32(CP_BUSY_STAT)); 1343 RREG32(CP_BUSY_STAT));
1349 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n", 1344 dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
1350 RREG32(CP_STAT)); 1345 RREG32(CP_STAT));
1346
1347}
1348
1349static void r600_gpu_soft_reset_dma(struct radeon_device *rdev)
1350{
1351 u32 tmp;
1352
1353 if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
1354 return;
1355
1356 dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
1357 RREG32(DMA_STATUS_REG));
1358
1359 /* Disable DMA */
1360 tmp = RREG32(DMA_RB_CNTL);
1361 tmp &= ~DMA_RB_ENABLE;
1362 WREG32(DMA_RB_CNTL, tmp);
1363
1364 /* Reset dma */
1365 if (rdev->family >= CHIP_RV770)
1366 WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
1367 else
1368 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
1369 RREG32(SRBM_SOFT_RESET);
1370 udelay(50);
1371 WREG32(SRBM_SOFT_RESET, 0);
1372
1373 dev_info(rdev->dev, " R_00D034_DMA_STATUS_REG = 0x%08X\n",
1374 RREG32(DMA_STATUS_REG));
1375}
1376
1377static int r600_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
1378{
1379 struct rv515_mc_save save;
1380
1381 if (reset_mask == 0)
1382 return 0;
1383
1384 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
1385
1386 rv515_mc_stop(rdev, &save);
1387 if (r600_mc_wait_for_idle(rdev)) {
1388 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
1389 }
1390
1391 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
1392 r600_gpu_soft_reset_gfx(rdev);
1393
1394 if (reset_mask & RADEON_RESET_DMA)
1395 r600_gpu_soft_reset_dma(rdev);
1396
1397 /* Wait a little for things to settle down */
1398 mdelay(1);
1399
1351 rv515_mc_resume(rdev, &save); 1400 rv515_mc_resume(rdev, &save);
1352 return 0; 1401 return 0;
1353} 1402}
@@ -1370,9 +1419,34 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1370 return radeon_ring_test_lockup(rdev, ring); 1419 return radeon_ring_test_lockup(rdev, ring);
1371} 1420}
1372 1421
1422/**
1423 * r600_dma_is_lockup - Check if the DMA engine is locked up
1424 *
1425 * @rdev: radeon_device pointer
1426 * @ring: radeon_ring structure holding ring information
1427 *
1428 * Check if the async DMA engine is locked up (r6xx-evergreen).
1429 * Returns true if the engine appears to be locked up, false if not.
1430 */
1431bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1432{
1433 u32 dma_status_reg;
1434
1435 dma_status_reg = RREG32(DMA_STATUS_REG);
1436 if (dma_status_reg & DMA_IDLE) {
1437 radeon_ring_lockup_update(ring);
1438 return false;
1439 }
1440 /* force ring activities */
1441 radeon_ring_force_activity(rdev, ring);
1442 return radeon_ring_test_lockup(rdev, ring);
1443}
1444
1373int r600_asic_reset(struct radeon_device *rdev) 1445int r600_asic_reset(struct radeon_device *rdev)
1374{ 1446{
1375 return r600_gpu_soft_reset(rdev); 1447 return r600_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
1448 RADEON_RESET_COMPUTE |
1449 RADEON_RESET_DMA));
1376} 1450}
1377 1451
1378u32 r6xx_remap_render_backend(struct radeon_device *rdev, 1452u32 r6xx_remap_render_backend(struct radeon_device *rdev,
@@ -1424,13 +1498,7 @@ u32 r6xx_remap_render_backend(struct radeon_device *rdev,
1424 1498
1425int r600_count_pipe_bits(uint32_t val) 1499int r600_count_pipe_bits(uint32_t val)
1426{ 1500{
1427 int i, ret = 0; 1501 return hweight32(val);
1428
1429 for (i = 0; i < 32; i++) {
1430 ret += val & 1;
1431 val >>= 1;
1432 }
1433 return ret;
1434} 1502}
1435 1503
1436static void r600_gpu_init(struct radeon_device *rdev) 1504static void r600_gpu_init(struct radeon_device *rdev)
@@ -1594,6 +1662,7 @@ static void r600_gpu_init(struct radeon_device *rdev)
1594 WREG32(GB_TILING_CONFIG, tiling_config); 1662 WREG32(GB_TILING_CONFIG, tiling_config);
1595 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff); 1663 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
1596 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff); 1664 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
1665 WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);
1597 1666
1598 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); 1667 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
1599 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); 1668 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
@@ -1871,6 +1940,7 @@ void r600_cp_stop(struct radeon_device *rdev)
1871 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 1940 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1872 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); 1941 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1873 WREG32(SCRATCH_UMSK, 0); 1942 WREG32(SCRATCH_UMSK, 0);
1943 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1874} 1944}
1875 1945
1876int r600_init_microcode(struct radeon_device *rdev) 1946int r600_init_microcode(struct radeon_device *rdev)
@@ -2196,6 +2266,128 @@ void r600_cp_fini(struct radeon_device *rdev)
2196 radeon_scratch_free(rdev, ring->rptr_save_reg); 2266 radeon_scratch_free(rdev, ring->rptr_save_reg);
2197} 2267}
2198 2268
2269/*
2270 * DMA
2271 * Starting with R600, the GPU has an asynchronous
2272 * DMA engine. The programming model is very similar
2273 * to the 3D engine (ring buffer, IBs, etc.), but the
2274 * DMA controller has it's own packet format that is
2275 * different form the PM4 format used by the 3D engine.
2276 * It supports copying data, writing embedded data,
2277 * solid fills, and a number of other things. It also
2278 * has support for tiling/detiling of buffers.
2279 */
2280/**
2281 * r600_dma_stop - stop the async dma engine
2282 *
2283 * @rdev: radeon_device pointer
2284 *
2285 * Stop the async dma engine (r6xx-evergreen).
2286 */
2287void r600_dma_stop(struct radeon_device *rdev)
2288{
2289 u32 rb_cntl = RREG32(DMA_RB_CNTL);
2290
2291 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
2292
2293 rb_cntl &= ~DMA_RB_ENABLE;
2294 WREG32(DMA_RB_CNTL, rb_cntl);
2295
2296 rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
2297}
2298
2299/**
2300 * r600_dma_resume - setup and start the async dma engine
2301 *
2302 * @rdev: radeon_device pointer
2303 *
2304 * Set up the DMA ring buffer and enable it. (r6xx-evergreen).
2305 * Returns 0 for success, error for failure.
2306 */
2307int r600_dma_resume(struct radeon_device *rdev)
2308{
2309 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
2310 u32 rb_cntl, dma_cntl;
2311 u32 rb_bufsz;
2312 int r;
2313
2314 /* Reset dma */
2315 if (rdev->family >= CHIP_RV770)
2316 WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
2317 else
2318 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
2319 RREG32(SRBM_SOFT_RESET);
2320 udelay(50);
2321 WREG32(SRBM_SOFT_RESET, 0);
2322
2323 WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
2324 WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
2325
2326 /* Set ring buffer size in dwords */
2327 rb_bufsz = drm_order(ring->ring_size / 4);
2328 rb_cntl = rb_bufsz << 1;
2329#ifdef __BIG_ENDIAN
2330 rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
2331#endif
2332 WREG32(DMA_RB_CNTL, rb_cntl);
2333
2334 /* Initialize the ring buffer's read and write pointers */
2335 WREG32(DMA_RB_RPTR, 0);
2336 WREG32(DMA_RB_WPTR, 0);
2337
2338 /* set the wb address whether it's enabled or not */
2339 WREG32(DMA_RB_RPTR_ADDR_HI,
2340 upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
2341 WREG32(DMA_RB_RPTR_ADDR_LO,
2342 ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
2343
2344 if (rdev->wb.enabled)
2345 rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
2346
2347 WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
2348
2349 /* enable DMA IBs */
2350 WREG32(DMA_IB_CNTL, DMA_IB_ENABLE);
2351
2352 dma_cntl = RREG32(DMA_CNTL);
2353 dma_cntl &= ~CTXEMPTY_INT_ENABLE;
2354 WREG32(DMA_CNTL, dma_cntl);
2355
2356 if (rdev->family >= CHIP_RV770)
2357 WREG32(DMA_MODE, 1);
2358
2359 ring->wptr = 0;
2360 WREG32(DMA_RB_WPTR, ring->wptr << 2);
2361
2362 ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
2363
2364 WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
2365
2366 ring->ready = true;
2367
2368 r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
2369 if (r) {
2370 ring->ready = false;
2371 return r;
2372 }
2373
2374 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
2375
2376 return 0;
2377}
2378
2379/**
2380 * r600_dma_fini - tear down the async dma engine
2381 *
2382 * @rdev: radeon_device pointer
2383 *
2384 * Stop the async dma engine and free the ring (r6xx-evergreen).
2385 */
2386void r600_dma_fini(struct radeon_device *rdev)
2387{
2388 r600_dma_stop(rdev);
2389 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
2390}
2199 2391
2200/* 2392/*
2201 * GPU scratch registers helpers function. 2393 * GPU scratch registers helpers function.
@@ -2252,6 +2444,64 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
2252 return r; 2444 return r;
2253} 2445}
2254 2446
2447/**
2448 * r600_dma_ring_test - simple async dma engine test
2449 *
2450 * @rdev: radeon_device pointer
2451 * @ring: radeon_ring structure holding ring information
2452 *
2453 * Test the DMA engine by writing using it to write an
2454 * value to memory. (r6xx-SI).
2455 * Returns 0 for success, error for failure.
2456 */
2457int r600_dma_ring_test(struct radeon_device *rdev,
2458 struct radeon_ring *ring)
2459{
2460 unsigned i;
2461 int r;
2462 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
2463 u32 tmp;
2464
2465 if (!ptr) {
2466 DRM_ERROR("invalid vram scratch pointer\n");
2467 return -EINVAL;
2468 }
2469
2470 tmp = 0xCAFEDEAD;
2471 writel(tmp, ptr);
2472
2473 r = radeon_ring_lock(rdev, ring, 4);
2474 if (r) {
2475 DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
2476 return r;
2477 }
2478 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
2479 radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
2480 radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
2481 radeon_ring_write(ring, 0xDEADBEEF);
2482 radeon_ring_unlock_commit(rdev, ring);
2483
2484 for (i = 0; i < rdev->usec_timeout; i++) {
2485 tmp = readl(ptr);
2486 if (tmp == 0xDEADBEEF)
2487 break;
2488 DRM_UDELAY(1);
2489 }
2490
2491 if (i < rdev->usec_timeout) {
2492 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
2493 } else {
2494 DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
2495 ring->idx, tmp);
2496 r = -EINVAL;
2497 }
2498 return r;
2499}
2500
2501/*
2502 * CP fences/semaphores
2503 */
2504
2255void r600_fence_ring_emit(struct radeon_device *rdev, 2505void r600_fence_ring_emit(struct radeon_device *rdev,
2256 struct radeon_fence *fence) 2506 struct radeon_fence *fence)
2257{ 2507{
@@ -2315,6 +2565,59 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev,
2315 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel); 2565 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
2316} 2566}
2317 2567
2568/*
2569 * DMA fences/semaphores
2570 */
2571
2572/**
2573 * r600_dma_fence_ring_emit - emit a fence on the DMA ring
2574 *
2575 * @rdev: radeon_device pointer
2576 * @fence: radeon fence object
2577 *
2578 * Add a DMA fence packet to the ring to write
2579 * the fence seq number and DMA trap packet to generate
2580 * an interrupt if needed (r6xx-r7xx).
2581 */
2582void r600_dma_fence_ring_emit(struct radeon_device *rdev,
2583 struct radeon_fence *fence)
2584{
2585 struct radeon_ring *ring = &rdev->ring[fence->ring];
2586 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
2587
2588 /* write the fence */
2589 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
2590 radeon_ring_write(ring, addr & 0xfffffffc);
2591 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
2592 radeon_ring_write(ring, lower_32_bits(fence->seq));
2593 /* generate an interrupt */
2594 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
2595}
2596
2597/**
2598 * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
2599 *
2600 * @rdev: radeon_device pointer
2601 * @ring: radeon_ring structure holding ring information
2602 * @semaphore: radeon semaphore object
2603 * @emit_wait: wait or signal semaphore
2604 *
2605 * Add a DMA semaphore packet to the ring wait on or signal
2606 * other rings (r6xx-SI).
2607 */
2608void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
2609 struct radeon_ring *ring,
2610 struct radeon_semaphore *semaphore,
2611 bool emit_wait)
2612{
2613 u64 addr = semaphore->gpu_addr;
2614 u32 s = emit_wait ? 0 : 1;
2615
2616 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
2617 radeon_ring_write(ring, addr & 0xfffffffc);
2618 radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
2619}
2620
2318int r600_copy_blit(struct radeon_device *rdev, 2621int r600_copy_blit(struct radeon_device *rdev,
2319 uint64_t src_offset, 2622 uint64_t src_offset,
2320 uint64_t dst_offset, 2623 uint64_t dst_offset,
@@ -2334,6 +2637,80 @@ int r600_copy_blit(struct radeon_device *rdev,
2334 return 0; 2637 return 0;
2335} 2638}
2336 2639
2640/**
2641 * r600_copy_dma - copy pages using the DMA engine
2642 *
2643 * @rdev: radeon_device pointer
2644 * @src_offset: src GPU address
2645 * @dst_offset: dst GPU address
2646 * @num_gpu_pages: number of GPU pages to xfer
2647 * @fence: radeon fence object
2648 *
2649 * Copy GPU paging using the DMA engine (r6xx).
2650 * Used by the radeon ttm implementation to move pages if
2651 * registered as the asic copy callback.
2652 */
2653int r600_copy_dma(struct radeon_device *rdev,
2654 uint64_t src_offset, uint64_t dst_offset,
2655 unsigned num_gpu_pages,
2656 struct radeon_fence **fence)
2657{
2658 struct radeon_semaphore *sem = NULL;
2659 int ring_index = rdev->asic->copy.dma_ring_index;
2660 struct radeon_ring *ring = &rdev->ring[ring_index];
2661 u32 size_in_dw, cur_size_in_dw;
2662 int i, num_loops;
2663 int r = 0;
2664
2665 r = radeon_semaphore_create(rdev, &sem);
2666 if (r) {
2667 DRM_ERROR("radeon: moving bo (%d).\n", r);
2668 return r;
2669 }
2670
2671 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
2672 num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
2673 r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
2674 if (r) {
2675 DRM_ERROR("radeon: moving bo (%d).\n", r);
2676 radeon_semaphore_free(rdev, &sem, NULL);
2677 return r;
2678 }
2679
2680 if (radeon_fence_need_sync(*fence, ring->idx)) {
2681 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
2682 ring->idx);
2683 radeon_fence_note_sync(*fence, ring->idx);
2684 } else {
2685 radeon_semaphore_free(rdev, &sem, NULL);
2686 }
2687
2688 for (i = 0; i < num_loops; i++) {
2689 cur_size_in_dw = size_in_dw;
2690 if (cur_size_in_dw > 0xFFFE)
2691 cur_size_in_dw = 0xFFFE;
2692 size_in_dw -= cur_size_in_dw;
2693 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
2694 radeon_ring_write(ring, dst_offset & 0xfffffffc);
2695 radeon_ring_write(ring, src_offset & 0xfffffffc);
2696 radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
2697 (upper_32_bits(src_offset) & 0xff)));
2698 src_offset += cur_size_in_dw * 4;
2699 dst_offset += cur_size_in_dw * 4;
2700 }
2701
2702 r = radeon_fence_emit(rdev, fence, ring->idx);
2703 if (r) {
2704 radeon_ring_unlock_undo(rdev, ring);
2705 return r;
2706 }
2707
2708 radeon_ring_unlock_commit(rdev, ring);
2709 radeon_semaphore_free(rdev, &sem, *fence);
2710
2711 return r;
2712}
2713
2337int r600_set_surface_reg(struct radeon_device *rdev, int reg, 2714int r600_set_surface_reg(struct radeon_device *rdev, int reg,
2338 uint32_t tiling_flags, uint32_t pitch, 2715 uint32_t tiling_flags, uint32_t pitch,
2339 uint32_t offset, uint32_t obj_size) 2716 uint32_t offset, uint32_t obj_size)
@@ -2349,7 +2726,7 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
2349 2726
2350static int r600_startup(struct radeon_device *rdev) 2727static int r600_startup(struct radeon_device *rdev)
2351{ 2728{
2352 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 2729 struct radeon_ring *ring;
2353 int r; 2730 int r;
2354 2731
2355 /* enable pcie gen2 link */ 2732 /* enable pcie gen2 link */
@@ -2394,6 +2771,12 @@ static int r600_startup(struct radeon_device *rdev)
2394 return r; 2771 return r;
2395 } 2772 }
2396 2773
2774 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
2775 if (r) {
2776 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
2777 return r;
2778 }
2779
2397 /* Enable IRQ */ 2780 /* Enable IRQ */
2398 r = r600_irq_init(rdev); 2781 r = r600_irq_init(rdev);
2399 if (r) { 2782 if (r) {
@@ -2403,12 +2786,20 @@ static int r600_startup(struct radeon_device *rdev)
2403 } 2786 }
2404 r600_irq_set(rdev); 2787 r600_irq_set(rdev);
2405 2788
2789 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2406 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, 2790 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
2407 R600_CP_RB_RPTR, R600_CP_RB_WPTR, 2791 R600_CP_RB_RPTR, R600_CP_RB_WPTR,
2408 0, 0xfffff, RADEON_CP_PACKET2); 2792 0, 0xfffff, RADEON_CP_PACKET2);
2793 if (r)
2794 return r;
2409 2795
2796 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
2797 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
2798 DMA_RB_RPTR, DMA_RB_WPTR,
2799 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
2410 if (r) 2800 if (r)
2411 return r; 2801 return r;
2802
2412 r = r600_cp_load_microcode(rdev); 2803 r = r600_cp_load_microcode(rdev);
2413 if (r) 2804 if (r)
2414 return r; 2805 return r;
@@ -2416,6 +2807,10 @@ static int r600_startup(struct radeon_device *rdev)
2416 if (r) 2807 if (r)
2417 return r; 2808 return r;
2418 2809
2810 r = r600_dma_resume(rdev);
2811 if (r)
2812 return r;
2813
2419 r = radeon_ib_pool_init(rdev); 2814 r = radeon_ib_pool_init(rdev);
2420 if (r) { 2815 if (r) {
2421 dev_err(rdev->dev, "IB initialization failed (%d).\n", r); 2816 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -2471,7 +2866,7 @@ int r600_suspend(struct radeon_device *rdev)
2471{ 2866{
2472 r600_audio_fini(rdev); 2867 r600_audio_fini(rdev);
2473 r600_cp_stop(rdev); 2868 r600_cp_stop(rdev);
2474 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; 2869 r600_dma_stop(rdev);
2475 r600_irq_suspend(rdev); 2870 r600_irq_suspend(rdev);
2476 radeon_wb_disable(rdev); 2871 radeon_wb_disable(rdev);
2477 r600_pcie_gart_disable(rdev); 2872 r600_pcie_gart_disable(rdev);
@@ -2544,6 +2939,9 @@ int r600_init(struct radeon_device *rdev)
2544 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; 2939 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
2545 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); 2940 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
2546 2941
2942 rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
2943 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
2944
2547 rdev->ih.ring_obj = NULL; 2945 rdev->ih.ring_obj = NULL;
2548 r600_ih_ring_init(rdev, 64 * 1024); 2946 r600_ih_ring_init(rdev, 64 * 1024);
2549 2947
@@ -2556,6 +2954,7 @@ int r600_init(struct radeon_device *rdev)
2556 if (r) { 2954 if (r) {
2557 dev_err(rdev->dev, "disabling GPU acceleration\n"); 2955 dev_err(rdev->dev, "disabling GPU acceleration\n");
2558 r600_cp_fini(rdev); 2956 r600_cp_fini(rdev);
2957 r600_dma_fini(rdev);
2559 r600_irq_fini(rdev); 2958 r600_irq_fini(rdev);
2560 radeon_wb_fini(rdev); 2959 radeon_wb_fini(rdev);
2561 radeon_ib_pool_fini(rdev); 2960 radeon_ib_pool_fini(rdev);
@@ -2572,6 +2971,7 @@ void r600_fini(struct radeon_device *rdev)
2572 r600_audio_fini(rdev); 2971 r600_audio_fini(rdev);
2573 r600_blit_fini(rdev); 2972 r600_blit_fini(rdev);
2574 r600_cp_fini(rdev); 2973 r600_cp_fini(rdev);
2974 r600_dma_fini(rdev);
2575 r600_irq_fini(rdev); 2975 r600_irq_fini(rdev);
2576 radeon_wb_fini(rdev); 2976 radeon_wb_fini(rdev);
2577 radeon_ib_pool_fini(rdev); 2977 radeon_ib_pool_fini(rdev);
@@ -2674,6 +3074,104 @@ free_scratch:
2674 return r; 3074 return r;
2675} 3075}
2676 3076
3077/**
3078 * r600_dma_ib_test - test an IB on the DMA engine
3079 *
3080 * @rdev: radeon_device pointer
3081 * @ring: radeon_ring structure holding ring information
3082 *
3083 * Test a simple IB in the DMA ring (r6xx-SI).
3084 * Returns 0 on success, error on failure.
3085 */
3086int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3087{
3088 struct radeon_ib ib;
3089 unsigned i;
3090 int r;
3091 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
3092 u32 tmp = 0;
3093
3094 if (!ptr) {
3095 DRM_ERROR("invalid vram scratch pointer\n");
3096 return -EINVAL;
3097 }
3098
3099 tmp = 0xCAFEDEAD;
3100 writel(tmp, ptr);
3101
3102 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
3103 if (r) {
3104 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3105 return r;
3106 }
3107
3108 ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
3109 ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
3110 ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
3111 ib.ptr[3] = 0xDEADBEEF;
3112 ib.length_dw = 4;
3113
3114 r = radeon_ib_schedule(rdev, &ib, NULL);
3115 if (r) {
3116 radeon_ib_free(rdev, &ib);
3117 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3118 return r;
3119 }
3120 r = radeon_fence_wait(ib.fence, false);
3121 if (r) {
3122 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3123 return r;
3124 }
3125 for (i = 0; i < rdev->usec_timeout; i++) {
3126 tmp = readl(ptr);
3127 if (tmp == 0xDEADBEEF)
3128 break;
3129 DRM_UDELAY(1);
3130 }
3131 if (i < rdev->usec_timeout) {
3132 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
3133 } else {
3134 DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
3135 r = -EINVAL;
3136 }
3137 radeon_ib_free(rdev, &ib);
3138 return r;
3139}
3140
3141/**
3142 * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
3143 *
3144 * @rdev: radeon_device pointer
3145 * @ib: IB object to schedule
3146 *
3147 * Schedule an IB in the DMA ring (r6xx-r7xx).
3148 */
3149void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3150{
3151 struct radeon_ring *ring = &rdev->ring[ib->ring];
3152
3153 if (rdev->wb.enabled) {
3154 u32 next_rptr = ring->wptr + 4;
3155 while ((next_rptr & 7) != 5)
3156 next_rptr++;
3157 next_rptr += 3;
3158 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
3159 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3160 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
3161 radeon_ring_write(ring, next_rptr);
3162 }
3163
3164 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
3165 * Pad as necessary with NOPs.
3166 */
3167 while ((ring->wptr & 7) != 5)
3168 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
3169 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
3170 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
3171 radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
3172
3173}
3174
2677/* 3175/*
2678 * Interrupts 3176 * Interrupts
2679 * 3177 *
@@ -2865,6 +3363,8 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
2865 u32 tmp; 3363 u32 tmp;
2866 3364
2867 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 3365 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
3366 tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
3367 WREG32(DMA_CNTL, tmp);
2868 WREG32(GRBM_INT_CNTL, 0); 3368 WREG32(GRBM_INT_CNTL, 0);
2869 WREG32(DxMODE_INT_MASK, 0); 3369 WREG32(DxMODE_INT_MASK, 0);
2870 WREG32(D1GRPH_INTERRUPT_CONTROL, 0); 3370 WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
@@ -3006,6 +3506,7 @@ int r600_irq_set(struct radeon_device *rdev)
3006 u32 grbm_int_cntl = 0; 3506 u32 grbm_int_cntl = 0;
3007 u32 hdmi0, hdmi1; 3507 u32 hdmi0, hdmi1;
3008 u32 d1grph = 0, d2grph = 0; 3508 u32 d1grph = 0, d2grph = 0;
3509 u32 dma_cntl;
3009 3510
3010 if (!rdev->irq.installed) { 3511 if (!rdev->irq.installed) {
3011 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); 3512 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3040,12 +3541,19 @@ int r600_irq_set(struct radeon_device *rdev)
3040 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3541 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3041 hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3542 hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3042 } 3543 }
3544 dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
3043 3545
3044 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { 3546 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
3045 DRM_DEBUG("r600_irq_set: sw int\n"); 3547 DRM_DEBUG("r600_irq_set: sw int\n");
3046 cp_int_cntl |= RB_INT_ENABLE; 3548 cp_int_cntl |= RB_INT_ENABLE;
3047 cp_int_cntl |= TIME_STAMP_INT_ENABLE; 3549 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
3048 } 3550 }
3551
3552 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
3553 DRM_DEBUG("r600_irq_set: sw int dma\n");
3554 dma_cntl |= TRAP_ENABLE;
3555 }
3556
3049 if (rdev->irq.crtc_vblank_int[0] || 3557 if (rdev->irq.crtc_vblank_int[0] ||
3050 atomic_read(&rdev->irq.pflip[0])) { 3558 atomic_read(&rdev->irq.pflip[0])) {
3051 DRM_DEBUG("r600_irq_set: vblank 0\n"); 3559 DRM_DEBUG("r600_irq_set: vblank 0\n");
@@ -3090,6 +3598,7 @@ int r600_irq_set(struct radeon_device *rdev)
3090 } 3598 }
3091 3599
3092 WREG32(CP_INT_CNTL, cp_int_cntl); 3600 WREG32(CP_INT_CNTL, cp_int_cntl);
3601 WREG32(DMA_CNTL, dma_cntl);
3093 WREG32(DxMODE_INT_MASK, mode_int); 3602 WREG32(DxMODE_INT_MASK, mode_int);
3094 WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph); 3603 WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
3095 WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph); 3604 WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
@@ -3469,6 +3978,10 @@ restart_ih:
3469 DRM_DEBUG("IH: CP EOP\n"); 3978 DRM_DEBUG("IH: CP EOP\n");
3470 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); 3979 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3471 break; 3980 break;
3981 case 224: /* DMA trap event */
3982 DRM_DEBUG("IH: DMA trap\n");
3983 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
3984 break;
3472 case 233: /* GUI IDLE */ 3985 case 233: /* GUI IDLE */
3473 DRM_DEBUG("IH: GUI idle\n"); 3986 DRM_DEBUG("IH: GUI idle\n");
3474 break; 3987 break;
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 2514123d2d00..be85f75aedda 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -721,12 +721,7 @@ static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
721 721
722static int r600_count_pipe_bits(uint32_t val) 722static int r600_count_pipe_bits(uint32_t val)
723{ 723{
724 int i, ret = 0; 724 return hweight32(val);
725 for (i = 0; i < 32; i++) {
726 ret += val & 1;
727 val >>= 1;
728 }
729 return ret;
730} 725}
731 726
732static void r600_gfx_init(struct drm_device *dev, 727static void r600_gfx_init(struct drm_device *dev,
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 211c40252fe0..69ec24ab8d63 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -657,87 +657,30 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
657 /* nby is npipes htiles aligned == npipes * 8 pixel aligned */ 657 /* nby is npipes htiles aligned == npipes * 8 pixel aligned */
658 nby = round_up(nby, track->npipes * 8); 658 nby = round_up(nby, track->npipes * 8);
659 } else { 659 } else {
660 /* htile widht & nby (8 or 4) make 2 bits number */ 660 /* always assume 8x8 htile */
661 tmp = track->htile_surface & 3;
662 /* align is htile align * 8, htile align vary according to 661 /* align is htile align * 8, htile align vary according to
663 * number of pipe and tile width and nby 662 * number of pipe and tile width and nby
664 */ 663 */
665 switch (track->npipes) { 664 switch (track->npipes) {
666 case 8: 665 case 8:
667 switch (tmp) { 666 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
668 case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ 667 nbx = round_up(nbx, 64 * 8);
669 nbx = round_up(nbx, 64 * 8); 668 nby = round_up(nby, 64 * 8);
670 nby = round_up(nby, 64 * 8);
671 break;
672 case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
673 case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
674 nbx = round_up(nbx, 64 * 8);
675 nby = round_up(nby, 32 * 8);
676 break;
677 case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
678 nbx = round_up(nbx, 32 * 8);
679 nby = round_up(nby, 32 * 8);
680 break;
681 default:
682 return -EINVAL;
683 }
684 break; 669 break;
685 case 4: 670 case 4:
686 switch (tmp) { 671 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
687 case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ 672 nbx = round_up(nbx, 64 * 8);
688 nbx = round_up(nbx, 64 * 8); 673 nby = round_up(nby, 32 * 8);
689 nby = round_up(nby, 32 * 8);
690 break;
691 case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
692 case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
693 nbx = round_up(nbx, 32 * 8);
694 nby = round_up(nby, 32 * 8);
695 break;
696 case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
697 nbx = round_up(nbx, 32 * 8);
698 nby = round_up(nby, 16 * 8);
699 break;
700 default:
701 return -EINVAL;
702 }
703 break; 674 break;
704 case 2: 675 case 2:
705 switch (tmp) { 676 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
706 case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ 677 nbx = round_up(nbx, 32 * 8);
707 nbx = round_up(nbx, 32 * 8); 678 nby = round_up(nby, 32 * 8);
708 nby = round_up(nby, 32 * 8);
709 break;
710 case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
711 case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
712 nbx = round_up(nbx, 32 * 8);
713 nby = round_up(nby, 16 * 8);
714 break;
715 case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
716 nbx = round_up(nbx, 16 * 8);
717 nby = round_up(nby, 16 * 8);
718 break;
719 default:
720 return -EINVAL;
721 }
722 break; 679 break;
723 case 1: 680 case 1:
724 switch (tmp) { 681 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
725 case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ 682 nbx = round_up(nbx, 32 * 8);
726 nbx = round_up(nbx, 32 * 8); 683 nby = round_up(nby, 16 * 8);
727 nby = round_up(nby, 16 * 8);
728 break;
729 case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
730 case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
731 nbx = round_up(nbx, 16 * 8);
732 nby = round_up(nby, 16 * 8);
733 break;
734 case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
735 nbx = round_up(nbx, 16 * 8);
736 nby = round_up(nby, 8 * 8);
737 break;
738 default:
739 return -EINVAL;
740 }
741 break; 684 break;
742 default: 685 default:
743 dev_warn(p->dev, "%s:%d invalid num pipes %d\n", 686 dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
@@ -746,9 +689,10 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
746 } 689 }
747 } 690 }
748 /* compute number of htile */ 691 /* compute number of htile */
749 nbx = G_028D24_HTILE_WIDTH(track->htile_surface) ? nbx / 8 : nbx / 4; 692 nbx = nbx >> 3;
750 nby = G_028D24_HTILE_HEIGHT(track->htile_surface) ? nby / 8 : nby / 4; 693 nby = nby >> 3;
751 size = nbx * nby * 4; 694 /* size must be aligned on npipes * 2K boundary */
695 size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
752 size += track->htile_offset; 696 size += track->htile_offset;
753 697
754 if (size > radeon_bo_size(track->htile_bo)) { 698 if (size > radeon_bo_size(track->htile_bo)) {
@@ -1492,6 +1436,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1492 break; 1436 break;
1493 case DB_HTILE_SURFACE: 1437 case DB_HTILE_SURFACE:
1494 track->htile_surface = radeon_get_ib_value(p, idx); 1438 track->htile_surface = radeon_get_ib_value(p, idx);
1439 /* force 8x8 htile width and height */
1440 ib[idx] |= 3;
1495 track->db_dirty = true; 1441 track->db_dirty = true;
1496 break; 1442 break;
1497 case SQ_PGM_START_FS: 1443 case SQ_PGM_START_FS:
@@ -1949,6 +1895,78 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
1949 ib[idx+2] = upper_32_bits(offset) & 0xff; 1895 ib[idx+2] = upper_32_bits(offset) & 0xff;
1950 } 1896 }
1951 break; 1897 break;
1898 case PACKET3_CP_DMA:
1899 {
1900 u32 command, size;
1901 u64 offset, tmp;
1902 if (pkt->count != 4) {
1903 DRM_ERROR("bad CP DMA\n");
1904 return -EINVAL;
1905 }
1906 command = radeon_get_ib_value(p, idx+4);
1907 size = command & 0x1fffff;
1908 if (command & PACKET3_CP_DMA_CMD_SAS) {
1909 /* src address space is register */
1910 DRM_ERROR("CP DMA SAS not supported\n");
1911 return -EINVAL;
1912 } else {
1913 if (command & PACKET3_CP_DMA_CMD_SAIC) {
1914 DRM_ERROR("CP DMA SAIC only supported for registers\n");
1915 return -EINVAL;
1916 }
1917 /* src address space is memory */
1918 r = r600_cs_packet_next_reloc(p, &reloc);
1919 if (r) {
1920 DRM_ERROR("bad CP DMA SRC\n");
1921 return -EINVAL;
1922 }
1923
1924 tmp = radeon_get_ib_value(p, idx) +
1925 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
1926
1927 offset = reloc->lobj.gpu_offset + tmp;
1928
1929 if ((tmp + size) > radeon_bo_size(reloc->robj)) {
1930 dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
1931 tmp + size, radeon_bo_size(reloc->robj));
1932 return -EINVAL;
1933 }
1934
1935 ib[idx] = offset;
1936 ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
1937 }
1938 if (command & PACKET3_CP_DMA_CMD_DAS) {
1939 /* dst address space is register */
1940 DRM_ERROR("CP DMA DAS not supported\n");
1941 return -EINVAL;
1942 } else {
1943 /* dst address space is memory */
1944 if (command & PACKET3_CP_DMA_CMD_DAIC) {
1945 DRM_ERROR("CP DMA DAIC only supported for registers\n");
1946 return -EINVAL;
1947 }
1948 r = r600_cs_packet_next_reloc(p, &reloc);
1949 if (r) {
1950 DRM_ERROR("bad CP DMA DST\n");
1951 return -EINVAL;
1952 }
1953
1954 tmp = radeon_get_ib_value(p, idx+2) +
1955 ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
1956
1957 offset = reloc->lobj.gpu_offset + tmp;
1958
1959 if ((tmp + size) > radeon_bo_size(reloc->robj)) {
1960 dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
1961 tmp + size, radeon_bo_size(reloc->robj));
1962 return -EINVAL;
1963 }
1964
1965 ib[idx+2] = offset;
1966 ib[idx+3] = upper_32_bits(offset) & 0xff;
1967 }
1968 break;
1969 }
1952 case PACKET3_SURFACE_SYNC: 1970 case PACKET3_SURFACE_SYNC:
1953 if (pkt->count != 3) { 1971 if (pkt->count != 3) {
1954 DRM_ERROR("bad SURFACE_SYNC\n"); 1972 DRM_ERROR("bad SURFACE_SYNC\n");
@@ -2276,6 +2294,35 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
2276 ib[idx+4] = upper_32_bits(offset) & 0xff; 2294 ib[idx+4] = upper_32_bits(offset) & 0xff;
2277 } 2295 }
2278 break; 2296 break;
2297 case PACKET3_MEM_WRITE:
2298 {
2299 u64 offset;
2300
2301 if (pkt->count != 3) {
2302 DRM_ERROR("bad MEM_WRITE (invalid count)\n");
2303 return -EINVAL;
2304 }
2305 r = r600_cs_packet_next_reloc(p, &reloc);
2306 if (r) {
2307 DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
2308 return -EINVAL;
2309 }
2310 offset = radeon_get_ib_value(p, idx+0);
2311 offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
2312 if (offset & 0x7) {
2313 DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
2314 return -EINVAL;
2315 }
2316 if ((offset + 8) > radeon_bo_size(reloc->robj)) {
2317 DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
2318 offset + 8, radeon_bo_size(reloc->robj));
2319 return -EINVAL;
2320 }
2321 offset += reloc->lobj.gpu_offset;
2322 ib[idx+0] = offset;
2323 ib[idx+1] = upper_32_bits(offset) & 0xff;
2324 break;
2325 }
2279 case PACKET3_COPY_DW: 2326 case PACKET3_COPY_DW:
2280 if (pkt->count != 4) { 2327 if (pkt->count != 4) {
2281 DRM_ERROR("bad COPY_DW (invalid count)\n"); 2328 DRM_ERROR("bad COPY_DW (invalid count)\n");
@@ -2429,8 +2476,10 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
2429 kfree(parser->relocs); 2476 kfree(parser->relocs);
2430 for (i = 0; i < parser->nchunks; i++) { 2477 for (i = 0; i < parser->nchunks; i++) {
2431 kfree(parser->chunks[i].kdata); 2478 kfree(parser->chunks[i].kdata);
2432 kfree(parser->chunks[i].kpage[0]); 2479 if (parser->rdev && (parser->rdev->flags & RADEON_IS_AGP)) {
2433 kfree(parser->chunks[i].kpage[1]); 2480 kfree(parser->chunks[i].kpage[0]);
2481 kfree(parser->chunks[i].kpage[1]);
2482 }
2434 } 2483 }
2435 kfree(parser->chunks); 2484 kfree(parser->chunks);
2436 kfree(parser->chunks_array); 2485 kfree(parser->chunks_array);
@@ -2496,3 +2545,209 @@ void r600_cs_legacy_init(void)
2496{ 2545{
2497 r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm; 2546 r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
2498} 2547}
2548
2549/*
2550 * DMA
2551 */
2552/**
2553 * r600_dma_cs_next_reloc() - parse next reloc
2554 * @p: parser structure holding parsing context.
2555 * @cs_reloc: reloc informations
2556 *
2557 * Return the next reloc, do bo validation and compute
2558 * GPU offset using the provided start.
2559 **/
2560int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
2561 struct radeon_cs_reloc **cs_reloc)
2562{
2563 struct radeon_cs_chunk *relocs_chunk;
2564 unsigned idx;
2565
2566 *cs_reloc = NULL;
2567 if (p->chunk_relocs_idx == -1) {
2568 DRM_ERROR("No relocation chunk !\n");
2569 return -EINVAL;
2570 }
2571 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
2572 idx = p->dma_reloc_idx;
2573 if (idx >= p->nrelocs) {
2574 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
2575 idx, p->nrelocs);
2576 return -EINVAL;
2577 }
2578 *cs_reloc = p->relocs_ptr[idx];
2579 p->dma_reloc_idx++;
2580 return 0;
2581}
2582
2583#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
2584#define GET_DMA_COUNT(h) ((h) & 0x0000ffff)
2585#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
2586
2587/**
2588 * r600_dma_cs_parse() - parse the DMA IB
2589 * @p: parser structure holding parsing context.
2590 *
2591 * Parses the DMA IB from the CS ioctl and updates
2592 * the GPU addresses based on the reloc information and
2593 * checks for errors. (R6xx-R7xx)
2594 * Returns 0 for success and an error on failure.
2595 **/
2596int r600_dma_cs_parse(struct radeon_cs_parser *p)
2597{
2598 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
2599 struct radeon_cs_reloc *src_reloc, *dst_reloc;
2600 u32 header, cmd, count, tiled;
2601 volatile u32 *ib = p->ib.ptr;
2602 u32 idx, idx_value;
2603 u64 src_offset, dst_offset;
2604 int r;
2605
2606 do {
2607 if (p->idx >= ib_chunk->length_dw) {
2608 DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
2609 p->idx, ib_chunk->length_dw);
2610 return -EINVAL;
2611 }
2612 idx = p->idx;
2613 header = radeon_get_ib_value(p, idx);
2614 cmd = GET_DMA_CMD(header);
2615 count = GET_DMA_COUNT(header);
2616 tiled = GET_DMA_T(header);
2617
2618 switch (cmd) {
2619 case DMA_PACKET_WRITE:
2620 r = r600_dma_cs_next_reloc(p, &dst_reloc);
2621 if (r) {
2622 DRM_ERROR("bad DMA_PACKET_WRITE\n");
2623 return -EINVAL;
2624 }
2625 if (tiled) {
2626 dst_offset = ib[idx+1];
2627 dst_offset <<= 8;
2628
2629 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2630 p->idx += count + 5;
2631 } else {
2632 dst_offset = ib[idx+1];
2633 dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
2634
2635 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2636 ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2637 p->idx += count + 3;
2638 }
2639 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2640 dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
2641 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2642 return -EINVAL;
2643 }
2644 break;
2645 case DMA_PACKET_COPY:
2646 r = r600_dma_cs_next_reloc(p, &src_reloc);
2647 if (r) {
2648 DRM_ERROR("bad DMA_PACKET_COPY\n");
2649 return -EINVAL;
2650 }
2651 r = r600_dma_cs_next_reloc(p, &dst_reloc);
2652 if (r) {
2653 DRM_ERROR("bad DMA_PACKET_COPY\n");
2654 return -EINVAL;
2655 }
2656 if (tiled) {
2657 idx_value = radeon_get_ib_value(p, idx + 2);
2658 /* detile bit */
2659 if (idx_value & (1 << 31)) {
2660 /* tiled src, linear dst */
2661 src_offset = ib[idx+1];
2662 src_offset <<= 8;
2663 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
2664
2665 dst_offset = ib[idx+5];
2666 dst_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
2667 ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2668 ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2669 } else {
2670 /* linear src, tiled dst */
2671 src_offset = ib[idx+5];
2672 src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
2673 ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2674 ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2675
2676 dst_offset = ib[idx+1];
2677 dst_offset <<= 8;
2678 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2679 }
2680 p->idx += 7;
2681 } else {
2682 if (p->family >= CHIP_RV770) {
2683 src_offset = ib[idx+2];
2684 src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
2685 dst_offset = ib[idx+1];
2686 dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
2687
2688 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2689 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2690 ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2691 ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2692 p->idx += 5;
2693 } else {
2694 src_offset = ib[idx+2];
2695 src_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
2696 dst_offset = ib[idx+1];
2697 dst_offset |= ((u64)(ib[idx+3] & 0xff0000)) << 16;
2698
2699 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2700 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2701 ib[idx+3] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2702 ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff) << 16;
2703 p->idx += 4;
2704 }
2705 }
2706 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
2707 dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
2708 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
2709 return -EINVAL;
2710 }
2711 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2712 dev_warn(p->dev, "DMA write dst buffer too small (%llu %lu)\n",
2713 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2714 return -EINVAL;
2715 }
2716 break;
2717 case DMA_PACKET_CONSTANT_FILL:
2718 if (p->family < CHIP_RV770) {
2719 DRM_ERROR("Constant Fill is 7xx only !\n");
2720 return -EINVAL;
2721 }
2722 r = r600_dma_cs_next_reloc(p, &dst_reloc);
2723 if (r) {
2724 DRM_ERROR("bad DMA_PACKET_WRITE\n");
2725 return -EINVAL;
2726 }
2727 dst_offset = ib[idx+1];
2728 dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
2729 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2730 dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
2731 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2732 return -EINVAL;
2733 }
2734 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2735 ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
2736 p->idx += 4;
2737 break;
2738 case DMA_PACKET_NOP:
2739 p->idx += 1;
2740 break;
2741 default:
2742 DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
2743 return -EINVAL;
2744 }
2745 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
2746#if 0
2747 for (r = 0; r < p->ib->length_dw; r++) {
2748 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
2749 mdelay(1);
2750 }
2751#endif
2752 return 0;
2753}
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index 2b960cb5c18a..909219b1bf80 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -96,6 +96,15 @@
96#define R600_CONFIG_F0_BASE 0x542C 96#define R600_CONFIG_F0_BASE 0x542C
97#define R600_CONFIG_APER_SIZE 0x5430 97#define R600_CONFIG_APER_SIZE 0x5430
98 98
99#define R600_BIF_FB_EN 0x5490
100#define R600_FB_READ_EN (1 << 0)
101#define R600_FB_WRITE_EN (1 << 1)
102
103#define R600_CITF_CNTL 0x200c
104#define R600_BLACKOUT_MASK 0x00000003
105
106#define R700_MC_CITF_CNTL 0x25c0
107
99#define R600_ROM_CNTL 0x1600 108#define R600_ROM_CNTL 0x1600
100# define R600_SCK_OVERWRITE (1 << 1) 109# define R600_SCK_OVERWRITE (1 << 1)
101# define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28 110# define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index fa6f37099ba9..4a53402b1852 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -590,9 +590,59 @@
590#define WAIT_2D_IDLECLEAN_bit (1 << 16) 590#define WAIT_2D_IDLECLEAN_bit (1 << 16)
591#define WAIT_3D_IDLECLEAN_bit (1 << 17) 591#define WAIT_3D_IDLECLEAN_bit (1 << 17)
592 592
593/* async DMA */
594#define DMA_TILING_CONFIG 0x3ec4
595#define DMA_CONFIG 0x3e4c
596
597#define DMA_RB_CNTL 0xd000
598# define DMA_RB_ENABLE (1 << 0)
599# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
600# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
601# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
602# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
603# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
604#define DMA_RB_BASE 0xd004
605#define DMA_RB_RPTR 0xd008
606#define DMA_RB_WPTR 0xd00c
607
608#define DMA_RB_RPTR_ADDR_HI 0xd01c
609#define DMA_RB_RPTR_ADDR_LO 0xd020
610
611#define DMA_IB_CNTL 0xd024
612# define DMA_IB_ENABLE (1 << 0)
613# define DMA_IB_SWAP_ENABLE (1 << 4)
614#define DMA_IB_RPTR 0xd028
615#define DMA_CNTL 0xd02c
616# define TRAP_ENABLE (1 << 0)
617# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
618# define SEM_WAIT_INT_ENABLE (1 << 2)
619# define DATA_SWAP_ENABLE (1 << 3)
620# define FENCE_SWAP_ENABLE (1 << 4)
621# define CTXEMPTY_INT_ENABLE (1 << 28)
622#define DMA_STATUS_REG 0xd034
623# define DMA_IDLE (1 << 0)
624#define DMA_SEM_INCOMPLETE_TIMER_CNTL 0xd044
625#define DMA_SEM_WAIT_FAIL_TIMER_CNTL 0xd048
626#define DMA_MODE 0xd0bc
627
628/* async DMA packets */
629#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
630 (((t) & 0x1) << 23) | \
631 (((s) & 0x1) << 22) | \
632 (((n) & 0xFFFF) << 0))
633/* async DMA Packet types */
634#define DMA_PACKET_WRITE 0x2
635#define DMA_PACKET_COPY 0x3
636#define DMA_PACKET_INDIRECT_BUFFER 0x4
637#define DMA_PACKET_SEMAPHORE 0x5
638#define DMA_PACKET_FENCE 0x6
639#define DMA_PACKET_TRAP 0x7
640#define DMA_PACKET_CONSTANT_FILL 0xd /* 7xx only */
641#define DMA_PACKET_NOP 0xf
642
593#define IH_RB_CNTL 0x3e00 643#define IH_RB_CNTL 0x3e00
594# define IH_RB_ENABLE (1 << 0) 644# define IH_RB_ENABLE (1 << 0)
595# define IH_IB_SIZE(x) ((x) << 1) /* log2 */ 645# define IH_RB_SIZE(x) ((x) << 1) /* log2 */
596# define IH_RB_FULL_DRAIN_ENABLE (1 << 6) 646# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
597# define IH_WPTR_WRITEBACK_ENABLE (1 << 8) 647# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
598# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */ 648# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
@@ -637,7 +687,9 @@
637#define TN_RLC_CLEAR_STATE_RESTORE_BASE 0x3f20 687#define TN_RLC_CLEAR_STATE_RESTORE_BASE 0x3f20
638 688
639#define SRBM_SOFT_RESET 0xe60 689#define SRBM_SOFT_RESET 0xe60
690# define SOFT_RESET_DMA (1 << 12)
640# define SOFT_RESET_RLC (1 << 13) 691# define SOFT_RESET_RLC (1 << 13)
692# define RV770_SOFT_RESET_DMA (1 << 20)
641 693
642#define CP_INT_CNTL 0xc124 694#define CP_INT_CNTL 0xc124
643# define CNTX_BUSY_INT_ENABLE (1 << 19) 695# define CNTX_BUSY_INT_ENABLE (1 << 19)
@@ -1134,6 +1186,38 @@
1134#define PACKET3_WAIT_REG_MEM 0x3C 1186#define PACKET3_WAIT_REG_MEM 0x3C
1135#define PACKET3_MEM_WRITE 0x3D 1187#define PACKET3_MEM_WRITE 0x3D
1136#define PACKET3_INDIRECT_BUFFER 0x32 1188#define PACKET3_INDIRECT_BUFFER 0x32
1189#define PACKET3_CP_DMA 0x41
1190/* 1. header
1191 * 2. SRC_ADDR_LO [31:0]
1192 * 3. CP_SYNC [31] | SRC_ADDR_HI [7:0]
1193 * 4. DST_ADDR_LO [31:0]
1194 * 5. DST_ADDR_HI [7:0]
1195 * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
1196 */
1197# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
1198/* COMMAND */
1199# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
1200 /* 0 - none
1201 * 1 - 8 in 16
1202 * 2 - 8 in 32
1203 * 3 - 8 in 64
1204 */
1205# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
1206 /* 0 - none
1207 * 1 - 8 in 16
1208 * 2 - 8 in 32
1209 * 3 - 8 in 64
1210 */
1211# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
1212 /* 0 - memory
1213 * 1 - register
1214 */
1215# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
1216 /* 0 - memory
1217 * 1 - register
1218 */
1219# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
1220# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
1137#define PACKET3_SURFACE_SYNC 0x43 1221#define PACKET3_SURFACE_SYNC 0x43
1138# define PACKET3_CB0_DEST_BASE_ENA (1 << 6) 1222# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
1139# define PACKET3_TC_ACTION_ENA (1 << 23) 1223# define PACKET3_TC_ACTION_ENA (1 << 23)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 8c42d54c2e26..34e52304a525 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -109,7 +109,7 @@ extern int radeon_lockup_timeout;
109#define RADEON_BIOS_NUM_SCRATCH 8 109#define RADEON_BIOS_NUM_SCRATCH 8
110 110
111/* max number of rings */ 111/* max number of rings */
112#define RADEON_NUM_RINGS 3 112#define RADEON_NUM_RINGS 5
113 113
114/* fence seq are set to this number when signaled */ 114/* fence seq are set to this number when signaled */
115#define RADEON_FENCE_SIGNALED_SEQ 0LL 115#define RADEON_FENCE_SIGNALED_SEQ 0LL
@@ -122,11 +122,21 @@ extern int radeon_lockup_timeout;
122#define CAYMAN_RING_TYPE_CP1_INDEX 1 122#define CAYMAN_RING_TYPE_CP1_INDEX 1
123#define CAYMAN_RING_TYPE_CP2_INDEX 2 123#define CAYMAN_RING_TYPE_CP2_INDEX 2
124 124
125/* R600+ has an async dma ring */
126#define R600_RING_TYPE_DMA_INDEX 3
127/* cayman add a second async dma ring */
128#define CAYMAN_RING_TYPE_DMA1_INDEX 4
129
125/* hardcode those limit for now */ 130/* hardcode those limit for now */
126#define RADEON_VA_IB_OFFSET (1 << 20) 131#define RADEON_VA_IB_OFFSET (1 << 20)
127#define RADEON_VA_RESERVED_SIZE (8 << 20) 132#define RADEON_VA_RESERVED_SIZE (8 << 20)
128#define RADEON_IB_VM_MAX_SIZE (64 << 10) 133#define RADEON_IB_VM_MAX_SIZE (64 << 10)
129 134
135/* reset flags */
136#define RADEON_RESET_GFX (1 << 0)
137#define RADEON_RESET_COMPUTE (1 << 1)
138#define RADEON_RESET_DMA (1 << 2)
139
130/* 140/*
131 * Errata workarounds. 141 * Errata workarounds.
132 */ 142 */
@@ -220,12 +230,13 @@ struct radeon_fence {
220int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring); 230int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
221int radeon_fence_driver_init(struct radeon_device *rdev); 231int radeon_fence_driver_init(struct radeon_device *rdev);
222void radeon_fence_driver_fini(struct radeon_device *rdev); 232void radeon_fence_driver_fini(struct radeon_device *rdev);
233void radeon_fence_driver_force_completion(struct radeon_device *rdev);
223int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring); 234int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
224void radeon_fence_process(struct radeon_device *rdev, int ring); 235void radeon_fence_process(struct radeon_device *rdev, int ring);
225bool radeon_fence_signaled(struct radeon_fence *fence); 236bool radeon_fence_signaled(struct radeon_fence *fence);
226int radeon_fence_wait(struct radeon_fence *fence, bool interruptible); 237int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
227int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring); 238int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
228void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring); 239int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
229int radeon_fence_wait_any(struct radeon_device *rdev, 240int radeon_fence_wait_any(struct radeon_device *rdev,
230 struct radeon_fence **fences, 241 struct radeon_fence **fences,
231 bool intr); 242 bool intr);
@@ -313,6 +324,7 @@ struct radeon_bo {
313 struct list_head list; 324 struct list_head list;
314 /* Protected by tbo.reserved */ 325 /* Protected by tbo.reserved */
315 u32 placements[3]; 326 u32 placements[3];
327 u32 busy_placements[3];
316 struct ttm_placement placement; 328 struct ttm_placement placement;
317 struct ttm_buffer_object tbo; 329 struct ttm_buffer_object tbo;
318 struct ttm_bo_kmap_obj kmap; 330 struct ttm_bo_kmap_obj kmap;
@@ -787,6 +799,15 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigne
787void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp); 799void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
788 800
789 801
802/* r600 async dma */
803void r600_dma_stop(struct radeon_device *rdev);
804int r600_dma_resume(struct radeon_device *rdev);
805void r600_dma_fini(struct radeon_device *rdev);
806
807void cayman_dma_stop(struct radeon_device *rdev);
808int cayman_dma_resume(struct radeon_device *rdev);
809void cayman_dma_fini(struct radeon_device *rdev);
810
790/* 811/*
791 * CS. 812 * CS.
792 */ 813 */
@@ -824,6 +845,7 @@ struct radeon_cs_parser {
824 struct radeon_cs_reloc *relocs; 845 struct radeon_cs_reloc *relocs;
825 struct radeon_cs_reloc **relocs_ptr; 846 struct radeon_cs_reloc **relocs_ptr;
826 struct list_head validated; 847 struct list_head validated;
848 unsigned dma_reloc_idx;
827 /* indices of various chunks */ 849 /* indices of various chunks */
828 int chunk_ib_idx; 850 int chunk_ib_idx;
829 int chunk_relocs_idx; 851 int chunk_relocs_idx;
@@ -883,7 +905,9 @@ struct radeon_wb {
883#define RADEON_WB_CP_RPTR_OFFSET 1024 905#define RADEON_WB_CP_RPTR_OFFSET 1024
884#define RADEON_WB_CP1_RPTR_OFFSET 1280 906#define RADEON_WB_CP1_RPTR_OFFSET 1280
885#define RADEON_WB_CP2_RPTR_OFFSET 1536 907#define RADEON_WB_CP2_RPTR_OFFSET 1536
908#define R600_WB_DMA_RPTR_OFFSET 1792
886#define R600_WB_IH_WPTR_OFFSET 2048 909#define R600_WB_IH_WPTR_OFFSET 2048
910#define CAYMAN_WB_DMA1_RPTR_OFFSET 2304
887#define R600_WB_EVENT_OFFSET 3072 911#define R600_WB_EVENT_OFFSET 3072
888 912
889/** 913/**
@@ -1539,6 +1563,8 @@ struct radeon_device {
1539 /* Register mmio */ 1563 /* Register mmio */
1540 resource_size_t rmmio_base; 1564 resource_size_t rmmio_base;
1541 resource_size_t rmmio_size; 1565 resource_size_t rmmio_size;
1566 /* protects concurrent MM_INDEX/DATA based register access */
1567 spinlock_t mmio_idx_lock;
1542 void __iomem *rmmio; 1568 void __iomem *rmmio;
1543 radeon_rreg_t mc_rreg; 1569 radeon_rreg_t mc_rreg;
1544 radeon_wreg_t mc_wreg; 1570 radeon_wreg_t mc_wreg;
@@ -1614,8 +1640,10 @@ int radeon_device_init(struct radeon_device *rdev,
1614void radeon_device_fini(struct radeon_device *rdev); 1640void radeon_device_fini(struct radeon_device *rdev);
1615int radeon_gpu_wait_for_idle(struct radeon_device *rdev); 1641int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
1616 1642
1617uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); 1643uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
1618void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 1644 bool always_indirect);
1645void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
1646 bool always_indirect);
1619u32 r100_io_rreg(struct radeon_device *rdev, u32 reg); 1647u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
1620void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v); 1648void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
1621 1649
@@ -1631,9 +1659,11 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
1631#define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg)) 1659#define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
1632#define RREG16(reg) readw((rdev->rmmio) + (reg)) 1660#define RREG16(reg) readw((rdev->rmmio) + (reg))
1633#define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg)) 1661#define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
1634#define RREG32(reg) r100_mm_rreg(rdev, (reg)) 1662#define RREG32(reg) r100_mm_rreg(rdev, (reg), false)
1635#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg))) 1663#define RREG32_IDX(reg) r100_mm_rreg(rdev, (reg), true)
1636#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v)) 1664#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg), false))
1665#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v), false)
1666#define WREG32_IDX(reg, v) r100_mm_wreg(rdev, (reg), (v), true)
1637#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) 1667#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
1638#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) 1668#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
1639#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg)) 1669#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
@@ -1658,7 +1688,7 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
1658 tmp_ |= ((val) & ~(mask)); \ 1688 tmp_ |= ((val) & ~(mask)); \
1659 WREG32_PLL(reg, tmp_); \ 1689 WREG32_PLL(reg, tmp_); \
1660 } while (0) 1690 } while (0)
1661#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg))) 1691#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg), false))
1662#define RREG32_IO(reg) r100_io_rreg(rdev, (reg)) 1692#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
1663#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v)) 1693#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
1664 1694
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 10ea17a6b2a6..42433344cb1b 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -69,9 +69,12 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
69 /* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/ 69 /* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/
70 { PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59, 70 { PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59,
71 PCI_VENDOR_ID_DELL, 0x00e3, 2}, 71 PCI_VENDOR_ID_DELL, 0x00e3, 2},
72 /* Intel 82852/82855 host bridge / Mobility FireGL 9000 R250 Needs AGPMode 1 (lp #296617) */ 72 /* Intel 82852/82855 host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 (lp #296617) */
73 { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66, 73 { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66,
74 PCI_VENDOR_ID_DELL, 0x0149, 1}, 74 PCI_VENDOR_ID_DELL, 0x0149, 1},
75 /* Intel 82855PM host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 for suspend/resume */
76 { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66,
77 PCI_VENDOR_ID_IBM, 0x0531, 1},
75 /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */ 78 /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */
76 { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50, 79 { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
77 0x1025, 0x0061, 1}, 80 0x1025, 0x0061, 1},
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 654520b95ab7..9056fafb00ea 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -947,6 +947,15 @@ static struct radeon_asic r600_asic = {
947 .ring_test = &r600_ring_test, 947 .ring_test = &r600_ring_test,
948 .ib_test = &r600_ib_test, 948 .ib_test = &r600_ib_test,
949 .is_lockup = &r600_gpu_is_lockup, 949 .is_lockup = &r600_gpu_is_lockup,
950 },
951 [R600_RING_TYPE_DMA_INDEX] = {
952 .ib_execute = &r600_dma_ring_ib_execute,
953 .emit_fence = &r600_dma_fence_ring_emit,
954 .emit_semaphore = &r600_dma_semaphore_ring_emit,
955 .cs_parse = &r600_dma_cs_parse,
956 .ring_test = &r600_dma_ring_test,
957 .ib_test = &r600_dma_ib_test,
958 .is_lockup = &r600_dma_is_lockup,
950 } 959 }
951 }, 960 },
952 .irq = { 961 .irq = {
@@ -963,10 +972,10 @@ static struct radeon_asic r600_asic = {
963 .copy = { 972 .copy = {
964 .blit = &r600_copy_blit, 973 .blit = &r600_copy_blit,
965 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 974 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
966 .dma = NULL, 975 .dma = &r600_copy_dma,
967 .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, 976 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
968 .copy = &r600_copy_blit, 977 .copy = &r600_copy_dma,
969 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, 978 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
970 }, 979 },
971 .surface = { 980 .surface = {
972 .set_reg = r600_set_surface_reg, 981 .set_reg = r600_set_surface_reg,
@@ -1022,6 +1031,15 @@ static struct radeon_asic rs780_asic = {
1022 .ring_test = &r600_ring_test, 1031 .ring_test = &r600_ring_test,
1023 .ib_test = &r600_ib_test, 1032 .ib_test = &r600_ib_test,
1024 .is_lockup = &r600_gpu_is_lockup, 1033 .is_lockup = &r600_gpu_is_lockup,
1034 },
1035 [R600_RING_TYPE_DMA_INDEX] = {
1036 .ib_execute = &r600_dma_ring_ib_execute,
1037 .emit_fence = &r600_dma_fence_ring_emit,
1038 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1039 .cs_parse = &r600_dma_cs_parse,
1040 .ring_test = &r600_dma_ring_test,
1041 .ib_test = &r600_dma_ib_test,
1042 .is_lockup = &r600_dma_is_lockup,
1025 } 1043 }
1026 }, 1044 },
1027 .irq = { 1045 .irq = {
@@ -1038,10 +1056,10 @@ static struct radeon_asic rs780_asic = {
1038 .copy = { 1056 .copy = {
1039 .blit = &r600_copy_blit, 1057 .blit = &r600_copy_blit,
1040 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1058 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1041 .dma = NULL, 1059 .dma = &r600_copy_dma,
1042 .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1060 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1043 .copy = &r600_copy_blit, 1061 .copy = &r600_copy_dma,
1044 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1062 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
1045 }, 1063 },
1046 .surface = { 1064 .surface = {
1047 .set_reg = r600_set_surface_reg, 1065 .set_reg = r600_set_surface_reg,
@@ -1097,6 +1115,15 @@ static struct radeon_asic rv770_asic = {
1097 .ring_test = &r600_ring_test, 1115 .ring_test = &r600_ring_test,
1098 .ib_test = &r600_ib_test, 1116 .ib_test = &r600_ib_test,
1099 .is_lockup = &r600_gpu_is_lockup, 1117 .is_lockup = &r600_gpu_is_lockup,
1118 },
1119 [R600_RING_TYPE_DMA_INDEX] = {
1120 .ib_execute = &r600_dma_ring_ib_execute,
1121 .emit_fence = &r600_dma_fence_ring_emit,
1122 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1123 .cs_parse = &r600_dma_cs_parse,
1124 .ring_test = &r600_dma_ring_test,
1125 .ib_test = &r600_dma_ib_test,
1126 .is_lockup = &r600_dma_is_lockup,
1100 } 1127 }
1101 }, 1128 },
1102 .irq = { 1129 .irq = {
@@ -1113,10 +1140,10 @@ static struct radeon_asic rv770_asic = {
1113 .copy = { 1140 .copy = {
1114 .blit = &r600_copy_blit, 1141 .blit = &r600_copy_blit,
1115 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1142 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1116 .dma = NULL, 1143 .dma = &rv770_copy_dma,
1117 .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1144 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1118 .copy = &r600_copy_blit, 1145 .copy = &rv770_copy_dma,
1119 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1146 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
1120 }, 1147 },
1121 .surface = { 1148 .surface = {
1122 .set_reg = r600_set_surface_reg, 1149 .set_reg = r600_set_surface_reg,
@@ -1172,6 +1199,15 @@ static struct radeon_asic evergreen_asic = {
1172 .ring_test = &r600_ring_test, 1199 .ring_test = &r600_ring_test,
1173 .ib_test = &r600_ib_test, 1200 .ib_test = &r600_ib_test,
1174 .is_lockup = &evergreen_gpu_is_lockup, 1201 .is_lockup = &evergreen_gpu_is_lockup,
1202 },
1203 [R600_RING_TYPE_DMA_INDEX] = {
1204 .ib_execute = &evergreen_dma_ring_ib_execute,
1205 .emit_fence = &evergreen_dma_fence_ring_emit,
1206 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1207 .cs_parse = &evergreen_dma_cs_parse,
1208 .ring_test = &r600_dma_ring_test,
1209 .ib_test = &r600_dma_ib_test,
1210 .is_lockup = &r600_dma_is_lockup,
1175 } 1211 }
1176 }, 1212 },
1177 .irq = { 1213 .irq = {
@@ -1188,10 +1224,10 @@ static struct radeon_asic evergreen_asic = {
1188 .copy = { 1224 .copy = {
1189 .blit = &r600_copy_blit, 1225 .blit = &r600_copy_blit,
1190 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1226 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1191 .dma = NULL, 1227 .dma = &evergreen_copy_dma,
1192 .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1228 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1193 .copy = &r600_copy_blit, 1229 .copy = &evergreen_copy_dma,
1194 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1230 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
1195 }, 1231 },
1196 .surface = { 1232 .surface = {
1197 .set_reg = r600_set_surface_reg, 1233 .set_reg = r600_set_surface_reg,
@@ -1248,6 +1284,15 @@ static struct radeon_asic sumo_asic = {
1248 .ib_test = &r600_ib_test, 1284 .ib_test = &r600_ib_test,
1249 .is_lockup = &evergreen_gpu_is_lockup, 1285 .is_lockup = &evergreen_gpu_is_lockup,
1250 }, 1286 },
1287 [R600_RING_TYPE_DMA_INDEX] = {
1288 .ib_execute = &evergreen_dma_ring_ib_execute,
1289 .emit_fence = &evergreen_dma_fence_ring_emit,
1290 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1291 .cs_parse = &evergreen_dma_cs_parse,
1292 .ring_test = &r600_dma_ring_test,
1293 .ib_test = &r600_dma_ib_test,
1294 .is_lockup = &r600_dma_is_lockup,
1295 }
1251 }, 1296 },
1252 .irq = { 1297 .irq = {
1253 .set = &evergreen_irq_set, 1298 .set = &evergreen_irq_set,
@@ -1263,10 +1308,10 @@ static struct radeon_asic sumo_asic = {
1263 .copy = { 1308 .copy = {
1264 .blit = &r600_copy_blit, 1309 .blit = &r600_copy_blit,
1265 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1310 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1266 .dma = NULL, 1311 .dma = &evergreen_copy_dma,
1267 .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1312 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1268 .copy = &r600_copy_blit, 1313 .copy = &evergreen_copy_dma,
1269 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1314 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
1270 }, 1315 },
1271 .surface = { 1316 .surface = {
1272 .set_reg = r600_set_surface_reg, 1317 .set_reg = r600_set_surface_reg,
@@ -1322,6 +1367,15 @@ static struct radeon_asic btc_asic = {
1322 .ring_test = &r600_ring_test, 1367 .ring_test = &r600_ring_test,
1323 .ib_test = &r600_ib_test, 1368 .ib_test = &r600_ib_test,
1324 .is_lockup = &evergreen_gpu_is_lockup, 1369 .is_lockup = &evergreen_gpu_is_lockup,
1370 },
1371 [R600_RING_TYPE_DMA_INDEX] = {
1372 .ib_execute = &evergreen_dma_ring_ib_execute,
1373 .emit_fence = &evergreen_dma_fence_ring_emit,
1374 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1375 .cs_parse = &evergreen_dma_cs_parse,
1376 .ring_test = &r600_dma_ring_test,
1377 .ib_test = &r600_dma_ib_test,
1378 .is_lockup = &r600_dma_is_lockup,
1325 } 1379 }
1326 }, 1380 },
1327 .irq = { 1381 .irq = {
@@ -1338,10 +1392,10 @@ static struct radeon_asic btc_asic = {
1338 .copy = { 1392 .copy = {
1339 .blit = &r600_copy_blit, 1393 .blit = &r600_copy_blit,
1340 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1394 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1341 .dma = NULL, 1395 .dma = &evergreen_copy_dma,
1342 .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1396 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1343 .copy = &r600_copy_blit, 1397 .copy = &evergreen_copy_dma,
1344 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1398 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
1345 }, 1399 },
1346 .surface = { 1400 .surface = {
1347 .set_reg = r600_set_surface_reg, 1401 .set_reg = r600_set_surface_reg,
@@ -1391,7 +1445,7 @@ static struct radeon_asic cayman_asic = {
1391 .vm = { 1445 .vm = {
1392 .init = &cayman_vm_init, 1446 .init = &cayman_vm_init,
1393 .fini = &cayman_vm_fini, 1447 .fini = &cayman_vm_fini,
1394 .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1448 .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
1395 .set_page = &cayman_vm_set_page, 1449 .set_page = &cayman_vm_set_page,
1396 }, 1450 },
1397 .ring = { 1451 .ring = {
@@ -1427,6 +1481,28 @@ static struct radeon_asic cayman_asic = {
1427 .ib_test = &r600_ib_test, 1481 .ib_test = &r600_ib_test,
1428 .is_lockup = &evergreen_gpu_is_lockup, 1482 .is_lockup = &evergreen_gpu_is_lockup,
1429 .vm_flush = &cayman_vm_flush, 1483 .vm_flush = &cayman_vm_flush,
1484 },
1485 [R600_RING_TYPE_DMA_INDEX] = {
1486 .ib_execute = &cayman_dma_ring_ib_execute,
1487 .ib_parse = &evergreen_dma_ib_parse,
1488 .emit_fence = &evergreen_dma_fence_ring_emit,
1489 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1490 .cs_parse = &evergreen_dma_cs_parse,
1491 .ring_test = &r600_dma_ring_test,
1492 .ib_test = &r600_dma_ib_test,
1493 .is_lockup = &cayman_dma_is_lockup,
1494 .vm_flush = &cayman_dma_vm_flush,
1495 },
1496 [CAYMAN_RING_TYPE_DMA1_INDEX] = {
1497 .ib_execute = &cayman_dma_ring_ib_execute,
1498 .ib_parse = &evergreen_dma_ib_parse,
1499 .emit_fence = &evergreen_dma_fence_ring_emit,
1500 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1501 .cs_parse = &evergreen_dma_cs_parse,
1502 .ring_test = &r600_dma_ring_test,
1503 .ib_test = &r600_dma_ib_test,
1504 .is_lockup = &cayman_dma_is_lockup,
1505 .vm_flush = &cayman_dma_vm_flush,
1430 } 1506 }
1431 }, 1507 },
1432 .irq = { 1508 .irq = {
@@ -1443,10 +1519,10 @@ static struct radeon_asic cayman_asic = {
1443 .copy = { 1519 .copy = {
1444 .blit = &r600_copy_blit, 1520 .blit = &r600_copy_blit,
1445 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1521 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1446 .dma = NULL, 1522 .dma = &evergreen_copy_dma,
1447 .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1523 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1448 .copy = &r600_copy_blit, 1524 .copy = &evergreen_copy_dma,
1449 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1525 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
1450 }, 1526 },
1451 .surface = { 1527 .surface = {
1452 .set_reg = r600_set_surface_reg, 1528 .set_reg = r600_set_surface_reg,
@@ -1496,7 +1572,7 @@ static struct radeon_asic trinity_asic = {
1496 .vm = { 1572 .vm = {
1497 .init = &cayman_vm_init, 1573 .init = &cayman_vm_init,
1498 .fini = &cayman_vm_fini, 1574 .fini = &cayman_vm_fini,
1499 .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1575 .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
1500 .set_page = &cayman_vm_set_page, 1576 .set_page = &cayman_vm_set_page,
1501 }, 1577 },
1502 .ring = { 1578 .ring = {
@@ -1532,6 +1608,28 @@ static struct radeon_asic trinity_asic = {
1532 .ib_test = &r600_ib_test, 1608 .ib_test = &r600_ib_test,
1533 .is_lockup = &evergreen_gpu_is_lockup, 1609 .is_lockup = &evergreen_gpu_is_lockup,
1534 .vm_flush = &cayman_vm_flush, 1610 .vm_flush = &cayman_vm_flush,
1611 },
1612 [R600_RING_TYPE_DMA_INDEX] = {
1613 .ib_execute = &cayman_dma_ring_ib_execute,
1614 .ib_parse = &evergreen_dma_ib_parse,
1615 .emit_fence = &evergreen_dma_fence_ring_emit,
1616 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1617 .cs_parse = &evergreen_dma_cs_parse,
1618 .ring_test = &r600_dma_ring_test,
1619 .ib_test = &r600_dma_ib_test,
1620 .is_lockup = &cayman_dma_is_lockup,
1621 .vm_flush = &cayman_dma_vm_flush,
1622 },
1623 [CAYMAN_RING_TYPE_DMA1_INDEX] = {
1624 .ib_execute = &cayman_dma_ring_ib_execute,
1625 .ib_parse = &evergreen_dma_ib_parse,
1626 .emit_fence = &evergreen_dma_fence_ring_emit,
1627 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1628 .cs_parse = &evergreen_dma_cs_parse,
1629 .ring_test = &r600_dma_ring_test,
1630 .ib_test = &r600_dma_ib_test,
1631 .is_lockup = &cayman_dma_is_lockup,
1632 .vm_flush = &cayman_dma_vm_flush,
1535 } 1633 }
1536 }, 1634 },
1537 .irq = { 1635 .irq = {
@@ -1548,10 +1646,10 @@ static struct radeon_asic trinity_asic = {
1548 .copy = { 1646 .copy = {
1549 .blit = &r600_copy_blit, 1647 .blit = &r600_copy_blit,
1550 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1648 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1551 .dma = NULL, 1649 .dma = &evergreen_copy_dma,
1552 .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1650 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1553 .copy = &r600_copy_blit, 1651 .copy = &evergreen_copy_dma,
1554 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1652 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
1555 }, 1653 },
1556 .surface = { 1654 .surface = {
1557 .set_reg = r600_set_surface_reg, 1655 .set_reg = r600_set_surface_reg,
@@ -1601,7 +1699,7 @@ static struct radeon_asic si_asic = {
1601 .vm = { 1699 .vm = {
1602 .init = &si_vm_init, 1700 .init = &si_vm_init,
1603 .fini = &si_vm_fini, 1701 .fini = &si_vm_fini,
1604 .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1702 .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
1605 .set_page = &si_vm_set_page, 1703 .set_page = &si_vm_set_page,
1606 }, 1704 },
1607 .ring = { 1705 .ring = {
@@ -1637,6 +1735,28 @@ static struct radeon_asic si_asic = {
1637 .ib_test = &r600_ib_test, 1735 .ib_test = &r600_ib_test,
1638 .is_lockup = &si_gpu_is_lockup, 1736 .is_lockup = &si_gpu_is_lockup,
1639 .vm_flush = &si_vm_flush, 1737 .vm_flush = &si_vm_flush,
1738 },
1739 [R600_RING_TYPE_DMA_INDEX] = {
1740 .ib_execute = &cayman_dma_ring_ib_execute,
1741 .ib_parse = &evergreen_dma_ib_parse,
1742 .emit_fence = &evergreen_dma_fence_ring_emit,
1743 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1744 .cs_parse = NULL,
1745 .ring_test = &r600_dma_ring_test,
1746 .ib_test = &r600_dma_ib_test,
1747 .is_lockup = &cayman_dma_is_lockup,
1748 .vm_flush = &si_dma_vm_flush,
1749 },
1750 [CAYMAN_RING_TYPE_DMA1_INDEX] = {
1751 .ib_execute = &cayman_dma_ring_ib_execute,
1752 .ib_parse = &evergreen_dma_ib_parse,
1753 .emit_fence = &evergreen_dma_fence_ring_emit,
1754 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1755 .cs_parse = NULL,
1756 .ring_test = &r600_dma_ring_test,
1757 .ib_test = &r600_dma_ib_test,
1758 .is_lockup = &cayman_dma_is_lockup,
1759 .vm_flush = &si_dma_vm_flush,
1640 } 1760 }
1641 }, 1761 },
1642 .irq = { 1762 .irq = {
@@ -1653,10 +1773,10 @@ static struct radeon_asic si_asic = {
1653 .copy = { 1773 .copy = {
1654 .blit = NULL, 1774 .blit = NULL,
1655 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1775 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1656 .dma = NULL, 1776 .dma = &si_copy_dma,
1657 .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1777 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1658 .copy = NULL, 1778 .copy = &si_copy_dma,
1659 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1779 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
1660 }, 1780 },
1661 .surface = { 1781 .surface = {
1662 .set_reg = r600_set_surface_reg, 1782 .set_reg = r600_set_surface_reg,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 5e3a0e5c6be1..15d70e613076 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -263,6 +263,7 @@ extern int rs690_mc_wait_for_idle(struct radeon_device *rdev);
263struct rv515_mc_save { 263struct rv515_mc_save {
264 u32 vga_render_control; 264 u32 vga_render_control;
265 u32 vga_hdp_control; 265 u32 vga_hdp_control;
266 bool crtc_enabled[2];
266}; 267};
267 268
268int rv515_init(struct radeon_device *rdev); 269int rv515_init(struct radeon_device *rdev);
@@ -303,12 +304,21 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
303uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg); 304uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
304void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 305void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
305int r600_cs_parse(struct radeon_cs_parser *p); 306int r600_cs_parse(struct radeon_cs_parser *p);
307int r600_dma_cs_parse(struct radeon_cs_parser *p);
306void r600_fence_ring_emit(struct radeon_device *rdev, 308void r600_fence_ring_emit(struct radeon_device *rdev,
307 struct radeon_fence *fence); 309 struct radeon_fence *fence);
308void r600_semaphore_ring_emit(struct radeon_device *rdev, 310void r600_semaphore_ring_emit(struct radeon_device *rdev,
309 struct radeon_ring *cp, 311 struct radeon_ring *cp,
310 struct radeon_semaphore *semaphore, 312 struct radeon_semaphore *semaphore,
311 bool emit_wait); 313 bool emit_wait);
314void r600_dma_fence_ring_emit(struct radeon_device *rdev,
315 struct radeon_fence *fence);
316void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
317 struct radeon_ring *ring,
318 struct radeon_semaphore *semaphore,
319 bool emit_wait);
320void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
321bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
312bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp); 322bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
313int r600_asic_reset(struct radeon_device *rdev); 323int r600_asic_reset(struct radeon_device *rdev);
314int r600_set_surface_reg(struct radeon_device *rdev, int reg, 324int r600_set_surface_reg(struct radeon_device *rdev, int reg,
@@ -316,11 +326,16 @@ int r600_set_surface_reg(struct radeon_device *rdev, int reg,
316 uint32_t offset, uint32_t obj_size); 326 uint32_t offset, uint32_t obj_size);
317void r600_clear_surface_reg(struct radeon_device *rdev, int reg); 327void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
318int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); 328int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
329int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
319void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); 330void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
320int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); 331int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
332int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
321int r600_copy_blit(struct radeon_device *rdev, 333int r600_copy_blit(struct radeon_device *rdev,
322 uint64_t src_offset, uint64_t dst_offset, 334 uint64_t src_offset, uint64_t dst_offset,
323 unsigned num_gpu_pages, struct radeon_fence **fence); 335 unsigned num_gpu_pages, struct radeon_fence **fence);
336int r600_copy_dma(struct radeon_device *rdev,
337 uint64_t src_offset, uint64_t dst_offset,
338 unsigned num_gpu_pages, struct radeon_fence **fence);
324void r600_hpd_init(struct radeon_device *rdev); 339void r600_hpd_init(struct radeon_device *rdev);
325void r600_hpd_fini(struct radeon_device *rdev); 340void r600_hpd_fini(struct radeon_device *rdev);
326bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); 341bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -388,6 +403,10 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
388void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); 403void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
389void r700_cp_stop(struct radeon_device *rdev); 404void r700_cp_stop(struct radeon_device *rdev);
390void r700_cp_fini(struct radeon_device *rdev); 405void r700_cp_fini(struct radeon_device *rdev);
406int rv770_copy_dma(struct radeon_device *rdev,
407 uint64_t src_offset, uint64_t dst_offset,
408 unsigned num_gpu_pages,
409 struct radeon_fence **fence);
391 410
392/* 411/*
393 * evergreen 412 * evergreen
@@ -416,6 +435,7 @@ u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
416int evergreen_irq_set(struct radeon_device *rdev); 435int evergreen_irq_set(struct radeon_device *rdev);
417int evergreen_irq_process(struct radeon_device *rdev); 436int evergreen_irq_process(struct radeon_device *rdev);
418extern int evergreen_cs_parse(struct radeon_cs_parser *p); 437extern int evergreen_cs_parse(struct radeon_cs_parser *p);
438extern int evergreen_dma_cs_parse(struct radeon_cs_parser *p);
419extern void evergreen_pm_misc(struct radeon_device *rdev); 439extern void evergreen_pm_misc(struct radeon_device *rdev);
420extern void evergreen_pm_prepare(struct radeon_device *rdev); 440extern void evergreen_pm_prepare(struct radeon_device *rdev);
421extern void evergreen_pm_finish(struct radeon_device *rdev); 441extern void evergreen_pm_finish(struct radeon_device *rdev);
@@ -428,6 +448,14 @@ extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
428void evergreen_disable_interrupt_state(struct radeon_device *rdev); 448void evergreen_disable_interrupt_state(struct radeon_device *rdev);
429int evergreen_blit_init(struct radeon_device *rdev); 449int evergreen_blit_init(struct radeon_device *rdev);
430int evergreen_mc_wait_for_idle(struct radeon_device *rdev); 450int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
451void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
452 struct radeon_fence *fence);
453void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
454 struct radeon_ib *ib);
455int evergreen_copy_dma(struct radeon_device *rdev,
456 uint64_t src_offset, uint64_t dst_offset,
457 unsigned num_gpu_pages,
458 struct radeon_fence **fence);
431 459
432/* 460/*
433 * cayman 461 * cayman
@@ -449,6 +477,11 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
449 uint64_t addr, unsigned count, 477 uint64_t addr, unsigned count,
450 uint32_t incr, uint32_t flags); 478 uint32_t incr, uint32_t flags);
451int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); 479int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
480int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
481void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
482 struct radeon_ib *ib);
483bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
484void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
452 485
453/* DCE6 - SI */ 486/* DCE6 - SI */
454void dce6_bandwidth_update(struct radeon_device *rdev); 487void dce6_bandwidth_update(struct radeon_device *rdev);
@@ -476,5 +509,10 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
476void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); 509void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
477int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); 510int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
478uint64_t si_get_gpu_clock(struct radeon_device *rdev); 511uint64_t si_get_gpu_clock(struct radeon_device *rdev);
512int si_copy_dma(struct radeon_device *rdev,
513 uint64_t src_offset, uint64_t dst_offset,
514 unsigned num_gpu_pages,
515 struct radeon_fence **fence);
516void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
479 517
480#endif 518#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 37f6a907aea4..15f5ded65e0c 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -352,9 +352,9 @@ static int radeon_atpx_switchto(enum vga_switcheroo_client_id id)
352} 352}
353 353
354/** 354/**
355 * radeon_atpx_switchto - switch to the requested GPU 355 * radeon_atpx_power_state - power down/up the requested GPU
356 * 356 *
357 * @id: GPU to switch to 357 * @id: GPU to power down/up
358 * @state: requested power state (0 = off, 1 = on) 358 * @state: requested power state (0 = off, 1 = on)
359 * 359 *
360 * Execute the necessary ATPX function to power down/up the discrete GPU 360 * Execute the necessary ATPX function to power down/up the discrete GPU
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 45b660b27cfc..33a56a09ff10 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1548,6 +1548,9 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1548 of_machine_is_compatible("PowerBook6,7")) { 1548 of_machine_is_compatible("PowerBook6,7")) {
1549 /* ibook */ 1549 /* ibook */
1550 rdev->mode_info.connector_table = CT_IBOOK; 1550 rdev->mode_info.connector_table = CT_IBOOK;
1551 } else if (of_machine_is_compatible("PowerMac3,5")) {
1552 /* PowerMac G4 Silver radeon 7500 */
1553 rdev->mode_info.connector_table = CT_MAC_G4_SILVER;
1551 } else if (of_machine_is_compatible("PowerMac4,4")) { 1554 } else if (of_machine_is_compatible("PowerMac4,4")) {
1552 /* emac */ 1555 /* emac */
1553 rdev->mode_info.connector_table = CT_EMAC; 1556 rdev->mode_info.connector_table = CT_EMAC;
@@ -2212,6 +2215,54 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
2212 CONNECTOR_OBJECT_ID_SVIDEO, 2215 CONNECTOR_OBJECT_ID_SVIDEO,
2213 &hpd); 2216 &hpd);
2214 break; 2217 break;
2218 case CT_MAC_G4_SILVER:
2219 DRM_INFO("Connector Table: %d (mac g4 silver)\n",
2220 rdev->mode_info.connector_table);
2221 /* DVI-I - tv dac, int tmds */
2222 ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
2223 hpd.hpd = RADEON_HPD_1; /* ??? */
2224 radeon_add_legacy_encoder(dev,
2225 radeon_get_encoder_enum(dev,
2226 ATOM_DEVICE_DFP1_SUPPORT,
2227 0),
2228 ATOM_DEVICE_DFP1_SUPPORT);
2229 radeon_add_legacy_encoder(dev,
2230 radeon_get_encoder_enum(dev,
2231 ATOM_DEVICE_CRT2_SUPPORT,
2232 2),
2233 ATOM_DEVICE_CRT2_SUPPORT);
2234 radeon_add_legacy_connector(dev, 0,
2235 ATOM_DEVICE_DFP1_SUPPORT |
2236 ATOM_DEVICE_CRT2_SUPPORT,
2237 DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
2238 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
2239 &hpd);
2240 /* VGA - primary dac */
2241 ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
2242 hpd.hpd = RADEON_HPD_NONE;
2243 radeon_add_legacy_encoder(dev,
2244 radeon_get_encoder_enum(dev,
2245 ATOM_DEVICE_CRT1_SUPPORT,
2246 1),
2247 ATOM_DEVICE_CRT1_SUPPORT);
2248 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
2249 DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
2250 CONNECTOR_OBJECT_ID_VGA,
2251 &hpd);
2252 /* TV - TV DAC */
2253 ddc_i2c.valid = false;
2254 hpd.hpd = RADEON_HPD_NONE;
2255 radeon_add_legacy_encoder(dev,
2256 radeon_get_encoder_enum(dev,
2257 ATOM_DEVICE_TV1_SUPPORT,
2258 2),
2259 ATOM_DEVICE_TV1_SUPPORT);
2260 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
2261 DRM_MODE_CONNECTOR_SVIDEO,
2262 &ddc_i2c,
2263 CONNECTOR_OBJECT_ID_SVIDEO,
2264 &hpd);
2265 break;
2215 default: 2266 default:
2216 DRM_INFO("Connector table: %d (invalid)\n", 2267 DRM_INFO("Connector table: %d (invalid)\n",
2217 rdev->mode_info.connector_table); 2268 rdev->mode_info.connector_table);
@@ -3246,11 +3297,9 @@ static uint32_t combios_detect_ram(struct drm_device *dev, int ram,
3246 while (ram--) { 3297 while (ram--) {
3247 addr = ram * 1024 * 1024; 3298 addr = ram * 1024 * 1024;
3248 /* write to each page */ 3299 /* write to each page */
3249 WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER); 3300 WREG32_IDX((addr) | RADEON_MM_APER, 0xdeadbeef);
3250 WREG32(RADEON_MM_DATA, 0xdeadbeef);
3251 /* read back and verify */ 3301 /* read back and verify */
3252 WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER); 3302 if (RREG32_IDX((addr) | RADEON_MM_APER) != 0xdeadbeef)
3253 if (RREG32(RADEON_MM_DATA) != 0xdeadbeef)
3254 return 0; 3303 return 0;
3255 } 3304 }
3256 3305
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 67cfc1795ecd..2399f25ec037 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -741,7 +741,7 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
741 ret = connector_status_disconnected; 741 ret = connector_status_disconnected;
742 742
743 if (radeon_connector->ddc_bus) 743 if (radeon_connector->ddc_bus)
744 dret = radeon_ddc_probe(radeon_connector); 744 dret = radeon_ddc_probe(radeon_connector, false);
745 if (dret) { 745 if (dret) {
746 radeon_connector->detected_by_load = false; 746 radeon_connector->detected_by_load = false;
747 if (radeon_connector->edid) { 747 if (radeon_connector->edid) {
@@ -941,13 +941,13 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
941 struct drm_mode_object *obj; 941 struct drm_mode_object *obj;
942 int i; 942 int i;
943 enum drm_connector_status ret = connector_status_disconnected; 943 enum drm_connector_status ret = connector_status_disconnected;
944 bool dret = false; 944 bool dret = false, broken_edid = false;
945 945
946 if (!force && radeon_check_hpd_status_unchanged(connector)) 946 if (!force && radeon_check_hpd_status_unchanged(connector))
947 return connector->status; 947 return connector->status;
948 948
949 if (radeon_connector->ddc_bus) 949 if (radeon_connector->ddc_bus)
950 dret = radeon_ddc_probe(radeon_connector); 950 dret = radeon_ddc_probe(radeon_connector, false);
951 if (dret) { 951 if (dret) {
952 radeon_connector->detected_by_load = false; 952 radeon_connector->detected_by_load = false;
953 if (radeon_connector->edid) { 953 if (radeon_connector->edid) {
@@ -965,6 +965,9 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
965 ret = connector_status_disconnected; 965 ret = connector_status_disconnected;
966 DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n", drm_get_connector_name(connector)); 966 DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n", drm_get_connector_name(connector));
967 radeon_connector->ddc_bus = NULL; 967 radeon_connector->ddc_bus = NULL;
968 } else {
969 ret = connector_status_connected;
970 broken_edid = true; /* defer use_digital to later */
968 } 971 }
969 } else { 972 } else {
970 radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); 973 radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
@@ -1047,13 +1050,24 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
1047 1050
1048 encoder_funcs = encoder->helper_private; 1051 encoder_funcs = encoder->helper_private;
1049 if (encoder_funcs->detect) { 1052 if (encoder_funcs->detect) {
1050 if (ret != connector_status_connected) { 1053 if (!broken_edid) {
1051 ret = encoder_funcs->detect(encoder, connector); 1054 if (ret != connector_status_connected) {
1052 if (ret == connector_status_connected) { 1055 /* deal with analog monitors without DDC */
1053 radeon_connector->use_digital = false; 1056 ret = encoder_funcs->detect(encoder, connector);
1057 if (ret == connector_status_connected) {
1058 radeon_connector->use_digital = false;
1059 }
1060 if (ret != connector_status_disconnected)
1061 radeon_connector->detected_by_load = true;
1054 } 1062 }
1055 if (ret != connector_status_disconnected) 1063 } else {
1056 radeon_connector->detected_by_load = true; 1064 enum drm_connector_status lret;
1065 /* assume digital unless load detected otherwise */
1066 radeon_connector->use_digital = true;
1067 lret = encoder_funcs->detect(encoder, connector);
1068 DRM_DEBUG_KMS("load_detect %x returned: %x\n",encoder->encoder_type,lret);
1069 if (lret == connector_status_connected)
1070 radeon_connector->use_digital = false;
1057 } 1071 }
1058 break; 1072 break;
1059 } 1073 }
@@ -1387,7 +1401,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1387 if (encoder) { 1401 if (encoder) {
1388 /* setup ddc on the bridge */ 1402 /* setup ddc on the bridge */
1389 radeon_atom_ext_encoder_setup_ddc(encoder); 1403 radeon_atom_ext_encoder_setup_ddc(encoder);
1390 if (radeon_ddc_probe(radeon_connector)) /* try DDC */ 1404 /* bridge chips are always aux */
1405 if (radeon_ddc_probe(radeon_connector, true)) /* try DDC */
1391 ret = connector_status_connected; 1406 ret = connector_status_connected;
1392 else if (radeon_connector->dac_load_detect) { /* try load detection */ 1407 else if (radeon_connector->dac_load_detect) { /* try load detection */
1393 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; 1408 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -1405,7 +1420,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1405 if (radeon_dp_getdpcd(radeon_connector)) 1420 if (radeon_dp_getdpcd(radeon_connector))
1406 ret = connector_status_connected; 1421 ret = connector_status_connected;
1407 } else { 1422 } else {
1408 if (radeon_ddc_probe(radeon_connector)) 1423 /* try non-aux ddc (DP to DVI/HMDI/etc. adapter) */
1424 if (radeon_ddc_probe(radeon_connector, false))
1409 ret = connector_status_connected; 1425 ret = connector_status_connected;
1410 } 1426 }
1411 } 1427 }
@@ -1585,7 +1601,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1585 connector->interlace_allowed = true; 1601 connector->interlace_allowed = true;
1586 connector->doublescan_allowed = true; 1602 connector->doublescan_allowed = true;
1587 radeon_connector->dac_load_detect = true; 1603 radeon_connector->dac_load_detect = true;
1588 drm_connector_attach_property(&radeon_connector->base, 1604 drm_object_attach_property(&radeon_connector->base.base,
1589 rdev->mode_info.load_detect_property, 1605 rdev->mode_info.load_detect_property,
1590 1); 1606 1);
1591 break; 1607 break;
@@ -1594,13 +1610,13 @@ radeon_add_atom_connector(struct drm_device *dev,
1594 case DRM_MODE_CONNECTOR_HDMIA: 1610 case DRM_MODE_CONNECTOR_HDMIA:
1595 case DRM_MODE_CONNECTOR_HDMIB: 1611 case DRM_MODE_CONNECTOR_HDMIB:
1596 case DRM_MODE_CONNECTOR_DisplayPort: 1612 case DRM_MODE_CONNECTOR_DisplayPort:
1597 drm_connector_attach_property(&radeon_connector->base, 1613 drm_object_attach_property(&radeon_connector->base.base,
1598 rdev->mode_info.underscan_property, 1614 rdev->mode_info.underscan_property,
1599 UNDERSCAN_OFF); 1615 UNDERSCAN_OFF);
1600 drm_connector_attach_property(&radeon_connector->base, 1616 drm_object_attach_property(&radeon_connector->base.base,
1601 rdev->mode_info.underscan_hborder_property, 1617 rdev->mode_info.underscan_hborder_property,
1602 0); 1618 0);
1603 drm_connector_attach_property(&radeon_connector->base, 1619 drm_object_attach_property(&radeon_connector->base.base,
1604 rdev->mode_info.underscan_vborder_property, 1620 rdev->mode_info.underscan_vborder_property,
1605 0); 1621 0);
1606 subpixel_order = SubPixelHorizontalRGB; 1622 subpixel_order = SubPixelHorizontalRGB;
@@ -1611,14 +1627,14 @@ radeon_add_atom_connector(struct drm_device *dev,
1611 connector->doublescan_allowed = false; 1627 connector->doublescan_allowed = false;
1612 if (connector_type == DRM_MODE_CONNECTOR_DVII) { 1628 if (connector_type == DRM_MODE_CONNECTOR_DVII) {
1613 radeon_connector->dac_load_detect = true; 1629 radeon_connector->dac_load_detect = true;
1614 drm_connector_attach_property(&radeon_connector->base, 1630 drm_object_attach_property(&radeon_connector->base.base,
1615 rdev->mode_info.load_detect_property, 1631 rdev->mode_info.load_detect_property,
1616 1); 1632 1);
1617 } 1633 }
1618 break; 1634 break;
1619 case DRM_MODE_CONNECTOR_LVDS: 1635 case DRM_MODE_CONNECTOR_LVDS:
1620 case DRM_MODE_CONNECTOR_eDP: 1636 case DRM_MODE_CONNECTOR_eDP:
1621 drm_connector_attach_property(&radeon_connector->base, 1637 drm_object_attach_property(&radeon_connector->base.base,
1622 dev->mode_config.scaling_mode_property, 1638 dev->mode_config.scaling_mode_property,
1623 DRM_MODE_SCALE_FULLSCREEN); 1639 DRM_MODE_SCALE_FULLSCREEN);
1624 subpixel_order = SubPixelHorizontalRGB; 1640 subpixel_order = SubPixelHorizontalRGB;
@@ -1637,7 +1653,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1637 DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1653 DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1638 } 1654 }
1639 radeon_connector->dac_load_detect = true; 1655 radeon_connector->dac_load_detect = true;
1640 drm_connector_attach_property(&radeon_connector->base, 1656 drm_object_attach_property(&radeon_connector->base.base,
1641 rdev->mode_info.load_detect_property, 1657 rdev->mode_info.load_detect_property,
1642 1); 1658 1);
1643 /* no HPD on analog connectors */ 1659 /* no HPD on analog connectors */
@@ -1655,7 +1671,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1655 DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1671 DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1656 } 1672 }
1657 radeon_connector->dac_load_detect = true; 1673 radeon_connector->dac_load_detect = true;
1658 drm_connector_attach_property(&radeon_connector->base, 1674 drm_object_attach_property(&radeon_connector->base.base,
1659 rdev->mode_info.load_detect_property, 1675 rdev->mode_info.load_detect_property,
1660 1); 1676 1);
1661 /* no HPD on analog connectors */ 1677 /* no HPD on analog connectors */
@@ -1678,23 +1694,23 @@ radeon_add_atom_connector(struct drm_device *dev,
1678 DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1694 DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1679 } 1695 }
1680 subpixel_order = SubPixelHorizontalRGB; 1696 subpixel_order = SubPixelHorizontalRGB;
1681 drm_connector_attach_property(&radeon_connector->base, 1697 drm_object_attach_property(&radeon_connector->base.base,
1682 rdev->mode_info.coherent_mode_property, 1698 rdev->mode_info.coherent_mode_property,
1683 1); 1699 1);
1684 if (ASIC_IS_AVIVO(rdev)) { 1700 if (ASIC_IS_AVIVO(rdev)) {
1685 drm_connector_attach_property(&radeon_connector->base, 1701 drm_object_attach_property(&radeon_connector->base.base,
1686 rdev->mode_info.underscan_property, 1702 rdev->mode_info.underscan_property,
1687 UNDERSCAN_OFF); 1703 UNDERSCAN_OFF);
1688 drm_connector_attach_property(&radeon_connector->base, 1704 drm_object_attach_property(&radeon_connector->base.base,
1689 rdev->mode_info.underscan_hborder_property, 1705 rdev->mode_info.underscan_hborder_property,
1690 0); 1706 0);
1691 drm_connector_attach_property(&radeon_connector->base, 1707 drm_object_attach_property(&radeon_connector->base.base,
1692 rdev->mode_info.underscan_vborder_property, 1708 rdev->mode_info.underscan_vborder_property,
1693 0); 1709 0);
1694 } 1710 }
1695 if (connector_type == DRM_MODE_CONNECTOR_DVII) { 1711 if (connector_type == DRM_MODE_CONNECTOR_DVII) {
1696 radeon_connector->dac_load_detect = true; 1712 radeon_connector->dac_load_detect = true;
1697 drm_connector_attach_property(&radeon_connector->base, 1713 drm_object_attach_property(&radeon_connector->base.base,
1698 rdev->mode_info.load_detect_property, 1714 rdev->mode_info.load_detect_property,
1699 1); 1715 1);
1700 } 1716 }
@@ -1718,17 +1734,17 @@ radeon_add_atom_connector(struct drm_device *dev,
1718 if (!radeon_connector->ddc_bus) 1734 if (!radeon_connector->ddc_bus)
1719 DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1735 DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1720 } 1736 }
1721 drm_connector_attach_property(&radeon_connector->base, 1737 drm_object_attach_property(&radeon_connector->base.base,
1722 rdev->mode_info.coherent_mode_property, 1738 rdev->mode_info.coherent_mode_property,
1723 1); 1739 1);
1724 if (ASIC_IS_AVIVO(rdev)) { 1740 if (ASIC_IS_AVIVO(rdev)) {
1725 drm_connector_attach_property(&radeon_connector->base, 1741 drm_object_attach_property(&radeon_connector->base.base,
1726 rdev->mode_info.underscan_property, 1742 rdev->mode_info.underscan_property,
1727 UNDERSCAN_OFF); 1743 UNDERSCAN_OFF);
1728 drm_connector_attach_property(&radeon_connector->base, 1744 drm_object_attach_property(&radeon_connector->base.base,
1729 rdev->mode_info.underscan_hborder_property, 1745 rdev->mode_info.underscan_hborder_property,
1730 0); 1746 0);
1731 drm_connector_attach_property(&radeon_connector->base, 1747 drm_object_attach_property(&radeon_connector->base.base,
1732 rdev->mode_info.underscan_vborder_property, 1748 rdev->mode_info.underscan_vborder_property,
1733 0); 1749 0);
1734 } 1750 }
@@ -1757,17 +1773,17 @@ radeon_add_atom_connector(struct drm_device *dev,
1757 DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1773 DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1758 } 1774 }
1759 subpixel_order = SubPixelHorizontalRGB; 1775 subpixel_order = SubPixelHorizontalRGB;
1760 drm_connector_attach_property(&radeon_connector->base, 1776 drm_object_attach_property(&radeon_connector->base.base,
1761 rdev->mode_info.coherent_mode_property, 1777 rdev->mode_info.coherent_mode_property,
1762 1); 1778 1);
1763 if (ASIC_IS_AVIVO(rdev)) { 1779 if (ASIC_IS_AVIVO(rdev)) {
1764 drm_connector_attach_property(&radeon_connector->base, 1780 drm_object_attach_property(&radeon_connector->base.base,
1765 rdev->mode_info.underscan_property, 1781 rdev->mode_info.underscan_property,
1766 UNDERSCAN_OFF); 1782 UNDERSCAN_OFF);
1767 drm_connector_attach_property(&radeon_connector->base, 1783 drm_object_attach_property(&radeon_connector->base.base,
1768 rdev->mode_info.underscan_hborder_property, 1784 rdev->mode_info.underscan_hborder_property,
1769 0); 1785 0);
1770 drm_connector_attach_property(&radeon_connector->base, 1786 drm_object_attach_property(&radeon_connector->base.base,
1771 rdev->mode_info.underscan_vborder_property, 1787 rdev->mode_info.underscan_vborder_property,
1772 0); 1788 0);
1773 } 1789 }
@@ -1792,7 +1808,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1792 if (!radeon_connector->ddc_bus) 1808 if (!radeon_connector->ddc_bus)
1793 DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1809 DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1794 } 1810 }
1795 drm_connector_attach_property(&radeon_connector->base, 1811 drm_object_attach_property(&radeon_connector->base.base,
1796 dev->mode_config.scaling_mode_property, 1812 dev->mode_config.scaling_mode_property,
1797 DRM_MODE_SCALE_FULLSCREEN); 1813 DRM_MODE_SCALE_FULLSCREEN);
1798 subpixel_order = SubPixelHorizontalRGB; 1814 subpixel_order = SubPixelHorizontalRGB;
@@ -1805,10 +1821,10 @@ radeon_add_atom_connector(struct drm_device *dev,
1805 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); 1821 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
1806 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); 1822 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
1807 radeon_connector->dac_load_detect = true; 1823 radeon_connector->dac_load_detect = true;
1808 drm_connector_attach_property(&radeon_connector->base, 1824 drm_object_attach_property(&radeon_connector->base.base,
1809 rdev->mode_info.load_detect_property, 1825 rdev->mode_info.load_detect_property,
1810 1); 1826 1);
1811 drm_connector_attach_property(&radeon_connector->base, 1827 drm_object_attach_property(&radeon_connector->base.base,
1812 rdev->mode_info.tv_std_property, 1828 rdev->mode_info.tv_std_property,
1813 radeon_atombios_get_tv_info(rdev)); 1829 radeon_atombios_get_tv_info(rdev));
1814 /* no HPD on analog connectors */ 1830 /* no HPD on analog connectors */
@@ -1829,7 +1845,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1829 if (!radeon_connector->ddc_bus) 1845 if (!radeon_connector->ddc_bus)
1830 DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1846 DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1831 } 1847 }
1832 drm_connector_attach_property(&radeon_connector->base, 1848 drm_object_attach_property(&radeon_connector->base.base,
1833 dev->mode_config.scaling_mode_property, 1849 dev->mode_config.scaling_mode_property,
1834 DRM_MODE_SCALE_FULLSCREEN); 1850 DRM_MODE_SCALE_FULLSCREEN);
1835 subpixel_order = SubPixelHorizontalRGB; 1851 subpixel_order = SubPixelHorizontalRGB;
@@ -1908,7 +1924,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
1908 DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1924 DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1909 } 1925 }
1910 radeon_connector->dac_load_detect = true; 1926 radeon_connector->dac_load_detect = true;
1911 drm_connector_attach_property(&radeon_connector->base, 1927 drm_object_attach_property(&radeon_connector->base.base,
1912 rdev->mode_info.load_detect_property, 1928 rdev->mode_info.load_detect_property,
1913 1); 1929 1);
1914 /* no HPD on analog connectors */ 1930 /* no HPD on analog connectors */
@@ -1926,7 +1942,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
1926 DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1942 DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1927 } 1943 }
1928 radeon_connector->dac_load_detect = true; 1944 radeon_connector->dac_load_detect = true;
1929 drm_connector_attach_property(&radeon_connector->base, 1945 drm_object_attach_property(&radeon_connector->base.base,
1930 rdev->mode_info.load_detect_property, 1946 rdev->mode_info.load_detect_property,
1931 1); 1947 1);
1932 /* no HPD on analog connectors */ 1948 /* no HPD on analog connectors */
@@ -1945,7 +1961,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
1945 } 1961 }
1946 if (connector_type == DRM_MODE_CONNECTOR_DVII) { 1962 if (connector_type == DRM_MODE_CONNECTOR_DVII) {
1947 radeon_connector->dac_load_detect = true; 1963 radeon_connector->dac_load_detect = true;
1948 drm_connector_attach_property(&radeon_connector->base, 1964 drm_object_attach_property(&radeon_connector->base.base,
1949 rdev->mode_info.load_detect_property, 1965 rdev->mode_info.load_detect_property,
1950 1); 1966 1);
1951 } 1967 }
@@ -1969,10 +1985,10 @@ radeon_add_legacy_connector(struct drm_device *dev,
1969 */ 1985 */
1970 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) 1986 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
1971 radeon_connector->dac_load_detect = false; 1987 radeon_connector->dac_load_detect = false;
1972 drm_connector_attach_property(&radeon_connector->base, 1988 drm_object_attach_property(&radeon_connector->base.base,
1973 rdev->mode_info.load_detect_property, 1989 rdev->mode_info.load_detect_property,
1974 radeon_connector->dac_load_detect); 1990 radeon_connector->dac_load_detect);
1975 drm_connector_attach_property(&radeon_connector->base, 1991 drm_object_attach_property(&radeon_connector->base.base,
1976 rdev->mode_info.tv_std_property, 1992 rdev->mode_info.tv_std_property,
1977 radeon_combios_get_tv_info(rdev)); 1993 radeon_combios_get_tv_info(rdev));
1978 /* no HPD on analog connectors */ 1994 /* no HPD on analog connectors */
@@ -1988,7 +2004,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
1988 if (!radeon_connector->ddc_bus) 2004 if (!radeon_connector->ddc_bus)
1989 DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 2005 DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1990 } 2006 }
1991 drm_connector_attach_property(&radeon_connector->base, 2007 drm_object_attach_property(&radeon_connector->base.base,
1992 dev->mode_config.scaling_mode_property, 2008 dev->mode_config.scaling_mode_property,
1993 DRM_MODE_SCALE_FULLSCREEN); 2009 DRM_MODE_SCALE_FULLSCREEN);
1994 subpixel_order = SubPixelHorizontalRGB; 2010 subpixel_order = SubPixelHorizontalRGB;
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 8b2797dc7b64..9143fc45e35b 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -116,20 +116,6 @@ u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index)
116 } 116 }
117} 117}
118 118
119u32 RADEON_READ_MM(drm_radeon_private_t *dev_priv, int addr)
120{
121 u32 ret;
122
123 if (addr < 0x10000)
124 ret = DRM_READ32(dev_priv->mmio, addr);
125 else {
126 DRM_WRITE32(dev_priv->mmio, RADEON_MM_INDEX, addr);
127 ret = DRM_READ32(dev_priv->mmio, RADEON_MM_DATA);
128 }
129
130 return ret;
131}
132
133static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) 119static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
134{ 120{
135 u32 ret; 121 u32 ret;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 41672cc563fb..469661fd1903 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -43,6 +43,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
43 return 0; 43 return 0;
44 } 44 }
45 chunk = &p->chunks[p->chunk_relocs_idx]; 45 chunk = &p->chunks[p->chunk_relocs_idx];
46 p->dma_reloc_idx = 0;
46 /* FIXME: we assume that each relocs use 4 dwords */ 47 /* FIXME: we assume that each relocs use 4 dwords */
47 p->nrelocs = chunk->length_dw / 4; 48 p->nrelocs = chunk->length_dw / 4;
48 p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL); 49 p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
@@ -111,6 +112,18 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
111 } else 112 } else
112 p->ring = RADEON_RING_TYPE_GFX_INDEX; 113 p->ring = RADEON_RING_TYPE_GFX_INDEX;
113 break; 114 break;
115 case RADEON_CS_RING_DMA:
116 if (p->rdev->family >= CHIP_CAYMAN) {
117 if (p->priority > 0)
118 p->ring = R600_RING_TYPE_DMA_INDEX;
119 else
120 p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
121 } else if (p->rdev->family >= CHIP_R600) {
122 p->ring = R600_RING_TYPE_DMA_INDEX;
123 } else {
124 return -EINVAL;
125 }
126 break;
114 } 127 }
115 return 0; 128 return 0;
116} 129}
@@ -266,13 +279,13 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
266 p->chunks[p->chunk_ib_idx].length_dw); 279 p->chunks[p->chunk_ib_idx].length_dw);
267 return -EINVAL; 280 return -EINVAL;
268 } 281 }
269 if ((p->rdev->flags & RADEON_IS_AGP)) { 282 if (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) {
270 p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL); 283 p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
271 p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL); 284 p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
272 if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL || 285 if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
273 p->chunks[p->chunk_ib_idx].kpage[1] == NULL) { 286 p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
274 kfree(p->chunks[i].kpage[0]); 287 kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
275 kfree(p->chunks[i].kpage[1]); 288 kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
276 return -ENOMEM; 289 return -ENOMEM;
277 } 290 }
278 } 291 }
@@ -570,7 +583,8 @@ static int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
570 struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx]; 583 struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
571 int i; 584 int i;
572 int size = PAGE_SIZE; 585 int size = PAGE_SIZE;
573 bool copy1 = (p->rdev->flags & RADEON_IS_AGP) ? false : true; 586 bool copy1 = (p->rdev && (p->rdev->flags & RADEON_IS_AGP)) ?
587 false : true;
574 588
575 for (i = ibc->last_copied_page + 1; i < pg_idx; i++) { 589 for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
576 if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)), 590 if (DRM_COPY_FROM_USER(p->ib.ptr + (i * (PAGE_SIZE/4)),
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 0fe56c9f64bd..ad6df625e8b8 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -66,24 +66,25 @@ static void radeon_hide_cursor(struct drm_crtc *crtc)
66 struct radeon_device *rdev = crtc->dev->dev_private; 66 struct radeon_device *rdev = crtc->dev->dev_private;
67 67
68 if (ASIC_IS_DCE4(rdev)) { 68 if (ASIC_IS_DCE4(rdev)) {
69 WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset); 69 WREG32_IDX(EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset,
70 WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) | 70 EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
71 EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2)); 71 EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
72 } else if (ASIC_IS_AVIVO(rdev)) { 72 } else if (ASIC_IS_AVIVO(rdev)) {
73 WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); 73 WREG32_IDX(AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset,
74 WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT)); 74 (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
75 } else { 75 } else {
76 u32 reg;
76 switch (radeon_crtc->crtc_id) { 77 switch (radeon_crtc->crtc_id) {
77 case 0: 78 case 0:
78 WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL); 79 reg = RADEON_CRTC_GEN_CNTL;
79 break; 80 break;
80 case 1: 81 case 1:
81 WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL); 82 reg = RADEON_CRTC2_GEN_CNTL;
82 break; 83 break;
83 default: 84 default:
84 return; 85 return;
85 } 86 }
86 WREG32_P(RADEON_MM_DATA, 0, ~RADEON_CRTC_CUR_EN); 87 WREG32_IDX(reg, RREG32_IDX(reg) & ~RADEON_CRTC_CUR_EN);
87 } 88 }
88} 89}
89 90
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index e2f5f888c374..edfc54e41842 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -897,6 +897,25 @@ static void radeon_check_arguments(struct radeon_device *rdev)
897} 897}
898 898
899/** 899/**
900 * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
901 * needed for waking up.
902 *
903 * @pdev: pci dev pointer
904 */
905static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
906{
907
908 /* 6600m in a macbook pro */
909 if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
910 pdev->subsystem_device == 0x00e2) {
911 printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
912 return true;
913 }
914
915 return false;
916}
917
918/**
900 * radeon_switcheroo_set_state - set switcheroo state 919 * radeon_switcheroo_set_state - set switcheroo state
901 * 920 *
902 * @pdev: pci dev pointer 921 * @pdev: pci dev pointer
@@ -910,10 +929,19 @@ static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
910 struct drm_device *dev = pci_get_drvdata(pdev); 929 struct drm_device *dev = pci_get_drvdata(pdev);
911 pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; 930 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
912 if (state == VGA_SWITCHEROO_ON) { 931 if (state == VGA_SWITCHEROO_ON) {
932 unsigned d3_delay = dev->pdev->d3_delay;
933
913 printk(KERN_INFO "radeon: switched on\n"); 934 printk(KERN_INFO "radeon: switched on\n");
914 /* don't suspend or resume card normally */ 935 /* don't suspend or resume card normally */
915 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 936 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
937
938 if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
939 dev->pdev->d3_delay = 20;
940
916 radeon_resume_kms(dev); 941 radeon_resume_kms(dev);
942
943 dev->pdev->d3_delay = d3_delay;
944
917 dev->switch_power_state = DRM_SWITCH_POWER_ON; 945 dev->switch_power_state = DRM_SWITCH_POWER_ON;
918 drm_kms_helper_poll_enable(dev); 946 drm_kms_helper_poll_enable(dev);
919 } else { 947 } else {
@@ -1059,6 +1087,7 @@ int radeon_device_init(struct radeon_device *rdev,
1059 1087
1060 /* Registers mapping */ 1088 /* Registers mapping */
1061 /* TODO: block userspace mapping of io register */ 1089 /* TODO: block userspace mapping of io register */
1090 spin_lock_init(&rdev->mmio_idx_lock);
1062 rdev->rmmio_base = pci_resource_start(rdev->pdev, 2); 1091 rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
1063 rdev->rmmio_size = pci_resource_len(rdev->pdev, 2); 1092 rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
1064 rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size); 1093 rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
@@ -1163,6 +1192,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
1163 struct drm_crtc *crtc; 1192 struct drm_crtc *crtc;
1164 struct drm_connector *connector; 1193 struct drm_connector *connector;
1165 int i, r; 1194 int i, r;
1195 bool force_completion = false;
1166 1196
1167 if (dev == NULL || dev->dev_private == NULL) { 1197 if (dev == NULL || dev->dev_private == NULL) {
1168 return -ENODEV; 1198 return -ENODEV;
@@ -1205,8 +1235,16 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
1205 1235
1206 mutex_lock(&rdev->ring_lock); 1236 mutex_lock(&rdev->ring_lock);
1207 /* wait for gpu to finish processing current batch */ 1237 /* wait for gpu to finish processing current batch */
1208 for (i = 0; i < RADEON_NUM_RINGS; i++) 1238 for (i = 0; i < RADEON_NUM_RINGS; i++) {
1209 radeon_fence_wait_empty_locked(rdev, i); 1239 r = radeon_fence_wait_empty_locked(rdev, i);
1240 if (r) {
1241 /* delay GPU reset to resume */
1242 force_completion = true;
1243 }
1244 }
1245 if (force_completion) {
1246 radeon_fence_driver_force_completion(rdev);
1247 }
1210 mutex_unlock(&rdev->ring_lock); 1248 mutex_unlock(&rdev->ring_lock);
1211 1249
1212 radeon_save_bios_scratch_regs(rdev); 1250 radeon_save_bios_scratch_regs(rdev);
@@ -1337,7 +1375,6 @@ retry:
1337 } 1375 }
1338 1376
1339 radeon_restore_bios_scratch_regs(rdev); 1377 radeon_restore_bios_scratch_regs(rdev);
1340 drm_helper_resume_force_mode(rdev->ddev);
1341 1378
1342 if (!r) { 1379 if (!r) {
1343 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 1380 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -1357,11 +1394,14 @@ retry:
1357 } 1394 }
1358 } 1395 }
1359 } else { 1396 } else {
1397 radeon_fence_driver_force_completion(rdev);
1360 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 1398 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1361 kfree(ring_data[i]); 1399 kfree(ring_data[i]);
1362 } 1400 }
1363 } 1401 }
1364 1402
1403 drm_helper_resume_force_mode(rdev->ddev);
1404
1365 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); 1405 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
1366 if (r) { 1406 if (r) {
1367 /* bad news, how to tell it to userspace ? */ 1407 /* bad news, how to tell it to userspace ? */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index bfa2a6015727..1da2386d7cf7 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -378,8 +378,12 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
378 work->old_rbo = rbo; 378 work->old_rbo = rbo;
379 obj = new_radeon_fb->obj; 379 obj = new_radeon_fb->obj;
380 rbo = gem_to_radeon_bo(obj); 380 rbo = gem_to_radeon_bo(obj);
381
382 spin_lock(&rbo->tbo.bdev->fence_lock);
381 if (rbo->tbo.sync_obj) 383 if (rbo->tbo.sync_obj)
382 work->fence = radeon_fence_ref(rbo->tbo.sync_obj); 384 work->fence = radeon_fence_ref(rbo->tbo.sync_obj);
385 spin_unlock(&rbo->tbo.bdev->fence_lock);
386
383 INIT_WORK(&work->work, radeon_unpin_work_func); 387 INIT_WORK(&work->work, radeon_unpin_work_func);
384 388
385 /* We borrow the event spin lock for protecting unpin_work */ 389 /* We borrow the event spin lock for protecting unpin_work */
@@ -695,10 +699,15 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
695 if (radeon_connector->router.ddc_valid) 699 if (radeon_connector->router.ddc_valid)
696 radeon_router_select_ddc_port(radeon_connector); 700 radeon_router_select_ddc_port(radeon_connector);
697 701
698 if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || 702 if (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
699 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) || 703 ENCODER_OBJECT_ID_NONE) {
700 (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) != 704 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
701 ENCODER_OBJECT_ID_NONE)) { 705
706 if (dig->dp_i2c_bus)
707 radeon_connector->edid = drm_get_edid(&radeon_connector->base,
708 &dig->dp_i2c_bus->adapter);
709 } else if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
710 (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
702 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; 711 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
703 712
704 if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT || 713 if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 07eb84e8a8a4..dff6cf77f953 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -65,9 +65,13 @@
65 * 2.22.0 - r600 only: RESOLVE_BOX allowed 65 * 2.22.0 - r600 only: RESOLVE_BOX allowed
66 * 2.23.0 - allow STRMOUT_BASE_UPDATE on RS780 and RS880 66 * 2.23.0 - allow STRMOUT_BASE_UPDATE on RS780 and RS880
67 * 2.24.0 - eg only: allow MIP_ADDRESS=0 for MSAA textures 67 * 2.24.0 - eg only: allow MIP_ADDRESS=0 for MSAA textures
68 * 2.25.0 - eg+: new info request for num SE and num SH
69 * 2.26.0 - r600-eg: fix htile size computation
70 * 2.27.0 - r600-SI: Add CS ioctl support for async DMA
71 * 2.28.0 - r600-eg: Add MEM_WRITE packet support
68 */ 72 */
69#define KMS_DRIVER_MAJOR 2 73#define KMS_DRIVER_MAJOR 2
70#define KMS_DRIVER_MINOR 24 74#define KMS_DRIVER_MINOR 28
71#define KMS_DRIVER_PATCHLEVEL 0 75#define KMS_DRIVER_PATCHLEVEL 0
72int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 76int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
73int radeon_driver_unload_kms(struct drm_device *dev); 77int radeon_driver_unload_kms(struct drm_device *dev);
@@ -281,12 +285,15 @@ static struct drm_driver driver_old = {
281 285
282static struct drm_driver kms_driver; 286static struct drm_driver kms_driver;
283 287
284static void radeon_kick_out_firmware_fb(struct pci_dev *pdev) 288static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
285{ 289{
286 struct apertures_struct *ap; 290 struct apertures_struct *ap;
287 bool primary = false; 291 bool primary = false;
288 292
289 ap = alloc_apertures(1); 293 ap = alloc_apertures(1);
294 if (!ap)
295 return -ENOMEM;
296
290 ap->ranges[0].base = pci_resource_start(pdev, 0); 297 ap->ranges[0].base = pci_resource_start(pdev, 0);
291 ap->ranges[0].size = pci_resource_len(pdev, 0); 298 ap->ranges[0].size = pci_resource_len(pdev, 0);
292 299
@@ -295,13 +302,19 @@ static void radeon_kick_out_firmware_fb(struct pci_dev *pdev)
295#endif 302#endif
296 remove_conflicting_framebuffers(ap, "radeondrmfb", primary); 303 remove_conflicting_framebuffers(ap, "radeondrmfb", primary);
297 kfree(ap); 304 kfree(ap);
305
306 return 0;
298} 307}
299 308
300static int __devinit 309static int radeon_pci_probe(struct pci_dev *pdev,
301radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 310 const struct pci_device_id *ent)
302{ 311{
312 int ret;
313
303 /* Get rid of things like offb */ 314 /* Get rid of things like offb */
304 radeon_kick_out_firmware_fb(pdev); 315 ret = radeon_kick_out_firmware_fb(pdev);
316 if (ret)
317 return ret;
305 318
306 return drm_get_pci_dev(pdev, ent, &kms_driver); 319 return drm_get_pci_dev(pdev, ent, &kms_driver);
307} 320}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index a1b59ca96d01..e7fdf163a8ca 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -366,7 +366,6 @@ extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file
366extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv); 366extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv);
367extern void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc); 367extern void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc);
368extern void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base); 368extern void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base);
369extern u32 RADEON_READ_MM(drm_radeon_private_t *dev_priv, int addr);
370 369
371extern void radeon_freelist_reset(struct drm_device * dev); 370extern void radeon_freelist_reset(struct drm_device * dev);
372extern struct drm_buf *radeon_freelist_get(struct drm_device * dev); 371extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 22bd6c2c2740..34356252567a 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -609,26 +609,20 @@ int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
609 * Returns 0 if the fences have passed, error for all other cases. 609 * Returns 0 if the fences have passed, error for all other cases.
610 * Caller must hold ring lock. 610 * Caller must hold ring lock.
611 */ 611 */
612void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring) 612int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
613{ 613{
614 uint64_t seq = rdev->fence_drv[ring].sync_seq[ring]; 614 uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
615 int r;
615 616
616 while(1) { 617 r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
617 int r; 618 if (r) {
618 r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
619 if (r == -EDEADLK) { 619 if (r == -EDEADLK) {
620 mutex_unlock(&rdev->ring_lock); 620 return -EDEADLK;
621 r = radeon_gpu_reset(rdev);
622 mutex_lock(&rdev->ring_lock);
623 if (!r)
624 continue;
625 }
626 if (r) {
627 dev_err(rdev->dev, "error waiting for ring to become"
628 " idle (%d)\n", r);
629 } 621 }
630 return; 622 dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
623 ring, r);
631 } 624 }
625 return 0;
632} 626}
633 627
634/** 628/**
@@ -772,7 +766,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
772 int r; 766 int r;
773 767
774 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg); 768 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
775 if (rdev->wb.use_event) { 769 if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
776 rdev->fence_drv[ring].scratch_reg = 0; 770 rdev->fence_drv[ring].scratch_reg = 0;
777 index = R600_WB_EVENT_OFFSET + ring * 4; 771 index = R600_WB_EVENT_OFFSET + ring * 4;
778 } else { 772 } else {
@@ -854,13 +848,17 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
854 */ 848 */
855void radeon_fence_driver_fini(struct radeon_device *rdev) 849void radeon_fence_driver_fini(struct radeon_device *rdev)
856{ 850{
857 int ring; 851 int ring, r;
858 852
859 mutex_lock(&rdev->ring_lock); 853 mutex_lock(&rdev->ring_lock);
860 for (ring = 0; ring < RADEON_NUM_RINGS; ring++) { 854 for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
861 if (!rdev->fence_drv[ring].initialized) 855 if (!rdev->fence_drv[ring].initialized)
862 continue; 856 continue;
863 radeon_fence_wait_empty_locked(rdev, ring); 857 r = radeon_fence_wait_empty_locked(rdev, ring);
858 if (r) {
859 /* no need to trigger GPU reset as we are unloading */
860 radeon_fence_driver_force_completion(rdev);
861 }
864 wake_up_all(&rdev->fence_queue); 862 wake_up_all(&rdev->fence_queue);
865 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg); 863 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
866 rdev->fence_drv[ring].initialized = false; 864 rdev->fence_drv[ring].initialized = false;
@@ -868,6 +866,25 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
868 mutex_unlock(&rdev->ring_lock); 866 mutex_unlock(&rdev->ring_lock);
869} 867}
870 868
869/**
870 * radeon_fence_driver_force_completion - force all fence waiter to complete
871 *
872 * @rdev: radeon device pointer
873 *
874 * In case of GPU reset failure make sure no process keep waiting on fence
875 * that will never complete.
876 */
877void radeon_fence_driver_force_completion(struct radeon_device *rdev)
878{
879 int ring;
880
881 for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
882 if (!rdev->fence_drv[ring].initialized)
883 continue;
884 radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
885 }
886}
887
871 888
872/* 889/*
873 * Fence debugfs 890 * Fence debugfs
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 4debd60e5aa6..6e24f84755b5 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -1237,7 +1237,6 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
1237{ 1237{
1238 struct radeon_bo_va *bo_va; 1238 struct radeon_bo_va *bo_va;
1239 1239
1240 BUG_ON(!atomic_read(&bo->tbo.reserved));
1241 list_for_each_entry(bo_va, &bo->va, bo_list) { 1240 list_for_each_entry(bo_va, &bo->va, bo_list) {
1242 bo_va->valid = false; 1241 bo_va->valid = false;
1243 } 1242 }
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index c5bddd630eb9..fc60b74ee304 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -39,7 +39,7 @@ extern u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap);
39 * radeon_ddc_probe 39 * radeon_ddc_probe
40 * 40 *
41 */ 41 */
42bool radeon_ddc_probe(struct radeon_connector *radeon_connector) 42bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux)
43{ 43{
44 u8 out = 0x0; 44 u8 out = 0x0;
45 u8 buf[8]; 45 u8 buf[8];
@@ -63,7 +63,13 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
63 if (radeon_connector->router.ddc_valid) 63 if (radeon_connector->router.ddc_valid)
64 radeon_router_select_ddc_port(radeon_connector); 64 radeon_router_select_ddc_port(radeon_connector);
65 65
66 ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2); 66 if (use_aux) {
67 struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
68 ret = i2c_transfer(&dig->dp_i2c_bus->adapter, msgs, 2);
69 } else {
70 ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
71 }
72
67 if (ret != 2) 73 if (ret != 2)
68 /* Couldn't find an accessible DDC on this connector */ 74 /* Couldn't find an accessible DDC on this connector */
69 return false; 75 return false;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index dc781c49b96b..9c312f9afb68 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -361,6 +361,22 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
361 return -EINVAL; 361 return -EINVAL;
362 } 362 }
363 break; 363 break;
364 case RADEON_INFO_MAX_SE:
365 if (rdev->family >= CHIP_TAHITI)
366 value = rdev->config.si.max_shader_engines;
367 else if (rdev->family >= CHIP_CAYMAN)
368 value = rdev->config.cayman.max_shader_engines;
369 else if (rdev->family >= CHIP_CEDAR)
370 value = rdev->config.evergreen.num_ses;
371 else
372 value = 1;
373 break;
374 case RADEON_INFO_MAX_SH_PER_SE:
375 if (rdev->family >= CHIP_TAHITI)
376 value = rdev->config.si.max_sh_per_se;
377 else
378 return -EINVAL;
379 break;
364 default: 380 default:
365 DRM_DEBUG_KMS("Invalid request %d\n", info->request); 381 DRM_DEBUG_KMS("Invalid request %d\n", info->request);
366 return -EINVAL; 382 return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 5677a424b585..6857cb4efb76 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -295,6 +295,7 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
295 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 295 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
296 struct drm_device *dev = crtc->dev; 296 struct drm_device *dev = crtc->dev;
297 struct radeon_device *rdev = dev->dev_private; 297 struct radeon_device *rdev = dev->dev_private;
298 uint32_t crtc_ext_cntl = 0;
298 uint32_t mask; 299 uint32_t mask;
299 300
300 if (radeon_crtc->crtc_id) 301 if (radeon_crtc->crtc_id)
@@ -307,6 +308,16 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
307 RADEON_CRTC_VSYNC_DIS | 308 RADEON_CRTC_VSYNC_DIS |
308 RADEON_CRTC_HSYNC_DIS); 309 RADEON_CRTC_HSYNC_DIS);
309 310
311 /*
312 * On all dual CRTC GPUs this bit controls the CRTC of the primary DAC.
313 * Therefore it is set in the DAC DMPS function.
314 * This is different for GPU's with a single CRTC but a primary and a
315 * TV DAC: here it controls the single CRTC no matter where it is
316 * routed. Therefore we set it here.
317 */
318 if (rdev->flags & RADEON_SINGLE_CRTC)
319 crtc_ext_cntl = RADEON_CRTC_CRT_ON;
320
310 switch (mode) { 321 switch (mode) {
311 case DRM_MODE_DPMS_ON: 322 case DRM_MODE_DPMS_ON:
312 radeon_crtc->enabled = true; 323 radeon_crtc->enabled = true;
@@ -317,7 +328,7 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
317 else { 328 else {
318 WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN | 329 WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN |
319 RADEON_CRTC_DISP_REQ_EN_B)); 330 RADEON_CRTC_DISP_REQ_EN_B));
320 WREG32_P(RADEON_CRTC_EXT_CNTL, 0, ~mask); 331 WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
321 } 332 }
322 drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); 333 drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
323 radeon_crtc_load_lut(crtc); 334 radeon_crtc_load_lut(crtc);
@@ -331,7 +342,7 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
331 else { 342 else {
332 WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN | 343 WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN |
333 RADEON_CRTC_DISP_REQ_EN_B)); 344 RADEON_CRTC_DISP_REQ_EN_B));
334 WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~mask); 345 WREG32_P(RADEON_CRTC_EXT_CNTL, mask, ~(mask | crtc_ext_cntl));
335 } 346 }
336 radeon_crtc->enabled = false; 347 radeon_crtc->enabled = false;
337 /* adjust pm to dpms changes AFTER disabling crtcs */ 348 /* adjust pm to dpms changes AFTER disabling crtcs */
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 0063df9d166d..62cd512f5c8d 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -537,7 +537,9 @@ static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode
537 break; 537 break;
538 } 538 }
539 539
540 WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl); 540 /* handled in radeon_crtc_dpms() */
541 if (!(rdev->flags & RADEON_SINGLE_CRTC))
542 WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
541 WREG32(RADEON_DAC_CNTL, dac_cntl); 543 WREG32(RADEON_DAC_CNTL, dac_cntl);
542 WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl); 544 WREG32(RADEON_DAC_MACRO_CNTL, dac_macro_cntl);
543 545
@@ -638,6 +640,14 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
638 enum drm_connector_status found = connector_status_disconnected; 640 enum drm_connector_status found = connector_status_disconnected;
639 bool color = true; 641 bool color = true;
640 642
643 /* just don't bother on RN50 those chip are often connected to remoting
644 * console hw and often we get failure to load detect those. So to make
645 * everyone happy report the encoder as always connected.
646 */
647 if (ASIC_IS_RN50(rdev)) {
648 return connector_status_connected;
649 }
650
641 /* save the regs we need */ 651 /* save the regs we need */
642 vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL); 652 vclk_ecp_cntl = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
643 crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); 653 crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
@@ -662,6 +672,8 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
662 672
663 if (ASIC_IS_R300(rdev)) 673 if (ASIC_IS_R300(rdev))
664 tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT); 674 tmp |= (0x1b6 << RADEON_DAC_FORCE_DATA_SHIFT);
675 else if (ASIC_IS_RV100(rdev))
676 tmp |= (0x1ac << RADEON_DAC_FORCE_DATA_SHIFT);
665 else 677 else
666 tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT); 678 tmp |= (0x180 << RADEON_DAC_FORCE_DATA_SHIFT);
667 679
@@ -671,6 +683,7 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
671 tmp |= RADEON_DAC_RANGE_CNTL_PS2 | RADEON_DAC_CMP_EN; 683 tmp |= RADEON_DAC_RANGE_CNTL_PS2 | RADEON_DAC_CMP_EN;
672 WREG32(RADEON_DAC_CNTL, tmp); 684 WREG32(RADEON_DAC_CNTL, tmp);
673 685
686 tmp = dac_macro_cntl;
674 tmp &= ~(RADEON_DAC_PDWN_R | 687 tmp &= ~(RADEON_DAC_PDWN_R |
675 RADEON_DAC_PDWN_G | 688 RADEON_DAC_PDWN_G |
676 RADEON_DAC_PDWN_B); 689 RADEON_DAC_PDWN_B);
@@ -1092,7 +1105,8 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
1092 } else { 1105 } else {
1093 if (is_tv) 1106 if (is_tv)
1094 WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl); 1107 WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl);
1095 else 1108 /* handled in radeon_crtc_dpms() */
1109 else if (!(rdev->flags & RADEON_SINGLE_CRTC))
1096 WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); 1110 WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
1097 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); 1111 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
1098 } 1112 }
@@ -1416,13 +1430,104 @@ static bool radeon_legacy_tv_detect(struct drm_encoder *encoder,
1416 return found; 1430 return found;
1417} 1431}
1418 1432
1433static bool radeon_legacy_ext_dac_detect(struct drm_encoder *encoder,
1434 struct drm_connector *connector)
1435{
1436 struct drm_device *dev = encoder->dev;
1437 struct radeon_device *rdev = dev->dev_private;
1438 uint32_t gpio_monid, fp2_gen_cntl, disp_output_cntl, crtc2_gen_cntl;
1439 uint32_t disp_lin_trans_grph_a, disp_lin_trans_grph_b, disp_lin_trans_grph_c;
1440 uint32_t disp_lin_trans_grph_d, disp_lin_trans_grph_e, disp_lin_trans_grph_f;
1441 uint32_t tmp, crtc2_h_total_disp, crtc2_v_total_disp;
1442 uint32_t crtc2_h_sync_strt_wid, crtc2_v_sync_strt_wid;
1443 bool found = false;
1444 int i;
1445
1446 /* save the regs we need */
1447 gpio_monid = RREG32(RADEON_GPIO_MONID);
1448 fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
1449 disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
1450 crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
1451 disp_lin_trans_grph_a = RREG32(RADEON_DISP_LIN_TRANS_GRPH_A);
1452 disp_lin_trans_grph_b = RREG32(RADEON_DISP_LIN_TRANS_GRPH_B);
1453 disp_lin_trans_grph_c = RREG32(RADEON_DISP_LIN_TRANS_GRPH_C);
1454 disp_lin_trans_grph_d = RREG32(RADEON_DISP_LIN_TRANS_GRPH_D);
1455 disp_lin_trans_grph_e = RREG32(RADEON_DISP_LIN_TRANS_GRPH_E);
1456 disp_lin_trans_grph_f = RREG32(RADEON_DISP_LIN_TRANS_GRPH_F);
1457 crtc2_h_total_disp = RREG32(RADEON_CRTC2_H_TOTAL_DISP);
1458 crtc2_v_total_disp = RREG32(RADEON_CRTC2_V_TOTAL_DISP);
1459 crtc2_h_sync_strt_wid = RREG32(RADEON_CRTC2_H_SYNC_STRT_WID);
1460 crtc2_v_sync_strt_wid = RREG32(RADEON_CRTC2_V_SYNC_STRT_WID);
1461
1462 tmp = RREG32(RADEON_GPIO_MONID);
1463 tmp &= ~RADEON_GPIO_A_0;
1464 WREG32(RADEON_GPIO_MONID, tmp);
1465
1466 WREG32(RADEON_FP2_GEN_CNTL, (RADEON_FP2_ON |
1467 RADEON_FP2_PANEL_FORMAT |
1468 R200_FP2_SOURCE_SEL_TRANS_UNIT |
1469 RADEON_FP2_DVO_EN |
1470 R200_FP2_DVO_RATE_SEL_SDR));
1471
1472 WREG32(RADEON_DISP_OUTPUT_CNTL, (RADEON_DISP_DAC_SOURCE_RMX |
1473 RADEON_DISP_TRANS_MATRIX_GRAPHICS));
1474
1475 WREG32(RADEON_CRTC2_GEN_CNTL, (RADEON_CRTC2_EN |
1476 RADEON_CRTC2_DISP_REQ_EN_B));
1477
1478 WREG32(RADEON_DISP_LIN_TRANS_GRPH_A, 0x00000000);
1479 WREG32(RADEON_DISP_LIN_TRANS_GRPH_B, 0x000003f0);
1480 WREG32(RADEON_DISP_LIN_TRANS_GRPH_C, 0x00000000);
1481 WREG32(RADEON_DISP_LIN_TRANS_GRPH_D, 0x000003f0);
1482 WREG32(RADEON_DISP_LIN_TRANS_GRPH_E, 0x00000000);
1483 WREG32(RADEON_DISP_LIN_TRANS_GRPH_F, 0x000003f0);
1484
1485 WREG32(RADEON_CRTC2_H_TOTAL_DISP, 0x01000008);
1486 WREG32(RADEON_CRTC2_H_SYNC_STRT_WID, 0x00000800);
1487 WREG32(RADEON_CRTC2_V_TOTAL_DISP, 0x00080001);
1488 WREG32(RADEON_CRTC2_V_SYNC_STRT_WID, 0x00000080);
1489
1490 for (i = 0; i < 200; i++) {
1491 tmp = RREG32(RADEON_GPIO_MONID);
1492 if (tmp & RADEON_GPIO_Y_0)
1493 found = true;
1494
1495 if (found)
1496 break;
1497
1498 if (!drm_can_sleep())
1499 mdelay(1);
1500 else
1501 msleep(1);
1502 }
1503
1504 /* restore the regs we used */
1505 WREG32(RADEON_DISP_LIN_TRANS_GRPH_A, disp_lin_trans_grph_a);
1506 WREG32(RADEON_DISP_LIN_TRANS_GRPH_B, disp_lin_trans_grph_b);
1507 WREG32(RADEON_DISP_LIN_TRANS_GRPH_C, disp_lin_trans_grph_c);
1508 WREG32(RADEON_DISP_LIN_TRANS_GRPH_D, disp_lin_trans_grph_d);
1509 WREG32(RADEON_DISP_LIN_TRANS_GRPH_E, disp_lin_trans_grph_e);
1510 WREG32(RADEON_DISP_LIN_TRANS_GRPH_F, disp_lin_trans_grph_f);
1511 WREG32(RADEON_CRTC2_H_TOTAL_DISP, crtc2_h_total_disp);
1512 WREG32(RADEON_CRTC2_V_TOTAL_DISP, crtc2_v_total_disp);
1513 WREG32(RADEON_CRTC2_H_SYNC_STRT_WID, crtc2_h_sync_strt_wid);
1514 WREG32(RADEON_CRTC2_V_SYNC_STRT_WID, crtc2_v_sync_strt_wid);
1515 WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
1516 WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
1517 WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl);
1518 WREG32(RADEON_GPIO_MONID, gpio_monid);
1519
1520 return found;
1521}
1522
1419static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder *encoder, 1523static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder *encoder,
1420 struct drm_connector *connector) 1524 struct drm_connector *connector)
1421{ 1525{
1422 struct drm_device *dev = encoder->dev; 1526 struct drm_device *dev = encoder->dev;
1423 struct radeon_device *rdev = dev->dev_private; 1527 struct radeon_device *rdev = dev->dev_private;
1424 uint32_t crtc2_gen_cntl, tv_dac_cntl, dac_cntl2, dac_ext_cntl; 1528 uint32_t crtc2_gen_cntl = 0, tv_dac_cntl, dac_cntl2, dac_ext_cntl;
1425 uint32_t disp_hw_debug, disp_output_cntl, gpiopad_a, pixclks_cntl, tmp; 1529 uint32_t gpiopad_a = 0, pixclks_cntl, tmp;
1530 uint32_t disp_output_cntl = 0, disp_hw_debug = 0, crtc_ext_cntl = 0;
1426 enum drm_connector_status found = connector_status_disconnected; 1531 enum drm_connector_status found = connector_status_disconnected;
1427 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1532 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1428 struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv; 1533 struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv;
@@ -1459,12 +1564,27 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder
1459 return connector_status_disconnected; 1564 return connector_status_disconnected;
1460 } 1565 }
1461 1566
1567 /* R200 uses an external DAC for secondary DAC */
1568 if (rdev->family == CHIP_R200) {
1569 if (radeon_legacy_ext_dac_detect(encoder, connector))
1570 found = connector_status_connected;
1571 return found;
1572 }
1573
1462 /* save the regs we need */ 1574 /* save the regs we need */
1463 pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL); 1575 pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL);
1464 gpiopad_a = ASIC_IS_R300(rdev) ? RREG32(RADEON_GPIOPAD_A) : 0; 1576
1465 disp_output_cntl = ASIC_IS_R300(rdev) ? RREG32(RADEON_DISP_OUTPUT_CNTL) : 0; 1577 if (rdev->flags & RADEON_SINGLE_CRTC) {
1466 disp_hw_debug = ASIC_IS_R300(rdev) ? 0 : RREG32(RADEON_DISP_HW_DEBUG); 1578 crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
1467 crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL); 1579 } else {
1580 if (ASIC_IS_R300(rdev)) {
1581 gpiopad_a = RREG32(RADEON_GPIOPAD_A);
1582 disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);
1583 } else {
1584 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
1585 }
1586 crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
1587 }
1468 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); 1588 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
1469 dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL); 1589 dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL);
1470 dac_cntl2 = RREG32(RADEON_DAC_CNTL2); 1590 dac_cntl2 = RREG32(RADEON_DAC_CNTL2);
@@ -1473,22 +1593,24 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder
1473 | RADEON_PIX2CLK_DAC_ALWAYS_ONb); 1593 | RADEON_PIX2CLK_DAC_ALWAYS_ONb);
1474 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); 1594 WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
1475 1595
1476 if (ASIC_IS_R300(rdev)) 1596 if (rdev->flags & RADEON_SINGLE_CRTC) {
1477 WREG32_P(RADEON_GPIOPAD_A, 1, ~1); 1597 tmp = crtc_ext_cntl | RADEON_CRTC_CRT_ON;
1478 1598 WREG32(RADEON_CRTC_EXT_CNTL, tmp);
1479 tmp = crtc2_gen_cntl & ~RADEON_CRTC2_PIX_WIDTH_MASK;
1480 tmp |= RADEON_CRTC2_CRT2_ON |
1481 (2 << RADEON_CRTC2_PIX_WIDTH_SHIFT);
1482
1483 WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
1484
1485 if (ASIC_IS_R300(rdev)) {
1486 tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK;
1487 tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
1488 WREG32(RADEON_DISP_OUTPUT_CNTL, tmp);
1489 } else { 1599 } else {
1490 tmp = disp_hw_debug & ~RADEON_CRT2_DISP1_SEL; 1600 tmp = crtc2_gen_cntl & ~RADEON_CRTC2_PIX_WIDTH_MASK;
1491 WREG32(RADEON_DISP_HW_DEBUG, tmp); 1601 tmp |= RADEON_CRTC2_CRT2_ON |
1602 (2 << RADEON_CRTC2_PIX_WIDTH_SHIFT);
1603 WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
1604
1605 if (ASIC_IS_R300(rdev)) {
1606 WREG32_P(RADEON_GPIOPAD_A, 1, ~1);
1607 tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK;
1608 tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
1609 WREG32(RADEON_DISP_OUTPUT_CNTL, tmp);
1610 } else {
1611 tmp = disp_hw_debug & ~RADEON_CRT2_DISP1_SEL;
1612 WREG32(RADEON_DISP_HW_DEBUG, tmp);
1613 }
1492 } 1614 }
1493 1615
1494 tmp = RADEON_TV_DAC_NBLANK | 1616 tmp = RADEON_TV_DAC_NBLANK |
@@ -1530,14 +1652,19 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder
1530 WREG32(RADEON_DAC_CNTL2, dac_cntl2); 1652 WREG32(RADEON_DAC_CNTL2, dac_cntl2);
1531 WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl); 1653 WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl);
1532 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); 1654 WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
1533 WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
1534 1655
1535 if (ASIC_IS_R300(rdev)) { 1656 if (rdev->flags & RADEON_SINGLE_CRTC) {
1536 WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); 1657 WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl);
1537 WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
1538 } else { 1658 } else {
1539 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); 1659 WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
1660 if (ASIC_IS_R300(rdev)) {
1661 WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
1662 WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);
1663 } else {
1664 WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
1665 }
1540 } 1666 }
1667
1541 WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl); 1668 WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
1542 1669
1543 return found; 1670 return found;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 92c5f473cf08..4003f5a68c09 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -209,7 +209,8 @@ enum radeon_connector_table {
209 CT_RN50_POWER, 209 CT_RN50_POWER,
210 CT_MAC_X800, 210 CT_MAC_X800,
211 CT_MAC_G5_9600, 211 CT_MAC_G5_9600,
212 CT_SAM440EP 212 CT_SAM440EP,
213 CT_MAC_G4_SILVER
213}; 214};
214 215
215enum radeon_dvo_chip { 216enum radeon_dvo_chip {
@@ -427,7 +428,7 @@ struct radeon_connector_atom_dig {
427 uint32_t igp_lane_info; 428 uint32_t igp_lane_info;
428 /* displayport */ 429 /* displayport */
429 struct radeon_i2c_chan *dp_i2c_bus; 430 struct radeon_i2c_chan *dp_i2c_bus;
430 u8 dpcd[8]; 431 u8 dpcd[DP_RECEIVER_CAP_SIZE];
431 u8 dp_sink_type; 432 u8 dp_sink_type;
432 int dp_clock; 433 int dp_clock;
433 int dp_lane_count; 434 int dp_lane_count;
@@ -558,7 +559,7 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
558 u8 val); 559 u8 val);
559extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector); 560extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
560extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector); 561extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
561extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); 562extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool use_aux);
562extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); 563extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
563 564
564extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector); 565extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index b91118ccef86..883c95d8d90f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -84,17 +84,34 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
84 rbo->placement.fpfn = 0; 84 rbo->placement.fpfn = 0;
85 rbo->placement.lpfn = 0; 85 rbo->placement.lpfn = 0;
86 rbo->placement.placement = rbo->placements; 86 rbo->placement.placement = rbo->placements;
87 rbo->placement.busy_placement = rbo->placements;
88 if (domain & RADEON_GEM_DOMAIN_VRAM) 87 if (domain & RADEON_GEM_DOMAIN_VRAM)
89 rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | 88 rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
90 TTM_PL_FLAG_VRAM; 89 TTM_PL_FLAG_VRAM;
91 if (domain & RADEON_GEM_DOMAIN_GTT) 90 if (domain & RADEON_GEM_DOMAIN_GTT) {
92 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; 91 if (rbo->rdev->flags & RADEON_IS_AGP) {
93 if (domain & RADEON_GEM_DOMAIN_CPU) 92 rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
94 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 93 } else {
94 rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
95 }
96 }
97 if (domain & RADEON_GEM_DOMAIN_CPU) {
98 if (rbo->rdev->flags & RADEON_IS_AGP) {
99 rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
100 } else {
101 rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
102 }
103 }
95 if (!c) 104 if (!c)
96 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 105 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
97 rbo->placement.num_placement = c; 106 rbo->placement.num_placement = c;
107
108 c = 0;
109 rbo->placement.busy_placement = rbo->busy_placements;
110 if (rbo->rdev->flags & RADEON_IS_AGP) {
111 rbo->busy_placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
112 } else {
113 rbo->busy_placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
114 }
98 rbo->placement.num_busy_placement = c; 115 rbo->placement.num_busy_placement = c;
99} 116}
100 117
@@ -140,7 +157,7 @@ int radeon_bo_create(struct radeon_device *rdev,
140 /* Kernel allocation are uninterruptible */ 157 /* Kernel allocation are uninterruptible */
141 down_read(&rdev->pm.mclk_lock); 158 down_read(&rdev->pm.mclk_lock);
142 r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, 159 r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
143 &bo->placement, page_align, 0, !kernel, NULL, 160 &bo->placement, page_align, !kernel, NULL,
144 acc_size, sg, &radeon_ttm_bo_destroy); 161 acc_size, sg, &radeon_ttm_bo_destroy);
145 up_read(&rdev->pm.mclk_lock); 162 up_read(&rdev->pm.mclk_lock);
146 if (unlikely(r != 0)) { 163 if (unlikely(r != 0)) {
@@ -240,7 +257,7 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
240 } 257 }
241 for (i = 0; i < bo->placement.num_placement; i++) 258 for (i = 0; i < bo->placement.num_placement; i++)
242 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 259 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
243 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false); 260 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
244 if (likely(r == 0)) { 261 if (likely(r == 0)) {
245 bo->pin_count = 1; 262 bo->pin_count = 1;
246 if (gpu_addr != NULL) 263 if (gpu_addr != NULL)
@@ -269,7 +286,7 @@ int radeon_bo_unpin(struct radeon_bo *bo)
269 return 0; 286 return 0;
270 for (i = 0; i < bo->placement.num_placement; i++) 287 for (i = 0; i < bo->placement.num_placement; i++)
271 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; 288 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
272 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false); 289 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
273 if (unlikely(r != 0)) 290 if (unlikely(r != 0))
274 dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo); 291 dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
275 return r; 292 return r;
@@ -340,7 +357,6 @@ int radeon_bo_list_validate(struct list_head *head)
340{ 357{
341 struct radeon_bo_list *lobj; 358 struct radeon_bo_list *lobj;
342 struct radeon_bo *bo; 359 struct radeon_bo *bo;
343 u32 domain;
344 int r; 360 int r;
345 361
346 r = ttm_eu_reserve_buffers(head); 362 r = ttm_eu_reserve_buffers(head);
@@ -350,17 +366,9 @@ int radeon_bo_list_validate(struct list_head *head)
350 list_for_each_entry(lobj, head, tv.head) { 366 list_for_each_entry(lobj, head, tv.head) {
351 bo = lobj->bo; 367 bo = lobj->bo;
352 if (!bo->pin_count) { 368 if (!bo->pin_count) {
353 domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
354
355 retry:
356 radeon_ttm_placement_from_domain(bo, domain);
357 r = ttm_bo_validate(&bo->tbo, &bo->placement, 369 r = ttm_bo_validate(&bo->tbo, &bo->placement,
358 true, false, false); 370 true, false);
359 if (unlikely(r)) { 371 if (unlikely(r)) {
360 if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
361 domain |= RADEON_GEM_DOMAIN_GTT;
362 goto retry;
363 }
364 return r; 372 return r;
365 } 373 }
366 } 374 }
@@ -384,7 +392,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
384 int steal; 392 int steal;
385 int i; 393 int i;
386 394
387 BUG_ON(!atomic_read(&bo->tbo.reserved)); 395 BUG_ON(!radeon_bo_is_reserved(bo));
388 396
389 if (!bo->tiling_flags) 397 if (!bo->tiling_flags)
390 return 0; 398 return 0;
@@ -510,7 +518,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
510 uint32_t *tiling_flags, 518 uint32_t *tiling_flags,
511 uint32_t *pitch) 519 uint32_t *pitch)
512{ 520{
513 BUG_ON(!atomic_read(&bo->tbo.reserved)); 521 BUG_ON(!radeon_bo_is_reserved(bo));
514 if (tiling_flags) 522 if (tiling_flags)
515 *tiling_flags = bo->tiling_flags; 523 *tiling_flags = bo->tiling_flags;
516 if (pitch) 524 if (pitch)
@@ -520,7 +528,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
520int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, 528int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
521 bool force_drop) 529 bool force_drop)
522{ 530{
523 BUG_ON(!atomic_read(&bo->tbo.reserved)); 531 BUG_ON(!radeon_bo_is_reserved(bo) && !force_drop);
524 532
525 if (!(bo->tiling_flags & RADEON_TILING_SURFACE)) 533 if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
526 return 0; 534 return 0;
@@ -575,7 +583,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
575 /* hurrah the memory is not visible ! */ 583 /* hurrah the memory is not visible ! */
576 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); 584 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
577 rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; 585 rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
578 r = ttm_bo_validate(bo, &rbo->placement, false, true, false); 586 r = ttm_bo_validate(bo, &rbo->placement, false, false);
579 if (unlikely(r != 0)) 587 if (unlikely(r != 0))
580 return r; 588 return r;
581 offset = bo->mem.start << PAGE_SHIFT; 589 offset = bo->mem.start << PAGE_SHIFT;
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 93cd491fff2e..5fc86b03043b 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -80,7 +80,7 @@ static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
80 80
81static inline bool radeon_bo_is_reserved(struct radeon_bo *bo) 81static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
82{ 82{
83 return !!atomic_read(&bo->tbo.reserved); 83 return ttm_bo_is_reserved(&bo->tbo);
84} 84}
85 85
86static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo) 86static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index aa14dbb7e4fb..0bfa656aa87d 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -234,7 +234,7 @@ static void radeon_set_power_state(struct radeon_device *rdev)
234 234
235static void radeon_pm_set_clocks(struct radeon_device *rdev) 235static void radeon_pm_set_clocks(struct radeon_device *rdev)
236{ 236{
237 int i; 237 int i, r;
238 238
239 /* no need to take locks, etc. if nothing's going to change */ 239 /* no need to take locks, etc. if nothing's going to change */
240 if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) && 240 if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
@@ -248,8 +248,17 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
248 /* wait for the rings to drain */ 248 /* wait for the rings to drain */
249 for (i = 0; i < RADEON_NUM_RINGS; i++) { 249 for (i = 0; i < RADEON_NUM_RINGS; i++) {
250 struct radeon_ring *ring = &rdev->ring[i]; 250 struct radeon_ring *ring = &rdev->ring[i];
251 if (ring->ready) 251 if (!ring->ready) {
252 radeon_fence_wait_empty_locked(rdev, i); 252 continue;
253 }
254 r = radeon_fence_wait_empty_locked(rdev, i);
255 if (r) {
256 /* needs a GPU reset dont reset here */
257 mutex_unlock(&rdev->ring_lock);
258 up_write(&rdev->pm.mclk_lock);
259 mutex_unlock(&rdev->ddev->struct_mutex);
260 return;
261 }
253 } 262 }
254 263
255 radeon_unmap_vram_bos(rdev); 264 radeon_unmap_vram_bos(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index e09521858f64..26c23bb651c6 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -194,6 +194,7 @@ struct drm_gem_object *radeon_gem_prime_import(struct drm_device *dev,
194 bo = dma_buf->priv; 194 bo = dma_buf->priv;
195 if (bo->gem_base.dev == dev) { 195 if (bo->gem_base.dev == dev) {
196 drm_gem_object_reference(&bo->gem_base); 196 drm_gem_object_reference(&bo->gem_base);
197 dma_buf_put(dma_buf);
197 return &bo->gem_base; 198 return &bo->gem_base;
198 } 199 }
199 } 200 }
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 47634f27f2e5..141f2b6a9cf2 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -459,7 +459,7 @@ void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *r
459 * 459 *
460 * @ring: radeon_ring structure holding ring information 460 * @ring: radeon_ring structure holding ring information
461 * 461 *
462 * Reset the driver's copy of the wtpr (all asics). 462 * Reset the driver's copy of the wptr (all asics).
463 */ 463 */
464void radeon_ring_undo(struct radeon_ring *ring) 464void radeon_ring_undo(struct radeon_ring *ring)
465{ 465{
@@ -503,7 +503,7 @@ void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *
503} 503}
504 504
505/** 505/**
506 * radeon_ring_force_activity - update lockup variables 506 * radeon_ring_lockup_update - update lockup variables
507 * 507 *
508 * @ring: radeon_ring structure holding ring information 508 * @ring: radeon_ring structure holding ring information
509 * 509 *
@@ -770,22 +770,28 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
770 int ridx = *(int*)node->info_ent->data; 770 int ridx = *(int*)node->info_ent->data;
771 struct radeon_ring *ring = &rdev->ring[ridx]; 771 struct radeon_ring *ring = &rdev->ring[ridx];
772 unsigned count, i, j; 772 unsigned count, i, j;
773 u32 tmp;
773 774
774 radeon_ring_free_size(rdev, ring); 775 radeon_ring_free_size(rdev, ring);
775 count = (ring->ring_size / 4) - ring->ring_free_dw; 776 count = (ring->ring_size / 4) - ring->ring_free_dw;
776 seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg)); 777 tmp = RREG32(ring->wptr_reg) >> ring->ptr_reg_shift;
777 seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg)); 778 seq_printf(m, "wptr(0x%04x): 0x%08x [%5d]\n", ring->wptr_reg, tmp, tmp);
779 tmp = RREG32(ring->rptr_reg) >> ring->ptr_reg_shift;
780 seq_printf(m, "rptr(0x%04x): 0x%08x [%5d]\n", ring->rptr_reg, tmp, tmp);
778 if (ring->rptr_save_reg) { 781 if (ring->rptr_save_reg) {
779 seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg, 782 seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
780 RREG32(ring->rptr_save_reg)); 783 RREG32(ring->rptr_save_reg));
781 } 784 }
782 seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr); 785 seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", ring->wptr, ring->wptr);
783 seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr); 786 seq_printf(m, "driver's copy of the rptr: 0x%08x [%5d]\n", ring->rptr, ring->rptr);
784 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw); 787 seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
785 seq_printf(m, "%u dwords in ring\n", count); 788 seq_printf(m, "%u dwords in ring\n", count);
786 i = ring->rptr; 789 /* print 8 dw before current rptr as often it's the last executed
787 for (j = 0; j <= count; j++) { 790 * packet that is the root issue
788 seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]); 791 */
792 i = (ring->rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
793 for (j = 0; j <= (count + 32); j++) {
794 seq_printf(m, "r[%5d]=0x%08x\n", i, ring->ring[i]);
789 i = (i + 1) & ring->ptr_mask; 795 i = (i + 1) & ring->ptr_mask;
790 } 796 }
791 return 0; 797 return 0;
@@ -794,11 +800,15 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
794static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX; 800static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
795static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX; 801static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
796static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX; 802static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
803static int radeon_ring_type_dma1_index = R600_RING_TYPE_DMA_INDEX;
804static int radeon_ring_type_dma2_index = CAYMAN_RING_TYPE_DMA1_INDEX;
797 805
798static struct drm_info_list radeon_debugfs_ring_info_list[] = { 806static struct drm_info_list radeon_debugfs_ring_info_list[] = {
799 {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index}, 807 {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
800 {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index}, 808 {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
801 {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index}, 809 {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
810 {"radeon_ring_dma1", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma1_index},
811 {"radeon_ring_dma2", radeon_debugfs_ring_info, 0, &radeon_ring_type_dma2_index},
802}; 812};
803 813
804static int radeon_debugfs_sa_info(struct seq_file *m, void *data) 814static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 587c09a00ba2..fda09c9ea689 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -26,16 +26,31 @@
26#include "radeon_reg.h" 26#include "radeon_reg.h"
27#include "radeon.h" 27#include "radeon.h"
28 28
29#define RADEON_TEST_COPY_BLIT 1
30#define RADEON_TEST_COPY_DMA 0
31
29 32
30/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */ 33/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
31void radeon_test_moves(struct radeon_device *rdev) 34static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
32{ 35{
33 struct radeon_bo *vram_obj = NULL; 36 struct radeon_bo *vram_obj = NULL;
34 struct radeon_bo **gtt_obj = NULL; 37 struct radeon_bo **gtt_obj = NULL;
35 struct radeon_fence *fence = NULL; 38 struct radeon_fence *fence = NULL;
36 uint64_t gtt_addr, vram_addr; 39 uint64_t gtt_addr, vram_addr;
37 unsigned i, n, size; 40 unsigned i, n, size;
38 int r; 41 int r, ring;
42
43 switch (flag) {
44 case RADEON_TEST_COPY_DMA:
45 ring = radeon_copy_dma_ring_index(rdev);
46 break;
47 case RADEON_TEST_COPY_BLIT:
48 ring = radeon_copy_blit_ring_index(rdev);
49 break;
50 default:
51 DRM_ERROR("Unknown copy method\n");
52 return;
53 }
39 54
40 size = 1024 * 1024; 55 size = 1024 * 1024;
41 56
@@ -106,7 +121,10 @@ void radeon_test_moves(struct radeon_device *rdev)
106 121
107 radeon_bo_kunmap(gtt_obj[i]); 122 radeon_bo_kunmap(gtt_obj[i]);
108 123
109 r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence); 124 if (ring == R600_RING_TYPE_DMA_INDEX)
125 r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
126 else
127 r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
110 if (r) { 128 if (r) {
111 DRM_ERROR("Failed GTT->VRAM copy %d\n", i); 129 DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
112 goto out_cleanup; 130 goto out_cleanup;
@@ -149,7 +167,10 @@ void radeon_test_moves(struct radeon_device *rdev)
149 167
150 radeon_bo_kunmap(vram_obj); 168 radeon_bo_kunmap(vram_obj);
151 169
152 r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence); 170 if (ring == R600_RING_TYPE_DMA_INDEX)
171 r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
172 else
173 r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
153 if (r) { 174 if (r) {
154 DRM_ERROR("Failed VRAM->GTT copy %d\n", i); 175 DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
155 goto out_cleanup; 176 goto out_cleanup;
@@ -223,6 +244,14 @@ out_cleanup:
223 } 244 }
224} 245}
225 246
247void radeon_test_moves(struct radeon_device *rdev)
248{
249 if (rdev->asic->copy.dma)
250 radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
251 if (rdev->asic->copy.blit)
252 radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
253}
254
226void radeon_test_ring_sync(struct radeon_device *rdev, 255void radeon_test_ring_sync(struct radeon_device *rdev,
227 struct radeon_ring *ringA, 256 struct radeon_ring *ringA,
228 struct radeon_ring *ringB) 257 struct radeon_ring *ringB)
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 5ebe1b3e5db2..1d8ff2f850ba 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -216,7 +216,7 @@ static void radeon_move_null(struct ttm_buffer_object *bo,
216} 216}
217 217
218static int radeon_move_blit(struct ttm_buffer_object *bo, 218static int radeon_move_blit(struct ttm_buffer_object *bo,
219 bool evict, int no_wait_reserve, bool no_wait_gpu, 219 bool evict, bool no_wait_gpu,
220 struct ttm_mem_reg *new_mem, 220 struct ttm_mem_reg *new_mem,
221 struct ttm_mem_reg *old_mem) 221 struct ttm_mem_reg *old_mem)
222{ 222{
@@ -265,15 +265,15 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
265 new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */ 265 new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
266 &fence); 266 &fence);
267 /* FIXME: handle copy error */ 267 /* FIXME: handle copy error */
268 r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL, 268 r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
269 evict, no_wait_reserve, no_wait_gpu, new_mem); 269 evict, no_wait_gpu, new_mem);
270 radeon_fence_unref(&fence); 270 radeon_fence_unref(&fence);
271 return r; 271 return r;
272} 272}
273 273
274static int radeon_move_vram_ram(struct ttm_buffer_object *bo, 274static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
275 bool evict, bool interruptible, 275 bool evict, bool interruptible,
276 bool no_wait_reserve, bool no_wait_gpu, 276 bool no_wait_gpu,
277 struct ttm_mem_reg *new_mem) 277 struct ttm_mem_reg *new_mem)
278{ 278{
279 struct radeon_device *rdev; 279 struct radeon_device *rdev;
@@ -294,7 +294,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
294 placement.busy_placement = &placements; 294 placement.busy_placement = &placements;
295 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; 295 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
296 r = ttm_bo_mem_space(bo, &placement, &tmp_mem, 296 r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
297 interruptible, no_wait_reserve, no_wait_gpu); 297 interruptible, no_wait_gpu);
298 if (unlikely(r)) { 298 if (unlikely(r)) {
299 return r; 299 return r;
300 } 300 }
@@ -308,11 +308,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
308 if (unlikely(r)) { 308 if (unlikely(r)) {
309 goto out_cleanup; 309 goto out_cleanup;
310 } 310 }
311 r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem); 311 r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
312 if (unlikely(r)) { 312 if (unlikely(r)) {
313 goto out_cleanup; 313 goto out_cleanup;
314 } 314 }
315 r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem); 315 r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
316out_cleanup: 316out_cleanup:
317 ttm_bo_mem_put(bo, &tmp_mem); 317 ttm_bo_mem_put(bo, &tmp_mem);
318 return r; 318 return r;
@@ -320,7 +320,7 @@ out_cleanup:
320 320
321static int radeon_move_ram_vram(struct ttm_buffer_object *bo, 321static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
322 bool evict, bool interruptible, 322 bool evict, bool interruptible,
323 bool no_wait_reserve, bool no_wait_gpu, 323 bool no_wait_gpu,
324 struct ttm_mem_reg *new_mem) 324 struct ttm_mem_reg *new_mem)
325{ 325{
326 struct radeon_device *rdev; 326 struct radeon_device *rdev;
@@ -340,15 +340,16 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
340 placement.num_busy_placement = 1; 340 placement.num_busy_placement = 1;
341 placement.busy_placement = &placements; 341 placement.busy_placement = &placements;
342 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; 342 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
343 r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu); 343 r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
344 interruptible, no_wait_gpu);
344 if (unlikely(r)) { 345 if (unlikely(r)) {
345 return r; 346 return r;
346 } 347 }
347 r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem); 348 r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
348 if (unlikely(r)) { 349 if (unlikely(r)) {
349 goto out_cleanup; 350 goto out_cleanup;
350 } 351 }
351 r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem); 352 r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
352 if (unlikely(r)) { 353 if (unlikely(r)) {
353 goto out_cleanup; 354 goto out_cleanup;
354 } 355 }
@@ -359,7 +360,7 @@ out_cleanup:
359 360
360static int radeon_bo_move(struct ttm_buffer_object *bo, 361static int radeon_bo_move(struct ttm_buffer_object *bo,
361 bool evict, bool interruptible, 362 bool evict, bool interruptible,
362 bool no_wait_reserve, bool no_wait_gpu, 363 bool no_wait_gpu,
363 struct ttm_mem_reg *new_mem) 364 struct ttm_mem_reg *new_mem)
364{ 365{
365 struct radeon_device *rdev; 366 struct radeon_device *rdev;
@@ -388,18 +389,18 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
388 if (old_mem->mem_type == TTM_PL_VRAM && 389 if (old_mem->mem_type == TTM_PL_VRAM &&
389 new_mem->mem_type == TTM_PL_SYSTEM) { 390 new_mem->mem_type == TTM_PL_SYSTEM) {
390 r = radeon_move_vram_ram(bo, evict, interruptible, 391 r = radeon_move_vram_ram(bo, evict, interruptible,
391 no_wait_reserve, no_wait_gpu, new_mem); 392 no_wait_gpu, new_mem);
392 } else if (old_mem->mem_type == TTM_PL_SYSTEM && 393 } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
393 new_mem->mem_type == TTM_PL_VRAM) { 394 new_mem->mem_type == TTM_PL_VRAM) {
394 r = radeon_move_ram_vram(bo, evict, interruptible, 395 r = radeon_move_ram_vram(bo, evict, interruptible,
395 no_wait_reserve, no_wait_gpu, new_mem); 396 no_wait_gpu, new_mem);
396 } else { 397 } else {
397 r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem); 398 r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
398 } 399 }
399 400
400 if (r) { 401 if (r) {
401memcpy: 402memcpy:
402 r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 403 r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
403 } 404 }
404 return r; 405 return r;
405} 406}
@@ -471,13 +472,12 @@ static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
471{ 472{
472} 473}
473 474
474static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg, 475static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
475 bool lazy, bool interruptible)
476{ 476{
477 return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible); 477 return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
478} 478}
479 479
480static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg) 480static int radeon_sync_obj_flush(void *sync_obj)
481{ 481{
482 return 0; 482 return 0;
483} 483}
@@ -492,7 +492,7 @@ static void *radeon_sync_obj_ref(void *sync_obj)
492 return radeon_fence_ref((struct radeon_fence *)sync_obj); 492 return radeon_fence_ref((struct radeon_fence *)sync_obj);
493} 493}
494 494
495static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg) 495static bool radeon_sync_obj_signaled(void *sync_obj)
496{ 496{
497 return radeon_fence_signaled((struct radeon_fence *)sync_obj); 497 return radeon_fence_signaled((struct radeon_fence *)sync_obj);
498} 498}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 785d09590b24..2bb6d0e84b3d 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -40,6 +40,12 @@ static int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
40static void rv515_gpu_init(struct radeon_device *rdev); 40static void rv515_gpu_init(struct radeon_device *rdev);
41int rv515_mc_wait_for_idle(struct radeon_device *rdev); 41int rv515_mc_wait_for_idle(struct radeon_device *rdev);
42 42
43static const u32 crtc_offsets[2] =
44{
45 0,
46 AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
47};
48
43void rv515_debugfs(struct radeon_device *rdev) 49void rv515_debugfs(struct radeon_device *rdev)
44{ 50{
45 if (r100_debugfs_rbbm_init(rdev)) { 51 if (r100_debugfs_rbbm_init(rdev)) {
@@ -281,30 +287,114 @@ static int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
281 287
282void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save) 288void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
283{ 289{
290 u32 crtc_enabled, tmp, frame_count, blackout;
291 int i, j;
292
284 save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL); 293 save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
285 save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL); 294 save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);
286 295
287 /* Stop all video */ 296 /* disable VGA render */
288 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
289 WREG32(R_000300_VGA_RENDER_CONTROL, 0); 297 WREG32(R_000300_VGA_RENDER_CONTROL, 0);
290 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1); 298 /* blank the display controllers */
291 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1); 299 for (i = 0; i < rdev->num_crtc; i++) {
292 WREG32(R_006080_D1CRTC_CONTROL, 0); 300 crtc_enabled = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN;
293 WREG32(R_006880_D2CRTC_CONTROL, 0); 301 if (crtc_enabled) {
294 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0); 302 save->crtc_enabled[i] = true;
295 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); 303 tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
296 WREG32(R_000330_D1VGA_CONTROL, 0); 304 if (!(tmp & AVIVO_CRTC_DISP_READ_REQUEST_DISABLE)) {
297 WREG32(R_000338_D2VGA_CONTROL, 0); 305 radeon_wait_for_vblank(rdev, i);
306 tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
307 WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
308 }
309 /* wait for the next frame */
310 frame_count = radeon_get_vblank_counter(rdev, i);
311 for (j = 0; j < rdev->usec_timeout; j++) {
312 if (radeon_get_vblank_counter(rdev, i) != frame_count)
313 break;
314 udelay(1);
315 }
316 } else {
317 save->crtc_enabled[i] = false;
318 }
319 }
320
321 radeon_mc_wait_for_idle(rdev);
322
323 if (rdev->family >= CHIP_R600) {
324 if (rdev->family >= CHIP_RV770)
325 blackout = RREG32(R700_MC_CITF_CNTL);
326 else
327 blackout = RREG32(R600_CITF_CNTL);
328 if ((blackout & R600_BLACKOUT_MASK) != R600_BLACKOUT_MASK) {
329 /* Block CPU access */
330 WREG32(R600_BIF_FB_EN, 0);
331 /* blackout the MC */
332 blackout |= R600_BLACKOUT_MASK;
333 if (rdev->family >= CHIP_RV770)
334 WREG32(R700_MC_CITF_CNTL, blackout);
335 else
336 WREG32(R600_CITF_CNTL, blackout);
337 }
338 }
298} 339}
299 340
300void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) 341void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
301{ 342{
302 WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start); 343 u32 tmp, frame_count;
303 WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start); 344 int i, j;
304 WREG32(R_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start); 345
305 WREG32(R_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start); 346 /* update crtc base addresses */
306 WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start); 347 for (i = 0; i < rdev->num_crtc; i++) {
307 /* Unlock host access */ 348 if (rdev->family >= CHIP_RV770) {
349 if (i == 1) {
350 WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
351 upper_32_bits(rdev->mc.vram_start));
352 WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
353 upper_32_bits(rdev->mc.vram_start));
354 } else {
355 WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
356 upper_32_bits(rdev->mc.vram_start));
357 WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
358 upper_32_bits(rdev->mc.vram_start));
359 }
360 }
361 WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
362 (u32)rdev->mc.vram_start);
363 WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
364 (u32)rdev->mc.vram_start);
365 }
366 WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
367
368 if (rdev->family >= CHIP_R600) {
369 /* unblackout the MC */
370 if (rdev->family >= CHIP_RV770)
371 tmp = RREG32(R700_MC_CITF_CNTL);
372 else
373 tmp = RREG32(R600_CITF_CNTL);
374 tmp &= ~R600_BLACKOUT_MASK;
375 if (rdev->family >= CHIP_RV770)
376 WREG32(R700_MC_CITF_CNTL, tmp);
377 else
378 WREG32(R600_CITF_CNTL, tmp);
379 /* allow CPU access */
380 WREG32(R600_BIF_FB_EN, R600_FB_READ_EN | R600_FB_WRITE_EN);
381 }
382
383 for (i = 0; i < rdev->num_crtc; i++) {
384 if (save->crtc_enabled[i]) {
385 tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
386 tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
387 WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
388 /* wait for the next frame */
389 frame_count = radeon_get_vblank_counter(rdev, i);
390 for (j = 0; j < rdev->usec_timeout; j++) {
391 if (radeon_get_vblank_counter(rdev, i) != frame_count)
392 break;
393 udelay(1);
394 }
395 }
396 }
397 /* Unlock vga access */
308 WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control); 398 WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
309 mdelay(1); 399 mdelay(1);
310 WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control); 400 WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 79814a08c8e5..1b2444f4d8f4 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -316,6 +316,7 @@ void r700_cp_stop(struct radeon_device *rdev)
316 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 316 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
317 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); 317 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
318 WREG32(SCRATCH_UMSK, 0); 318 WREG32(SCRATCH_UMSK, 0);
319 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
319} 320}
320 321
321static int rv770_cp_load_microcode(struct radeon_device *rdev) 322static int rv770_cp_load_microcode(struct radeon_device *rdev)
@@ -583,6 +584,8 @@ static void rv770_gpu_init(struct radeon_device *rdev)
583 WREG32(GB_TILING_CONFIG, gb_tiling_config); 584 WREG32(GB_TILING_CONFIG, gb_tiling_config);
584 WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 585 WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
585 WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 586 WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
587 WREG32(DMA_TILING_CONFIG, (gb_tiling_config & 0xffff));
588 WREG32(DMA_TILING_CONFIG2, (gb_tiling_config & 0xffff));
586 589
587 WREG32(CGTS_SYS_TCC_DISABLE, 0); 590 WREG32(CGTS_SYS_TCC_DISABLE, 0);
588 WREG32(CGTS_TCC_DISABLE, 0); 591 WREG32(CGTS_TCC_DISABLE, 0);
@@ -884,9 +887,83 @@ static int rv770_mc_init(struct radeon_device *rdev)
884 return 0; 887 return 0;
885} 888}
886 889
890/**
891 * rv770_copy_dma - copy pages using the DMA engine
892 *
893 * @rdev: radeon_device pointer
894 * @src_offset: src GPU address
895 * @dst_offset: dst GPU address
896 * @num_gpu_pages: number of GPU pages to xfer
897 * @fence: radeon fence object
898 *
899 * Copy GPU paging using the DMA engine (r7xx).
900 * Used by the radeon ttm implementation to move pages if
901 * registered as the asic copy callback.
902 */
903int rv770_copy_dma(struct radeon_device *rdev,
904 uint64_t src_offset, uint64_t dst_offset,
905 unsigned num_gpu_pages,
906 struct radeon_fence **fence)
907{
908 struct radeon_semaphore *sem = NULL;
909 int ring_index = rdev->asic->copy.dma_ring_index;
910 struct radeon_ring *ring = &rdev->ring[ring_index];
911 u32 size_in_dw, cur_size_in_dw;
912 int i, num_loops;
913 int r = 0;
914
915 r = radeon_semaphore_create(rdev, &sem);
916 if (r) {
917 DRM_ERROR("radeon: moving bo (%d).\n", r);
918 return r;
919 }
920
921 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
922 num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
923 r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
924 if (r) {
925 DRM_ERROR("radeon: moving bo (%d).\n", r);
926 radeon_semaphore_free(rdev, &sem, NULL);
927 return r;
928 }
929
930 if (radeon_fence_need_sync(*fence, ring->idx)) {
931 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
932 ring->idx);
933 radeon_fence_note_sync(*fence, ring->idx);
934 } else {
935 radeon_semaphore_free(rdev, &sem, NULL);
936 }
937
938 for (i = 0; i < num_loops; i++) {
939 cur_size_in_dw = size_in_dw;
940 if (cur_size_in_dw > 0xFFFF)
941 cur_size_in_dw = 0xFFFF;
942 size_in_dw -= cur_size_in_dw;
943 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
944 radeon_ring_write(ring, dst_offset & 0xfffffffc);
945 radeon_ring_write(ring, src_offset & 0xfffffffc);
946 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
947 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
948 src_offset += cur_size_in_dw * 4;
949 dst_offset += cur_size_in_dw * 4;
950 }
951
952 r = radeon_fence_emit(rdev, fence, ring->idx);
953 if (r) {
954 radeon_ring_unlock_undo(rdev, ring);
955 return r;
956 }
957
958 radeon_ring_unlock_commit(rdev, ring);
959 radeon_semaphore_free(rdev, &sem, *fence);
960
961 return r;
962}
963
887static int rv770_startup(struct radeon_device *rdev) 964static int rv770_startup(struct radeon_device *rdev)
888{ 965{
889 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 966 struct radeon_ring *ring;
890 int r; 967 int r;
891 968
892 /* enable pcie gen2 link */ 969 /* enable pcie gen2 link */
@@ -932,6 +1009,12 @@ static int rv770_startup(struct radeon_device *rdev)
932 return r; 1009 return r;
933 } 1010 }
934 1011
1012 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
1013 if (r) {
1014 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
1015 return r;
1016 }
1017
935 /* Enable IRQ */ 1018 /* Enable IRQ */
936 r = r600_irq_init(rdev); 1019 r = r600_irq_init(rdev);
937 if (r) { 1020 if (r) {
@@ -941,11 +1024,20 @@ static int rv770_startup(struct radeon_device *rdev)
941 } 1024 }
942 r600_irq_set(rdev); 1025 r600_irq_set(rdev);
943 1026
1027 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
944 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, 1028 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
945 R600_CP_RB_RPTR, R600_CP_RB_WPTR, 1029 R600_CP_RB_RPTR, R600_CP_RB_WPTR,
946 0, 0xfffff, RADEON_CP_PACKET2); 1030 0, 0xfffff, RADEON_CP_PACKET2);
947 if (r) 1031 if (r)
948 return r; 1032 return r;
1033
1034 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
1035 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
1036 DMA_RB_RPTR, DMA_RB_WPTR,
1037 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
1038 if (r)
1039 return r;
1040
949 r = rv770_cp_load_microcode(rdev); 1041 r = rv770_cp_load_microcode(rdev);
950 if (r) 1042 if (r)
951 return r; 1043 return r;
@@ -953,6 +1045,10 @@ static int rv770_startup(struct radeon_device *rdev)
953 if (r) 1045 if (r)
954 return r; 1046 return r;
955 1047
1048 r = r600_dma_resume(rdev);
1049 if (r)
1050 return r;
1051
956 r = radeon_ib_pool_init(rdev); 1052 r = radeon_ib_pool_init(rdev);
957 if (r) { 1053 if (r) {
958 dev_err(rdev->dev, "IB initialization failed (%d).\n", r); 1054 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -995,7 +1091,7 @@ int rv770_suspend(struct radeon_device *rdev)
995{ 1091{
996 r600_audio_fini(rdev); 1092 r600_audio_fini(rdev);
997 r700_cp_stop(rdev); 1093 r700_cp_stop(rdev);
998 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; 1094 r600_dma_stop(rdev);
999 r600_irq_suspend(rdev); 1095 r600_irq_suspend(rdev);
1000 radeon_wb_disable(rdev); 1096 radeon_wb_disable(rdev);
1001 rv770_pcie_gart_disable(rdev); 1097 rv770_pcie_gart_disable(rdev);
@@ -1066,6 +1162,9 @@ int rv770_init(struct radeon_device *rdev)
1066 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; 1162 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
1067 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); 1163 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
1068 1164
1165 rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
1166 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
1167
1069 rdev->ih.ring_obj = NULL; 1168 rdev->ih.ring_obj = NULL;
1070 r600_ih_ring_init(rdev, 64 * 1024); 1169 r600_ih_ring_init(rdev, 64 * 1024);
1071 1170
@@ -1078,6 +1177,7 @@ int rv770_init(struct radeon_device *rdev)
1078 if (r) { 1177 if (r) {
1079 dev_err(rdev->dev, "disabling GPU acceleration\n"); 1178 dev_err(rdev->dev, "disabling GPU acceleration\n");
1080 r700_cp_fini(rdev); 1179 r700_cp_fini(rdev);
1180 r600_dma_fini(rdev);
1081 r600_irq_fini(rdev); 1181 r600_irq_fini(rdev);
1082 radeon_wb_fini(rdev); 1182 radeon_wb_fini(rdev);
1083 radeon_ib_pool_fini(rdev); 1183 radeon_ib_pool_fini(rdev);
@@ -1093,6 +1193,7 @@ void rv770_fini(struct radeon_device *rdev)
1093{ 1193{
1094 r600_blit_fini(rdev); 1194 r600_blit_fini(rdev);
1095 r700_cp_fini(rdev); 1195 r700_cp_fini(rdev);
1196 r600_dma_fini(rdev);
1096 r600_irq_fini(rdev); 1197 r600_irq_fini(rdev);
1097 radeon_wb_fini(rdev); 1198 radeon_wb_fini(rdev);
1098 radeon_ib_pool_fini(rdev); 1199 radeon_ib_pool_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index b0adfc595d75..20e29d23d348 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -109,6 +109,9 @@
109#define PIPE_TILING__SHIFT 1 109#define PIPE_TILING__SHIFT 1
110#define PIPE_TILING__MASK 0x0000000e 110#define PIPE_TILING__MASK 0x0000000e
111 111
112#define DMA_TILING_CONFIG 0x3ec8
113#define DMA_TILING_CONFIG2 0xd0b8
114
112#define GC_USER_SHADER_PIPE_CONFIG 0x8954 115#define GC_USER_SHADER_PIPE_CONFIG 0x8954
113#define INACTIVE_QD_PIPES(x) ((x) << 8) 116#define INACTIVE_QD_PIPES(x) ((x) << 8)
114#define INACTIVE_QD_PIPES_MASK 0x0000FF00 117#define INACTIVE_QD_PIPES_MASK 0x0000FF00
@@ -358,6 +361,26 @@
358 361
359#define WAIT_UNTIL 0x8040 362#define WAIT_UNTIL 0x8040
360 363
364/* async DMA */
365#define DMA_RB_RPTR 0xd008
366#define DMA_RB_WPTR 0xd00c
367
368/* async DMA packets */
369#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
370 (((t) & 0x1) << 23) | \
371 (((s) & 0x1) << 22) | \
372 (((n) & 0xFFFF) << 0))
373/* async DMA Packet types */
374#define DMA_PACKET_WRITE 0x2
375#define DMA_PACKET_COPY 0x3
376#define DMA_PACKET_INDIRECT_BUFFER 0x4
377#define DMA_PACKET_SEMAPHORE 0x5
378#define DMA_PACKET_FENCE 0x6
379#define DMA_PACKET_TRAP 0x7
380#define DMA_PACKET_CONSTANT_FILL 0xd
381#define DMA_PACKET_NOP 0xf
382
383
361#define SRBM_STATUS 0x0E50 384#define SRBM_STATUS 0x0E50
362 385
363/* DCE 3.2 HDMI */ 386/* DCE 3.2 HDMI */
@@ -551,6 +574,54 @@
551#define HDMI_OFFSET0 (0x7400 - 0x7400) 574#define HDMI_OFFSET0 (0x7400 - 0x7400)
552#define HDMI_OFFSET1 (0x7800 - 0x7400) 575#define HDMI_OFFSET1 (0x7800 - 0x7400)
553 576
577/* DCE3.2 ELD audio interface */
578#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x71c8 /* LPCM */
579#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x71cc /* AC3 */
580#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x71d0 /* MPEG1 */
581#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3 0x71d4 /* MP3 */
582#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4 0x71d8 /* MPEG2 */
583#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5 0x71dc /* AAC */
584#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6 0x71e0 /* DTS */
585#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7 0x71e4 /* ATRAC */
586#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8 0x71e8 /* one bit audio - leave at 0 (default) */
587#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9 0x71ec /* Dolby Digital */
588#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10 0x71f0 /* DTS-HD */
589#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11 0x71f4 /* MAT-MLP */
590#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12 0x71f8 /* DTS */
591#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13 0x71fc /* WMA Pro */
592# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
593/* max channels minus one. 7 = 8 channels */
594# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
595# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
596# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
597/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
598 * bit0 = 32 kHz
599 * bit1 = 44.1 kHz
600 * bit2 = 48 kHz
601 * bit3 = 88.2 kHz
602 * bit4 = 96 kHz
603 * bit5 = 176.4 kHz
604 * bit6 = 192 kHz
605 */
606
607#define AZ_HOT_PLUG_CONTROL 0x7300
608# define AZ_FORCE_CODEC_WAKE (1 << 0)
609# define PIN0_JACK_DETECTION_ENABLE (1 << 4)
610# define PIN1_JACK_DETECTION_ENABLE (1 << 5)
611# define PIN2_JACK_DETECTION_ENABLE (1 << 6)
612# define PIN3_JACK_DETECTION_ENABLE (1 << 7)
613# define PIN0_UNSOLICITED_RESPONSE_ENABLE (1 << 8)
614# define PIN1_UNSOLICITED_RESPONSE_ENABLE (1 << 9)
615# define PIN2_UNSOLICITED_RESPONSE_ENABLE (1 << 10)
616# define PIN3_UNSOLICITED_RESPONSE_ENABLE (1 << 11)
617# define CODEC_HOT_PLUG_ENABLE (1 << 12)
618# define PIN0_AUDIO_ENABLED (1 << 24)
619# define PIN1_AUDIO_ENABLED (1 << 25)
620# define PIN2_AUDIO_ENABLED (1 << 26)
621# define PIN3_AUDIO_ENABLED (1 << 27)
622# define AUDIO_ENABLED (1 << 31)
623
624
554#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110 625#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
555#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914 626#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
556#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114 627#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index b0db712060fb..3240a3d64f30 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1660,6 +1660,8 @@ static void si_gpu_init(struct radeon_device *rdev)
1660 WREG32(GB_ADDR_CONFIG, gb_addr_config); 1660 WREG32(GB_ADDR_CONFIG, gb_addr_config);
1661 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 1661 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
1662 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 1662 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
1663 WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
1664 WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
1663 1665
1664 si_tiling_mode_table_init(rdev); 1666 si_tiling_mode_table_init(rdev);
1665 1667
@@ -1836,6 +1838,9 @@ static void si_cp_enable(struct radeon_device *rdev, bool enable)
1836 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 1838 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1837 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT)); 1839 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
1838 WREG32(SCRATCH_UMSK, 0); 1840 WREG32(SCRATCH_UMSK, 0);
1841 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1842 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
1843 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
1839 } 1844 }
1840 udelay(50); 1845 udelay(50);
1841} 1846}
@@ -2007,7 +2012,7 @@ static int si_cp_resume(struct radeon_device *rdev)
2007 ring->wptr = 0; 2012 ring->wptr = 0;
2008 WREG32(CP_RB0_WPTR, ring->wptr); 2013 WREG32(CP_RB0_WPTR, ring->wptr);
2009 2014
2010 /* set the wb address wether it's enabled or not */ 2015 /* set the wb address whether it's enabled or not */
2011 WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); 2016 WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
2012 WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); 2017 WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
2013 2018
@@ -2040,7 +2045,7 @@ static int si_cp_resume(struct radeon_device *rdev)
2040 ring->wptr = 0; 2045 ring->wptr = 0;
2041 WREG32(CP_RB1_WPTR, ring->wptr); 2046 WREG32(CP_RB1_WPTR, ring->wptr);
2042 2047
2043 /* set the wb address wether it's enabled or not */ 2048 /* set the wb address whether it's enabled or not */
2044 WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC); 2049 WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
2045 WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF); 2050 WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);
2046 2051
@@ -2066,7 +2071,7 @@ static int si_cp_resume(struct radeon_device *rdev)
2066 ring->wptr = 0; 2071 ring->wptr = 0;
2067 WREG32(CP_RB2_WPTR, ring->wptr); 2072 WREG32(CP_RB2_WPTR, ring->wptr);
2068 2073
2069 /* set the wb address wether it's enabled or not */ 2074 /* set the wb address whether it's enabled or not */
2070 WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC); 2075 WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
2071 WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF); 2076 WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);
2072 2077
@@ -2121,15 +2126,13 @@ bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
2121 return radeon_ring_test_lockup(rdev, ring); 2126 return radeon_ring_test_lockup(rdev, ring);
2122} 2127}
2123 2128
2124static int si_gpu_soft_reset(struct radeon_device *rdev) 2129static void si_gpu_soft_reset_gfx(struct radeon_device *rdev)
2125{ 2130{
2126 struct evergreen_mc_save save;
2127 u32 grbm_reset = 0; 2131 u32 grbm_reset = 0;
2128 2132
2129 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE)) 2133 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
2130 return 0; 2134 return;
2131 2135
2132 dev_info(rdev->dev, "GPU softreset \n");
2133 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", 2136 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
2134 RREG32(GRBM_STATUS)); 2137 RREG32(GRBM_STATUS));
2135 dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n", 2138 dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
@@ -2140,10 +2143,7 @@ static int si_gpu_soft_reset(struct radeon_device *rdev)
2140 RREG32(GRBM_STATUS_SE1)); 2143 RREG32(GRBM_STATUS_SE1));
2141 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", 2144 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
2142 RREG32(SRBM_STATUS)); 2145 RREG32(SRBM_STATUS));
2143 evergreen_mc_stop(rdev, &save); 2146
2144 if (radeon_mc_wait_for_idle(rdev)) {
2145 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
2146 }
2147 /* Disable CP parsing/prefetching */ 2147 /* Disable CP parsing/prefetching */
2148 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT); 2148 WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
2149 2149
@@ -2168,8 +2168,7 @@ static int si_gpu_soft_reset(struct radeon_device *rdev)
2168 udelay(50); 2168 udelay(50);
2169 WREG32(GRBM_SOFT_RESET, 0); 2169 WREG32(GRBM_SOFT_RESET, 0);
2170 (void)RREG32(GRBM_SOFT_RESET); 2170 (void)RREG32(GRBM_SOFT_RESET);
2171 /* Wait a little for things to settle down */ 2171
2172 udelay(50);
2173 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n", 2172 dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
2174 RREG32(GRBM_STATUS)); 2173 RREG32(GRBM_STATUS));
2175 dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n", 2174 dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
@@ -2180,13 +2179,75 @@ static int si_gpu_soft_reset(struct radeon_device *rdev)
2180 RREG32(GRBM_STATUS_SE1)); 2179 RREG32(GRBM_STATUS_SE1));
2181 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n", 2180 dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
2182 RREG32(SRBM_STATUS)); 2181 RREG32(SRBM_STATUS));
2182}
2183
2184static void si_gpu_soft_reset_dma(struct radeon_device *rdev)
2185{
2186 u32 tmp;
2187
2188 if (RREG32(DMA_STATUS_REG) & DMA_IDLE)
2189 return;
2190
2191 dev_info(rdev->dev, " DMA_STATUS_REG = 0x%08X\n",
2192 RREG32(DMA_STATUS_REG));
2193
2194 /* dma0 */
2195 tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
2196 tmp &= ~DMA_RB_ENABLE;
2197 WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
2198
2199 /* dma1 */
2200 tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
2201 tmp &= ~DMA_RB_ENABLE;
2202 WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
2203
2204 /* Reset dma */
2205 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
2206 RREG32(SRBM_SOFT_RESET);
2207 udelay(50);
2208 WREG32(SRBM_SOFT_RESET, 0);
2209
2210 dev_info(rdev->dev, " DMA_STATUS_REG = 0x%08X\n",
2211 RREG32(DMA_STATUS_REG));
2212}
2213
2214static int si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
2215{
2216 struct evergreen_mc_save save;
2217
2218 if (reset_mask == 0)
2219 return 0;
2220
2221 dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
2222
2223 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
2224 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
2225 dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
2226 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
2227
2228 evergreen_mc_stop(rdev, &save);
2229 if (radeon_mc_wait_for_idle(rdev)) {
2230 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
2231 }
2232
2233 if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE))
2234 si_gpu_soft_reset_gfx(rdev);
2235
2236 if (reset_mask & RADEON_RESET_DMA)
2237 si_gpu_soft_reset_dma(rdev);
2238
2239 /* Wait a little for things to settle down */
2240 udelay(50);
2241
2183 evergreen_mc_resume(rdev, &save); 2242 evergreen_mc_resume(rdev, &save);
2184 return 0; 2243 return 0;
2185} 2244}
2186 2245
2187int si_asic_reset(struct radeon_device *rdev) 2246int si_asic_reset(struct radeon_device *rdev)
2188{ 2247{
2189 return si_gpu_soft_reset(rdev); 2248 return si_gpu_soft_reset(rdev, (RADEON_RESET_GFX |
2249 RADEON_RESET_COMPUTE |
2250 RADEON_RESET_DMA));
2190} 2251}
2191 2252
2192/* MC */ 2253/* MC */
@@ -2426,9 +2487,20 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
2426 /* enable context1-15 */ 2487 /* enable context1-15 */
2427 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR, 2488 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
2428 (u32)(rdev->dummy_page.addr >> 12)); 2489 (u32)(rdev->dummy_page.addr >> 12));
2429 WREG32(VM_CONTEXT1_CNTL2, 0); 2490 WREG32(VM_CONTEXT1_CNTL2, 4);
2430 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) | 2491 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
2431 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); 2492 RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
2493 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
2494 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
2495 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
2496 PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
2497 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
2498 VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
2499 VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
2500 READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
2501 READ_PROTECTION_FAULT_ENABLE_DEFAULT |
2502 WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
2503 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
2432 2504
2433 si_pcie_gart_tlb_flush(rdev); 2505 si_pcie_gart_tlb_flush(rdev);
2434 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", 2506 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -2474,6 +2546,7 @@ static bool si_vm_reg_valid(u32 reg)
2474 /* check config regs */ 2546 /* check config regs */
2475 switch (reg) { 2547 switch (reg) {
2476 case GRBM_GFX_INDEX: 2548 case GRBM_GFX_INDEX:
2549 case CP_STRMOUT_CNTL:
2477 case VGT_VTX_VECT_EJECT_REG: 2550 case VGT_VTX_VECT_EJECT_REG:
2478 case VGT_CACHE_INVALIDATION: 2551 case VGT_CACHE_INVALIDATION:
2479 case VGT_ESGS_RING_SIZE: 2552 case VGT_ESGS_RING_SIZE:
@@ -2533,6 +2606,7 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
2533 u32 idx = pkt->idx + 1; 2606 u32 idx = pkt->idx + 1;
2534 u32 idx_value = ib[idx]; 2607 u32 idx_value = ib[idx];
2535 u32 start_reg, end_reg, reg, i; 2608 u32 start_reg, end_reg, reg, i;
2609 u32 command, info;
2536 2610
2537 switch (pkt->opcode) { 2611 switch (pkt->opcode) {
2538 case PACKET3_NOP: 2612 case PACKET3_NOP:
@@ -2632,6 +2706,52 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
2632 return -EINVAL; 2706 return -EINVAL;
2633 } 2707 }
2634 break; 2708 break;
2709 case PACKET3_CP_DMA:
2710 command = ib[idx + 4];
2711 info = ib[idx + 1];
2712 if (command & PACKET3_CP_DMA_CMD_SAS) {
2713 /* src address space is register */
2714 if (((info & 0x60000000) >> 29) == 0) {
2715 start_reg = idx_value << 2;
2716 if (command & PACKET3_CP_DMA_CMD_SAIC) {
2717 reg = start_reg;
2718 if (!si_vm_reg_valid(reg)) {
2719 DRM_ERROR("CP DMA Bad SRC register\n");
2720 return -EINVAL;
2721 }
2722 } else {
2723 for (i = 0; i < (command & 0x1fffff); i++) {
2724 reg = start_reg + (4 * i);
2725 if (!si_vm_reg_valid(reg)) {
2726 DRM_ERROR("CP DMA Bad SRC register\n");
2727 return -EINVAL;
2728 }
2729 }
2730 }
2731 }
2732 }
2733 if (command & PACKET3_CP_DMA_CMD_DAS) {
2734 /* dst address space is register */
2735 if (((info & 0x00300000) >> 20) == 0) {
2736 start_reg = ib[idx + 2];
2737 if (command & PACKET3_CP_DMA_CMD_DAIC) {
2738 reg = start_reg;
2739 if (!si_vm_reg_valid(reg)) {
2740 DRM_ERROR("CP DMA Bad DST register\n");
2741 return -EINVAL;
2742 }
2743 } else {
2744 for (i = 0; i < (command & 0x1fffff); i++) {
2745 reg = start_reg + (4 * i);
2746 if (!si_vm_reg_valid(reg)) {
2747 DRM_ERROR("CP DMA Bad DST register\n");
2748 return -EINVAL;
2749 }
2750 }
2751 }
2752 }
2753 }
2754 break;
2635 default: 2755 default:
2636 DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode); 2756 DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
2637 return -EINVAL; 2757 return -EINVAL;
@@ -2808,30 +2928,86 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
2808{ 2928{
2809 struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index]; 2929 struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
2810 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); 2930 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
2811 2931 uint64_t value;
2812 while (count) { 2932 unsigned ndw;
2813 unsigned ndw = 2 + count * 2; 2933
2814 if (ndw > 0x3FFE) 2934 if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
2815 ndw = 0x3FFE; 2935 while (count) {
2816 2936 ndw = 2 + count * 2;
2817 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw)); 2937 if (ndw > 0x3FFE)
2818 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 2938 ndw = 0x3FFE;
2819 WRITE_DATA_DST_SEL(1))); 2939
2820 radeon_ring_write(ring, pe); 2940 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
2821 radeon_ring_write(ring, upper_32_bits(pe)); 2941 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2822 for (; ndw > 2; ndw -= 2, --count, pe += 8) { 2942 WRITE_DATA_DST_SEL(1)));
2823 uint64_t value; 2943 radeon_ring_write(ring, pe);
2824 if (flags & RADEON_VM_PAGE_SYSTEM) { 2944 radeon_ring_write(ring, upper_32_bits(pe));
2825 value = radeon_vm_map_gart(rdev, addr); 2945 for (; ndw > 2; ndw -= 2, --count, pe += 8) {
2826 value &= 0xFFFFFFFFFFFFF000ULL; 2946 if (flags & RADEON_VM_PAGE_SYSTEM) {
2827 } else if (flags & RADEON_VM_PAGE_VALID) 2947 value = radeon_vm_map_gart(rdev, addr);
2828 value = addr; 2948 value &= 0xFFFFFFFFFFFFF000ULL;
2829 else 2949 } else if (flags & RADEON_VM_PAGE_VALID) {
2830 value = 0; 2950 value = addr;
2831 addr += incr; 2951 } else {
2832 value |= r600_flags; 2952 value = 0;
2833 radeon_ring_write(ring, value); 2953 }
2834 radeon_ring_write(ring, upper_32_bits(value)); 2954 addr += incr;
2955 value |= r600_flags;
2956 radeon_ring_write(ring, value);
2957 radeon_ring_write(ring, upper_32_bits(value));
2958 }
2959 }
2960 } else {
2961 /* DMA */
2962 if (flags & RADEON_VM_PAGE_SYSTEM) {
2963 while (count) {
2964 ndw = count * 2;
2965 if (ndw > 0xFFFFE)
2966 ndw = 0xFFFFE;
2967
2968 /* for non-physically contiguous pages (system) */
2969 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw));
2970 radeon_ring_write(ring, pe);
2971 radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
2972 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
2973 if (flags & RADEON_VM_PAGE_SYSTEM) {
2974 value = radeon_vm_map_gart(rdev, addr);
2975 value &= 0xFFFFFFFFFFFFF000ULL;
2976 } else if (flags & RADEON_VM_PAGE_VALID) {
2977 value = addr;
2978 } else {
2979 value = 0;
2980 }
2981 addr += incr;
2982 value |= r600_flags;
2983 radeon_ring_write(ring, value);
2984 radeon_ring_write(ring, upper_32_bits(value));
2985 }
2986 }
2987 } else {
2988 while (count) {
2989 ndw = count * 2;
2990 if (ndw > 0xFFFFE)
2991 ndw = 0xFFFFE;
2992
2993 if (flags & RADEON_VM_PAGE_VALID)
2994 value = addr;
2995 else
2996 value = 0;
2997 /* for physically contiguous pages (vram) */
2998 radeon_ring_write(ring, DMA_PTE_PDE_PACKET(ndw));
2999 radeon_ring_write(ring, pe); /* dst addr */
3000 radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
3001 radeon_ring_write(ring, r600_flags); /* mask */
3002 radeon_ring_write(ring, 0);
3003 radeon_ring_write(ring, value); /* value */
3004 radeon_ring_write(ring, upper_32_bits(value));
3005 radeon_ring_write(ring, incr); /* increment size */
3006 radeon_ring_write(ring, 0);
3007 pe += ndw * 4;
3008 addr += (ndw / 2) * incr;
3009 count -= ndw / 2;
3010 }
2835 } 3011 }
2836 } 3012 }
2837} 3013}
@@ -2879,6 +3055,32 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
2879 radeon_ring_write(ring, 0x0); 3055 radeon_ring_write(ring, 0x0);
2880} 3056}
2881 3057
3058void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
3059{
3060 struct radeon_ring *ring = &rdev->ring[ridx];
3061
3062 if (vm == NULL)
3063 return;
3064
3065 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
3066 if (vm->id < 8) {
3067 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
3068 } else {
3069 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
3070 }
3071 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
3072
3073 /* flush hdp cache */
3074 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
3075 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
3076 radeon_ring_write(ring, 1);
3077
3078 /* bits 0-7 are the VM contexts0-7 */
3079 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
3080 radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
3081 radeon_ring_write(ring, 1 << vm->id);
3082}
3083
2882/* 3084/*
2883 * RLC 3085 * RLC
2884 */ 3086 */
@@ -3047,6 +3249,10 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
3047 WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 3249 WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
3048 WREG32(CP_INT_CNTL_RING1, 0); 3250 WREG32(CP_INT_CNTL_RING1, 0);
3049 WREG32(CP_INT_CNTL_RING2, 0); 3251 WREG32(CP_INT_CNTL_RING2, 0);
3252 tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
3253 WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, tmp);
3254 tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
3255 WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
3050 WREG32(GRBM_INT_CNTL, 0); 3256 WREG32(GRBM_INT_CNTL, 0);
3051 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); 3257 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
3052 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); 3258 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -3166,6 +3372,7 @@ int si_irq_set(struct radeon_device *rdev)
3166 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; 3372 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
3167 u32 grbm_int_cntl = 0; 3373 u32 grbm_int_cntl = 0;
3168 u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; 3374 u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
3375 u32 dma_cntl, dma_cntl1;
3169 3376
3170 if (!rdev->irq.installed) { 3377 if (!rdev->irq.installed) {
3171 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); 3378 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3186,6 +3393,9 @@ int si_irq_set(struct radeon_device *rdev)
3186 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; 3393 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
3187 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; 3394 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
3188 3395
3396 dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
3397 dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
3398
3189 /* enable CP interrupts on all rings */ 3399 /* enable CP interrupts on all rings */
3190 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { 3400 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
3191 DRM_DEBUG("si_irq_set: sw int gfx\n"); 3401 DRM_DEBUG("si_irq_set: sw int gfx\n");
@@ -3199,6 +3409,15 @@ int si_irq_set(struct radeon_device *rdev)
3199 DRM_DEBUG("si_irq_set: sw int cp2\n"); 3409 DRM_DEBUG("si_irq_set: sw int cp2\n");
3200 cp_int_cntl2 |= TIME_STAMP_INT_ENABLE; 3410 cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
3201 } 3411 }
3412 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
3413 DRM_DEBUG("si_irq_set: sw int dma\n");
3414 dma_cntl |= TRAP_ENABLE;
3415 }
3416
3417 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
3418 DRM_DEBUG("si_irq_set: sw int dma1\n");
3419 dma_cntl1 |= TRAP_ENABLE;
3420 }
3202 if (rdev->irq.crtc_vblank_int[0] || 3421 if (rdev->irq.crtc_vblank_int[0] ||
3203 atomic_read(&rdev->irq.pflip[0])) { 3422 atomic_read(&rdev->irq.pflip[0])) {
3204 DRM_DEBUG("si_irq_set: vblank 0\n"); 3423 DRM_DEBUG("si_irq_set: vblank 0\n");
@@ -3258,6 +3477,9 @@ int si_irq_set(struct radeon_device *rdev)
3258 WREG32(CP_INT_CNTL_RING1, cp_int_cntl1); 3477 WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
3259 WREG32(CP_INT_CNTL_RING2, cp_int_cntl2); 3478 WREG32(CP_INT_CNTL_RING2, cp_int_cntl2);
3260 3479
3480 WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, dma_cntl);
3481 WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, dma_cntl1);
3482
3261 WREG32(GRBM_INT_CNTL, grbm_int_cntl); 3483 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
3262 3484
3263 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); 3485 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -3683,6 +3905,16 @@ restart_ih:
3683 break; 3905 break;
3684 } 3906 }
3685 break; 3907 break;
3908 case 146:
3909 case 147:
3910 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
3911 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
3912 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
3913 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
3914 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
3915 /* reset addr and status */
3916 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
3917 break;
3686 case 176: /* RINGID0 CP_INT */ 3918 case 176: /* RINGID0 CP_INT */
3687 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); 3919 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3688 break; 3920 break;
@@ -3706,9 +3938,17 @@ restart_ih:
3706 break; 3938 break;
3707 } 3939 }
3708 break; 3940 break;
3941 case 224: /* DMA trap event */
3942 DRM_DEBUG("IH: DMA trap\n");
3943 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
3944 break;
3709 case 233: /* GUI IDLE */ 3945 case 233: /* GUI IDLE */
3710 DRM_DEBUG("IH: GUI idle\n"); 3946 DRM_DEBUG("IH: GUI idle\n");
3711 break; 3947 break;
3948 case 244: /* DMA trap event */
3949 DRM_DEBUG("IH: DMA1 trap\n");
3950 radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
3951 break;
3712 default: 3952 default:
3713 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 3953 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3714 break; 3954 break;
@@ -3732,6 +3972,80 @@ restart_ih:
3732 return IRQ_HANDLED; 3972 return IRQ_HANDLED;
3733} 3973}
3734 3974
3975/**
3976 * si_copy_dma - copy pages using the DMA engine
3977 *
3978 * @rdev: radeon_device pointer
3979 * @src_offset: src GPU address
3980 * @dst_offset: dst GPU address
3981 * @num_gpu_pages: number of GPU pages to xfer
3982 * @fence: radeon fence object
3983 *
3984 * Copy GPU paging using the DMA engine (SI).
3985 * Used by the radeon ttm implementation to move pages if
3986 * registered as the asic copy callback.
3987 */
3988int si_copy_dma(struct radeon_device *rdev,
3989 uint64_t src_offset, uint64_t dst_offset,
3990 unsigned num_gpu_pages,
3991 struct radeon_fence **fence)
3992{
3993 struct radeon_semaphore *sem = NULL;
3994 int ring_index = rdev->asic->copy.dma_ring_index;
3995 struct radeon_ring *ring = &rdev->ring[ring_index];
3996 u32 size_in_bytes, cur_size_in_bytes;
3997 int i, num_loops;
3998 int r = 0;
3999
4000 r = radeon_semaphore_create(rdev, &sem);
4001 if (r) {
4002 DRM_ERROR("radeon: moving bo (%d).\n", r);
4003 return r;
4004 }
4005
4006 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
4007 num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
4008 r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
4009 if (r) {
4010 DRM_ERROR("radeon: moving bo (%d).\n", r);
4011 radeon_semaphore_free(rdev, &sem, NULL);
4012 return r;
4013 }
4014
4015 if (radeon_fence_need_sync(*fence, ring->idx)) {
4016 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
4017 ring->idx);
4018 radeon_fence_note_sync(*fence, ring->idx);
4019 } else {
4020 radeon_semaphore_free(rdev, &sem, NULL);
4021 }
4022
4023 for (i = 0; i < num_loops; i++) {
4024 cur_size_in_bytes = size_in_bytes;
4025 if (cur_size_in_bytes > 0xFFFFF)
4026 cur_size_in_bytes = 0xFFFFF;
4027 size_in_bytes -= cur_size_in_bytes;
4028 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
4029 radeon_ring_write(ring, dst_offset & 0xffffffff);
4030 radeon_ring_write(ring, src_offset & 0xffffffff);
4031 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
4032 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
4033 src_offset += cur_size_in_bytes;
4034 dst_offset += cur_size_in_bytes;
4035 }
4036
4037 r = radeon_fence_emit(rdev, fence, ring->idx);
4038 if (r) {
4039 radeon_ring_unlock_undo(rdev, ring);
4040 return r;
4041 }
4042
4043 radeon_ring_unlock_commit(rdev, ring);
4044 radeon_semaphore_free(rdev, &sem, *fence);
4045
4046 return r;
4047}
4048
3735/* 4049/*
3736 * startup/shutdown callbacks 4050 * startup/shutdown callbacks
3737 */ 4051 */
@@ -3803,6 +4117,18 @@ static int si_startup(struct radeon_device *rdev)
3803 return r; 4117 return r;
3804 } 4118 }
3805 4119
4120 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
4121 if (r) {
4122 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
4123 return r;
4124 }
4125
4126 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
4127 if (r) {
4128 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
4129 return r;
4130 }
4131
3806 /* Enable IRQ */ 4132 /* Enable IRQ */
3807 r = si_irq_init(rdev); 4133 r = si_irq_init(rdev);
3808 if (r) { 4134 if (r) {
@@ -3833,6 +4159,22 @@ static int si_startup(struct radeon_device *rdev)
3833 if (r) 4159 if (r)
3834 return r; 4160 return r;
3835 4161
4162 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
4163 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
4164 DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
4165 DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
4166 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
4167 if (r)
4168 return r;
4169
4170 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
4171 r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
4172 DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
4173 DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
4174 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
4175 if (r)
4176 return r;
4177
3836 r = si_cp_load_microcode(rdev); 4178 r = si_cp_load_microcode(rdev);
3837 if (r) 4179 if (r)
3838 return r; 4180 return r;
@@ -3840,6 +4182,10 @@ static int si_startup(struct radeon_device *rdev)
3840 if (r) 4182 if (r)
3841 return r; 4183 return r;
3842 4184
4185 r = cayman_dma_resume(rdev);
4186 if (r)
4187 return r;
4188
3843 r = radeon_ib_pool_init(rdev); 4189 r = radeon_ib_pool_init(rdev);
3844 if (r) { 4190 if (r) {
3845 dev_err(rdev->dev, "IB initialization failed (%d).\n", r); 4191 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -3881,9 +4227,7 @@ int si_resume(struct radeon_device *rdev)
3881int si_suspend(struct radeon_device *rdev) 4227int si_suspend(struct radeon_device *rdev)
3882{ 4228{
3883 si_cp_enable(rdev, false); 4229 si_cp_enable(rdev, false);
3884 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; 4230 cayman_dma_stop(rdev);
3885 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
3886 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
3887 si_irq_suspend(rdev); 4231 si_irq_suspend(rdev);
3888 radeon_wb_disable(rdev); 4232 radeon_wb_disable(rdev);
3889 si_pcie_gart_disable(rdev); 4233 si_pcie_gart_disable(rdev);
@@ -3961,6 +4305,14 @@ int si_init(struct radeon_device *rdev)
3961 ring->ring_obj = NULL; 4305 ring->ring_obj = NULL;
3962 r600_ring_init(rdev, ring, 1024 * 1024); 4306 r600_ring_init(rdev, ring, 1024 * 1024);
3963 4307
4308 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
4309 ring->ring_obj = NULL;
4310 r600_ring_init(rdev, ring, 64 * 1024);
4311
4312 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
4313 ring->ring_obj = NULL;
4314 r600_ring_init(rdev, ring, 64 * 1024);
4315
3964 rdev->ih.ring_obj = NULL; 4316 rdev->ih.ring_obj = NULL;
3965 r600_ih_ring_init(rdev, 64 * 1024); 4317 r600_ih_ring_init(rdev, 64 * 1024);
3966 4318
@@ -3973,6 +4325,7 @@ int si_init(struct radeon_device *rdev)
3973 if (r) { 4325 if (r) {
3974 dev_err(rdev->dev, "disabling GPU acceleration\n"); 4326 dev_err(rdev->dev, "disabling GPU acceleration\n");
3975 si_cp_fini(rdev); 4327 si_cp_fini(rdev);
4328 cayman_dma_fini(rdev);
3976 si_irq_fini(rdev); 4329 si_irq_fini(rdev);
3977 si_rlc_fini(rdev); 4330 si_rlc_fini(rdev);
3978 radeon_wb_fini(rdev); 4331 radeon_wb_fini(rdev);
@@ -4001,6 +4354,7 @@ void si_fini(struct radeon_device *rdev)
4001 r600_blit_fini(rdev); 4354 r600_blit_fini(rdev);
4002#endif 4355#endif
4003 si_cp_fini(rdev); 4356 si_cp_fini(rdev);
4357 cayman_dma_fini(rdev);
4004 si_irq_fini(rdev); 4358 si_irq_fini(rdev);
4005 si_rlc_fini(rdev); 4359 si_rlc_fini(rdev);
4006 radeon_wb_fini(rdev); 4360 radeon_wb_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 7d2a20e56577..c056aae814f0 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -62,6 +62,22 @@
62 62
63#define SRBM_STATUS 0xE50 63#define SRBM_STATUS 0xE50
64 64
65#define SRBM_SOFT_RESET 0x0E60
66#define SOFT_RESET_BIF (1 << 1)
67#define SOFT_RESET_DC (1 << 5)
68#define SOFT_RESET_DMA1 (1 << 6)
69#define SOFT_RESET_GRBM (1 << 8)
70#define SOFT_RESET_HDP (1 << 9)
71#define SOFT_RESET_IH (1 << 10)
72#define SOFT_RESET_MC (1 << 11)
73#define SOFT_RESET_ROM (1 << 14)
74#define SOFT_RESET_SEM (1 << 15)
75#define SOFT_RESET_VMC (1 << 17)
76#define SOFT_RESET_DMA (1 << 20)
77#define SOFT_RESET_TST (1 << 21)
78#define SOFT_RESET_REGBB (1 << 22)
79#define SOFT_RESET_ORB (1 << 23)
80
65#define CC_SYS_RB_BACKEND_DISABLE 0xe80 81#define CC_SYS_RB_BACKEND_DISABLE 0xe80
66#define GC_USER_SYS_RB_BACKEND_DISABLE 0xe84 82#define GC_USER_SYS_RB_BACKEND_DISABLE 0xe84
67 83
@@ -91,7 +107,18 @@
91#define VM_CONTEXT0_CNTL 0x1410 107#define VM_CONTEXT0_CNTL 0x1410
92#define ENABLE_CONTEXT (1 << 0) 108#define ENABLE_CONTEXT (1 << 0)
93#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1) 109#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
110#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3)
94#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4) 111#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
112#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6)
113#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7)
114#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9)
115#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10)
116#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12)
117#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13)
118#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15)
119#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
120#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
121#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
95#define VM_CONTEXT1_CNTL 0x1414 122#define VM_CONTEXT1_CNTL 0x1414
96#define VM_CONTEXT0_CNTL2 0x1430 123#define VM_CONTEXT0_CNTL2 0x1430
97#define VM_CONTEXT1_CNTL2 0x1434 124#define VM_CONTEXT1_CNTL2 0x1434
@@ -104,6 +131,9 @@
104#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR 0x1450 131#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR 0x1450
105#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR 0x1454 132#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR 0x1454
106 133
134#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
135#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
136
107#define VM_INVALIDATE_REQUEST 0x1478 137#define VM_INVALIDATE_REQUEST 0x1478
108#define VM_INVALIDATE_RESPONSE 0x147c 138#define VM_INVALIDATE_RESPONSE 0x147c
109 139
@@ -424,6 +454,7 @@
424# define RDERR_INT_ENABLE (1 << 0) 454# define RDERR_INT_ENABLE (1 << 0)
425# define GUI_IDLE_INT_ENABLE (1 << 19) 455# define GUI_IDLE_INT_ENABLE (1 << 19)
426 456
457#define CP_STRMOUT_CNTL 0x84FC
427#define SCRATCH_REG0 0x8500 458#define SCRATCH_REG0 0x8500
428#define SCRATCH_REG1 0x8504 459#define SCRATCH_REG1 0x8504
429#define SCRATCH_REG2 0x8508 460#define SCRATCH_REG2 0x8508
@@ -834,6 +865,54 @@
834#define PACKET3_WAIT_REG_MEM 0x3C 865#define PACKET3_WAIT_REG_MEM 0x3C
835#define PACKET3_MEM_WRITE 0x3D 866#define PACKET3_MEM_WRITE 0x3D
836#define PACKET3_COPY_DATA 0x40 867#define PACKET3_COPY_DATA 0x40
868#define PACKET3_CP_DMA 0x41
869/* 1. header
870 * 2. SRC_ADDR_LO or DATA [31:0]
871 * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
872 * SRC_ADDR_HI [7:0]
873 * 4. DST_ADDR_LO [31:0]
874 * 5. DST_ADDR_HI [7:0]
875 * 6. COMMAND [30:21] | BYTE_COUNT [20:0]
876 */
877# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
878 /* 0 - SRC_ADDR
879 * 1 - GDS
880 */
881# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
882 /* 0 - ME
883 * 1 - PFP
884 */
885# define PACKET3_CP_DMA_SRC_SEL(x) ((x) << 29)
886 /* 0 - SRC_ADDR
887 * 1 - GDS
888 * 2 - DATA
889 */
890# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
891/* COMMAND */
892# define PACKET3_CP_DMA_DIS_WC (1 << 21)
893# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
894 /* 0 - none
895 * 1 - 8 in 16
896 * 2 - 8 in 32
897 * 3 - 8 in 64
898 */
899# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
900 /* 0 - none
901 * 1 - 8 in 16
902 * 2 - 8 in 32
903 * 3 - 8 in 64
904 */
905# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
906 /* 0 - memory
907 * 1 - register
908 */
909# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
910 /* 0 - memory
911 * 1 - register
912 */
913# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
914# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
915# define PACKET3_CP_DMA_CMD_RAW_WAIT (1 << 30)
837#define PACKET3_PFP_SYNC_ME 0x42 916#define PACKET3_PFP_SYNC_ME 0x42
838#define PACKET3_SURFACE_SYNC 0x43 917#define PACKET3_SURFACE_SYNC 0x43
839# define PACKET3_DEST_BASE_0_ENA (1 << 0) 918# define PACKET3_DEST_BASE_0_ENA (1 << 0)
@@ -921,4 +1000,63 @@
921#define PACKET3_WAIT_ON_AVAIL_BUFFER 0x8A 1000#define PACKET3_WAIT_ON_AVAIL_BUFFER 0x8A
922#define PACKET3_SWITCH_BUFFER 0x8B 1001#define PACKET3_SWITCH_BUFFER 0x8B
923 1002
1003/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
1004#define DMA0_REGISTER_OFFSET 0x0 /* not a register */
1005#define DMA1_REGISTER_OFFSET 0x800 /* not a register */
1006
1007#define DMA_RB_CNTL 0xd000
1008# define DMA_RB_ENABLE (1 << 0)
1009# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
1010# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
1011# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
1012# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
1013# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
1014#define DMA_RB_BASE 0xd004
1015#define DMA_RB_RPTR 0xd008
1016#define DMA_RB_WPTR 0xd00c
1017
1018#define DMA_RB_RPTR_ADDR_HI 0xd01c
1019#define DMA_RB_RPTR_ADDR_LO 0xd020
1020
1021#define DMA_IB_CNTL 0xd024
1022# define DMA_IB_ENABLE (1 << 0)
1023# define DMA_IB_SWAP_ENABLE (1 << 4)
1024#define DMA_IB_RPTR 0xd028
1025#define DMA_CNTL 0xd02c
1026# define TRAP_ENABLE (1 << 0)
1027# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
1028# define SEM_WAIT_INT_ENABLE (1 << 2)
1029# define DATA_SWAP_ENABLE (1 << 3)
1030# define FENCE_SWAP_ENABLE (1 << 4)
1031# define CTXEMPTY_INT_ENABLE (1 << 28)
1032#define DMA_STATUS_REG 0xd034
1033# define DMA_IDLE (1 << 0)
1034#define DMA_TILING_CONFIG 0xd0b8
1035
1036#define DMA_PACKET(cmd, b, t, s, n) ((((cmd) & 0xF) << 28) | \
1037 (((b) & 0x1) << 26) | \
1038 (((t) & 0x1) << 23) | \
1039 (((s) & 0x1) << 22) | \
1040 (((n) & 0xFFFFF) << 0))
1041
1042#define DMA_IB_PACKET(cmd, vmid, n) ((((cmd) & 0xF) << 28) | \
1043 (((vmid) & 0xF) << 20) | \
1044 (((n) & 0xFFFFF) << 0))
1045
1046#define DMA_PTE_PDE_PACKET(n) ((2 << 28) | \
1047 (1 << 26) | \
1048 (1 << 21) | \
1049 (((n) & 0xFFFFF) << 0))
1050
1051/* async DMA Packet types */
1052#define DMA_PACKET_WRITE 0x2
1053#define DMA_PACKET_COPY 0x3
1054#define DMA_PACKET_INDIRECT_BUFFER 0x4
1055#define DMA_PACKET_SEMAPHORE 0x5
1056#define DMA_PACKET_FENCE 0x6
1057#define DMA_PACKET_TRAP 0x7
1058#define DMA_PACKET_SRBM_WRITE 0x9
1059#define DMA_PACKET_CONSTANT_FILL 0xd
1060#define DMA_PACKET_NOP 0xf
1061
924#endif 1062#endif
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 0e7a9306bd0c..d917a411ca85 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -748,7 +748,7 @@ int shmob_drm_connector_create(struct shmob_drm_device *sdev,
748 connector->encoder = encoder; 748 connector->encoder = encoder;
749 749
750 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 750 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
751 drm_connector_property_set_value(connector, 751 drm_object_property_set_value(&connector->base,
752 sdev->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF); 752 sdev->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
753 753
754 return 0; 754 return 0;
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 1c350fc4e449..d1d5306ebf24 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -33,7 +33,7 @@
33 * Hardware initialization 33 * Hardware initialization
34 */ 34 */
35 35
36static int __devinit shmob_drm_init_interface(struct shmob_drm_device *sdev) 36static int shmob_drm_init_interface(struct shmob_drm_device *sdev)
37{ 37{
38 static const u32 ldmt1r[] = { 38 static const u32 ldmt1r[] = {
39 [SHMOB_DRM_IFACE_RGB8] = LDMT1R_MIFTYP_RGB8, 39 [SHMOB_DRM_IFACE_RGB8] = LDMT1R_MIFTYP_RGB8,
@@ -67,7 +67,7 @@ static int __devinit shmob_drm_init_interface(struct shmob_drm_device *sdev)
67 return 0; 67 return 0;
68} 68}
69 69
70static int __devinit shmob_drm_setup_clocks(struct shmob_drm_device *sdev, 70static int shmob_drm_setup_clocks(struct shmob_drm_device *sdev,
71 enum shmob_drm_clk_source clksrc) 71 enum shmob_drm_clk_source clksrc)
72{ 72{
73 struct clk *clk; 73 struct clk *clk;
@@ -330,12 +330,12 @@ static const struct dev_pm_ops shmob_drm_pm_ops = {
330 * Platform driver 330 * Platform driver
331 */ 331 */
332 332
333static int __devinit shmob_drm_probe(struct platform_device *pdev) 333static int shmob_drm_probe(struct platform_device *pdev)
334{ 334{
335 return drm_platform_init(&shmob_drm_driver, pdev); 335 return drm_platform_init(&shmob_drm_driver, pdev);
336} 336}
337 337
338static int __devexit shmob_drm_remove(struct platform_device *pdev) 338static int shmob_drm_remove(struct platform_device *pdev)
339{ 339{
340 drm_platform_exit(&shmob_drm_driver, pdev); 340 drm_platform_exit(&shmob_drm_driver, pdev);
341 341
@@ -344,7 +344,7 @@ static int __devexit shmob_drm_remove(struct platform_device *pdev)
344 344
345static struct platform_driver shmob_drm_platform_driver = { 345static struct platform_driver shmob_drm_platform_driver = {
346 .probe = shmob_drm_probe, 346 .probe = shmob_drm_probe,
347 .remove = __devexit_p(shmob_drm_remove), 347 .remove = shmob_drm_remove,
348 .driver = { 348 .driver = {
349 .owner = THIS_MODULE, 349 .owner = THIS_MODULE,
350 .name = "shmob-drm", 350 .name = "shmob-drm",
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
new file mode 100644
index 000000000000..be1daf7344d3
--- /dev/null
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -0,0 +1,23 @@
1config DRM_TEGRA
2 tristate "NVIDIA Tegra DRM"
3 depends on DRM && OF && ARCH_TEGRA
4 select DRM_KMS_HELPER
5 select DRM_GEM_CMA_HELPER
6 select DRM_KMS_CMA_HELPER
7 select FB_CFB_FILLRECT
8 select FB_CFB_COPYAREA
9 select FB_CFB_IMAGEBLIT
10 help
11 Choose this option if you have an NVIDIA Tegra SoC.
12
13 To compile this driver as a module, choose M here: the module
14 will be called tegra-drm.
15
16if DRM_TEGRA
17
18config DRM_TEGRA_DEBUG
19 bool "NVIDIA Tegra DRM debug support"
20 help
21 Say yes here to enable debugging support.
22
23endif
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
new file mode 100644
index 000000000000..80f73d1315d0
--- /dev/null
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -0,0 +1,7 @@
1ccflags-y := -Iinclude/drm
2ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
3
4tegra-drm-y := drm.o fb.o dc.o host1x.o
5tegra-drm-y += output.o rgb.o hdmi.o
6
7obj-$(CONFIG_DRM_TEGRA) += tegra-drm.o
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
new file mode 100644
index 000000000000..656b2e3334a6
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -0,0 +1,833 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/clk.h>
11#include <linux/debugfs.h>
12#include <linux/module.h>
13#include <linux/of.h>
14#include <linux/platform_device.h>
15
16#include <mach/clk.h>
17
18#include "drm.h"
19#include "dc.h"
20
/*
 * struct tegra_dc_window - parameters used to program one display window
 *
 * The source rectangle (x, y, w, h) is kept in 20.12 fixed point so that
 * the DDA scaling registers can be derived from it; the output rectangle
 * (outx, outy, outw, outh) is in whole pixels.
 */
struct tegra_dc_window {
	fixed20_12 x;		/* source origin, horizontal (20.12 fixed point) */
	fixed20_12 y;		/* source origin, vertical (20.12 fixed point) */
	fixed20_12 w;		/* source width (20.12 fixed point) */
	fixed20_12 h;		/* source height (20.12 fixed point) */
	unsigned int outx;	/* output position, horizontal (pixels) */
	unsigned int outy;	/* output position, vertical (pixels) */
	unsigned int outw;	/* output width (pixels) */
	unsigned int outh;	/* output height (pixels) */
	unsigned int stride;	/* framebuffer pitch (taken from fb->pitches[0]) */
	unsigned int fmt;	/* WIN_COLOR_DEPTH_* value for DC_WIN_COLOR_DEPTH */
};
33
/* CRTC entry points: mode setting goes through the generic KMS helpers. */
static const struct drm_crtc_funcs tegra_crtc_funcs = {
	.set_config = drm_crtc_helper_set_config,
	.destroy = drm_crtc_cleanup,
};
38
/* DPMS is not implemented for this CRTC yet; all requested modes are ignored. */
static void tegra_crtc_dpms(struct drm_crtc *crtc, int mode)
{
}
42
/*
 * No mode adjustment is required for this display controller; accept every
 * mode unchanged.
 */
static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
				  const struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted)
{
	return true;
}
49
50static inline u32 compute_dda_inc(fixed20_12 inf, unsigned int out, bool v,
51 unsigned int bpp)
52{
53 fixed20_12 outf = dfixed_init(out);
54 u32 dda_inc;
55 int max;
56
57 if (v)
58 max = 15;
59 else {
60 switch (bpp) {
61 case 2:
62 max = 8;
63 break;
64
65 default:
66 WARN_ON_ONCE(1);
67 /* fallthrough */
68 case 4:
69 max = 4;
70 break;
71 }
72 }
73
74 outf.full = max_t(u32, outf.full - dfixed_const(1), dfixed_const(1));
75 inf.full -= dfixed_const(1);
76
77 dda_inc = dfixed_div(inf, outf);
78 dda_inc = min_t(u32, dda_inc, dfixed_const(max));
79
80 return dda_inc;
81}
82
/*
 * compute_initial_dda() - initial DDA accumulator for a window axis
 *
 * Returns the fractional part of the 20.12 fixed-point start coordinate.
 */
static inline u32 compute_initial_dda(fixed20_12 in)
{
	return dfixed_frac(in);
}
87
/*
 * tegra_dc_set_timings() - program the display timing registers from @mode
 *
 * Each timing register packs the vertical value in bits 31:16 and the
 * horizontal value in bits 15:0.  Always returns 0.
 */
static int tegra_dc_set_timings(struct tegra_dc *dc,
				struct drm_display_mode *mode)
{
	/* TODO: For HDMI compliance, h & v ref_to_sync should be set to 1 */
	unsigned int h_ref_to_sync = 0;
	unsigned int v_ref_to_sync = 0;
	unsigned long value;

	tegra_dc_writel(dc, 0x0, DC_DISP_DISP_TIMING_OPTIONS);

	value = (v_ref_to_sync << 16) | h_ref_to_sync;
	tegra_dc_writel(dc, value, DC_DISP_REF_TO_SYNC);

	value = ((mode->vsync_end - mode->vsync_start) << 16) |
		((mode->hsync_end - mode->hsync_start) << 0);
	tegra_dc_writel(dc, value, DC_DISP_SYNC_WIDTH);

	value = ((mode->vtotal - mode->vsync_end) << 16) |
		((mode->htotal - mode->hsync_end) << 0);
	tegra_dc_writel(dc, value, DC_DISP_BACK_PORCH);

	value = ((mode->vsync_start - mode->vdisplay) << 16) |
		((mode->hsync_start - mode->hdisplay) << 0);
	tegra_dc_writel(dc, value, DC_DISP_FRONT_PORCH);

	value = (mode->vdisplay << 16) | mode->hdisplay;
	tegra_dc_writel(dc, value, DC_DISP_ACTIVE);

	return 0;
}
118
/*
 * tegra_crtc_setup_clk() - configure the display clock for @mode
 * @div: on success, the shift-clock divider to program into
 *       DC_DISP_DISP_CLOCK_CONTROL
 *
 * Looks up the output currently attached to @crtc, asks it to set up a
 * parent clock at twice the pixel clock and derives the divider from the
 * rate actually achieved.  Returns 0 on success or a negative error code.
 */
static int tegra_crtc_setup_clk(struct drm_crtc *crtc,
				struct drm_display_mode *mode,
				unsigned long *div)
{
	unsigned long pclk = mode->clock * 1000, rate;
	struct tegra_dc *dc = to_tegra_dc(crtc);
	struct tegra_output *output = NULL;
	struct drm_encoder *encoder;
	long err;

	/* find the encoder (and hence the output) driving this CRTC */
	list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list, head)
		if (encoder->crtc == crtc) {
			output = encoder_to_output(encoder);
			break;
		}

	if (!output)
		return -ENODEV;

	/*
	 * This assumes that the display controller will divide its parent
	 * clock by 2 to generate the pixel clock.
	 */
	err = tegra_output_setup_clock(output, dc->clk, pclk * 2);
	if (err < 0) {
		dev_err(dc->dev, "failed to setup clock: %ld\n", err);
		return err;
	}

	rate = clk_get_rate(dc->clk);
	/* chosen so that pclk == 2 * rate / (*div + 2) */
	*div = (rate * 2 / pclk) - 2;

	DRM_DEBUG_KMS("rate: %lu, div: %lu\n", rate, *div);

	return 0;
}
155
/*
 * tegra_crtc_mode_set() - program the display controller for a new mode
 *
 * Sets up the pixel clock and display timings, then programs window A to
 * scan out the CRTC's framebuffer full-screen (no scaling: source and
 * output rectangles are both the mode's active area).  Returns 0 on
 * success or a negative error code from the clock setup.
 */
static int tegra_crtc_mode_set(struct drm_crtc *crtc,
			       struct drm_display_mode *mode,
			       struct drm_display_mode *adjusted,
			       int x, int y, struct drm_framebuffer *old_fb)
{
	struct tegra_framebuffer *fb = to_tegra_fb(crtc->fb);
	struct tegra_dc *dc = to_tegra_dc(crtc);
	unsigned int h_dda, v_dda, bpp;
	struct tegra_dc_window win;
	unsigned long div, value;
	int err;

	err = tegra_crtc_setup_clk(crtc, mode, &div);
	if (err) {
		dev_err(dc->dev, "failed to setup clock for CRTC: %d\n", err);
		return err;
	}

	/* program display mode */
	tegra_dc_set_timings(dc, mode);

	value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL;
	tegra_dc_writel(dc, value, DC_DISP_DATA_ENABLE_OPTIONS);

	/* use active-high LHS/LVS polarity */
	value = tegra_dc_readl(dc, DC_COM_PIN_OUTPUT_POLARITY(1));
	value &= ~LVS_OUTPUT_POLARITY_LOW;
	value &= ~LHS_OUTPUT_POLARITY_LOW;
	tegra_dc_writel(dc, value, DC_COM_PIN_OUTPUT_POLARITY(1));

	value = DISP_DATA_FORMAT_DF1P1C | DISP_ALIGNMENT_MSB |
		DISP_ORDER_RED_BLUE;
	tegra_dc_writel(dc, value, DC_DISP_DISP_INTERFACE_CONTROL);

	tegra_dc_writel(dc, 0x00010001, DC_DISP_SHIFT_CLOCK_OPTIONS);

	value = SHIFT_CLK_DIVIDER(div) | PIXEL_CLK_DIVIDER_PCD1;
	tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);

	/* setup window parameters */
	memset(&win, 0, sizeof(win));
	win.x.full = dfixed_const(0);
	win.y.full = dfixed_const(0);
	win.w.full = dfixed_const(mode->hdisplay);
	win.h.full = dfixed_const(mode->vdisplay);
	win.outx = 0;
	win.outy = 0;
	win.outw = mode->hdisplay;
	win.outh = mode->vdisplay;

	switch (crtc->fb->pixel_format) {
	case DRM_FORMAT_XRGB8888:
		win.fmt = WIN_COLOR_DEPTH_B8G8R8A8;
		break;

	case DRM_FORMAT_RGB565:
		win.fmt = WIN_COLOR_DEPTH_B5G6R5;
		break;

	default:
		/* fall back to 32 bpp for unhandled formats, but complain */
		win.fmt = WIN_COLOR_DEPTH_B8G8R8A8;
		WARN_ON(1);
		break;
	}

	bpp = crtc->fb->bits_per_pixel / 8;
	win.stride = crtc->fb->pitches[0];

	/* program window registers */
	value = WINDOW_A_SELECT;
	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);

	tegra_dc_writel(dc, win.fmt, DC_WIN_COLOR_DEPTH);
	tegra_dc_writel(dc, 0, DC_WIN_BYTE_SWAP);

	value = V_POSITION(win.outy) | H_POSITION(win.outx);
	tegra_dc_writel(dc, value, DC_WIN_POSITION);

	value = V_SIZE(win.outh) | H_SIZE(win.outw);
	tegra_dc_writel(dc, value, DC_WIN_SIZE);

	/* horizontal prescaled size is in bytes, vertical in lines */
	value = V_PRESCALED_SIZE(dfixed_trunc(win.h)) |
		H_PRESCALED_SIZE(dfixed_trunc(win.w) * bpp);
	tegra_dc_writel(dc, value, DC_WIN_PRESCALED_SIZE);

	h_dda = compute_dda_inc(win.w, win.outw, false, bpp);
	v_dda = compute_dda_inc(win.h, win.outh, true, bpp);

	value = V_DDA_INC(v_dda) | H_DDA_INC(h_dda);
	tegra_dc_writel(dc, value, DC_WIN_DDA_INC);

	h_dda = compute_initial_dda(win.x);
	v_dda = compute_initial_dda(win.y);

	tegra_dc_writel(dc, h_dda, DC_WIN_H_INITIAL_DDA);
	tegra_dc_writel(dc, v_dda, DC_WIN_V_INITIAL_DDA);

	tegra_dc_writel(dc, 0, DC_WIN_UV_BUF_STRIDE);
	tegra_dc_writel(dc, 0, DC_WIN_BUF_STRIDE);

	tegra_dc_writel(dc, fb->obj->paddr, DC_WINBUF_START_ADDR);
	tegra_dc_writel(dc, win.stride, DC_WIN_LINE_STRIDE);
	tegra_dc_writel(dc, dfixed_trunc(win.x) * bpp,
			DC_WINBUF_ADDR_H_OFFSET);
	tegra_dc_writel(dc, dfixed_trunc(win.y), DC_WINBUF_ADDR_V_OFFSET);

	value = WIN_ENABLE;

	/*
	 * NOTE(review): bpp here is bytes per pixel (2 or 4), so this
	 * condition is always true and COLOR_EXPAND is always set; it
	 * looks like bits_per_pixel < 24 was intended — confirm.
	 */
	if (bpp < 24)
		value |= COLOR_EXPAND;

	tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);

	tegra_dc_writel(dc, 0xff00, DC_WIN_BLEND_NOKEY);
	tegra_dc_writel(dc, 0xff00, DC_WIN_BLEND_1WIN);

	return 0;
}
273
/*
 * tegra_crtc_prepare() - bring the display controller out of reset and
 * perform one-time initialization before a mode set
 *
 * Deasserts the peripheral reset, hooks the VSYNC syncpoint for this head,
 * configures interrupt type/polarity, enables the power/PM outputs, puts
 * the controller in continuous display mode, sets memory-fetch thresholds
 * and unmasks the VBLANK and window-underflow interrupts.
 */
static void tegra_crtc_prepare(struct drm_crtc *crtc)
{
	struct tegra_dc *dc = to_tegra_dc(crtc);
	unsigned int syncpt;
	unsigned long value;

	/* hardware initialization */
	tegra_periph_reset_deassert(dc->clk);
	usleep_range(10000, 20000);

	/* each head has its own VBLANK syncpoint */
	if (dc->pipe)
		syncpt = SYNCPT_VBLANK1;
	else
		syncpt = SYNCPT_VBLANK0;

	/* initialize display controller */
	tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
	tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC);

	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
	tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);

	value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
		WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
	tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);

	value = PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
		PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);

	value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
	value |= DISP_CTRL_MODE_C_DISPLAY;
	tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);

	/* initialize timer */
	value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
		WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
	tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);

	value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
		WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
	tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);

	value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
	tegra_dc_writel(dc, value, DC_CMD_INT_MASK);

	value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
	tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
}
323
/*
 * tegra_crtc_commit() - latch the programmed state into the hardware
 *
 * Writing update_mask << 8 arms the "update" request, enabling the
 * frame-end interrupt in between, and the final write of update_mask
 * activates the new general and window A state.
 */
static void tegra_crtc_commit(struct drm_crtc *crtc)
{
	struct tegra_dc *dc = to_tegra_dc(crtc);
	unsigned long update_mask;
	unsigned long value;

	update_mask = GENERAL_ACT_REQ | WIN_A_ACT_REQ;

	tegra_dc_writel(dc, update_mask << 8, DC_CMD_STATE_CONTROL);

	value = tegra_dc_readl(dc, DC_CMD_INT_ENABLE);
	value |= FRAME_END_INT;
	tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);

	value = tegra_dc_readl(dc, DC_CMD_INT_MASK);
	value |= FRAME_END_INT;
	tegra_dc_writel(dc, value, DC_CMD_INT_MASK);

	tegra_dc_writel(dc, update_mask, DC_CMD_STATE_CONTROL);
}
344
/* Gamma LUT loading is not implemented for this display controller. */
static void tegra_crtc_load_lut(struct drm_crtc *crtc)
{
}
348
/* KMS helper callbacks used by drm_crtc_helper_set_config(). */
static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
	.dpms = tegra_crtc_dpms,
	.mode_fixup = tegra_crtc_mode_fixup,
	.mode_set = tegra_crtc_mode_set,
	.prepare = tegra_crtc_prepare,
	.commit = tegra_crtc_commit,
	.load_lut = tegra_crtc_load_lut,
};
357
/*
 * tegra_drm_irq() - display controller interrupt handler
 *
 * Reads and acknowledges all pending interrupt bits, forwards VBLANK to
 * the DRM core and ignores frame-end/underflow (only debug hooks remain,
 * kept commented out).  Always reports the interrupt as handled.
 */
static irqreturn_t tegra_drm_irq(int irq, void *data)
{
	struct tegra_dc *dc = data;
	unsigned long status;

	/* ack everything that is currently pending */
	status = tegra_dc_readl(dc, DC_CMD_INT_STATUS);
	tegra_dc_writel(dc, status, DC_CMD_INT_STATUS);

	if (status & FRAME_END_INT) {
		/*
		dev_dbg(dc->dev, "%s(): frame end\n", __func__);
		*/
	}

	if (status & VBLANK_INT) {
		/*
		dev_dbg(dc->dev, "%s(): vertical blank\n", __func__);
		*/
		drm_handle_vblank(dc->base.dev, dc->pipe);
	}

	if (status & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT)) {
		/*
		dev_dbg(dc->dev, "%s(): underflow\n", __func__);
		*/
	}

	return IRQ_HANDLED;
}
387
/*
 * tegra_dc_show_regs() - debugfs "regs" file: dump the display controller
 * register set (name, offset, current value) one register per line
 */
static int tegra_dc_show_regs(struct seq_file *s, void *data)
{
	struct drm_info_node *node = s->private;
	struct tegra_dc *dc = node->info_ent->data;

#define DUMP_REG(name) \
	seq_printf(s, "%-40s %#05x %08lx\n", #name, name, \
		   tegra_dc_readl(dc, name))

	/* command registers */
	DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT);
	DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
	DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT_ERROR);
	DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT);
	DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT_CNTRL);
	DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT_ERROR);
	DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT);
	DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT_CNTRL);
	DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT_ERROR);
	DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT);
	DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT_CNTRL);
	DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT_ERROR);
	DUMP_REG(DC_CMD_CONT_SYNCPT_VSYNC);
	DUMP_REG(DC_CMD_DISPLAY_COMMAND_OPTION0);
	DUMP_REG(DC_CMD_DISPLAY_COMMAND);
	DUMP_REG(DC_CMD_SIGNAL_RAISE);
	DUMP_REG(DC_CMD_DISPLAY_POWER_CONTROL);
	DUMP_REG(DC_CMD_INT_STATUS);
	DUMP_REG(DC_CMD_INT_MASK);
	DUMP_REG(DC_CMD_INT_ENABLE);
	DUMP_REG(DC_CMD_INT_TYPE);
	DUMP_REG(DC_CMD_INT_POLARITY);
	DUMP_REG(DC_CMD_SIGNAL_RAISE1);
	DUMP_REG(DC_CMD_SIGNAL_RAISE2);
	DUMP_REG(DC_CMD_SIGNAL_RAISE3);
	DUMP_REG(DC_CMD_STATE_ACCESS);
	DUMP_REG(DC_CMD_STATE_CONTROL);
	DUMP_REG(DC_CMD_DISPLAY_WINDOW_HEADER);
	DUMP_REG(DC_CMD_REG_ACT_CONTROL);
	/* common registers */
	DUMP_REG(DC_COM_CRC_CONTROL);
	DUMP_REG(DC_COM_CRC_CHECKSUM);
	DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(0));
	DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(1));
	DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(2));
	DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(3));
	DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(0));
	DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(1));
	DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(2));
	DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(3));
	DUMP_REG(DC_COM_PIN_OUTPUT_DATA(0));
	DUMP_REG(DC_COM_PIN_OUTPUT_DATA(1));
	DUMP_REG(DC_COM_PIN_OUTPUT_DATA(2));
	DUMP_REG(DC_COM_PIN_OUTPUT_DATA(3));
	DUMP_REG(DC_COM_PIN_INPUT_ENABLE(0));
	DUMP_REG(DC_COM_PIN_INPUT_ENABLE(1));
	DUMP_REG(DC_COM_PIN_INPUT_ENABLE(2));
	DUMP_REG(DC_COM_PIN_INPUT_ENABLE(3));
	DUMP_REG(DC_COM_PIN_INPUT_DATA(0));
	DUMP_REG(DC_COM_PIN_INPUT_DATA(1));
	DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(0));
	DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(1));
	DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(2));
	DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(3));
	DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(4));
	DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(5));
	DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(6));
	DUMP_REG(DC_COM_PIN_MISC_CONTROL);
	DUMP_REG(DC_COM_PIN_PM0_CONTROL);
	DUMP_REG(DC_COM_PIN_PM0_DUTY_CYCLE);
	DUMP_REG(DC_COM_PIN_PM1_CONTROL);
	DUMP_REG(DC_COM_PIN_PM1_DUTY_CYCLE);
	DUMP_REG(DC_COM_SPI_CONTROL);
	DUMP_REG(DC_COM_SPI_START_BYTE);
	DUMP_REG(DC_COM_HSPI_WRITE_DATA_AB);
	DUMP_REG(DC_COM_HSPI_WRITE_DATA_CD);
	DUMP_REG(DC_COM_HSPI_CS_DC);
	DUMP_REG(DC_COM_SCRATCH_REGISTER_A);
	DUMP_REG(DC_COM_SCRATCH_REGISTER_B);
	DUMP_REG(DC_COM_GPIO_CTRL);
	DUMP_REG(DC_COM_GPIO_DEBOUNCE_COUNTER);
	DUMP_REG(DC_COM_CRC_CHECKSUM_LATCHED);
	/* display registers */
	DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS0);
	DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS1);
	DUMP_REG(DC_DISP_DISP_WIN_OPTIONS);
	DUMP_REG(DC_DISP_DISP_MEM_HIGH_PRIORITY);
	DUMP_REG(DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
	DUMP_REG(DC_DISP_DISP_TIMING_OPTIONS);
	DUMP_REG(DC_DISP_REF_TO_SYNC);
	DUMP_REG(DC_DISP_SYNC_WIDTH);
	DUMP_REG(DC_DISP_BACK_PORCH);
	DUMP_REG(DC_DISP_ACTIVE);
	DUMP_REG(DC_DISP_FRONT_PORCH);
	DUMP_REG(DC_DISP_H_PULSE0_CONTROL);
	DUMP_REG(DC_DISP_H_PULSE0_POSITION_A);
	DUMP_REG(DC_DISP_H_PULSE0_POSITION_B);
	DUMP_REG(DC_DISP_H_PULSE0_POSITION_C);
	DUMP_REG(DC_DISP_H_PULSE0_POSITION_D);
	DUMP_REG(DC_DISP_H_PULSE1_CONTROL);
	DUMP_REG(DC_DISP_H_PULSE1_POSITION_A);
	DUMP_REG(DC_DISP_H_PULSE1_POSITION_B);
	DUMP_REG(DC_DISP_H_PULSE1_POSITION_C);
	DUMP_REG(DC_DISP_H_PULSE1_POSITION_D);
	DUMP_REG(DC_DISP_H_PULSE2_CONTROL);
	DUMP_REG(DC_DISP_H_PULSE2_POSITION_A);
	DUMP_REG(DC_DISP_H_PULSE2_POSITION_B);
	DUMP_REG(DC_DISP_H_PULSE2_POSITION_C);
	DUMP_REG(DC_DISP_H_PULSE2_POSITION_D);
	DUMP_REG(DC_DISP_V_PULSE0_CONTROL);
	DUMP_REG(DC_DISP_V_PULSE0_POSITION_A);
	DUMP_REG(DC_DISP_V_PULSE0_POSITION_B);
	DUMP_REG(DC_DISP_V_PULSE0_POSITION_C);
	DUMP_REG(DC_DISP_V_PULSE1_CONTROL);
	DUMP_REG(DC_DISP_V_PULSE1_POSITION_A);
	DUMP_REG(DC_DISP_V_PULSE1_POSITION_B);
	DUMP_REG(DC_DISP_V_PULSE1_POSITION_C);
	DUMP_REG(DC_DISP_V_PULSE2_CONTROL);
	DUMP_REG(DC_DISP_V_PULSE2_POSITION_A);
	DUMP_REG(DC_DISP_V_PULSE3_CONTROL);
	DUMP_REG(DC_DISP_V_PULSE3_POSITION_A);
	DUMP_REG(DC_DISP_M0_CONTROL);
	DUMP_REG(DC_DISP_M1_CONTROL);
	DUMP_REG(DC_DISP_DI_CONTROL);
	DUMP_REG(DC_DISP_PP_CONTROL);
	DUMP_REG(DC_DISP_PP_SELECT_A);
	DUMP_REG(DC_DISP_PP_SELECT_B);
	DUMP_REG(DC_DISP_PP_SELECT_C);
	DUMP_REG(DC_DISP_PP_SELECT_D);
	DUMP_REG(DC_DISP_DISP_CLOCK_CONTROL);
	DUMP_REG(DC_DISP_DISP_INTERFACE_CONTROL);
	DUMP_REG(DC_DISP_DISP_COLOR_CONTROL);
	DUMP_REG(DC_DISP_SHIFT_CLOCK_OPTIONS);
	DUMP_REG(DC_DISP_DATA_ENABLE_OPTIONS);
	DUMP_REG(DC_DISP_SERIAL_INTERFACE_OPTIONS);
	DUMP_REG(DC_DISP_LCD_SPI_OPTIONS);
	DUMP_REG(DC_DISP_BORDER_COLOR);
	DUMP_REG(DC_DISP_COLOR_KEY0_LOWER);
	DUMP_REG(DC_DISP_COLOR_KEY0_UPPER);
	DUMP_REG(DC_DISP_COLOR_KEY1_LOWER);
	DUMP_REG(DC_DISP_COLOR_KEY1_UPPER);
	DUMP_REG(DC_DISP_CURSOR_FOREGROUND);
	DUMP_REG(DC_DISP_CURSOR_BACKGROUND);
	DUMP_REG(DC_DISP_CURSOR_START_ADDR);
	DUMP_REG(DC_DISP_CURSOR_START_ADDR_NS);
	DUMP_REG(DC_DISP_CURSOR_POSITION);
	DUMP_REG(DC_DISP_CURSOR_POSITION_NS);
	DUMP_REG(DC_DISP_INIT_SEQ_CONTROL);
	DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_A);
	DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_B);
	DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_C);
	DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_D);
	DUMP_REG(DC_DISP_DC_MCCIF_FIFOCTRL);
	DUMP_REG(DC_DISP_MCCIF_DISPLAY0A_HYST);
	DUMP_REG(DC_DISP_MCCIF_DISPLAY0B_HYST);
	DUMP_REG(DC_DISP_MCCIF_DISPLAY1A_HYST);
	DUMP_REG(DC_DISP_MCCIF_DISPLAY1B_HYST);
	DUMP_REG(DC_DISP_DAC_CRT_CTRL);
	DUMP_REG(DC_DISP_DISP_MISC_CONTROL);
	DUMP_REG(DC_DISP_SD_CONTROL);
	DUMP_REG(DC_DISP_SD_CSC_COEFF);
	DUMP_REG(DC_DISP_SD_LUT(0));
	DUMP_REG(DC_DISP_SD_LUT(1));
	DUMP_REG(DC_DISP_SD_LUT(2));
	DUMP_REG(DC_DISP_SD_LUT(3));
	DUMP_REG(DC_DISP_SD_LUT(4));
	DUMP_REG(DC_DISP_SD_LUT(5));
	DUMP_REG(DC_DISP_SD_LUT(6));
	DUMP_REG(DC_DISP_SD_LUT(7));
	DUMP_REG(DC_DISP_SD_LUT(8));
	DUMP_REG(DC_DISP_SD_FLICKER_CONTROL);
	DUMP_REG(DC_DISP_DC_PIXEL_COUNT);
	DUMP_REG(DC_DISP_SD_HISTOGRAM(0));
	DUMP_REG(DC_DISP_SD_HISTOGRAM(1));
	DUMP_REG(DC_DISP_SD_HISTOGRAM(2));
	DUMP_REG(DC_DISP_SD_HISTOGRAM(3));
	DUMP_REG(DC_DISP_SD_HISTOGRAM(4));
	DUMP_REG(DC_DISP_SD_HISTOGRAM(5));
	DUMP_REG(DC_DISP_SD_HISTOGRAM(6));
	DUMP_REG(DC_DISP_SD_HISTOGRAM(7));
	DUMP_REG(DC_DISP_SD_BL_TF(0));
	DUMP_REG(DC_DISP_SD_BL_TF(1));
	DUMP_REG(DC_DISP_SD_BL_TF(2));
	DUMP_REG(DC_DISP_SD_BL_TF(3));
	DUMP_REG(DC_DISP_SD_BL_CONTROL);
	DUMP_REG(DC_DISP_SD_HW_K_VALUES);
	DUMP_REG(DC_DISP_SD_MAN_K_VALUES);
	/* window registers (for the currently selected window) */
	DUMP_REG(DC_WIN_WIN_OPTIONS);
	DUMP_REG(DC_WIN_BYTE_SWAP);
	DUMP_REG(DC_WIN_BUFFER_CONTROL);
	DUMP_REG(DC_WIN_COLOR_DEPTH);
	DUMP_REG(DC_WIN_POSITION);
	DUMP_REG(DC_WIN_SIZE);
	DUMP_REG(DC_WIN_PRESCALED_SIZE);
	DUMP_REG(DC_WIN_H_INITIAL_DDA);
	DUMP_REG(DC_WIN_V_INITIAL_DDA);
	DUMP_REG(DC_WIN_DDA_INC);
	DUMP_REG(DC_WIN_LINE_STRIDE);
	DUMP_REG(DC_WIN_BUF_STRIDE);
	DUMP_REG(DC_WIN_UV_BUF_STRIDE);
	DUMP_REG(DC_WIN_BUFFER_ADDR_MODE);
	DUMP_REG(DC_WIN_DV_CONTROL);
	DUMP_REG(DC_WIN_BLEND_NOKEY);
	DUMP_REG(DC_WIN_BLEND_1WIN);
	DUMP_REG(DC_WIN_BLEND_2WIN_X);
	DUMP_REG(DC_WIN_BLEND_2WIN_Y);
	DUMP_REG(DC_WIN_BLEND32WIN_XY);
	DUMP_REG(DC_WIN_HP_FETCH_CONTROL);
	/* window buffer registers */
	DUMP_REG(DC_WINBUF_START_ADDR);
	DUMP_REG(DC_WINBUF_START_ADDR_NS);
	DUMP_REG(DC_WINBUF_START_ADDR_U);
	DUMP_REG(DC_WINBUF_START_ADDR_U_NS);
	DUMP_REG(DC_WINBUF_START_ADDR_V);
	DUMP_REG(DC_WINBUF_START_ADDR_V_NS);
	DUMP_REG(DC_WINBUF_ADDR_H_OFFSET);
	DUMP_REG(DC_WINBUF_ADDR_H_OFFSET_NS);
	DUMP_REG(DC_WINBUF_ADDR_V_OFFSET);
	DUMP_REG(DC_WINBUF_ADDR_V_OFFSET_NS);
	DUMP_REG(DC_WINBUF_UFLOW_STATUS);
	DUMP_REG(DC_WINBUF_AD_UFLOW_STATUS);
	DUMP_REG(DC_WINBUF_BD_UFLOW_STATUS);
	DUMP_REG(DC_WINBUF_CD_UFLOW_STATUS);

#undef DUMP_REG

	return 0;
}
612
/* debugfs entries; .data is filled in per-instance at registration time */
static struct drm_info_list debugfs_files[] = {
	{ "regs", tegra_dc_show_regs, 0, NULL },
};
616
617static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
618{
619 unsigned int i;
620 char *name;
621 int err;
622
623 name = kasprintf(GFP_KERNEL, "dc.%d", dc->pipe);
624 dc->debugfs = debugfs_create_dir(name, minor->debugfs_root);
625 kfree(name);
626
627 if (!dc->debugfs)
628 return -ENOMEM;
629
630 dc->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
631 GFP_KERNEL);
632 if (!dc->debugfs_files) {
633 err = -ENOMEM;
634 goto remove;
635 }
636
637 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
638 dc->debugfs_files[i].data = dc;
639
640 err = drm_debugfs_create_files(dc->debugfs_files,
641 ARRAY_SIZE(debugfs_files),
642 dc->debugfs, minor);
643 if (err < 0)
644 goto free;
645
646 dc->minor = minor;
647
648 return 0;
649
650free:
651 kfree(dc->debugfs_files);
652 dc->debugfs_files = NULL;
653remove:
654 debugfs_remove(dc->debugfs);
655 dc->debugfs = NULL;
656
657 return err;
658}
659
/*
 * tegra_dc_debugfs_exit() - undo tegra_dc_debugfs_init() in reverse order:
 * unregister the files, free the private file table, remove the directory.
 * Always returns 0.
 */
static int tegra_dc_debugfs_exit(struct tegra_dc *dc)
{
	drm_debugfs_remove_files(dc->debugfs_files, ARRAY_SIZE(debugfs_files),
				 dc->minor);
	dc->minor = NULL;

	kfree(dc->debugfs_files);
	dc->debugfs_files = NULL;

	debugfs_remove(dc->debugfs);
	dc->debugfs = NULL;

	return 0;
}
674
/*
 * host1x client callback: hook this display controller into the DRM device.
 * Registers the CRTC, brings up the RGB output (if present), creates the
 * debugfs entries and installs the display IRQ handler.
 */
static int tegra_dc_drm_init(struct host1x_client *client,
			     struct drm_device *drm)
{
	struct tegra_dc *dc = host1x_client_to_dc(client);
	int err;

	/* CRTC pipe index is assigned in registration order */
	dc->pipe = drm->mode_config.num_crtc;

	drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs);
	drm_mode_crtc_set_gamma_size(&dc->base, 256);
	drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);

	/* -ENODEV simply means no RGB panel is attached; not an error */
	err = tegra_dc_rgb_init(drm, dc);
	if (err < 0 && err != -ENODEV) {
		dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
		return err;
	}

	/* debugfs failure is only logged; the controller still works */
	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
		err = tegra_dc_debugfs_init(dc, drm->primary);
		if (err < 0)
			dev_err(dc->dev, "debugfs setup failed: %d\n", err);
	}

	/*
	 * NOTE(review): if this fails, the debugfs entries and RGB output
	 * created above are not torn down — confirm whether that is relied
	 * upon by the caller.
	 */
	err = devm_request_irq(dc->dev, dc->irq, tegra_drm_irq, 0,
			       dev_name(dc->dev), dc);
	if (err < 0) {
		dev_err(dc->dev, "failed to request IRQ#%u: %d\n", dc->irq,
			err);
		return err;
	}

	return 0;
}
709
/*
 * host1x client callback: detach the controller from the DRM device.
 * Reverses tegra_dc_drm_init(): frees the IRQ, removes the debugfs entries
 * and shuts down the RGB output.
 */
static int tegra_dc_drm_exit(struct host1x_client *client)
{
	struct tegra_dc *dc = host1x_client_to_dc(client);
	int err;

	devm_free_irq(dc->dev, dc->irq, dc);

	/* debugfs cleanup failure is only logged, teardown continues */
	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
		err = tegra_dc_debugfs_exit(dc);
		if (err < 0)
			dev_err(dc->dev, "debugfs cleanup failed: %d\n", err);
	}

	err = tegra_dc_rgb_exit(dc);
	if (err) {
		dev_err(dc->dev, "failed to shutdown RGB output: %d\n", err);
		return err;
	}

	return 0;
}
731
/* host1x client callbacks invoked when the DRM device comes and goes */
static const struct host1x_client_ops dc_client_ops = {
	.drm_init = tegra_dc_drm_init,
	.drm_exit = tegra_dc_drm_exit,
};
736
737static int tegra_dc_probe(struct platform_device *pdev)
738{
739 struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
740 struct resource *regs;
741 struct tegra_dc *dc;
742 int err;
743
744 dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
745 if (!dc)
746 return -ENOMEM;
747
748 INIT_LIST_HEAD(&dc->list);
749 dc->dev = &pdev->dev;
750
751 dc->clk = devm_clk_get(&pdev->dev, NULL);
752 if (IS_ERR(dc->clk)) {
753 dev_err(&pdev->dev, "failed to get clock\n");
754 return PTR_ERR(dc->clk);
755 }
756
757 err = clk_prepare_enable(dc->clk);
758 if (err < 0)
759 return err;
760
761 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
762 if (!regs) {
763 dev_err(&pdev->dev, "failed to get registers\n");
764 return -ENXIO;
765 }
766
767 dc->regs = devm_request_and_ioremap(&pdev->dev, regs);
768 if (!dc->regs) {
769 dev_err(&pdev->dev, "failed to remap registers\n");
770 return -ENXIO;
771 }
772
773 dc->irq = platform_get_irq(pdev, 0);
774 if (dc->irq < 0) {
775 dev_err(&pdev->dev, "failed to get IRQ\n");
776 return -ENXIO;
777 }
778
779 INIT_LIST_HEAD(&dc->client.list);
780 dc->client.ops = &dc_client_ops;
781 dc->client.dev = &pdev->dev;
782
783 err = tegra_dc_rgb_probe(dc);
784 if (err < 0 && err != -ENODEV) {
785 dev_err(&pdev->dev, "failed to probe RGB output: %d\n", err);
786 return err;
787 }
788
789 err = host1x_register_client(host1x, &dc->client);
790 if (err < 0) {
791 dev_err(&pdev->dev, "failed to register host1x client: %d\n",
792 err);
793 return err;
794 }
795
796 platform_set_drvdata(pdev, dc);
797
798 return 0;
799}
800
/*
 * Platform remove: unregister from host1x and disable the controller clock.
 * Memory, register mapping and IRQ were devm-allocated and are released
 * automatically by the driver core.
 */
static int tegra_dc_remove(struct platform_device *pdev)
{
	struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
	struct tegra_dc *dc = platform_get_drvdata(pdev);
	int err;

	err = host1x_unregister_client(host1x, &dc->client);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
			err);
		return err;
	}

	clk_disable_unprepare(dc->clk);

	return 0;
}
818
819static struct of_device_id tegra_dc_of_match[] = {
820 { .compatible = "nvidia,tegra30-dc", },
821 { .compatible = "nvidia,tegra20-dc", },
822 { },
823};
824
/* Display controller platform driver; registered from the drm core code */
struct platform_driver tegra_dc_driver = {
	.driver = {
		.name = "tegra-dc",
		.owner = THIS_MODULE,
		.of_match_table = tegra_dc_of_match,
	},
	.probe = tegra_dc_probe,
	.remove = tegra_dc_remove,
};
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
new file mode 100644
index 000000000000..99977b5d5c36
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -0,0 +1,388 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#ifndef TEGRA_DC_H
11#define TEGRA_DC_H 1
12
13#define DC_CMD_GENERAL_INCR_SYNCPT 0x000
14#define DC_CMD_GENERAL_INCR_SYNCPT_CNTRL 0x001
15#define DC_CMD_GENERAL_INCR_SYNCPT_ERROR 0x002
16#define DC_CMD_WIN_A_INCR_SYNCPT 0x008
17#define DC_CMD_WIN_A_INCR_SYNCPT_CNTRL 0x009
18#define DC_CMD_WIN_A_INCR_SYNCPT_ERROR 0x00a
19#define DC_CMD_WIN_B_INCR_SYNCPT 0x010
20#define DC_CMD_WIN_B_INCR_SYNCPT_CNTRL 0x011
21#define DC_CMD_WIN_B_INCR_SYNCPT_ERROR 0x012
22#define DC_CMD_WIN_C_INCR_SYNCPT 0x018
23#define DC_CMD_WIN_C_INCR_SYNCPT_CNTRL 0x019
24#define DC_CMD_WIN_C_INCR_SYNCPT_ERROR 0x01a
25#define DC_CMD_CONT_SYNCPT_VSYNC 0x028
26#define DC_CMD_DISPLAY_COMMAND_OPTION0 0x031
27#define DC_CMD_DISPLAY_COMMAND 0x032
28#define DISP_CTRL_MODE_STOP (0 << 5)
29#define DISP_CTRL_MODE_C_DISPLAY (1 << 5)
30#define DISP_CTRL_MODE_NC_DISPLAY (2 << 5)
31#define DC_CMD_SIGNAL_RAISE 0x033
32#define DC_CMD_DISPLAY_POWER_CONTROL 0x036
33#define PW0_ENABLE (1 << 0)
34#define PW1_ENABLE (1 << 2)
35#define PW2_ENABLE (1 << 4)
36#define PW3_ENABLE (1 << 6)
37#define PW4_ENABLE (1 << 8)
38#define PM0_ENABLE (1 << 16)
39#define PM1_ENABLE (1 << 18)
40
41#define DC_CMD_INT_STATUS 0x037
42#define DC_CMD_INT_MASK 0x038
43#define DC_CMD_INT_ENABLE 0x039
44#define DC_CMD_INT_TYPE 0x03a
45#define DC_CMD_INT_POLARITY 0x03b
46#define CTXSW_INT (1 << 0)
47#define FRAME_END_INT (1 << 1)
48#define VBLANK_INT (1 << 2)
49#define WIN_A_UF_INT (1 << 8)
50#define WIN_B_UF_INT (1 << 9)
51#define WIN_C_UF_INT (1 << 10)
52#define WIN_A_OF_INT (1 << 14)
53#define WIN_B_OF_INT (1 << 15)
54#define WIN_C_OF_INT (1 << 16)
55
56#define DC_CMD_SIGNAL_RAISE1 0x03c
57#define DC_CMD_SIGNAL_RAISE2 0x03d
58#define DC_CMD_SIGNAL_RAISE3 0x03e
59
60#define DC_CMD_STATE_ACCESS 0x040
61
62#define DC_CMD_STATE_CONTROL 0x041
63#define GENERAL_ACT_REQ (1 << 0)
64#define WIN_A_ACT_REQ (1 << 1)
65#define WIN_B_ACT_REQ (1 << 2)
66#define WIN_C_ACT_REQ (1 << 3)
67#define GENERAL_UPDATE (1 << 8)
68#define WIN_A_UPDATE (1 << 9)
69#define WIN_B_UPDATE (1 << 10)
70#define WIN_C_UPDATE (1 << 11)
71#define NC_HOST_TRIG (1 << 24)
72
73#define DC_CMD_DISPLAY_WINDOW_HEADER 0x042
74#define WINDOW_A_SELECT (1 << 4)
75#define WINDOW_B_SELECT (1 << 5)
76#define WINDOW_C_SELECT (1 << 6)
77
78#define DC_CMD_REG_ACT_CONTROL 0x043
79
80#define DC_COM_CRC_CONTROL 0x300
81#define DC_COM_CRC_CHECKSUM 0x301
82#define DC_COM_PIN_OUTPUT_ENABLE(x) (0x302 + (x))
83#define DC_COM_PIN_OUTPUT_POLARITY(x) (0x306 + (x))
84#define LVS_OUTPUT_POLARITY_LOW (1 << 28)
85#define LHS_OUTPUT_POLARITY_LOW (1 << 30)
86#define DC_COM_PIN_OUTPUT_DATA(x) (0x30a + (x))
87#define DC_COM_PIN_INPUT_ENABLE(x) (0x30e + (x))
88#define DC_COM_PIN_INPUT_DATA(x) (0x312 + (x))
89#define DC_COM_PIN_OUTPUT_SELECT(x) (0x314 + (x))
90
91#define DC_COM_PIN_MISC_CONTROL 0x31b
92#define DC_COM_PIN_PM0_CONTROL 0x31c
93#define DC_COM_PIN_PM0_DUTY_CYCLE 0x31d
94#define DC_COM_PIN_PM1_CONTROL 0x31e
95#define DC_COM_PIN_PM1_DUTY_CYCLE 0x31f
96
97#define DC_COM_SPI_CONTROL 0x320
98#define DC_COM_SPI_START_BYTE 0x321
99#define DC_COM_HSPI_WRITE_DATA_AB 0x322
100#define DC_COM_HSPI_WRITE_DATA_CD 0x323
101#define DC_COM_HSPI_CS_DC 0x324
102#define DC_COM_SCRATCH_REGISTER_A 0x325
103#define DC_COM_SCRATCH_REGISTER_B 0x326
104#define DC_COM_GPIO_CTRL 0x327
105#define DC_COM_GPIO_DEBOUNCE_COUNTER 0x328
106#define DC_COM_CRC_CHECKSUM_LATCHED 0x329
107
108#define DC_DISP_DISP_SIGNAL_OPTIONS0 0x400
109#define H_PULSE_0_ENABLE (1 << 8)
110#define H_PULSE_1_ENABLE (1 << 10)
111#define H_PULSE_2_ENABLE (1 << 12)
112
113#define DC_DISP_DISP_SIGNAL_OPTIONS1 0x401
114
115#define DC_DISP_DISP_WIN_OPTIONS 0x402
116#define HDMI_ENABLE (1 << 30)
117
118#define DC_DISP_DISP_MEM_HIGH_PRIORITY 0x403
119#define CURSOR_THRESHOLD(x) (((x) & 0x03) << 24)
120#define WINDOW_A_THRESHOLD(x) (((x) & 0x7f) << 16)
121#define WINDOW_B_THRESHOLD(x) (((x) & 0x7f) << 8)
122#define WINDOW_C_THRESHOLD(x) (((x) & 0xff) << 0)
123
124#define DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER 0x404
125#define CURSOR_DELAY(x) (((x) & 0x3f) << 24)
126#define WINDOW_A_DELAY(x) (((x) & 0x3f) << 16)
127#define WINDOW_B_DELAY(x) (((x) & 0x3f) << 8)
128#define WINDOW_C_DELAY(x) (((x) & 0x3f) << 0)
129
130#define DC_DISP_DISP_TIMING_OPTIONS 0x405
131#define VSYNC_H_POSITION(x) ((x) & 0xfff)
132
133#define DC_DISP_REF_TO_SYNC 0x406
134#define DC_DISP_SYNC_WIDTH 0x407
135#define DC_DISP_BACK_PORCH 0x408
136#define DC_DISP_ACTIVE 0x409
137#define DC_DISP_FRONT_PORCH 0x40a
138#define DC_DISP_H_PULSE0_CONTROL 0x40b
139#define DC_DISP_H_PULSE0_POSITION_A 0x40c
140#define DC_DISP_H_PULSE0_POSITION_B 0x40d
141#define DC_DISP_H_PULSE0_POSITION_C 0x40e
142#define DC_DISP_H_PULSE0_POSITION_D 0x40f
143#define DC_DISP_H_PULSE1_CONTROL 0x410
144#define DC_DISP_H_PULSE1_POSITION_A 0x411
145#define DC_DISP_H_PULSE1_POSITION_B 0x412
146#define DC_DISP_H_PULSE1_POSITION_C 0x413
147#define DC_DISP_H_PULSE1_POSITION_D 0x414
148#define DC_DISP_H_PULSE2_CONTROL 0x415
149#define DC_DISP_H_PULSE2_POSITION_A 0x416
150#define DC_DISP_H_PULSE2_POSITION_B 0x417
151#define DC_DISP_H_PULSE2_POSITION_C 0x418
152#define DC_DISP_H_PULSE2_POSITION_D 0x419
153#define DC_DISP_V_PULSE0_CONTROL 0x41a
154#define DC_DISP_V_PULSE0_POSITION_A 0x41b
155#define DC_DISP_V_PULSE0_POSITION_B 0x41c
156#define DC_DISP_V_PULSE0_POSITION_C 0x41d
157#define DC_DISP_V_PULSE1_CONTROL 0x41e
158#define DC_DISP_V_PULSE1_POSITION_A 0x41f
159#define DC_DISP_V_PULSE1_POSITION_B 0x420
160#define DC_DISP_V_PULSE1_POSITION_C 0x421
161#define DC_DISP_V_PULSE2_CONTROL 0x422
162#define DC_DISP_V_PULSE2_POSITION_A 0x423
163#define DC_DISP_V_PULSE3_CONTROL 0x424
164#define DC_DISP_V_PULSE3_POSITION_A 0x425
165#define DC_DISP_M0_CONTROL 0x426
166#define DC_DISP_M1_CONTROL 0x427
167#define DC_DISP_DI_CONTROL 0x428
168#define DC_DISP_PP_CONTROL 0x429
169#define DC_DISP_PP_SELECT_A 0x42a
170#define DC_DISP_PP_SELECT_B 0x42b
171#define DC_DISP_PP_SELECT_C 0x42c
172#define DC_DISP_PP_SELECT_D 0x42d
173
174#define PULSE_MODE_NORMAL (0 << 3)
175#define PULSE_MODE_ONE_CLOCK (1 << 3)
176#define PULSE_POLARITY_HIGH (0 << 4)
177#define PULSE_POLARITY_LOW (1 << 4)
178#define PULSE_QUAL_ALWAYS (0 << 6)
179#define PULSE_QUAL_VACTIVE (2 << 6)
180#define PULSE_QUAL_VACTIVE1 (3 << 6)
181#define PULSE_LAST_START_A (0 << 8)
182#define PULSE_LAST_END_A (1 << 8)
183#define PULSE_LAST_START_B (2 << 8)
184#define PULSE_LAST_END_B (3 << 8)
185#define PULSE_LAST_START_C (4 << 8)
186#define PULSE_LAST_END_C (5 << 8)
187#define PULSE_LAST_START_D (6 << 8)
188#define PULSE_LAST_END_D (7 << 8)
189
190#define PULSE_START(x) (((x) & 0xfff) << 0)
191#define PULSE_END(x) (((x) & 0xfff) << 16)
192
193#define DC_DISP_DISP_CLOCK_CONTROL 0x42e
194#define PIXEL_CLK_DIVIDER_PCD1 (0 << 8)
195#define PIXEL_CLK_DIVIDER_PCD1H (1 << 8)
196#define PIXEL_CLK_DIVIDER_PCD2 (2 << 8)
197#define PIXEL_CLK_DIVIDER_PCD3 (3 << 8)
198#define PIXEL_CLK_DIVIDER_PCD4 (4 << 8)
199#define PIXEL_CLK_DIVIDER_PCD6 (5 << 8)
200#define PIXEL_CLK_DIVIDER_PCD8 (6 << 8)
201#define PIXEL_CLK_DIVIDER_PCD9 (7 << 8)
202#define PIXEL_CLK_DIVIDER_PCD12 (8 << 8)
203#define PIXEL_CLK_DIVIDER_PCD16 (9 << 8)
204#define PIXEL_CLK_DIVIDER_PCD18 (10 << 8)
205#define PIXEL_CLK_DIVIDER_PCD24 (11 << 8)
206#define PIXEL_CLK_DIVIDER_PCD13 (12 << 8)
207#define SHIFT_CLK_DIVIDER(x) ((x) & 0xff)
208
209#define DC_DISP_DISP_INTERFACE_CONTROL 0x42f
210#define DISP_DATA_FORMAT_DF1P1C (0 << 0)
211#define DISP_DATA_FORMAT_DF1P2C24B (1 << 0)
212#define DISP_DATA_FORMAT_DF1P2C18B (2 << 0)
213#define DISP_DATA_FORMAT_DF1P2C16B (3 << 0)
214#define DISP_DATA_FORMAT_DF2S (4 << 0)
215#define DISP_DATA_FORMAT_DF3S (5 << 0)
216#define DISP_DATA_FORMAT_DFSPI (6 << 0)
217#define DISP_DATA_FORMAT_DF1P3C24B (7 << 0)
218#define DISP_DATA_FORMAT_DF1P3C18B (8 << 0)
219#define DISP_ALIGNMENT_MSB (0 << 8)
220#define DISP_ALIGNMENT_LSB (1 << 8)
221#define DISP_ORDER_RED_BLUE (0 << 9)
222#define DISP_ORDER_BLUE_RED (1 << 9)
223
224#define DC_DISP_DISP_COLOR_CONTROL 0x430
225#define BASE_COLOR_SIZE666 (0 << 0)
226#define BASE_COLOR_SIZE111 (1 << 0)
227#define BASE_COLOR_SIZE222 (2 << 0)
228#define BASE_COLOR_SIZE333 (3 << 0)
229#define BASE_COLOR_SIZE444 (4 << 0)
230#define BASE_COLOR_SIZE555 (5 << 0)
231#define BASE_COLOR_SIZE565 (6 << 0)
232#define BASE_COLOR_SIZE332 (7 << 0)
233#define BASE_COLOR_SIZE888 (8 << 0)
234#define DITHER_CONTROL_DISABLE (0 << 8)
235#define DITHER_CONTROL_ORDERED (2 << 8)
236#define DITHER_CONTROL_ERRDIFF (3 << 8)
237
238#define DC_DISP_SHIFT_CLOCK_OPTIONS 0x431
239
240#define DC_DISP_DATA_ENABLE_OPTIONS 0x432
241#define DE_SELECT_ACTIVE_BLANK (0 << 0)
242#define DE_SELECT_ACTIVE (1 << 0)
243#define DE_SELECT_ACTIVE_IS (2 << 0)
244#define DE_CONTROL_ONECLK (0 << 2)
245#define DE_CONTROL_NORMAL (1 << 2)
246#define DE_CONTROL_EARLY_EXT (2 << 2)
247#define DE_CONTROL_EARLY (3 << 2)
248#define DE_CONTROL_ACTIVE_BLANK (4 << 2)
249
250#define DC_DISP_SERIAL_INTERFACE_OPTIONS 0x433
251#define DC_DISP_LCD_SPI_OPTIONS 0x434
252#define DC_DISP_BORDER_COLOR 0x435
253#define DC_DISP_COLOR_KEY0_LOWER 0x436
254#define DC_DISP_COLOR_KEY0_UPPER 0x437
255#define DC_DISP_COLOR_KEY1_LOWER 0x438
256#define DC_DISP_COLOR_KEY1_UPPER 0x439
257
258#define DC_DISP_CURSOR_FOREGROUND 0x43c
259#define DC_DISP_CURSOR_BACKGROUND 0x43d
260
261#define DC_DISP_CURSOR_START_ADDR 0x43e
262#define DC_DISP_CURSOR_START_ADDR_NS 0x43f
263
264#define DC_DISP_CURSOR_POSITION 0x440
265#define DC_DISP_CURSOR_POSITION_NS 0x441
266
267#define DC_DISP_INIT_SEQ_CONTROL 0x442
268#define DC_DISP_SPI_INIT_SEQ_DATA_A 0x443
269#define DC_DISP_SPI_INIT_SEQ_DATA_B 0x444
270#define DC_DISP_SPI_INIT_SEQ_DATA_C 0x445
271#define DC_DISP_SPI_INIT_SEQ_DATA_D 0x446
272
273#define DC_DISP_DC_MCCIF_FIFOCTRL 0x480
274#define DC_DISP_MCCIF_DISPLAY0A_HYST 0x481
275#define DC_DISP_MCCIF_DISPLAY0B_HYST 0x482
276#define DC_DISP_MCCIF_DISPLAY1A_HYST 0x483
277#define DC_DISP_MCCIF_DISPLAY1B_HYST 0x484
278
279#define DC_DISP_DAC_CRT_CTRL 0x4c0
280#define DC_DISP_DISP_MISC_CONTROL 0x4c1
281#define DC_DISP_SD_CONTROL 0x4c2
282#define DC_DISP_SD_CSC_COEFF 0x4c3
283#define DC_DISP_SD_LUT(x) (0x4c4 + (x))
284#define DC_DISP_SD_FLICKER_CONTROL 0x4cd
285#define DC_DISP_DC_PIXEL_COUNT 0x4ce
286#define DC_DISP_SD_HISTOGRAM(x) (0x4cf + (x))
287#define DC_DISP_SD_BL_PARAMETERS 0x4d7
288#define DC_DISP_SD_BL_TF(x) (0x4d8 + (x))
289#define DC_DISP_SD_BL_CONTROL 0x4dc
290#define DC_DISP_SD_HW_K_VALUES 0x4dd
291#define DC_DISP_SD_MAN_K_VALUES 0x4de
292
293#define DC_WIN_WIN_OPTIONS 0x700
294#define COLOR_EXPAND (1 << 6)
295#define WIN_ENABLE (1 << 30)
296
297#define DC_WIN_BYTE_SWAP 0x701
298#define BYTE_SWAP_NOSWAP (0 << 0)
299#define BYTE_SWAP_SWAP2 (1 << 0)
300#define BYTE_SWAP_SWAP4 (2 << 0)
301#define BYTE_SWAP_SWAP4HW (3 << 0)
302
303#define DC_WIN_BUFFER_CONTROL 0x702
304#define BUFFER_CONTROL_HOST (0 << 0)
305#define BUFFER_CONTROL_VI (1 << 0)
306#define BUFFER_CONTROL_EPP (2 << 0)
307#define BUFFER_CONTROL_MPEGE (3 << 0)
308#define BUFFER_CONTROL_SB2D (4 << 0)
309
310#define DC_WIN_COLOR_DEPTH 0x703
311#define WIN_COLOR_DEPTH_P1 0
312#define WIN_COLOR_DEPTH_P2 1
313#define WIN_COLOR_DEPTH_P4 2
314#define WIN_COLOR_DEPTH_P8 3
315#define WIN_COLOR_DEPTH_B4G4R4A4 4
316#define WIN_COLOR_DEPTH_B5G5R5A 5
317#define WIN_COLOR_DEPTH_B5G6R5 6
318#define WIN_COLOR_DEPTH_AB5G5R5 7
319#define WIN_COLOR_DEPTH_B8G8R8A8 12
320#define WIN_COLOR_DEPTH_R8G8B8A8 13
321#define WIN_COLOR_DEPTH_B6x2G6x2R6x2A8 14
322#define WIN_COLOR_DEPTH_R6x2G6x2B6x2A8 15
323#define WIN_COLOR_DEPTH_YCbCr422 16
324#define WIN_COLOR_DEPTH_YUV422 17
325#define WIN_COLOR_DEPTH_YCbCr420P 18
326#define WIN_COLOR_DEPTH_YUV420P 19
327#define WIN_COLOR_DEPTH_YCbCr422P 20
328#define WIN_COLOR_DEPTH_YUV422P 21
329#define WIN_COLOR_DEPTH_YCbCr422R 22
330#define WIN_COLOR_DEPTH_YUV422R 23
331#define WIN_COLOR_DEPTH_YCbCr422RA 24
332#define WIN_COLOR_DEPTH_YUV422RA 25
333
334#define DC_WIN_POSITION 0x704
335#define H_POSITION(x) (((x) & 0x1fff) << 0)
336#define V_POSITION(x) (((x) & 0x1fff) << 16)
337
338#define DC_WIN_SIZE 0x705
339#define H_SIZE(x) (((x) & 0x1fff) << 0)
340#define V_SIZE(x) (((x) & 0x1fff) << 16)
341
342#define DC_WIN_PRESCALED_SIZE 0x706
343#define H_PRESCALED_SIZE(x) (((x) & 0x7fff) << 0)
344#define V_PRESCALED_SIZE(x) (((x) & 0x1fff) << 16)
345
346#define DC_WIN_H_INITIAL_DDA 0x707
347#define DC_WIN_V_INITIAL_DDA 0x708
348#define DC_WIN_DDA_INC 0x709
349#define H_DDA_INC(x) (((x) & 0xffff) << 0)
350#define V_DDA_INC(x) (((x) & 0xffff) << 16)
351
352#define DC_WIN_LINE_STRIDE 0x70a
353#define DC_WIN_BUF_STRIDE 0x70b
354#define DC_WIN_UV_BUF_STRIDE 0x70c
355#define DC_WIN_BUFFER_ADDR_MODE 0x70d
356#define DC_WIN_DV_CONTROL 0x70e
357
358#define DC_WIN_BLEND_NOKEY 0x70f
359#define DC_WIN_BLEND_1WIN 0x710
360#define DC_WIN_BLEND_2WIN_X 0x711
361#define DC_WIN_BLEND_2WIN_Y 0x712
362#define DC_WIN_BLEND32WIN_XY 0x713
363
364#define DC_WIN_HP_FETCH_CONTROL 0x714
365
366#define DC_WINBUF_START_ADDR 0x800
367#define DC_WINBUF_START_ADDR_NS 0x801
368#define DC_WINBUF_START_ADDR_U 0x802
369#define DC_WINBUF_START_ADDR_U_NS 0x803
370#define DC_WINBUF_START_ADDR_V 0x804
371#define DC_WINBUF_START_ADDR_V_NS 0x805
372
373#define DC_WINBUF_ADDR_H_OFFSET 0x806
374#define DC_WINBUF_ADDR_H_OFFSET_NS 0x807
375#define DC_WINBUF_ADDR_V_OFFSET 0x808
376#define DC_WINBUF_ADDR_V_OFFSET_NS 0x809
377
378#define DC_WINBUF_UFLOW_STATUS 0x80a
379
380#define DC_WINBUF_AD_UFLOW_STATUS 0xbca
381#define DC_WINBUF_BD_UFLOW_STATUS 0xdca
382#define DC_WINBUF_CD_UFLOW_STATUS 0xfca
383
384/* synchronization points */
385#define SYNCPT_VBLANK0 26
386#define SYNCPT_VBLANK1 27
387
388#endif /* TEGRA_DC_H */
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
new file mode 100644
index 000000000000..3a503c9e4686
--- /dev/null
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -0,0 +1,115 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/module.h>
11#include <linux/of_address.h>
12#include <linux/of_platform.h>
13
14#include <mach/clk.h>
15#include <linux/dma-mapping.h>
16#include <asm/dma-iommu.h>
17
18#include "drm.h"
19
20#define DRIVER_NAME "tegra"
21#define DRIVER_DESC "NVIDIA Tegra graphics"
22#define DRIVER_DATE "20120330"
23#define DRIVER_MAJOR 0
24#define DRIVER_MINOR 0
25#define DRIVER_PATCHLEVEL 0
26
27static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
28{
29 struct device *dev = drm->dev;
30 struct host1x *host1x;
31 int err;
32
33 host1x = dev_get_drvdata(dev);
34 drm->dev_private = host1x;
35 host1x->drm = drm;
36
37 drm_mode_config_init(drm);
38
39 err = host1x_drm_init(host1x, drm);
40 if (err < 0)
41 return err;
42
43 err = tegra_drm_fb_init(drm);
44 if (err < 0)
45 return err;
46
47 drm_kms_helper_poll_init(drm);
48
49 return 0;
50}
51
/*
 * DRM driver .unload callback: stop output polling, tear down the fbdev
 * emulation and release the mode configuration.
 *
 * NOTE(review): host1x_drm_exit() is not called here even though
 * tegra_drm_load() calls host1x_drm_init() — confirm whether client
 * teardown is handled elsewhere.
 */
static int tegra_drm_unload(struct drm_device *drm)
{
	drm_kms_helper_poll_fini(drm);
	tegra_drm_fb_exit(drm);

	drm_mode_config_cleanup(drm);

	return 0;
}
61
/* DRM .open callback: no per-file state is needed yet */
static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
	return 0;
}
66
/* DRM .lastclose callback: restore the fbdev mode when userspace exits */
static void tegra_drm_lastclose(struct drm_device *drm)
{
	struct host1x *host1x = drm->dev_private;

	drm_fbdev_cma_restore_mode(host1x->fbdev);
}
73
/* no driver-specific ioctls yet; the table exists so .num_ioctls is 0 */
static struct drm_ioctl_desc tegra_drm_ioctls[] = {
};
76
/* file operations for /dev/dri/cardN; mmap goes through the GEM CMA helper */
static const struct file_operations tegra_drm_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_cma_mmap,
	.poll = drm_poll,
	.fasync = drm_fasync,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = drm_compat_ioctl,
#endif
	.llseek = noop_llseek,
};
91
/*
 * Top-level DRM driver description: KMS-only driver with GEM objects
 * backed by the CMA helpers (contiguous memory, no GPU command submission
 * support yet).
 */
struct drm_driver tegra_drm_driver = {
	.driver_features = DRIVER_BUS_PLATFORM | DRIVER_MODESET | DRIVER_GEM,
	.load = tegra_drm_load,
	.unload = tegra_drm_unload,
	.open = tegra_drm_open,
	.lastclose = tegra_drm_lastclose,

	/* buffer management is delegated entirely to the GEM CMA helpers */
	.gem_free_object = drm_gem_cma_free_object,
	.gem_vm_ops = &drm_gem_cma_vm_ops,
	.dumb_create = drm_gem_cma_dumb_create,
	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
	.dumb_destroy = drm_gem_cma_dumb_destroy,

	.ioctls = tegra_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
	.fops = &tegra_drm_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
new file mode 100644
index 000000000000..741b5dc2742c
--- /dev/null
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -0,0 +1,216 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#ifndef TEGRA_DRM_H
11#define TEGRA_DRM_H 1
12
13#include <drm/drmP.h>
14#include <drm/drm_crtc_helper.h>
15#include <drm/drm_edid.h>
16#include <drm/drm_fb_helper.h>
17#include <drm/drm_gem_cma_helper.h>
18#include <drm/drm_fb_cma_helper.h>
19#include <drm/drm_fixed.h>
20
/* DRM framebuffer backed by a single GEM CMA object */
struct tegra_framebuffer {
	struct drm_framebuffer base;
	struct drm_gem_cma_object *obj;
};

/* downcast a generic DRM framebuffer to the Tegra wrapper */
static inline struct tegra_framebuffer *to_tegra_fb(struct drm_framebuffer *fb)
{
	return container_of(fb, struct tegra_framebuffer, base);
}

/*
 * Central host1x state. Owns the lists of registered clients: drm_clients
 * holds clients waiting for the DRM device, drm_active those already bound.
 */
struct host1x {
	struct drm_device *drm;
	struct device *dev;
	void __iomem *regs;
	struct clk *clk;
	int syncpt;
	int irq;

	struct mutex drm_clients_lock;
	struct list_head drm_clients;
	struct list_head drm_active;

	struct mutex clients_lock;
	struct list_head clients;

	struct drm_fbdev_cma *fbdev;
	struct tegra_framebuffer fb;
};

struct host1x_client;

/* callbacks invoked when the DRM device is initialized / torn down */
struct host1x_client_ops {
	int (*drm_init)(struct host1x_client *client, struct drm_device *drm);
	int (*drm_exit)(struct host1x_client *client);
};

/* a hardware unit (display controller, HDMI, ...) attached to host1x */
struct host1x_client {
	struct host1x *host1x;
	struct device *dev;

	const struct host1x_client_ops *ops;

	struct list_head list;
};
65
/* host1x client management (implemented elsewhere in the driver) */
extern int host1x_drm_init(struct host1x *host1x, struct drm_device *drm);
extern int host1x_drm_exit(struct host1x *host1x);

extern int host1x_register_client(struct host1x *host1x,
				  struct host1x_client *client);
extern int host1x_unregister_client(struct host1x *host1x,
				    struct host1x_client *client);

struct tegra_output;

/* per-pipe display controller state; base embeds the DRM CRTC */
struct tegra_dc {
	struct host1x_client client;

	struct host1x *host1x;
	struct device *dev;

	struct drm_crtc base;
	int pipe;		/* CRTC index within the DRM device */

	struct clk *clk;

	void __iomem *regs;
	int irq;

	struct tegra_output *rgb;

	struct list_head list;

	/* debugfs state, populated by tegra_dc_debugfs_init() */
	struct drm_info_list *debugfs_files;
	struct drm_minor *minor;
	struct dentry *debugfs;
};

static inline struct tegra_dc *host1x_client_to_dc(struct host1x_client *client)
{
	return container_of(client, struct tegra_dc, client);
}

static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc)
{
	return container_of(crtc, struct tegra_dc, base);
}

/* register accessors; reg is a word offset (see dc.h), hence the << 2 */
static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value,
				   unsigned long reg)
{
	writel(value, dc->regs + (reg << 2));
}

static inline unsigned long tegra_dc_readl(struct tegra_dc *dc,
					   unsigned long reg)
{
	return readl(dc->regs + (reg << 2));
}
120
/* operations implemented by each output type (RGB, HDMI, ...) */
struct tegra_output_ops {
	int (*enable)(struct tegra_output *output);
	int (*disable)(struct tegra_output *output);
	int (*setup_clock)(struct tegra_output *output, struct clk *clk,
			   unsigned long pclk);
	int (*check_mode)(struct tegra_output *output,
			  struct drm_display_mode *mode,
			  enum drm_mode_status *status);
};

enum tegra_output_type {
	TEGRA_OUTPUT_RGB,
	TEGRA_OUTPUT_HDMI,
};

/* a display output; embeds both the DRM encoder and connector */
struct tegra_output {
	struct device_node *of_node;
	struct device *dev;

	const struct tegra_output_ops *ops;
	enum tegra_output_type type;

	struct i2c_adapter *ddc;	/* I2C bus used for EDID reads */
	const struct edid *edid;
	unsigned int hpd_irq;		/* hotplug-detect interrupt */
	int hpd_gpio;

	struct drm_encoder encoder;
	struct drm_connector connector;
};

static inline struct tegra_output *encoder_to_output(struct drm_encoder *e)
{
	return container_of(e, struct tegra_output, encoder);
}

static inline struct tegra_output *connector_to_output(struct drm_connector *c)
{
	return container_of(c, struct tegra_output, connector);
}
161
162static inline int tegra_output_enable(struct tegra_output *output)
163{
164 if (output && output->ops && output->ops->enable)
165 return output->ops->enable(output);
166
167 return output ? -ENOSYS : -EINVAL;
168}
169
170static inline int tegra_output_disable(struct tegra_output *output)
171{
172 if (output && output->ops && output->ops->disable)
173 return output->ops->disable(output);
174
175 return output ? -ENOSYS : -EINVAL;
176}
177
178static inline int tegra_output_setup_clock(struct tegra_output *output,
179 struct clk *clk, unsigned long pclk)
180{
181 if (output && output->ops && output->ops->setup_clock)
182 return output->ops->setup_clock(output, clk, pclk);
183
184 return output ? -ENOSYS : -EINVAL;
185}
186
187static inline int tegra_output_check_mode(struct tegra_output *output,
188 struct drm_display_mode *mode,
189 enum drm_mode_status *status)
190{
191 if (output && output->ops && output->ops->check_mode)
192 return output->ops->check_mode(output, mode, status);
193
194 return output ? -ENOSYS : -EINVAL;
195}
196
/* from rgb.c: RGB/LVDS panel output attached to a display controller */
extern int tegra_dc_rgb_probe(struct tegra_dc *dc);
extern int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc);
extern int tegra_dc_rgb_exit(struct tegra_dc *dc);

/* from output.c: common output setup shared by RGB and HDMI */
extern int tegra_output_parse_dt(struct tegra_output *output);
extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
extern int tegra_output_exit(struct tegra_output *output);

/* from fb.c: fbdev emulation on top of the CMA helpers */
extern int tegra_drm_fb_init(struct drm_device *drm);
extern void tegra_drm_fb_exit(struct drm_device *drm);

extern struct platform_driver tegra_host1x_driver;
extern struct platform_driver tegra_hdmi_driver;
extern struct platform_driver tegra_dc_driver;
extern struct drm_driver tegra_drm_driver;

#endif /* TEGRA_DRM_H */
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
new file mode 100644
index 000000000000..97993c6835fd
--- /dev/null
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -0,0 +1,56 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include "drm.h"
11
12static void tegra_drm_fb_output_poll_changed(struct drm_device *drm)
13{
14 struct host1x *host1x = drm->dev_private;
15
16 drm_fbdev_cma_hotplug_event(host1x->fbdev);
17}
18
/* mode config callbacks; framebuffer creation uses the CMA helper */
static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
	.fb_create = drm_fb_cma_create,
	.output_poll_changed = tegra_drm_fb_output_poll_changed,
};
23
/*
 * Set up mode-configuration limits and create the CMA fbdev emulation.
 * Must run after all CRTCs and connectors have been registered, since the
 * counts below size the fbdev. Returns 0 on success, negative errno on
 * failure.
 */
int tegra_drm_fb_init(struct drm_device *drm)
{
	struct host1x *host1x = drm->dev_private;
	struct drm_fbdev_cma *fbdev;

	drm->mode_config.min_width = 0;
	drm->mode_config.min_height = 0;

	/* hardware limit for framebuffer dimensions */
	drm->mode_config.max_width = 4096;
	drm->mode_config.max_height = 4096;

	drm->mode_config.funcs = &tegra_drm_mode_funcs;

	/* 32 bpp preferred depth for the emulated fbdev */
	fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
				   drm->mode_config.num_connector);
	if (IS_ERR(fbdev))
		return PTR_ERR(fbdev);

	/* without fbcon, activate the fbdev configuration immediately */
#ifndef CONFIG_FRAMEBUFFER_CONSOLE
	drm_fbdev_cma_restore_mode(fbdev);
#endif

	host1x->fbdev = fbdev;

	return 0;
}
50
51void tegra_drm_fb_exit(struct drm_device *drm)
52{
53 struct host1x *host1x = drm->dev_private;
54
55 drm_fbdev_cma_fini(host1x->fbdev);
56}
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
new file mode 100644
index 000000000000..e060c7e6434d
--- /dev/null
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -0,0 +1,1321 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/clk.h>
11#include <linux/debugfs.h>
12#include <linux/gpio.h>
13#include <linux/module.h>
14#include <linux/of.h>
15#include <linux/platform_device.h>
16#include <linux/regulator/consumer.h>
17
18#include <mach/clk.h>
19
20#include "hdmi.h"
21#include "drm.h"
22#include "dc.h"
23
/* Per-instance state of the Tegra HDMI output. */
struct tegra_hdmi {
	struct host1x_client client;
	struct tegra_output output;
	struct device *dev;

	/* power supplies: HDMI logic (vdd) and the SOR PLL */
	struct regulator *vdd;
	struct regulator *pll;

	/* memory-mapped registers and interrupt line */
	void __iomem *regs;
	unsigned int irq;

	/* module clock and its parent (pll_d_out0 or pll_d2_out) */
	struct clk *clk_parent;
	struct clk *clk;

	unsigned int audio_source;	/* AUTO, SPDIF or HDA */
	unsigned int audio_freq;	/* audio sample rate in Hz */
	bool stereo;			/* emit the stereoscopic (3D) infoframe */
	bool dvi;			/* sink is DVI: skip audio and infoframes */

	/* debugfs entries registered against the DRM minor */
	struct drm_info_list *debugfs_files;
	struct drm_minor *minor;
	struct dentry *debugfs;
};
47
/* Convert an embedded host1x_client back to its tegra_hdmi container. */
static inline struct tegra_hdmi *
host1x_client_to_hdmi(struct host1x_client *client)
{
	return container_of(client, struct tegra_hdmi, client);
}
53
/* Convert an embedded tegra_output back to its tegra_hdmi container. */
static inline struct tegra_hdmi *to_hdmi(struct tegra_output *output)
{
	return container_of(output, struct tegra_hdmi, output);
}
58
/* Fixed audio reference clock and default TMDS rekey value. */
#define HDMI_AUDIOCLK_FREQ 216000000
#define HDMI_REKEY_DEFAULT 56

/* possible values for tegra_hdmi.audio_source */
enum {
	AUTO = 0,
	SPDIF,
	HDA,
};
67
/* Read an HDMI register; @reg is a 32-bit word offset, hence the << 2. */
static inline unsigned long tegra_hdmi_readl(struct tegra_hdmi *hdmi,
					     unsigned long reg)
{
	return readl(hdmi->regs + (reg << 2));
}
73
/* Write an HDMI register; @reg is a 32-bit word offset, hence the << 2. */
static inline void tegra_hdmi_writel(struct tegra_hdmi *hdmi, unsigned long val,
				     unsigned long reg)
{
	writel(val, hdmi->regs + (reg << 2));
}
79
/*
 * Audio clock regeneration parameters for one pixel clock / sample rate
 * combination: ACR N and CTS values plus the AVAL register setting.
 */
struct tegra_hdmi_audio_config {
	unsigned int pclk;	/* pixel clock in Hz this entry applies to */
	unsigned int n;
	unsigned int cts;
	unsigned int aval;
};
86
/*
 * Per-sample-rate ACR lookup tables, keyed by pixel clock and terminated
 * by an all-zero sentinel entry (see tegra_hdmi_get_audio_config()).
 */
static const struct tegra_hdmi_audio_config tegra_hdmi_audio_32k[] = {
	{ 25200000, 4096, 25200, 24000 },
	{ 27000000, 4096, 27000, 24000 },
	{ 74250000, 4096, 74250, 24000 },
	{ 148500000, 4096, 148500, 24000 },
	{ 0, 0, 0, 0 },
};

static const struct tegra_hdmi_audio_config tegra_hdmi_audio_44_1k[] = {
	{ 25200000, 5880, 26250, 25000 },
	{ 27000000, 5880, 28125, 25000 },
	{ 74250000, 4704, 61875, 20000 },
	{ 148500000, 4704, 123750, 20000 },
	{ 0, 0, 0, 0 },
};

static const struct tegra_hdmi_audio_config tegra_hdmi_audio_48k[] = {
	{ 25200000, 6144, 25200, 24000 },
	{ 27000000, 6144, 27000, 24000 },
	{ 74250000, 6144, 74250, 24000 },
	{ 148500000, 6144, 148500, 24000 },
	{ 0, 0, 0, 0 },
};

static const struct tegra_hdmi_audio_config tegra_hdmi_audio_88_2k[] = {
	{ 25200000, 11760, 26250, 25000 },
	{ 27000000, 11760, 28125, 25000 },
	{ 74250000, 9408, 61875, 20000 },
	{ 148500000, 9408, 123750, 20000 },
	{ 0, 0, 0, 0 },
};

static const struct tegra_hdmi_audio_config tegra_hdmi_audio_96k[] = {
	{ 25200000, 12288, 25200, 24000 },
	{ 27000000, 12288, 27000, 24000 },
	{ 74250000, 12288, 74250, 24000 },
	{ 148500000, 12288, 148500, 24000 },
	{ 0, 0, 0, 0 },
};

static const struct tegra_hdmi_audio_config tegra_hdmi_audio_176_4k[] = {
	{ 25200000, 23520, 26250, 25000 },
	{ 27000000, 23520, 28125, 25000 },
	{ 74250000, 18816, 61875, 20000 },
	{ 148500000, 18816, 123750, 20000 },
	{ 0, 0, 0, 0 },
};

static const struct tegra_hdmi_audio_config tegra_hdmi_audio_192k[] = {
	{ 25200000, 24576, 25200, 24000 },
	{ 27000000, 24576, 27000, 24000 },
	{ 74250000, 24576, 74250, 24000 },
	{ 148500000, 24576, 148500, 24000 },
	{ 0, 0, 0, 0 },
};
142
/*
 * TMDS driver settings (SOR PLL, pre-emphasis and drive currents) for
 * all modes whose pixel clock does not exceed @pclk.
 */
struct tmds_config {
	unsigned int pclk;
	u32 pll0;
	u32 pll1;
	u32 pe_current;
	u32 drive_current;
};
150
/* TMDS settings for Tegra20, selected by pixel clock in ascending order. */
static const struct tmds_config tegra2_tmds_config[] = {
	{ /* slow pixel clock modes */
		.pclk = 27000000,
		.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
			SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) |
			SOR_PLL_TX_REG_LOAD(3),
		.pll1 = SOR_PLL_TMDS_TERM_ENABLE,
		.pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) |
			PE_CURRENT1(PE_CURRENT_0_0_mA) |
			PE_CURRENT2(PE_CURRENT_0_0_mA) |
			PE_CURRENT3(PE_CURRENT_0_0_mA),
		.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
			DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
			DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
			DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
	},
	{ /* high pixel clock modes */
		.pclk = UINT_MAX,
		.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
			SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) |
			SOR_PLL_TX_REG_LOAD(3),
		.pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
		.pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) |
			PE_CURRENT1(PE_CURRENT_6_0_mA) |
			PE_CURRENT2(PE_CURRENT_6_0_mA) |
			PE_CURRENT3(PE_CURRENT_6_0_mA),
		.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
			DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
			DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
			DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
	},
};
183
/* TMDS settings for Tegra30, selected by pixel clock in ascending order. */
static const struct tmds_config tegra3_tmds_config[] = {
	{ /* 480p modes */
		.pclk = 27000000,
		.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
			SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) |
			SOR_PLL_TX_REG_LOAD(0),
		.pll1 = SOR_PLL_TMDS_TERM_ENABLE,
		.pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) |
			PE_CURRENT1(PE_CURRENT_0_0_mA) |
			PE_CURRENT2(PE_CURRENT_0_0_mA) |
			PE_CURRENT3(PE_CURRENT_0_0_mA),
		.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
			DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
			DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
			DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
	}, { /* 720p modes */
		.pclk = 74250000,
		.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
			SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) |
			SOR_PLL_TX_REG_LOAD(0),
		.pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
		.pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) |
			PE_CURRENT1(PE_CURRENT_5_0_mA) |
			PE_CURRENT2(PE_CURRENT_5_0_mA) |
			PE_CURRENT3(PE_CURRENT_5_0_mA),
		.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
			DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
			DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
			DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
	}, { /* 1080p modes */
		.pclk = UINT_MAX,
		.pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
			SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(3) |
			SOR_PLL_TX_REG_LOAD(0),
		.pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
		.pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) |
			PE_CURRENT1(PE_CURRENT_5_0_mA) |
			PE_CURRENT2(PE_CURRENT_5_0_mA) |
			PE_CURRENT3(PE_CURRENT_5_0_mA),
		.drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
			DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
			DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
			DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
	},
};
229
230static const struct tegra_hdmi_audio_config *
231tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk)
232{
233 const struct tegra_hdmi_audio_config *table;
234
235 switch (audio_freq) {
236 case 32000:
237 table = tegra_hdmi_audio_32k;
238 break;
239
240 case 44100:
241 table = tegra_hdmi_audio_44_1k;
242 break;
243
244 case 48000:
245 table = tegra_hdmi_audio_48k;
246 break;
247
248 case 88200:
249 table = tegra_hdmi_audio_88_2k;
250 break;
251
252 case 96000:
253 table = tegra_hdmi_audio_96k;
254 break;
255
256 case 176400:
257 table = tegra_hdmi_audio_176_4k;
258 break;
259
260 case 192000:
261 table = tegra_hdmi_audio_192k;
262 break;
263
264 default:
265 return NULL;
266 }
267
268 while (table->pclk) {
269 if (table->pclk == pclk)
270 return table;
271
272 table++;
273 }
274
275 return NULL;
276}
277
278static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi)
279{
280 const unsigned int freqs[] = {
281 32000, 44100, 48000, 88200, 96000, 176400, 192000
282 };
283 unsigned int i;
284
285 for (i = 0; i < ARRAY_SIZE(freqs); i++) {
286 unsigned int f = freqs[i];
287 unsigned int eight_half;
288 unsigned long value;
289 unsigned int delta;
290
291 if (f > 96000)
292 delta = 2;
293 else if (f > 480000)
294 delta = 6;
295 else
296 delta = 9;
297
298 eight_half = (8 * HDMI_AUDIOCLK_FREQ) / (f * 128);
299 value = AUDIO_FS_LOW(eight_half - delta) |
300 AUDIO_FS_HIGH(eight_half + delta);
301 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_FS(i));
302 }
303}
304
/*
 * Configure the HDMI audio path for the given pixel clock: select the
 * audio source, program the ACR (audio clock regeneration) N/CTS
 * registers and, on Tegra30, the per-rate AVAL register, then set up
 * the AUDIO_FS lookup tables.
 *
 * Returns 0 on success or -EINVAL if no ACR configuration exists for
 * the current audio frequency / pixel clock combination.
 */
static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk)
{
	struct device_node *node = hdmi->dev->of_node;
	const struct tegra_hdmi_audio_config *config;
	unsigned int offset = 0;
	unsigned long value;

	/* select the configured audio source */
	switch (hdmi->audio_source) {
	case HDA:
		value = AUDIO_CNTRL0_SOURCE_SELECT_HDAL;
		break;

	case SPDIF:
		value = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
		break;

	default:
		value = AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
		break;
	}

	/* the source-select and tolerance bits land in different registers
	 * depending on the SoC generation */
	if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
		value |= AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
			 AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0);
		tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
	} else {
		value |= AUDIO_CNTRL0_INJECT_NULLSMPL;
		tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);

		value = AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
			AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0);
		tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
	}

	config = tegra_hdmi_get_audio_config(hdmi->audio_freq, pclk);
	if (!config) {
		dev_err(hdmi->dev, "cannot set audio to %u at %u pclk\n",
			hdmi->audio_freq, pclk);
		return -EINVAL;
	}

	tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_HDMI_ACR_CTRL);

	/* program N while held in reset, then release the reset below */
	value = AUDIO_N_RESETF | AUDIO_N_GENERATE_ALTERNATE |
		AUDIO_N_VALUE(config->n - 1);
	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N);

	tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config->n) | ACR_ENABLE,
			  HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);

	value = ACR_SUBPACK_CTS(config->cts);
	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);

	value = SPARE_HW_CTS | SPARE_FORCE_SW_CTS | SPARE_CTS_RESET_VAL(1);
	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_SPARE);

	/* release the N counter reset */
	value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_AUDIO_N);
	value &= ~AUDIO_N_RESETF;
	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N);

	/* Tegra30 has a per-sample-rate AVAL register */
	if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
		switch (hdmi->audio_freq) {
		case 32000:
			offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320;
			break;

		case 44100:
			offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441;
			break;

		case 48000:
			offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480;
			break;

		case 88200:
			offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882;
			break;

		case 96000:
			offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960;
			break;

		case 176400:
			offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764;
			break;

		case 192000:
			offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920;
			break;
		}

		tegra_hdmi_writel(hdmi, config->aval, offset);
	}

	tegra_hdmi_setup_audio_fs_tables(hdmi);

	return 0;
}
403
/*
 * Write a generic infoframe to the hardware infoframe registers
 * starting at @offset. @type and @version form the infoframe header;
 * @data points to the payload, whose first byte is reserved for the
 * checksum computed here (the buffer is modified in place).
 */
static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi,
				      unsigned int offset, u8 type,
				      u8 version, void *data, size_t size)
{
	unsigned long value;
	u8 *ptr = data;
	u32 subpack[2];
	size_t i;
	u8 csum;

	/* first byte of data is the checksum */
	csum = type + version + size - 1;

	for (i = 1; i < size; i++)
		csum += ptr[i];

	ptr[0] = 0x100 - csum;

	value = INFOFRAME_HEADER_TYPE(type) |
		INFOFRAME_HEADER_VERSION(version) |
		INFOFRAME_HEADER_LEN(size - 1);
	tegra_hdmi_writel(hdmi, value, offset);

	/* The audio infoframe only has one set of subpack registers. The hdmi
	 * block pads the rest of the data as per the spec so we have to fixup
	 * the length before filling in the subpacks.
	 */
	if (offset == HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER)
		size = 6;

	/* each subpack 7 bytes divided into:
	 * subpack_low - bytes 0 - 3
	 * subpack_high - bytes 4 - 6 (with byte 7 padded to 0x00)
	 */
	for (i = 0; i < size; i++) {
		size_t index = i % 7;

		/* start a fresh subpack every 7 bytes */
		if (index == 0)
			memset(subpack, 0x0, sizeof(subpack));

		((u8 *)subpack)[index] = ptr[i];

		/* flush a complete (or final partial) subpack to registers */
		if (index == 6 || (i + 1 == size)) {
			unsigned int reg = offset + 1 + (i / 7) * 2;

			tegra_hdmi_writel(hdmi, subpack[0], reg);
			tegra_hdmi_writel(hdmi, subpack[1], reg + 1);
		}
	}
}
454
/*
 * Build and enable the AVI infoframe for @mode, deriving the aspect
 * ratio and VIC from the mode's resolution and horizontal front porch.
 * In DVI mode the infoframe is disabled instead.
 */
static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
					   struct drm_display_mode *mode)
{
	struct hdmi_avi_infoframe frame;
	unsigned int h_front_porch;
	unsigned int hsize = 16;
	unsigned int vsize = 9;

	/* DVI sinks take no infoframes */
	if (hdmi->dvi) {
		tegra_hdmi_writel(hdmi, 0,
				  HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
		return;
	}

	h_front_porch = mode->hsync_start - mode->hdisplay;
	memset(&frame, 0, sizeof(frame));
	frame.r = HDMI_AVI_R_SAME;

	switch (mode->vdisplay) {
	case 480:
		if (mode->hdisplay == 640) {
			frame.m = HDMI_AVI_M_4_3;
			frame.vic = 1;
		} else {
			frame.m = HDMI_AVI_M_16_9;
			frame.vic = 3;
		}
		break;

	case 576:
		/* NOTE(review): hsize/vsize are fixed at 16/9 above, so this
		 * always takes the 16:9 branch — confirm intent */
		if (((hsize * 10) / vsize) > 14) {
			frame.m = HDMI_AVI_M_16_9;
			frame.vic = 18;
		} else {
			frame.m = HDMI_AVI_M_4_3;
			frame.vic = 17;
		}
		break;

	case 720:
	case 1470: /* stereo mode */
		frame.m = HDMI_AVI_M_16_9;

		/* distinguish 60 Hz (VIC 4) from 50 Hz (VIC 19) timings by
		 * their front porch */
		if (h_front_porch == 110)
			frame.vic = 4;
		else
			frame.vic = 19;
		break;

	case 1080:
	case 2205: /* stereo mode */
		frame.m = HDMI_AVI_M_16_9;

		switch (h_front_porch) {
		case 88:
			frame.vic = 16;
			break;

		case 528:
			frame.vic = 31;
			break;

		default:
			frame.vic = 32;
			break;
		}
		break;

	default:
		/* unknown resolution: no VIC */
		frame.m = HDMI_AVI_M_16_9;
		frame.vic = 0;
		break;
	}

	tegra_hdmi_write_infopack(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER,
				  HDMI_INFOFRAME_TYPE_AVI, HDMI_AVI_VERSION,
				  &frame, sizeof(frame));

	tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
			  HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
}
536
537static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
538{
539 struct hdmi_audio_infoframe frame;
540
541 if (hdmi->dvi) {
542 tegra_hdmi_writel(hdmi, 0,
543 HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
544 return;
545 }
546
547 memset(&frame, 0, sizeof(frame));
548 frame.cc = HDMI_AUDIO_CC_2;
549
550 tegra_hdmi_write_infopack(hdmi,
551 HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER,
552 HDMI_INFOFRAME_TYPE_AUDIO,
553 HDMI_AUDIO_VERSION,
554 &frame, sizeof(frame));
555
556 tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
557 HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
558}
559
/*
 * Build and enable the HDMI vendor-specific (stereo/3D) infoframe, or
 * disable the generic infoframe when stereo output is off.
 */
static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
{
	struct hdmi_stereo_infoframe frame;
	unsigned long value;

	if (!hdmi->stereo) {
		value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
		value &= ~GENERIC_CTRL_ENABLE;
		tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
		return;
	}

	memset(&frame, 0, sizeof(frame));
	/* HDMI vendor-specific infoframe IEEE registration id 0x000c03 */
	frame.regid0 = 0x03;
	frame.regid1 = 0x0c;
	frame.regid2 = 0x00;
	frame.hdmi_video_format = 2;

	/* TODO: 74 MHz limit? */
	if (1) {
		frame._3d_structure = 0;
	} else {
		/* dead branch kept for the pending pixel-clock decision above */
		frame._3d_structure = 8;
		frame._3d_ext_data = 0;
	}

	tegra_hdmi_write_infopack(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_HEADER,
				  HDMI_INFOFRAME_TYPE_VENDOR,
				  HDMI_VENDOR_VERSION, &frame, 6);

	value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
	value |= GENERIC_CTRL_ENABLE;
	tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
}
594
595static void tegra_hdmi_setup_tmds(struct tegra_hdmi *hdmi,
596 const struct tmds_config *tmds)
597{
598 unsigned long value;
599
600 tegra_hdmi_writel(hdmi, tmds->pll0, HDMI_NV_PDISP_SOR_PLL0);
601 tegra_hdmi_writel(hdmi, tmds->pll1, HDMI_NV_PDISP_SOR_PLL1);
602 tegra_hdmi_writel(hdmi, tmds->pe_current, HDMI_NV_PDISP_PE_CURRENT);
603
604 value = tmds->drive_current | DRIVE_CURRENT_FUSE_OVERRIDE;
605 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
606}
607
608static int tegra_output_hdmi_enable(struct tegra_output *output)
609{
610 unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey;
611 struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
612 struct drm_display_mode *mode = &dc->base.mode;
613 struct tegra_hdmi *hdmi = to_hdmi(output);
614 struct device_node *node = hdmi->dev->of_node;
615 unsigned int pulse_start, div82, pclk;
616 const struct tmds_config *tmds;
617 unsigned int num_tmds;
618 unsigned long value;
619 int retries = 1000;
620 int err;
621
622 pclk = mode->clock * 1000;
623 h_sync_width = mode->hsync_end - mode->hsync_start;
624 h_back_porch = mode->htotal - mode->hsync_end;
625 h_front_porch = mode->hsync_start - mode->hdisplay;
626
627 err = regulator_enable(hdmi->vdd);
628 if (err < 0) {
629 dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err);
630 return err;
631 }
632
633 err = regulator_enable(hdmi->pll);
634 if (err < 0) {
635 dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err);
636 return err;
637 }
638
639 /*
640 * This assumes that the display controller will divide its parent
641 * clock by 2 to generate the pixel clock.
642 */
643 err = tegra_output_setup_clock(output, hdmi->clk, pclk * 2);
644 if (err < 0) {
645 dev_err(hdmi->dev, "failed to setup clock: %d\n", err);
646 return err;
647 }
648
649 err = clk_set_rate(hdmi->clk, pclk);
650 if (err < 0)
651 return err;
652
653 err = clk_enable(hdmi->clk);
654 if (err < 0) {
655 dev_err(hdmi->dev, "failed to enable clock: %d\n", err);
656 return err;
657 }
658
659 tegra_periph_reset_assert(hdmi->clk);
660 usleep_range(1000, 2000);
661 tegra_periph_reset_deassert(hdmi->clk);
662
663 tegra_dc_writel(dc, VSYNC_H_POSITION(1),
664 DC_DISP_DISP_TIMING_OPTIONS);
665 tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE888,
666 DC_DISP_DISP_COLOR_CONTROL);
667
668 /* video_preamble uses h_pulse2 */
669 pulse_start = 1 + h_sync_width + h_back_porch - 10;
670
671 tegra_dc_writel(dc, H_PULSE_2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0);
672
673 value = PULSE_MODE_NORMAL | PULSE_POLARITY_HIGH | PULSE_QUAL_VACTIVE |
674 PULSE_LAST_END_A;
675 tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_CONTROL);
676
677 value = PULSE_START(pulse_start) | PULSE_END(pulse_start + 8);
678 tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_POSITION_A);
679
680 value = VSYNC_WINDOW_END(0x210) | VSYNC_WINDOW_START(0x200) |
681 VSYNC_WINDOW_ENABLE;
682 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);
683
684 if (dc->pipe)
685 value = HDMI_SRC_DISPLAYB;
686 else
687 value = HDMI_SRC_DISPLAYA;
688
689 if ((mode->hdisplay == 720) && ((mode->vdisplay == 480) ||
690 (mode->vdisplay == 576)))
691 tegra_hdmi_writel(hdmi,
692 value | ARM_VIDEO_RANGE_FULL,
693 HDMI_NV_PDISP_INPUT_CONTROL);
694 else
695 tegra_hdmi_writel(hdmi,
696 value | ARM_VIDEO_RANGE_LIMITED,
697 HDMI_NV_PDISP_INPUT_CONTROL);
698
699 div82 = clk_get_rate(hdmi->clk) / 1000000 * 4;
700 value = SOR_REFCLK_DIV_INT(div82 >> 2) | SOR_REFCLK_DIV_FRAC(div82);
701 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_REFCLK);
702
703 if (!hdmi->dvi) {
704 err = tegra_hdmi_setup_audio(hdmi, pclk);
705 if (err < 0)
706 hdmi->dvi = true;
707 }
708
709 if (of_device_is_compatible(node, "nvidia,tegra20-hdmi")) {
710 /*
711 * TODO: add ELD support
712 */
713 }
714
715 rekey = HDMI_REKEY_DEFAULT;
716 value = HDMI_CTRL_REKEY(rekey);
717 value |= HDMI_CTRL_MAX_AC_PACKET((h_sync_width + h_back_porch +
718 h_front_porch - rekey - 18) / 32);
719
720 if (!hdmi->dvi)
721 value |= HDMI_CTRL_ENABLE;
722
723 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_CTRL);
724
725 if (hdmi->dvi)
726 tegra_hdmi_writel(hdmi, 0x0,
727 HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
728 else
729 tegra_hdmi_writel(hdmi, GENERIC_CTRL_AUDIO,
730 HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
731
732 tegra_hdmi_setup_avi_infoframe(hdmi, mode);
733 tegra_hdmi_setup_audio_infoframe(hdmi);
734 tegra_hdmi_setup_stereo_infoframe(hdmi);
735
736 /* TMDS CONFIG */
737 if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
738 num_tmds = ARRAY_SIZE(tegra3_tmds_config);
739 tmds = tegra3_tmds_config;
740 } else {
741 num_tmds = ARRAY_SIZE(tegra2_tmds_config);
742 tmds = tegra2_tmds_config;
743 }
744
745 for (i = 0; i < num_tmds; i++) {
746 if (pclk <= tmds[i].pclk) {
747 tegra_hdmi_setup_tmds(hdmi, &tmds[i]);
748 break;
749 }
750 }
751
752 tegra_hdmi_writel(hdmi,
753 SOR_SEQ_CTL_PU_PC(0) |
754 SOR_SEQ_PU_PC_ALT(0) |
755 SOR_SEQ_PD_PC(8) |
756 SOR_SEQ_PD_PC_ALT(8),
757 HDMI_NV_PDISP_SOR_SEQ_CTL);
758
759 value = SOR_SEQ_INST_WAIT_TIME(1) |
760 SOR_SEQ_INST_WAIT_UNITS_VSYNC |
761 SOR_SEQ_INST_HALT |
762 SOR_SEQ_INST_PIN_A_LOW |
763 SOR_SEQ_INST_PIN_B_LOW |
764 SOR_SEQ_INST_DRIVE_PWM_OUT_LO;
765
766 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(0));
767 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(8));
768
769 value = 0x1c800;
770 value &= ~SOR_CSTM_ROTCLK(~0);
771 value |= SOR_CSTM_ROTCLK(2);
772 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_CSTM);
773
774 tegra_dc_writel(dc, DISP_CTRL_MODE_STOP, DC_CMD_DISPLAY_COMMAND);
775 tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
776 tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
777
778 /* start SOR */
779 tegra_hdmi_writel(hdmi,
780 SOR_PWR_NORMAL_STATE_PU |
781 SOR_PWR_NORMAL_START_NORMAL |
782 SOR_PWR_SAFE_STATE_PD |
783 SOR_PWR_SETTING_NEW_TRIGGER,
784 HDMI_NV_PDISP_SOR_PWR);
785 tegra_hdmi_writel(hdmi,
786 SOR_PWR_NORMAL_STATE_PU |
787 SOR_PWR_NORMAL_START_NORMAL |
788 SOR_PWR_SAFE_STATE_PD |
789 SOR_PWR_SETTING_NEW_DONE,
790 HDMI_NV_PDISP_SOR_PWR);
791
792 do {
793 BUG_ON(--retries < 0);
794 value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PWR);
795 } while (value & SOR_PWR_SETTING_NEW_PENDING);
796
797 value = SOR_STATE_ASY_CRCMODE_COMPLETE |
798 SOR_STATE_ASY_OWNER_HEAD0 |
799 SOR_STATE_ASY_SUBOWNER_BOTH |
800 SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A |
801 SOR_STATE_ASY_DEPOL_POS;
802
803 /* setup sync polarities */
804 if (mode->flags & DRM_MODE_FLAG_PHSYNC)
805 value |= SOR_STATE_ASY_HSYNCPOL_POS;
806
807 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
808 value |= SOR_STATE_ASY_HSYNCPOL_NEG;
809
810 if (mode->flags & DRM_MODE_FLAG_PVSYNC)
811 value |= SOR_STATE_ASY_VSYNCPOL_POS;
812
813 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
814 value |= SOR_STATE_ASY_VSYNCPOL_NEG;
815
816 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_STATE2);
817
818 value = SOR_STATE_ASY_HEAD_OPMODE_AWAKE | SOR_STATE_ASY_ORMODE_NORMAL;
819 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_STATE1);
820
821 tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
822 tegra_hdmi_writel(hdmi, SOR_STATE_UPDATE, HDMI_NV_PDISP_SOR_STATE0);
823 tegra_hdmi_writel(hdmi, value | SOR_STATE_ATTACHED,
824 HDMI_NV_PDISP_SOR_STATE1);
825 tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
826
827 tegra_dc_writel(dc, HDMI_ENABLE, DC_DISP_DISP_WIN_OPTIONS);
828
829 value = PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
830 PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
831 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
832
833 value = DISP_CTRL_MODE_C_DISPLAY;
834 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
835
836 tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
837 tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
838
839 /* TODO: add HDCP support */
840
841 return 0;
842}
843
/*
 * Power down the HDMI output, undoing tegra_output_hdmi_enable() in
 * reverse order: reset and gate the clock, then drop the supplies.
 * Always returns 0.
 */
static int tegra_output_hdmi_disable(struct tegra_output *output)
{
	struct tegra_hdmi *hdmi = to_hdmi(output);

	tegra_periph_reset_assert(hdmi->clk);
	clk_disable(hdmi->clk);
	regulator_disable(hdmi->pll);
	regulator_disable(hdmi->vdd);

	return 0;
}
855
856static int tegra_output_hdmi_setup_clock(struct tegra_output *output,
857 struct clk *clk, unsigned long pclk)
858{
859 struct tegra_hdmi *hdmi = to_hdmi(output);
860 struct clk *base;
861 int err;
862
863 err = clk_set_parent(clk, hdmi->clk_parent);
864 if (err < 0) {
865 dev_err(output->dev, "failed to set parent: %d\n", err);
866 return err;
867 }
868
869 base = clk_get_parent(hdmi->clk_parent);
870
871 /*
872 * This assumes that the parent clock is pll_d_out0 or pll_d2_out
873 * respectively, each of which divides the base pll_d by 2.
874 */
875 err = clk_set_rate(base, pclk * 2);
876 if (err < 0)
877 dev_err(output->dev,
878 "failed to set base clock rate to %lu Hz\n",
879 pclk * 2);
880
881 return 0;
882}
883
884static int tegra_output_hdmi_check_mode(struct tegra_output *output,
885 struct drm_display_mode *mode,
886 enum drm_mode_status *status)
887{
888 struct tegra_hdmi *hdmi = to_hdmi(output);
889 unsigned long pclk = mode->clock * 1000;
890 struct clk *parent;
891 long err;
892
893 parent = clk_get_parent(hdmi->clk_parent);
894
895 err = clk_round_rate(parent, pclk * 4);
896 if (err < 0)
897 *status = MODE_NOCLOCK;
898 else
899 *status = MODE_OK;
900
901 return 0;
902}
903
/* tegra_output callbacks implemented by the HDMI driver. */
static const struct tegra_output_ops hdmi_ops = {
	.enable = tegra_output_hdmi_enable,
	.disable = tegra_output_hdmi_disable,
	.setup_clock = tegra_output_hdmi_setup_clock,
	.check_mode = tegra_output_hdmi_check_mode,
};
910
/*
 * debugfs "regs" file: dump every known HDMI register as
 * name / word offset / current contents, one per line.
 */
static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
{
	struct drm_info_node *node = s->private;
	struct tegra_hdmi *hdmi = node->info_ent->data;

/* print register name, its word offset and the live value */
#define DUMP_REG(name) \
	seq_printf(s, "%-56s %#05x %08lx\n", #name, name, \
		   tegra_hdmi_readl(hdmi, name))

	DUMP_REG(HDMI_CTXSW);
	DUMP_REG(HDMI_NV_PDISP_SOR_STATE0);
	DUMP_REG(HDMI_NV_PDISP_SOR_STATE1);
	DUMP_REG(HDMI_NV_PDISP_SOR_STATE2);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_MSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_LSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_MSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_LSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_MSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_LSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_MSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_LSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_MSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_LSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_MSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_LSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CTRL);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CMODE);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_RI);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_MSB);
	DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_LSB);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU0);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU1);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU2);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_STATUS);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_HEADER);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_CTRL);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH);
	DUMP_REG(HDMI_NV_PDISP_HDMI_CTRL);
	DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT);
	DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_CTRL);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_STATUS);
	DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_SUBPACK);
	DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1);
	DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2);
	DUMP_REG(HDMI_NV_PDISP_HDMI_EMU0);
	DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1);
	DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1_RDATA);
	DUMP_REG(HDMI_NV_PDISP_HDMI_SPARE);
	DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1);
	DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2);
	DUMP_REG(HDMI_NV_PDISP_HDMI_HDCPRIF_ROM_CTRL);
	DUMP_REG(HDMI_NV_PDISP_SOR_CAP);
	DUMP_REG(HDMI_NV_PDISP_SOR_PWR);
	DUMP_REG(HDMI_NV_PDISP_SOR_TEST);
	DUMP_REG(HDMI_NV_PDISP_SOR_PLL0);
	DUMP_REG(HDMI_NV_PDISP_SOR_PLL1);
	DUMP_REG(HDMI_NV_PDISP_SOR_PLL2);
	DUMP_REG(HDMI_NV_PDISP_SOR_CSTM);
	DUMP_REG(HDMI_NV_PDISP_SOR_LVDS);
	DUMP_REG(HDMI_NV_PDISP_SOR_CRCA);
	DUMP_REG(HDMI_NV_PDISP_SOR_CRCB);
	DUMP_REG(HDMI_NV_PDISP_SOR_BLANK);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_CTL);
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(0));
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(1));
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(2));
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(3));
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(4));
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(5));
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(6));
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(7));
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(8));
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(9));
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(10));
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(11));
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(12));
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(13));
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(14));
	DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(15));
	DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA0);
	DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA1);
	DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA0);
	DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA1);
	DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA0);
	DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA1);
	DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA0);
	DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA1);
	DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA0);
	DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA1);
	DUMP_REG(HDMI_NV_PDISP_SOR_TRIG);
	DUMP_REG(HDMI_NV_PDISP_SOR_MSCHECK);
	DUMP_REG(HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
	DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG0);
	DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG1);
	DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG2);
	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(0));
	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(1));
	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(2));
	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(3));
	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(4));
	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(5));
	DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(6));
	DUMP_REG(HDMI_NV_PDISP_AUDIO_PULSE_WIDTH);
	DUMP_REG(HDMI_NV_PDISP_AUDIO_THRESHOLD);
	DUMP_REG(HDMI_NV_PDISP_AUDIO_CNTRL0);
	DUMP_REG(HDMI_NV_PDISP_AUDIO_N);
	DUMP_REG(HDMI_NV_PDISP_HDCPRIF_ROM_TIMING);
	DUMP_REG(HDMI_NV_PDISP_SOR_REFCLK);
	DUMP_REG(HDMI_NV_PDISP_CRC_CONTROL);
	DUMP_REG(HDMI_NV_PDISP_INPUT_CONTROL);
	DUMP_REG(HDMI_NV_PDISP_SCRATCH);
	DUMP_REG(HDMI_NV_PDISP_PE_CURRENT);
	DUMP_REG(HDMI_NV_PDISP_KEY_CTRL);
	DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG0);
	DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG1);
	DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG2);
	DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_0);
	DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_1);
	DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_2);
	DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_3);
	DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG);
	DUMP_REG(HDMI_NV_PDISP_KEY_SKEY_INDEX);
	DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
	DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
	DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);

#undef DUMP_REG

	return 0;
}
1082
/*
 * Template table of debugfs entries exposed by this driver; the NULL data
 * field is filled in per-device (with the tegra_hdmi pointer) by
 * tegra_hdmi_debugfs_init() after the table has been duplicated.
 */
static struct drm_info_list debugfs_files[] = {
	{ "regs", tegra_hdmi_show_regs, 0, NULL },
};
1086
1087static int tegra_hdmi_debugfs_init(struct tegra_hdmi *hdmi,
1088 struct drm_minor *minor)
1089{
1090 unsigned int i;
1091 int err;
1092
1093 hdmi->debugfs = debugfs_create_dir("hdmi", minor->debugfs_root);
1094 if (!hdmi->debugfs)
1095 return -ENOMEM;
1096
1097 hdmi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
1098 GFP_KERNEL);
1099 if (!hdmi->debugfs_files) {
1100 err = -ENOMEM;
1101 goto remove;
1102 }
1103
1104 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
1105 hdmi->debugfs_files[i].data = hdmi;
1106
1107 err = drm_debugfs_create_files(hdmi->debugfs_files,
1108 ARRAY_SIZE(debugfs_files),
1109 hdmi->debugfs, minor);
1110 if (err < 0)
1111 goto free;
1112
1113 hdmi->minor = minor;
1114
1115 return 0;
1116
1117free:
1118 kfree(hdmi->debugfs_files);
1119 hdmi->debugfs_files = NULL;
1120remove:
1121 debugfs_remove(hdmi->debugfs);
1122 hdmi->debugfs = NULL;
1123
1124 return err;
1125}
1126
/*
 * Tear down the debugfs state created by tegra_hdmi_debugfs_init().
 * Order matters: the DRM files must be removed before the duplicated
 * table backing them is freed, and the directory is removed last.
 * Always returns 0.
 */
static int tegra_hdmi_debugfs_exit(struct tegra_hdmi *hdmi)
{
	drm_debugfs_remove_files(hdmi->debugfs_files, ARRAY_SIZE(debugfs_files),
				 hdmi->minor);
	hdmi->minor = NULL;

	kfree(hdmi->debugfs_files);
	hdmi->debugfs_files = NULL;

	debugfs_remove(hdmi->debugfs);
	hdmi->debugfs = NULL;

	return 0;
}
1141
1142static int tegra_hdmi_drm_init(struct host1x_client *client,
1143 struct drm_device *drm)
1144{
1145 struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
1146 int err;
1147
1148 hdmi->output.type = TEGRA_OUTPUT_HDMI;
1149 hdmi->output.dev = client->dev;
1150 hdmi->output.ops = &hdmi_ops;
1151
1152 err = tegra_output_init(drm, &hdmi->output);
1153 if (err < 0) {
1154 dev_err(client->dev, "output setup failed: %d\n", err);
1155 return err;
1156 }
1157
1158 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
1159 err = tegra_hdmi_debugfs_init(hdmi, drm->primary);
1160 if (err < 0)
1161 dev_err(client->dev, "debugfs setup failed: %d\n", err);
1162 }
1163
1164 return 0;
1165}
1166
1167static int tegra_hdmi_drm_exit(struct host1x_client *client)
1168{
1169 struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
1170 int err;
1171
1172 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
1173 err = tegra_hdmi_debugfs_exit(hdmi);
1174 if (err < 0)
1175 dev_err(client->dev, "debugfs cleanup failed: %d\n",
1176 err);
1177 }
1178
1179 err = tegra_output_disable(&hdmi->output);
1180 if (err < 0) {
1181 dev_err(client->dev, "output failed to disable: %d\n", err);
1182 return err;
1183 }
1184
1185 err = tegra_output_exit(&hdmi->output);
1186 if (err < 0) {
1187 dev_err(client->dev, "output cleanup failed: %d\n", err);
1188 return err;
1189 }
1190
1191 return 0;
1192}
1193
/* host1x client callbacks: tie HDMI output setup/teardown to DRM lifetime. */
static const struct host1x_client_ops hdmi_client_ops = {
	.drm_init = tegra_hdmi_drm_init,
	.drm_exit = tegra_hdmi_drm_exit,
};
1198
1199static int tegra_hdmi_probe(struct platform_device *pdev)
1200{
1201 struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
1202 struct tegra_hdmi *hdmi;
1203 struct resource *regs;
1204 int err;
1205
1206 hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
1207 if (!hdmi)
1208 return -ENOMEM;
1209
1210 hdmi->dev = &pdev->dev;
1211 hdmi->audio_source = AUTO;
1212 hdmi->audio_freq = 44100;
1213 hdmi->stereo = false;
1214 hdmi->dvi = false;
1215
1216 hdmi->clk = devm_clk_get(&pdev->dev, NULL);
1217 if (IS_ERR(hdmi->clk)) {
1218 dev_err(&pdev->dev, "failed to get clock\n");
1219 return PTR_ERR(hdmi->clk);
1220 }
1221
1222 err = clk_prepare(hdmi->clk);
1223 if (err < 0)
1224 return err;
1225
1226 hdmi->clk_parent = devm_clk_get(&pdev->dev, "parent");
1227 if (IS_ERR(hdmi->clk_parent))
1228 return PTR_ERR(hdmi->clk_parent);
1229
1230 err = clk_prepare(hdmi->clk_parent);
1231 if (err < 0)
1232 return err;
1233
1234 err = clk_set_parent(hdmi->clk, hdmi->clk_parent);
1235 if (err < 0) {
1236 dev_err(&pdev->dev, "failed to setup clocks: %d\n", err);
1237 return err;
1238 }
1239
1240 hdmi->vdd = devm_regulator_get(&pdev->dev, "vdd");
1241 if (IS_ERR(hdmi->vdd)) {
1242 dev_err(&pdev->dev, "failed to get VDD regulator\n");
1243 return PTR_ERR(hdmi->vdd);
1244 }
1245
1246 hdmi->pll = devm_regulator_get(&pdev->dev, "pll");
1247 if (IS_ERR(hdmi->pll)) {
1248 dev_err(&pdev->dev, "failed to get PLL regulator\n");
1249 return PTR_ERR(hdmi->pll);
1250 }
1251
1252 hdmi->output.dev = &pdev->dev;
1253
1254 err = tegra_output_parse_dt(&hdmi->output);
1255 if (err < 0)
1256 return err;
1257
1258 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1259 if (!regs)
1260 return -ENXIO;
1261
1262 hdmi->regs = devm_request_and_ioremap(&pdev->dev, regs);
1263 if (!hdmi->regs)
1264 return -EADDRNOTAVAIL;
1265
1266 err = platform_get_irq(pdev, 0);
1267 if (err < 0)
1268 return err;
1269
1270 hdmi->irq = err;
1271
1272 hdmi->client.ops = &hdmi_client_ops;
1273 INIT_LIST_HEAD(&hdmi->client.list);
1274 hdmi->client.dev = &pdev->dev;
1275
1276 err = host1x_register_client(host1x, &hdmi->client);
1277 if (err < 0) {
1278 dev_err(&pdev->dev, "failed to register host1x client: %d\n",
1279 err);
1280 return err;
1281 }
1282
1283 platform_set_drvdata(pdev, hdmi);
1284
1285 return 0;
1286}
1287
1288static int tegra_hdmi_remove(struct platform_device *pdev)
1289{
1290 struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
1291 struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
1292 int err;
1293
1294 err = host1x_unregister_client(host1x, &hdmi->client);
1295 if (err < 0) {
1296 dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
1297 err);
1298 return err;
1299 }
1300
1301 clk_unprepare(hdmi->clk_parent);
1302 clk_unprepare(hdmi->clk);
1303
1304 return 0;
1305}
1306
1307static struct of_device_id tegra_hdmi_of_match[] = {
1308 { .compatible = "nvidia,tegra30-hdmi", },
1309 { .compatible = "nvidia,tegra20-hdmi", },
1310 { },
1311};
1312
/*
 * Platform driver for the Tegra HDMI output. Deliberately non-static so
 * it can be registered by driver code elsewhere in this module rather
 * than via module_platform_driver() here.
 */
struct platform_driver tegra_hdmi_driver = {
	.driver = {
		.name = "tegra-hdmi",
		.owner = THIS_MODULE,
		.of_match_table = tegra_hdmi_of_match,
	},
	.probe = tegra_hdmi_probe,
	.remove = tegra_hdmi_remove,
};
diff --git a/drivers/gpu/drm/tegra/hdmi.h b/drivers/gpu/drm/tegra/hdmi.h
new file mode 100644
index 000000000000..1477f36eb45a
--- /dev/null
+++ b/drivers/gpu/drm/tegra/hdmi.h
@@ -0,0 +1,575 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#ifndef TEGRA_HDMI_H
11#define TEGRA_HDMI_H 1
12
13#define HDMI_INFOFRAME_TYPE_VENDOR 0x81
14#define HDMI_INFOFRAME_TYPE_AVI 0x82
15#define HDMI_INFOFRAME_TYPE_SPD 0x83
16#define HDMI_INFOFRAME_TYPE_AUDIO 0x84
17#define HDMI_INFOFRAME_TYPE_MPEG_SRC 0x85
18#define HDMI_INFOFRAME_TYPE_NTSC_VBI 0x86
19
/*
 * AVI infoframe payload as transmitted on the wire; all fields little
 * endian, laid out payload-byte by payload-byte (PB0..PB13).
 */
struct hdmi_avi_infoframe {
	/* PB0 */
	u8 csum;	/* packet checksum */

	/* PB1 */
	unsigned s:2; /* scan information */
	unsigned b:2; /* bar info data valid */
	unsigned a:1; /* active info present */
	unsigned y:2; /* RGB or YCbCr */
	unsigned res1:1;

	/* PB2 */
	unsigned r:4; /* active format aspect ratio */
	unsigned m:2; /* picture aspect ratio */
	unsigned c:2; /* colorimetry */

	/* PB3 */
	unsigned sc:2; /* scan information */
	unsigned q:2; /* quantization range */
	unsigned ec:3; /* extended colorimetry */
	unsigned itc:1; /* IT content */

	/* PB4 */
	unsigned vic:7; /* video format id code */
	unsigned res4:1;

	/* PB5 */
	unsigned pr:4; /* pixel repetition factor */
	unsigned cn:2; /* IT content type */
	unsigned yq:2; /* YCC quantization range */

	/* PB6-7 */
	u16 top_bar_end_line;

	/* PB8-9 */
	u16 bot_bar_start_line;

	/* PB10-11 */
	u16 left_bar_end_pixel;

	/* PB12-13 */
	u16 right_bar_start_pixel;
} __packed;
64
65#define HDMI_AVI_VERSION 0x02
66
67#define HDMI_AVI_Y_RGB 0x0
68#define HDMI_AVI_Y_YCBCR_422 0x1
69#define HDMI_AVI_Y_YCBCR_444 0x2
70
71#define HDMI_AVI_B_VERT 0x1
72#define HDMI_AVI_B_HORIZ 0x2
73
74#define HDMI_AVI_S_NONE 0x0
75#define HDMI_AVI_S_OVERSCAN 0x1
76#define HDMI_AVI_S_UNDERSCAN 0x2
77
78#define HDMI_AVI_C_NONE 0x0
79#define HDMI_AVI_C_SMPTE 0x1
80#define HDMI_AVI_C_ITU_R 0x2
81#define HDMI_AVI_C_EXTENDED 0x4
82
83#define HDMI_AVI_M_4_3 0x1
84#define HDMI_AVI_M_16_9 0x2
85
86#define HDMI_AVI_R_SAME 0x8
87#define HDMI_AVI_R_4_3_CENTER 0x9
88#define HDMI_AVI_R_16_9_CENTER 0xa
89#define HDMI_AVI_R_14_9_CENTER 0xb
90
/*
 * Audio infoframe payload as transmitted on the wire; all fields little
 * endian, laid out payload-byte by payload-byte (PB0..PB10).
 */
struct hdmi_audio_infoframe {
	/* PB0 */
	u8 csum;	/* packet checksum */

	/* PB1 */
	unsigned cc:3; /* channel count */
	unsigned res1:1;
	unsigned ct:4; /* coding type */

	/* PB2 */
	unsigned ss:2; /* sample size */
	unsigned sf:3; /* sample frequency */
	unsigned res2:3;

	/* PB3 */
	unsigned cxt:5; /* coding extension type */
	unsigned res3:3;

	/* PB4 */
	u8 ca; /* channel/speaker allocation */

	/* PB5 */
	unsigned res5:3;
	unsigned lsv:4; /* level shift value */
	unsigned dm_inh:1; /* downmix inhibit */

	/* PB6-10 reserved */
	u8 res6;
	u8 res7;
	u8 res8;
	u8 res9;
	u8 res10;
} __packed;
125
126#define HDMI_AUDIO_VERSION 0x01
127
128#define HDMI_AUDIO_CC_STREAM 0x0 /* specified by audio stream */
129#define HDMI_AUDIO_CC_2 0x1
130#define HDMI_AUDIO_CC_3 0x2
131#define HDMI_AUDIO_CC_4 0x3
132#define HDMI_AUDIO_CC_5 0x4
133#define HDMI_AUDIO_CC_6 0x5
134#define HDMI_AUDIO_CC_7 0x6
135#define HDMI_AUDIO_CC_8 0x7
136
137#define HDMI_AUDIO_CT_STREAM 0x0 /* specified by audio stream */
138#define HDMI_AUDIO_CT_PCM 0x1
139#define HDMI_AUDIO_CT_AC3 0x2
140#define HDMI_AUDIO_CT_MPEG1 0x3
141#define HDMI_AUDIO_CT_MP3 0x4
142#define HDMI_AUDIO_CT_MPEG2 0x5
143#define HDMI_AUDIO_CT_AAC_LC 0x6
144#define HDMI_AUDIO_CT_DTS 0x7
145#define HDMI_AUDIO_CT_ATRAC 0x8
146#define HDMI_AUDIO_CT_DSD 0x9
147#define HDMI_AUDIO_CT_E_AC3 0xa
148#define HDMI_AUDIO_CT_DTS_HD 0xb
149#define HDMI_AUDIO_CT_MLP 0xc
150#define HDMI_AUDIO_CT_DST 0xd
151#define HDMI_AUDIO_CT_WMA_PRO 0xe
152#define HDMI_AUDIO_CT_CXT 0xf
153
#define HDMI_AUDIO_SF_STREAM 0x0 /* specified by audio stream */
#define HDMI_AUDIO_SF_32K 0x1
/* misspelled original name kept as an alias so existing users still build */
#define HDMI_AUIDO_SF_32K HDMI_AUDIO_SF_32K
#define HDMI_AUDIO_SF_44_1K 0x2
#define HDMI_AUDIO_SF_48K 0x3
#define HDMI_AUDIO_SF_88_2K 0x4
#define HDMI_AUDIO_SF_96K 0x5
#define HDMI_AUDIO_SF_176_4K 0x6
#define HDMI_AUDIO_SF_192K 0x7
162
163#define HDMI_AUDIO_SS_STREAM 0x0 /* specified by audio stream */
164#define HDMI_AUDIO_SS_16BIT 0x1
165#define HDMI_AUDIO_SS_20BIT 0x2
166#define HDMI_AUDIO_SS_24BIT 0x3
167
168#define HDMI_AUDIO_CXT_CT 0x0 /* refer to coding in CT */
169#define HDMI_AUDIO_CXT_HE_AAC 0x1
170#define HDMI_AUDIO_CXT_HE_AAC_V2 0x2
171#define HDMI_AUDIO_CXT_MPEG_SURROUND 0x3
172
/*
 * Vendor-specific (stereo/3D) infoframe payload as transmitted on the
 * wire; all fields little endian, laid out payload-byte by payload-byte.
 */
struct hdmi_stereo_infoframe {
	/* PB0 */
	u8 csum;	/* packet checksum */

	/* PB1 */
	u8 regid0;	/* 24-bit IEEE registration id, byte 0 */

	/* PB2 */
	u8 regid1;	/* 24-bit IEEE registration id, byte 1 */

	/* PB3 */
	u8 regid2;	/* 24-bit IEEE registration id, byte 2 */

	/* PB4 */
	unsigned res1:5;
	unsigned hdmi_video_format:3;

	/* PB5 */
	unsigned res2:4;
	unsigned _3d_structure:4;

	/* PB6 */
	unsigned res3:4;
	unsigned _3d_ext_data:4;
} __packed;
199
200#define HDMI_VENDOR_VERSION 0x01
201
202/* register definitions */
203#define HDMI_CTXSW 0x00
204
205#define HDMI_NV_PDISP_SOR_STATE0 0x01
206#define SOR_STATE_UPDATE (1 << 0)
207
208#define HDMI_NV_PDISP_SOR_STATE1 0x02
209#define SOR_STATE_ASY_HEAD_OPMODE_AWAKE (2 << 0)
210#define SOR_STATE_ASY_ORMODE_NORMAL (1 << 2)
211#define SOR_STATE_ATTACHED (1 << 3)
212
213#define HDMI_NV_PDISP_SOR_STATE2 0x03
214#define SOR_STATE_ASY_OWNER_NONE (0 << 0)
215#define SOR_STATE_ASY_OWNER_HEAD0 (1 << 0)
216#define SOR_STATE_ASY_SUBOWNER_NONE (0 << 4)
217#define SOR_STATE_ASY_SUBOWNER_SUBHEAD0 (1 << 4)
218#define SOR_STATE_ASY_SUBOWNER_SUBHEAD1 (2 << 4)
219#define SOR_STATE_ASY_SUBOWNER_BOTH (3 << 4)
220#define SOR_STATE_ASY_CRCMODE_ACTIVE (0 << 6)
221#define SOR_STATE_ASY_CRCMODE_COMPLETE (1 << 6)
222#define SOR_STATE_ASY_CRCMODE_NON_ACTIVE (2 << 6)
223#define SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A (1 << 8)
224#define SOR_STATE_ASY_PROTOCOL_CUSTOM (15 << 8)
225#define SOR_STATE_ASY_HSYNCPOL_POS (0 << 12)
226#define SOR_STATE_ASY_HSYNCPOL_NEG (1 << 12)
227#define SOR_STATE_ASY_VSYNCPOL_POS (0 << 13)
228#define SOR_STATE_ASY_VSYNCPOL_NEG (1 << 13)
229#define SOR_STATE_ASY_DEPOL_POS (0 << 14)
230#define SOR_STATE_ASY_DEPOL_NEG (1 << 14)
231
232#define HDMI_NV_PDISP_RG_HDCP_AN_MSB 0x04
233#define HDMI_NV_PDISP_RG_HDCP_AN_LSB 0x05
234#define HDMI_NV_PDISP_RG_HDCP_CN_MSB 0x06
235#define HDMI_NV_PDISP_RG_HDCP_CN_LSB 0x07
236#define HDMI_NV_PDISP_RG_HDCP_AKSV_MSB 0x08
237#define HDMI_NV_PDISP_RG_HDCP_AKSV_LSB 0x09
238#define HDMI_NV_PDISP_RG_HDCP_BKSV_MSB 0x0a
239#define HDMI_NV_PDISP_RG_HDCP_BKSV_LSB 0x0b
240#define HDMI_NV_PDISP_RG_HDCP_CKSV_MSB 0x0c
241#define HDMI_NV_PDISP_RG_HDCP_CKSV_LSB 0x0d
242#define HDMI_NV_PDISP_RG_HDCP_DKSV_MSB 0x0e
243#define HDMI_NV_PDISP_RG_HDCP_DKSV_LSB 0x0f
244#define HDMI_NV_PDISP_RG_HDCP_CTRL 0x10
245#define HDMI_NV_PDISP_RG_HDCP_CMODE 0x11
246#define HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB 0x12
247#define HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB 0x13
248#define HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB 0x14
249#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2 0x15
250#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1 0x16
251#define HDMI_NV_PDISP_RG_HDCP_RI 0x17
252#define HDMI_NV_PDISP_RG_HDCP_CS_MSB 0x18
253#define HDMI_NV_PDISP_RG_HDCP_CS_LSB 0x19
254#define HDMI_NV_PDISP_HDMI_AUDIO_EMU0 0x1a
255#define HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0 0x1b
256#define HDMI_NV_PDISP_HDMI_AUDIO_EMU1 0x1c
257#define HDMI_NV_PDISP_HDMI_AUDIO_EMU2 0x1d
258
259#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL 0x1e
260#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS 0x1f
261#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER 0x20
262#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW 0x21
263#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH 0x22
264#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL 0x23
265#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS 0x24
266#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER 0x25
267#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW 0x26
268#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH 0x27
269#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW 0x28
270#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH 0x29
271
272#define INFOFRAME_CTRL_ENABLE (1 << 0)
273
274#define INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0)
275#define INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8)
276#define INFOFRAME_HEADER_LEN(x) (((x) & 0x0f) << 16)
277
278#define HDMI_NV_PDISP_HDMI_GENERIC_CTRL 0x2a
279#define GENERIC_CTRL_ENABLE (1 << 0)
280#define GENERIC_CTRL_OTHER (1 << 4)
281#define GENERIC_CTRL_SINGLE (1 << 8)
282#define GENERIC_CTRL_HBLANK (1 << 12)
283#define GENERIC_CTRL_AUDIO (1 << 16)
284
285#define HDMI_NV_PDISP_HDMI_GENERIC_STATUS 0x2b
286#define HDMI_NV_PDISP_HDMI_GENERIC_HEADER 0x2c
287#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW 0x2d
288#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH 0x2e
289#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW 0x2f
290#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH 0x30
291#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW 0x31
292#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH 0x32
293#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW 0x33
294#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH 0x34
295
296#define HDMI_NV_PDISP_HDMI_ACR_CTRL 0x35
297#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW 0x36
298#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH 0x37
299#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW 0x38
300#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH 0x39
301#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW 0x3a
302#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH 0x3b
303#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW 0x3c
304#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH 0x3d
305#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW 0x3e
306#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH 0x3f
307#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW 0x40
308#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH 0x41
309#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW 0x42
310#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH 0x43
311
312#define ACR_SUBPACK_CTS(x) (((x) & 0xffffff) << 8)
313#define ACR_SUBPACK_N(x) (((x) & 0xffffff) << 0)
314#define ACR_ENABLE (1 << 31)
315
316#define HDMI_NV_PDISP_HDMI_CTRL 0x44
317#define HDMI_CTRL_REKEY(x) (((x) & 0x7f) << 0)
318#define HDMI_CTRL_MAX_AC_PACKET(x) (((x) & 0x1f) << 16)
319#define HDMI_CTRL_ENABLE (1 << 30)
320
321#define HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT 0x45
322#define HDMI_NV_PDISP_HDMI_VSYNC_WINDOW 0x46
323#define VSYNC_WINDOW_END(x) (((x) & 0x3ff) << 0)
324#define VSYNC_WINDOW_START(x) (((x) & 0x3ff) << 16)
325#define VSYNC_WINDOW_ENABLE (1 << 31)
326
327#define HDMI_NV_PDISP_HDMI_GCP_CTRL 0x47
328#define HDMI_NV_PDISP_HDMI_GCP_STATUS 0x48
329#define HDMI_NV_PDISP_HDMI_GCP_SUBPACK 0x49
330#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1 0x4a
331#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2 0x4b
332#define HDMI_NV_PDISP_HDMI_EMU0 0x4c
333#define HDMI_NV_PDISP_HDMI_EMU1 0x4d
334#define HDMI_NV_PDISP_HDMI_EMU1_RDATA 0x4e
335
336#define HDMI_NV_PDISP_HDMI_SPARE 0x4f
337#define SPARE_HW_CTS (1 << 0)
338#define SPARE_FORCE_SW_CTS (1 << 1)
339#define SPARE_CTS_RESET_VAL(x) (((x) & 0x7) << 16)
340
341#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1 0x50
342#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2 0x51
343#define HDMI_NV_PDISP_HDMI_HDCPRIF_ROM_CTRL 0x53
344#define HDMI_NV_PDISP_SOR_CAP 0x54
345#define HDMI_NV_PDISP_SOR_PWR 0x55
346#define SOR_PWR_NORMAL_STATE_PD (0 << 0)
347#define SOR_PWR_NORMAL_STATE_PU (1 << 0)
348#define SOR_PWR_NORMAL_START_NORMAL (0 << 1)
349#define SOR_PWR_NORMAL_START_ALT (1 << 1)
350#define SOR_PWR_SAFE_STATE_PD (0 << 16)
351#define SOR_PWR_SAFE_STATE_PU (1 << 16)
352#define SOR_PWR_SETTING_NEW_DONE (0 << 31)
353#define SOR_PWR_SETTING_NEW_PENDING (1 << 31)
354#define SOR_PWR_SETTING_NEW_TRIGGER (1 << 31)
355
356#define HDMI_NV_PDISP_SOR_TEST 0x56
357#define HDMI_NV_PDISP_SOR_PLL0 0x57
358#define SOR_PLL_PWR (1 << 0)
359#define SOR_PLL_PDBG (1 << 1)
360#define SOR_PLL_VCAPD (1 << 2)
361#define SOR_PLL_PDPORT (1 << 3)
362#define SOR_PLL_RESISTORSEL (1 << 4)
363#define SOR_PLL_PULLDOWN (1 << 5)
364#define SOR_PLL_VCOCAP(x) (((x) & 0xf) << 8)
365#define SOR_PLL_BG_V17_S(x) (((x) & 0xf) << 12)
366#define SOR_PLL_FILTER(x) (((x) & 0xf) << 16)
367#define SOR_PLL_ICHPMP(x) (((x) & 0xf) << 24)
368#define SOR_PLL_TX_REG_LOAD(x) (((x) & 0xf) << 28)
369
370#define HDMI_NV_PDISP_SOR_PLL1 0x58
371#define SOR_PLL_TMDS_TERM_ENABLE (1 << 8)
372#define SOR_PLL_TMDS_TERMADJ(x) (((x) & 0xf) << 9)
373#define SOR_PLL_LOADADJ(x) (((x) & 0xf) << 20)
374#define SOR_PLL_PE_EN (1 << 28)
375#define SOR_PLL_HALF_FULL_PE (1 << 29)
376#define SOR_PLL_S_D_PIN_PE (1 << 30)
377
378#define HDMI_NV_PDISP_SOR_PLL2 0x59
379
380#define HDMI_NV_PDISP_SOR_CSTM 0x5a
381#define SOR_CSTM_ROTCLK(x) (((x) & 0xf) << 24)
382
383#define HDMI_NV_PDISP_SOR_LVDS 0x5b
384#define HDMI_NV_PDISP_SOR_CRCA 0x5c
385#define HDMI_NV_PDISP_SOR_CRCB 0x5d
386#define HDMI_NV_PDISP_SOR_BLANK 0x5e
387#define HDMI_NV_PDISP_SOR_SEQ_CTL 0x5f
388#define SOR_SEQ_CTL_PU_PC(x) (((x) & 0xf) << 0)
389#define SOR_SEQ_PU_PC_ALT(x) (((x) & 0xf) << 4)
390#define SOR_SEQ_PD_PC(x) (((x) & 0xf) << 8)
391#define SOR_SEQ_PD_PC_ALT(x) (((x) & 0xf) << 12)
392#define SOR_SEQ_PC(x) (((x) & 0xf) << 16)
393#define SOR_SEQ_STATUS (1 << 28)
394#define SOR_SEQ_SWITCH (1 << 30)
395
396#define HDMI_NV_PDISP_SOR_SEQ_INST(x) (0x60 + (x))
397
398#define SOR_SEQ_INST_WAIT_TIME(x) (((x) & 0x3ff) << 0)
399#define SOR_SEQ_INST_WAIT_UNITS_VSYNC (2 << 12)
400#define SOR_SEQ_INST_HALT (1 << 15)
401#define SOR_SEQ_INST_PIN_A_LOW (0 << 21)
402#define SOR_SEQ_INST_PIN_A_HIGH (1 << 21)
403#define SOR_SEQ_INST_PIN_B_LOW (0 << 22)
404#define SOR_SEQ_INST_PIN_B_HIGH (1 << 22)
405#define SOR_SEQ_INST_DRIVE_PWM_OUT_LO (1 << 23)
406
407#define HDMI_NV_PDISP_SOR_VCRCA0 0x72
408#define HDMI_NV_PDISP_SOR_VCRCA1 0x73
409#define HDMI_NV_PDISP_SOR_CCRCA0 0x74
410#define HDMI_NV_PDISP_SOR_CCRCA1 0x75
411#define HDMI_NV_PDISP_SOR_EDATAA0 0x76
412#define HDMI_NV_PDISP_SOR_EDATAA1 0x77
413#define HDMI_NV_PDISP_SOR_COUNTA0 0x78
414#define HDMI_NV_PDISP_SOR_COUNTA1 0x79
415#define HDMI_NV_PDISP_SOR_DEBUGA0 0x7a
416#define HDMI_NV_PDISP_SOR_DEBUGA1 0x7b
417#define HDMI_NV_PDISP_SOR_TRIG 0x7c
418#define HDMI_NV_PDISP_SOR_MSCHECK 0x7d
419
420#define HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT 0x7e
421#define DRIVE_CURRENT_LANE0(x) (((x) & 0x3f) << 0)
422#define DRIVE_CURRENT_LANE1(x) (((x) & 0x3f) << 8)
423#define DRIVE_CURRENT_LANE2(x) (((x) & 0x3f) << 16)
424#define DRIVE_CURRENT_LANE3(x) (((x) & 0x3f) << 24)
425#define DRIVE_CURRENT_FUSE_OVERRIDE (1 << 31)
426
427#define DRIVE_CURRENT_1_500_mA 0x00
428#define DRIVE_CURRENT_1_875_mA 0x01
429#define DRIVE_CURRENT_2_250_mA 0x02
430#define DRIVE_CURRENT_2_625_mA 0x03
431#define DRIVE_CURRENT_3_000_mA 0x04
432#define DRIVE_CURRENT_3_375_mA 0x05
433#define DRIVE_CURRENT_3_750_mA 0x06
434#define DRIVE_CURRENT_4_125_mA 0x07
435#define DRIVE_CURRENT_4_500_mA 0x08
436#define DRIVE_CURRENT_4_875_mA 0x09
437#define DRIVE_CURRENT_5_250_mA 0x0a
438#define DRIVE_CURRENT_5_625_mA 0x0b
439#define DRIVE_CURRENT_6_000_mA 0x0c
440#define DRIVE_CURRENT_6_375_mA 0x0d
441#define DRIVE_CURRENT_6_750_mA 0x0e
442#define DRIVE_CURRENT_7_125_mA 0x0f
443#define DRIVE_CURRENT_7_500_mA 0x10
444#define DRIVE_CURRENT_7_875_mA 0x11
445#define DRIVE_CURRENT_8_250_mA 0x12
446#define DRIVE_CURRENT_8_625_mA 0x13
447#define DRIVE_CURRENT_9_000_mA 0x14
448#define DRIVE_CURRENT_9_375_mA 0x15
449#define DRIVE_CURRENT_9_750_mA 0x16
450#define DRIVE_CURRENT_10_125_mA 0x17
451#define DRIVE_CURRENT_10_500_mA 0x18
452#define DRIVE_CURRENT_10_875_mA 0x19
453#define DRIVE_CURRENT_11_250_mA 0x1a
454#define DRIVE_CURRENT_11_625_mA 0x1b
455#define DRIVE_CURRENT_12_000_mA 0x1c
456#define DRIVE_CURRENT_12_375_mA 0x1d
457#define DRIVE_CURRENT_12_750_mA 0x1e
458#define DRIVE_CURRENT_13_125_mA 0x1f
459#define DRIVE_CURRENT_13_500_mA 0x20
460#define DRIVE_CURRENT_13_875_mA 0x21
461#define DRIVE_CURRENT_14_250_mA 0x22
462#define DRIVE_CURRENT_14_625_mA 0x23
463#define DRIVE_CURRENT_15_000_mA 0x24
464#define DRIVE_CURRENT_15_375_mA 0x25
465#define DRIVE_CURRENT_15_750_mA 0x26
466#define DRIVE_CURRENT_16_125_mA 0x27
467#define DRIVE_CURRENT_16_500_mA 0x28
468#define DRIVE_CURRENT_16_875_mA 0x29
469#define DRIVE_CURRENT_17_250_mA 0x2a
470#define DRIVE_CURRENT_17_625_mA 0x2b
471#define DRIVE_CURRENT_18_000_mA 0x2c
472#define DRIVE_CURRENT_18_375_mA 0x2d
473#define DRIVE_CURRENT_18_750_mA 0x2e
474#define DRIVE_CURRENT_19_125_mA 0x2f
475#define DRIVE_CURRENT_19_500_mA 0x30
476#define DRIVE_CURRENT_19_875_mA 0x31
477#define DRIVE_CURRENT_20_250_mA 0x32
478#define DRIVE_CURRENT_20_625_mA 0x33
479#define DRIVE_CURRENT_21_000_mA 0x34
480#define DRIVE_CURRENT_21_375_mA 0x35
481#define DRIVE_CURRENT_21_750_mA 0x36
482#define DRIVE_CURRENT_22_125_mA 0x37
483#define DRIVE_CURRENT_22_500_mA 0x38
484#define DRIVE_CURRENT_22_875_mA 0x39
485#define DRIVE_CURRENT_23_250_mA 0x3a
486#define DRIVE_CURRENT_23_625_mA 0x3b
487#define DRIVE_CURRENT_24_000_mA 0x3c
488#define DRIVE_CURRENT_24_375_mA 0x3d
489#define DRIVE_CURRENT_24_750_mA 0x3e
490
491#define HDMI_NV_PDISP_AUDIO_DEBUG0 0x7f
492#define HDMI_NV_PDISP_AUDIO_DEBUG1 0x80
493#define HDMI_NV_PDISP_AUDIO_DEBUG2 0x81
494
495#define HDMI_NV_PDISP_AUDIO_FS(x) (0x82 + (x))
496#define AUDIO_FS_LOW(x) (((x) & 0xfff) << 0)
497#define AUDIO_FS_HIGH(x) (((x) & 0xfff) << 16)
498
499#define HDMI_NV_PDISP_AUDIO_PULSE_WIDTH 0x89
500#define HDMI_NV_PDISP_AUDIO_THRESHOLD 0x8a
501#define HDMI_NV_PDISP_AUDIO_CNTRL0 0x8b
502#define AUDIO_CNTRL0_ERROR_TOLERANCE(x) (((x) & 0xff) << 0)
503#define AUDIO_CNTRL0_SOURCE_SELECT_AUTO (0 << 20)
504#define AUDIO_CNTRL0_SOURCE_SELECT_SPDIF (1 << 20)
505#define AUDIO_CNTRL0_SOURCE_SELECT_HDAL (2 << 20)
506#define AUDIO_CNTRL0_FRAMES_PER_BLOCK(x) (((x) & 0xff) << 24)
507
508#define HDMI_NV_PDISP_AUDIO_N 0x8c
509#define AUDIO_N_VALUE(x) (((x) & 0xfffff) << 0)
510#define AUDIO_N_RESETF (1 << 20)
511#define AUDIO_N_GENERATE_NORMAL (0 << 24)
512#define AUDIO_N_GENERATE_ALTERNATE (1 << 24)
513
514#define HDMI_NV_PDISP_HDCPRIF_ROM_TIMING 0x94
515#define HDMI_NV_PDISP_SOR_REFCLK 0x95
516#define SOR_REFCLK_DIV_INT(x) (((x) & 0xff) << 8)
517#define SOR_REFCLK_DIV_FRAC(x) (((x) & 0x03) << 6)
518
519#define HDMI_NV_PDISP_CRC_CONTROL 0x96
520#define HDMI_NV_PDISP_INPUT_CONTROL 0x97
521#define HDMI_SRC_DISPLAYA (0 << 0)
522#define HDMI_SRC_DISPLAYB (1 << 0)
523#define ARM_VIDEO_RANGE_FULL (0 << 1)
524#define ARM_VIDEO_RANGE_LIMITED (1 << 1)
525
526#define HDMI_NV_PDISP_SCRATCH 0x98
527#define HDMI_NV_PDISP_PE_CURRENT 0x99
528#define PE_CURRENT0(x) (((x) & 0xf) << 0)
529#define PE_CURRENT1(x) (((x) & 0xf) << 8)
530#define PE_CURRENT2(x) (((x) & 0xf) << 16)
531#define PE_CURRENT3(x) (((x) & 0xf) << 24)
532
533#define PE_CURRENT_0_0_mA 0x0
534#define PE_CURRENT_0_5_mA 0x1
535#define PE_CURRENT_1_0_mA 0x2
536#define PE_CURRENT_1_5_mA 0x3
537#define PE_CURRENT_2_0_mA 0x4
538#define PE_CURRENT_2_5_mA 0x5
539#define PE_CURRENT_3_0_mA 0x6
540#define PE_CURRENT_3_5_mA 0x7
541#define PE_CURRENT_4_0_mA 0x8
542#define PE_CURRENT_4_5_mA 0x9
543#define PE_CURRENT_5_0_mA 0xa
544#define PE_CURRENT_5_5_mA 0xb
545#define PE_CURRENT_6_0_mA 0xc
546#define PE_CURRENT_6_5_mA 0xd
547#define PE_CURRENT_7_0_mA 0xe
548#define PE_CURRENT_7_5_mA 0xf
549
550#define HDMI_NV_PDISP_KEY_CTRL 0x9a
551#define HDMI_NV_PDISP_KEY_DEBUG0 0x9b
552#define HDMI_NV_PDISP_KEY_DEBUG1 0x9c
553#define HDMI_NV_PDISP_KEY_DEBUG2 0x9d
554#define HDMI_NV_PDISP_KEY_HDCP_KEY_0 0x9e
555#define HDMI_NV_PDISP_KEY_HDCP_KEY_1 0x9f
556#define HDMI_NV_PDISP_KEY_HDCP_KEY_2 0xa0
557#define HDMI_NV_PDISP_KEY_HDCP_KEY_3 0xa1
558#define HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG 0xa2
559#define HDMI_NV_PDISP_KEY_SKEY_INDEX 0xa3
560
561#define HDMI_NV_PDISP_SOR_AUDIO_CNTRL0 0xac
562#define AUDIO_CNTRL0_INJECT_NULLSMPL (1 << 29)
563#define HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR 0xbc
564#define HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE 0xbd
565
566#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320 0xbf
567#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441 0xc0
568#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882 0xc1
569#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764 0xc2
570#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480 0xc3
571#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960 0xc4
572#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 0xc5
573#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT 0xc5
574
575#endif /* TEGRA_HDMI_H */
diff --git a/drivers/gpu/drm/tegra/host1x.c b/drivers/gpu/drm/tegra/host1x.c
new file mode 100644
index 000000000000..5d17b113a6fc
--- /dev/null
+++ b/drivers/gpu/drm/tegra/host1x.c
@@ -0,0 +1,327 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/clk.h>
11#include <linux/err.h>
12#include <linux/module.h>
13#include <linux/of.h>
14#include <linux/platform_device.h>
15
16#include "drm.h"
17
/*
 * One DT-declared child node expected to become a DRM client. Sits on
 * host1x->drm_clients until the matching driver registers, after which
 * it moves to host1x->drm_active with client bound (see
 * host1x_activate_drm_client()); client is NULL until then.
 */
struct host1x_drm_client {
	struct host1x_client *client;	/* bound client, NULL while pending */
	struct device_node *np;		/* held reference to the DT node */
	struct list_head list;		/* link on drm_clients or drm_active */
};
23
24static int host1x_add_drm_client(struct host1x *host1x, struct device_node *np)
25{
26 struct host1x_drm_client *client;
27
28 client = kzalloc(sizeof(*client), GFP_KERNEL);
29 if (!client)
30 return -ENOMEM;
31
32 INIT_LIST_HEAD(&client->list);
33 client->np = of_node_get(np);
34
35 list_add_tail(&client->list, &host1x->drm_clients);
36
37 return 0;
38}
39
40static int host1x_activate_drm_client(struct host1x *host1x,
41 struct host1x_drm_client *drm,
42 struct host1x_client *client)
43{
44 mutex_lock(&host1x->drm_clients_lock);
45 list_del_init(&drm->list);
46 list_add_tail(&drm->list, &host1x->drm_active);
47 drm->client = client;
48 mutex_unlock(&host1x->drm_clients_lock);
49
50 return 0;
51}
52
53static int host1x_remove_drm_client(struct host1x *host1x,
54 struct host1x_drm_client *client)
55{
56 mutex_lock(&host1x->drm_clients_lock);
57 list_del_init(&client->list);
58 mutex_unlock(&host1x->drm_clients_lock);
59
60 of_node_put(client->np);
61 kfree(client);
62
63 return 0;
64}
65
66static int host1x_parse_dt(struct host1x *host1x)
67{
68 static const char * const compat[] = {
69 "nvidia,tegra20-dc",
70 "nvidia,tegra20-hdmi",
71 "nvidia,tegra30-dc",
72 "nvidia,tegra30-hdmi",
73 };
74 unsigned int i;
75 int err;
76
77 for (i = 0; i < ARRAY_SIZE(compat); i++) {
78 struct device_node *np;
79
80 for_each_child_of_node(host1x->dev->of_node, np) {
81 if (of_device_is_compatible(np, compat[i]) &&
82 of_device_is_available(np)) {
83 err = host1x_add_drm_client(host1x, np);
84 if (err < 0)
85 return err;
86 }
87 }
88 }
89
90 return 0;
91}
92
/*
 * Probe the host1x bus device: initialize the client bookkeeping, scan
 * the DT for DRM-capable children, then enable the clock and map
 * resources. The DRM device itself is created later, from
 * host1x_register_client(), once all expected clients have registered.
 *
 * NOTE(review): entries allocated by host1x_parse_dt() are not freed on
 * the later error paths here — confirm teardown handles them.
 */
static int tegra_host1x_probe(struct platform_device *pdev)
{
	struct host1x *host1x;
	struct resource *regs;
	int err;

	host1x = devm_kzalloc(&pdev->dev, sizeof(*host1x), GFP_KERNEL);
	if (!host1x)
		return -ENOMEM;

	mutex_init(&host1x->drm_clients_lock);
	INIT_LIST_HEAD(&host1x->drm_clients);
	INIT_LIST_HEAD(&host1x->drm_active);
	mutex_init(&host1x->clients_lock);
	INIT_LIST_HEAD(&host1x->clients);
	host1x->dev = &pdev->dev;

	err = host1x_parse_dt(host1x);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to parse DT: %d\n", err);
		return err;
	}

	host1x->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host1x->clk))
		return PTR_ERR(host1x->clk);

	err = clk_prepare_enable(host1x->clk);
	if (err < 0)
		return err;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs) {
		err = -ENXIO;
		goto err;
	}

	/* IRQ index 0 is stored as the syncpoint interrupt, index 1 as irq */
	err = platform_get_irq(pdev, 0);
	if (err < 0)
		goto err;

	host1x->syncpt = err;

	err = platform_get_irq(pdev, 1);
	if (err < 0)
		goto err;

	host1x->irq = err;

	host1x->regs = devm_request_and_ioremap(&pdev->dev, regs);
	if (!host1x->regs) {
		err = -EADDRNOTAVAIL;
		goto err;
	}

	platform_set_drvdata(pdev, host1x);

	return 0;

err:
	/* only the clock needs explicit unwinding; the rest is devm-managed */
	clk_disable_unprepare(host1x->clk);
	return err;
}
156
/*
 * Remove the host1x bus device: drop the clock enabled at probe time;
 * all other resources are devm-managed.
 * NOTE(review): pending host1x_drm_client entries are not freed here —
 * confirm they are released elsewhere.
 */
static int tegra_host1x_remove(struct platform_device *pdev)
{
	struct host1x *host1x = platform_get_drvdata(pdev);

	clk_disable_unprepare(host1x->clk);

	return 0;
}
165
166int host1x_drm_init(struct host1x *host1x, struct drm_device *drm)
167{
168 struct host1x_client *client;
169
170 mutex_lock(&host1x->clients_lock);
171
172 list_for_each_entry(client, &host1x->clients, list) {
173 if (client->ops && client->ops->drm_init) {
174 int err = client->ops->drm_init(client, drm);
175 if (err < 0) {
176 dev_err(host1x->dev,
177 "DRM setup failed for %s: %d\n",
178 dev_name(client->dev), err);
179 return err;
180 }
181 }
182 }
183
184 mutex_unlock(&host1x->clients_lock);
185
186 return 0;
187}
188
189int host1x_drm_exit(struct host1x *host1x)
190{
191 struct platform_device *pdev = to_platform_device(host1x->dev);
192 struct host1x_client *client;
193
194 if (!host1x->drm)
195 return 0;
196
197 mutex_lock(&host1x->clients_lock);
198
199 list_for_each_entry_reverse(client, &host1x->clients, list) {
200 if (client->ops && client->ops->drm_exit) {
201 int err = client->ops->drm_exit(client);
202 if (err < 0) {
203 dev_err(host1x->dev,
204 "DRM cleanup failed for %s: %d\n",
205 dev_name(client->dev), err);
206 return err;
207 }
208 }
209 }
210
211 mutex_unlock(&host1x->clients_lock);
212
213 drm_platform_exit(&tegra_drm_driver, pdev);
214 host1x->drm = NULL;
215
216 return 0;
217}
218
/*
 * host1x_register_client - add a client to a host1x instance
 * @host1x: host1x instance to register with
 * @client: client to register
 *
 * Adds the client to the clients list, activates any matching DT-derived
 * DRM client entries, and — once the last expected DRM client has been
 * activated (drm_clients empty) — brings up the DRM device.
 *
 * NOTE(review): drm_clients/drm_active are walked and modified here
 * without taking drm_clients_lock; confirm whether
 * host1x_activate_drm_client() provides the required serialization.
 * NOTE(review): if drm_platform_init() fails, the client stays on the
 * clients list but client->host1x is never set — verify callers cope
 * with that half-registered state.
 */
int host1x_register_client(struct host1x *host1x, struct host1x_client *client)
{
	struct host1x_drm_client *drm, *tmp;
	int err;

	mutex_lock(&host1x->clients_lock);
	list_add_tail(&client->list, &host1x->clients);
	mutex_unlock(&host1x->clients_lock);

	/* move DT entries matching this client from drm_clients to drm_active */
	list_for_each_entry_safe(drm, tmp, &host1x->drm_clients, list)
		if (drm->np == client->dev->of_node)
			host1x_activate_drm_client(host1x, drm, client);

	if (list_empty(&host1x->drm_clients)) {
		struct platform_device *pdev = to_platform_device(host1x->dev);

		err = drm_platform_init(&tegra_drm_driver, pdev);
		if (err < 0) {
			dev_err(host1x->dev, "drm_platform_init(): %d\n", err);
			return err;
		}
	}

	client->host1x = host1x;

	return 0;
}
246
/*
 * host1x_unregister_client - remove a client from a host1x instance
 * @host1x: host1x instance to unregister from
 * @client: client to remove
 *
 * If the client backs an active DRM client, the DRM device is torn down
 * first and the DRM client entry is removed. Finally the client is taken
 * off the clients list.
 *
 * Returns 0 on success or the error from host1x_drm_exit() (in which
 * case the client is left registered).
 *
 * NOTE(review): the drm_active list is walked without drm_clients_lock;
 * confirm the caller context makes this safe.
 */
int host1x_unregister_client(struct host1x *host1x,
			     struct host1x_client *client)
{
	struct host1x_drm_client *drm, *tmp;
	int err;

	list_for_each_entry_safe(drm, tmp, &host1x->drm_active, list) {
		if (drm->client == client) {
			err = host1x_drm_exit(host1x);
			if (err < 0) {
				dev_err(host1x->dev, "host1x_drm_exit(): %d\n",
					err);
				return err;
			}

			host1x_remove_drm_client(host1x, drm);
			break;
		}
	}

	mutex_lock(&host1x->clients_lock);
	list_del_init(&client->list);
	mutex_unlock(&host1x->clients_lock);

	return 0;
}
273
/* Device-tree compatible strings this driver binds to. */
static struct of_device_id tegra_host1x_of_match[] = {
	{ .compatible = "nvidia,tegra30-host1x", },
	{ .compatible = "nvidia,tegra20-host1x", },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_host1x_of_match);
280
/* Platform driver for the host1x bus device itself. */
struct platform_driver tegra_host1x_driver = {
	.driver = {
		.name = "tegra-host1x",
		.owner = THIS_MODULE,
		.of_match_table = tegra_host1x_of_match,
	},
	.probe = tegra_host1x_probe,
	.remove = tegra_host1x_remove,
};
290
/*
 * tegra_host1x_init - module entry point
 *
 * Registers the host1x, display-controller and HDMI platform drivers in
 * dependency order, unwinding previously registered drivers on failure.
 */
static int __init tegra_host1x_init(void)
{
	int err;

	err = platform_driver_register(&tegra_host1x_driver);
	if (err < 0)
		return err;

	err = platform_driver_register(&tegra_dc_driver);
	if (err < 0)
		goto unregister_host1x;

	err = platform_driver_register(&tegra_hdmi_driver);
	if (err < 0)
		goto unregister_dc;

	return 0;

unregister_dc:
	platform_driver_unregister(&tegra_dc_driver);
unregister_host1x:
	platform_driver_unregister(&tegra_host1x_driver);
	return err;
}
module_init(tegra_host1x_init);
316
/*
 * tegra_host1x_exit - module exit point
 *
 * Unregisters the platform drivers in the reverse of registration order.
 */
static void __exit tegra_host1x_exit(void)
{
	platform_driver_unregister(&tegra_hdmi_driver);
	platform_driver_unregister(&tegra_dc_driver);
	platform_driver_unregister(&tegra_host1x_driver);
}
module_exit(tegra_host1x_exit);

MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
new file mode 100644
index 000000000000..8140fc6c34d8
--- /dev/null
+++ b/drivers/gpu/drm/tegra/output.c
@@ -0,0 +1,272 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/module.h>
11#include <linux/of_gpio.h>
12#include <linux/of_i2c.h>
13
14#include "drm.h"
15
/*
 * tegra_connector_get_modes - enumerate display modes for a connector
 *
 * Uses the EDID blob parsed from the device tree when present, otherwise
 * probes EDID over the DDC I2C adapter. The (possibly NULL) EDID is also
 * published via the connector's EDID property.
 *
 * Returns the number of modes added, or 0 if no EDID was available.
 *
 * NOTE(review): kmemdup() copies only sizeof(struct edid) — the 128-byte
 * base block — so any extension blocks in the DT-supplied EDID are
 * silently dropped; confirm whether the full property length should be
 * duplicated instead.
 */
static int tegra_connector_get_modes(struct drm_connector *connector)
{
	struct tegra_output *output = connector_to_output(connector);
	struct edid *edid = NULL;
	int err = 0;

	if (output->edid)
		edid = kmemdup(output->edid, sizeof(*edid), GFP_KERNEL);
	else if (output->ddc)
		edid = drm_get_edid(connector, output->ddc);

	drm_mode_connector_update_edid_property(connector, edid);

	if (edid) {
		err = drm_add_edid_modes(connector, edid);
		kfree(edid);
	}

	return err;
}
36
37static int tegra_connector_mode_valid(struct drm_connector *connector,
38 struct drm_display_mode *mode)
39{
40 struct tegra_output *output = connector_to_output(connector);
41 enum drm_mode_status status = MODE_OK;
42 int err;
43
44 err = tegra_output_check_mode(output, mode, &status);
45 if (err < 0)
46 return MODE_ERROR;
47
48 return status;
49}
50
/*
 * tegra_connector_best_encoder - return the encoder for a connector
 *
 * Each Tegra output embeds exactly one encoder, so that is always the
 * best (and only) choice.
 */
static struct drm_encoder *
tegra_connector_best_encoder(struct drm_connector *connector)
{
	struct tegra_output *output = connector_to_output(connector);

	return &output->encoder;
}

/* Helper callbacks used by the DRM probe/modeset helpers. */
static const struct drm_connector_helper_funcs connector_helper_funcs = {
	.get_modes = tegra_connector_get_modes,
	.mode_valid = tegra_connector_mode_valid,
	.best_encoder = tegra_connector_best_encoder,
};
64
65static enum drm_connector_status
66tegra_connector_detect(struct drm_connector *connector, bool force)
67{
68 struct tegra_output *output = connector_to_output(connector);
69 enum drm_connector_status status = connector_status_unknown;
70
71 if (gpio_is_valid(output->hpd_gpio)) {
72 if (gpio_get_value(output->hpd_gpio) == 0)
73 status = connector_status_disconnected;
74 else
75 status = connector_status_connected;
76 } else {
77 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
78 status = connector_status_connected;
79 }
80
81 return status;
82}
83
/* Remove the connector from sysfs and release its DRM state. */
static void tegra_connector_destroy(struct drm_connector *connector)
{
	drm_sysfs_connector_remove(connector);
	drm_connector_cleanup(connector);
}

/* Core connector callbacks; DPMS and probing use the generic helpers. */
static const struct drm_connector_funcs connector_funcs = {
	.dpms = drm_helper_connector_dpms,
	.detect = tegra_connector_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = tegra_connector_destroy,
};
96
/* Release the encoder's DRM state; nothing driver-specific to free. */
static void tegra_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
}

static const struct drm_encoder_funcs encoder_funcs = {
	.destroy = tegra_encoder_destroy,
};

/* No encoder-level power management; DPMS is handled elsewhere. */
static void tegra_encoder_dpms(struct drm_encoder *encoder, int mode)
{
}

/* Modes are used as-is; no fixup needed for Tegra outputs. */
static bool tegra_encoder_mode_fixup(struct drm_encoder *encoder,
				     const struct drm_display_mode *mode,
				     struct drm_display_mode *adjusted)
{
	return true;
}

/* Required helper hooks; all work happens in mode_set. */
static void tegra_encoder_prepare(struct drm_encoder *encoder)
{
}

static void tegra_encoder_commit(struct drm_encoder *encoder)
{
}
124
125static void tegra_encoder_mode_set(struct drm_encoder *encoder,
126 struct drm_display_mode *mode,
127 struct drm_display_mode *adjusted)
128{
129 struct tegra_output *output = encoder_to_output(encoder);
130 int err;
131
132 err = tegra_output_enable(output);
133 if (err < 0)
134 dev_err(encoder->dev->dev, "tegra_output_enable(): %d\n", err);
135}
136
/* Encoder helper callbacks wired into the DRM CRTC helper framework. */
static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
	.dpms = tegra_encoder_dpms,
	.mode_fixup = tegra_encoder_mode_fixup,
	.prepare = tegra_encoder_prepare,
	.commit = tegra_encoder_commit,
	.mode_set = tegra_encoder_mode_set,
};
144
/*
 * hpd_irq - threaded hotplug-detect interrupt handler
 * @data: the tegra_output whose HPD GPIO fired
 *
 * Kicks the DRM helper so that connector status is re-probed.
 */
static irqreturn_t hpd_irq(int irq, void *data)
{
	struct tegra_output *output = data;

	drm_helper_hpd_irq_event(output->connector.dev);

	return IRQ_HANDLED;
}
153
154int tegra_output_parse_dt(struct tegra_output *output)
155{
156 enum of_gpio_flags flags;
157 struct device_node *ddc;
158 size_t size;
159 int err;
160
161 if (!output->of_node)
162 output->of_node = output->dev->of_node;
163
164 output->edid = of_get_property(output->of_node, "nvidia,edid", &size);
165
166 ddc = of_parse_phandle(output->of_node, "nvidia,ddc-i2c-bus", 0);
167 if (ddc) {
168 output->ddc = of_find_i2c_adapter_by_node(ddc);
169 if (!output->ddc) {
170 err = -EPROBE_DEFER;
171 of_node_put(ddc);
172 return err;
173 }
174
175 of_node_put(ddc);
176 }
177
178 if (!output->edid && !output->ddc)
179 return -ENODEV;
180
181 output->hpd_gpio = of_get_named_gpio_flags(output->of_node,
182 "nvidia,hpd-gpio", 0,
183 &flags);
184
185 return 0;
186}
187
/*
 * tegra_output_init - register connector and encoder for an output
 * @drm: DRM device to register with
 * @output: output parsed by tegra_output_parse_dt()
 *
 * Sets up the hotplug-detect GPIO and IRQ (when one was specified),
 * picks connector/encoder types from the output type, and registers the
 * connector/encoder pair with the DRM core.
 *
 * Returns 0 on success or a negative error code if HPD setup fails.
 *
 * NOTE(review): the return values of drm_connector_init(),
 * drm_encoder_init() and drm_sysfs_connector_add() are not checked —
 * confirm this matches the DRM core's expectations of this era.
 */
int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
{
	int connector, encoder, err;

	if (gpio_is_valid(output->hpd_gpio)) {
		unsigned long flags;

		err = gpio_request_one(output->hpd_gpio, GPIOF_DIR_IN,
				       "HDMI hotplug detect");
		if (err < 0) {
			dev_err(output->dev, "gpio_request_one(): %d\n", err);
			return err;
		}

		err = gpio_to_irq(output->hpd_gpio);
		if (err < 0) {
			dev_err(output->dev, "gpio_to_irq(): %d\n", err);
			goto free_hpd;
		}

		output->hpd_irq = err;

		/* fire on both edges so plug and unplug are seen */
		flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
			IRQF_ONESHOT;

		err = request_threaded_irq(output->hpd_irq, NULL, hpd_irq,
					   flags, "hpd", output);
		if (err < 0) {
			dev_err(output->dev, "failed to request IRQ#%u: %d\n",
				output->hpd_irq, err);
			goto free_hpd;
		}

		output->connector.polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* map the output type to DRM connector/encoder types */
	switch (output->type) {
	case TEGRA_OUTPUT_RGB:
		connector = DRM_MODE_CONNECTOR_LVDS;
		encoder = DRM_MODE_ENCODER_LVDS;
		break;

	case TEGRA_OUTPUT_HDMI:
		connector = DRM_MODE_CONNECTOR_HDMIA;
		encoder = DRM_MODE_ENCODER_TMDS;
		break;

	default:
		connector = DRM_MODE_CONNECTOR_Unknown;
		encoder = DRM_MODE_ENCODER_NONE;
		break;
	}

	drm_connector_init(drm, &output->connector, &connector_funcs,
			   connector);
	drm_connector_helper_add(&output->connector, &connector_helper_funcs);

	drm_encoder_init(drm, &output->encoder, &encoder_funcs, encoder);
	drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs);

	drm_mode_connector_attach_encoder(&output->connector, &output->encoder);
	drm_sysfs_connector_add(&output->connector);

	/* by default an output may be driven by either display controller */
	output->encoder.possible_crtcs = 0x3;

	return 0;

free_hpd:
	gpio_free(output->hpd_gpio);

	return err;
}
260
261int tegra_output_exit(struct tegra_output *output)
262{
263 if (gpio_is_valid(output->hpd_gpio)) {
264 free_irq(output->hpd_irq, output);
265 gpio_free(output->hpd_gpio);
266 }
267
268 if (output->ddc)
269 put_device(&output->ddc->dev);
270
271 return 0;
272}
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
new file mode 100644
index 000000000000..ed4416f20260
--- /dev/null
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -0,0 +1,228 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/clk.h>
11#include <linux/module.h>
12#include <linux/of.h>
13#include <linux/platform_device.h>
14
15#include "drm.h"
16#include "dc.h"
17
/* Per-RGB-output state wrapping the generic tegra_output. */
struct tegra_rgb {
	struct tegra_output output;	/* must stay first for to_rgb() */
	struct clk *clk_parent;		/* parent clock for the DC clock */
	struct clk *clk;		/* display controller clock */
};

/* Recover the tegra_rgb from its embedded tegra_output. */
static inline struct tegra_rgb *to_rgb(struct tegra_output *output)
{
	return container_of(output, struct tegra_rgb, output);
}

/* One register write: value to store at the given DC register offset. */
struct reg_entry {
	unsigned long offset;
	unsigned long value;
};
33
/*
 * Pin output enable/polarity/data/select programming to route RGB data
 * out of the display controller. Values are raw register settings;
 * presumably taken from the TRM/downstream driver — confirm against the
 * Tegra20/30 documentation before changing.
 */
static const struct reg_entry rgb_enable[] = {
	{ DC_COM_PIN_OUTPUT_ENABLE(0), 0x00000000 },
	{ DC_COM_PIN_OUTPUT_ENABLE(1), 0x00000000 },
	{ DC_COM_PIN_OUTPUT_ENABLE(2), 0x00000000 },
	{ DC_COM_PIN_OUTPUT_ENABLE(3), 0x00000000 },
	{ DC_COM_PIN_OUTPUT_POLARITY(0), 0x00000000 },
	{ DC_COM_PIN_OUTPUT_POLARITY(1), 0x01000000 },
	{ DC_COM_PIN_OUTPUT_POLARITY(2), 0x00000000 },
	{ DC_COM_PIN_OUTPUT_POLARITY(3), 0x00000000 },
	{ DC_COM_PIN_OUTPUT_DATA(0), 0x00000000 },
	{ DC_COM_PIN_OUTPUT_DATA(1), 0x00000000 },
	{ DC_COM_PIN_OUTPUT_DATA(2), 0x00000000 },
	{ DC_COM_PIN_OUTPUT_DATA(3), 0x00000000 },
	{ DC_COM_PIN_OUTPUT_SELECT(0), 0x00000000 },
	{ DC_COM_PIN_OUTPUT_SELECT(1), 0x00000000 },
	{ DC_COM_PIN_OUTPUT_SELECT(2), 0x00000000 },
	{ DC_COM_PIN_OUTPUT_SELECT(3), 0x00000000 },
	{ DC_COM_PIN_OUTPUT_SELECT(4), 0x00210222 },
	{ DC_COM_PIN_OUTPUT_SELECT(5), 0x00002200 },
	{ DC_COM_PIN_OUTPUT_SELECT(6), 0x00020000 },
};

/* Inverse programming: written in reverse register order on disable. */
static const struct reg_entry rgb_disable[] = {
	{ DC_COM_PIN_OUTPUT_SELECT(6), 0x00000000 },
	{ DC_COM_PIN_OUTPUT_SELECT(5), 0x00000000 },
	{ DC_COM_PIN_OUTPUT_SELECT(4), 0x00000000 },
	{ DC_COM_PIN_OUTPUT_SELECT(3), 0x00000000 },
	{ DC_COM_PIN_OUTPUT_SELECT(2), 0x00000000 },
	{ DC_COM_PIN_OUTPUT_SELECT(1), 0x00000000 },
	{ DC_COM_PIN_OUTPUT_SELECT(0), 0x00000000 },
	{ DC_COM_PIN_OUTPUT_DATA(3), 0xaaaaaaaa },
	{ DC_COM_PIN_OUTPUT_DATA(2), 0xaaaaaaaa },
	{ DC_COM_PIN_OUTPUT_DATA(1), 0xaaaaaaaa },
	{ DC_COM_PIN_OUTPUT_DATA(0), 0xaaaaaaaa },
	{ DC_COM_PIN_OUTPUT_POLARITY(3), 0x00000000 },
	{ DC_COM_PIN_OUTPUT_POLARITY(2), 0x00000000 },
	{ DC_COM_PIN_OUTPUT_POLARITY(1), 0x00000000 },
	{ DC_COM_PIN_OUTPUT_POLARITY(0), 0x00000000 },
	{ DC_COM_PIN_OUTPUT_ENABLE(3), 0x55555555 },
	{ DC_COM_PIN_OUTPUT_ENABLE(2), 0x55555555 },
	{ DC_COM_PIN_OUTPUT_ENABLE(1), 0x55150005 },
	{ DC_COM_PIN_OUTPUT_ENABLE(0), 0x55555555 },
};
77
78static void tegra_dc_write_regs(struct tegra_dc *dc,
79 const struct reg_entry *table,
80 unsigned int num)
81{
82 unsigned int i;
83
84 for (i = 0; i < num; i++)
85 tegra_dc_writel(dc, table[i].value, table[i].offset);
86}
87
/* Route RGB data out of the attached display controller. */
static int tegra_output_rgb_enable(struct tegra_output *output)
{
	struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);

	tegra_dc_write_regs(dc, rgb_enable, ARRAY_SIZE(rgb_enable));

	return 0;
}

/* Undo the enable programming (reverse register order, see rgb_disable). */
static int tegra_output_rgb_disable(struct tegra_output *output)
{
	struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);

	tegra_dc_write_regs(dc, rgb_disable, ARRAY_SIZE(rgb_disable));

	return 0;
}
105
/*
 * tegra_output_rgb_setup_clock - reparent the DC clock for RGB output
 *
 * The pixel clock rate (@pclk) is currently unused; only the parent is
 * switched to the one resolved in tegra_dc_rgb_probe().
 */
static int tegra_output_rgb_setup_clock(struct tegra_output *output,
					struct clk *clk, unsigned long pclk)
{
	struct tegra_rgb *rgb = to_rgb(output);

	return clk_set_parent(clk, rgb->clk_parent);
}
113
/*
 * tegra_output_rgb_check_mode - validate a display mode for RGB output
 *
 * Always reports MODE_OK and returns 0 (see FIXME below).
 */
static int tegra_output_rgb_check_mode(struct tegra_output *output,
				       struct drm_display_mode *mode,
				       enum drm_mode_status *status)
{
	/*
	 * FIXME: For now, always assume that the mode is okay. There are
	 * unresolved issues with clk_round_rate(), which doesn't always
	 * reliably report whether a frequency can be set or not.
	 */

	*status = MODE_OK;

	return 0;
}
128
/* tegra_output callbacks implementing the RGB/LVDS output type. */
static const struct tegra_output_ops rgb_ops = {
	.enable = tegra_output_rgb_enable,
	.disable = tegra_output_rgb_disable,
	.setup_clock = tegra_output_rgb_setup_clock,
	.check_mode = tegra_output_rgb_check_mode,
};
135
136int tegra_dc_rgb_probe(struct tegra_dc *dc)
137{
138 struct device_node *np;
139 struct tegra_rgb *rgb;
140 int err;
141
142 np = of_get_child_by_name(dc->dev->of_node, "rgb");
143 if (!np || !of_device_is_available(np))
144 return -ENODEV;
145
146 rgb = devm_kzalloc(dc->dev, sizeof(*rgb), GFP_KERNEL);
147 if (!rgb)
148 return -ENOMEM;
149
150 rgb->clk = devm_clk_get(dc->dev, NULL);
151 if (IS_ERR(rgb->clk)) {
152 dev_err(dc->dev, "failed to get clock\n");
153 return PTR_ERR(rgb->clk);
154 }
155
156 rgb->clk_parent = devm_clk_get(dc->dev, "parent");
157 if (IS_ERR(rgb->clk_parent)) {
158 dev_err(dc->dev, "failed to get parent clock\n");
159 return PTR_ERR(rgb->clk_parent);
160 }
161
162 err = clk_set_parent(rgb->clk, rgb->clk_parent);
163 if (err < 0) {
164 dev_err(dc->dev, "failed to set parent clock: %d\n", err);
165 return err;
166 }
167
168 rgb->output.dev = dc->dev;
169 rgb->output.of_node = np;
170
171 err = tegra_output_parse_dt(&rgb->output);
172 if (err < 0)
173 return err;
174
175 dc->rgb = &rgb->output;
176
177 return 0;
178}
179
180int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
181{
182 struct tegra_rgb *rgb = to_rgb(dc->rgb);
183 int err;
184
185 if (!dc->rgb)
186 return -ENODEV;
187
188 rgb->output.type = TEGRA_OUTPUT_RGB;
189 rgb->output.ops = &rgb_ops;
190
191 err = tegra_output_init(dc->base.dev, &rgb->output);
192 if (err < 0) {
193 dev_err(dc->dev, "output setup failed: %d\n", err);
194 return err;
195 }
196
197 /*
198 * By default, outputs can be associated with each display controller.
199 * RGB outputs are an exception, so we make sure they can be attached
200 * to only their parent display controller.
201 */
202 rgb->output.encoder.possible_crtcs = 1 << dc->pipe;
203
204 return 0;
205}
206
207int tegra_dc_rgb_exit(struct tegra_dc *dc)
208{
209 if (dc->rgb) {
210 int err;
211
212 err = tegra_output_disable(dc->rgb);
213 if (err < 0) {
214 dev_err(dc->dev, "output failed to disable: %d\n", err);
215 return err;
216 }
217
218 err = tegra_output_exit(dc->rgb);
219 if (err < 0) {
220 dev_err(dc->dev, "output cleanup failed: %d\n", err);
221 return err;
222 }
223
224 dc->rgb = NULL;
225 }
226
227 return 0;
228}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index bf6e4b5a73b5..33d20be87db5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -162,9 +162,9 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
162{ 162{
163 if (interruptible) { 163 if (interruptible) {
164 return wait_event_interruptible(bo->event_queue, 164 return wait_event_interruptible(bo->event_queue,
165 atomic_read(&bo->reserved) == 0); 165 !ttm_bo_is_reserved(bo));
166 } else { 166 } else {
167 wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0); 167 wait_event(bo->event_queue, !ttm_bo_is_reserved(bo));
168 return 0; 168 return 0;
169 } 169 }
170} 170}
@@ -175,7 +175,7 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
175 struct ttm_bo_device *bdev = bo->bdev; 175 struct ttm_bo_device *bdev = bo->bdev;
176 struct ttm_mem_type_manager *man; 176 struct ttm_mem_type_manager *man;
177 177
178 BUG_ON(!atomic_read(&bo->reserved)); 178 BUG_ON(!ttm_bo_is_reserved(bo));
179 179
180 if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { 180 if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
181 181
@@ -220,7 +220,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
220 struct ttm_bo_global *glob = bo->glob; 220 struct ttm_bo_global *glob = bo->glob;
221 int ret; 221 int ret;
222 222
223 while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) { 223 while (unlikely(atomic_read(&bo->reserved) != 0)) {
224 /** 224 /**
225 * Deadlock avoidance for multi-bo reserving. 225 * Deadlock avoidance for multi-bo reserving.
226 */ 226 */
@@ -249,6 +249,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
249 return ret; 249 return ret;
250 } 250 }
251 251
252 atomic_set(&bo->reserved, 1);
252 if (use_sequence) { 253 if (use_sequence) {
253 /** 254 /**
254 * Wake up waiters that may need to recheck for deadlock, 255 * Wake up waiters that may need to recheck for deadlock,
@@ -365,7 +366,7 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
365static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, 366static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
366 struct ttm_mem_reg *mem, 367 struct ttm_mem_reg *mem,
367 bool evict, bool interruptible, 368 bool evict, bool interruptible,
368 bool no_wait_reserve, bool no_wait_gpu) 369 bool no_wait_gpu)
369{ 370{
370 struct ttm_bo_device *bdev = bo->bdev; 371 struct ttm_bo_device *bdev = bo->bdev;
371 bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem); 372 bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -419,12 +420,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
419 420
420 if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && 421 if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
421 !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) 422 !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
422 ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem); 423 ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
423 else if (bdev->driver->move) 424 else if (bdev->driver->move)
424 ret = bdev->driver->move(bo, evict, interruptible, 425 ret = bdev->driver->move(bo, evict, interruptible,
425 no_wait_reserve, no_wait_gpu, mem); 426 no_wait_gpu, mem);
426 else 427 else
427 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem); 428 ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
428 429
429 if (ret) { 430 if (ret) {
430 if (bdev->driver->move_notify) { 431 if (bdev->driver->move_notify) {
@@ -487,40 +488,33 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
487 ttm_bo_mem_put(bo, &bo->mem); 488 ttm_bo_mem_put(bo, &bo->mem);
488 489
489 atomic_set(&bo->reserved, 0); 490 atomic_set(&bo->reserved, 0);
491 wake_up_all(&bo->event_queue);
490 492
491 /* 493 /*
492 * Make processes trying to reserve really pick it up. 494 * Since the final reference to this bo may not be dropped by
495 * the current task we have to put a memory barrier here to make
496 * sure the changes done in this function are always visible.
497 *
498 * This function only needs protection against the final kref_put.
493 */ 499 */
494 smp_mb__after_atomic_dec(); 500 smp_mb__before_atomic_dec();
495 wake_up_all(&bo->event_queue);
496} 501}
497 502
498static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) 503static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
499{ 504{
500 struct ttm_bo_device *bdev = bo->bdev; 505 struct ttm_bo_device *bdev = bo->bdev;
501 struct ttm_bo_global *glob = bo->glob; 506 struct ttm_bo_global *glob = bo->glob;
502 struct ttm_bo_driver *driver; 507 struct ttm_bo_driver *driver = bdev->driver;
503 void *sync_obj = NULL; 508 void *sync_obj = NULL;
504 void *sync_obj_arg;
505 int put_count; 509 int put_count;
506 int ret; 510 int ret;
507 511
512 spin_lock(&glob->lru_lock);
513 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
514
508 spin_lock(&bdev->fence_lock); 515 spin_lock(&bdev->fence_lock);
509 (void) ttm_bo_wait(bo, false, false, true); 516 (void) ttm_bo_wait(bo, false, false, true);
510 if (!bo->sync_obj) { 517 if (!ret && !bo->sync_obj) {
511
512 spin_lock(&glob->lru_lock);
513
514 /**
515 * Lock inversion between bo:reserve and bdev::fence_lock here,
516 * but that's OK, since we're only trylocking.
517 */
518
519 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
520
521 if (unlikely(ret == -EBUSY))
522 goto queue;
523
524 spin_unlock(&bdev->fence_lock); 518 spin_unlock(&bdev->fence_lock);
525 put_count = ttm_bo_del_from_lru(bo); 519 put_count = ttm_bo_del_from_lru(bo);
526 520
@@ -530,22 +524,22 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
530 ttm_bo_list_ref_sub(bo, put_count, true); 524 ttm_bo_list_ref_sub(bo, put_count, true);
531 525
532 return; 526 return;
533 } else {
534 spin_lock(&glob->lru_lock);
535 } 527 }
536queue:
537 driver = bdev->driver;
538 if (bo->sync_obj) 528 if (bo->sync_obj)
539 sync_obj = driver->sync_obj_ref(bo->sync_obj); 529 sync_obj = driver->sync_obj_ref(bo->sync_obj);
540 sync_obj_arg = bo->sync_obj_arg; 530 spin_unlock(&bdev->fence_lock);
531
532 if (!ret) {
533 atomic_set(&bo->reserved, 0);
534 wake_up_all(&bo->event_queue);
535 }
541 536
542 kref_get(&bo->list_kref); 537 kref_get(&bo->list_kref);
543 list_add_tail(&bo->ddestroy, &bdev->ddestroy); 538 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
544 spin_unlock(&glob->lru_lock); 539 spin_unlock(&glob->lru_lock);
545 spin_unlock(&bdev->fence_lock);
546 540
547 if (sync_obj) { 541 if (sync_obj) {
548 driver->sync_obj_flush(sync_obj, sync_obj_arg); 542 driver->sync_obj_flush(sync_obj);
549 driver->sync_obj_unref(&sync_obj); 543 driver->sync_obj_unref(&sync_obj);
550 } 544 }
551 schedule_delayed_work(&bdev->wq, 545 schedule_delayed_work(&bdev->wq,
@@ -553,68 +547,84 @@ queue:
553} 547}
554 548
555/** 549/**
556 * function ttm_bo_cleanup_refs 550 * function ttm_bo_cleanup_refs_and_unlock
557 * If bo idle, remove from delayed- and lru lists, and unref. 551 * If bo idle, remove from delayed- and lru lists, and unref.
558 * If not idle, do nothing. 552 * If not idle, do nothing.
559 * 553 *
554 * Must be called with lru_lock and reservation held, this function
555 * will drop both before returning.
556 *
560 * @interruptible Any sleeps should occur interruptibly. 557 * @interruptible Any sleeps should occur interruptibly.
561 * @no_wait_reserve Never wait for reserve. Return -EBUSY instead.
562 * @no_wait_gpu Never wait for gpu. Return -EBUSY instead. 558 * @no_wait_gpu Never wait for gpu. Return -EBUSY instead.
563 */ 559 */
564 560
565static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, 561static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
566 bool interruptible, 562 bool interruptible,
567 bool no_wait_reserve, 563 bool no_wait_gpu)
568 bool no_wait_gpu)
569{ 564{
570 struct ttm_bo_device *bdev = bo->bdev; 565 struct ttm_bo_device *bdev = bo->bdev;
566 struct ttm_bo_driver *driver = bdev->driver;
571 struct ttm_bo_global *glob = bo->glob; 567 struct ttm_bo_global *glob = bo->glob;
572 int put_count; 568 int put_count;
573 int ret = 0; 569 int ret;
574 570
575retry:
576 spin_lock(&bdev->fence_lock); 571 spin_lock(&bdev->fence_lock);
577 ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); 572 ret = ttm_bo_wait(bo, false, false, true);
578 spin_unlock(&bdev->fence_lock);
579 573
580 if (unlikely(ret != 0)) 574 if (ret && !no_wait_gpu) {
581 return ret; 575 void *sync_obj;
582 576
583retry_reserve: 577 /*
584 spin_lock(&glob->lru_lock); 578 * Take a reference to the fence and unreserve,
579 * at this point the buffer should be dead, so
580 * no new sync objects can be attached.
581 */
582 sync_obj = driver->sync_obj_ref(bo->sync_obj);
583 spin_unlock(&bdev->fence_lock);
585 584
586 if (unlikely(list_empty(&bo->ddestroy))) { 585 atomic_set(&bo->reserved, 0);
586 wake_up_all(&bo->event_queue);
587 spin_unlock(&glob->lru_lock); 587 spin_unlock(&glob->lru_lock);
588 return 0;
589 }
590
591 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
592 588
593 if (unlikely(ret == -EBUSY)) { 589 ret = driver->sync_obj_wait(sync_obj, false, interruptible);
594 spin_unlock(&glob->lru_lock); 590 driver->sync_obj_unref(&sync_obj);
595 if (likely(!no_wait_reserve)) 591 if (ret)
596 ret = ttm_bo_wait_unreserved(bo, interruptible);
597 if (unlikely(ret != 0))
598 return ret; 592 return ret;
599 593
600 goto retry_reserve; 594 /*
601 } 595 * remove sync_obj with ttm_bo_wait, the wait should be
596 * finished, and no new wait object should have been added.
597 */
598 spin_lock(&bdev->fence_lock);
599 ret = ttm_bo_wait(bo, false, false, true);
600 WARN_ON(ret);
601 spin_unlock(&bdev->fence_lock);
602 if (ret)
603 return ret;
602 604
603 BUG_ON(ret != 0); 605 spin_lock(&glob->lru_lock);
606 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
604 607
605 /** 608 /*
606 * We can re-check for sync object without taking 609 * We raced, and lost, someone else holds the reservation now,
607 * the bo::lock since setting the sync object requires 610 * and is probably busy in ttm_bo_cleanup_memtype_use.
608 * also bo::reserved. A busy object at this point may 611 *
609 * be caused by another thread recently starting an accelerated 612 * Even if it's not the case, because we finished waiting any
610 * eviction. 613 * delayed destruction would succeed, so just return success
611 */ 614 * here.
615 */
616 if (ret) {
617 spin_unlock(&glob->lru_lock);
618 return 0;
619 }
620 } else
621 spin_unlock(&bdev->fence_lock);
612 622
613 if (unlikely(bo->sync_obj)) { 623 if (ret || unlikely(list_empty(&bo->ddestroy))) {
614 atomic_set(&bo->reserved, 0); 624 atomic_set(&bo->reserved, 0);
615 wake_up_all(&bo->event_queue); 625 wake_up_all(&bo->event_queue);
616 spin_unlock(&glob->lru_lock); 626 spin_unlock(&glob->lru_lock);
617 goto retry; 627 return ret;
618 } 628 }
619 629
620 put_count = ttm_bo_del_from_lru(bo); 630 put_count = ttm_bo_del_from_lru(bo);
@@ -657,9 +667,13 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
657 kref_get(&nentry->list_kref); 667 kref_get(&nentry->list_kref);
658 } 668 }
659 669
660 spin_unlock(&glob->lru_lock); 670 ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0);
661 ret = ttm_bo_cleanup_refs(entry, false, !remove_all, 671 if (!ret)
662 !remove_all); 672 ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
673 !remove_all);
674 else
675 spin_unlock(&glob->lru_lock);
676
663 kref_put(&entry->list_kref, ttm_bo_release_list); 677 kref_put(&entry->list_kref, ttm_bo_release_list);
664 entry = nentry; 678 entry = nentry;
665 679
@@ -697,6 +711,7 @@ static void ttm_bo_release(struct kref *kref)
697 struct ttm_bo_device *bdev = bo->bdev; 711 struct ttm_bo_device *bdev = bo->bdev;
698 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; 712 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
699 713
714 write_lock(&bdev->vm_lock);
700 if (likely(bo->vm_node != NULL)) { 715 if (likely(bo->vm_node != NULL)) {
701 rb_erase(&bo->vm_rb, &bdev->addr_space_rb); 716 rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
702 drm_mm_put_block(bo->vm_node); 717 drm_mm_put_block(bo->vm_node);
@@ -708,18 +723,14 @@ static void ttm_bo_release(struct kref *kref)
708 ttm_mem_io_unlock(man); 723 ttm_mem_io_unlock(man);
709 ttm_bo_cleanup_refs_or_queue(bo); 724 ttm_bo_cleanup_refs_or_queue(bo);
710 kref_put(&bo->list_kref, ttm_bo_release_list); 725 kref_put(&bo->list_kref, ttm_bo_release_list);
711 write_lock(&bdev->vm_lock);
712} 726}
713 727
714void ttm_bo_unref(struct ttm_buffer_object **p_bo) 728void ttm_bo_unref(struct ttm_buffer_object **p_bo)
715{ 729{
716 struct ttm_buffer_object *bo = *p_bo; 730 struct ttm_buffer_object *bo = *p_bo;
717 struct ttm_bo_device *bdev = bo->bdev;
718 731
719 *p_bo = NULL; 732 *p_bo = NULL;
720 write_lock(&bdev->vm_lock);
721 kref_put(&bo->kref, ttm_bo_release); 733 kref_put(&bo->kref, ttm_bo_release);
722 write_unlock(&bdev->vm_lock);
723} 734}
724EXPORT_SYMBOL(ttm_bo_unref); 735EXPORT_SYMBOL(ttm_bo_unref);
725 736
@@ -738,7 +749,7 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
738EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue); 749EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
739 750
740static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, 751static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
741 bool no_wait_reserve, bool no_wait_gpu) 752 bool no_wait_gpu)
742{ 753{
743 struct ttm_bo_device *bdev = bo->bdev; 754 struct ttm_bo_device *bdev = bo->bdev;
744 struct ttm_mem_reg evict_mem; 755 struct ttm_mem_reg evict_mem;
@@ -756,7 +767,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
756 goto out; 767 goto out;
757 } 768 }
758 769
759 BUG_ON(!atomic_read(&bo->reserved)); 770 BUG_ON(!ttm_bo_is_reserved(bo));
760 771
761 evict_mem = bo->mem; 772 evict_mem = bo->mem;
762 evict_mem.mm_node = NULL; 773 evict_mem.mm_node = NULL;
@@ -769,7 +780,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
769 placement.num_busy_placement = 0; 780 placement.num_busy_placement = 0;
770 bdev->driver->evict_flags(bo, &placement); 781 bdev->driver->evict_flags(bo, &placement);
771 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible, 782 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
772 no_wait_reserve, no_wait_gpu); 783 no_wait_gpu);
773 if (ret) { 784 if (ret) {
774 if (ret != -ERESTARTSYS) { 785 if (ret != -ERESTARTSYS) {
775 pr_err("Failed to find memory space for buffer 0x%p eviction\n", 786 pr_err("Failed to find memory space for buffer 0x%p eviction\n",
@@ -780,7 +791,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
780 } 791 }
781 792
782 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, 793 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
783 no_wait_reserve, no_wait_gpu); 794 no_wait_gpu);
784 if (ret) { 795 if (ret) {
785 if (ret != -ERESTARTSYS) 796 if (ret != -ERESTARTSYS)
786 pr_err("Buffer eviction failed\n"); 797 pr_err("Buffer eviction failed\n");
@@ -794,49 +805,33 @@ out:
794 805
795static int ttm_mem_evict_first(struct ttm_bo_device *bdev, 806static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
796 uint32_t mem_type, 807 uint32_t mem_type,
797 bool interruptible, bool no_wait_reserve, 808 bool interruptible,
798 bool no_wait_gpu) 809 bool no_wait_gpu)
799{ 810{
800 struct ttm_bo_global *glob = bdev->glob; 811 struct ttm_bo_global *glob = bdev->glob;
801 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; 812 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
802 struct ttm_buffer_object *bo; 813 struct ttm_buffer_object *bo;
803 int ret, put_count = 0; 814 int ret = -EBUSY, put_count;
804 815
805retry:
806 spin_lock(&glob->lru_lock); 816 spin_lock(&glob->lru_lock);
807 if (list_empty(&man->lru)) { 817 list_for_each_entry(bo, &man->lru, lru) {
808 spin_unlock(&glob->lru_lock); 818 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
809 return -EBUSY; 819 if (!ret)
820 break;
810 } 821 }
811 822
812 bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru); 823 if (ret) {
813 kref_get(&bo->list_kref);
814
815 if (!list_empty(&bo->ddestroy)) {
816 spin_unlock(&glob->lru_lock); 824 spin_unlock(&glob->lru_lock);
817 ret = ttm_bo_cleanup_refs(bo, interruptible,
818 no_wait_reserve, no_wait_gpu);
819 kref_put(&bo->list_kref, ttm_bo_release_list);
820
821 return ret; 825 return ret;
822 } 826 }
823 827
824 ret = ttm_bo_reserve_locked(bo, false, true, false, 0); 828 kref_get(&bo->list_kref);
825
826 if (unlikely(ret == -EBUSY)) {
827 spin_unlock(&glob->lru_lock);
828 if (likely(!no_wait_reserve))
829 ret = ttm_bo_wait_unreserved(bo, interruptible);
830 829
830 if (!list_empty(&bo->ddestroy)) {
831 ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
832 no_wait_gpu);
831 kref_put(&bo->list_kref, ttm_bo_release_list); 833 kref_put(&bo->list_kref, ttm_bo_release_list);
832 834 return ret;
833 /**
834 * We *need* to retry after releasing the lru lock.
835 */
836
837 if (unlikely(ret != 0))
838 return ret;
839 goto retry;
840 } 835 }
841 836
842 put_count = ttm_bo_del_from_lru(bo); 837 put_count = ttm_bo_del_from_lru(bo);
@@ -846,7 +841,7 @@ retry:
846 841
847 ttm_bo_list_ref_sub(bo, put_count, true); 842 ttm_bo_list_ref_sub(bo, put_count, true);
848 843
849 ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu); 844 ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
850 ttm_bo_unreserve(bo); 845 ttm_bo_unreserve(bo);
851 846
852 kref_put(&bo->list_kref, ttm_bo_release_list); 847 kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -871,7 +866,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
871 struct ttm_placement *placement, 866 struct ttm_placement *placement,
872 struct ttm_mem_reg *mem, 867 struct ttm_mem_reg *mem,
873 bool interruptible, 868 bool interruptible,
874 bool no_wait_reserve,
875 bool no_wait_gpu) 869 bool no_wait_gpu)
876{ 870{
877 struct ttm_bo_device *bdev = bo->bdev; 871 struct ttm_bo_device *bdev = bo->bdev;
@@ -884,8 +878,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
884 return ret; 878 return ret;
885 if (mem->mm_node) 879 if (mem->mm_node)
886 break; 880 break;
887 ret = ttm_mem_evict_first(bdev, mem_type, interruptible, 881 ret = ttm_mem_evict_first(bdev, mem_type,
888 no_wait_reserve, no_wait_gpu); 882 interruptible, no_wait_gpu);
889 if (unlikely(ret != 0)) 883 if (unlikely(ret != 0))
890 return ret; 884 return ret;
891 } while (1); 885 } while (1);
@@ -950,7 +944,7 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
950int ttm_bo_mem_space(struct ttm_buffer_object *bo, 944int ttm_bo_mem_space(struct ttm_buffer_object *bo,
951 struct ttm_placement *placement, 945 struct ttm_placement *placement,
952 struct ttm_mem_reg *mem, 946 struct ttm_mem_reg *mem,
953 bool interruptible, bool no_wait_reserve, 947 bool interruptible,
954 bool no_wait_gpu) 948 bool no_wait_gpu)
955{ 949{
956 struct ttm_bo_device *bdev = bo->bdev; 950 struct ttm_bo_device *bdev = bo->bdev;
@@ -1041,7 +1035,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
1041 } 1035 }
1042 1036
1043 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, 1037 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
1044 interruptible, no_wait_reserve, no_wait_gpu); 1038 interruptible, no_wait_gpu);
1045 if (ret == 0 && mem->mm_node) { 1039 if (ret == 0 && mem->mm_node) {
1046 mem->placement = cur_flags; 1040 mem->placement = cur_flags;
1047 return 0; 1041 return 0;
@@ -1054,26 +1048,16 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
1054} 1048}
1055EXPORT_SYMBOL(ttm_bo_mem_space); 1049EXPORT_SYMBOL(ttm_bo_mem_space);
1056 1050
1057int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
1058{
1059 if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
1060 return -EBUSY;
1061
1062 return wait_event_interruptible(bo->event_queue,
1063 atomic_read(&bo->cpu_writers) == 0);
1064}
1065EXPORT_SYMBOL(ttm_bo_wait_cpu);
1066
1067int ttm_bo_move_buffer(struct ttm_buffer_object *bo, 1051int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1068 struct ttm_placement *placement, 1052 struct ttm_placement *placement,
1069 bool interruptible, bool no_wait_reserve, 1053 bool interruptible,
1070 bool no_wait_gpu) 1054 bool no_wait_gpu)
1071{ 1055{
1072 int ret = 0; 1056 int ret = 0;
1073 struct ttm_mem_reg mem; 1057 struct ttm_mem_reg mem;
1074 struct ttm_bo_device *bdev = bo->bdev; 1058 struct ttm_bo_device *bdev = bo->bdev;
1075 1059
1076 BUG_ON(!atomic_read(&bo->reserved)); 1060 BUG_ON(!ttm_bo_is_reserved(bo));
1077 1061
1078 /* 1062 /*
1079 * FIXME: It's possible to pipeline buffer moves. 1063 * FIXME: It's possible to pipeline buffer moves.
@@ -1093,10 +1077,12 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1093 /* 1077 /*
1094 * Determine where to move the buffer. 1078 * Determine where to move the buffer.
1095 */ 1079 */
1096 ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu); 1080 ret = ttm_bo_mem_space(bo, placement, &mem,
1081 interruptible, no_wait_gpu);
1097 if (ret) 1082 if (ret)
1098 goto out_unlock; 1083 goto out_unlock;
1099 ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu); 1084 ret = ttm_bo_handle_move_mem(bo, &mem, false,
1085 interruptible, no_wait_gpu);
1100out_unlock: 1086out_unlock:
1101 if (ret && mem.mm_node) 1087 if (ret && mem.mm_node)
1102 ttm_bo_mem_put(bo, &mem); 1088 ttm_bo_mem_put(bo, &mem);
@@ -1125,12 +1111,12 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
1125 1111
1126int ttm_bo_validate(struct ttm_buffer_object *bo, 1112int ttm_bo_validate(struct ttm_buffer_object *bo,
1127 struct ttm_placement *placement, 1113 struct ttm_placement *placement,
1128 bool interruptible, bool no_wait_reserve, 1114 bool interruptible,
1129 bool no_wait_gpu) 1115 bool no_wait_gpu)
1130{ 1116{
1131 int ret; 1117 int ret;
1132 1118
1133 BUG_ON(!atomic_read(&bo->reserved)); 1119 BUG_ON(!ttm_bo_is_reserved(bo));
1134 /* Check that range is valid */ 1120 /* Check that range is valid */
1135 if (placement->lpfn || placement->fpfn) 1121 if (placement->lpfn || placement->fpfn)
1136 if (placement->fpfn > placement->lpfn || 1122 if (placement->fpfn > placement->lpfn ||
@@ -1141,7 +1127,8 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
1141 */ 1127 */
1142 ret = ttm_bo_mem_compat(placement, &bo->mem); 1128 ret = ttm_bo_mem_compat(placement, &bo->mem);
1143 if (ret < 0) { 1129 if (ret < 0) {
1144 ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu); 1130 ret = ttm_bo_move_buffer(bo, placement, interruptible,
1131 no_wait_gpu);
1145 if (ret) 1132 if (ret)
1146 return ret; 1133 return ret;
1147 } else { 1134 } else {
@@ -1179,7 +1166,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1179 enum ttm_bo_type type, 1166 enum ttm_bo_type type,
1180 struct ttm_placement *placement, 1167 struct ttm_placement *placement,
1181 uint32_t page_alignment, 1168 uint32_t page_alignment,
1182 unsigned long buffer_start,
1183 bool interruptible, 1169 bool interruptible,
1184 struct file *persistent_swap_storage, 1170 struct file *persistent_swap_storage,
1185 size_t acc_size, 1171 size_t acc_size,
@@ -1200,7 +1186,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1200 return -ENOMEM; 1186 return -ENOMEM;
1201 } 1187 }
1202 1188
1203 size += buffer_start & ~PAGE_MASK;
1204 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 1189 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1205 if (num_pages == 0) { 1190 if (num_pages == 0) {
1206 pr_err("Illegal buffer object size\n"); 1191 pr_err("Illegal buffer object size\n");
@@ -1233,7 +1218,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1233 bo->mem.page_alignment = page_alignment; 1218 bo->mem.page_alignment = page_alignment;
1234 bo->mem.bus.io_reserved_vm = false; 1219 bo->mem.bus.io_reserved_vm = false;
1235 bo->mem.bus.io_reserved_count = 0; 1220 bo->mem.bus.io_reserved_count = 0;
1236 bo->buffer_start = buffer_start & PAGE_MASK;
1237 bo->priv_flags = 0; 1221 bo->priv_flags = 0;
1238 bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); 1222 bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
1239 bo->seq_valid = false; 1223 bo->seq_valid = false;
@@ -1257,7 +1241,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1257 goto out_err; 1241 goto out_err;
1258 } 1242 }
1259 1243
1260 ret = ttm_bo_validate(bo, placement, interruptible, false, false); 1244 ret = ttm_bo_validate(bo, placement, interruptible, false);
1261 if (ret) 1245 if (ret)
1262 goto out_err; 1246 goto out_err;
1263 1247
@@ -1306,7 +1290,6 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
1306 enum ttm_bo_type type, 1290 enum ttm_bo_type type,
1307 struct ttm_placement *placement, 1291 struct ttm_placement *placement,
1308 uint32_t page_alignment, 1292 uint32_t page_alignment,
1309 unsigned long buffer_start,
1310 bool interruptible, 1293 bool interruptible,
1311 struct file *persistent_swap_storage, 1294 struct file *persistent_swap_storage,
1312 struct ttm_buffer_object **p_bo) 1295 struct ttm_buffer_object **p_bo)
@@ -1321,8 +1304,8 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
1321 1304
1322 acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object)); 1305 acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
1323 ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, 1306 ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1324 buffer_start, interruptible, 1307 interruptible, persistent_swap_storage, acc_size,
1325 persistent_swap_storage, acc_size, NULL, NULL); 1308 NULL, NULL);
1326 if (likely(ret == 0)) 1309 if (likely(ret == 0))
1327 *p_bo = bo; 1310 *p_bo = bo;
1328 1311
@@ -1344,7 +1327,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1344 spin_lock(&glob->lru_lock); 1327 spin_lock(&glob->lru_lock);
1345 while (!list_empty(&man->lru)) { 1328 while (!list_empty(&man->lru)) {
1346 spin_unlock(&glob->lru_lock); 1329 spin_unlock(&glob->lru_lock);
1347 ret = ttm_mem_evict_first(bdev, mem_type, false, false, false); 1330 ret = ttm_mem_evict_first(bdev, mem_type, false, false);
1348 if (ret) { 1331 if (ret) {
1349 if (allow_errors) { 1332 if (allow_errors) {
1350 return ret; 1333 return ret;
@@ -1577,7 +1560,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
1577 goto out_no_addr_mm; 1560 goto out_no_addr_mm;
1578 1561
1579 INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); 1562 INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1580 bdev->nice_mode = true;
1581 INIT_LIST_HEAD(&bdev->ddestroy); 1563 INIT_LIST_HEAD(&bdev->ddestroy);
1582 bdev->dev_mapping = NULL; 1564 bdev->dev_mapping = NULL;
1583 bdev->glob = glob; 1565 bdev->glob = glob;
@@ -1721,7 +1703,6 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
1721 struct ttm_bo_driver *driver = bo->bdev->driver; 1703 struct ttm_bo_driver *driver = bo->bdev->driver;
1722 struct ttm_bo_device *bdev = bo->bdev; 1704 struct ttm_bo_device *bdev = bo->bdev;
1723 void *sync_obj; 1705 void *sync_obj;
1724 void *sync_obj_arg;
1725 int ret = 0; 1706 int ret = 0;
1726 1707
1727 if (likely(bo->sync_obj == NULL)) 1708 if (likely(bo->sync_obj == NULL))
@@ -1729,7 +1710,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
1729 1710
1730 while (bo->sync_obj) { 1711 while (bo->sync_obj) {
1731 1712
1732 if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) { 1713 if (driver->sync_obj_signaled(bo->sync_obj)) {
1733 void *tmp_obj = bo->sync_obj; 1714 void *tmp_obj = bo->sync_obj;
1734 bo->sync_obj = NULL; 1715 bo->sync_obj = NULL;
1735 clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); 1716 clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
@@ -1743,9 +1724,8 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
1743 return -EBUSY; 1724 return -EBUSY;
1744 1725
1745 sync_obj = driver->sync_obj_ref(bo->sync_obj); 1726 sync_obj = driver->sync_obj_ref(bo->sync_obj);
1746 sync_obj_arg = bo->sync_obj_arg;
1747 spin_unlock(&bdev->fence_lock); 1727 spin_unlock(&bdev->fence_lock);
1748 ret = driver->sync_obj_wait(sync_obj, sync_obj_arg, 1728 ret = driver->sync_obj_wait(sync_obj,
1749 lazy, interruptible); 1729 lazy, interruptible);
1750 if (unlikely(ret != 0)) { 1730 if (unlikely(ret != 0)) {
1751 driver->sync_obj_unref(&sync_obj); 1731 driver->sync_obj_unref(&sync_obj);
@@ -1753,8 +1733,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
1753 return ret; 1733 return ret;
1754 } 1734 }
1755 spin_lock(&bdev->fence_lock); 1735 spin_lock(&bdev->fence_lock);
1756 if (likely(bo->sync_obj == sync_obj && 1736 if (likely(bo->sync_obj == sync_obj)) {
1757 bo->sync_obj_arg == sync_obj_arg)) {
1758 void *tmp_obj = bo->sync_obj; 1737 void *tmp_obj = bo->sync_obj;
1759 bo->sync_obj = NULL; 1738 bo->sync_obj = NULL;
1760 clear_bit(TTM_BO_PRIV_FLAG_MOVING, 1739 clear_bit(TTM_BO_PRIV_FLAG_MOVING,
@@ -1797,8 +1776,7 @@ EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
1797 1776
1798void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo) 1777void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1799{ 1778{
1800 if (atomic_dec_and_test(&bo->cpu_writers)) 1779 atomic_dec(&bo->cpu_writers);
1801 wake_up_all(&bo->event_queue);
1802} 1780}
1803EXPORT_SYMBOL(ttm_bo_synccpu_write_release); 1781EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
1804 1782
@@ -1817,40 +1795,25 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1817 uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM); 1795 uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
1818 1796
1819 spin_lock(&glob->lru_lock); 1797 spin_lock(&glob->lru_lock);
1820 while (ret == -EBUSY) { 1798 list_for_each_entry(bo, &glob->swap_lru, swap) {
1821 if (unlikely(list_empty(&glob->swap_lru))) { 1799 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
1822 spin_unlock(&glob->lru_lock); 1800 if (!ret)
1823 return -EBUSY; 1801 break;
1824 } 1802 }
1825
1826 bo = list_first_entry(&glob->swap_lru,
1827 struct ttm_buffer_object, swap);
1828 kref_get(&bo->list_kref);
1829 1803
1830 if (!list_empty(&bo->ddestroy)) { 1804 if (ret) {
1831 spin_unlock(&glob->lru_lock); 1805 spin_unlock(&glob->lru_lock);
1832 (void) ttm_bo_cleanup_refs(bo, false, false, false); 1806 return ret;
1833 kref_put(&bo->list_kref, ttm_bo_release_list); 1807 }
1834 spin_lock(&glob->lru_lock);
1835 continue;
1836 }
1837 1808
1838 /** 1809 kref_get(&bo->list_kref);
1839 * Reserve buffer. Since we unlock while sleeping, we need
1840 * to re-check that nobody removed us from the swap-list while
1841 * we slept.
1842 */
1843 1810
1844 ret = ttm_bo_reserve_locked(bo, false, true, false, 0); 1811 if (!list_empty(&bo->ddestroy)) {
1845 if (unlikely(ret == -EBUSY)) { 1812 ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
1846 spin_unlock(&glob->lru_lock); 1813 kref_put(&bo->list_kref, ttm_bo_release_list);
1847 ttm_bo_wait_unreserved(bo, false); 1814 return ret;
1848 kref_put(&bo->list_kref, ttm_bo_release_list);
1849 spin_lock(&glob->lru_lock);
1850 }
1851 } 1815 }
1852 1816
1853 BUG_ON(ret != 0);
1854 put_count = ttm_bo_del_from_lru(bo); 1817 put_count = ttm_bo_del_from_lru(bo);
1855 spin_unlock(&glob->lru_lock); 1818 spin_unlock(&glob->lru_lock);
1856 1819
@@ -1876,7 +1839,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1876 evict_mem.mem_type = TTM_PL_SYSTEM; 1839 evict_mem.mem_type = TTM_PL_SYSTEM;
1877 1840
1878 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, 1841 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
1879 false, false, false); 1842 false, false);
1880 if (unlikely(ret != 0)) 1843 if (unlikely(ret != 0))
1881 goto out; 1844 goto out;
1882 } 1845 }
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2026060f03e0..d73d6e3e17b2 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -43,7 +43,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
43} 43}
44 44
45int ttm_bo_move_ttm(struct ttm_buffer_object *bo, 45int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
46 bool evict, bool no_wait_reserve, 46 bool evict,
47 bool no_wait_gpu, struct ttm_mem_reg *new_mem) 47 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
48{ 48{
49 struct ttm_tt *ttm = bo->ttm; 49 struct ttm_tt *ttm = bo->ttm;
@@ -314,7 +314,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
314} 314}
315 315
316int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, 316int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
317 bool evict, bool no_wait_reserve, bool no_wait_gpu, 317 bool evict, bool no_wait_gpu,
318 struct ttm_mem_reg *new_mem) 318 struct ttm_mem_reg *new_mem)
319{ 319{
320 struct ttm_bo_device *bdev = bo->bdev; 320 struct ttm_bo_device *bdev = bo->bdev;
@@ -611,8 +611,7 @@ EXPORT_SYMBOL(ttm_bo_kunmap);
611 611
612int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, 612int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
613 void *sync_obj, 613 void *sync_obj,
614 void *sync_obj_arg, 614 bool evict,
615 bool evict, bool no_wait_reserve,
616 bool no_wait_gpu, 615 bool no_wait_gpu,
617 struct ttm_mem_reg *new_mem) 616 struct ttm_mem_reg *new_mem)
618{ 617{
@@ -630,7 +629,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
630 bo->sync_obj = NULL; 629 bo->sync_obj = NULL;
631 } 630 }
632 bo->sync_obj = driver->sync_obj_ref(sync_obj); 631 bo->sync_obj = driver->sync_obj_ref(sync_obj);
633 bo->sync_obj_arg = sync_obj_arg;
634 if (evict) { 632 if (evict) {
635 ret = ttm_bo_wait(bo, false, false, false); 633 ret = ttm_bo_wait(bo, false, false, false);
636 spin_unlock(&bdev->fence_lock); 634 spin_unlock(&bdev->fence_lock);
@@ -656,11 +654,13 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
656 */ 654 */
657 655
658 set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); 656 set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
657
658 /* ttm_buffer_object_transfer accesses bo->sync_obj */
659 ret = ttm_buffer_object_transfer(bo, &ghost_obj);
659 spin_unlock(&bdev->fence_lock); 660 spin_unlock(&bdev->fence_lock);
660 if (tmp_obj) 661 if (tmp_obj)
661 driver->sync_obj_unref(&tmp_obj); 662 driver->sync_obj_unref(&tmp_obj);
662 663
663 ret = ttm_buffer_object_transfer(bo, &ghost_obj);
664 if (ret) 664 if (ret)
665 return ret; 665 return ret;
666 666
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 3ba72dbdc4bd..74705f329d99 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -259,8 +259,8 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
259 read_lock(&bdev->vm_lock); 259 read_lock(&bdev->vm_lock);
260 bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff, 260 bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
261 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT); 261 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
262 if (likely(bo != NULL)) 262 if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
263 ttm_bo_reference(bo); 263 bo = NULL;
264 read_unlock(&bdev->vm_lock); 264 read_unlock(&bdev->vm_lock);
265 265
266 if (unlikely(bo == NULL)) { 266 if (unlikely(bo == NULL)) {
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 1937069432c5..cd9e4523dc56 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -185,10 +185,7 @@ retry_this_bo:
185 ttm_eu_backoff_reservation_locked(list); 185 ttm_eu_backoff_reservation_locked(list);
186 spin_unlock(&glob->lru_lock); 186 spin_unlock(&glob->lru_lock);
187 ttm_eu_list_ref_sub(list); 187 ttm_eu_list_ref_sub(list);
188 ret = ttm_bo_wait_cpu(bo, false); 188 return -EBUSY;
189 if (ret)
190 return ret;
191 goto retry;
192 } 189 }
193 } 190 }
194 191
@@ -216,19 +213,18 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
216 driver = bdev->driver; 213 driver = bdev->driver;
217 glob = bo->glob; 214 glob = bo->glob;
218 215
219 spin_lock(&bdev->fence_lock);
220 spin_lock(&glob->lru_lock); 216 spin_lock(&glob->lru_lock);
217 spin_lock(&bdev->fence_lock);
221 218
222 list_for_each_entry(entry, list, head) { 219 list_for_each_entry(entry, list, head) {
223 bo = entry->bo; 220 bo = entry->bo;
224 entry->old_sync_obj = bo->sync_obj; 221 entry->old_sync_obj = bo->sync_obj;
225 bo->sync_obj = driver->sync_obj_ref(sync_obj); 222 bo->sync_obj = driver->sync_obj_ref(sync_obj);
226 bo->sync_obj_arg = entry->new_sync_obj_arg;
227 ttm_bo_unreserve_locked(bo); 223 ttm_bo_unreserve_locked(bo);
228 entry->reserved = false; 224 entry->reserved = false;
229 } 225 }
230 spin_unlock(&glob->lru_lock);
231 spin_unlock(&bdev->fence_lock); 226 spin_unlock(&bdev->fence_lock);
227 spin_unlock(&glob->lru_lock);
232 228
233 list_for_each_entry(entry, list, head) { 229 list_for_each_entry(entry, list, head) {
234 if (entry->old_sync_obj) 230 if (entry->old_sync_obj)
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 479c6b0467ca..dbc2def887cd 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -367,7 +367,6 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
367 spin_lock_init(&glob->lock); 367 spin_lock_init(&glob->lock);
368 glob->swap_queue = create_singlethread_workqueue("ttm_swap"); 368 glob->swap_queue = create_singlethread_workqueue("ttm_swap");
369 INIT_WORK(&glob->work, ttm_shrink_work); 369 INIT_WORK(&glob->work, ttm_shrink_work);
370 init_waitqueue_head(&glob->queue);
371 ret = kobject_init_and_add( 370 ret = kobject_init_and_add(
372 &glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting"); 371 &glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
373 if (unlikely(ret != 0)) { 372 if (unlikely(ret != 0)) {
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index c7857874956a..58a5f3261c0b 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -80,7 +80,7 @@ struct ttm_object_file {
80 */ 80 */
81 81
82struct ttm_object_device { 82struct ttm_object_device {
83 rwlock_t object_lock; 83 spinlock_t object_lock;
84 struct drm_open_hash object_hash; 84 struct drm_open_hash object_hash;
85 atomic_t object_count; 85 atomic_t object_count;
86 struct ttm_mem_global *mem_glob; 86 struct ttm_mem_global *mem_glob;
@@ -157,12 +157,12 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
157 base->refcount_release = refcount_release; 157 base->refcount_release = refcount_release;
158 base->ref_obj_release = ref_obj_release; 158 base->ref_obj_release = ref_obj_release;
159 base->object_type = object_type; 159 base->object_type = object_type;
160 write_lock(&tdev->object_lock);
161 kref_init(&base->refcount); 160 kref_init(&base->refcount);
162 ret = drm_ht_just_insert_please(&tdev->object_hash, 161 spin_lock(&tdev->object_lock);
163 &base->hash, 162 ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
164 (unsigned long)base, 31, 0, 0); 163 &base->hash,
165 write_unlock(&tdev->object_lock); 164 (unsigned long)base, 31, 0, 0);
165 spin_unlock(&tdev->object_lock);
166 if (unlikely(ret != 0)) 166 if (unlikely(ret != 0))
167 goto out_err0; 167 goto out_err0;
168 168
@@ -174,7 +174,9 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
174 174
175 return 0; 175 return 0;
176out_err1: 176out_err1:
177 (void)drm_ht_remove_item(&tdev->object_hash, &base->hash); 177 spin_lock(&tdev->object_lock);
178 (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
179 spin_unlock(&tdev->object_lock);
178out_err0: 180out_err0:
179 return ret; 181 return ret;
180} 182}
@@ -186,30 +188,29 @@ static void ttm_release_base(struct kref *kref)
186 container_of(kref, struct ttm_base_object, refcount); 188 container_of(kref, struct ttm_base_object, refcount);
187 struct ttm_object_device *tdev = base->tfile->tdev; 189 struct ttm_object_device *tdev = base->tfile->tdev;
188 190
189 (void)drm_ht_remove_item(&tdev->object_hash, &base->hash); 191 spin_lock(&tdev->object_lock);
190 write_unlock(&tdev->object_lock); 192 (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
193 spin_unlock(&tdev->object_lock);
194
195 /*
196 * Note: We don't use synchronize_rcu() here because it's far
197 * too slow. It's up to the user to free the object using
198 * call_rcu() or ttm_base_object_kfree().
199 */
200
191 if (base->refcount_release) { 201 if (base->refcount_release) {
192 ttm_object_file_unref(&base->tfile); 202 ttm_object_file_unref(&base->tfile);
193 base->refcount_release(&base); 203 base->refcount_release(&base);
194 } 204 }
195 write_lock(&tdev->object_lock);
196} 205}
197 206
198void ttm_base_object_unref(struct ttm_base_object **p_base) 207void ttm_base_object_unref(struct ttm_base_object **p_base)
199{ 208{
200 struct ttm_base_object *base = *p_base; 209 struct ttm_base_object *base = *p_base;
201 struct ttm_object_device *tdev = base->tfile->tdev;
202 210
203 *p_base = NULL; 211 *p_base = NULL;
204 212
205 /*
206 * Need to take the lock here to avoid racing with
207 * users trying to look up the object.
208 */
209
210 write_lock(&tdev->object_lock);
211 kref_put(&base->refcount, ttm_release_base); 213 kref_put(&base->refcount, ttm_release_base);
212 write_unlock(&tdev->object_lock);
213} 214}
214EXPORT_SYMBOL(ttm_base_object_unref); 215EXPORT_SYMBOL(ttm_base_object_unref);
215 216
@@ -221,14 +222,14 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
221 struct drm_hash_item *hash; 222 struct drm_hash_item *hash;
222 int ret; 223 int ret;
223 224
224 read_lock(&tdev->object_lock); 225 rcu_read_lock();
225 ret = drm_ht_find_item(&tdev->object_hash, key, &hash); 226 ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash);
226 227
227 if (likely(ret == 0)) { 228 if (likely(ret == 0)) {
228 base = drm_hash_entry(hash, struct ttm_base_object, hash); 229 base = drm_hash_entry(hash, struct ttm_base_object, hash);
229 kref_get(&base->refcount); 230 ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL;
230 } 231 }
231 read_unlock(&tdev->object_lock); 232 rcu_read_unlock();
232 233
233 if (unlikely(ret != 0)) 234 if (unlikely(ret != 0))
234 return NULL; 235 return NULL;
@@ -426,7 +427,7 @@ struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
426 return NULL; 427 return NULL;
427 428
428 tdev->mem_glob = mem_glob; 429 tdev->mem_glob = mem_glob;
429 rwlock_init(&tdev->object_lock); 430 spin_lock_init(&tdev->object_lock);
430 atomic_set(&tdev->object_count, 0); 431 atomic_set(&tdev->object_count, 0);
431 ret = drm_ht_create(&tdev->object_hash, hash_order); 432 ret = drm_ht_create(&tdev->object_hash, hash_order);
432 433
@@ -444,9 +445,9 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
444 445
445 *p_tdev = NULL; 446 *p_tdev = NULL;
446 447
447 write_lock(&tdev->object_lock); 448 spin_lock(&tdev->object_lock);
448 drm_ht_remove(&tdev->object_hash); 449 drm_ht_remove(&tdev->object_hash);
449 write_unlock(&tdev->object_lock); 450 spin_unlock(&tdev->object_lock);
450 451
451 kfree(tdev); 452 kfree(tdev);
452} 453}
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 860dc4813e99..bd2a3b40cd12 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -749,7 +749,10 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
749 /* clear the pages coming from the pool if requested */ 749 /* clear the pages coming from the pool if requested */
750 if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) { 750 if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
751 list_for_each_entry(p, &plist, lru) { 751 list_for_each_entry(p, &plist, lru) {
752 clear_page(page_address(p)); 752 if (PageHighMem(p))
753 clear_highpage(p);
754 else
755 clear_page(page_address(p));
753 } 756 }
754 } 757 }
755 758
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index bf8260133ea9..7d759a430294 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -308,9 +308,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
308 if (unlikely(to_page == NULL)) 308 if (unlikely(to_page == NULL))
309 goto out_err; 309 goto out_err;
310 310
311 preempt_disable();
312 copy_highpage(to_page, from_page); 311 copy_highpage(to_page, from_page);
313 preempt_enable();
314 page_cache_release(from_page); 312 page_cache_release(from_page);
315 } 313 }
316 314
@@ -358,9 +356,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
358 ret = PTR_ERR(to_page); 356 ret = PTR_ERR(to_page);
359 goto out_err; 357 goto out_err;
360 } 358 }
361 preempt_disable();
362 copy_highpage(to_page, from_page); 359 copy_highpage(to_page, from_page);
363 preempt_enable();
364 set_page_dirty(to_page); 360 set_page_dirty(to_page);
365 mark_page_accessed(to_page); 361 mark_page_accessed(to_page);
366 page_cache_release(to_page); 362 page_cache_release(to_page);
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index b3b2cedf6745..fe5cdbcf2636 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -22,13 +22,17 @@
22static u8 *udl_get_edid(struct udl_device *udl) 22static u8 *udl_get_edid(struct udl_device *udl)
23{ 23{
24 u8 *block; 24 u8 *block;
25 char rbuf[3]; 25 char *rbuf;
26 int ret, i; 26 int ret, i;
27 27
28 block = kmalloc(EDID_LENGTH, GFP_KERNEL); 28 block = kmalloc(EDID_LENGTH, GFP_KERNEL);
29 if (block == NULL) 29 if (block == NULL)
30 return NULL; 30 return NULL;
31 31
32 rbuf = kmalloc(2, GFP_KERNEL);
33 if (rbuf == NULL)
34 goto error;
35
32 for (i = 0; i < EDID_LENGTH; i++) { 36 for (i = 0; i < EDID_LENGTH; i++) {
33 ret = usb_control_msg(udl->ddev->usbdev, 37 ret = usb_control_msg(udl->ddev->usbdev,
34 usb_rcvctrlpipe(udl->ddev->usbdev, 0), (0x02), 38 usb_rcvctrlpipe(udl->ddev->usbdev, 0), (0x02),
@@ -36,16 +40,17 @@ static u8 *udl_get_edid(struct udl_device *udl)
36 HZ); 40 HZ);
37 if (ret < 1) { 41 if (ret < 1) {
38 DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret); 42 DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
39 i--;
40 goto error; 43 goto error;
41 } 44 }
42 block[i] = rbuf[1]; 45 block[i] = rbuf[1];
43 } 46 }
44 47
48 kfree(rbuf);
45 return block; 49 return block;
46 50
47error: 51error:
48 kfree(block); 52 kfree(block);
53 kfree(rbuf);
49 return NULL; 54 return NULL;
50} 55}
51 56
@@ -57,6 +62,14 @@ static int udl_get_modes(struct drm_connector *connector)
57 62
58 edid = (struct edid *)udl_get_edid(udl); 63 edid = (struct edid *)udl_get_edid(udl);
59 64
65 /*
66 * We only read the main block, but if the monitor reports extension
67 * blocks then the drm edid code expects them to be present, so patch
68 * the extension count to 0.
69 */
70 edid->checksum += edid->extensions;
71 edid->extensions = 0;
72
60 drm_mode_connector_update_edid_property(connector, edid); 73 drm_mode_connector_update_edid_property(connector, edid);
61 ret = drm_add_edid_modes(connector, edid); 74 ret = drm_add_edid_modes(connector, edid);
62 kfree(edid); 75 kfree(edid);
@@ -84,7 +97,8 @@ udl_detect(struct drm_connector *connector, bool force)
84 return connector_status_connected; 97 return connector_status_connected;
85} 98}
86 99
87struct drm_encoder *udl_best_single_encoder(struct drm_connector *connector) 100static struct drm_encoder*
101udl_best_single_encoder(struct drm_connector *connector)
88{ 102{
89 int enc_id = connector->encoder_ids[0]; 103 int enc_id = connector->encoder_ids[0];
90 struct drm_mode_object *obj; 104 struct drm_mode_object *obj;
@@ -97,8 +111,9 @@ struct drm_encoder *udl_best_single_encoder(struct drm_connector *connector)
97 return encoder; 111 return encoder;
98} 112}
99 113
100int udl_connector_set_property(struct drm_connector *connector, struct drm_property *property, 114static int udl_connector_set_property(struct drm_connector *connector,
101 uint64_t val) 115 struct drm_property *property,
116 uint64_t val)
102{ 117{
103 return 0; 118 return 0;
104} 119}
@@ -110,13 +125,13 @@ static void udl_connector_destroy(struct drm_connector *connector)
110 kfree(connector); 125 kfree(connector);
111} 126}
112 127
113struct drm_connector_helper_funcs udl_connector_helper_funcs = { 128static struct drm_connector_helper_funcs udl_connector_helper_funcs = {
114 .get_modes = udl_get_modes, 129 .get_modes = udl_get_modes,
115 .mode_valid = udl_mode_valid, 130 .mode_valid = udl_mode_valid,
116 .best_encoder = udl_best_single_encoder, 131 .best_encoder = udl_best_single_encoder,
117}; 132};
118 133
119struct drm_connector_funcs udl_connector_funcs = { 134static struct drm_connector_funcs udl_connector_funcs = {
120 .dpms = drm_helper_connector_dpms, 135 .dpms = drm_helper_connector_dpms,
121 .detect = udl_detect, 136 .detect = udl_detect,
122 .fill_modes = drm_helper_probe_single_connector_modes, 137 .fill_modes = drm_helper_probe_single_connector_modes,
@@ -138,7 +153,7 @@ int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder)
138 drm_sysfs_connector_add(connector); 153 drm_sysfs_connector_add(connector);
139 drm_mode_connector_attach_encoder(connector, encoder); 154 drm_mode_connector_attach_encoder(connector, encoder);
140 155
141 drm_connector_attach_property(connector, 156 drm_object_attach_property(&connector->base,
142 dev->mode_config.dirty_info_property, 157 dev->mode_config.dirty_info_property,
143 1); 158 1);
144 return 0; 159 return 0;
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index fccd361f7b50..87aa5f5d3c88 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -104,7 +104,7 @@ udl_fb_user_fb_create(struct drm_device *dev,
104 104
105int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr, 105int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
106 const char *front, char **urb_buf_ptr, 106 const char *front, char **urb_buf_ptr,
107 u32 byte_offset, u32 byte_width, 107 u32 byte_offset, u32 device_byte_offset, u32 byte_width,
108 int *ident_ptr, int *sent_ptr); 108 int *ident_ptr, int *sent_ptr);
109 109
110int udl_dumb_create(struct drm_file *file_priv, 110int udl_dumb_create(struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 69a2b16f42a6..d4ab3beaada0 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -114,9 +114,10 @@ static void udlfb_dpy_deferred_io(struct fb_info *info,
114 list_for_each_entry(cur, &fbdefio->pagelist, lru) { 114 list_for_each_entry(cur, &fbdefio->pagelist, lru) {
115 115
116 if (udl_render_hline(dev, (ufbdev->ufb.base.bits_per_pixel / 8), 116 if (udl_render_hline(dev, (ufbdev->ufb.base.bits_per_pixel / 8),
117 &urb, (char *) info->fix.smem_start, 117 &urb, (char *) info->fix.smem_start,
118 &cmd, cur->index << PAGE_SHIFT, 118 &cmd, cur->index << PAGE_SHIFT,
119 PAGE_SIZE, &bytes_identical, &bytes_sent)) 119 cur->index << PAGE_SHIFT,
120 PAGE_SIZE, &bytes_identical, &bytes_sent))
120 goto error; 121 goto error;
121 bytes_rendered += PAGE_SIZE; 122 bytes_rendered += PAGE_SIZE;
122 } 123 }
@@ -187,10 +188,11 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
187 for (i = y; i < y + height ; i++) { 188 for (i = y; i < y + height ; i++) {
188 const int line_offset = fb->base.pitches[0] * i; 189 const int line_offset = fb->base.pitches[0] * i;
189 const int byte_offset = line_offset + (x * bpp); 190 const int byte_offset = line_offset + (x * bpp);
190 191 const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
191 if (udl_render_hline(dev, bpp, &urb, 192 if (udl_render_hline(dev, bpp, &urb,
192 (char *) fb->obj->vmapping, 193 (char *) fb->obj->vmapping,
193 &cmd, byte_offset, width * bpp, 194 &cmd, byte_offset, dev_byte_offset,
195 width * bpp,
194 &bytes_identical, &bytes_sent)) 196 &bytes_identical, &bytes_sent))
195 goto error; 197 goto error;
196 } 198 }
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index dc095526ffb7..142fee5f983f 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -213,11 +213,12 @@ static void udl_compress_hline16(
213 */ 213 */
214int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr, 214int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
215 const char *front, char **urb_buf_ptr, 215 const char *front, char **urb_buf_ptr,
216 u32 byte_offset, u32 byte_width, 216 u32 byte_offset, u32 device_byte_offset,
217 u32 byte_width,
217 int *ident_ptr, int *sent_ptr) 218 int *ident_ptr, int *sent_ptr)
218{ 219{
219 const u8 *line_start, *line_end, *next_pixel; 220 const u8 *line_start, *line_end, *next_pixel;
220 u32 base16 = 0 + (byte_offset / bpp) * 2; 221 u32 base16 = 0 + (device_byte_offset / bpp) * 2;
221 struct urb *urb = *urb_ptr; 222 struct urb *urb = *urb_ptr;
222 u8 *cmd = *urb_buf_ptr; 223 u8 *cmd = *urb_buf_ptr;
223 u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length; 224 u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 586869c8c11f..2cc6cd91ac11 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -5,6 +5,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
5 vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \ 5 vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
6 vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ 6 vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
7 vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \ 7 vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
8 vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o 8 vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
9 vmwgfx_surface.o
9 10
10obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o 11obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
new file mode 100644
index 000000000000..8369c3ba10fe
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
@@ -0,0 +1,909 @@
1/**************************************************************************
2 *
3 * Copyright © 2008-2012 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#ifdef __KERNEL__
29
30#include <drm/vmwgfx_drm.h>
31#define surf_size_struct struct drm_vmw_size
32
33#else /* __KERNEL__ */
34
35#ifndef ARRAY_SIZE
36#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
37#endif /* ARRAY_SIZE */
38
39#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
40#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
41#define surf_size_struct SVGA3dSize
42#define u32 uint32
43
44#endif /* __KERNEL__ */
45
46#include "svga3d_reg.h"
47
48/*
49 * enum svga3d_block_desc describes the active data channels in a block.
50 *
51 * There can be at-most four active channels in a block:
52 * 1. Red, bump W, luminance and depth are stored in the first channel.
53 * 2. Green, bump V and stencil are stored in the second channel.
54 * 3. Blue and bump U are stored in the third channel.
55 * 4. Alpha and bump Q are stored in the fourth channel.
56 *
57 * Block channels can be used to store compressed and buffer data:
58 * 1. For compressed formats, only the data channel is used and its size
59 * is equal to that of a singular block in the compression scheme.
60 * 2. For buffer formats, only the data channel is used and its size is
61 * exactly one byte in length.
62 * 3. In each case the bit depth represent the size of a singular block.
63 *
64 * Note: Compressed and IEEE formats do not use the bitMask structure.
65 */
66
67enum svga3d_block_desc {
68 SVGA3DBLOCKDESC_NONE = 0, /* No channels are active */
 68 SVGA3DBLOCKDESC_BLUE = 1 << 0, /* Block with blue channel
 69 data */
71 SVGA3DBLOCKDESC_U = 1 << 0, /* Block with bump U channel
72 data */
73 SVGA3DBLOCKDESC_UV_VIDEO = 1 << 7, /* Block with alternating video
74 U and V */
75 SVGA3DBLOCKDESC_GREEN = 1 << 1, /* Block with green channel
76 data */
77 SVGA3DBLOCKDESC_V = 1 << 1, /* Block with bump V channel
78 data */
79 SVGA3DBLOCKDESC_STENCIL = 1 << 1, /* Block with a stencil
80 channel */
 81 SVGA3DBLOCKDESC_RED = 1 << 2, /* Block with red channel
 82 data */
83 SVGA3DBLOCKDESC_W = 1 << 2, /* Block with bump W channel
84 data */
85 SVGA3DBLOCKDESC_LUMINANCE = 1 << 2, /* Block with luminance channel
86 data */
87 SVGA3DBLOCKDESC_Y = 1 << 2, /* Block with video luminance
88 data */
89 SVGA3DBLOCKDESC_DEPTH = 1 << 2, /* Block with depth channel */
90 SVGA3DBLOCKDESC_ALPHA = 1 << 3, /* Block with an alpha
91 channel */
92 SVGA3DBLOCKDESC_Q = 1 << 3, /* Block with bump Q channel
93 data */
94 SVGA3DBLOCKDESC_BUFFER = 1 << 4, /* Block stores 1 byte of
95 data */
96 SVGA3DBLOCKDESC_COMPRESSED = 1 << 5, /* Block stores n bytes of
97 data depending on the
98 compression method used */
99 SVGA3DBLOCKDESC_IEEE_FP = 1 << 6, /* Block stores data in an IEEE
100 floating point
101 representation in
102 all channels */
103 SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 8, /* Three separate blocks store
104 data. */
105 SVGA3DBLOCKDESC_U_VIDEO = 1 << 9, /* Block with U video data */
106 SVGA3DBLOCKDESC_V_VIDEO = 1 << 10, /* Block with V video data */
107 SVGA3DBLOCKDESC_EXP = 1 << 11, /* Shared exponent */
108 SVGA3DBLOCKDESC_SRGB = 1 << 12, /* Data is in sRGB format */
109 SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13, /* 2 planes of Y, UV,
110 e.g., NV12. */
111 SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14, /* 3 planes of separate
112 Y, U, V, e.g., YV12. */
113
114 SVGA3DBLOCKDESC_RG = SVGA3DBLOCKDESC_RED |
115 SVGA3DBLOCKDESC_GREEN,
116 SVGA3DBLOCKDESC_RGB = SVGA3DBLOCKDESC_RG |
117 SVGA3DBLOCKDESC_BLUE,
118 SVGA3DBLOCKDESC_RGB_SRGB = SVGA3DBLOCKDESC_RGB |
119 SVGA3DBLOCKDESC_SRGB,
120 SVGA3DBLOCKDESC_RGBA = SVGA3DBLOCKDESC_RGB |
121 SVGA3DBLOCKDESC_ALPHA,
122 SVGA3DBLOCKDESC_RGBA_SRGB = SVGA3DBLOCKDESC_RGBA |
123 SVGA3DBLOCKDESC_SRGB,
124 SVGA3DBLOCKDESC_UV = SVGA3DBLOCKDESC_U |
125 SVGA3DBLOCKDESC_V,
126 SVGA3DBLOCKDESC_UVL = SVGA3DBLOCKDESC_UV |
127 SVGA3DBLOCKDESC_LUMINANCE,
128 SVGA3DBLOCKDESC_UVW = SVGA3DBLOCKDESC_UV |
129 SVGA3DBLOCKDESC_W,
130 SVGA3DBLOCKDESC_UVWA = SVGA3DBLOCKDESC_UVW |
131 SVGA3DBLOCKDESC_ALPHA,
132 SVGA3DBLOCKDESC_UVWQ = SVGA3DBLOCKDESC_U |
133 SVGA3DBLOCKDESC_V |
134 SVGA3DBLOCKDESC_W |
135 SVGA3DBLOCKDESC_Q,
136 SVGA3DBLOCKDESC_LA = SVGA3DBLOCKDESC_LUMINANCE |
137 SVGA3DBLOCKDESC_ALPHA,
138 SVGA3DBLOCKDESC_R_FP = SVGA3DBLOCKDESC_RED |
139 SVGA3DBLOCKDESC_IEEE_FP,
140 SVGA3DBLOCKDESC_RG_FP = SVGA3DBLOCKDESC_R_FP |
141 SVGA3DBLOCKDESC_GREEN,
142 SVGA3DBLOCKDESC_RGB_FP = SVGA3DBLOCKDESC_RG_FP |
143 SVGA3DBLOCKDESC_BLUE,
144 SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RGB_FP |
145 SVGA3DBLOCKDESC_ALPHA,
146 SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH |
147 SVGA3DBLOCKDESC_STENCIL,
148 SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_UV_VIDEO |
149 SVGA3DBLOCKDESC_Y,
150 SVGA3DBLOCKDESC_AYUV = SVGA3DBLOCKDESC_ALPHA |
151 SVGA3DBLOCKDESC_Y |
152 SVGA3DBLOCKDESC_U_VIDEO |
153 SVGA3DBLOCKDESC_V_VIDEO,
154 SVGA3DBLOCKDESC_RGBE = SVGA3DBLOCKDESC_RGB |
155 SVGA3DBLOCKDESC_EXP,
156 SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
157 SVGA3DBLOCKDESC_SRGB,
158 SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
159 SVGA3DBLOCKDESC_2PLANAR_YUV,
160 SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
161 SVGA3DBLOCKDESC_3PLANAR_YUV,
162};
163
164/*
165 * SVGA3dSurfaceDesc describes the actual pixel data.
166 *
167 * This structure provides the following information:
168 * 1. Block description.
169 * 2. Dimensions of a block in the surface.
170 * 3. Size of block in bytes.
171 * 4. Bit depth of the pixel data.
172 * 5. Channel bit depths and masks (if applicable).
173 */
174#define SVGA3D_CHANNEL_DEF(type) \
175 struct { \
176 union { \
177 type blue; \
178 type u; \
179 type uv_video; \
180 type u_video; \
181 }; \
182 union { \
183 type green; \
184 type v; \
185 type stencil; \
186 type v_video; \
187 }; \
188 union { \
189 type red; \
190 type w; \
191 type luminance; \
192 type y; \
193 type depth; \
194 type data; \
195 }; \
196 union { \
197 type alpha; \
198 type q; \
199 type exp; \
200 }; \
201 }
202
203struct svga3d_surface_desc {
204 enum svga3d_block_desc block_desc;
205 surf_size_struct block_size;
206 u32 bytes_per_block;
207 u32 pitch_bytes_per_block;
208
209 struct {
210 u32 total;
211 SVGA3D_CHANNEL_DEF(uint8);
212 } bit_depth;
213
214 struct {
215 SVGA3D_CHANNEL_DEF(uint8);
216 } bit_offset;
217};
218
219static const struct svga3d_surface_desc svga3d_surface_descs[] = {
220 {SVGA3DBLOCKDESC_NONE,
221 {1, 1, 1}, 0, 0, {0, {{0}, {0}, {0}, {0} } },
222 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_FORMAT_INVALID */
223
224 {SVGA3DBLOCKDESC_RGB,
225 {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
226 {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_X8R8G8B8 */
227
228 {SVGA3DBLOCKDESC_RGBA,
229 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
230 {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_A8R8G8B8 */
231
232 {SVGA3DBLOCKDESC_RGB,
233 {1, 1, 1}, 2, 2, {16, {{5}, {6}, {5}, {0} } },
234 {{{0}, {5}, {11}, {0} } } }, /* SVGA3D_R5G6B5 */
235
236 {SVGA3DBLOCKDESC_RGB,
237 {1, 1, 1}, 2, 2, {15, {{5}, {5}, {5}, {0} } },
238 {{{0}, {5}, {10}, {0} } } }, /* SVGA3D_X1R5G5B5 */
239
240 {SVGA3DBLOCKDESC_RGBA,
241 {1, 1, 1}, 2, 2, {16, {{5}, {5}, {5}, {1} } },
242 {{{0}, {5}, {10}, {15} } } }, /* SVGA3D_A1R5G5B5 */
243
244 {SVGA3DBLOCKDESC_RGBA,
245 {1, 1, 1}, 2, 2, {16, {{4}, {4}, {4}, {4} } },
246 {{{0}, {4}, {8}, {12} } } }, /* SVGA3D_A4R4G4B4 */
247
248 {SVGA3DBLOCKDESC_DEPTH,
249 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
250 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D32 */
251
252 {SVGA3DBLOCKDESC_DEPTH,
253 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
254 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D16 */
255
256 {SVGA3DBLOCKDESC_DS,
257 {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
258 {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8 */
259
260 {SVGA3DBLOCKDESC_DS,
261 {1, 1, 1}, 2, 2, {16, {{0}, {1}, {15}, {0} } },
262 {{{0}, {15}, {0}, {0} } } }, /* SVGA3D_Z_D15S1 */
263
264 {SVGA3DBLOCKDESC_LUMINANCE,
265 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
266 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE8 */
267
268 {SVGA3DBLOCKDESC_LA,
269 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {4}, {4} } },
270 {{{0}, {0}, {0}, {4} } } }, /* SVGA3D_LUMINANCE4_ALPHA4 */
271
272 {SVGA3DBLOCKDESC_LUMINANCE,
273 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
274 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE16 */
275
276 {SVGA3DBLOCKDESC_LA,
277 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } },
278 {{{0}, {0}, {0}, {8} } } }, /* SVGA3D_LUMINANCE8_ALPHA8 */
279
280 {SVGA3DBLOCKDESC_COMPRESSED,
281 {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
282 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT1 */
283
284 {SVGA3DBLOCKDESC_COMPRESSED,
285 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
286 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT2 */
287
288 {SVGA3DBLOCKDESC_COMPRESSED,
289 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
290 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT3 */
291
292 {SVGA3DBLOCKDESC_COMPRESSED,
293 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
294 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT4 */
295
296 {SVGA3DBLOCKDESC_COMPRESSED,
297 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
298 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT5 */
299
300 {SVGA3DBLOCKDESC_UV,
301 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } },
302 {{{0}, {0}, {0}, {8} } } }, /* SVGA3D_BUMPU8V8 */
303
304 {SVGA3DBLOCKDESC_UVL,
305 {1, 1, 1}, 2, 2, {16, {{5}, {5}, {6}, {0} } },
306 {{{11}, {6}, {0}, {0} } } }, /* SVGA3D_BUMPL6V5U5 */
307
308 {SVGA3DBLOCKDESC_UVL,
309 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {0} } },
310 {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPX8L8V8U8 */
311
312 {SVGA3DBLOCKDESC_UVL,
313 {1, 1, 1}, 3, 3, {24, {{8}, {8}, {8}, {0} } },
314 {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPL8V8U8 */
315
316 {SVGA3DBLOCKDESC_RGBA_FP,
317 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
318 {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_ARGB_S10E5 */
319
320 {SVGA3DBLOCKDESC_RGBA_FP,
321 {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
322 {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_ARGB_S23E8 */
323
324 {SVGA3DBLOCKDESC_RGBA,
325 {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
326 {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2R10G10B10 */
327
328 {SVGA3DBLOCKDESC_UV,
329 {1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } },
330 {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_V8U8 */
331
332 {SVGA3DBLOCKDESC_UVWQ,
333 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
334 {{{24}, {16}, {8}, {0} } } }, /* SVGA3D_Q8W8V8U8 */
335
336 {SVGA3DBLOCKDESC_UV,
337 {1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } },
338 {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_CxV8U8 */
339
340 {SVGA3DBLOCKDESC_UVL,
341 {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
342 {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_X8L8V8U8 */
343
344 {SVGA3DBLOCKDESC_UVWA,
345 {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
346 {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2W10V10U10 */
347
348 {SVGA3DBLOCKDESC_ALPHA,
349 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {0}, {8} } },
350 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_ALPHA8 */
351
352 {SVGA3DBLOCKDESC_R_FP,
353 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
354 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S10E5 */
355
356 {SVGA3DBLOCKDESC_R_FP,
357 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
358 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S23E8 */
359
360 {SVGA3DBLOCKDESC_RG_FP,
361 {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
362 {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_RG_S10E5 */
363
364 {SVGA3DBLOCKDESC_RG_FP,
365 {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
366 {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_RG_S23E8 */
367
368 {SVGA3DBLOCKDESC_BUFFER,
369 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
370 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BUFFER */
371
372 {SVGA3DBLOCKDESC_DEPTH,
373 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } },
374 {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24X8 */
375
376 {SVGA3DBLOCKDESC_UV,
377 {1, 1, 1}, 4, 4, {32, {{16}, {16}, {0}, {0} } },
378 {{{16}, {0}, {0}, {0} } } }, /* SVGA3D_V16U16 */
379
380 {SVGA3DBLOCKDESC_RG,
381 {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
382 {{{0}, {0}, {16}, {0} } } }, /* SVGA3D_G16R16 */
383
384 {SVGA3DBLOCKDESC_RGBA,
385 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
386 {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_A16B16G16R16 */
387
388 {SVGA3DBLOCKDESC_YUV,
389 {1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } },
390 {{{0}, {0}, {8}, {0} } } }, /* SVGA3D_UYVY */
391
392 {SVGA3DBLOCKDESC_YUV,
393 {1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } },
394 {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_YUY2 */
395
396 {SVGA3DBLOCKDESC_NV12,
397 {2, 2, 1}, 6, 2, {48, {{0}, {0}, {48}, {0} } },
398 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_NV12 */
399
400 {SVGA3DBLOCKDESC_AYUV,
401 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
402 {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_AYUV */
403
404 {SVGA3DBLOCKDESC_RGBA,
405 {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
406 {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_TYPELESS */
407
408 {SVGA3DBLOCKDESC_RGBA,
409 {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
410 {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_UINT */
411
412 {SVGA3DBLOCKDESC_UVWQ,
413 {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
414 {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_SINT */
415
416 {SVGA3DBLOCKDESC_RGB,
417 {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
418 {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_TYPELESS */
419
420 {SVGA3DBLOCKDESC_RGB_FP,
421 {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
422 {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_FLOAT */
423
424 {SVGA3DBLOCKDESC_RGB,
425 {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
426 {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_UINT */
427
428 {SVGA3DBLOCKDESC_UVW,
429 {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
430 {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_SINT */
431
432 {SVGA3DBLOCKDESC_RGBA,
433 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
434 {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_TYPELESS */
435
436 {SVGA3DBLOCKDESC_RGBA,
437 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
438 {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_UINT */
439
440 {SVGA3DBLOCKDESC_UVWQ,
441 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
442 {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SNORM */
443
444 {SVGA3DBLOCKDESC_UVWQ,
445 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
446 {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SINT */
447
448 {SVGA3DBLOCKDESC_RG,
449 {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
450 {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_TYPELESS */
451
452 {SVGA3DBLOCKDESC_RG,
453 {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
454 {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_UINT */
455
456 {SVGA3DBLOCKDESC_UV,
457 {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
458 {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_SINT */
459
460 {SVGA3DBLOCKDESC_RG,
461 {1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } },
462 {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G8X24_TYPELESS */
463
464 {SVGA3DBLOCKDESC_DS,
465 {1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } },
466 {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT_S8X24_UINT */
467
468 {SVGA3DBLOCKDESC_R_FP,
469 {1, 1, 1}, 8, 8, {64, {{0}, {0}, {32}, {0} } },
470 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_FLOAT_X8_X24_TYPELESS */
471
472 {SVGA3DBLOCKDESC_GREEN,
473 {1, 1, 1}, 8, 8, {64, {{0}, {8}, {0}, {0} } },
474 {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_X32_TYPELESS_G8X24_UINT */
475
476 {SVGA3DBLOCKDESC_RGBA,
477 {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
478 {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_TYPELESS */
479
480 {SVGA3DBLOCKDESC_RGBA,
481 {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
482 {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_UINT */
483
484 {SVGA3DBLOCKDESC_RGB_FP,
485 {1, 1, 1}, 4, 4, {32, {{10}, {11}, {11}, {0} } },
486 {{{0}, {10}, {21}, {0} } } }, /* SVGA3D_R11G11B10_FLOAT */
487
488 {SVGA3DBLOCKDESC_RGBA,
489 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
490 {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_TYPELESS */
491
492 {SVGA3DBLOCKDESC_RGBA,
493 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
494 {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM */
495
496 {SVGA3DBLOCKDESC_RGBA_SRGB,
497 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
498 {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM_SRGB */
499
500 {SVGA3DBLOCKDESC_RGBA,
501 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
502 {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UINT */
503
504 {SVGA3DBLOCKDESC_RGBA,
505 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
506 {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_SINT */
507
508 {SVGA3DBLOCKDESC_RG,
509 {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
510 {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_TYPELESS */
511
512 {SVGA3DBLOCKDESC_RG_FP,
513 {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
514 {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_UINT */
515
516 {SVGA3DBLOCKDESC_UV,
517 {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
518 {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_SINT */
519
520 {SVGA3DBLOCKDESC_RED,
521 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
522 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_TYPELESS */
523
524 {SVGA3DBLOCKDESC_DEPTH,
525 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
526 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT */
527
528 {SVGA3DBLOCKDESC_RED,
529 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
530 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_UINT */
531
532 {SVGA3DBLOCKDESC_RED,
533 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
534 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_SINT */
535
536 {SVGA3DBLOCKDESC_RG,
537 {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
538 {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_R24G8_TYPELESS */
539
540 {SVGA3DBLOCKDESC_DS,
541 {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
542 {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_D24_UNORM_S8_UINT */
543
544 {SVGA3DBLOCKDESC_RED,
545 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } },
546 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R24_UNORM_X8_TYPELESS */
547
548 {SVGA3DBLOCKDESC_GREEN,
549 {1, 1, 1}, 4, 4, {32, {{0}, {8}, {0}, {0} } },
550 {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_X24_TYPELESS_G8_UINT */
551
552 {SVGA3DBLOCKDESC_RG,
553 {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
554 {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_TYPELESS */
555
556 {SVGA3DBLOCKDESC_RG,
557 {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
558 {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UNORM */
559
560 {SVGA3DBLOCKDESC_RG,
561 {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
562 {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UINT */
563
564 {SVGA3DBLOCKDESC_UV,
565 {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
566 {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_SINT */
567
568 {SVGA3DBLOCKDESC_RED,
569 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
570 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_TYPELESS */
571
572 {SVGA3DBLOCKDESC_RED,
573 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
574 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UNORM */
575
576 {SVGA3DBLOCKDESC_RED,
577 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
578 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UINT */
579
580 {SVGA3DBLOCKDESC_U,
581 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
582 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SNORM */
583
584 {SVGA3DBLOCKDESC_U,
585 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
586 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SINT */
587
588 {SVGA3DBLOCKDESC_RED,
589 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
590 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_TYPELESS */
591
592 {SVGA3DBLOCKDESC_RED,
593 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
594 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UNORM */
595
596 {SVGA3DBLOCKDESC_RED,
597 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
598 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UINT */
599
600 {SVGA3DBLOCKDESC_U,
601 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
602 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SNORM */
603
604 {SVGA3DBLOCKDESC_U,
605 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
606 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SINT */
607
608 {SVGA3DBLOCKDESC_RED,
609 {8, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
610 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R1_UNORM */
611
612 {SVGA3DBLOCKDESC_RGBE,
613 {1, 1, 1}, 4, 4, {32, {{9}, {9}, {9}, {5} } },
614 {{{18}, {9}, {0}, {27} } } }, /* SVGA3D_R9G9B9E5_SHAREDEXP */
615
616 {SVGA3DBLOCKDESC_RG,
617 {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
618 {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_B8G8_UNORM */
619
620 {SVGA3DBLOCKDESC_RG,
621 {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
622 {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_G8R8_G8B8_UNORM */
623
624 {SVGA3DBLOCKDESC_COMPRESSED,
625 {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
626 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_TYPELESS */
627
628 {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
629 {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
630 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_UNORM_SRGB */
631
632 {SVGA3DBLOCKDESC_COMPRESSED,
633 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
634 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_TYPELESS */
635
636 {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
637 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
638 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_UNORM_SRGB */
639
640 {SVGA3DBLOCKDESC_COMPRESSED,
641 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
642 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_TYPELESS */
643
644 {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
645 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
646 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_UNORM_SRGB */
647
648 {SVGA3DBLOCKDESC_COMPRESSED,
649 {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
650 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_TYPELESS */
651
652 {SVGA3DBLOCKDESC_COMPRESSED,
653 {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
654 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_UNORM */
655
656 {SVGA3DBLOCKDESC_COMPRESSED,
657 {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
658 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_SNORM */
659
660 {SVGA3DBLOCKDESC_COMPRESSED,
661 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
662 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_TYPELESS */
663
664 {SVGA3DBLOCKDESC_COMPRESSED,
665 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
666 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_UNORM */
667
668 {SVGA3DBLOCKDESC_COMPRESSED,
669 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
670 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_SNORM */
671
672 {SVGA3DBLOCKDESC_RGBA,
673 {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
674 {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10_XR_BIAS_A2_UNORM */
675
676 {SVGA3DBLOCKDESC_RGBA,
677 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
678 {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_TYPELESS */
679
680 {SVGA3DBLOCKDESC_RGBA_SRGB,
681 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
682 {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_UNORM_SRGB */
683
684 {SVGA3DBLOCKDESC_RGB,
685 {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
686 {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_TYPELESS */
687
688 {SVGA3DBLOCKDESC_RGB_SRGB,
689 {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
690 {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_UNORM_SRGB */
691
692 {SVGA3DBLOCKDESC_DEPTH,
693 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
694 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_DF16 */
695
696 {SVGA3DBLOCKDESC_DS,
697 {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
698 {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_DF24 */
699
700 {SVGA3DBLOCKDESC_DS,
701 {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
702 {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8_INT */
703};
704
705static inline u32 clamped_umul32(u32 a, u32 b)
706{
707 uint64_t tmp = (uint64_t) a*b;
708 return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
709}
710
711static inline const struct svga3d_surface_desc *
712svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
713{
714 if (format < ARRAY_SIZE(svga3d_surface_descs))
715 return &svga3d_surface_descs[format];
716
717 return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
718}
719
720/*
721 *----------------------------------------------------------------------
722 *
723 * svga3dsurface_get_mip_size --
724 *
725 * Given a base level size and the mip level, compute the size of
726 * the mip level.
727 *
728 * Results:
729 * See above.
730 *
731 * Side effects:
732 * None.
733 *
734 *----------------------------------------------------------------------
735 */
736
737static inline surf_size_struct
738svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
739{
740 surf_size_struct size;
741
742 size.width = max_t(u32, base_level.width >> mip_level, 1);
743 size.height = max_t(u32, base_level.height >> mip_level, 1);
744 size.depth = max_t(u32, base_level.depth >> mip_level, 1);
745 return size;
746}
747
748static inline void
749svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
750 const surf_size_struct *pixel_size,
751 surf_size_struct *block_size)
752{
753 block_size->width = DIV_ROUND_UP(pixel_size->width,
754 desc->block_size.width);
755 block_size->height = DIV_ROUND_UP(pixel_size->height,
756 desc->block_size.height);
757 block_size->depth = DIV_ROUND_UP(pixel_size->depth,
758 desc->block_size.depth);
759}
760
761static inline bool
762svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
763{
764 return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
765}
766
767static inline u32
768svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
769 const surf_size_struct *size)
770{
771 u32 pitch;
772 surf_size_struct blocks;
773
774 svga3dsurface_get_size_in_blocks(desc, size, &blocks);
775
776 pitch = blocks.width * desc->pitch_bytes_per_block;
777
778 return pitch;
779}
780
781/*
782 *-----------------------------------------------------------------------------
783 *
784 * svga3dsurface_get_image_buffer_size --
785 *
786 * Return the number of bytes of buffer space required to store
787 * one image of a surface, optionally using the specified pitch.
788 *
789 * If pitch is zero, it is assumed that rows are tightly packed.
790 *
791 * This function is overflow-safe. If the result would have
792 * overflowed, instead we return MAX_UINT32.
793 *
794 * Results:
795 * Byte count.
796 *
797 * Side effects:
798 * None.
799 *
800 *-----------------------------------------------------------------------------
801 */
802
803static inline u32
804svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
805 const surf_size_struct *size,
806 u32 pitch)
807{
808 surf_size_struct image_blocks;
809 u32 slice_size, total_size;
810
811 svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);
812
813 if (svga3dsurface_is_planar_surface(desc)) {
814 total_size = clamped_umul32(image_blocks.width,
815 image_blocks.height);
816 total_size = clamped_umul32(total_size, image_blocks.depth);
817 total_size = clamped_umul32(total_size, desc->bytes_per_block);
818 return total_size;
819 }
820
821 if (pitch == 0)
822 pitch = svga3dsurface_calculate_pitch(desc, size);
823
824 slice_size = clamped_umul32(image_blocks.height, pitch);
825 total_size = clamped_umul32(slice_size, image_blocks.depth);
826
827 return total_size;
828}
829
830static inline u32
831svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
832 surf_size_struct base_level_size,
833 u32 num_mip_levels,
834 bool cubemap)
835{
836 const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
837 u32 total_size = 0;
838 u32 mip;
839
840 for (mip = 0; mip < num_mip_levels; mip++) {
841 surf_size_struct size =
842 svga3dsurface_get_mip_size(base_level_size, mip);
843 total_size += svga3dsurface_get_image_buffer_size(desc,
844 &size, 0);
845 }
846
847 if (cubemap)
848 total_size *= SVGA3D_MAX_SURFACE_FACES;
849
850 return total_size;
851}
852
853
854/**
855 * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
856 * in an image (or volume).
857 *
858 * @width: The image width in pixels.
859 * @height: The image height in pixels
860 */
861static inline u32
862svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
863 u32 width, u32 height,
864 u32 x, u32 y, u32 z)
865{
866 const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
867 const u32 bw = desc->block_size.width, bh = desc->block_size.height;
868 const u32 bd = desc->block_size.depth;
869 const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
870 const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
871 const u32 offset = (z / bd * imgstride +
872 y / bh * rowstride +
873 x / bw * desc->bytes_per_block);
874 return offset;
875}
876
877
878static inline u32
879svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
880 surf_size_struct baseLevelSize,
881 u32 numMipLevels,
882 u32 face,
883 u32 mip)
884
885{
886 u32 offset;
887 u32 mipChainBytes;
888 u32 mipChainBytesToLevel;
889 u32 i;
890 const struct svga3d_surface_desc *desc;
891 surf_size_struct mipSize;
892 u32 bytes;
893
894 desc = svga3dsurface_get_desc(format);
895
896 mipChainBytes = 0;
897 mipChainBytesToLevel = 0;
898 for (i = 0; i < numMipLevels; i++) {
899 mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
900 bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
901 mipChainBytes += bytes;
902 if (i < mip)
903 mipChainBytesToLevel += bytes;
904 }
905
906 offset = mipChainBytes * face + mipChainBytesToLevel;
907
908 return offset;
909}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 9826fbc88154..96dc84dc34d0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -248,13 +248,12 @@ void vmw_evict_flags(struct ttm_buffer_object *bo,
248 *placement = vmw_sys_placement; 248 *placement = vmw_sys_placement;
249} 249}
250 250
251/**
252 * FIXME: Proper access checks on buffers.
253 */
254
255static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp) 251static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
256{ 252{
257 return 0; 253 struct ttm_object_file *tfile =
254 vmw_fpriv((struct drm_file *)filp->private_data)->tfile;
255
256 return vmw_user_dmabuf_verify_access(bo, tfile);
258} 257}
259 258
260static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) 259static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
@@ -310,27 +309,23 @@ static void vmw_sync_obj_unref(void **sync_obj)
310 vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj); 309 vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
311} 310}
312 311
313static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg) 312static int vmw_sync_obj_flush(void *sync_obj)
314{ 313{
315 vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj); 314 vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
316 return 0; 315 return 0;
317} 316}
318 317
319static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg) 318static bool vmw_sync_obj_signaled(void *sync_obj)
320{ 319{
321 unsigned long flags = (unsigned long) sync_arg;
322 return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj, 320 return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
323 (uint32_t) flags); 321 DRM_VMW_FENCE_FLAG_EXEC);
324 322
325} 323}
326 324
327static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg, 325static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
328 bool lazy, bool interruptible)
329{ 326{
330 unsigned long flags = (unsigned long) sync_arg;
331
332 return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj, 327 return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
333 (uint32_t) flags, 328 DRM_VMW_FENCE_FLAG_EXEC,
334 lazy, interruptible, 329 lazy, interruptible,
335 VMW_FENCE_WAIT_TIMEOUT); 330 VMW_FENCE_WAIT_TIMEOUT);
336} 331}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
new file mode 100644
index 000000000000..00ae0925aca8
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -0,0 +1,274 @@
1/**************************************************************************
2 *
3 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "vmwgfx_resource_priv.h"
30#include "ttm/ttm_placement.h"
31
/**
 * struct vmw_user_context - User-space visible context resource.
 *
 * @base: TTM base object, providing the user-space handle and its
 *        reference counting.
 * @res: The embedded context resource tracked by the resource manager.
 */
struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
};

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

/* Accounted size of one user context; computed lazily at first define ioctl. */
static uint64_t vmw_user_context_size;

/* Conversion from a user-space handle's base object to the context resource. */
static const struct vmw_user_resource_conv user_context_conv = {
	.object_type = VMW_RES_CONTEXT,
	.base_obj_to_res = vmw_user_context_base_to_res,
	.res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
	&user_context_conv;


/*
 * Legacy contexts carry no backup storage and are never evicted, so all
 * create/destroy/bind/unbind hooks are unused.
 */
static const struct vmw_res_func vmw_legacy_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "legacy contexts",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};
64
65/**
66 * Context management:
67 */
68
69static void vmw_hw_context_destroy(struct vmw_resource *res)
70{
71
72 struct vmw_private *dev_priv = res->dev_priv;
73 struct {
74 SVGA3dCmdHeader header;
75 SVGA3dCmdDestroyContext body;
76 } *cmd;
77
78
79 vmw_execbuf_release_pinned_bo(dev_priv);
80 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
81 if (unlikely(cmd == NULL)) {
82 DRM_ERROR("Failed reserving FIFO space for surface "
83 "destruction.\n");
84 return;
85 }
86
87 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
88 cmd->header.size = cpu_to_le32(sizeof(cmd->body));
89 cmd->body.cid = cpu_to_le32(res->id);
90
91 vmw_fifo_commit(dev_priv, sizeof(*cmd));
92 vmw_3d_resource_dec(dev_priv, false);
93}
94
/*
 * vmw_context_init - Initialize a context resource and define the
 * context on the device.
 *
 * @dev_priv: Device private.
 * @res: Caller-allocated resource to initialize.
 * @res_free: Destructor for @res, or NULL to use kfree().
 *
 * Returns 0 on success, negative error code on failure. On failure,
 * ownership of @res is consumed: it is released either through
 * vmw_resource_unreference() (once initialized) or directly via
 * @res_free / kfree() (if resource init itself failed).
 */
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	ret = vmw_resource_init(dev_priv, res, false,
				res_free, &vmw_legacy_context_func);

	if (unlikely(ret != 0)) {
		/* Not yet initialized: free @res by hand below. */
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	/* Legacy contexts must fit within the device's fixed id range. */
	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	/* Hold a 3D resource while the context exists on the device. */
	(void) vmw_3d_resource_inc(dev_priv, false);
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;

out_early:
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}
143
144struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
145{
146 struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
147 int ret;
148
149 if (unlikely(res == NULL))
150 return NULL;
151
152 ret = vmw_context_init(dev_priv, res, NULL);
153
154 return (ret == 0) ? res : NULL;
155}
156
157/**
158 * User-space context management:
159 */
160
161static struct vmw_resource *
162vmw_user_context_base_to_res(struct ttm_base_object *base)
163{
164 return &(container_of(base, struct vmw_user_context, base)->res);
165}
166
/*
 * vmw_user_context_free - Resource destructor for user contexts.
 *
 * Frees the containing vmw_user_context and returns its accounted size
 * to the global TTM memory accounting. dev_priv is captured before the
 * free, since @res lives inside the object being released.
 */
static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
		container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(ctx, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_context_size);
}
177
/*
 * vmw_user_context_base_release - Called when user space drops its last
 * reference on the base object. Releases the base object's reference on
 * the embedded resource; the resource destructor runs when that was the
 * final reference.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
		container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
193
194int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
195 struct drm_file *file_priv)
196{
197 struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
198 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
199
200 return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
201}
202
/*
 * vmw_context_define_ioctl - Create a context and return its user-space
 * handle in @arg->cid.
 *
 * Accounts the object size with the global TTM memory accounting,
 * initializes the context resource, then publishes it as a ttm base
 * object. Reference flow: vmw_context_init() hands us one resource
 * reference; a second is taken for the base object, which releases it
 * again from vmw_user_context_base_release(). The local reference is
 * always dropped at out_err.
 */
int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;


	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of contexts anyway.
	 */

	if (unlikely(vmw_user_context_size == 0))
		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_context_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for context"
				  " creation.\n");
		goto out_unlock;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(ctx == NULL)) {
		/* Return the accounted size reserved above. */
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_context_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	/* Extra reference held by the base object until base_release. */
	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = ctx->base.hash.key;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;

}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index 3ce68a2e312d..5fae06ad7e25 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -60,13 +60,13 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
60 if (unlikely(ret != 0)) 60 if (unlikely(ret != 0))
61 return ret; 61 return ret;
62 62
63 vmw_execbuf_release_pinned_bo(dev_priv, false, 0); 63 vmw_execbuf_release_pinned_bo(dev_priv);
64 64
65 ret = ttm_bo_reserve(bo, interruptible, false, false, 0); 65 ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
66 if (unlikely(ret != 0)) 66 if (unlikely(ret != 0))
67 goto err; 67 goto err;
68 68
69 ret = ttm_bo_validate(bo, placement, interruptible, false, false); 69 ret = ttm_bo_validate(bo, placement, interruptible, false);
70 70
71 ttm_bo_unreserve(bo); 71 ttm_bo_unreserve(bo);
72 72
@@ -105,7 +105,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
105 return ret; 105 return ret;
106 106
107 if (pin) 107 if (pin)
108 vmw_execbuf_release_pinned_bo(dev_priv, false, 0); 108 vmw_execbuf_release_pinned_bo(dev_priv);
109 109
110 ret = ttm_bo_reserve(bo, interruptible, false, false, 0); 110 ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
111 if (unlikely(ret != 0)) 111 if (unlikely(ret != 0))
@@ -123,7 +123,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
123 else 123 else
124 placement = &vmw_vram_gmr_placement; 124 placement = &vmw_vram_gmr_placement;
125 125
126 ret = ttm_bo_validate(bo, placement, interruptible, false, false); 126 ret = ttm_bo_validate(bo, placement, interruptible, false);
127 if (likely(ret == 0) || ret == -ERESTARTSYS) 127 if (likely(ret == 0) || ret == -ERESTARTSYS)
128 goto err_unreserve; 128 goto err_unreserve;
129 129
@@ -138,7 +138,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
138 else 138 else
139 placement = &vmw_vram_placement; 139 placement = &vmw_vram_placement;
140 140
141 ret = ttm_bo_validate(bo, placement, interruptible, false, false); 141 ret = ttm_bo_validate(bo, placement, interruptible, false);
142 142
143err_unreserve: 143err_unreserve:
144 ttm_bo_unreserve(bo); 144 ttm_bo_unreserve(bo);
@@ -214,8 +214,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
214 return ret; 214 return ret;
215 215
216 if (pin) 216 if (pin)
217 vmw_execbuf_release_pinned_bo(dev_priv, false, 0); 217 vmw_execbuf_release_pinned_bo(dev_priv);
218
219 ret = ttm_bo_reserve(bo, interruptible, false, false, 0); 218 ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
220 if (unlikely(ret != 0)) 219 if (unlikely(ret != 0))
221 goto err_unlock; 220 goto err_unlock;
@@ -224,10 +223,9 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
224 if (bo->mem.mem_type == TTM_PL_VRAM && 223 if (bo->mem.mem_type == TTM_PL_VRAM &&
225 bo->mem.start < bo->num_pages && 224 bo->mem.start < bo->num_pages &&
226 bo->mem.start > 0) 225 bo->mem.start > 0)
227 (void) ttm_bo_validate(bo, &vmw_sys_placement, false, 226 (void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);
228 false, false);
229 227
230 ret = ttm_bo_validate(bo, &placement, interruptible, false, false); 228 ret = ttm_bo_validate(bo, &placement, interruptible, false);
231 229
232 /* For some reason we didn't up at the start of vram */ 230 /* For some reason we didn't up at the start of vram */
233 WARN_ON(ret == 0 && bo->offset != 0); 231 WARN_ON(ret == 0 && bo->offset != 0);
@@ -304,9 +302,9 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
304 uint32_t old_mem_type = bo->mem.mem_type; 302 uint32_t old_mem_type = bo->mem.mem_type;
305 int ret; 303 int ret;
306 304
307 BUG_ON(!atomic_read(&bo->reserved)); 305 BUG_ON(!ttm_bo_is_reserved(bo));
308 BUG_ON(old_mem_type != TTM_PL_VRAM && 306 BUG_ON(old_mem_type != TTM_PL_VRAM &&
309 old_mem_type != VMW_PL_FLAG_GMR); 307 old_mem_type != VMW_PL_GMR);
310 308
311 pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED; 309 pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED;
312 if (pin) 310 if (pin)
@@ -316,7 +314,7 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
316 placement.num_placement = 1; 314 placement.num_placement = 1;
317 placement.placement = &pl_flags; 315 placement.placement = &pl_flags;
318 316
319 ret = ttm_bo_validate(bo, &placement, false, true, true); 317 ret = ttm_bo_validate(bo, &placement, false, true);
320 318
321 BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type); 319 BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
322} 320}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index ed3c1e7ddde9..161f8b2549aa 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -292,7 +292,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
292 PAGE_SIZE, 292 PAGE_SIZE,
293 ttm_bo_type_device, 293 ttm_bo_type_device,
294 &vmw_vram_sys_placement, 294 &vmw_vram_sys_placement,
295 0, 0, false, NULL, 295 0, false, NULL,
296 &dev_priv->dummy_query_bo); 296 &dev_priv->dummy_query_bo);
297} 297}
298 298
@@ -432,6 +432,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
432 struct vmw_private *dev_priv; 432 struct vmw_private *dev_priv;
433 int ret; 433 int ret;
434 uint32_t svga_id; 434 uint32_t svga_id;
435 enum vmw_res_type i;
435 436
436 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); 437 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
437 if (unlikely(dev_priv == NULL)) { 438 if (unlikely(dev_priv == NULL)) {
@@ -448,15 +449,18 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
448 mutex_init(&dev_priv->cmdbuf_mutex); 449 mutex_init(&dev_priv->cmdbuf_mutex);
449 mutex_init(&dev_priv->release_mutex); 450 mutex_init(&dev_priv->release_mutex);
450 rwlock_init(&dev_priv->resource_lock); 451 rwlock_init(&dev_priv->resource_lock);
451 idr_init(&dev_priv->context_idr); 452
452 idr_init(&dev_priv->surface_idr); 453 for (i = vmw_res_context; i < vmw_res_max; ++i) {
453 idr_init(&dev_priv->stream_idr); 454 idr_init(&dev_priv->res_idr[i]);
455 INIT_LIST_HEAD(&dev_priv->res_lru[i]);
456 }
457
454 mutex_init(&dev_priv->init_mutex); 458 mutex_init(&dev_priv->init_mutex);
455 init_waitqueue_head(&dev_priv->fence_queue); 459 init_waitqueue_head(&dev_priv->fence_queue);
456 init_waitqueue_head(&dev_priv->fifo_queue); 460 init_waitqueue_head(&dev_priv->fifo_queue);
457 dev_priv->fence_queue_waiters = 0; 461 dev_priv->fence_queue_waiters = 0;
458 atomic_set(&dev_priv->fifo_queue_waiters, 0); 462 atomic_set(&dev_priv->fifo_queue_waiters, 0);
459 INIT_LIST_HEAD(&dev_priv->surface_lru); 463
460 dev_priv->used_memory_size = 0; 464 dev_priv->used_memory_size = 0;
461 465
462 dev_priv->io_start = pci_resource_start(dev->pdev, 0); 466 dev_priv->io_start = pci_resource_start(dev->pdev, 0);
@@ -609,14 +613,18 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
609 } 613 }
610 } 614 }
611 615
616 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
617 ret = drm_irq_install(dev);
618 if (ret != 0) {
619 DRM_ERROR("Failed installing irq: %d\n", ret);
620 goto out_no_irq;
621 }
622 }
623
612 dev_priv->fman = vmw_fence_manager_init(dev_priv); 624 dev_priv->fman = vmw_fence_manager_init(dev_priv);
613 if (unlikely(dev_priv->fman == NULL)) 625 if (unlikely(dev_priv->fman == NULL))
614 goto out_no_fman; 626 goto out_no_fman;
615 627
616 /* Need to start the fifo to check if we can do screen objects */
617 ret = vmw_3d_resource_inc(dev_priv, true);
618 if (unlikely(ret != 0))
619 goto out_no_fifo;
620 vmw_kms_save_vga(dev_priv); 628 vmw_kms_save_vga(dev_priv);
621 629
622 /* Start kms and overlay systems, needs fifo. */ 630 /* Start kms and overlay systems, needs fifo. */
@@ -625,25 +633,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
625 goto out_no_kms; 633 goto out_no_kms;
626 vmw_overlay_init(dev_priv); 634 vmw_overlay_init(dev_priv);
627 635
628 /* 3D Depends on Screen Objects being used. */
629 DRM_INFO("Detected %sdevice 3D availability.\n",
630 vmw_fifo_have_3d(dev_priv) ?
631 "" : "no ");
632
633 /* We might be done with the fifo now */
634 if (dev_priv->enable_fb) { 636 if (dev_priv->enable_fb) {
637 ret = vmw_3d_resource_inc(dev_priv, true);
638 if (unlikely(ret != 0))
639 goto out_no_fifo;
635 vmw_fb_init(dev_priv); 640 vmw_fb_init(dev_priv);
636 } else {
637 vmw_kms_restore_vga(dev_priv);
638 vmw_3d_resource_dec(dev_priv, true);
639 }
640
641 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
642 ret = drm_irq_install(dev);
643 if (unlikely(ret != 0)) {
644 DRM_ERROR("Failed installing irq: %d\n", ret);
645 goto out_no_irq;
646 }
647 } 641 }
648 642
649 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; 643 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
@@ -651,20 +645,16 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
651 645
652 return 0; 646 return 0;
653 647
654out_no_irq: 648out_no_fifo:
655 if (dev_priv->enable_fb)
656 vmw_fb_close(dev_priv);
657 vmw_overlay_close(dev_priv); 649 vmw_overlay_close(dev_priv);
658 vmw_kms_close(dev_priv); 650 vmw_kms_close(dev_priv);
659out_no_kms: 651out_no_kms:
660 /* We still have a 3D resource reference held */ 652 vmw_kms_restore_vga(dev_priv);
661 if (dev_priv->enable_fb) {
662 vmw_kms_restore_vga(dev_priv);
663 vmw_3d_resource_dec(dev_priv, false);
664 }
665out_no_fifo:
666 vmw_fence_manager_takedown(dev_priv->fman); 653 vmw_fence_manager_takedown(dev_priv->fman);
667out_no_fman: 654out_no_fman:
655 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
656 drm_irq_uninstall(dev_priv->dev);
657out_no_irq:
668 if (dev_priv->stealth) 658 if (dev_priv->stealth)
669 pci_release_region(dev->pdev, 2); 659 pci_release_region(dev->pdev, 2);
670 else 660 else
@@ -684,9 +674,9 @@ out_err2:
684out_err1: 674out_err1:
685 vmw_ttm_global_release(dev_priv); 675 vmw_ttm_global_release(dev_priv);
686out_err0: 676out_err0:
687 idr_destroy(&dev_priv->surface_idr); 677 for (i = vmw_res_context; i < vmw_res_max; ++i)
688 idr_destroy(&dev_priv->context_idr); 678 idr_destroy(&dev_priv->res_idr[i]);
689 idr_destroy(&dev_priv->stream_idr); 679
690 kfree(dev_priv); 680 kfree(dev_priv);
691 return ret; 681 return ret;
692} 682}
@@ -694,13 +684,14 @@ out_err0:
694static int vmw_driver_unload(struct drm_device *dev) 684static int vmw_driver_unload(struct drm_device *dev)
695{ 685{
696 struct vmw_private *dev_priv = vmw_priv(dev); 686 struct vmw_private *dev_priv = vmw_priv(dev);
687 enum vmw_res_type i;
697 688
698 unregister_pm_notifier(&dev_priv->pm_nb); 689 unregister_pm_notifier(&dev_priv->pm_nb);
699 690
691 if (dev_priv->ctx.res_ht_initialized)
692 drm_ht_remove(&dev_priv->ctx.res_ht);
700 if (dev_priv->ctx.cmd_bounce) 693 if (dev_priv->ctx.cmd_bounce)
701 vfree(dev_priv->ctx.cmd_bounce); 694 vfree(dev_priv->ctx.cmd_bounce);
702 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
703 drm_irq_uninstall(dev_priv->dev);
704 if (dev_priv->enable_fb) { 695 if (dev_priv->enable_fb) {
705 vmw_fb_close(dev_priv); 696 vmw_fb_close(dev_priv);
706 vmw_kms_restore_vga(dev_priv); 697 vmw_kms_restore_vga(dev_priv);
@@ -709,6 +700,8 @@ static int vmw_driver_unload(struct drm_device *dev)
709 vmw_kms_close(dev_priv); 700 vmw_kms_close(dev_priv);
710 vmw_overlay_close(dev_priv); 701 vmw_overlay_close(dev_priv);
711 vmw_fence_manager_takedown(dev_priv->fman); 702 vmw_fence_manager_takedown(dev_priv->fman);
703 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
704 drm_irq_uninstall(dev_priv->dev);
712 if (dev_priv->stealth) 705 if (dev_priv->stealth)
713 pci_release_region(dev->pdev, 2); 706 pci_release_region(dev->pdev, 2);
714 else 707 else
@@ -723,9 +716,9 @@ static int vmw_driver_unload(struct drm_device *dev)
723 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); 716 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
724 (void)ttm_bo_device_release(&dev_priv->bdev); 717 (void)ttm_bo_device_release(&dev_priv->bdev);
725 vmw_ttm_global_release(dev_priv); 718 vmw_ttm_global_release(dev_priv);
726 idr_destroy(&dev_priv->surface_idr); 719
727 idr_destroy(&dev_priv->context_idr); 720 for (i = vmw_res_context; i < vmw_res_max; ++i)
728 idr_destroy(&dev_priv->stream_idr); 721 idr_destroy(&dev_priv->res_idr[i]);
729 722
730 kfree(dev_priv); 723 kfree(dev_priv);
731 724
@@ -924,11 +917,11 @@ static int vmw_master_set(struct drm_device *dev,
924 917
925out_no_active_lock: 918out_no_active_lock:
926 if (!dev_priv->enable_fb) { 919 if (!dev_priv->enable_fb) {
920 vmw_kms_restore_vga(dev_priv);
921 vmw_3d_resource_dec(dev_priv, true);
927 mutex_lock(&dev_priv->hw_mutex); 922 mutex_lock(&dev_priv->hw_mutex);
928 vmw_write(dev_priv, SVGA_REG_TRACES, 1); 923 vmw_write(dev_priv, SVGA_REG_TRACES, 1);
929 mutex_unlock(&dev_priv->hw_mutex); 924 mutex_unlock(&dev_priv->hw_mutex);
930 vmw_kms_restore_vga(dev_priv);
931 vmw_3d_resource_dec(dev_priv, true);
932 } 925 }
933 return ret; 926 return ret;
934} 927}
@@ -949,7 +942,7 @@ static void vmw_master_drop(struct drm_device *dev,
949 942
950 vmw_fp->locked_master = drm_master_get(file_priv->master); 943 vmw_fp->locked_master = drm_master_get(file_priv->master);
951 ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); 944 ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
952 vmw_execbuf_release_pinned_bo(dev_priv, false, 0); 945 vmw_execbuf_release_pinned_bo(dev_priv);
953 946
954 if (unlikely((ret != 0))) { 947 if (unlikely((ret != 0))) {
955 DRM_ERROR("Unable to lock TTM at VT switch.\n"); 948 DRM_ERROR("Unable to lock TTM at VT switch.\n");
@@ -962,11 +955,11 @@ static void vmw_master_drop(struct drm_device *dev,
962 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); 955 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
963 if (unlikely(ret != 0)) 956 if (unlikely(ret != 0))
964 DRM_ERROR("Unable to clean VRAM on master drop.\n"); 957 DRM_ERROR("Unable to clean VRAM on master drop.\n");
958 vmw_kms_restore_vga(dev_priv);
959 vmw_3d_resource_dec(dev_priv, true);
965 mutex_lock(&dev_priv->hw_mutex); 960 mutex_lock(&dev_priv->hw_mutex);
966 vmw_write(dev_priv, SVGA_REG_TRACES, 1); 961 vmw_write(dev_priv, SVGA_REG_TRACES, 1);
967 mutex_unlock(&dev_priv->hw_mutex); 962 mutex_unlock(&dev_priv->hw_mutex);
968 vmw_kms_restore_vga(dev_priv);
969 vmw_3d_resource_dec(dev_priv, true);
970 } 963 }
971 964
972 dev_priv->active_master = &dev_priv->fbdev_master; 965 dev_priv->active_master = &dev_priv->fbdev_master;
@@ -1001,7 +994,8 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
1001 * This empties VRAM and unbinds all GMR bindings. 994 * This empties VRAM and unbinds all GMR bindings.
1002 * Buffer contents is moved to swappable memory. 995 * Buffer contents is moved to swappable memory.
1003 */ 996 */
1004 vmw_execbuf_release_pinned_bo(dev_priv, false, 0); 997 vmw_execbuf_release_pinned_bo(dev_priv);
998 vmw_resource_evict_all(dev_priv);
1005 ttm_bo_swapout_all(&dev_priv->bdev); 999 ttm_bo_swapout_all(&dev_priv->bdev);
1006 1000
1007 break; 1001 break;
@@ -1098,6 +1092,11 @@ static void vmw_pm_complete(struct device *kdev)
1098 struct drm_device *dev = pci_get_drvdata(pdev); 1092 struct drm_device *dev = pci_get_drvdata(pdev);
1099 struct vmw_private *dev_priv = vmw_priv(dev); 1093 struct vmw_private *dev_priv = vmw_priv(dev);
1100 1094
1095 mutex_lock(&dev_priv->hw_mutex);
1096 vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
1097 (void) vmw_read(dev_priv, SVGA_REG_ID);
1098 mutex_unlock(&dev_priv->hw_mutex);
1099
1101 /** 1100 /**
1102 * Reclaim 3d reference held by fbdev and potentially 1101 * Reclaim 3d reference held by fbdev and potentially
1103 * start fifo. 1102 * start fifo.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 88a179e26de9..13aeda71280e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -67,31 +67,46 @@ struct vmw_fpriv {
67 67
68struct vmw_dma_buffer { 68struct vmw_dma_buffer {
69 struct ttm_buffer_object base; 69 struct ttm_buffer_object base;
70 struct list_head validate_list; 70 struct list_head res_list;
71 bool gmr_bound;
72 uint32_t cur_validate_node;
73 bool on_validate_list;
74}; 71};
75 72
73/**
74 * struct vmw_validate_buffer - Carries validation info about buffers.
75 *
76 * @base: Validation info for TTM.
77 * @hash: Hash entry for quick lookup of the TTM buffer object.
78 *
79 * This structure contains also driver private validation info
80 * on top of the info needed by TTM.
81 */
82struct vmw_validate_buffer {
83 struct ttm_validate_buffer base;
84 struct drm_hash_item hash;
85};
86
87struct vmw_res_func;
76struct vmw_resource { 88struct vmw_resource {
77 struct kref kref; 89 struct kref kref;
78 struct vmw_private *dev_priv; 90 struct vmw_private *dev_priv;
79 struct idr *idr;
80 int id; 91 int id;
81 enum ttm_object_type res_type;
82 bool avail; 92 bool avail;
83 void (*remove_from_lists) (struct vmw_resource *res); 93 unsigned long backup_size;
84 void (*hw_destroy) (struct vmw_resource *res); 94 bool res_dirty; /* Protected by backup buffer reserved */
95 bool backup_dirty; /* Protected by backup buffer reserved */
96 struct vmw_dma_buffer *backup;
97 unsigned long backup_offset;
98 const struct vmw_res_func *func;
99 struct list_head lru_head; /* Protected by the resource lock */
100 struct list_head mob_head; /* Protected by @backup reserved */
85 void (*res_free) (struct vmw_resource *res); 101 void (*res_free) (struct vmw_resource *res);
86 struct list_head validate_head; 102 void (*hw_destroy) (struct vmw_resource *res);
87 struct list_head query_head; /* Protected by the cmdbuf mutex */ 103};
88 /* TODO is a generic snooper needed? */ 104
89#if 0 105enum vmw_res_type {
90 void (*snoop)(struct vmw_resource *res, 106 vmw_res_context,
91 struct ttm_object_file *tfile, 107 vmw_res_surface,
92 SVGA3dCmdHeader *header); 108 vmw_res_stream,
93 void *snoop_priv; 109 vmw_res_max
94#endif
95}; 110};
96 111
97struct vmw_cursor_snooper { 112struct vmw_cursor_snooper {
@@ -105,20 +120,18 @@ struct vmw_surface_offset;
105 120
106struct vmw_surface { 121struct vmw_surface {
107 struct vmw_resource res; 122 struct vmw_resource res;
108 struct list_head lru_head; /* Protected by the resource lock */
109 uint32_t flags; 123 uint32_t flags;
110 uint32_t format; 124 uint32_t format;
111 uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES]; 125 uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
126 struct drm_vmw_size base_size;
112 struct drm_vmw_size *sizes; 127 struct drm_vmw_size *sizes;
113 uint32_t num_sizes; 128 uint32_t num_sizes;
114
115 bool scanout; 129 bool scanout;
116
117 /* TODO so far just a extra pointer */ 130 /* TODO so far just a extra pointer */
118 struct vmw_cursor_snooper snooper; 131 struct vmw_cursor_snooper snooper;
119 struct ttm_buffer_object *backup;
120 struct vmw_surface_offset *offsets; 132 struct vmw_surface_offset *offsets;
121 uint32_t backup_size; 133 SVGA3dTextureFilter autogen_filter;
134 uint32_t multisample_count;
122}; 135};
123 136
124struct vmw_marker_queue { 137struct vmw_marker_queue {
@@ -145,29 +158,46 @@ struct vmw_relocation {
145 uint32_t index; 158 uint32_t index;
146}; 159};
147 160
161/**
162 * struct vmw_res_cache_entry - resource information cache entry
163 *
164 * @valid: Whether the entry is valid, which also implies that the execbuf
165 * code holds a reference to the resource, and it's placed on the
166 * validation list.
167 * @handle: User-space handle of a resource.
168 * @res: Non-ref-counted pointer to the resource.
169 *
170 * Used to avoid frequent repeated user-space handle lookups of the
171 * same resource.
172 */
173struct vmw_res_cache_entry {
174 bool valid;
175 uint32_t handle;
176 struct vmw_resource *res;
177 struct vmw_resource_val_node *node;
178};
179
148struct vmw_sw_context{ 180struct vmw_sw_context{
149 struct ida bo_list; 181 struct drm_open_hash res_ht;
150 uint32_t last_cid; 182 bool res_ht_initialized;
151 bool cid_valid;
152 bool kernel; /**< is the called made from the kernel */ 183 bool kernel; /**< is the called made from the kernel */
153 struct vmw_resource *cur_ctx;
154 uint32_t last_sid;
155 uint32_t sid_translation;
156 bool sid_valid;
157 struct ttm_object_file *tfile; 184 struct ttm_object_file *tfile;
158 struct list_head validate_nodes; 185 struct list_head validate_nodes;
159 struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS]; 186 struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
160 uint32_t cur_reloc; 187 uint32_t cur_reloc;
161 struct ttm_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS]; 188 struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
162 uint32_t cur_val_buf; 189 uint32_t cur_val_buf;
163 uint32_t *cmd_bounce; 190 uint32_t *cmd_bounce;
164 uint32_t cmd_bounce_size; 191 uint32_t cmd_bounce_size;
165 struct list_head resource_list; 192 struct list_head resource_list;
166 uint32_t fence_flags; 193 uint32_t fence_flags;
167 struct list_head query_list;
168 struct ttm_buffer_object *cur_query_bo; 194 struct ttm_buffer_object *cur_query_bo;
169 uint32_t cur_query_cid; 195 struct list_head res_relocations;
170 bool query_cid_valid; 196 uint32_t *buf_start;
197 struct vmw_res_cache_entry res_cache[vmw_res_max];
198 struct vmw_resource *last_query_ctx;
199 bool needs_post_query_barrier;
200 struct vmw_resource *error_resource;
171}; 201};
172 202
173struct vmw_legacy_display; 203struct vmw_legacy_display;
@@ -242,10 +272,7 @@ struct vmw_private {
242 */ 272 */
243 273
244 rwlock_t resource_lock; 274 rwlock_t resource_lock;
245 struct idr context_idr; 275 struct idr res_idr[vmw_res_max];
246 struct idr surface_idr;
247 struct idr stream_idr;
248
249 /* 276 /*
250 * Block lastclose from racing with firstopen. 277 * Block lastclose from racing with firstopen.
251 */ 278 */
@@ -320,6 +347,7 @@ struct vmw_private {
320 struct ttm_buffer_object *dummy_query_bo; 347 struct ttm_buffer_object *dummy_query_bo;
321 struct ttm_buffer_object *pinned_bo; 348 struct ttm_buffer_object *pinned_bo;
322 uint32_t query_cid; 349 uint32_t query_cid;
350 uint32_t query_cid_valid;
323 bool dummy_query_bo_pinned; 351 bool dummy_query_bo_pinned;
324 352
325 /* 353 /*
@@ -329,10 +357,15 @@ struct vmw_private {
329 * protected by the cmdbuf mutex for simplicity. 357 * protected by the cmdbuf mutex for simplicity.
330 */ 358 */
331 359
332 struct list_head surface_lru; 360 struct list_head res_lru[vmw_res_max];
333 uint32_t used_memory_size; 361 uint32_t used_memory_size;
334}; 362};
335 363
364static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
365{
366 return container_of(res, struct vmw_surface, res);
367}
368
336static inline struct vmw_private *vmw_priv(struct drm_device *dev) 369static inline struct vmw_private *vmw_priv(struct drm_device *dev)
337{ 370{
338 return (struct vmw_private *)dev->dev_private; 371 return (struct vmw_private *)dev->dev_private;
@@ -381,10 +414,16 @@ extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
381/** 414/**
382 * Resource utilities - vmwgfx_resource.c 415 * Resource utilities - vmwgfx_resource.c
383 */ 416 */
417struct vmw_user_resource_conv;
418extern const struct vmw_user_resource_conv *user_surface_converter;
419extern const struct vmw_user_resource_conv *user_context_converter;
384 420
385extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv); 421extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
386extern void vmw_resource_unreference(struct vmw_resource **p_res); 422extern void vmw_resource_unreference(struct vmw_resource **p_res);
387extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res); 423extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
424extern int vmw_resource_validate(struct vmw_resource *res);
425extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
426extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
388extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, 427extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
389 struct drm_file *file_priv); 428 struct drm_file *file_priv);
390extern int vmw_context_define_ioctl(struct drm_device *dev, void *data, 429extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
@@ -398,14 +437,13 @@ extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
398 uint32_t handle, 437 uint32_t handle,
399 struct vmw_surface **out_surf, 438 struct vmw_surface **out_surf,
400 struct vmw_dma_buffer **out_buf); 439 struct vmw_dma_buffer **out_buf);
440extern int vmw_user_resource_lookup_handle(
441 struct vmw_private *dev_priv,
442 struct ttm_object_file *tfile,
443 uint32_t handle,
444 const struct vmw_user_resource_conv *converter,
445 struct vmw_resource **p_res);
401extern void vmw_surface_res_free(struct vmw_resource *res); 446extern void vmw_surface_res_free(struct vmw_resource *res);
402extern int vmw_surface_init(struct vmw_private *dev_priv,
403 struct vmw_surface *srf,
404 void (*res_free) (struct vmw_resource *res));
405extern int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
406 struct ttm_object_file *tfile,
407 uint32_t handle,
408 struct vmw_surface **out);
409extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, 447extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
410 struct drm_file *file_priv); 448 struct drm_file *file_priv);
411extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data, 449extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
@@ -423,6 +461,8 @@ extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
423 size_t size, struct ttm_placement *placement, 461 size_t size, struct ttm_placement *placement,
424 bool interuptable, 462 bool interuptable,
425 void (*bo_free) (struct ttm_buffer_object *bo)); 463 void (*bo_free) (struct ttm_buffer_object *bo));
464extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
465 struct ttm_object_file *tfile);
426extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, 466extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
427 struct drm_file *file_priv); 467 struct drm_file *file_priv);
428extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, 468extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
@@ -440,7 +480,14 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
440 struct ttm_object_file *tfile, 480 struct ttm_object_file *tfile,
441 uint32_t *inout_id, 481 uint32_t *inout_id,
442 struct vmw_resource **out); 482 struct vmw_resource **out);
443extern void vmw_resource_unreserve(struct list_head *list); 483extern void vmw_resource_unreserve(struct vmw_resource *res,
484 struct vmw_dma_buffer *new_backup,
485 unsigned long new_backup_offset);
486extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
487 struct ttm_mem_reg *mem);
488extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
489 struct vmw_fence_obj *fence);
490extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
444 491
445/** 492/**
446 * DMA buffer helper routines - vmwgfx_dmabuf.c 493 * DMA buffer helper routines - vmwgfx_dmabuf.c
@@ -538,10 +585,9 @@ extern int vmw_execbuf_process(struct drm_file *file_priv,
538 struct drm_vmw_fence_rep __user 585 struct drm_vmw_fence_rep __user
539 *user_fence_rep, 586 *user_fence_rep,
540 struct vmw_fence_obj **out_fence); 587 struct vmw_fence_obj **out_fence);
541 588extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
542extern void 589 struct vmw_fence_obj *fence);
543vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, 590extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);
544 bool only_on_cid_match, uint32_t cid);
545 591
546extern int vmw_execbuf_fence_commands(struct drm_file *file_priv, 592extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
547 struct vmw_private *dev_priv, 593 struct vmw_private *dev_priv,
@@ -699,10 +745,13 @@ static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
699static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf) 745static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
700{ 746{
701 struct vmw_dma_buffer *tmp_buf = *buf; 747 struct vmw_dma_buffer *tmp_buf = *buf;
702 struct ttm_buffer_object *bo = &tmp_buf->base; 748
703 *buf = NULL; 749 *buf = NULL;
750 if (tmp_buf != NULL) {
751 struct ttm_buffer_object *bo = &tmp_buf->base;
704 752
705 ttm_bo_unref(&bo); 753 ttm_bo_unref(&bo);
754 }
706} 755}
707 756
708static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf) 757static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 30654b4cc972..394e6476105b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -30,6 +30,181 @@
30#include <drm/ttm/ttm_bo_api.h> 30#include <drm/ttm/ttm_bo_api.h>
31#include <drm/ttm/ttm_placement.h> 31#include <drm/ttm/ttm_placement.h>
32 32
33#define VMW_RES_HT_ORDER 12
34
35/**
36 * struct vmw_resource_relocation - Relocation info for resources
37 *
38 * @head: List head for the software context's relocation list.
39 * @res: Non-ref-counted pointer to the resource.
40 * @offset: Offset of 4 byte entries into the command buffer where the
41 * id that needs fixup is located.
42 */
43struct vmw_resource_relocation {
44 struct list_head head;
45 const struct vmw_resource *res;
46 unsigned long offset;
47};
48
49/**
50 * struct vmw_resource_val_node - Validation info for resources
51 *
52 * @head: List head for the software context's resource list.
53 * @hash: Hash entry for quick resouce to val_node lookup.
54 * @res: Ref-counted pointer to the resource.
55 * @switch_backup: Boolean whether to switch backup buffer on unreserve.
56 * @new_backup: Refcounted pointer to the new backup buffer.
57 * @new_backup_offset: New backup buffer offset if @new_backup is non-NUll.
58 * @first_usage: Set to true the first time the resource is referenced in
59 * the command stream.
60 * @no_buffer_needed: Resources do not need to allocate buffer backup on
61 * reservation. The command stream will provide one.
62 */
63struct vmw_resource_val_node {
64 struct list_head head;
65 struct drm_hash_item hash;
66 struct vmw_resource *res;
67 struct vmw_dma_buffer *new_backup;
68 unsigned long new_backup_offset;
69 bool first_usage;
70 bool no_buffer_needed;
71};
72
73/**
74 * vmw_resource_unreserve - unreserve resources previously reserved for
75 * command submission.
76 *
77 * @list_head: list of resources to unreserve.
78 * @backoff: Whether command submission failed.
79 */
80static void vmw_resource_list_unreserve(struct list_head *list,
81 bool backoff)
82{
83 struct vmw_resource_val_node *val;
84
85 list_for_each_entry(val, list, head) {
86 struct vmw_resource *res = val->res;
87 struct vmw_dma_buffer *new_backup =
88 backoff ? NULL : val->new_backup;
89
90 vmw_resource_unreserve(res, new_backup,
91 val->new_backup_offset);
92 vmw_dmabuf_unreference(&val->new_backup);
93 }
94}
95
96
97/**
98 * vmw_resource_val_add - Add a resource to the software context's
99 * resource list if it's not already on it.
100 *
101 * @sw_context: Pointer to the software context.
102 * @res: Pointer to the resource.
103 * @p_node On successful return points to a valid pointer to a
104 * struct vmw_resource_val_node, if non-NULL on entry.
105 */
106static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
107 struct vmw_resource *res,
108 struct vmw_resource_val_node **p_node)
109{
110 struct vmw_resource_val_node *node;
111 struct drm_hash_item *hash;
112 int ret;
113
114 if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
115 &hash) == 0)) {
116 node = container_of(hash, struct vmw_resource_val_node, hash);
117 node->first_usage = false;
118 if (unlikely(p_node != NULL))
119 *p_node = node;
120 return 0;
121 }
122
123 node = kzalloc(sizeof(*node), GFP_KERNEL);
124 if (unlikely(node == NULL)) {
125 DRM_ERROR("Failed to allocate a resource validation "
126 "entry.\n");
127 return -ENOMEM;
128 }
129
130 node->hash.key = (unsigned long) res;
131 ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
132 if (unlikely(ret != 0)) {
133 DRM_ERROR("Failed to initialize a resource validation "
134 "entry.\n");
135 kfree(node);
136 return ret;
137 }
138 list_add_tail(&node->head, &sw_context->resource_list);
139 node->res = vmw_resource_reference(res);
140 node->first_usage = true;
141
142 if (unlikely(p_node != NULL))
143 *p_node = node;
144
145 return 0;
146}
147
148/**
149 * vmw_resource_relocation_add - Add a relocation to the relocation list
150 *
151 * @list: Pointer to head of relocation list.
152 * @res: The resource.
153 * @offset: Offset into the command buffer currently being parsed where the
154 * id that needs fixup is located. Granularity is 4 bytes.
155 */
156static int vmw_resource_relocation_add(struct list_head *list,
157 const struct vmw_resource *res,
158 unsigned long offset)
159{
160 struct vmw_resource_relocation *rel;
161
162 rel = kmalloc(sizeof(*rel), GFP_KERNEL);
163 if (unlikely(rel == NULL)) {
164 DRM_ERROR("Failed to allocate a resource relocation.\n");
165 return -ENOMEM;
166 }
167
168 rel->res = res;
169 rel->offset = offset;
170 list_add_tail(&rel->head, list);
171
172 return 0;
173}
174
175/**
176 * vmw_resource_relocations_free - Free all relocations on a list
177 *
178 * @list: Pointer to the head of the relocation list.
179 */
180static void vmw_resource_relocations_free(struct list_head *list)
181{
182 struct vmw_resource_relocation *rel, *n;
183
184 list_for_each_entry_safe(rel, n, list, head) {
185 list_del(&rel->head);
186 kfree(rel);
187 }
188}
189
190/**
191 * vmw_resource_relocations_apply - Apply all relocations on a list
192 *
193 * @cb: Pointer to the start of the command buffer bein patch. This need
194 * not be the same buffer as the one being parsed when the relocation
195 * list was built, but the contents must be the same modulo the
196 * resource ids.
197 * @list: Pointer to the head of the relocation list.
198 */
199static void vmw_resource_relocations_apply(uint32_t *cb,
200 struct list_head *list)
201{
202 struct vmw_resource_relocation *rel;
203
204 list_for_each_entry(rel, list, head)
205 cb[rel->offset] = rel->res->id;
206}
207
33static int vmw_cmd_invalid(struct vmw_private *dev_priv, 208static int vmw_cmd_invalid(struct vmw_private *dev_priv,
34 struct vmw_sw_context *sw_context, 209 struct vmw_sw_context *sw_context,
35 SVGA3dCmdHeader *header) 210 SVGA3dCmdHeader *header)
@@ -44,25 +219,11 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
44 return 0; 219 return 0;
45} 220}
46 221
47static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
48 struct vmw_resource **p_res)
49{
50 struct vmw_resource *res = *p_res;
51
52 if (list_empty(&res->validate_head)) {
53 list_add_tail(&res->validate_head, &sw_context->resource_list);
54 *p_res = NULL;
55 } else
56 vmw_resource_unreference(p_res);
57}
58
59/** 222/**
60 * vmw_bo_to_validate_list - add a bo to a validate list 223 * vmw_bo_to_validate_list - add a bo to a validate list
61 * 224 *
62 * @sw_context: The software context used for this command submission batch. 225 * @sw_context: The software context used for this command submission batch.
63 * @bo: The buffer object to add. 226 * @bo: The buffer object to add.
64 * @fence_flags: Fence flags to be or'ed with any other fence flags for
65 * this buffer on this submission batch.
66 * @p_val_node: If non-NULL Will be updated with the validate node number 227 * @p_val_node: If non-NULL Will be updated with the validate node number
67 * on return. 228 * on return.
68 * 229 *
@@ -71,31 +232,43 @@ static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
71 */ 232 */
72static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, 233static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
73 struct ttm_buffer_object *bo, 234 struct ttm_buffer_object *bo,
74 uint32_t fence_flags,
75 uint32_t *p_val_node) 235 uint32_t *p_val_node)
76{ 236{
77 uint32_t val_node; 237 uint32_t val_node;
238 struct vmw_validate_buffer *vval_buf;
78 struct ttm_validate_buffer *val_buf; 239 struct ttm_validate_buffer *val_buf;
240 struct drm_hash_item *hash;
241 int ret;
79 242
80 val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf); 243 if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
81 244 &hash) == 0)) {
82 if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) { 245 vval_buf = container_of(hash, struct vmw_validate_buffer,
83 DRM_ERROR("Max number of DMA buffers per submission" 246 hash);
84 " exceeded.\n"); 247 val_buf = &vval_buf->base;
85 return -EINVAL; 248 val_node = vval_buf - sw_context->val_bufs;
86 } 249 } else {
87 250 val_node = sw_context->cur_val_buf;
88 val_buf = &sw_context->val_bufs[val_node]; 251 if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
89 if (unlikely(val_node == sw_context->cur_val_buf)) { 252 DRM_ERROR("Max number of DMA buffers per submission "
90 val_buf->new_sync_obj_arg = NULL; 253 "exceeded.\n");
254 return -EINVAL;
255 }
256 vval_buf = &sw_context->val_bufs[val_node];
257 vval_buf->hash.key = (unsigned long) bo;
258 ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
259 if (unlikely(ret != 0)) {
260 DRM_ERROR("Failed to initialize a buffer validation "
261 "entry.\n");
262 return ret;
263 }
264 ++sw_context->cur_val_buf;
265 val_buf = &vval_buf->base;
91 val_buf->bo = ttm_bo_reference(bo); 266 val_buf->bo = ttm_bo_reference(bo);
267 val_buf->reserved = false;
92 list_add_tail(&val_buf->head, &sw_context->validate_nodes); 268 list_add_tail(&val_buf->head, &sw_context->validate_nodes);
93 ++sw_context->cur_val_buf;
94 } 269 }
95 270
96 val_buf->new_sync_obj_arg = (void *) 271 sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
97 ((unsigned long) val_buf->new_sync_obj_arg | fence_flags);
98 sw_context->fence_flags |= fence_flags;
99 272
100 if (p_val_node) 273 if (p_val_node)
101 *p_val_node = val_node; 274 *p_val_node = val_node;
@@ -103,85 +276,174 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
103 return 0; 276 return 0;
104} 277}
105 278
106static int vmw_cmd_cid_check(struct vmw_private *dev_priv, 279/**
107 struct vmw_sw_context *sw_context, 280 * vmw_resources_reserve - Reserve all resources on the sw_context's
108 SVGA3dCmdHeader *header) 281 * resource list.
282 *
283 * @sw_context: Pointer to the software context.
284 *
285 * Note that since vmware's command submission currently is protected by
286 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
287 * since only a single thread at once will attempt this.
288 */
289static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
109{ 290{
110 struct vmw_resource *ctx; 291 struct vmw_resource_val_node *val;
111
112 struct vmw_cid_cmd {
113 SVGA3dCmdHeader header;
114 __le32 cid;
115 } *cmd;
116 int ret; 292 int ret;
117 293
118 cmd = container_of(header, struct vmw_cid_cmd, header); 294 list_for_each_entry(val, &sw_context->resource_list, head) {
119 if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid)) 295 struct vmw_resource *res = val->res;
120 return 0;
121 296
122 ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid, 297 ret = vmw_resource_reserve(res, val->no_buffer_needed);
123 &ctx); 298 if (unlikely(ret != 0))
124 if (unlikely(ret != 0)) { 299 return ret;
125 DRM_ERROR("Could not find or use context %u\n", 300
126 (unsigned) cmd->cid); 301 if (res->backup) {
127 return ret; 302 struct ttm_buffer_object *bo = &res->backup->base;
303
304 ret = vmw_bo_to_validate_list
305 (sw_context, bo, NULL);
306
307 if (unlikely(ret != 0))
308 return ret;
309 }
128 } 310 }
311 return 0;
312}
129 313
130 sw_context->last_cid = cmd->cid; 314/**
131 sw_context->cid_valid = true; 315 * vmw_resources_validate - Validate all resources on the sw_context's
132 sw_context->cur_ctx = ctx; 316 * resource list.
133 vmw_resource_to_validate_list(sw_context, &ctx); 317 *
318 * @sw_context: Pointer to the software context.
319 *
320 * Before this function is called, all resource backup buffers must have
321 * been validated.
322 */
323static int vmw_resources_validate(struct vmw_sw_context *sw_context)
324{
325 struct vmw_resource_val_node *val;
326 int ret;
327
328 list_for_each_entry(val, &sw_context->resource_list, head) {
329 struct vmw_resource *res = val->res;
134 330
331 ret = vmw_resource_validate(res);
332 if (unlikely(ret != 0)) {
333 if (ret != -ERESTARTSYS)
334 DRM_ERROR("Failed to validate resource.\n");
335 return ret;
336 }
337 }
135 return 0; 338 return 0;
136} 339}
137 340
138static int vmw_cmd_sid_check(struct vmw_private *dev_priv, 341/**
342 * vmw_cmd_res_check - Check that a resource is present and if so, put it
343 * on the resource validate list unless it's already there.
344 *
345 * @dev_priv: Pointer to a device private structure.
346 * @sw_context: Pointer to the software context.
347 * @res_type: Resource type.
348 * @converter: User-space visisble type specific information.
349 * @id: Pointer to the location in the command buffer currently being
350 * parsed from where the user-space resource id handle is located.
351 */
352static int vmw_cmd_res_check(struct vmw_private *dev_priv,
139 struct vmw_sw_context *sw_context, 353 struct vmw_sw_context *sw_context,
140 uint32_t *sid) 354 enum vmw_res_type res_type,
355 const struct vmw_user_resource_conv *converter,
356 uint32_t *id,
357 struct vmw_resource_val_node **p_val)
141{ 358{
142 struct vmw_surface *srf; 359 struct vmw_res_cache_entry *rcache =
143 int ret; 360 &sw_context->res_cache[res_type];
144 struct vmw_resource *res; 361 struct vmw_resource *res;
362 struct vmw_resource_val_node *node;
363 int ret;
145 364
146 if (*sid == SVGA3D_INVALID_ID) 365 if (*id == SVGA3D_INVALID_ID)
147 return 0; 366 return 0;
148 367
149 if (likely((sw_context->sid_valid && 368 /*
150 *sid == sw_context->last_sid))) { 369 * Fastpath in case of repeated commands referencing the same
151 *sid = sw_context->sid_translation; 370 * resource
152 return 0; 371 */
153 }
154 372
155 ret = vmw_user_surface_lookup_handle(dev_priv, 373 if (likely(rcache->valid && *id == rcache->handle)) {
156 sw_context->tfile, 374 const struct vmw_resource *res = rcache->res;
157 *sid, &srf); 375
158 if (unlikely(ret != 0)) { 376 rcache->node->first_usage = false;
159 DRM_ERROR("Could ot find or use surface 0x%08x " 377 if (p_val)
160 "address 0x%08lx\n", 378 *p_val = rcache->node;
161 (unsigned int) *sid, 379
162 (unsigned long) sid); 380 return vmw_resource_relocation_add
163 return ret; 381 (&sw_context->res_relocations, res,
382 id - sw_context->buf_start);
164 } 383 }
165 384
166 ret = vmw_surface_validate(dev_priv, srf); 385 ret = vmw_user_resource_lookup_handle(dev_priv,
386 sw_context->tfile,
387 *id,
388 converter,
389 &res);
167 if (unlikely(ret != 0)) { 390 if (unlikely(ret != 0)) {
168 if (ret != -ERESTARTSYS) 391 DRM_ERROR("Could not find or use resource 0x%08x.\n",
169 DRM_ERROR("Could not validate surface.\n"); 392 (unsigned) *id);
170 vmw_surface_unreference(&srf); 393 dump_stack();
171 return ret; 394 return ret;
172 } 395 }
173 396
174 sw_context->last_sid = *sid; 397 rcache->valid = true;
175 sw_context->sid_valid = true; 398 rcache->res = res;
176 sw_context->sid_translation = srf->res.id; 399 rcache->handle = *id;
177 *sid = sw_context->sid_translation;
178 400
179 res = &srf->res; 401 ret = vmw_resource_relocation_add(&sw_context->res_relocations,
180 vmw_resource_to_validate_list(sw_context, &res); 402 res,
403 id - sw_context->buf_start);
404 if (unlikely(ret != 0))
405 goto out_no_reloc;
406
407 ret = vmw_resource_val_add(sw_context, res, &node);
408 if (unlikely(ret != 0))
409 goto out_no_reloc;
181 410
411 rcache->node = node;
412 if (p_val)
413 *p_val = node;
414 vmw_resource_unreference(&res);
182 return 0; 415 return 0;
416
417out_no_reloc:
418 BUG_ON(sw_context->error_resource != NULL);
419 sw_context->error_resource = res;
420
421 return ret;
183} 422}
184 423
424/**
425 * vmw_cmd_cid_check - Check a command header for valid context information.
426 *
427 * @dev_priv: Pointer to a device private structure.
428 * @sw_context: Pointer to the software context.
429 * @header: A command header with an embedded user-space context handle.
430 *
431 * Convenience function: Call vmw_cmd_res_check with the user-space context
432 * handle embedded in @header.
433 */
434static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
435 struct vmw_sw_context *sw_context,
436 SVGA3dCmdHeader *header)
437{
438 struct vmw_cid_cmd {
439 SVGA3dCmdHeader header;
440 __le32 cid;
441 } *cmd;
442
443 cmd = container_of(header, struct vmw_cid_cmd, header);
444 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
445 user_context_converter, &cmd->cid, NULL);
446}
185 447
186static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, 448static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
187 struct vmw_sw_context *sw_context, 449 struct vmw_sw_context *sw_context,
@@ -198,7 +460,9 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
198 return ret; 460 return ret;
199 461
200 cmd = container_of(header, struct vmw_sid_cmd, header); 462 cmd = container_of(header, struct vmw_sid_cmd, header);
201 ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid); 463 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
464 user_surface_converter,
465 &cmd->body.target.sid, NULL);
202 return ret; 466 return ret;
203} 467}
204 468
@@ -213,10 +477,14 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
213 int ret; 477 int ret;
214 478
215 cmd = container_of(header, struct vmw_sid_cmd, header); 479 cmd = container_of(header, struct vmw_sid_cmd, header);
216 ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid); 480 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
481 user_surface_converter,
482 &cmd->body.src.sid, NULL);
217 if (unlikely(ret != 0)) 483 if (unlikely(ret != 0))
218 return ret; 484 return ret;
219 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid); 485 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
486 user_surface_converter,
487 &cmd->body.dest.sid, NULL);
220} 488}
221 489
222static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv, 490static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
@@ -230,10 +498,14 @@ static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
230 int ret; 498 int ret;
231 499
232 cmd = container_of(header, struct vmw_sid_cmd, header); 500 cmd = container_of(header, struct vmw_sid_cmd, header);
233 ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid); 501 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
502 user_surface_converter,
503 &cmd->body.src.sid, NULL);
234 if (unlikely(ret != 0)) 504 if (unlikely(ret != 0))
235 return ret; 505 return ret;
236 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid); 506 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
507 user_surface_converter,
508 &cmd->body.dest.sid, NULL);
237} 509}
238 510
239static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, 511static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
@@ -252,7 +524,9 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
252 return -EPERM; 524 return -EPERM;
253 } 525 }
254 526
255 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid); 527 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
528 user_surface_converter,
529 &cmd->body.srcImage.sid, NULL);
256} 530}
257 531
258static int vmw_cmd_present_check(struct vmw_private *dev_priv, 532static int vmw_cmd_present_check(struct vmw_private *dev_priv,
@@ -272,14 +546,15 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
272 return -EPERM; 546 return -EPERM;
273 } 547 }
274 548
275 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid); 549 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
550 user_surface_converter, &cmd->body.sid,
551 NULL);
276} 552}
277 553
278/** 554/**
279 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries. 555 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
280 * 556 *
281 * @dev_priv: The device private structure. 557 * @dev_priv: The device private structure.
282 * @cid: The hardware context for the next query.
283 * @new_query_bo: The new buffer holding query results. 558 * @new_query_bo: The new buffer holding query results.
284 * @sw_context: The software context used for this command submission. 559 * @sw_context: The software context used for this command submission.
285 * 560 *
@@ -287,18 +562,18 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
287 * query results, and if another buffer currently is pinned for query 562 * query results, and if another buffer currently is pinned for query
288 * results. If so, the function prepares the state of @sw_context for 563 * results. If so, the function prepares the state of @sw_context for
289 * switching pinned buffers after successful submission of the current 564 * switching pinned buffers after successful submission of the current
290 * command batch. It also checks whether we're using a new query context. 565 * command batch.
291 * In that case, it makes sure we emit a query barrier for the old
292 * context before the current query buffer is fenced.
293 */ 566 */
294static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, 567static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
295 uint32_t cid,
296 struct ttm_buffer_object *new_query_bo, 568 struct ttm_buffer_object *new_query_bo,
297 struct vmw_sw_context *sw_context) 569 struct vmw_sw_context *sw_context)
298{ 570{
571 struct vmw_res_cache_entry *ctx_entry =
572 &sw_context->res_cache[vmw_res_context];
299 int ret; 573 int ret;
300 bool add_cid = false; 574
301 uint32_t cid_to_add; 575 BUG_ON(!ctx_entry->valid);
576 sw_context->last_query_ctx = ctx_entry->res;
302 577
303 if (unlikely(new_query_bo != sw_context->cur_query_bo)) { 578 if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
304 579
@@ -308,12 +583,9 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
308 } 583 }
309 584
310 if (unlikely(sw_context->cur_query_bo != NULL)) { 585 if (unlikely(sw_context->cur_query_bo != NULL)) {
311 BUG_ON(!sw_context->query_cid_valid); 586 sw_context->needs_post_query_barrier = true;
312 add_cid = true;
313 cid_to_add = sw_context->cur_query_cid;
314 ret = vmw_bo_to_validate_list(sw_context, 587 ret = vmw_bo_to_validate_list(sw_context,
315 sw_context->cur_query_bo, 588 sw_context->cur_query_bo,
316 DRM_VMW_FENCE_FLAG_EXEC,
317 NULL); 589 NULL);
318 if (unlikely(ret != 0)) 590 if (unlikely(ret != 0))
319 return ret; 591 return ret;
@@ -322,35 +594,12 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
322 594
323 ret = vmw_bo_to_validate_list(sw_context, 595 ret = vmw_bo_to_validate_list(sw_context,
324 dev_priv->dummy_query_bo, 596 dev_priv->dummy_query_bo,
325 DRM_VMW_FENCE_FLAG_EXEC,
326 NULL); 597 NULL);
327 if (unlikely(ret != 0)) 598 if (unlikely(ret != 0))
328 return ret; 599 return ret;
329 600
330 } 601 }
331 602
332 if (unlikely(cid != sw_context->cur_query_cid &&
333 sw_context->query_cid_valid)) {
334 add_cid = true;
335 cid_to_add = sw_context->cur_query_cid;
336 }
337
338 sw_context->cur_query_cid = cid;
339 sw_context->query_cid_valid = true;
340
341 if (add_cid) {
342 struct vmw_resource *ctx = sw_context->cur_ctx;
343
344 if (list_empty(&ctx->query_head))
345 list_add_tail(&ctx->query_head,
346 &sw_context->query_list);
347 ret = vmw_bo_to_validate_list(sw_context,
348 dev_priv->dummy_query_bo,
349 DRM_VMW_FENCE_FLAG_EXEC,
350 NULL);
351 if (unlikely(ret != 0))
352 return ret;
353 }
354 return 0; 603 return 0;
355} 604}
356 605
@@ -362,10 +611,9 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
362 * @sw_context: The software context used for this command submission batch. 611 * @sw_context: The software context used for this command submission batch.
363 * 612 *
364 * This function will check if we're switching query buffers, and will then, 613 * This function will check if we're switching query buffers, and will then,
365 * if no other query waits are issued this command submission batch,
366 * issue a dummy occlusion query wait used as a query barrier. When the fence 614 * issue a dummy occlusion query wait used as a query barrier. When the fence
367 * object following that query wait has signaled, we are sure that all 615 * object following that query wait has signaled, we are sure that all
368 * preseding queries have finished, and the old query buffer can be unpinned. 616 * preceding queries have finished, and the old query buffer can be unpinned.
369 * However, since both the new query buffer and the old one are fenced with 617 * However, since both the new query buffer and the old one are fenced with
370 * that fence, we can do an asynchronus unpin now, and be sure that the 618 * that fence, we can do an asynchronus unpin now, and be sure that the
371 * old query buffer won't be moved until the fence has signaled. 619 * old query buffer won't be moved until the fence has signaled.
@@ -376,20 +624,19 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
376static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, 624static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
377 struct vmw_sw_context *sw_context) 625 struct vmw_sw_context *sw_context)
378{ 626{
379
380 struct vmw_resource *ctx, *next_ctx;
381 int ret;
382
383 /* 627 /*
384 * The validate list should still hold references to all 628 * The validate list should still hold references to all
385 * contexts here. 629 * contexts here.
386 */ 630 */
387 631
388 list_for_each_entry_safe(ctx, next_ctx, &sw_context->query_list, 632 if (sw_context->needs_post_query_barrier) {
389 query_head) { 633 struct vmw_res_cache_entry *ctx_entry =
390 list_del_init(&ctx->query_head); 634 &sw_context->res_cache[vmw_res_context];
635 struct vmw_resource *ctx;
636 int ret;
391 637
392 BUG_ON(list_empty(&ctx->validate_head)); 638 BUG_ON(!ctx_entry->valid);
639 ctx = ctx_entry->res;
393 640
394 ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id); 641 ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
395 642
@@ -403,40 +650,46 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
403 ttm_bo_unref(&dev_priv->pinned_bo); 650 ttm_bo_unref(&dev_priv->pinned_bo);
404 } 651 }
405 652
406 vmw_bo_pin(sw_context->cur_query_bo, true); 653 if (!sw_context->needs_post_query_barrier) {
654 vmw_bo_pin(sw_context->cur_query_bo, true);
407 655
408 /* 656 /*
409 * We pin also the dummy_query_bo buffer so that we 657 * We pin also the dummy_query_bo buffer so that we
410 * don't need to validate it when emitting 658 * don't need to validate it when emitting
411 * dummy queries in context destroy paths. 659 * dummy queries in context destroy paths.
412 */ 660 */
413 661
414 vmw_bo_pin(dev_priv->dummy_query_bo, true); 662 vmw_bo_pin(dev_priv->dummy_query_bo, true);
415 dev_priv->dummy_query_bo_pinned = true; 663 dev_priv->dummy_query_bo_pinned = true;
416 664
417 dev_priv->query_cid = sw_context->cur_query_cid; 665 BUG_ON(sw_context->last_query_ctx == NULL);
418 dev_priv->pinned_bo = 666 dev_priv->query_cid = sw_context->last_query_ctx->id;
419 ttm_bo_reference(sw_context->cur_query_bo); 667 dev_priv->query_cid_valid = true;
668 dev_priv->pinned_bo =
669 ttm_bo_reference(sw_context->cur_query_bo);
670 }
420 } 671 }
421} 672}
422 673
423/** 674/**
424 * vmw_query_switch_backoff - clear query barrier list 675 * vmw_translate_guest_pointer - Prepare to translate a user-space buffer
425 * @sw_context: The sw context used for this submission batch. 676 * handle to a valid SVGAGuestPtr
426 * 677 *
427 * This function is used as part of an error path, where a previously 678 * @dev_priv: Pointer to a device private structure.
428 * set up list of query barriers needs to be cleared. 679 * @sw_context: The software context used for this command batch validation.
680 * @ptr: Pointer to the user-space handle to be translated.
681 * @vmw_bo_p: Points to a location that, on successful return will carry
682 * a reference-counted pointer to the DMA buffer identified by the
683 * user-space handle in @id.
429 * 684 *
685 * This function saves information needed to translate a user-space buffer
686 * handle to a valid SVGAGuestPtr. The translation does not take place
687 * immediately, but during a call to vmw_apply_relocations().
688 * This function builds a relocation list and a list of buffers to validate.
689 * The former needs to be freed using either vmw_apply_relocations() or
690 * vmw_free_relocations(). The latter needs to be freed using
691 * vmw_clear_validations.
430 */ 692 */
431static void vmw_query_switch_backoff(struct vmw_sw_context *sw_context)
432{
433 struct list_head *list, *next;
434
435 list_for_each_safe(list, next, &sw_context->query_list) {
436 list_del_init(list);
437 }
438}
439
440static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, 693static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
441 struct vmw_sw_context *sw_context, 694 struct vmw_sw_context *sw_context,
442 SVGAGuestPtr *ptr, 695 SVGAGuestPtr *ptr,
@@ -465,8 +718,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
465 reloc = &sw_context->relocs[sw_context->cur_reloc++]; 718 reloc = &sw_context->relocs[sw_context->cur_reloc++];
466 reloc->location = ptr; 719 reloc->location = ptr;
467 720
468 ret = vmw_bo_to_validate_list(sw_context, bo, DRM_VMW_FENCE_FLAG_EXEC, 721 ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index);
469 &reloc->index);
470 if (unlikely(ret != 0)) 722 if (unlikely(ret != 0))
471 goto out_no_reloc; 723 goto out_no_reloc;
472 724
@@ -479,6 +731,37 @@ out_no_reloc:
479 return ret; 731 return ret;
480} 732}
481 733
734/**
735 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
736 *
737 * @dev_priv: Pointer to a device private struct.
738 * @sw_context: The software context used for this command submission.
739 * @header: Pointer to the command header in the command stream.
740 */
741static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
742 struct vmw_sw_context *sw_context,
743 SVGA3dCmdHeader *header)
744{
745 struct vmw_begin_query_cmd {
746 SVGA3dCmdHeader header;
747 SVGA3dCmdBeginQuery q;
748 } *cmd;
749
750 cmd = container_of(header, struct vmw_begin_query_cmd,
751 header);
752
753 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
754 user_context_converter, &cmd->q.cid,
755 NULL);
756}
757
758/**
759 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
760 *
761 * @dev_priv: Pointer to a device private struct.
762 * @sw_context: The software context used for this command submission.
763 * @header: Pointer to the command header in the command stream.
764 */
482static int vmw_cmd_end_query(struct vmw_private *dev_priv, 765static int vmw_cmd_end_query(struct vmw_private *dev_priv,
483 struct vmw_sw_context *sw_context, 766 struct vmw_sw_context *sw_context,
484 SVGA3dCmdHeader *header) 767 SVGA3dCmdHeader *header)
@@ -501,13 +784,19 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
501 if (unlikely(ret != 0)) 784 if (unlikely(ret != 0))
502 return ret; 785 return ret;
503 786
504 ret = vmw_query_bo_switch_prepare(dev_priv, cmd->q.cid, 787 ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
505 &vmw_bo->base, sw_context);
506 788
507 vmw_dmabuf_unreference(&vmw_bo); 789 vmw_dmabuf_unreference(&vmw_bo);
508 return ret; 790 return ret;
509} 791}
510 792
793/*
794 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
795 *
796 * @dev_priv: Pointer to a device private struct.
797 * @sw_context: The software context used for this command submission.
798 * @header: Pointer to the command header in the command stream.
799 */
511static int vmw_cmd_wait_query(struct vmw_private *dev_priv, 800static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
512 struct vmw_sw_context *sw_context, 801 struct vmw_sw_context *sw_context,
513 SVGA3dCmdHeader *header) 802 SVGA3dCmdHeader *header)
@@ -518,7 +807,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
518 SVGA3dCmdWaitForQuery q; 807 SVGA3dCmdWaitForQuery q;
519 } *cmd; 808 } *cmd;
520 int ret; 809 int ret;
521 struct vmw_resource *ctx;
522 810
523 cmd = container_of(header, struct vmw_query_cmd, header); 811 cmd = container_of(header, struct vmw_query_cmd, header);
524 ret = vmw_cmd_cid_check(dev_priv, sw_context, header); 812 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
@@ -532,16 +820,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
532 return ret; 820 return ret;
533 821
534 vmw_dmabuf_unreference(&vmw_bo); 822 vmw_dmabuf_unreference(&vmw_bo);
535
536 /*
537 * This wait will act as a barrier for previous waits for this
538 * context.
539 */
540
541 ctx = sw_context->cur_ctx;
542 if (!list_empty(&ctx->query_head))
543 list_del_init(&ctx->query_head);
544
545 return 0; 823 return 0;
546} 824}
547 825
@@ -550,14 +828,12 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
550 SVGA3dCmdHeader *header) 828 SVGA3dCmdHeader *header)
551{ 829{
552 struct vmw_dma_buffer *vmw_bo = NULL; 830 struct vmw_dma_buffer *vmw_bo = NULL;
553 struct ttm_buffer_object *bo;
554 struct vmw_surface *srf = NULL; 831 struct vmw_surface *srf = NULL;
555 struct vmw_dma_cmd { 832 struct vmw_dma_cmd {
556 SVGA3dCmdHeader header; 833 SVGA3dCmdHeader header;
557 SVGA3dCmdSurfaceDMA dma; 834 SVGA3dCmdSurfaceDMA dma;
558 } *cmd; 835 } *cmd;
559 int ret; 836 int ret;
560 struct vmw_resource *res;
561 837
562 cmd = container_of(header, struct vmw_dma_cmd, header); 838 cmd = container_of(header, struct vmw_dma_cmd, header);
563 ret = vmw_translate_guest_ptr(dev_priv, sw_context, 839 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
@@ -566,37 +842,20 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
566 if (unlikely(ret != 0)) 842 if (unlikely(ret != 0))
567 return ret; 843 return ret;
568 844
569 bo = &vmw_bo->base; 845 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
570 ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile, 846 user_surface_converter, &cmd->dma.host.sid,
571 cmd->dma.host.sid, &srf); 847 NULL);
572 if (ret) {
573 DRM_ERROR("could not find surface\n");
574 goto out_no_reloc;
575 }
576
577 ret = vmw_surface_validate(dev_priv, srf);
578 if (unlikely(ret != 0)) { 848 if (unlikely(ret != 0)) {
579 if (ret != -ERESTARTSYS) 849 if (unlikely(ret != -ERESTARTSYS))
580 DRM_ERROR("Culd not validate surface.\n"); 850 DRM_ERROR("could not find surface for DMA.\n");
581 goto out_no_validate; 851 goto out_no_surface;
582 } 852 }
583 853
584 /* 854 srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
585 * Patch command stream with device SID.
586 */
587 cmd->dma.host.sid = srf->res.id;
588 vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
589
590 vmw_dmabuf_unreference(&vmw_bo);
591
592 res = &srf->res;
593 vmw_resource_to_validate_list(sw_context, &res);
594 855
595 return 0; 856 vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);
596 857
597out_no_validate: 858out_no_surface:
598 vmw_surface_unreference(&srf);
599out_no_reloc:
600 vmw_dmabuf_unreference(&vmw_bo); 859 vmw_dmabuf_unreference(&vmw_bo);
601 return ret; 860 return ret;
602} 861}
@@ -629,8 +888,9 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
629 } 888 }
630 889
631 for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) { 890 for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
632 ret = vmw_cmd_sid_check(dev_priv, sw_context, 891 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
633 &decl->array.surfaceId); 892 user_surface_converter,
893 &decl->array.surfaceId, NULL);
634 if (unlikely(ret != 0)) 894 if (unlikely(ret != 0))
635 return ret; 895 return ret;
636 } 896 }
@@ -644,8 +904,9 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
644 904
645 range = (SVGA3dPrimitiveRange *) decl; 905 range = (SVGA3dPrimitiveRange *) decl;
646 for (i = 0; i < cmd->body.numRanges; ++i, ++range) { 906 for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
647 ret = vmw_cmd_sid_check(dev_priv, sw_context, 907 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
648 &range->indexArray.surfaceId); 908 user_surface_converter,
909 &range->indexArray.surfaceId, NULL);
649 if (unlikely(ret != 0)) 910 if (unlikely(ret != 0))
650 return ret; 911 return ret;
651 } 912 }
@@ -676,8 +937,9 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
676 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE)) 937 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
677 continue; 938 continue;
678 939
679 ret = vmw_cmd_sid_check(dev_priv, sw_context, 940 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
680 &cur_state->value); 941 user_surface_converter,
942 &cur_state->value, NULL);
681 if (unlikely(ret != 0)) 943 if (unlikely(ret != 0))
682 return ret; 944 return ret;
683 } 945 }
@@ -708,6 +970,34 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
708 return ret; 970 return ret;
709} 971}
710 972
973/**
974 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
975 * command
976 *
977 * @dev_priv: Pointer to a device private struct.
978 * @sw_context: The software context being used for this batch.
979 * @header: Pointer to the command header in the command stream.
980 */
981static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
982 struct vmw_sw_context *sw_context,
983 SVGA3dCmdHeader *header)
984{
985 struct vmw_set_shader_cmd {
986 SVGA3dCmdHeader header;
987 SVGA3dCmdSetShader body;
988 } *cmd;
989 int ret;
990
991 cmd = container_of(header, struct vmw_set_shader_cmd,
992 header);
993
994 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
995 if (unlikely(ret != 0))
996 return ret;
997
998 return 0;
999}
1000
711static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, 1001static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
712 struct vmw_sw_context *sw_context, 1002 struct vmw_sw_context *sw_context,
713 void *buf, uint32_t *size) 1003 void *buf, uint32_t *size)
@@ -781,16 +1071,20 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
781 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check), 1071 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
782 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check), 1072 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
783 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check), 1073 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
784 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check), 1074 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader),
785 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check), 1075 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
786 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw), 1076 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
787 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), 1077 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
788 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check), 1078 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query),
789 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query), 1079 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
790 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query), 1080 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
791 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok), 1081 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
792 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, 1082 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
793 &vmw_cmd_blt_surf_screen_check) 1083 &vmw_cmd_blt_surf_screen_check),
1084 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid),
1085 VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid),
1086 VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid),
1087 VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid),
794}; 1088};
795 1089
796static int vmw_cmd_check(struct vmw_private *dev_priv, 1090static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -837,6 +1131,8 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv,
837 int32_t cur_size = size; 1131 int32_t cur_size = size;
838 int ret; 1132 int ret;
839 1133
1134 sw_context->buf_start = buf;
1135
840 while (cur_size > 0) { 1136 while (cur_size > 0) {
841 size = cur_size; 1137 size = cur_size;
842 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size); 1138 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
@@ -868,43 +1164,63 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
868 1164
869 for (i = 0; i < sw_context->cur_reloc; ++i) { 1165 for (i = 0; i < sw_context->cur_reloc; ++i) {
870 reloc = &sw_context->relocs[i]; 1166 reloc = &sw_context->relocs[i];
871 validate = &sw_context->val_bufs[reloc->index]; 1167 validate = &sw_context->val_bufs[reloc->index].base;
872 bo = validate->bo; 1168 bo = validate->bo;
873 if (bo->mem.mem_type == TTM_PL_VRAM) { 1169 switch (bo->mem.mem_type) {
1170 case TTM_PL_VRAM:
874 reloc->location->offset += bo->offset; 1171 reloc->location->offset += bo->offset;
875 reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER; 1172 reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
876 } else 1173 break;
1174 case VMW_PL_GMR:
877 reloc->location->gmrId = bo->mem.start; 1175 reloc->location->gmrId = bo->mem.start;
1176 break;
1177 default:
1178 BUG();
1179 }
878 } 1180 }
879 vmw_free_relocations(sw_context); 1181 vmw_free_relocations(sw_context);
880} 1182}
881 1183
1184/**
1185 * vmw_resource_list_unrefererence - Free up a resource list and unreference
1186 * all resources referenced by it.
1187 *
1188 * @list: The resource list.
1189 */
1190static void vmw_resource_list_unreference(struct list_head *list)
1191{
1192 struct vmw_resource_val_node *val, *val_next;
1193
1194 /*
1195 * Drop references to resources held during command submission.
1196 */
1197
1198 list_for_each_entry_safe(val, val_next, list, head) {
1199 list_del_init(&val->head);
1200 vmw_resource_unreference(&val->res);
1201 kfree(val);
1202 }
1203}
1204
882static void vmw_clear_validations(struct vmw_sw_context *sw_context) 1205static void vmw_clear_validations(struct vmw_sw_context *sw_context)
883{ 1206{
884 struct ttm_validate_buffer *entry, *next; 1207 struct vmw_validate_buffer *entry, *next;
885 struct vmw_resource *res, *res_next; 1208 struct vmw_resource_val_node *val;
886 1209
887 /* 1210 /*
888 * Drop references to DMA buffers held during command submission. 1211 * Drop references to DMA buffers held during command submission.
889 */ 1212 */
890 list_for_each_entry_safe(entry, next, &sw_context->validate_nodes, 1213 list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
891 head) { 1214 base.head) {
892 list_del(&entry->head); 1215 list_del(&entry->base.head);
893 vmw_dmabuf_validate_clear(entry->bo); 1216 ttm_bo_unref(&entry->base.bo);
894 ttm_bo_unref(&entry->bo); 1217 (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
895 sw_context->cur_val_buf--; 1218 sw_context->cur_val_buf--;
896 } 1219 }
897 BUG_ON(sw_context->cur_val_buf != 0); 1220 BUG_ON(sw_context->cur_val_buf != 0);
898 1221
899 /* 1222 list_for_each_entry(val, &sw_context->resource_list, head)
900 * Drop references to resources held during command submission. 1223 (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
901 */
902 vmw_resource_unreserve(&sw_context->resource_list);
903 list_for_each_entry_safe(res, res_next, &sw_context->resource_list,
904 validate_head) {
905 list_del_init(&res->validate_head);
906 vmw_resource_unreference(&res);
907 }
908} 1224}
909 1225
910static int vmw_validate_single_buffer(struct vmw_private *dev_priv, 1226static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
@@ -929,7 +1245,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
929 * used as a GMR, this will return -ENOMEM. 1245 * used as a GMR, this will return -ENOMEM.
930 */ 1246 */
931 1247
932 ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false); 1248 ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
933 if (likely(ret == 0 || ret == -ERESTARTSYS)) 1249 if (likely(ret == 0 || ret == -ERESTARTSYS))
934 return ret; 1250 return ret;
935 1251
@@ -939,7 +1255,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
939 */ 1255 */
940 1256
941 DRM_INFO("Falling through to VRAM.\n"); 1257 DRM_INFO("Falling through to VRAM.\n");
942 ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false); 1258 ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
943 return ret; 1259 return ret;
944} 1260}
945 1261
@@ -947,11 +1263,11 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
947static int vmw_validate_buffers(struct vmw_private *dev_priv, 1263static int vmw_validate_buffers(struct vmw_private *dev_priv,
948 struct vmw_sw_context *sw_context) 1264 struct vmw_sw_context *sw_context)
949{ 1265{
950 struct ttm_validate_buffer *entry; 1266 struct vmw_validate_buffer *entry;
951 int ret; 1267 int ret;
952 1268
953 list_for_each_entry(entry, &sw_context->validate_nodes, head) { 1269 list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
954 ret = vmw_validate_single_buffer(dev_priv, entry->bo); 1270 ret = vmw_validate_single_buffer(dev_priv, entry->base.bo);
955 if (unlikely(ret != 0)) 1271 if (unlikely(ret != 0))
956 return ret; 1272 return ret;
957 } 1273 }
@@ -1114,6 +1430,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
1114{ 1430{
1115 struct vmw_sw_context *sw_context = &dev_priv->ctx; 1431 struct vmw_sw_context *sw_context = &dev_priv->ctx;
1116 struct vmw_fence_obj *fence = NULL; 1432 struct vmw_fence_obj *fence = NULL;
1433 struct vmw_resource *error_resource;
1434 struct list_head resource_list;
1117 uint32_t handle; 1435 uint32_t handle;
1118 void *cmd; 1436 void *cmd;
1119 int ret; 1437 int ret;
@@ -1143,24 +1461,33 @@ int vmw_execbuf_process(struct drm_file *file_priv,
1143 sw_context->kernel = true; 1461 sw_context->kernel = true;
1144 1462
1145 sw_context->tfile = vmw_fpriv(file_priv)->tfile; 1463 sw_context->tfile = vmw_fpriv(file_priv)->tfile;
1146 sw_context->cid_valid = false;
1147 sw_context->sid_valid = false;
1148 sw_context->cur_reloc = 0; 1464 sw_context->cur_reloc = 0;
1149 sw_context->cur_val_buf = 0; 1465 sw_context->cur_val_buf = 0;
1150 sw_context->fence_flags = 0; 1466 sw_context->fence_flags = 0;
1151 INIT_LIST_HEAD(&sw_context->query_list);
1152 INIT_LIST_HEAD(&sw_context->resource_list); 1467 INIT_LIST_HEAD(&sw_context->resource_list);
1153 sw_context->cur_query_bo = dev_priv->pinned_bo; 1468 sw_context->cur_query_bo = dev_priv->pinned_bo;
1154 sw_context->cur_query_cid = dev_priv->query_cid; 1469 sw_context->last_query_ctx = NULL;
1155 sw_context->query_cid_valid = (dev_priv->pinned_bo != NULL); 1470 sw_context->needs_post_query_barrier = false;
1156 1471 memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
1157 INIT_LIST_HEAD(&sw_context->validate_nodes); 1472 INIT_LIST_HEAD(&sw_context->validate_nodes);
1473 INIT_LIST_HEAD(&sw_context->res_relocations);
1474 if (!sw_context->res_ht_initialized) {
1475 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
1476 if (unlikely(ret != 0))
1477 goto out_unlock;
1478 sw_context->res_ht_initialized = true;
1479 }
1158 1480
1481 INIT_LIST_HEAD(&resource_list);
1159 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, 1482 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
1160 command_size); 1483 command_size);
1161 if (unlikely(ret != 0)) 1484 if (unlikely(ret != 0))
1162 goto out_err; 1485 goto out_err;
1163 1486
1487 ret = vmw_resources_reserve(sw_context);
1488 if (unlikely(ret != 0))
1489 goto out_err;
1490
1164 ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes); 1491 ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
1165 if (unlikely(ret != 0)) 1492 if (unlikely(ret != 0))
1166 goto out_err; 1493 goto out_err;
@@ -1169,24 +1496,31 @@ int vmw_execbuf_process(struct drm_file *file_priv,
1169 if (unlikely(ret != 0)) 1496 if (unlikely(ret != 0))
1170 goto out_err; 1497 goto out_err;
1171 1498
1172 vmw_apply_relocations(sw_context); 1499 ret = vmw_resources_validate(sw_context);
1500 if (unlikely(ret != 0))
1501 goto out_err;
1173 1502
1174 if (throttle_us) { 1503 if (throttle_us) {
1175 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue, 1504 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
1176 throttle_us); 1505 throttle_us);
1177 1506
1178 if (unlikely(ret != 0)) 1507 if (unlikely(ret != 0))
1179 goto out_throttle; 1508 goto out_err;
1180 } 1509 }
1181 1510
1182 cmd = vmw_fifo_reserve(dev_priv, command_size); 1511 cmd = vmw_fifo_reserve(dev_priv, command_size);
1183 if (unlikely(cmd == NULL)) { 1512 if (unlikely(cmd == NULL)) {
1184 DRM_ERROR("Failed reserving fifo space for commands.\n"); 1513 DRM_ERROR("Failed reserving fifo space for commands.\n");
1185 ret = -ENOMEM; 1514 ret = -ENOMEM;
1186 goto out_throttle; 1515 goto out_err;
1187 } 1516 }
1188 1517
1518 vmw_apply_relocations(sw_context);
1189 memcpy(cmd, kernel_commands, command_size); 1519 memcpy(cmd, kernel_commands, command_size);
1520
1521 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
1522 vmw_resource_relocations_free(&sw_context->res_relocations);
1523
1190 vmw_fifo_commit(dev_priv, command_size); 1524 vmw_fifo_commit(dev_priv, command_size);
1191 1525
1192 vmw_query_bo_switch_commit(dev_priv, sw_context); 1526 vmw_query_bo_switch_commit(dev_priv, sw_context);
@@ -1202,9 +1536,14 @@ int vmw_execbuf_process(struct drm_file *file_priv,
1202 if (ret != 0) 1536 if (ret != 0)
1203 DRM_ERROR("Fence submission error. Syncing.\n"); 1537 DRM_ERROR("Fence submission error. Syncing.\n");
1204 1538
1539 vmw_resource_list_unreserve(&sw_context->resource_list, false);
1205 ttm_eu_fence_buffer_objects(&sw_context->validate_nodes, 1540 ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
1206 (void *) fence); 1541 (void *) fence);
1207 1542
1543 if (unlikely(dev_priv->pinned_bo != NULL &&
1544 !dev_priv->query_cid_valid))
1545 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
1546
1208 vmw_clear_validations(sw_context); 1547 vmw_clear_validations(sw_context);
1209 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, 1548 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
1210 user_fence_rep, fence, handle); 1549 user_fence_rep, fence, handle);
@@ -1217,17 +1556,40 @@ int vmw_execbuf_process(struct drm_file *file_priv,
1217 vmw_fence_obj_unreference(&fence); 1556 vmw_fence_obj_unreference(&fence);
1218 } 1557 }
1219 1558
1559 list_splice_init(&sw_context->resource_list, &resource_list);
1220 mutex_unlock(&dev_priv->cmdbuf_mutex); 1560 mutex_unlock(&dev_priv->cmdbuf_mutex);
1561
1562 /*
1563 * Unreference resources outside of the cmdbuf_mutex to
1564 * avoid deadlocks in resource destruction paths.
1565 */
1566 vmw_resource_list_unreference(&resource_list);
1567
1221 return 0; 1568 return 0;
1222 1569
1223out_err: 1570out_err:
1571 vmw_resource_relocations_free(&sw_context->res_relocations);
1224 vmw_free_relocations(sw_context); 1572 vmw_free_relocations(sw_context);
1225out_throttle:
1226 vmw_query_switch_backoff(sw_context);
1227 ttm_eu_backoff_reservation(&sw_context->validate_nodes); 1573 ttm_eu_backoff_reservation(&sw_context->validate_nodes);
1574 vmw_resource_list_unreserve(&sw_context->resource_list, true);
1228 vmw_clear_validations(sw_context); 1575 vmw_clear_validations(sw_context);
1576 if (unlikely(dev_priv->pinned_bo != NULL &&
1577 !dev_priv->query_cid_valid))
1578 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
1229out_unlock: 1579out_unlock:
1580 list_splice_init(&sw_context->resource_list, &resource_list);
1581 error_resource = sw_context->error_resource;
1582 sw_context->error_resource = NULL;
1230 mutex_unlock(&dev_priv->cmdbuf_mutex); 1583 mutex_unlock(&dev_priv->cmdbuf_mutex);
1584
1585 /*
1586 * Unreference resources outside of the cmdbuf_mutex to
1587 * avoid deadlocks in resource destruction paths.
1588 */
1589 vmw_resource_list_unreference(&resource_list);
1590 if (unlikely(error_resource != NULL))
1591 vmw_resource_unreference(&error_resource);
1592
1231 return ret; 1593 return ret;
1232} 1594}
1233 1595
@@ -1252,13 +1614,13 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
1252 1614
1253 1615
1254/** 1616/**
1255 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned 1617 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
1256 * query bo. 1618 * query bo.
1257 * 1619 *
1258 * @dev_priv: The device private structure. 1620 * @dev_priv: The device private structure.
1259 * @only_on_cid_match: Only flush and unpin if the current active query cid 1621 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
1260 * matches @cid. 1622 * _after_ a query barrier that flushes all queries touching the current
1261 * @cid: Optional context id to match. 1623 * buffer pointed to by @dev_priv->pinned_bo
1262 * 1624 *
1263 * This function should be used to unpin the pinned query bo, or 1625 * This function should be used to unpin the pinned query bo, or
1264 * as a query barrier when we need to make sure that all queries have 1626 * as a query barrier when we need to make sure that all queries have
@@ -1271,31 +1633,26 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
1271 * 1633 *
1272 * The function will synchronize on the previous query barrier, and will 1634 * The function will synchronize on the previous query barrier, and will
1273 * thus not finish until that barrier has executed. 1635 * thus not finish until that barrier has executed.
1636 *
1637 * the @dev_priv->cmdbuf_mutex needs to be held by the current thread
1638 * before calling this function.
1274 */ 1639 */
1275void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, 1640void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
1276 bool only_on_cid_match, uint32_t cid) 1641 struct vmw_fence_obj *fence)
1277{ 1642{
1278 int ret = 0; 1643 int ret = 0;
1279 struct list_head validate_list; 1644 struct list_head validate_list;
1280 struct ttm_validate_buffer pinned_val, query_val; 1645 struct ttm_validate_buffer pinned_val, query_val;
1281 struct vmw_fence_obj *fence; 1646 struct vmw_fence_obj *lfence = NULL;
1282
1283 mutex_lock(&dev_priv->cmdbuf_mutex);
1284 1647
1285 if (dev_priv->pinned_bo == NULL) 1648 if (dev_priv->pinned_bo == NULL)
1286 goto out_unlock; 1649 goto out_unlock;
1287 1650
1288 if (only_on_cid_match && cid != dev_priv->query_cid)
1289 goto out_unlock;
1290
1291 INIT_LIST_HEAD(&validate_list); 1651 INIT_LIST_HEAD(&validate_list);
1292 1652
1293 pinned_val.new_sync_obj_arg = (void *)(unsigned long)
1294 DRM_VMW_FENCE_FLAG_EXEC;
1295 pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo); 1653 pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
1296 list_add_tail(&pinned_val.head, &validate_list); 1654 list_add_tail(&pinned_val.head, &validate_list);
1297 1655
1298 query_val.new_sync_obj_arg = pinned_val.new_sync_obj_arg;
1299 query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo); 1656 query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
1300 list_add_tail(&query_val.head, &validate_list); 1657 list_add_tail(&query_val.head, &validate_list);
1301 1658
@@ -1308,25 +1665,34 @@ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
1308 goto out_no_reserve; 1665 goto out_no_reserve;
1309 } 1666 }
1310 1667
1311 ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid); 1668 if (dev_priv->query_cid_valid) {
1312 if (unlikely(ret != 0)) { 1669 BUG_ON(fence != NULL);
1313 vmw_execbuf_unpin_panic(dev_priv); 1670 ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
1314 goto out_no_emit; 1671 if (unlikely(ret != 0)) {
1672 vmw_execbuf_unpin_panic(dev_priv);
1673 goto out_no_emit;
1674 }
1675 dev_priv->query_cid_valid = false;
1315 } 1676 }
1316 1677
1317 vmw_bo_pin(dev_priv->pinned_bo, false); 1678 vmw_bo_pin(dev_priv->pinned_bo, false);
1318 vmw_bo_pin(dev_priv->dummy_query_bo, false); 1679 vmw_bo_pin(dev_priv->dummy_query_bo, false);
1319 dev_priv->dummy_query_bo_pinned = false; 1680 dev_priv->dummy_query_bo_pinned = false;
1320 1681
1321 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); 1682 if (fence == NULL) {
1683 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
1684 NULL);
1685 fence = lfence;
1686 }
1322 ttm_eu_fence_buffer_objects(&validate_list, (void *) fence); 1687 ttm_eu_fence_buffer_objects(&validate_list, (void *) fence);
1688 if (lfence != NULL)
1689 vmw_fence_obj_unreference(&lfence);
1323 1690
1324 ttm_bo_unref(&query_val.bo); 1691 ttm_bo_unref(&query_val.bo);
1325 ttm_bo_unref(&pinned_val.bo); 1692 ttm_bo_unref(&pinned_val.bo);
1326 ttm_bo_unref(&dev_priv->pinned_bo); 1693 ttm_bo_unref(&dev_priv->pinned_bo);
1327 1694
1328out_unlock: 1695out_unlock:
1329 mutex_unlock(&dev_priv->cmdbuf_mutex);
1330 return; 1696 return;
1331 1697
1332out_no_emit: 1698out_no_emit:
@@ -1335,6 +1701,31 @@ out_no_reserve:
1335 ttm_bo_unref(&query_val.bo); 1701 ttm_bo_unref(&query_val.bo);
1336 ttm_bo_unref(&pinned_val.bo); 1702 ttm_bo_unref(&pinned_val.bo);
1337 ttm_bo_unref(&dev_priv->pinned_bo); 1703 ttm_bo_unref(&dev_priv->pinned_bo);
1704}
1705
1706/**
1707 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
1708 * query bo.
1709 *
1710 * @dev_priv: The device private structure.
1711 *
1712 * This function should be used to unpin the pinned query bo, or
1713 * as a query barrier when we need to make sure that all queries have
1714 * finished before the next fifo command. (For example on hardware
1715 * context destructions where the hardware may otherwise leak unfinished
1716 * queries).
1717 *
1718 * This function does not return any failure codes, but make attempts
1719 * to do safe unpinning in case of errors.
1720 *
1721 * The function will synchronize on the previous query barrier, and will
1722 * thus not finish until that barrier has executed.
1723 */
1724void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
1725{
1726 mutex_lock(&dev_priv->cmdbuf_mutex);
1727 if (dev_priv->query_cid_valid)
1728 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
1338 mutex_unlock(&dev_priv->cmdbuf_mutex); 1729 mutex_unlock(&dev_priv->cmdbuf_mutex);
1339} 1730}
1340 1731
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index bc187fafd58c..c62d20e8a6f1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -537,7 +537,7 @@ static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
537 container_of(fence, struct vmw_user_fence, fence); 537 container_of(fence, struct vmw_user_fence, fence);
538 struct vmw_fence_manager *fman = fence->fman; 538 struct vmw_fence_manager *fman = fence->fman;
539 539
540 kfree(ufence); 540 ttm_base_object_kfree(ufence, base);
541 /* 541 /*
542 * Free kernel space accounting. 542 * Free kernel space accounting.
543 */ 543 */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index b07ca2e4d04b..d9fbbe191071 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -110,6 +110,8 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
110 memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size); 110 memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
111 111
112 ret = copy_to_user(buffer, bounce, size); 112 ret = copy_to_user(buffer, bounce, size);
113 if (ret)
114 ret = -EFAULT;
113 vfree(bounce); 115 vfree(bounce);
114 116
115 if (unlikely(ret != 0)) 117 if (unlikely(ret != 0))
@@ -131,6 +133,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
131 struct drm_vmw_rect *clips = NULL; 133 struct drm_vmw_rect *clips = NULL;
132 struct drm_mode_object *obj; 134 struct drm_mode_object *obj;
133 struct vmw_framebuffer *vfb; 135 struct vmw_framebuffer *vfb;
136 struct vmw_resource *res;
134 uint32_t num_clips; 137 uint32_t num_clips;
135 int ret; 138 int ret;
136 139
@@ -178,11 +181,13 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
178 if (unlikely(ret != 0)) 181 if (unlikely(ret != 0))
179 goto out_no_ttm_lock; 182 goto out_no_ttm_lock;
180 183
181 ret = vmw_user_surface_lookup_handle(dev_priv, tfile, arg->sid, 184 ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
182 &surface); 185 user_surface_converter,
186 &res);
183 if (ret) 187 if (ret)
184 goto out_no_surface; 188 goto out_no_surface;
185 189
190 surface = vmw_res_to_srf(res);
186 ret = vmw_kms_present(dev_priv, file_priv, 191 ret = vmw_kms_present(dev_priv, file_priv,
187 vfb, surface, arg->sid, 192 vfb, surface, arg->sid,
188 arg->dest_x, arg->dest_y, 193 arg->dest_x, arg->dest_y,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 070fb239c5af..79f7e8e60529 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -373,7 +373,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
373 373
374 drm_mode_crtc_set_gamma_size(crtc, 256); 374 drm_mode_crtc_set_gamma_size(crtc, 256);
375 375
376 drm_connector_attach_property(connector, 376 drm_object_attach_property(&connector->base,
377 dev->mode_config.dirty_info_property, 377 dev->mode_config.dirty_info_property,
378 1); 378 1);
379 379
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index cb55b7b66377..87e39f68e9d0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -35,6 +35,7 @@
35#include "svga_escape.h" 35#include "svga_escape.h"
36 36
37#define VMW_MAX_NUM_STREAMS 1 37#define VMW_MAX_NUM_STREAMS 1
38#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
38 39
39struct vmw_stream { 40struct vmw_stream {
40 struct vmw_dma_buffer *buf; 41 struct vmw_dma_buffer *buf;
@@ -449,6 +450,14 @@ int vmw_overlay_pause_all(struct vmw_private *dev_priv)
449 return 0; 450 return 0;
450} 451}
451 452
453
454static bool vmw_overlay_available(const struct vmw_private *dev_priv)
455{
456 return (dev_priv->overlay_priv != NULL &&
457 ((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) ==
458 VMW_OVERLAY_CAP_MASK));
459}
460
452int vmw_overlay_ioctl(struct drm_device *dev, void *data, 461int vmw_overlay_ioctl(struct drm_device *dev, void *data,
453 struct drm_file *file_priv) 462 struct drm_file *file_priv)
454{ 463{
@@ -461,7 +470,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
461 struct vmw_resource *res; 470 struct vmw_resource *res;
462 int ret; 471 int ret;
463 472
464 if (!overlay) 473 if (!vmw_overlay_available(dev_priv))
465 return -ENOSYS; 474 return -ENOSYS;
466 475
467 ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res); 476 ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
@@ -492,7 +501,7 @@ out_unlock:
492 501
493int vmw_overlay_num_overlays(struct vmw_private *dev_priv) 502int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
494{ 503{
495 if (!dev_priv->overlay_priv) 504 if (!vmw_overlay_available(dev_priv))
496 return 0; 505 return 0;
497 506
498 return VMW_MAX_NUM_STREAMS; 507 return VMW_MAX_NUM_STREAMS;
@@ -503,7 +512,7 @@ int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
503 struct vmw_overlay *overlay = dev_priv->overlay_priv; 512 struct vmw_overlay *overlay = dev_priv->overlay_priv;
504 int i, k; 513 int i, k;
505 514
506 if (!overlay) 515 if (!vmw_overlay_available(dev_priv))
507 return 0; 516 return 0;
508 517
509 mutex_lock(&overlay->mutex); 518 mutex_lock(&overlay->mutex);
@@ -569,12 +578,6 @@ int vmw_overlay_init(struct vmw_private *dev_priv)
569 if (dev_priv->overlay_priv) 578 if (dev_priv->overlay_priv)
570 return -EINVAL; 579 return -EINVAL;
571 580
572 if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_VIDEO) &&
573 (dev_priv->fifo.capabilities & SVGA_FIFO_CAP_ESCAPE)) {
574 DRM_INFO("hardware doesn't support overlays\n");
575 return -ENOSYS;
576 }
577
578 overlay = kzalloc(sizeof(*overlay), GFP_KERNEL); 581 overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
579 if (!overlay) 582 if (!overlay)
580 return -ENOMEM; 583 return -ENOMEM;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index da3c6b5b98a1..e01a17b407b2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -30,17 +30,7 @@
30#include <drm/ttm/ttm_object.h> 30#include <drm/ttm/ttm_object.h>
31#include <drm/ttm/ttm_placement.h> 31#include <drm/ttm/ttm_placement.h>
32#include <drm/drmP.h> 32#include <drm/drmP.h>
33 33#include "vmwgfx_resource_priv.h"
34struct vmw_user_context {
35 struct ttm_base_object base;
36 struct vmw_resource res;
37};
38
39struct vmw_user_surface {
40 struct ttm_base_object base;
41 struct vmw_surface srf;
42 uint32_t size;
43};
44 34
45struct vmw_user_dma_buffer { 35struct vmw_user_dma_buffer {
46 struct ttm_base_object base; 36 struct ttm_base_object base;
@@ -62,17 +52,21 @@ struct vmw_user_stream {
62 struct vmw_stream stream; 52 struct vmw_stream stream;
63}; 53};
64 54
65struct vmw_surface_offset {
66 uint32_t face;
67 uint32_t mip;
68 uint32_t bo_offset;
69};
70 55
71
72static uint64_t vmw_user_context_size;
73static uint64_t vmw_user_surface_size;
74static uint64_t vmw_user_stream_size; 56static uint64_t vmw_user_stream_size;
75 57
58static const struct vmw_res_func vmw_stream_func = {
59 .res_type = vmw_res_stream,
60 .needs_backup = false,
61 .may_evict = false,
62 .type_name = "video streams",
63 .backup_placement = NULL,
64 .create = NULL,
65 .destroy = NULL,
66 .bind = NULL,
67 .unbind = NULL
68};
69
76static inline struct vmw_dma_buffer * 70static inline struct vmw_dma_buffer *
77vmw_dma_buffer(struct ttm_buffer_object *bo) 71vmw_dma_buffer(struct ttm_buffer_object *bo)
78{ 72{
@@ -100,13 +94,14 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
100 * 94 *
101 * Release the resource id to the resource id manager and set it to -1 95 * Release the resource id to the resource id manager and set it to -1
102 */ 96 */
103static void vmw_resource_release_id(struct vmw_resource *res) 97void vmw_resource_release_id(struct vmw_resource *res)
104{ 98{
105 struct vmw_private *dev_priv = res->dev_priv; 99 struct vmw_private *dev_priv = res->dev_priv;
100 struct idr *idr = &dev_priv->res_idr[res->func->res_type];
106 101
107 write_lock(&dev_priv->resource_lock); 102 write_lock(&dev_priv->resource_lock);
108 if (res->id != -1) 103 if (res->id != -1)
109 idr_remove(res->idr, res->id); 104 idr_remove(idr, res->id);
110 res->id = -1; 105 res->id = -1;
111 write_unlock(&dev_priv->resource_lock); 106 write_unlock(&dev_priv->resource_lock);
112} 107}
@@ -116,17 +111,33 @@ static void vmw_resource_release(struct kref *kref)
116 struct vmw_resource *res = 111 struct vmw_resource *res =
117 container_of(kref, struct vmw_resource, kref); 112 container_of(kref, struct vmw_resource, kref);
118 struct vmw_private *dev_priv = res->dev_priv; 113 struct vmw_private *dev_priv = res->dev_priv;
119 int id = res->id; 114 int id;
120 struct idr *idr = res->idr; 115 struct idr *idr = &dev_priv->res_idr[res->func->res_type];
121 116
122 res->avail = false; 117 res->avail = false;
123 if (res->remove_from_lists != NULL) 118 list_del_init(&res->lru_head);
124 res->remove_from_lists(res);
125 write_unlock(&dev_priv->resource_lock); 119 write_unlock(&dev_priv->resource_lock);
120 if (res->backup) {
121 struct ttm_buffer_object *bo = &res->backup->base;
122
123 ttm_bo_reserve(bo, false, false, false, 0);
124 if (!list_empty(&res->mob_head) &&
125 res->func->unbind != NULL) {
126 struct ttm_validate_buffer val_buf;
127
128 val_buf.bo = bo;
129 res->func->unbind(res, false, &val_buf);
130 }
131 res->backup_dirty = false;
132 list_del_init(&res->mob_head);
133 ttm_bo_unreserve(bo);
134 vmw_dmabuf_unreference(&res->backup);
135 }
126 136
127 if (likely(res->hw_destroy != NULL)) 137 if (likely(res->hw_destroy != NULL))
128 res->hw_destroy(res); 138 res->hw_destroy(res);
129 139
140 id = res->id;
130 if (res->res_free != NULL) 141 if (res->res_free != NULL)
131 res->res_free(res); 142 res->res_free(res);
132 else 143 else
@@ -153,25 +164,25 @@ void vmw_resource_unreference(struct vmw_resource **p_res)
153/** 164/**
154 * vmw_resource_alloc_id - release a resource id to the id manager. 165 * vmw_resource_alloc_id - release a resource id to the id manager.
155 * 166 *
156 * @dev_priv: Pointer to the device private structure.
157 * @res: Pointer to the resource. 167 * @res: Pointer to the resource.
158 * 168 *
159 * Allocate the lowest free resource from the resource manager, and set 169 * Allocate the lowest free resource from the resource manager, and set
160 * @res->id to that id. Returns 0 on success and -ENOMEM on failure. 170 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
161 */ 171 */
162static int vmw_resource_alloc_id(struct vmw_private *dev_priv, 172int vmw_resource_alloc_id(struct vmw_resource *res)
163 struct vmw_resource *res)
164{ 173{
174 struct vmw_private *dev_priv = res->dev_priv;
165 int ret; 175 int ret;
176 struct idr *idr = &dev_priv->res_idr[res->func->res_type];
166 177
167 BUG_ON(res->id != -1); 178 BUG_ON(res->id != -1);
168 179
169 do { 180 do {
170 if (unlikely(idr_pre_get(res->idr, GFP_KERNEL) == 0)) 181 if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
171 return -ENOMEM; 182 return -ENOMEM;
172 183
173 write_lock(&dev_priv->resource_lock); 184 write_lock(&dev_priv->resource_lock);
174 ret = idr_get_new_above(res->idr, res, 1, &res->id); 185 ret = idr_get_new_above(idr, res, 1, &res->id);
175 write_unlock(&dev_priv->resource_lock); 186 write_unlock(&dev_priv->resource_lock);
176 187
177 } while (ret == -EAGAIN); 188 } while (ret == -EAGAIN);
@@ -179,31 +190,39 @@ static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
179 return ret; 190 return ret;
180} 191}
181 192
182 193/**
183static int vmw_resource_init(struct vmw_private *dev_priv, 194 * vmw_resource_init - initialize a struct vmw_resource
184 struct vmw_resource *res, 195 *
185 struct idr *idr, 196 * @dev_priv: Pointer to a device private struct.
186 enum ttm_object_type obj_type, 197 * @res: The struct vmw_resource to initialize.
187 bool delay_id, 198 * @obj_type: Resource object type.
188 void (*res_free) (struct vmw_resource *res), 199 * @delay_id: Boolean whether to defer device id allocation until
189 void (*remove_from_lists) 200 * the first validation.
190 (struct vmw_resource *res)) 201 * @res_free: Resource destructor.
202 * @func: Resource function table.
203 */
204int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
205 bool delay_id,
206 void (*res_free) (struct vmw_resource *res),
207 const struct vmw_res_func *func)
191{ 208{
192 kref_init(&res->kref); 209 kref_init(&res->kref);
193 res->hw_destroy = NULL; 210 res->hw_destroy = NULL;
194 res->res_free = res_free; 211 res->res_free = res_free;
195 res->remove_from_lists = remove_from_lists;
196 res->res_type = obj_type;
197 res->idr = idr;
198 res->avail = false; 212 res->avail = false;
199 res->dev_priv = dev_priv; 213 res->dev_priv = dev_priv;
200 INIT_LIST_HEAD(&res->query_head); 214 res->func = func;
201 INIT_LIST_HEAD(&res->validate_head); 215 INIT_LIST_HEAD(&res->lru_head);
216 INIT_LIST_HEAD(&res->mob_head);
202 res->id = -1; 217 res->id = -1;
218 res->backup = NULL;
219 res->backup_offset = 0;
220 res->backup_dirty = false;
221 res->res_dirty = false;
203 if (delay_id) 222 if (delay_id)
204 return 0; 223 return 0;
205 else 224 else
206 return vmw_resource_alloc_id(dev_priv, res); 225 return vmw_resource_alloc_id(res);
207} 226}
208 227
209/** 228/**
@@ -218,9 +237,8 @@ static int vmw_resource_init(struct vmw_private *dev_priv,
218 * Activate basically means that the function vmw_resource_lookup will 237 * Activate basically means that the function vmw_resource_lookup will
219 * find it. 238 * find it.
220 */ 239 */
221 240void vmw_resource_activate(struct vmw_resource *res,
222static void vmw_resource_activate(struct vmw_resource *res, 241 void (*hw_destroy) (struct vmw_resource *))
223 void (*hw_destroy) (struct vmw_resource *))
224{ 242{
225 struct vmw_private *dev_priv = res->dev_priv; 243 struct vmw_private *dev_priv = res->dev_priv;
226 244
@@ -250,994 +268,41 @@ struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
250} 268}
251 269
252/** 270/**
253 * Context management: 271 * vmw_user_resource_lookup_handle - lookup a struct resource from a
254 */ 272 * TTM user-space handle and perform basic type checks
255
256static void vmw_hw_context_destroy(struct vmw_resource *res)
257{
258
259 struct vmw_private *dev_priv = res->dev_priv;
260 struct {
261 SVGA3dCmdHeader header;
262 SVGA3dCmdDestroyContext body;
263 } *cmd;
264
265
266 vmw_execbuf_release_pinned_bo(dev_priv, true, res->id);
267
268 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
269 if (unlikely(cmd == NULL)) {
270 DRM_ERROR("Failed reserving FIFO space for surface "
271 "destruction.\n");
272 return;
273 }
274
275 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
276 cmd->header.size = cpu_to_le32(sizeof(cmd->body));
277 cmd->body.cid = cpu_to_le32(res->id);
278
279 vmw_fifo_commit(dev_priv, sizeof(*cmd));
280 vmw_3d_resource_dec(dev_priv, false);
281}
282
283static int vmw_context_init(struct vmw_private *dev_priv,
284 struct vmw_resource *res,
285 void (*res_free) (struct vmw_resource *res))
286{
287 int ret;
288
289 struct {
290 SVGA3dCmdHeader header;
291 SVGA3dCmdDefineContext body;
292 } *cmd;
293
294 ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
295 VMW_RES_CONTEXT, false, res_free, NULL);
296
297 if (unlikely(ret != 0)) {
298 DRM_ERROR("Failed to allocate a resource id.\n");
299 goto out_early;
300 }
301
302 if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
303 DRM_ERROR("Out of hw context ids.\n");
304 vmw_resource_unreference(&res);
305 return -ENOMEM;
306 }
307
308 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
309 if (unlikely(cmd == NULL)) {
310 DRM_ERROR("Fifo reserve failed.\n");
311 vmw_resource_unreference(&res);
312 return -ENOMEM;
313 }
314
315 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
316 cmd->header.size = cpu_to_le32(sizeof(cmd->body));
317 cmd->body.cid = cpu_to_le32(res->id);
318
319 vmw_fifo_commit(dev_priv, sizeof(*cmd));
320 (void) vmw_3d_resource_inc(dev_priv, false);
321 vmw_resource_activate(res, vmw_hw_context_destroy);
322 return 0;
323
324out_early:
325 if (res_free == NULL)
326 kfree(res);
327 else
328 res_free(res);
329 return ret;
330}
331
332struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
333{
334 struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
335 int ret;
336
337 if (unlikely(res == NULL))
338 return NULL;
339
340 ret = vmw_context_init(dev_priv, res, NULL);
341 return (ret == 0) ? res : NULL;
342}
343
344/**
345 * User-space context management:
346 */
347
348static void vmw_user_context_free(struct vmw_resource *res)
349{
350 struct vmw_user_context *ctx =
351 container_of(res, struct vmw_user_context, res);
352 struct vmw_private *dev_priv = res->dev_priv;
353
354 kfree(ctx);
355 ttm_mem_global_free(vmw_mem_glob(dev_priv),
356 vmw_user_context_size);
357}
358
359/**
360 * This function is called when user space has no more references on the
361 * base object. It releases the base-object's reference on the resource object.
362 */
363
364static void vmw_user_context_base_release(struct ttm_base_object **p_base)
365{
366 struct ttm_base_object *base = *p_base;
367 struct vmw_user_context *ctx =
368 container_of(base, struct vmw_user_context, base);
369 struct vmw_resource *res = &ctx->res;
370
371 *p_base = NULL;
372 vmw_resource_unreference(&res);
373}
374
375int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
376 struct drm_file *file_priv)
377{
378 struct vmw_private *dev_priv = vmw_priv(dev);
379 struct vmw_resource *res;
380 struct vmw_user_context *ctx;
381 struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
382 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
383 int ret = 0;
384
385 res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
386 if (unlikely(res == NULL))
387 return -EINVAL;
388
389 if (res->res_free != &vmw_user_context_free) {
390 ret = -EINVAL;
391 goto out;
392 }
393
394 ctx = container_of(res, struct vmw_user_context, res);
395 if (ctx->base.tfile != tfile && !ctx->base.shareable) {
396 ret = -EPERM;
397 goto out;
398 }
399
400 ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
401out:
402 vmw_resource_unreference(&res);
403 return ret;
404}
405
406int vmw_context_define_ioctl(struct drm_device *dev, void *data,
407 struct drm_file *file_priv)
408{
409 struct vmw_private *dev_priv = vmw_priv(dev);
410 struct vmw_user_context *ctx;
411 struct vmw_resource *res;
412 struct vmw_resource *tmp;
413 struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
414 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
415 struct vmw_master *vmaster = vmw_master(file_priv->master);
416 int ret;
417
418
419 /*
420 * Approximate idr memory usage with 128 bytes. It will be limited
421 * by maximum number_of contexts anyway.
422 */
423
424 if (unlikely(vmw_user_context_size == 0))
425 vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
426
427 ret = ttm_read_lock(&vmaster->lock, true);
428 if (unlikely(ret != 0))
429 return ret;
430
431 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
432 vmw_user_context_size,
433 false, true);
434 if (unlikely(ret != 0)) {
435 if (ret != -ERESTARTSYS)
436 DRM_ERROR("Out of graphics memory for context"
437 " creation.\n");
438 goto out_unlock;
439 }
440
441 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
442 if (unlikely(ctx == NULL)) {
443 ttm_mem_global_free(vmw_mem_glob(dev_priv),
444 vmw_user_context_size);
445 ret = -ENOMEM;
446 goto out_unlock;
447 }
448
449 res = &ctx->res;
450 ctx->base.shareable = false;
451 ctx->base.tfile = NULL;
452
453 /*
454 * From here on, the destructor takes over resource freeing.
455 */
456
457 ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
458 if (unlikely(ret != 0))
459 goto out_unlock;
460
461 tmp = vmw_resource_reference(&ctx->res);
462 ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
463 &vmw_user_context_base_release, NULL);
464
465 if (unlikely(ret != 0)) {
466 vmw_resource_unreference(&tmp);
467 goto out_err;
468 }
469
470 arg->cid = res->id;
471out_err:
472 vmw_resource_unreference(&res);
473out_unlock:
474 ttm_read_unlock(&vmaster->lock);
475 return ret;
476
477}
478
479int vmw_context_check(struct vmw_private *dev_priv,
480 struct ttm_object_file *tfile,
481 int id,
482 struct vmw_resource **p_res)
483{
484 struct vmw_resource *res;
485 int ret = 0;
486
487 read_lock(&dev_priv->resource_lock);
488 res = idr_find(&dev_priv->context_idr, id);
489 if (res && res->avail) {
490 struct vmw_user_context *ctx =
491 container_of(res, struct vmw_user_context, res);
492 if (ctx->base.tfile != tfile && !ctx->base.shareable)
493 ret = -EPERM;
494 if (p_res)
495 *p_res = vmw_resource_reference(res);
496 } else
497 ret = -EINVAL;
498 read_unlock(&dev_priv->resource_lock);
499
500 return ret;
501}
502
503struct vmw_bpp {
504 uint8_t bpp;
505 uint8_t s_bpp;
506};
507
508/*
509 * Size table for the supported SVGA3D surface formats. It consists of
510 * two values. The bpp value and the s_bpp value which is short for
511 * "stride bits per pixel" The values are given in such a way that the
512 * minimum stride for the image is calculated using
513 *
514 * min_stride = w*s_bpp
515 *
516 * and the total memory requirement for the image is
517 *
518 * h*min_stride*bpp/s_bpp
519 *
520 */
521static const struct vmw_bpp vmw_sf_bpp[] = {
522 [SVGA3D_FORMAT_INVALID] = {0, 0},
523 [SVGA3D_X8R8G8B8] = {32, 32},
524 [SVGA3D_A8R8G8B8] = {32, 32},
525 [SVGA3D_R5G6B5] = {16, 16},
526 [SVGA3D_X1R5G5B5] = {16, 16},
527 [SVGA3D_A1R5G5B5] = {16, 16},
528 [SVGA3D_A4R4G4B4] = {16, 16},
529 [SVGA3D_Z_D32] = {32, 32},
530 [SVGA3D_Z_D16] = {16, 16},
531 [SVGA3D_Z_D24S8] = {32, 32},
532 [SVGA3D_Z_D15S1] = {16, 16},
533 [SVGA3D_LUMINANCE8] = {8, 8},
534 [SVGA3D_LUMINANCE4_ALPHA4] = {8, 8},
535 [SVGA3D_LUMINANCE16] = {16, 16},
536 [SVGA3D_LUMINANCE8_ALPHA8] = {16, 16},
537 [SVGA3D_DXT1] = {4, 16},
538 [SVGA3D_DXT2] = {8, 32},
539 [SVGA3D_DXT3] = {8, 32},
540 [SVGA3D_DXT4] = {8, 32},
541 [SVGA3D_DXT5] = {8, 32},
542 [SVGA3D_BUMPU8V8] = {16, 16},
543 [SVGA3D_BUMPL6V5U5] = {16, 16},
544 [SVGA3D_BUMPX8L8V8U8] = {32, 32},
545 [SVGA3D_ARGB_S10E5] = {16, 16},
546 [SVGA3D_ARGB_S23E8] = {32, 32},
547 [SVGA3D_A2R10G10B10] = {32, 32},
548 [SVGA3D_V8U8] = {16, 16},
549 [SVGA3D_Q8W8V8U8] = {32, 32},
550 [SVGA3D_CxV8U8] = {16, 16},
551 [SVGA3D_X8L8V8U8] = {32, 32},
552 [SVGA3D_A2W10V10U10] = {32, 32},
553 [SVGA3D_ALPHA8] = {8, 8},
554 [SVGA3D_R_S10E5] = {16, 16},
555 [SVGA3D_R_S23E8] = {32, 32},
556 [SVGA3D_RG_S10E5] = {16, 16},
557 [SVGA3D_RG_S23E8] = {32, 32},
558 [SVGA3D_BUFFER] = {8, 8},
559 [SVGA3D_Z_D24X8] = {32, 32},
560 [SVGA3D_V16U16] = {32, 32},
561 [SVGA3D_G16R16] = {32, 32},
562 [SVGA3D_A16B16G16R16] = {64, 64},
563 [SVGA3D_UYVY] = {12, 12},
564 [SVGA3D_YUY2] = {12, 12},
565 [SVGA3D_NV12] = {12, 8},
566 [SVGA3D_AYUV] = {32, 32},
567 [SVGA3D_BC4_UNORM] = {4, 16},
568 [SVGA3D_BC5_UNORM] = {8, 32},
569 [SVGA3D_Z_DF16] = {16, 16},
570 [SVGA3D_Z_DF24] = {24, 24},
571 [SVGA3D_Z_D24S8_INT] = {32, 32}
572};
573
574
/**
 * Surface management.
 */
578
579struct vmw_surface_dma {
580 SVGA3dCmdHeader header;
581 SVGA3dCmdSurfaceDMA body;
582 SVGA3dCopyBox cb;
583 SVGA3dCmdSurfaceDMASuffix suffix;
584};
585
586struct vmw_surface_define {
587 SVGA3dCmdHeader header;
588 SVGA3dCmdDefineSurface body;
589};
590
591struct vmw_surface_destroy {
592 SVGA3dCmdHeader header;
593 SVGA3dCmdDestroySurface body;
594};
595
596
597/**
598 * vmw_surface_dma_size - Compute fifo size for a dma command.
599 *
600 * @srf: Pointer to a struct vmw_surface
601 *
602 * Computes the required size for a surface dma command for backup or
603 * restoration of the surface represented by @srf.
604 */
605static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
606{
607 return srf->num_sizes * sizeof(struct vmw_surface_dma);
608}
609
610
611/**
612 * vmw_surface_define_size - Compute fifo size for a surface define command.
613 *
614 * @srf: Pointer to a struct vmw_surface
615 *
616 * Computes the required size for a surface define command for the definition
617 * of the surface represented by @srf.
618 */
619static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
620{
621 return sizeof(struct vmw_surface_define) + srf->num_sizes *
622 sizeof(SVGA3dSize);
623}
624
625
626/**
627 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
628 * 273 *
629 * Computes the required size for a surface destroy command for the destruction 274 * @dev_priv: Pointer to a device private struct
630 * of a hw surface. 275 * @tfile: Pointer to a struct ttm_object_file identifying the caller
631 */ 276 * @handle: The TTM user-space handle
632static inline uint32_t vmw_surface_destroy_size(void) 277 * @converter: Pointer to an object describing the resource type
633{ 278 * @p_res: On successful return the location pointed to will contain
634 return sizeof(struct vmw_surface_destroy); 279 * a pointer to a refcounted struct vmw_resource.
635}
636
637/**
638 * vmw_surface_destroy_encode - Encode a surface_destroy command.
639 *
640 * @id: The surface id
641 * @cmd_space: Pointer to memory area in which the commands should be encoded.
642 */
643static void vmw_surface_destroy_encode(uint32_t id,
644 void *cmd_space)
645{
646 struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
647 cmd_space;
648
649 cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
650 cmd->header.size = sizeof(cmd->body);
651 cmd->body.sid = id;
652}
653
654/**
655 * vmw_surface_define_encode - Encode a surface_define command.
656 * 280 *
657 * @srf: Pointer to a struct vmw_surface object. 281 * If the handle can't be found or is associated with an incorrect resource
658 * @cmd_space: Pointer to memory area in which the commands should be encoded. 282 * type, -EINVAL will be returned.
659 */ 283 */
660static void vmw_surface_define_encode(const struct vmw_surface *srf, 284int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
661 void *cmd_space) 285 struct ttm_object_file *tfile,
286 uint32_t handle,
287 const struct vmw_user_resource_conv
288 *converter,
289 struct vmw_resource **p_res)
662{ 290{
663 struct vmw_surface_define *cmd = (struct vmw_surface_define *)
664 cmd_space;
665 struct drm_vmw_size *src_size;
666 SVGA3dSize *cmd_size;
667 uint32_t cmd_len;
668 int i;
669
670 cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
671
672 cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
673 cmd->header.size = cmd_len;
674 cmd->body.sid = srf->res.id;
675 cmd->body.surfaceFlags = srf->flags;
676 cmd->body.format = cpu_to_le32(srf->format);
677 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
678 cmd->body.face[i].numMipLevels = srf->mip_levels[i];
679
680 cmd += 1;
681 cmd_size = (SVGA3dSize *) cmd;
682 src_size = srf->sizes;
683
684 for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
685 cmd_size->width = src_size->width;
686 cmd_size->height = src_size->height;
687 cmd_size->depth = src_size->depth;
688 }
689}
690
691
692/**
693 * vmw_surface_dma_encode - Encode a surface_dma command.
694 *
695 * @srf: Pointer to a struct vmw_surface object.
696 * @cmd_space: Pointer to memory area in which the commands should be encoded.
697 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
698 * should be placed or read from.
699 * @to_surface: Boolean whether to DMA to the surface or from the surface.
700 */
701static void vmw_surface_dma_encode(struct vmw_surface *srf,
702 void *cmd_space,
703 const SVGAGuestPtr *ptr,
704 bool to_surface)
705{
706 uint32_t i;
707 uint32_t bpp = vmw_sf_bpp[srf->format].bpp;
708 uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
709 struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
710
711 for (i = 0; i < srf->num_sizes; ++i) {
712 SVGA3dCmdHeader *header = &cmd->header;
713 SVGA3dCmdSurfaceDMA *body = &cmd->body;
714 SVGA3dCopyBox *cb = &cmd->cb;
715 SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
716 const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
717 const struct drm_vmw_size *cur_size = &srf->sizes[i];
718
719 header->id = SVGA_3D_CMD_SURFACE_DMA;
720 header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
721
722 body->guest.ptr = *ptr;
723 body->guest.ptr.offset += cur_offset->bo_offset;
724 body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3;
725 body->host.sid = srf->res.id;
726 body->host.face = cur_offset->face;
727 body->host.mipmap = cur_offset->mip;
728 body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
729 SVGA3D_READ_HOST_VRAM);
730 cb->x = 0;
731 cb->y = 0;
732 cb->z = 0;
733 cb->srcx = 0;
734 cb->srcy = 0;
735 cb->srcz = 0;
736 cb->w = cur_size->width;
737 cb->h = cur_size->height;
738 cb->d = cur_size->depth;
739
740 suffix->suffixSize = sizeof(*suffix);
741 suffix->maximumOffset = body->guest.pitch*cur_size->height*
742 cur_size->depth*bpp / stride_bpp;
743 suffix->flags.discard = 0;
744 suffix->flags.unsynchronized = 0;
745 suffix->flags.reserved = 0;
746 ++cmd;
747 }
748};
749
750
751static void vmw_hw_surface_destroy(struct vmw_resource *res)
752{
753
754 struct vmw_private *dev_priv = res->dev_priv;
755 struct vmw_surface *srf;
756 void *cmd;
757
758 if (res->id != -1) {
759
760 cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
761 if (unlikely(cmd == NULL)) {
762 DRM_ERROR("Failed reserving FIFO space for surface "
763 "destruction.\n");
764 return;
765 }
766
767 vmw_surface_destroy_encode(res->id, cmd);
768 vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
769
770 /*
771 * used_memory_size_atomic, or separate lock
772 * to avoid taking dev_priv::cmdbuf_mutex in
773 * the destroy path.
774 */
775
776 mutex_lock(&dev_priv->cmdbuf_mutex);
777 srf = container_of(res, struct vmw_surface, res);
778 dev_priv->used_memory_size -= srf->backup_size;
779 mutex_unlock(&dev_priv->cmdbuf_mutex);
780
781 }
782 vmw_3d_resource_dec(dev_priv, false);
783}
784
785void vmw_surface_res_free(struct vmw_resource *res)
786{
787 struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
788
789 if (srf->backup)
790 ttm_bo_unref(&srf->backup);
791 kfree(srf->offsets);
792 kfree(srf->sizes);
793 kfree(srf->snooper.image);
794 kfree(srf);
795}
796
797
798/**
799 * vmw_surface_do_validate - make a surface available to the device.
800 *
801 * @dev_priv: Pointer to a device private struct.
802 * @srf: Pointer to a struct vmw_surface.
803 *
804 * If the surface doesn't have a hw id, allocate one, and optionally
805 * DMA the backed up surface contents to the device.
806 *
807 * Returns -EBUSY if there wasn't sufficient device resources to
808 * complete the validation. Retry after freeing up resources.
809 *
810 * May return other errors if the kernel is out of guest resources.
811 */
812int vmw_surface_do_validate(struct vmw_private *dev_priv,
813 struct vmw_surface *srf)
814{
815 struct vmw_resource *res = &srf->res;
816 struct list_head val_list;
817 struct ttm_validate_buffer val_buf;
818 uint32_t submit_size;
819 uint8_t *cmd;
820 int ret;
821
822 if (likely(res->id != -1))
823 return 0;
824
825 if (unlikely(dev_priv->used_memory_size + srf->backup_size >=
826 dev_priv->memory_size))
827 return -EBUSY;
828
829 /*
830 * Reserve- and validate the backup DMA bo.
831 */
832
833 if (srf->backup) {
834 INIT_LIST_HEAD(&val_list);
835 val_buf.bo = ttm_bo_reference(srf->backup);
836 val_buf.new_sync_obj_arg = (void *)((unsigned long)
837 DRM_VMW_FENCE_FLAG_EXEC);
838 list_add_tail(&val_buf.head, &val_list);
839 ret = ttm_eu_reserve_buffers(&val_list);
840 if (unlikely(ret != 0))
841 goto out_no_reserve;
842
843 ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
844 true, false, false);
845 if (unlikely(ret != 0))
846 goto out_no_validate;
847 }
848
849 /*
850 * Alloc id for the resource.
851 */
852
853 ret = vmw_resource_alloc_id(dev_priv, res);
854 if (unlikely(ret != 0)) {
855 DRM_ERROR("Failed to allocate a surface id.\n");
856 goto out_no_id;
857 }
858 if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
859 ret = -EBUSY;
860 goto out_no_fifo;
861 }
862
863
864 /*
865 * Encode surface define- and dma commands.
866 */
867
868 submit_size = vmw_surface_define_size(srf);
869 if (srf->backup)
870 submit_size += vmw_surface_dma_size(srf);
871
872 cmd = vmw_fifo_reserve(dev_priv, submit_size);
873 if (unlikely(cmd == NULL)) {
874 DRM_ERROR("Failed reserving FIFO space for surface "
875 "validation.\n");
876 ret = -ENOMEM;
877 goto out_no_fifo;
878 }
879
880 vmw_surface_define_encode(srf, cmd);
881 if (srf->backup) {
882 SVGAGuestPtr ptr;
883
884 cmd += vmw_surface_define_size(srf);
885 vmw_bo_get_guest_ptr(srf->backup, &ptr);
886 vmw_surface_dma_encode(srf, cmd, &ptr, true);
887 }
888
889 vmw_fifo_commit(dev_priv, submit_size);
890
891 /*
892 * Create a fence object and fence the backup buffer.
893 */
894
895 if (srf->backup) {
896 struct vmw_fence_obj *fence;
897
898 (void) vmw_execbuf_fence_commands(NULL, dev_priv,
899 &fence, NULL);
900 ttm_eu_fence_buffer_objects(&val_list, fence);
901 if (likely(fence != NULL))
902 vmw_fence_obj_unreference(&fence);
903 ttm_bo_unref(&val_buf.bo);
904 ttm_bo_unref(&srf->backup);
905 }
906
907 /*
908 * Surface memory usage accounting.
909 */
910
911 dev_priv->used_memory_size += srf->backup_size;
912
913 return 0;
914
915out_no_fifo:
916 vmw_resource_release_id(res);
917out_no_id:
918out_no_validate:
919 if (srf->backup)
920 ttm_eu_backoff_reservation(&val_list);
921out_no_reserve:
922 if (srf->backup)
923 ttm_bo_unref(&val_buf.bo);
924 return ret;
925}
926
927/**
928 * vmw_surface_evict - Evict a hw surface.
929 *
930 * @dev_priv: Pointer to a device private struct.
931 * @srf: Pointer to a struct vmw_surface
932 *
933 * DMA the contents of a hw surface to a backup guest buffer object,
934 * and destroy the hw surface, releasing its id.
935 */
936int vmw_surface_evict(struct vmw_private *dev_priv,
937 struct vmw_surface *srf)
938{
939 struct vmw_resource *res = &srf->res;
940 struct list_head val_list;
941 struct ttm_validate_buffer val_buf;
942 uint32_t submit_size;
943 uint8_t *cmd;
944 int ret;
945 struct vmw_fence_obj *fence;
946 SVGAGuestPtr ptr;
947
948 BUG_ON(res->id == -1);
949
950 /*
951 * Create a surface backup buffer object.
952 */
953
954 if (!srf->backup) {
955 ret = ttm_bo_create(&dev_priv->bdev, srf->backup_size,
956 ttm_bo_type_device,
957 &vmw_srf_placement, 0, 0, true,
958 NULL, &srf->backup);
959 if (unlikely(ret != 0))
960 return ret;
961 }
962
963 /*
964 * Reserve- and validate the backup DMA bo.
965 */
966
967 INIT_LIST_HEAD(&val_list);
968 val_buf.bo = ttm_bo_reference(srf->backup);
969 val_buf.new_sync_obj_arg = (void *)(unsigned long)
970 DRM_VMW_FENCE_FLAG_EXEC;
971 list_add_tail(&val_buf.head, &val_list);
972 ret = ttm_eu_reserve_buffers(&val_list);
973 if (unlikely(ret != 0))
974 goto out_no_reserve;
975
976 ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
977 true, false, false);
978 if (unlikely(ret != 0))
979 goto out_no_validate;
980
981
982 /*
983 * Encode the dma- and surface destroy commands.
984 */
985
986 submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size();
987 cmd = vmw_fifo_reserve(dev_priv, submit_size);
988 if (unlikely(cmd == NULL)) {
989 DRM_ERROR("Failed reserving FIFO space for surface "
990 "eviction.\n");
991 ret = -ENOMEM;
992 goto out_no_fifo;
993 }
994
995 vmw_bo_get_guest_ptr(srf->backup, &ptr);
996 vmw_surface_dma_encode(srf, cmd, &ptr, false);
997 cmd += vmw_surface_dma_size(srf);
998 vmw_surface_destroy_encode(res->id, cmd);
999 vmw_fifo_commit(dev_priv, submit_size);
1000
1001 /*
1002 * Surface memory usage accounting.
1003 */
1004
1005 dev_priv->used_memory_size -= srf->backup_size;
1006
1007 /*
1008 * Create a fence object and fence the DMA buffer.
1009 */
1010
1011 (void) vmw_execbuf_fence_commands(NULL, dev_priv,
1012 &fence, NULL);
1013 ttm_eu_fence_buffer_objects(&val_list, fence);
1014 if (likely(fence != NULL))
1015 vmw_fence_obj_unreference(&fence);
1016 ttm_bo_unref(&val_buf.bo);
1017
1018 /*
1019 * Release the surface ID.
1020 */
1021
1022 vmw_resource_release_id(res);
1023
1024 return 0;
1025
1026out_no_fifo:
1027out_no_validate:
1028 if (srf->backup)
1029 ttm_eu_backoff_reservation(&val_list);
1030out_no_reserve:
1031 ttm_bo_unref(&val_buf.bo);
1032 ttm_bo_unref(&srf->backup);
1033 return ret;
1034}
1035
1036
1037/**
1038 * vmw_surface_validate - make a surface available to the device, evicting
1039 * other surfaces if needed.
1040 *
1041 * @dev_priv: Pointer to a device private struct.
1042 * @srf: Pointer to a struct vmw_surface.
1043 *
1044 * Try to validate a surface and if it fails due to limited device resources,
1045 * repeatedly try to evict other surfaces until the request can be
1046 * acommodated.
1047 *
1048 * May return errors if out of resources.
1049 */
1050int vmw_surface_validate(struct vmw_private *dev_priv,
1051 struct vmw_surface *srf)
1052{
1053 int ret;
1054 struct vmw_surface *evict_srf;
1055
1056 do {
1057 write_lock(&dev_priv->resource_lock);
1058 list_del_init(&srf->lru_head);
1059 write_unlock(&dev_priv->resource_lock);
1060
1061 ret = vmw_surface_do_validate(dev_priv, srf);
1062 if (likely(ret != -EBUSY))
1063 break;
1064
1065 write_lock(&dev_priv->resource_lock);
1066 if (list_empty(&dev_priv->surface_lru)) {
1067 DRM_ERROR("Out of device memory for surfaces.\n");
1068 ret = -EBUSY;
1069 write_unlock(&dev_priv->resource_lock);
1070 break;
1071 }
1072
1073 evict_srf = vmw_surface_reference
1074 (list_first_entry(&dev_priv->surface_lru,
1075 struct vmw_surface,
1076 lru_head));
1077 list_del_init(&evict_srf->lru_head);
1078
1079 write_unlock(&dev_priv->resource_lock);
1080 (void) vmw_surface_evict(dev_priv, evict_srf);
1081
1082 vmw_surface_unreference(&evict_srf);
1083
1084 } while (1);
1085
1086 if (unlikely(ret != 0 && srf->res.id != -1)) {
1087 write_lock(&dev_priv->resource_lock);
1088 list_add_tail(&srf->lru_head, &dev_priv->surface_lru);
1089 write_unlock(&dev_priv->resource_lock);
1090 }
1091
1092 return ret;
1093}
1094
1095
1096/**
1097 * vmw_surface_remove_from_lists - Remove surface resources from lookup lists
1098 *
1099 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface
1100 *
1101 * As part of the resource destruction, remove the surface from any
1102 * lookup lists.
1103 */
1104static void vmw_surface_remove_from_lists(struct vmw_resource *res)
1105{
1106 struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
1107
1108 list_del_init(&srf->lru_head);
1109}
1110
1111int vmw_surface_init(struct vmw_private *dev_priv,
1112 struct vmw_surface *srf,
1113 void (*res_free) (struct vmw_resource *res))
1114{
1115 int ret;
1116 struct vmw_resource *res = &srf->res;
1117
1118 BUG_ON(res_free == NULL);
1119 INIT_LIST_HEAD(&srf->lru_head);
1120 ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
1121 VMW_RES_SURFACE, true, res_free,
1122 vmw_surface_remove_from_lists);
1123
1124 if (unlikely(ret != 0))
1125 res_free(res);
1126
1127 /*
1128 * The surface won't be visible to hardware until a
1129 * surface validate.
1130 */
1131
1132 (void) vmw_3d_resource_inc(dev_priv, false);
1133 vmw_resource_activate(res, vmw_hw_surface_destroy);
1134 return ret;
1135}
1136
1137static void vmw_user_surface_free(struct vmw_resource *res)
1138{
1139 struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
1140 struct vmw_user_surface *user_srf =
1141 container_of(srf, struct vmw_user_surface, srf);
1142 struct vmw_private *dev_priv = srf->res.dev_priv;
1143 uint32_t size = user_srf->size;
1144
1145 if (srf->backup)
1146 ttm_bo_unref(&srf->backup);
1147 kfree(srf->offsets);
1148 kfree(srf->sizes);
1149 kfree(srf->snooper.image);
1150 kfree(user_srf);
1151 ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
1152}
1153
1154/**
1155 * vmw_resource_unreserve - unreserve resources previously reserved for
1156 * command submission.
1157 *
1158 * @list_head: list of resources to unreserve.
1159 *
1160 * Currently only surfaces are considered, and unreserving a surface
1161 * means putting it back on the device's surface lru list,
1162 * so that it can be evicted if necessary.
1163 * This function traverses the resource list and
1164 * checks whether resources are surfaces, and in that case puts them back
1165 * on the device's surface LRU list.
1166 */
1167void vmw_resource_unreserve(struct list_head *list)
1168{
1169 struct vmw_resource *res;
1170 struct vmw_surface *srf;
1171 rwlock_t *lock = NULL;
1172
1173 list_for_each_entry(res, list, validate_head) {
1174
1175 if (res->res_free != &vmw_surface_res_free &&
1176 res->res_free != &vmw_user_surface_free)
1177 continue;
1178
1179 if (unlikely(lock == NULL)) {
1180 lock = &res->dev_priv->resource_lock;
1181 write_lock(lock);
1182 }
1183
1184 srf = container_of(res, struct vmw_surface, res);
1185 list_del_init(&srf->lru_head);
1186 list_add_tail(&srf->lru_head, &res->dev_priv->surface_lru);
1187 }
1188
1189 if (lock != NULL)
1190 write_unlock(lock);
1191}
1192
1193/**
1194 * Helper function that looks either a surface or dmabuf.
1195 *
1196 * The pointer this pointed at by out_surf and out_buf needs to be null.
1197 */
1198int vmw_user_lookup_handle(struct vmw_private *dev_priv,
1199 struct ttm_object_file *tfile,
1200 uint32_t handle,
1201 struct vmw_surface **out_surf,
1202 struct vmw_dma_buffer **out_buf)
1203{
1204 int ret;
1205
1206 BUG_ON(*out_surf || *out_buf);
1207
1208 ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf);
1209 if (!ret)
1210 return 0;
1211
1212 ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
1213 return ret;
1214}
1215
1216
1217int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
1218 struct ttm_object_file *tfile,
1219 uint32_t handle, struct vmw_surface **out)
1220{
1221 struct vmw_resource *res;
1222 struct vmw_surface *srf;
1223 struct vmw_user_surface *user_srf;
1224 struct ttm_base_object *base; 291 struct ttm_base_object *base;
292 struct vmw_resource *res;
1225 int ret = -EINVAL; 293 int ret = -EINVAL;
1226 294
1227 base = ttm_base_object_lookup(tfile, handle); 295 base = ttm_base_object_lookup(tfile, handle);
1228 if (unlikely(base == NULL)) 296 if (unlikely(base == NULL))
1229 return -EINVAL; 297 return -EINVAL;
1230 298
1231 if (unlikely(base->object_type != VMW_RES_SURFACE)) 299 if (unlikely(base->object_type != converter->object_type))
1232 goto out_bad_resource; 300 goto out_bad_resource;
1233 301
1234 user_srf = container_of(base, struct vmw_user_surface, base); 302 res = converter->base_obj_to_res(base);
1235 srf = &user_srf->srf;
1236 res = &srf->res;
1237 303
1238 read_lock(&dev_priv->resource_lock); 304 read_lock(&dev_priv->resource_lock);
1239 305 if (!res->avail || res->res_free != converter->res_free) {
1240 if (!res->avail || res->res_free != &vmw_user_surface_free) {
1241 read_unlock(&dev_priv->resource_lock); 306 read_unlock(&dev_priv->resource_lock);
1242 goto out_bad_resource; 307 goto out_bad_resource;
1243 } 308 }
@@ -1245,7 +310,7 @@ int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
1245 kref_get(&res->kref); 310 kref_get(&res->kref);
1246 read_unlock(&dev_priv->resource_lock); 311 read_unlock(&dev_priv->resource_lock);
1247 312
1248 *out = srf; 313 *p_res = res;
1249 ret = 0; 314 ret = 0;
1250 315
1251out_bad_resource: 316out_bad_resource:
@@ -1254,286 +319,32 @@ out_bad_resource:
1254 return ret; 319 return ret;
1255} 320}
1256 321
1257static void vmw_user_surface_base_release(struct ttm_base_object **p_base) 322/**
1258{ 323 * Helper function that looks either a surface or dmabuf.
1259 struct ttm_base_object *base = *p_base; 324 *
1260 struct vmw_user_surface *user_srf = 325 * The pointer this pointed at by out_surf and out_buf needs to be null.
1261 container_of(base, struct vmw_user_surface, base); 326 */
1262 struct vmw_resource *res = &user_srf->srf.res; 327int vmw_user_lookup_handle(struct vmw_private *dev_priv,
1263 328 struct ttm_object_file *tfile,
1264 *p_base = NULL; 329 uint32_t handle,
1265 vmw_resource_unreference(&res); 330 struct vmw_surface **out_surf,
1266} 331 struct vmw_dma_buffer **out_buf)
1267
1268int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
1269 struct drm_file *file_priv)
1270{
1271 struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
1272 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1273
1274 return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
1275}
1276
1277int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
1278 struct drm_file *file_priv)
1279{ 332{
1280 struct vmw_private *dev_priv = vmw_priv(dev);
1281 struct vmw_user_surface *user_srf;
1282 struct vmw_surface *srf;
1283 struct vmw_resource *res; 333 struct vmw_resource *res;
1284 struct vmw_resource *tmp;
1285 union drm_vmw_surface_create_arg *arg =
1286 (union drm_vmw_surface_create_arg *)data;
1287 struct drm_vmw_surface_create_req *req = &arg->req;
1288 struct drm_vmw_surface_arg *rep = &arg->rep;
1289 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1290 struct drm_vmw_size __user *user_sizes;
1291 int ret; 334 int ret;
1292 int i, j;
1293 uint32_t cur_bo_offset;
1294 struct drm_vmw_size *cur_size;
1295 struct vmw_surface_offset *cur_offset;
1296 uint32_t stride_bpp;
1297 uint32_t bpp;
1298 uint32_t num_sizes;
1299 uint32_t size;
1300 struct vmw_master *vmaster = vmw_master(file_priv->master);
1301 335
1302 if (unlikely(vmw_user_surface_size == 0)) 336 BUG_ON(*out_surf || *out_buf);
1303 vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
1304 128;
1305
1306 num_sizes = 0;
1307 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
1308 num_sizes += req->mip_levels[i];
1309
1310 if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
1311 DRM_VMW_MAX_MIP_LEVELS)
1312 return -EINVAL;
1313
1314 size = vmw_user_surface_size + 128 +
1315 ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
1316 ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
1317
1318
1319 ret = ttm_read_lock(&vmaster->lock, true);
1320 if (unlikely(ret != 0))
1321 return ret;
1322
1323 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
1324 size, false, true);
1325 if (unlikely(ret != 0)) {
1326 if (ret != -ERESTARTSYS)
1327 DRM_ERROR("Out of graphics memory for surface"
1328 " creation.\n");
1329 goto out_unlock;
1330 }
1331
1332 user_srf = kmalloc(sizeof(*user_srf), GFP_KERNEL);
1333 if (unlikely(user_srf == NULL)) {
1334 ret = -ENOMEM;
1335 goto out_no_user_srf;
1336 }
1337
1338 srf = &user_srf->srf;
1339 res = &srf->res;
1340
1341 srf->flags = req->flags;
1342 srf->format = req->format;
1343 srf->scanout = req->scanout;
1344 srf->backup = NULL;
1345
1346 memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
1347 srf->num_sizes = num_sizes;
1348 user_srf->size = size;
1349
1350 srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
1351 if (unlikely(srf->sizes == NULL)) {
1352 ret = -ENOMEM;
1353 goto out_no_sizes;
1354 }
1355 srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
1356 GFP_KERNEL);
1357 if (unlikely(srf->sizes == NULL)) {
1358 ret = -ENOMEM;
1359 goto out_no_offsets;
1360 }
1361
1362 user_sizes = (struct drm_vmw_size __user *)(unsigned long)
1363 req->size_addr;
1364
1365 ret = copy_from_user(srf->sizes, user_sizes,
1366 srf->num_sizes * sizeof(*srf->sizes));
1367 if (unlikely(ret != 0)) {
1368 ret = -EFAULT;
1369 goto out_no_copy;
1370 }
1371
1372 cur_bo_offset = 0;
1373 cur_offset = srf->offsets;
1374 cur_size = srf->sizes;
1375
1376 bpp = vmw_sf_bpp[srf->format].bpp;
1377 stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
1378
1379 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
1380 for (j = 0; j < srf->mip_levels[i]; ++j) {
1381 uint32_t stride =
1382 (cur_size->width * stride_bpp + 7) >> 3;
1383
1384 cur_offset->face = i;
1385 cur_offset->mip = j;
1386 cur_offset->bo_offset = cur_bo_offset;
1387 cur_bo_offset += stride * cur_size->height *
1388 cur_size->depth * bpp / stride_bpp;
1389 ++cur_offset;
1390 ++cur_size;
1391 }
1392 }
1393 srf->backup_size = cur_bo_offset;
1394
1395 if (srf->scanout &&
1396 srf->num_sizes == 1 &&
1397 srf->sizes[0].width == 64 &&
1398 srf->sizes[0].height == 64 &&
1399 srf->format == SVGA3D_A8R8G8B8) {
1400
1401 /* allocate image area and clear it */
1402 srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
1403 if (!srf->snooper.image) {
1404 DRM_ERROR("Failed to allocate cursor_image\n");
1405 ret = -ENOMEM;
1406 goto out_no_copy;
1407 }
1408 } else {
1409 srf->snooper.image = NULL;
1410 }
1411 srf->snooper.crtc = NULL;
1412
1413 user_srf->base.shareable = false;
1414 user_srf->base.tfile = NULL;
1415
1416 /**
1417 * From this point, the generic resource management functions
1418 * destroy the object on failure.
1419 */
1420
1421 ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
1422 if (unlikely(ret != 0))
1423 goto out_unlock;
1424
1425 tmp = vmw_resource_reference(&srf->res);
1426 ret = ttm_base_object_init(tfile, &user_srf->base,
1427 req->shareable, VMW_RES_SURFACE,
1428 &vmw_user_surface_base_release, NULL);
1429
1430 if (unlikely(ret != 0)) {
1431 vmw_resource_unreference(&tmp);
1432 vmw_resource_unreference(&res);
1433 goto out_unlock;
1434 }
1435
1436 rep->sid = user_srf->base.hash.key;
1437 if (rep->sid == SVGA3D_INVALID_ID)
1438 DRM_ERROR("Created bad Surface ID.\n");
1439
1440 vmw_resource_unreference(&res);
1441
1442 ttm_read_unlock(&vmaster->lock);
1443 return 0;
1444out_no_copy:
1445 kfree(srf->offsets);
1446out_no_offsets:
1447 kfree(srf->sizes);
1448out_no_sizes:
1449 kfree(user_srf);
1450out_no_user_srf:
1451 ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
1452out_unlock:
1453 ttm_read_unlock(&vmaster->lock);
1454 return ret;
1455}
1456
1457int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
1458 struct drm_file *file_priv)
1459{
1460 union drm_vmw_surface_reference_arg *arg =
1461 (union drm_vmw_surface_reference_arg *)data;
1462 struct drm_vmw_surface_arg *req = &arg->req;
1463 struct drm_vmw_surface_create_req *rep = &arg->rep;
1464 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1465 struct vmw_surface *srf;
1466 struct vmw_user_surface *user_srf;
1467 struct drm_vmw_size __user *user_sizes;
1468 struct ttm_base_object *base;
1469 int ret = -EINVAL;
1470
1471 base = ttm_base_object_lookup(tfile, req->sid);
1472 if (unlikely(base == NULL)) {
1473 DRM_ERROR("Could not find surface to reference.\n");
1474 return -EINVAL;
1475 }
1476
1477 if (unlikely(base->object_type != VMW_RES_SURFACE))
1478 goto out_bad_resource;
1479
1480 user_srf = container_of(base, struct vmw_user_surface, base);
1481 srf = &user_srf->srf;
1482
1483 ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
1484 if (unlikely(ret != 0)) {
1485 DRM_ERROR("Could not add a reference to a surface.\n");
1486 goto out_no_reference;
1487 }
1488
1489 rep->flags = srf->flags;
1490 rep->format = srf->format;
1491 memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
1492 user_sizes = (struct drm_vmw_size __user *)(unsigned long)
1493 rep->size_addr;
1494 337
1495 if (user_sizes) 338 ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
1496 ret = copy_to_user(user_sizes, srf->sizes, 339 user_surface_converter,
1497 srf->num_sizes * sizeof(*srf->sizes)); 340 &res);
1498 if (unlikely(ret != 0)) { 341 if (!ret) {
1499 DRM_ERROR("copy_to_user failed %p %u\n", 342 *out_surf = vmw_res_to_srf(res);
1500 user_sizes, srf->num_sizes); 343 return 0;
1501 ret = -EFAULT;
1502 } 344 }
1503out_bad_resource:
1504out_no_reference:
1505 ttm_base_object_unref(&base);
1506
1507 return ret;
1508}
1509
1510int vmw_surface_check(struct vmw_private *dev_priv,
1511 struct ttm_object_file *tfile,
1512 uint32_t handle, int *id)
1513{
1514 struct ttm_base_object *base;
1515 struct vmw_user_surface *user_srf;
1516
1517 int ret = -EPERM;
1518
1519 base = ttm_base_object_lookup(tfile, handle);
1520 if (unlikely(base == NULL))
1521 return -EINVAL;
1522 345
1523 if (unlikely(base->object_type != VMW_RES_SURFACE)) 346 *out_surf = NULL;
1524 goto out_bad_surface; 347 ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
1525
1526 user_srf = container_of(base, struct vmw_user_surface, base);
1527 *id = user_srf->srf.res.id;
1528 ret = 0;
1529
1530out_bad_surface:
1531 /**
1532 * FIXME: May deadlock here when called from the
1533 * command parsing code.
1534 */
1535
1536 ttm_base_object_unref(&base);
1537 return ret; 348 return ret;
1538} 349}
1539 350
@@ -1562,11 +373,11 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
1562 acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer)); 373 acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
1563 memset(vmw_bo, 0, sizeof(*vmw_bo)); 374 memset(vmw_bo, 0, sizeof(*vmw_bo));
1564 375
1565 INIT_LIST_HEAD(&vmw_bo->validate_list); 376 INIT_LIST_HEAD(&vmw_bo->res_list);
1566 377
1567 ret = ttm_bo_init(bdev, &vmw_bo->base, size, 378 ret = ttm_bo_init(bdev, &vmw_bo->base, size,
1568 ttm_bo_type_device, placement, 379 ttm_bo_type_device, placement,
1569 0, 0, interruptible, 380 0, interruptible,
1570 NULL, acc_size, NULL, bo_free); 381 NULL, acc_size, NULL, bo_free);
1571 return ret; 382 return ret;
1572} 383}
@@ -1575,7 +386,7 @@ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
1575{ 386{
1576 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo); 387 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
1577 388
1578 kfree(vmw_user_bo); 389 ttm_base_object_kfree(vmw_user_bo, base);
1579} 390}
1580 391
1581static void vmw_user_dmabuf_release(struct ttm_base_object **p_base) 392static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
@@ -1594,6 +405,79 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
1594 ttm_bo_unref(&bo); 405 ttm_bo_unref(&bo);
1595} 406}
1596 407
408/**
409 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
410 *
411 * @dev_priv: Pointer to a struct device private.
412 * @tfile: Pointer to a struct ttm_object_file on which to register the user
413 * object.
414 * @size: Size of the dma buffer.
415 * @shareable: Boolean whether the buffer is shareable with other open files.
416 * @handle: Pointer to where the handle value should be assigned.
417 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
418 * should be assigned.
419 */
420int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
421 struct ttm_object_file *tfile,
422 uint32_t size,
423 bool shareable,
424 uint32_t *handle,
425 struct vmw_dma_buffer **p_dma_buf)
426{
427 struct vmw_user_dma_buffer *user_bo;
428 struct ttm_buffer_object *tmp;
429 int ret;
430
431 user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
432 if (unlikely(user_bo == NULL)) {
433 DRM_ERROR("Failed to allocate a buffer.\n");
434 return -ENOMEM;
435 }
436
437 ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
438 &vmw_vram_sys_placement, true,
439 &vmw_user_dmabuf_destroy);
440 if (unlikely(ret != 0))
441 return ret;
442
443 tmp = ttm_bo_reference(&user_bo->dma.base);
444 ret = ttm_base_object_init(tfile,
445 &user_bo->base,
446 shareable,
447 ttm_buffer_type,
448 &vmw_user_dmabuf_release, NULL);
449 if (unlikely(ret != 0)) {
450 ttm_bo_unref(&tmp);
451 goto out_no_base_object;
452 }
453
454 *p_dma_buf = &user_bo->dma;
455 *handle = user_bo->base.hash.key;
456
457out_no_base_object:
458 return ret;
459}
460
461/**
462 * vmw_user_dmabuf_verify_access - verify access permissions on this
463 * buffer object.
464 *
465 * @bo: Pointer to the buffer object being accessed
466 * @tfile: Identifying the caller.
467 */
468int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
469 struct ttm_object_file *tfile)
470{
471 struct vmw_user_dma_buffer *vmw_user_bo;
472
473 if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
474 return -EPERM;
475
476 vmw_user_bo = vmw_user_dma_buffer(bo);
477 return (vmw_user_bo->base.tfile == tfile ||
478 vmw_user_bo->base.shareable) ? 0 : -EPERM;
479}
480
1597int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, 481int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
1598 struct drm_file *file_priv) 482 struct drm_file *file_priv)
1599{ 483{
@@ -1602,44 +486,27 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
1602 (union drm_vmw_alloc_dmabuf_arg *)data; 486 (union drm_vmw_alloc_dmabuf_arg *)data;
1603 struct drm_vmw_alloc_dmabuf_req *req = &arg->req; 487 struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
1604 struct drm_vmw_dmabuf_rep *rep = &arg->rep; 488 struct drm_vmw_dmabuf_rep *rep = &arg->rep;
1605 struct vmw_user_dma_buffer *vmw_user_bo; 489 struct vmw_dma_buffer *dma_buf;
1606 struct ttm_buffer_object *tmp; 490 uint32_t handle;
1607 struct vmw_master *vmaster = vmw_master(file_priv->master); 491 struct vmw_master *vmaster = vmw_master(file_priv->master);
1608 int ret; 492 int ret;
1609 493
1610 vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
1611 if (unlikely(vmw_user_bo == NULL))
1612 return -ENOMEM;
1613
1614 ret = ttm_read_lock(&vmaster->lock, true); 494 ret = ttm_read_lock(&vmaster->lock, true);
1615 if (unlikely(ret != 0)) { 495 if (unlikely(ret != 0))
1616 kfree(vmw_user_bo);
1617 return ret; 496 return ret;
1618 }
1619 497
1620 ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size, 498 ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
1621 &vmw_vram_sys_placement, true, 499 req->size, false, &handle, &dma_buf);
1622 &vmw_user_dmabuf_destroy);
1623 if (unlikely(ret != 0)) 500 if (unlikely(ret != 0))
1624 goto out_no_dmabuf; 501 goto out_no_dmabuf;
1625 502
1626 tmp = ttm_bo_reference(&vmw_user_bo->dma.base); 503 rep->handle = handle;
1627 ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile, 504 rep->map_handle = dma_buf->base.addr_space_offset;
1628 &vmw_user_bo->base, 505 rep->cur_gmr_id = handle;
1629 false, 506 rep->cur_gmr_offset = 0;
1630 ttm_buffer_type, 507
1631 &vmw_user_dmabuf_release, NULL); 508 vmw_dmabuf_unreference(&dma_buf);
1632 if (unlikely(ret != 0))
1633 goto out_no_base_object;
1634 else {
1635 rep->handle = vmw_user_bo->base.hash.key;
1636 rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
1637 rep->cur_gmr_id = vmw_user_bo->base.hash.key;
1638 rep->cur_gmr_offset = 0;
1639 }
1640 509
1641out_no_base_object:
1642 ttm_bo_unref(&tmp);
1643out_no_dmabuf: 510out_no_dmabuf:
1644 ttm_read_unlock(&vmaster->lock); 511 ttm_read_unlock(&vmaster->lock);
1645 512
@@ -1657,27 +524,6 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
1657 TTM_REF_USAGE); 524 TTM_REF_USAGE);
1658} 525}
1659 526
1660uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
1661 uint32_t cur_validate_node)
1662{
1663 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
1664
1665 if (likely(vmw_bo->on_validate_list))
1666 return vmw_bo->cur_validate_node;
1667
1668 vmw_bo->cur_validate_node = cur_validate_node;
1669 vmw_bo->on_validate_list = true;
1670
1671 return cur_validate_node;
1672}
1673
1674void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
1675{
1676 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
1677
1678 vmw_bo->on_validate_list = false;
1679}
1680
1681int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile, 527int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
1682 uint32_t handle, struct vmw_dma_buffer **out) 528 uint32_t handle, struct vmw_dma_buffer **out)
1683{ 529{
@@ -1706,6 +552,18 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
1706 return 0; 552 return 0;
1707} 553}
1708 554
555int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
556 struct vmw_dma_buffer *dma_buf)
557{
558 struct vmw_user_dma_buffer *user_bo;
559
560 if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
561 return -EINVAL;
562
563 user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
564 return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
565}
566
1709/* 567/*
1710 * Stream management 568 * Stream management
1711 */ 569 */
@@ -1730,8 +588,8 @@ static int vmw_stream_init(struct vmw_private *dev_priv,
1730 struct vmw_resource *res = &stream->res; 588 struct vmw_resource *res = &stream->res;
1731 int ret; 589 int ret;
1732 590
1733 ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr, 591 ret = vmw_resource_init(dev_priv, res, false, res_free,
1734 VMW_RES_STREAM, false, res_free, NULL); 592 &vmw_stream_func);
1735 593
1736 if (unlikely(ret != 0)) { 594 if (unlikely(ret != 0)) {
1737 if (res_free == NULL) 595 if (res_free == NULL)
@@ -1753,17 +611,13 @@ static int vmw_stream_init(struct vmw_private *dev_priv,
1753 return 0; 611 return 0;
1754} 612}
1755 613
1756/**
1757 * User-space context management:
1758 */
1759
1760static void vmw_user_stream_free(struct vmw_resource *res) 614static void vmw_user_stream_free(struct vmw_resource *res)
1761{ 615{
1762 struct vmw_user_stream *stream = 616 struct vmw_user_stream *stream =
1763 container_of(res, struct vmw_user_stream, stream.res); 617 container_of(res, struct vmw_user_stream, stream.res);
1764 struct vmw_private *dev_priv = res->dev_priv; 618 struct vmw_private *dev_priv = res->dev_priv;
1765 619
1766 kfree(stream); 620 ttm_base_object_kfree(stream, base);
1767 ttm_mem_global_free(vmw_mem_glob(dev_priv), 621 ttm_mem_global_free(vmw_mem_glob(dev_priv),
1768 vmw_user_stream_size); 622 vmw_user_stream_size);
1769} 623}
@@ -1792,9 +646,11 @@ int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
1792 struct vmw_user_stream *stream; 646 struct vmw_user_stream *stream;
1793 struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data; 647 struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
1794 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 648 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
649 struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
1795 int ret = 0; 650 int ret = 0;
1796 651
1797 res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id); 652
653 res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
1798 if (unlikely(res == NULL)) 654 if (unlikely(res == NULL))
1799 return -EINVAL; 655 return -EINVAL;
1800 656
@@ -1895,7 +751,8 @@ int vmw_user_stream_lookup(struct vmw_private *dev_priv,
1895 struct vmw_resource *res; 751 struct vmw_resource *res;
1896 int ret; 752 int ret;
1897 753
1898 res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id); 754 res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
755 *inout_id);
1899 if (unlikely(res == NULL)) 756 if (unlikely(res == NULL))
1900 return -EINVAL; 757 return -EINVAL;
1901 758
@@ -1990,3 +847,453 @@ int vmw_dumb_destroy(struct drm_file *file_priv,
1990 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, 847 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
1991 handle, TTM_REF_USAGE); 848 handle, TTM_REF_USAGE);
1992} 849}
850
851/**
852 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
853 *
854 * @res: The resource for which to allocate a backup buffer.
855 * @interruptible: Whether any sleeps during allocation should be
856 * performed while interruptible.
857 */
858static int vmw_resource_buf_alloc(struct vmw_resource *res,
859 bool interruptible)
860{
861 unsigned long size =
862 (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
863 struct vmw_dma_buffer *backup;
864 int ret;
865
866 if (likely(res->backup)) {
867 BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
868 return 0;
869 }
870
871 backup = kzalloc(sizeof(*backup), GFP_KERNEL);
872 if (unlikely(backup == NULL))
873 return -ENOMEM;
874
875 ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
876 res->func->backup_placement,
877 interruptible,
878 &vmw_dmabuf_bo_free);
879 if (unlikely(ret != 0))
880 goto out_no_dmabuf;
881
882 res->backup = backup;
883
884out_no_dmabuf:
885 return ret;
886}
887
888/**
889 * vmw_resource_do_validate - Make a resource up-to-date and visible
890 * to the device.
891 *
892 * @res: The resource to make visible to the device.
893 * @val_buf: Information about a buffer possibly
894 * containing backup data if a bind operation is needed.
895 *
896 * On hardware resource shortage, this function returns -EBUSY and
897 * should be retried once resources have been freed up.
898 */
899static int vmw_resource_do_validate(struct vmw_resource *res,
900 struct ttm_validate_buffer *val_buf)
901{
902 int ret = 0;
903 const struct vmw_res_func *func = res->func;
904
905 if (unlikely(res->id == -1)) {
906 ret = func->create(res);
907 if (unlikely(ret != 0))
908 return ret;
909 }
910
911 if (func->bind &&
912 ((func->needs_backup && list_empty(&res->mob_head) &&
913 val_buf->bo != NULL) ||
914 (!func->needs_backup && val_buf->bo != NULL))) {
915 ret = func->bind(res, val_buf);
916 if (unlikely(ret != 0))
917 goto out_bind_failed;
918 if (func->needs_backup)
919 list_add_tail(&res->mob_head, &res->backup->res_list);
920 }
921
922 /*
923 * Only do this on write operations, and move to
924 * vmw_resource_unreserve if it can be called after
925 * backup buffers have been unreserved. Otherwise
926 * sort out locking.
927 */
928 res->res_dirty = true;
929
930 return 0;
931
932out_bind_failed:
933 func->destroy(res);
934
935 return ret;
936}
937
938/**
939 * vmw_resource_unreserve - Unreserve a resource previously reserved for
940 * command submission.
941 *
942 * @res: Pointer to the struct vmw_resource to unreserve.
943 * @new_backup: Pointer to new backup buffer if command submission
944 * switched.
945 * @new_backup_offset: New backup offset if @new_backup is !NULL.
946 *
947 * Currently unreserving a resource means putting it back on the device's
948 * resource lru list, so that it can be evicted if necessary.
949 */
950void vmw_resource_unreserve(struct vmw_resource *res,
951 struct vmw_dma_buffer *new_backup,
952 unsigned long new_backup_offset)
953{
954 struct vmw_private *dev_priv = res->dev_priv;
955
956 if (!list_empty(&res->lru_head))
957 return;
958
959 if (new_backup && new_backup != res->backup) {
960
961 if (res->backup) {
962 BUG_ON(atomic_read(&res->backup->base.reserved) == 0);
963 list_del_init(&res->mob_head);
964 vmw_dmabuf_unreference(&res->backup);
965 }
966
967 res->backup = vmw_dmabuf_reference(new_backup);
968 BUG_ON(atomic_read(&new_backup->base.reserved) == 0);
969 list_add_tail(&res->mob_head, &new_backup->res_list);
970 }
971 if (new_backup)
972 res->backup_offset = new_backup_offset;
973
974 if (!res->func->may_evict)
975 return;
976
977 write_lock(&dev_priv->resource_lock);
978 list_add_tail(&res->lru_head,
979 &res->dev_priv->res_lru[res->func->res_type]);
980 write_unlock(&dev_priv->resource_lock);
981}
982
/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @res:           The resource for which to allocate a backup buffer.
 * @interruptible: Whether any sleeps during allocation should be
 *                 performed while interruptible.
 * @val_buf:       On successful return contains data about the
 *                 reserved and validated backup buffer.
 */
int vmw_resource_check_buffer(struct vmw_resource *res,
			      bool interruptible,
			      struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	/* Lazily allocate a backup buffer if the resource has none yet. */
	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	/* Reserve the backup bo through the execbuf-util helpers. */
	INIT_LIST_HEAD(&val_list);
	val_buf->bo = ttm_bo_reference(&res->backup->base);
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(&val_list);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	/* Backup already bound (mob_head non-empty): no validation needed. */
	if (res->func->needs_backup && list_empty(&res->mob_head))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      true, false);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(&val_list);
out_no_reserve:
	ttm_bo_unref(&val_buf->bo);
	/*
	 * Drop a backup whose contents were dirty anyway; it is
	 * reallocated on the next call. backup_dirty is still false when
	 * the reservation itself fails, so the buffer is kept then.
	 */
	if (backup_dirty)
		vmw_dmabuf_unreference(&res->backup);

	return ret;
}
1037
1038/**
1039 * vmw_resource_reserve - Reserve a resource for command submission
1040 *
1041 * @res: The resource to reserve.
1042 *
1043 * This function takes the resource off the LRU list and make sure
1044 * a backup buffer is present for guest-backed resources. However,
1045 * the buffer may not be bound to the resource at this point.
1046 *
1047 */
1048int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
1049{
1050 struct vmw_private *dev_priv = res->dev_priv;
1051 int ret;
1052
1053 write_lock(&dev_priv->resource_lock);
1054 list_del_init(&res->lru_head);
1055 write_unlock(&dev_priv->resource_lock);
1056
1057 if (res->func->needs_backup && res->backup == NULL &&
1058 !no_backup) {
1059 ret = vmw_resource_buf_alloc(res, true);
1060 if (unlikely(ret != 0))
1061 return ret;
1062 }
1063
1064 return 0;
1065}
1066
1067/**
1068 * vmw_resource_backoff_reservation - Unreserve and unreference a
1069 * backup buffer
1070 *.
1071 * @val_buf: Backup buffer information.
1072 */
1073void vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
1074{
1075 struct list_head val_list;
1076
1077 if (likely(val_buf->bo == NULL))
1078 return;
1079
1080 INIT_LIST_HEAD(&val_list);
1081 list_add_tail(&val_buf->head, &val_list);
1082 ttm_eu_backoff_reservation(&val_list);
1083 ttm_bo_unref(&val_buf->bo);
1084}
1085
1086/**
1087 * vmw_resource_do_evict - Evict a resource, and transfer its data
1088 * to a backup buffer.
1089 *
1090 * @res: The resource to evict.
1091 */
1092int vmw_resource_do_evict(struct vmw_resource *res)
1093{
1094 struct ttm_validate_buffer val_buf;
1095 const struct vmw_res_func *func = res->func;
1096 int ret;
1097
1098 BUG_ON(!func->may_evict);
1099
1100 val_buf.bo = NULL;
1101 ret = vmw_resource_check_buffer(res, true, &val_buf);
1102 if (unlikely(ret != 0))
1103 return ret;
1104
1105 if (unlikely(func->unbind != NULL &&
1106 (!func->needs_backup || !list_empty(&res->mob_head)))) {
1107 ret = func->unbind(res, res->res_dirty, &val_buf);
1108 if (unlikely(ret != 0))
1109 goto out_no_unbind;
1110 list_del_init(&res->mob_head);
1111 }
1112 ret = func->destroy(res);
1113 res->backup_dirty = true;
1114 res->res_dirty = false;
1115out_no_unbind:
1116 vmw_resource_backoff_reservation(&val_buf);
1117
1118 return ret;
1119}
1120
1121
1122/**
1123 * vmw_resource_validate - Make a resource up-to-date and visible
1124 * to the device.
1125 *
1126 * @res: The resource to make visible to the device.
1127 *
1128 * On succesful return, any backup DMA buffer pointed to by @res->backup will
1129 * be reserved and validated.
1130 * On hardware resource shortage, this function will repeatedly evict
1131 * resources of the same type until the validation succeeds.
1132 */
1133int vmw_resource_validate(struct vmw_resource *res)
1134{
1135 int ret;
1136 struct vmw_resource *evict_res;
1137 struct vmw_private *dev_priv = res->dev_priv;
1138 struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
1139 struct ttm_validate_buffer val_buf;
1140
1141 if (likely(!res->func->may_evict))
1142 return 0;
1143
1144 val_buf.bo = NULL;
1145 if (res->backup)
1146 val_buf.bo = &res->backup->base;
1147 do {
1148 ret = vmw_resource_do_validate(res, &val_buf);
1149 if (likely(ret != -EBUSY))
1150 break;
1151
1152 write_lock(&dev_priv->resource_lock);
1153 if (list_empty(lru_list) || !res->func->may_evict) {
1154 DRM_ERROR("Out of device device id entries "
1155 "for %s.\n", res->func->type_name);
1156 ret = -EBUSY;
1157 write_unlock(&dev_priv->resource_lock);
1158 break;
1159 }
1160
1161 evict_res = vmw_resource_reference
1162 (list_first_entry(lru_list, struct vmw_resource,
1163 lru_head));
1164 list_del_init(&evict_res->lru_head);
1165
1166 write_unlock(&dev_priv->resource_lock);
1167 vmw_resource_do_evict(evict_res);
1168 vmw_resource_unreference(&evict_res);
1169 } while (1);
1170
1171 if (unlikely(ret != 0))
1172 goto out_no_validate;
1173 else if (!res->func->needs_backup && res->backup) {
1174 list_del_init(&res->mob_head);
1175 vmw_dmabuf_unreference(&res->backup);
1176 }
1177
1178 return 0;
1179
1180out_no_validate:
1181 return ret;
1182}
1183
/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 * object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 * insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct vmw_fence_obj *old_fence_obj;
	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	/* No fence supplied: emit one into the command stream instead. */
	if (fence == NULL)
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	else
		driver->sync_obj_ref(fence);

	/* Swap the bo's sync object under the bdev fence lock. */
	spin_lock(&bdev->fence_lock);

	old_fence_obj = bo->sync_obj;
	bo->sync_obj = fence;

	spin_unlock(&bdev->fence_lock);

	/* Drop the replaced fence's reference outside the lock. */
	if (old_fence_obj)
		vmw_fence_obj_unreference(&old_fence_obj);
}
1220
/**
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 * region the move is taking place.
 *
 * Intentionally empty for now; kept so the TTM driver callback slot
 * has a well-defined hook point.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
			      struct ttm_mem_reg *mem)
{
}
1234
/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 *
 * Simply forwards the per-type @needs_backup flag from the resource's
 * function table.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}
1244
1245/**
1246 * vmw_resource_evict_type - Evict all resources of a specific type
1247 *
1248 * @dev_priv: Pointer to a device private struct
1249 * @type: The resource type to evict
1250 *
1251 * To avoid thrashing starvation or as part of the hibernation sequence,
1252 * evict all evictable resources of a specific type.
1253 */
1254static void vmw_resource_evict_type(struct vmw_private *dev_priv,
1255 enum vmw_res_type type)
1256{
1257 struct list_head *lru_list = &dev_priv->res_lru[type];
1258 struct vmw_resource *evict_res;
1259
1260 do {
1261 write_lock(&dev_priv->resource_lock);
1262
1263 if (list_empty(lru_list))
1264 goto out_unlock;
1265
1266 evict_res = vmw_resource_reference(
1267 list_first_entry(lru_list, struct vmw_resource,
1268 lru_head));
1269 list_del_init(&evict_res->lru_head);
1270 write_unlock(&dev_priv->resource_lock);
1271 vmw_resource_do_evict(evict_res);
1272 vmw_resource_unreference(&evict_res);
1273 } while (1);
1274
1275out_unlock:
1276 write_unlock(&dev_priv->resource_lock);
1277}
1278
1279/**
1280 * vmw_resource_evict_all - Evict all evictable resources
1281 *
1282 * @dev_priv: Pointer to a device private struct
1283 *
1284 * To avoid thrashing starvation or as part of the hibernation sequence,
1285 * evict all evictable resources. In particular this means that all
1286 * guest-backed resources that are registered with the device are
1287 * evicted and the OTable becomes clean.
1288 */
1289void vmw_resource_evict_all(struct vmw_private *dev_priv)
1290{
1291 enum vmw_res_type type;
1292
1293 mutex_lock(&dev_priv->cmdbuf_mutex);
1294
1295 for (type = 0; type < vmw_res_max; ++type)
1296 vmw_resource_evict_type(dev_priv, type);
1297
1298 mutex_unlock(&dev_priv->cmdbuf_mutex);
1299}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
new file mode 100644
index 000000000000..f3adeed2854c
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
@@ -0,0 +1,84 @@
1/**************************************************************************
2 *
3 * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
#ifndef _VMWGFX_RESOURCE_PRIV_H_
#define _VMWGFX_RESOURCE_PRIV_H_

#include "vmwgfx_drv.h"

/**
 * struct vmw_user_resource_conv - Identify a derived user-exported resource
 * type and provide a function to convert its ttm_base_object pointer to
 * a struct vmw_resource
 */
struct vmw_user_resource_conv {
	enum ttm_object_type object_type;
	struct vmw_resource *(*base_obj_to_res)(struct ttm_base_object *base);
	void (*res_free) (struct vmw_resource *res);
};

/**
 * struct vmw_res_func - members and functions common for a resource type
 *
 * @res_type:         Enum that identifies the lru list to use for eviction.
 * @needs_backup:     Whether the resource is guest-backed and needs
 *                    persistent buffer storage.
 * @type_name:        String that identifies the resource type.
 * @backup_placement: TTM placement for backup buffers.
 * @may_evict:        Whether the resource may be evicted.
 * @create:           Create a hardware resource.
 * @destroy:          Destroy a hardware resource.
 * @bind:             Bind a hardware resource to persistent buffer storage.
 * @unbind:           Unbind a hardware resource from persistent
 *                    buffer storage.
 */

struct vmw_res_func {
	enum vmw_res_type res_type;
	bool needs_backup;
	const char *type_name;
	struct ttm_placement *backup_placement;
	bool may_evict;

	int (*create) (struct vmw_resource *res);
	int (*destroy) (struct vmw_resource *res);
	int (*bind) (struct vmw_resource *res,
		     struct ttm_validate_buffer *val_buf);
	int (*unbind) (struct vmw_resource *res,
		       bool readback,
		       struct ttm_validate_buffer *val_buf);
};

/* Resource id and lifetime helpers shared by the per-type resource files. */
int vmw_resource_alloc_id(struct vmw_resource *res);
void vmw_resource_release_id(struct vmw_resource *res);
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func);
void vmw_resource_activate(struct vmw_resource *res,
			   void (*hw_destroy) (struct vmw_resource *));
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 6deaf2f8bab1..26387c3d5a21 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -468,7 +468,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
468 468
469 drm_mode_crtc_set_gamma_size(crtc, 256); 469 drm_mode_crtc_set_gamma_size(crtc, 256);
470 470
471 drm_connector_attach_property(connector, 471 drm_object_attach_property(&connector->base,
472 dev->mode_config.dirty_info_property, 472 dev->mode_config.dirty_info_property,
473 1); 473 1);
474 474
@@ -485,7 +485,7 @@ int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
485 return -EINVAL; 485 return -EINVAL;
486 } 486 }
487 487
488 if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_SCREEN_OBJECT_2)) { 488 if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
489 DRM_INFO("Not using screen objects," 489 DRM_INFO("Not using screen objects,"
490 " missing cap SCREEN_OBJECT_2\n"); 490 " missing cap SCREEN_OBJECT_2\n");
491 return -ENOSYS; 491 return -ENOSYS;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
new file mode 100644
index 000000000000..582814339748
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -0,0 +1,893 @@
1/**************************************************************************
2 *
3 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "vmwgfx_resource_priv.h"
30#include <ttm/ttm_placement.h>
31#include "svga3d_surfacedefs.h"
32
/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @base: The TTM base object handling user-space visibility.
 * @srf: The surface metadata.
 * @size: TTM accounting size for the surface.
 * @backup_handle: User-space handle of the surface backup buffer.
 * Not referenced within this file — presumably filled in by surface
 * lookup/creation paths elsewhere in the driver (verify).
 */
struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
	uint32_t size;
	uint32_t backup_handle;
};
46
47/**
48 * struct vmw_surface_offset - Backing store mip level offset info
49 *
50 * @face: Surface face.
51 * @mip: Mip level.
52 * @bo_offset: Offset into backing store of this mip level.
53 *
54 */
55struct vmw_surface_offset {
56 uint32_t face;
57 uint32_t mip;
58 uint32_t bo_offset;
59};
60
61static void vmw_user_surface_free(struct vmw_resource *res);
62static struct vmw_resource *
63vmw_user_surface_base_to_res(struct ttm_base_object *base);
64static int vmw_legacy_srf_bind(struct vmw_resource *res,
65 struct ttm_validate_buffer *val_buf);
66static int vmw_legacy_srf_unbind(struct vmw_resource *res,
67 bool readback,
68 struct ttm_validate_buffer *val_buf);
69static int vmw_legacy_srf_create(struct vmw_resource *res);
70static int vmw_legacy_srf_destroy(struct vmw_resource *res);
71
/*
 * Converter used by the generic resource code to translate a
 * user-space visible TTM base object into the embedded surface
 * resource, and to free it.
 */
static const struct vmw_user_resource_conv user_surface_conv = {
	.object_type = VMW_RES_SURFACE,
	.base_obj_to_res = vmw_user_surface_base_to_res,
	.res_free = vmw_user_surface_free
};

const struct vmw_user_resource_conv *user_surface_converter =
	&user_surface_conv;


/*
 * TTM accounting size of a user surface; computed once, lazily, in
 * vmw_surface_define_ioctl().
 */
static uint64_t vmw_user_surface_size;

/* Resource callbacks for legacy (non-guest-backed) surfaces. */
static const struct vmw_res_func vmw_legacy_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = false,
	.may_evict = true,
	.type_name = "legacy surfaces",
	.backup_placement = &vmw_srf_placement,
	.create = &vmw_legacy_srf_create,
	.destroy = &vmw_legacy_srf_destroy,
	.bind = &vmw_legacy_srf_bind,
	.unbind = &vmw_legacy_srf_unbind
};
95
/**
 * struct vmw_surface_dma - SVGA3D DMA command
 *
 * FIFO layout of one per-image DMA command: generic header, DMA body,
 * a single copy box, and the DMA suffix carrying flags and the maximum
 * offset (see vmw_surface_dma_encode()).
 */
struct vmw_surface_dma {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
	SVGA3dCmdSurfaceDMASuffix suffix;
};

/**
 * struct vmw_surface_define - SVGA3D Surface Define command
 *
 * The fixed part only; it is followed in the FIFO by one SVGA3dSize
 * per mip level (see vmw_surface_define_encode()).
 */
struct vmw_surface_define {
	SVGA3dCmdHeader header;
	SVGA3dCmdDefineSurface body;
};

/**
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
 */
struct vmw_surface_destroy {
	SVGA3dCmdHeader header;
	SVGA3dCmdDestroySurface body;
};
121
122
123/**
124 * vmw_surface_dma_size - Compute fifo size for a dma command.
125 *
126 * @srf: Pointer to a struct vmw_surface
127 *
128 * Computes the required size for a surface dma command for backup or
129 * restoration of the surface represented by @srf.
130 */
131static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
132{
133 return srf->num_sizes * sizeof(struct vmw_surface_dma);
134}
135
136
137/**
138 * vmw_surface_define_size - Compute fifo size for a surface define command.
139 *
140 * @srf: Pointer to a struct vmw_surface
141 *
142 * Computes the required size for a surface define command for the definition
143 * of the surface represented by @srf.
144 */
145static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
146{
147 return sizeof(struct vmw_surface_define) + srf->num_sizes *
148 sizeof(SVGA3dSize);
149}
150
151
152/**
153 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
154 *
155 * Computes the required size for a surface destroy command for the destruction
156 * of a hw surface.
157 */
158static inline uint32_t vmw_surface_destroy_size(void)
159{
160 return sizeof(struct vmw_surface_destroy);
161}
162
163/**
164 * vmw_surface_destroy_encode - Encode a surface_destroy command.
165 *
166 * @id: The surface id
167 * @cmd_space: Pointer to memory area in which the commands should be encoded.
168 */
169static void vmw_surface_destroy_encode(uint32_t id,
170 void *cmd_space)
171{
172 struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
173 cmd_space;
174
175 cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
176 cmd->header.size = sizeof(cmd->body);
177 cmd->body.sid = id;
178}
179
/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 * Must be at least vmw_surface_define_size(@srf) bytes.
 */
static void vmw_surface_define_encode(const struct vmw_surface *srf,
				      void *cmd_space)
{
	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
		cmd_space;
	struct drm_vmw_size *src_size;
	SVGA3dSize *cmd_size;
	uint32_t cmd_len;
	int i;

	/* The fixed body is followed by one SVGA3dSize per image. */
	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
	cmd->header.size = cmd_len;
	cmd->body.sid = srf->res.id;
	cmd->body.surfaceFlags = srf->flags;
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		cmd->body.face[i].numMipLevels = srf->mip_levels[i];

	/*
	 * The variable-length size array starts immediately after the
	 * fixed-size command, hence the pointer bump before the cast.
	 */
	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = src_size->width;
		cmd_size->height = src_size->height;
		cmd_size->depth = src_size->depth;
	}
}
216
217/**
218 * vmw_surface_dma_encode - Encode a surface_dma command.
219 *
220 * @srf: Pointer to a struct vmw_surface object.
221 * @cmd_space: Pointer to memory area in which the commands should be encoded.
222 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
223 * should be placed or read from.
224 * @to_surface: Boolean whether to DMA to the surface or from the surface.
225 */
226static void vmw_surface_dma_encode(struct vmw_surface *srf,
227 void *cmd_space,
228 const SVGAGuestPtr *ptr,
229 bool to_surface)
230{
231 uint32_t i;
232 struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
233 const struct svga3d_surface_desc *desc =
234 svga3dsurface_get_desc(srf->format);
235
236 for (i = 0; i < srf->num_sizes; ++i) {
237 SVGA3dCmdHeader *header = &cmd->header;
238 SVGA3dCmdSurfaceDMA *body = &cmd->body;
239 SVGA3dCopyBox *cb = &cmd->cb;
240 SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
241 const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
242 const struct drm_vmw_size *cur_size = &srf->sizes[i];
243
244 header->id = SVGA_3D_CMD_SURFACE_DMA;
245 header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
246
247 body->guest.ptr = *ptr;
248 body->guest.ptr.offset += cur_offset->bo_offset;
249 body->guest.pitch = svga3dsurface_calculate_pitch(desc,
250 cur_size);
251 body->host.sid = srf->res.id;
252 body->host.face = cur_offset->face;
253 body->host.mipmap = cur_offset->mip;
254 body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
255 SVGA3D_READ_HOST_VRAM);
256 cb->x = 0;
257 cb->y = 0;
258 cb->z = 0;
259 cb->srcx = 0;
260 cb->srcy = 0;
261 cb->srcz = 0;
262 cb->w = cur_size->width;
263 cb->h = cur_size->height;
264 cb->d = cur_size->depth;
265
266 suffix->suffixSize = sizeof(*suffix);
267 suffix->maximumOffset =
268 svga3dsurface_get_image_buffer_size(desc, cur_size,
269 body->guest.pitch);
270 suffix->flags.discard = 0;
271 suffix->flags.unsynchronized = 0;
272 suffix->flags.reserved = 0;
273 ++cmd;
274 }
275};
276
277
278/**
279 * vmw_hw_surface_destroy - destroy a Device surface
280 *
281 * @res: Pointer to a struct vmw_resource embedded in a struct
282 * vmw_surface.
283 *
284 * Destroys a the device surface associated with a struct vmw_surface if
285 * any, and adjusts accounting and resource count accordingly.
286 */
287static void vmw_hw_surface_destroy(struct vmw_resource *res)
288{
289
290 struct vmw_private *dev_priv = res->dev_priv;
291 struct vmw_surface *srf;
292 void *cmd;
293
294 if (res->id != -1) {
295
296 cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
297 if (unlikely(cmd == NULL)) {
298 DRM_ERROR("Failed reserving FIFO space for surface "
299 "destruction.\n");
300 return;
301 }
302
303 vmw_surface_destroy_encode(res->id, cmd);
304 vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
305
306 /*
307 * used_memory_size_atomic, or separate lock
308 * to avoid taking dev_priv::cmdbuf_mutex in
309 * the destroy path.
310 */
311
312 mutex_lock(&dev_priv->cmdbuf_mutex);
313 srf = vmw_res_to_srf(res);
314 dev_priv->used_memory_size -= res->backup_size;
315 mutex_unlock(&dev_priv->cmdbuf_mutex);
316 }
317 vmw_3d_resource_dec(dev_priv, false);
318}
319
/**
 * vmw_legacy_srf_create - Create a device surface as part of the
 * resource validation process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 *
 * If the surface doesn't have a hw id yet, allocate one and emit a
 * surface define command to the device FIFO; otherwise do nothing.
 *
 * Returns -EBUSY if there wasn't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
static int vmw_legacy_srf_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	uint32_t submit_size;
	uint8_t *cmd;
	int ret;

	/* Already has a device id: nothing to do. */
	if (likely(res->id != -1))
		return 0;

	srf = vmw_res_to_srf(res);
	/* Exceeds the device memory budget: caller should evict and retry. */
	if (unlikely(dev_priv->used_memory_size + res->backup_size >=
		     dev_priv->memory_size))
		return -EBUSY;

	/*
	 * Alloc id for the resource.
	 */

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	/* Ids beyond the device maximum count as a resource shortage. */
	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	/*
	 * Encode surface define- commands.
	 */

	submit_size = vmw_surface_define_size(srf);
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_surface_define_encode(srf, cmd);
	vmw_fifo_commit(dev_priv, submit_size);
	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size += res->backup_size;
	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}
391
/**
 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
 *
 * @res: Pointer to a struct vmw_res embedded in a struct
 * vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 * @bind: Boolean whether to DMA to the surface.
 *
 * Transfer backup data to or from a legacy surface as part of the
 * validation process.
 * May return other errors if the kernel is out of guest resources.
 * The backup buffer will be fenced or idle upon successful completion,
 * and if the surface needs persistent backup storage, the backup buffer
 * will also be returned reserved iff @bind is true.
 */
static int vmw_legacy_srf_dma(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf,
			      bool bind)
{
	SVGAGuestPtr ptr;
	struct vmw_fence_obj *fence;
	uint32_t submit_size;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint8_t *cmd;
	struct vmw_private *dev_priv = res->dev_priv;

	BUG_ON(val_buf->bo == NULL);

	submit_size = vmw_surface_dma_size(srf);
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "DMA.\n");
		return -ENOMEM;
	}
	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
	vmw_surface_dma_encode(srf, cmd, &ptr, bind);

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 * Fence creation failure is tolerated: vmw_fence_single_bo()
	 * is called with whatever fence pointer we got back.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
447
448/**
449 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
450 * surface validation process.
451 *
452 * @res: Pointer to a struct vmw_res embedded in a struct
453 * vmw_surface.
454 * @val_buf: Pointer to a struct ttm_validate_buffer containing
455 * information about the backup buffer.
456 *
457 * This function will copy backup data to the surface if the
458 * backup buffer is dirty.
459 */
460static int vmw_legacy_srf_bind(struct vmw_resource *res,
461 struct ttm_validate_buffer *val_buf)
462{
463 if (!res->backup_dirty)
464 return 0;
465
466 return vmw_legacy_srf_dma(res, val_buf, true);
467}
468
469
/**
 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
 * surface eviction process.
 *
 * @res: Pointer to a struct vmw_res embedded in a struct
 * vmw_surface.
 * @readback: Whether surface contents need to be read back to the
 * backup buffer. If false, the unbind is a no-op.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 *
 * This function will copy backup data from the surface.
 */
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	if (unlikely(readback))
		return vmw_legacy_srf_dma(res, val_buf, false);
	return 0;
}
489
/**
 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
 * resource eviction process.
 *
 * @res: Pointer to a struct vmw_res embedded in a struct
 * vmw_surface.
 *
 * Returns 0 on success, -ENOMEM if FIFO space could not be reserved.
 */
static int vmw_legacy_srf_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	uint32_t submit_size;
	uint8_t *cmd;

	/* Only surfaces that exist on the device may be destroyed. */
	BUG_ON(res->id == -1);

	/*
	 * Encode the surface destroy command.
	 */

	submit_size = vmw_surface_destroy_size();
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "eviction.\n");
		return -ENOMEM;
	}

	vmw_surface_destroy_encode(res->id, cmd);
	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size -= res->backup_size;

	/*
	 * Release the surface ID.
	 */

	vmw_resource_release_id(res);

	return 0;
}
534
535
/**
 * vmw_surface_init - initialize a struct vmw_surface
 *
 * @dev_priv: Pointer to a device private struct.
 * @srf: Pointer to the struct vmw_surface to initialize.
 * @res_free: Pointer to a resource destructor used to free
 * the object. Must not be NULL.
 *
 * Returns 0 on success, negative error code on failure. On failure,
 * @res_free has already been called on the embedded resource, so the
 * caller must not free @srf again.
 */
static int vmw_surface_init(struct vmw_private *dev_priv,
			    struct vmw_surface *srf,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_resource *res = &srf->res;

	BUG_ON(res_free == NULL);
	(void) vmw_3d_resource_inc(dev_priv, false);
	ret = vmw_resource_init(dev_priv, res, true, res_free,
				&vmw_legacy_surface_func);

	if (unlikely(ret != 0)) {
		/* Undo the 3d resource count increment before freeing. */
		vmw_3d_resource_dec(dev_priv, false);
		res_free(res);
		return ret;
	}

	/*
	 * The surface won't be visible to hardware until a
	 * surface validate.
	 */

	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return ret;
}
570
571/**
572 * vmw_user_surface_base_to_res - TTM base object to resource converter for
573 * user visible surfaces
574 *
575 * @base: Pointer to a TTM base object
576 *
577 * Returns the struct vmw_resource embedded in a struct vmw_surface
578 * for the user-visible object identified by the TTM base object @base.
579 */
580static struct vmw_resource *
581vmw_user_surface_base_to_res(struct ttm_base_object *base)
582{
583 return &(container_of(base, struct vmw_user_surface, base)->srf.res);
584}
585
/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res: A struct vmw_resource embedded in a struct vmw_surface.
 *
 * Frees the surface's size/offset arrays and cursor snooping image,
 * the containing user surface object, and returns the TTM memory
 * accounting that was charged at surface creation.
 */
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);
	struct vmw_private *dev_priv = srf->res.dev_priv;
	/* Saved before user_srf is freed below. */
	uint32_t size = user_srf->size;

	kfree(srf->offsets);
	kfree(srf->sizes);
	kfree(srf->snooper.image);
	ttm_base_object_kfree(user_srf, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
605
/**
 * vmw_user_surface_base_release - User visible surface TTM base object
 * destructor
 *
 * @p_base: Pointer to a pointer to a TTM base object
 * embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and the
 * pointer pointed to by *p_base is set to NULL.
 */
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
625
/**
 * vmw_surface_destroy_ioctl - Ioctl function implementing
 * the user surface destroy functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 *
 * Drops the caller's TTM usage reference on the surface; the surface
 * itself is destroyed when the last reference goes away.
 */
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}
642
643/**
644 * vmw_user_surface_define_ioctl - Ioctl function implementing
645 * the user surface define functionality.
646 *
647 * @dev: Pointer to a struct drm_device.
648 * @data: Pointer to data copied from / to user-space.
649 * @file_priv: Pointer to a drm file private structure.
650 */
651int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
652 struct drm_file *file_priv)
653{
654 struct vmw_private *dev_priv = vmw_priv(dev);
655 struct vmw_user_surface *user_srf;
656 struct vmw_surface *srf;
657 struct vmw_resource *res;
658 struct vmw_resource *tmp;
659 union drm_vmw_surface_create_arg *arg =
660 (union drm_vmw_surface_create_arg *)data;
661 struct drm_vmw_surface_create_req *req = &arg->req;
662 struct drm_vmw_surface_arg *rep = &arg->rep;
663 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
664 struct drm_vmw_size __user *user_sizes;
665 int ret;
666 int i, j;
667 uint32_t cur_bo_offset;
668 struct drm_vmw_size *cur_size;
669 struct vmw_surface_offset *cur_offset;
670 uint32_t num_sizes;
671 uint32_t size;
672 struct vmw_master *vmaster = vmw_master(file_priv->master);
673 const struct svga3d_surface_desc *desc;
674
675 if (unlikely(vmw_user_surface_size == 0))
676 vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
677 128;
678
679 num_sizes = 0;
680 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
681 num_sizes += req->mip_levels[i];
682
683 if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
684 DRM_VMW_MAX_MIP_LEVELS)
685 return -EINVAL;
686
687 size = vmw_user_surface_size + 128 +
688 ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
689 ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
690
691
692 desc = svga3dsurface_get_desc(req->format);
693 if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
694 DRM_ERROR("Invalid surface format for surface creation.\n");
695 return -EINVAL;
696 }
697
698 ret = ttm_read_lock(&vmaster->lock, true);
699 if (unlikely(ret != 0))
700 return ret;
701
702 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
703 size, false, true);
704 if (unlikely(ret != 0)) {
705 if (ret != -ERESTARTSYS)
706 DRM_ERROR("Out of graphics memory for surface"
707 " creation.\n");
708 goto out_unlock;
709 }
710
711 user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
712 if (unlikely(user_srf == NULL)) {
713 ret = -ENOMEM;
714 goto out_no_user_srf;
715 }
716
717 srf = &user_srf->srf;
718 res = &srf->res;
719
720 srf->flags = req->flags;
721 srf->format = req->format;
722 srf->scanout = req->scanout;
723
724 memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
725 srf->num_sizes = num_sizes;
726 user_srf->size = size;
727
728 srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
729 if (unlikely(srf->sizes == NULL)) {
730 ret = -ENOMEM;
731 goto out_no_sizes;
732 }
733 srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
734 GFP_KERNEL);
735 if (unlikely(srf->sizes == NULL)) {
736 ret = -ENOMEM;
737 goto out_no_offsets;
738 }
739
740 user_sizes = (struct drm_vmw_size __user *)(unsigned long)
741 req->size_addr;
742
743 ret = copy_from_user(srf->sizes, user_sizes,
744 srf->num_sizes * sizeof(*srf->sizes));
745 if (unlikely(ret != 0)) {
746 ret = -EFAULT;
747 goto out_no_copy;
748 }
749
750 srf->base_size = *srf->sizes;
751 srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
752 srf->multisample_count = 1;
753
754 cur_bo_offset = 0;
755 cur_offset = srf->offsets;
756 cur_size = srf->sizes;
757
758 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
759 for (j = 0; j < srf->mip_levels[i]; ++j) {
760 uint32_t stride = svga3dsurface_calculate_pitch
761 (desc, cur_size);
762
763 cur_offset->face = i;
764 cur_offset->mip = j;
765 cur_offset->bo_offset = cur_bo_offset;
766 cur_bo_offset += svga3dsurface_get_image_buffer_size
767 (desc, cur_size, stride);
768 ++cur_offset;
769 ++cur_size;
770 }
771 }
772 res->backup_size = cur_bo_offset;
773 if (srf->scanout &&
774 srf->num_sizes == 1 &&
775 srf->sizes[0].width == 64 &&
776 srf->sizes[0].height == 64 &&
777 srf->format == SVGA3D_A8R8G8B8) {
778
779 srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
780 /* clear the image */
781 if (srf->snooper.image) {
782 memset(srf->snooper.image, 0x00, 64 * 64 * 4);
783 } else {
784 DRM_ERROR("Failed to allocate cursor_image\n");
785 ret = -ENOMEM;
786 goto out_no_copy;
787 }
788 } else {
789 srf->snooper.image = NULL;
790 }
791 srf->snooper.crtc = NULL;
792
793 user_srf->base.shareable = false;
794 user_srf->base.tfile = NULL;
795
796 /**
797 * From this point, the generic resource management functions
798 * destroy the object on failure.
799 */
800
801 ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
802 if (unlikely(ret != 0))
803 goto out_unlock;
804
805 tmp = vmw_resource_reference(&srf->res);
806 ret = ttm_base_object_init(tfile, &user_srf->base,
807 req->shareable, VMW_RES_SURFACE,
808 &vmw_user_surface_base_release, NULL);
809
810 if (unlikely(ret != 0)) {
811 vmw_resource_unreference(&tmp);
812 vmw_resource_unreference(&res);
813 goto out_unlock;
814 }
815
816 rep->sid = user_srf->base.hash.key;
817 vmw_resource_unreference(&res);
818
819 ttm_read_unlock(&vmaster->lock);
820 return 0;
821out_no_copy:
822 kfree(srf->offsets);
823out_no_offsets:
824 kfree(srf->sizes);
825out_no_sizes:
826 ttm_base_object_kfree(user_srf, base);
827out_no_user_srf:
828 ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
829out_unlock:
830 ttm_read_unlock(&vmaster->lock);
831 return ret;
832}
833
/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 *
 * Looks up a surface by handle, adds a TTM usage reference for the
 * caller and copies the surface metadata (flags, format, mip levels
 * and optionally per-mip sizes) back to user-space.
 */
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;

	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	/*
	 * NOTE(review): if the copy below fails, the usage reference
	 * added above is kept while -EFAULT is returned — confirm
	 * user-space callers tolerate this.
	 */
	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		ret = -EFAULT;
	}
out_bad_resource:
out_no_reference:
	ttm_base_object_unref(&base);

	return ret;
}
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index f34838839b08..29437eabe095 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -1,7 +1,7 @@
1config VGA_ARB 1config VGA_ARB
2 bool "VGA Arbitration" if EXPERT 2 bool "VGA Arbitration" if EXPERT
3 default y 3 default y
4 depends on PCI 4 depends on (PCI && !S390)
5 help 5 help
6 Some "legacy" VGA devices implemented on PCI typically have the same 6 Some "legacy" VGA devices implemented on PCI typically have the same
7 hard-decoded addresses as they did on ISA. When multiple PCI devices 7 hard-decoded addresses as they did on ISA. When multiple PCI devices
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index e25cf31faab2..fa60add0ff63 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -18,7 +18,6 @@
18 */ 18 */
19 19
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/dmi.h>
22#include <linux/seq_file.h> 21#include <linux/seq_file.h>
23#include <linux/uaccess.h> 22#include <linux/uaccess.h>
24#include <linux/fs.h> 23#include <linux/fs.h>
@@ -376,7 +375,6 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
376 size_t cnt, loff_t *ppos) 375 size_t cnt, loff_t *ppos)
377{ 376{
378 char usercmd[64]; 377 char usercmd[64];
379 const char *pdev_name;
380 int ret; 378 int ret;
381 bool delay = false, can_switch; 379 bool delay = false, can_switch;
382 bool just_mux = false; 380 bool just_mux = false;
@@ -468,7 +466,6 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
468 goto out; 466 goto out;
469 467
470 if (can_switch) { 468 if (can_switch) {
471 pdev_name = pci_name(client->pdev);
472 ret = vga_switchto_stage1(client); 469 ret = vga_switchto_stage1(client);
473 if (ret) 470 if (ret)
474 printk(KERN_ERR "vga_switcheroo: switching failed stage 1 %d\n", ret); 471 printk(KERN_ERR "vga_switcheroo: switching failed stage 1 %d\n", ret);
@@ -540,7 +537,6 @@ fail:
540int vga_switcheroo_process_delayed_switch(void) 537int vga_switcheroo_process_delayed_switch(void)
541{ 538{
542 struct vga_switcheroo_client *client; 539 struct vga_switcheroo_client *client;
543 const char *pdev_name;
544 int ret; 540 int ret;
545 int err = -EINVAL; 541 int err = -EINVAL;
546 542
@@ -555,7 +551,6 @@ int vga_switcheroo_process_delayed_switch(void)
555 if (!client || !check_can_switch()) 551 if (!client || !check_can_switch())
556 goto err; 552 goto err;
557 553
558 pdev_name = pci_name(client->pdev);
559 ret = vga_switchto_stage2(client); 554 ret = vga_switchto_stage2(client);
560 if (ret) 555 if (ret)
561 printk(KERN_ERR "vga_switcheroo: delayed switching failed stage 2 %d\n", ret); 556 printk(KERN_ERR "vga_switcheroo: delayed switching failed stage 2 %d\n", ret);
@@ -567,4 +562,3 @@ err:
567 return err; 562 return err;
568} 563}
569EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch); 564EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch);
570