-rw-r--r--  Documentation/DMA-attributes.txt | 9
-rw-r--r--  Documentation/DocBook/drm.tmpl | 39
-rw-r--r--  Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt | 191
-rw-r--r--  Documentation/kref.txt | 88
-rw-r--r--  MAINTAINERS | 9
-rw-r--r--  arch/arm/mm/dma-mapping.c | 41
-rw-r--r--  drivers/char/agp/intel-agp.h | 91
-rw-r--r--  drivers/char/agp/intel-gtt.c | 320
-rw-r--r--  drivers/gpu/drm/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/Makefile | 6
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c | 12
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c | 13
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c | 12
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 63
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 161
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c (renamed from drivers/gpu/drm/drm_dp_i2c_helper.c) | 146
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 48
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 76
-rw-r--r--  drivers/gpu/drm/drm_hashtab.c | 38
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 3
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 120
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 8
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 2
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 37
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/Kconfig | 30
-rw-r--r--  drivers/gpu/drm/exynos/Makefile | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_ddc.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_buf.c | 115
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_buf.h | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | 150
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 117
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 43
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_encoder.c | 36
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_encoder.h | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 94
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 74
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 2001
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.h | 37
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 200
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 495
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 435
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h | 58
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c | 1870
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.h | 38
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.c | 59
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_hdmi.h | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_iommu.c | 150
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_iommu.h | 85
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.c | 2060
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.h | 266
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c | 12
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c | 855
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.h | 33
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 26
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 324
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmiphy.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 376
-rw-r--r--  drivers/gpu/drm/exynos/regs-fimc.h | 669
-rw-r--r--  drivers/gpu/drm/exynos/regs-gsc.h | 284
-rw-r--r--  drivers/gpu/drm/exynos/regs-hdmi.h | 22
-rw-r--r--  drivers/gpu/drm/exynos/regs-rotator.h | 73
-rw-r--r--  drivers/gpu/drm/gma500/cdv_device.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_dp.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_hdmi.c | 6
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_lvds.c | 10
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_output.c | 12
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_intel_display.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail.h | 6
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_crtc.c | 10
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_device.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_hdmi.c | 365
-rw-r--r--  drivers/gpu/drm/gma500/oaktrail_lvds.c | 8
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_lvds.c | 10
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_sdvo.c | 24
-rw-r--r--  drivers/gpu/drm/i2c/ch7006_drv.c | 20
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 66
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 95
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 136
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 472
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 292
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 64
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 420
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 86
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 308
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 763
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 45
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 62
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 1091
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 1945
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 961
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 123
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 135
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 227
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 90
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 511
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 250
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 36
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 128
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 101
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 21
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_main.c | 4
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 38
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/engctx.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/falcon.c | 247
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/gpuobj.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/mm.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c | 108
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c | 110
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c | 110
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/copy/nva3.c | 124
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c | 167
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/copy/nve0.c | 54
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c | 46
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c | 83
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c | 88
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c | 48
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c | 53
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c | 66
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c | 66
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c | 62
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv50.c | 1144
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv50.h | 142
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv84.c | 98
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nv94.c | 109
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nva0.c | 88
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nva3.c | 111
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c | 884
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/nve0.c | 94
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c | 112
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c | 190
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c | 126
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c | 71
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c | 68
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c | 126
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c | 104
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c | 122
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/base.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c | 36
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c | 60
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c | 26
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv04.c | 184
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv10.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv20.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv40.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nv50.c | 83
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/regs.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c | 107
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c | 110
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/software/nv04.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/software/nv10.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/software/nv50.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/software/nvc0.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/vp/nv84.c | 108
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c | 110
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/vp/nve0.c | 110
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/class.h | 225
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/engctx.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/falcon.h | 81
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/gpuobj.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/mm.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/object.h | 41
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/parent.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/bsp.h | 41
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/copy.h | 39
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/crypt.h | 39
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/disp.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h | 29
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/fifo.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/ppp.h | 40
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/engine/vp.h | 41
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h | 34
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h | 48
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h | 32
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/subdev/fb.h | 43
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bar/base.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/base.c | 32
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c | 63
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/disp.c | 178
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/dp.c | 182
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bios/init.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/base.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nv10.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nv20.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nv30.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nv40.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nv50.c | 26
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c | 64
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/device/nve0.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c | 34
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/base.c | 92
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c | 62
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c | 52
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c | 89
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c | 86
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c | 81
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c | 51
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c | 82
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c | 82
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c | 131
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c | 106
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c | 114
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c | 79
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c | 66
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c | 84
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c | 72
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c | 393
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c | 126
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/base.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_abi16.c | 27
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 235
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 65
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_chan.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 25
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.h | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_crtc.h | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 25
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c | 141
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 98
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_encoder.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hdmi.c | 261
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_prime.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vga.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_crtc.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_display.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fence.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c | 764
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_cursor.c | 136
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_dac.c | 321
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 2547
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.h | 71
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_evo.c | 403
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_evo.h | 120
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fence.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_pm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c | 530
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fence.c | 28
-rw-r--r--  drivers/gpu/drm/nouveau/nvd0_display.c | 2141
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 149
-rw-r--r--  drivers/gpu/drm/radeon/atombios_encoders.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 218
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 739
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 131
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 357
-rw-r--r--  drivers/gpu/drm/radeon/nid.h | 86
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 23
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 480
-rw-r--r--  drivers/gpu/drm/radeon/r600_cp.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 357
-rw-r--r--  drivers/gpu/drm/radeon/r600_reg.h | 9
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 86
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 38
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.c | 198
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 34
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 62
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 52
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 37
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 40
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 122
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 31
-rw-r--r--  drivers/gpu/drm/radeon/rv770d.h | 71
-rw-r--r--  drivers/gpu/drm/radeon/si.c | 355
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 119
-rw-r--r--  drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/tegra/Kconfig | 23
-rw-r--r--  drivers/gpu/drm/tegra/Makefile | 7
-rw-r--r--  drivers/gpu/drm/tegra/dc.c | 834
-rw-r--r--  drivers/gpu/drm/tegra/dc.h | 388
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 115
-rw-r--r--  drivers/gpu/drm/tegra/drm.h | 234
-rw-r--r--  drivers/gpu/drm/tegra/fb.c | 56
-rw-r--r--  drivers/gpu/drm/tegra/hdmi.c | 1334
-rw-r--r--  drivers/gpu/drm/tegra/hdmi.h | 575
-rw-r--r--  drivers/gpu/drm/tegra/host1x.c | 325
-rw-r--r--  drivers/gpu/drm/tegra/output.c | 272
-rw-r--r--  drivers/gpu/drm/tegra/rgb.c | 228
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 321
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 8
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c | 10
-rw-r--r--  drivers/gpu/drm/ttm/ttm_memory.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c | 51
-rw-r--r--  drivers/gpu/drm/udl/udl_connector.c | 14
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h | 909
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 23
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 274
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c | 22
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 92
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 153
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 917
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 21
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 2019
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h | 84
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 893
-rw-r--r--  drivers/gpu/vga/vga_switcheroo.c | 6
-rw-r--r--  include/drm/drmP.h | 3
-rw-r--r--  include/drm/drm_crtc.h | 19
-rw-r--r--  include/drm/drm_crtc_helper.h | 3
-rw-r--r--  include/drm/drm_dp_helper.h | 39
-rw-r--r--  include/drm/drm_hashtab.h | 14
-rw-r--r--  include/drm/exynos_drm.h | 26
-rw-r--r--  include/drm/intel-gtt.h | 7
-rw-r--r--  include/drm/ttm/ttm_bo_api.h | 33
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h | 45
-rw-r--r--  include/drm/ttm/ttm_execbuf_util.h | 3
-rw-r--r--  include/drm/ttm/ttm_memory.h | 2
-rw-r--r--  include/drm/ttm/ttm_object.h | 4
-rw-r--r--  include/linux/dma-attrs.h | 1
-rw-r--r--  include/linux/kref.h | 21
-rw-r--r--  include/uapi/drm/drm.h | 1
-rw-r--r--  include/uapi/drm/exynos_drm.h | 203
-rw-r--r--  include/uapi/drm/i915_drm.h | 6
-rw-r--r--  include/uapi/drm/radeon_drm.h | 6
364 files changed, 38931 insertions, 14876 deletions
diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
index f50309081ac7..e59480db9ee0 100644
--- a/Documentation/DMA-attributes.txt
+++ b/Documentation/DMA-attributes.txt
@@ -91,3 +91,12 @@ transferred to 'device' domain. This attribute can be also used for
 dma_unmap_{single,page,sg} functions family to force buffer to stay in
 device domain after releasing a mapping for it. Use this attribute with
 care!
+
+DMA_ATTR_FORCE_CONTIGUOUS
+-------------------------
+
+By default DMA-mapping subsystem is allowed to assemble the buffer
+allocated by dma_alloc_attrs() function from individual pages if it can
+be mapped as contiguous chunk into device dma address space. By
+specifing this attribute the allocated buffer is forced to be contiguous
+also in physical memory.
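
As a quick illustration of the attribute documented above, here is a minimal
sketch of a driver-side allocation against the dma-mapping API of this kernel
generation. The function name, the device pointer dev and the size argument
are illustrative, not part of the patch:

static int example_alloc_contiguous(struct device *dev, size_t size)
{
        DEFINE_DMA_ATTRS(attrs);
        dma_addr_t dma_handle;
        void *vaddr;

        /* Ask for a buffer that is contiguous in physical memory, too. */
        dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);

        vaddr = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL, &attrs);
        if (!vaddr)
                return -ENOMEM;

        /* ... use the buffer ... */

        /* Free with the same attributes that were used for allocation. */
        dma_free_attrs(dev, size, vaddr, dma_handle, &attrs);
        return 0;
}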
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index b0300529ab13..4ee2304f82f9 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -1141,23 +1141,13 @@ int max_width, max_height;</synopsis>
     the <methodname>page_flip</methodname> operation will be called with a
     non-NULL <parameter>event</parameter> argument pointing to a
     <structname>drm_pending_vblank_event</structname> instance. Upon page
-    flip completion the driver must fill the
-    <parameter>event</parameter>::<structfield>event</structfield>
-    <structfield>sequence</structfield>, <structfield>tv_sec</structfield>
-    and <structfield>tv_usec</structfield> fields with the associated
-    vertical blanking count and timestamp, add the event to the
-    <parameter>drm_file</parameter> list of events to be signaled, and wake
-    up any waiting process. This can be performed with
+    flip completion the driver must call <methodname>drm_send_vblank_event</methodname>
+    to fill in the event and send to wake up any waiting processes.
+    This can be performed with
     <programlisting><![CDATA[
-    struct timeval now;
-
-    event->event.sequence = drm_vblank_count_and_time(..., &now);
-    event->event.tv_sec = now.tv_sec;
-    event->event.tv_usec = now.tv_usec;
-
     spin_lock_irqsave(&dev->event_lock, flags);
-    list_add_tail(&event->base.link, &event->base.file_priv->event_list);
-    wake_up_interruptible(&event->base.file_priv->event_wait);
+    ...
+    drm_send_vblank_event(dev, pipe, event);
     spin_unlock_irqrestore(&dev->event_lock, flags);
     ]]></programlisting>
     </para>
@@ -1621,10 +1611,10 @@ void intel_crt_init(struct drm_device *dev)
     </sect2>
   </sect1>
 
-  <!-- Internals: mid-layer helper functions -->
+  <!-- Internals: kms helper functions -->
 
   <sect1>
-    <title>Mid-layer Helper Functions</title>
+    <title>Mode Setting Helper Functions</title>
     <para>
       The CRTC, encoder and connector functions provided by the drivers
       implement the DRM API. They're called by the DRM core and ioctl handlers
@@ -2106,6 +2096,21 @@ void intel_crt_init(struct drm_device *dev)
       </listitem>
     </itemizedlist>
   </sect2>
+  <sect2>
+    <title>Modeset Helper Functions Reference</title>
+!Edrivers/gpu/drm/drm_crtc_helper.c
+  </sect2>
+  <sect2>
+    <title>fbdev Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_fb_helper.c fbdev helpers
+!Edrivers/gpu/drm/drm_fb_helper.c
+  </sect2>
+  <sect2>
+    <title>Display Port Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_dp_helper.c dp helpers
+!Iinclude/drm/drm_dp_helper.h
+!Edrivers/gpu/drm/drm_dp_helper.c
+  </sect2>
   </sect1>
 
   <!-- Internals: vertical blanking -->
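
The first drm.tmpl hunk above reduces the page-flip completion recipe to a
single helper call. As a hedged illustration only, not mandated by the patch
itself, a driver's flip-done interrupt path under the new scheme might look
like the following; the function name, the pipe bookkeeping and the
drm_vblank_put() pairing are assumptions for the sketch:

/* Sketch: complete a pending page flip from the driver's flip-done IRQ. */
static void example_finish_page_flip(struct drm_device *dev, int pipe,
                                     struct drm_pending_vblank_event *event)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->event_lock, flags);
        if (event)
                drm_send_vblank_event(dev, pipe, event);
        spin_unlock_irqrestore(&dev->event_lock, flags);

        /* Drop the vblank reference taken when the flip was queued. */
        drm_vblank_put(dev, pipe);
}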
diff --git a/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
new file mode 100644
index 000000000000..b4fa934ae3a2
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
@@ -0,0 +1,191 @@
+NVIDIA Tegra host1x
+
+Required properties:
+- compatible: "nvidia,tegra<chip>-host1x"
+- reg: Physical base address and length of the controller's registers.
+- interrupts: The interrupt outputs from the controller.
+- #address-cells: The number of cells used to represent physical base addresses
+  in the host1x address space. Should be 1.
+- #size-cells: The number of cells used to represent the size of an address
+  range in the host1x address space. Should be 1.
+- ranges: The mapping of the host1x address space to the CPU address space.
+
+The host1x top-level node defines a number of children, each representing one
+of the following host1x client modules:
+
+- mpe: video encoder
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-mpe"
+  - reg: Physical base address and length of the controller's registers.
+  - interrupts: The interrupt outputs from the controller.
+
+- vi: video input
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-vi"
+  - reg: Physical base address and length of the controller's registers.
+  - interrupts: The interrupt outputs from the controller.
+
+- epp: encoder pre-processor
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-epp"
+  - reg: Physical base address and length of the controller's registers.
+  - interrupts: The interrupt outputs from the controller.
+
+- isp: image signal processor
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-isp"
+  - reg: Physical base address and length of the controller's registers.
+  - interrupts: The interrupt outputs from the controller.
+
+- gr2d: 2D graphics engine
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-gr2d"
+  - reg: Physical base address and length of the controller's registers.
+  - interrupts: The interrupt outputs from the controller.
+
+- gr3d: 3D graphics engine
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-gr3d"
+  - reg: Physical base address and length of the controller's registers.
+
+- dc: display controller
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-dc"
+  - reg: Physical base address and length of the controller's registers.
+  - interrupts: The interrupt outputs from the controller.
+
+  Each display controller node has a child node, named "rgb", that represents
+  the RGB output associated with the controller. It can take the following
+  optional properties:
+  - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
+  - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
+  - nvidia,edid: supplies a binary EDID blob
+
+- hdmi: High Definition Multimedia Interface
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-hdmi"
+  - reg: Physical base address and length of the controller's registers.
+  - interrupts: The interrupt outputs from the controller.
+  - vdd-supply: regulator for supply voltage
+  - pll-supply: regulator for PLL
+
+  Optional properties:
+  - nvidia,ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
+  - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
+  - nvidia,edid: supplies a binary EDID blob
+
+- tvo: TV encoder output
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-tvo"
+  - reg: Physical base address and length of the controller's registers.
+  - interrupts: The interrupt outputs from the controller.
+
+- dsi: display serial interface
+
+  Required properties:
+  - compatible: "nvidia,tegra<chip>-dsi"
+  - reg: Physical base address and length of the controller's registers.
+
+Example:
+
+/ {
+        ...
+
+        host1x {
+                compatible = "nvidia,tegra20-host1x", "simple-bus";
+                reg = <0x50000000 0x00024000>;
+                interrupts = <0 65 0x04   /* mpcore syncpt */
+                              0 67 0x04>; /* mpcore general */
+
+                #address-cells = <1>;
+                #size-cells = <1>;
+
+                ranges = <0x54000000 0x54000000 0x04000000>;
+
+                mpe {
+                        compatible = "nvidia,tegra20-mpe";
+                        reg = <0x54040000 0x00040000>;
+                        interrupts = <0 68 0x04>;
+                };
+
+                vi {
+                        compatible = "nvidia,tegra20-vi";
+                        reg = <0x54080000 0x00040000>;
+                        interrupts = <0 69 0x04>;
+                };
+
+                epp {
+                        compatible = "nvidia,tegra20-epp";
+                        reg = <0x540c0000 0x00040000>;
+                        interrupts = <0 70 0x04>;
+                };
+
+                isp {
+                        compatible = "nvidia,tegra20-isp";
+                        reg = <0x54100000 0x00040000>;
+                        interrupts = <0 71 0x04>;
+                };
+
+                gr2d {
+                        compatible = "nvidia,tegra20-gr2d";
+                        reg = <0x54140000 0x00040000>;
+                        interrupts = <0 72 0x04>;
+                };
+
+                gr3d {
+                        compatible = "nvidia,tegra20-gr3d";
+                        reg = <0x54180000 0x00040000>;
+                };
+
+                dc@54200000 {
+                        compatible = "nvidia,tegra20-dc";
+                        reg = <0x54200000 0x00040000>;
+                        interrupts = <0 73 0x04>;
+
+                        rgb {
+                                status = "disabled";
+                        };
+                };
+
+                dc@54240000 {
+                        compatible = "nvidia,tegra20-dc";
+                        reg = <0x54240000 0x00040000>;
+                        interrupts = <0 74 0x04>;
+
+                        rgb {
+                                status = "disabled";
+                        };
+                };
+
+                hdmi {
+                        compatible = "nvidia,tegra20-hdmi";
+                        reg = <0x54280000 0x00040000>;
+                        interrupts = <0 75 0x04>;
+                        status = "disabled";
+                };
+
+                tvo {
+                        compatible = "nvidia,tegra20-tvo";
+                        reg = <0x542c0000 0x00040000>;
+                        interrupts = <0 76 0x04>;
+                        status = "disabled";
+                };
+
+                dsi {
+                        compatible = "nvidia,tegra20-dsi";
+                        reg = <0x54300000 0x00040000>;
+                        status = "disabled";
+                };
+        };
+
+        ...
+};
diff --git a/Documentation/kref.txt b/Documentation/kref.txt
index 48ba715d5a63..ddf85a5dde0c 100644
--- a/Documentation/kref.txt
+++ b/Documentation/kref.txt
@@ -213,3 +213,91 @@ presentation on krefs, which can be found at:
 and:
   http://www.kroah.com/linux/talks/ols_2004_kref_talk/
 
+
+The above example could also be optimized using kref_get_unless_zero() in
+the following way:
+
+static struct my_data *get_entry()
+{
+        struct my_data *entry = NULL;
+        mutex_lock(&mutex);
+        if (!list_empty(&q)) {
+                entry = container_of(q.next, struct my_data, link);
+                if (!kref_get_unless_zero(&entry->refcount))
+                        entry = NULL;
+        }
+        mutex_unlock(&mutex);
+        return entry;
+}
+
+static void release_entry(struct kref *ref)
+{
+        struct my_data *entry = container_of(ref, struct my_data, refcount);
+
+        mutex_lock(&mutex);
+        list_del(&entry->link);
+        mutex_unlock(&mutex);
+        kfree(entry);
+}
+
+static void put_entry(struct my_data *entry)
+{
+        kref_put(&entry->refcount, release_entry);
+}
+
+Which is useful to remove the mutex lock around kref_put() in put_entry(), but
+it's important that kref_get_unless_zero is enclosed in the same critical
+section that finds the entry in the lookup table,
+otherwise kref_get_unless_zero may reference already freed memory.
+Note that it is illegal to use kref_get_unless_zero without checking its
+return value. If you are sure (by already having a valid pointer) that
+kref_get_unless_zero() will return true, then use kref_get() instead.
+
+The function kref_get_unless_zero also makes it possible to use rcu
+locking for lookups in the above example:
+
+struct my_data
+{
+        struct rcu_head rhead;
+        .
+        struct kref refcount;
+        .
+        .
+};
+
+static struct my_data *get_entry_rcu()
+{
+        struct my_data *entry = NULL;
+        rcu_read_lock();
+        if (!list_empty(&q)) {
+                entry = container_of(q.next, struct my_data, link);
+                if (!kref_get_unless_zero(&entry->refcount))
+                        entry = NULL;
+        }
+        rcu_read_unlock();
+        return entry;
+}
+
+static void release_entry_rcu(struct kref *ref)
+{
+        struct my_data *entry = container_of(ref, struct my_data, refcount);
+
+        mutex_lock(&mutex);
+        list_del_rcu(&entry->link);
+        mutex_unlock(&mutex);
+        kfree_rcu(entry, rhead);
+}
+
+static void put_entry(struct my_data *entry)
+{
+        kref_put(&entry->refcount, release_entry_rcu);
+}
+
+But note that the struct kref member needs to remain in valid memory for a
+rcu grace period after release_entry_rcu was called. That can be accomplished
+by using kfree_rcu(entry, rhead) as done above, or by calling synchronize_rcu()
+before using kfree, but note that synchronize_rcu() may sleep for a
+substantial amount of time.
+
+
+Thomas Hellstrom <thellstrom@vmware.com>
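
One point in the new kref text is worth underlining with code: the return
value of kref_get_unless_zero() must be acted on. A small sketch, reusing
entry, release_entry and the locking assumptions from the examples above;
do_something() is a placeholder, not a real kernel function:

/* Wrong: the refcount may already have reached zero, so the object
 * may be on its way to being freed even though the call was made. */
kref_get_unless_zero(&entry->refcount);
do_something(entry);    /* placeholder; may touch freed memory */

/* Right: only touch the object when the get really succeeded. */
if (kref_get_unless_zero(&entry->refcount)) {
        do_something(entry);
        kref_put(&entry->refcount, release_entry);
}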
diff --git a/MAINTAINERS b/MAINTAINERS
index d9c31b906ac9..6892b26025ba 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2549,6 +2549,15 @@ S: Supported
 F: drivers/gpu/drm/exynos
 F: include/drm/exynos*
 
+DRM DRIVERS FOR NVIDIA TEGRA
+M: Thierry Reding <thierry.reding@avionic-design.de>
+L: dri-devel@lists.freedesktop.org
+L: linux-tegra@vger.kernel.org
+T: git git://gitorious.org/thierryreding/linux.git
+S: Maintained
+F: drivers/gpu/drm/tegra/
+F: Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
+
 DSCC4 DRIVER
 M: Francois Romieu <romieu@fr.zoreil.com>
 L: netdev@vger.kernel.org
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 5383bc018571..6b2fb87c8698 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1034,7 +1034,8 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
 	spin_unlock_irqrestore(&mapping->lock, flags);
 }
 
-static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
+					  gfp_t gfp, struct dma_attrs *attrs)
 {
 	struct page **pages;
 	int count = size >> PAGE_SHIFT;
@@ -1048,6 +1049,23 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
 	if (!pages)
 		return NULL;
 
+	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs))
+	{
+		unsigned long order = get_order(size);
+		struct page *page;
+
+		page = dma_alloc_from_contiguous(dev, count, order);
+		if (!page)
+			goto error;
+
+		__dma_clear_buffer(page, size);
+
+		for (i = 0; i < count; i++)
+			pages[i] = page + i;
+
+		return pages;
+	}
+
 	while (count) {
 		int j, order = __fls(count);
 
@@ -1081,14 +1099,21 @@ error:
 	return NULL;
 }
 
-static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size)
+static int __iommu_free_buffer(struct device *dev, struct page **pages,
+			       size_t size, struct dma_attrs *attrs)
 {
 	int count = size >> PAGE_SHIFT;
 	int array_size = count * sizeof(struct page *);
 	int i;
-	for (i = 0; i < count; i++)
-		if (pages[i])
-			__free_pages(pages[i], 0);
+
+	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+		dma_release_from_contiguous(dev, pages[0], count);
+	} else {
+		for (i = 0; i < count; i++)
+			if (pages[i])
+				__free_pages(pages[i], 0);
+	}
+
 	if (array_size <= PAGE_SIZE)
 		kfree(pages);
 	else
@@ -1250,7 +1275,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	if (gfp & GFP_ATOMIC)
 		return __iommu_alloc_atomic(dev, size, handle);
 
-	pages = __iommu_alloc_buffer(dev, size, gfp);
+	pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
 	if (!pages)
 		return NULL;
 
@@ -1271,7 +1296,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 err_mapping:
 	__iommu_remove_mapping(dev, *handle, size);
 err_buffer:
-	__iommu_free_buffer(dev, pages, size);
+	__iommu_free_buffer(dev, pages, size, attrs);
 	return NULL;
 }
 
@@ -1327,7 +1352,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	}
 
 	__iommu_remove_mapping(dev, handle, size);
-	__iommu_free_buffer(dev, pages, size);
+	__iommu_free_buffer(dev, pages, size, attrs);
 }
 
 static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 6ec0fff79bc2..1042c1b90376 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -62,12 +62,6 @@
 #define I810_PTE_LOCAL  0x00000002
 #define I810_PTE_VALID  0x00000001
 #define I830_PTE_SYSTEM_CACHED  0x00000006
-/* GT PTE cache control fields */
-#define GEN6_PTE_UNCACHED  0x00000002
-#define HSW_PTE_UNCACHED  0x00000000
-#define GEN6_PTE_LLC  0x00000004
-#define GEN6_PTE_LLC_MLC  0x00000006
-#define GEN6_PTE_GFDT  0x00000008
 
 #define I810_SMRAM_MISCC  0x70
 #define I810_GFX_MEM_WIN_SIZE  0x00010000
@@ -97,7 +91,6 @@
 #define G4x_GMCH_SIZE_VT_2M  (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
 
 #define GFX_FLSH_CNTL  0x2170 /* 915+ */
-#define GFX_FLSH_CNTL_VLV  0x101008
 
 #define I810_DRAM_CTL  0x3000
 #define I810_DRAM_ROW_0  0x00000001
@@ -148,29 +141,6 @@
 #define INTEL_I7505_AGPCTRL  0x70
 #define INTEL_I7505_MCHCFG  0x50
 
-#define SNB_GMCH_CTRL  0x50
-#define SNB_GMCH_GMS_STOLEN_MASK  0xF8
-#define SNB_GMCH_GMS_STOLEN_32M  (1 << 3)
-#define SNB_GMCH_GMS_STOLEN_64M  (2 << 3)
-#define SNB_GMCH_GMS_STOLEN_96M  (3 << 3)
-#define SNB_GMCH_GMS_STOLEN_128M  (4 << 3)
-#define SNB_GMCH_GMS_STOLEN_160M  (5 << 3)
-#define SNB_GMCH_GMS_STOLEN_192M  (6 << 3)
-#define SNB_GMCH_GMS_STOLEN_224M  (7 << 3)
-#define SNB_GMCH_GMS_STOLEN_256M  (8 << 3)
-#define SNB_GMCH_GMS_STOLEN_288M  (9 << 3)
-#define SNB_GMCH_GMS_STOLEN_320M  (0xa << 3)
-#define SNB_GMCH_GMS_STOLEN_352M  (0xb << 3)
-#define SNB_GMCH_GMS_STOLEN_384M  (0xc << 3)
-#define SNB_GMCH_GMS_STOLEN_416M  (0xd << 3)
-#define SNB_GMCH_GMS_STOLEN_448M  (0xe << 3)
-#define SNB_GMCH_GMS_STOLEN_480M  (0xf << 3)
-#define SNB_GMCH_GMS_STOLEN_512M  (0x10 << 3)
-#define SNB_GTT_SIZE_0M  (0 << 8)
-#define SNB_GTT_SIZE_1M  (1 << 8)
-#define SNB_GTT_SIZE_2M  (2 << 8)
-#define SNB_GTT_SIZE_MASK  (3 << 8)
-
 /* pci devices ids */
 #define PCI_DEVICE_ID_INTEL_E7221_HB  0x2588
 #define PCI_DEVICE_ID_INTEL_E7221_IG  0x258a
@@ -219,66 +189,5 @@
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB  0x0062
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB  0x006a
 #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG  0x0046
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB  0x0100  /* Desktop */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG  0x0102
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG  0x0112
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG  0x0122
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB  0x0104  /* Mobile */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG  0x0106
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG  0x0116
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG  0x0126
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB  0x0108  /* Server */
-#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG  0x010A
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB  0x0150  /* Desktop */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG  0x0152
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG  0x0162
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB  0x0154  /* Mobile */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG  0x0156
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG  0x0166
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB  0x0158  /* Server */
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG  0x015A
-#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG  0x016A
-#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB  0x0F00  /* VLV1 */
-#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG  0x0F30
-#define PCI_DEVICE_ID_INTEL_HASWELL_HB  0x0400  /* Desktop */
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG  0x0402
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG  0x0412
-#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG  0x0422
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB  0x0404  /* Mobile */
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG  0x0406
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG  0x0416
-#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG  0x0426
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB  0x0408  /* Server */
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG  0x040a
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG  0x041a
-#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG  0x042a
-#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB  0x0c04
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG  0x0C02
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG  0x0C12
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG  0x0C22
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG  0x0C06
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG  0x0C16
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG  0x0C26
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG  0x0C0A
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG  0x0C1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG  0x0C2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG  0x0A02
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG  0x0A12
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG  0x0A22
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG  0x0A06
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG  0x0A16
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG  0x0A26
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG  0x0A0A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG  0x0A1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG  0x0A2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG  0x0D12
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG  0x0D22
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG  0x0D32
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG  0x0D16
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG  0x0D26
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG  0x0D36
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG  0x0D1A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG  0x0D2A
-#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG  0x0D3A
 
 #endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 38390f7c6ab6..dbd901e94ea6 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -367,62 +367,6 @@ static unsigned int intel_gtt_stolen_size(void)
 			stolen_size = 0;
 			break;
 		}
-	} else if (INTEL_GTT_GEN == 6) {
-		/*
-		 * SandyBridge has new memory control reg at 0x50.w
-		 */
-		u16 snb_gmch_ctl;
-		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-		switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
-		case SNB_GMCH_GMS_STOLEN_32M:
-			stolen_size = MB(32);
-			break;
-		case SNB_GMCH_GMS_STOLEN_64M:
-			stolen_size = MB(64);
-			break;
-		case SNB_GMCH_GMS_STOLEN_96M:
-			stolen_size = MB(96);
-			break;
-		case SNB_GMCH_GMS_STOLEN_128M:
-			stolen_size = MB(128);
-			break;
-		case SNB_GMCH_GMS_STOLEN_160M:
-			stolen_size = MB(160);
-			break;
-		case SNB_GMCH_GMS_STOLEN_192M:
-			stolen_size = MB(192);
-			break;
-		case SNB_GMCH_GMS_STOLEN_224M:
-			stolen_size = MB(224);
-			break;
-		case SNB_GMCH_GMS_STOLEN_256M:
-			stolen_size = MB(256);
-			break;
-		case SNB_GMCH_GMS_STOLEN_288M:
-			stolen_size = MB(288);
-			break;
-		case SNB_GMCH_GMS_STOLEN_320M:
-			stolen_size = MB(320);
-			break;
-		case SNB_GMCH_GMS_STOLEN_352M:
-			stolen_size = MB(352);
-			break;
-		case SNB_GMCH_GMS_STOLEN_384M:
-			stolen_size = MB(384);
-			break;
-		case SNB_GMCH_GMS_STOLEN_416M:
-			stolen_size = MB(416);
-			break;
-		case SNB_GMCH_GMS_STOLEN_448M:
-			stolen_size = MB(448);
-			break;
-		case SNB_GMCH_GMS_STOLEN_480M:
-			stolen_size = MB(480);
-			break;
-		case SNB_GMCH_GMS_STOLEN_512M:
-			stolen_size = MB(512);
-			break;
-		}
 	} else {
 		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
 		case I855_GMCH_GMS_STOLEN_1M:
@@ -556,29 +500,9 @@ static unsigned int i965_gtt_total_entries(void)
 
 static unsigned int intel_gtt_total_entries(void)
 {
-	int size;
-
 	if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
 		return i965_gtt_total_entries();
-	else if (INTEL_GTT_GEN == 6) {
-		u16 snb_gmch_ctl;
-
-		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
-		switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
-		default:
-		case SNB_GTT_SIZE_0M:
-			printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
-			size = MB(0);
-			break;
-		case SNB_GTT_SIZE_1M:
-			size = MB(1);
-			break;
-		case SNB_GTT_SIZE_2M:
-			size = MB(2);
-			break;
-		}
-		return size/4;
-	} else {
+	else {
 		/* On previous hardware, the GTT size was just what was
 		 * required to map the aperture.
 		 */
@@ -778,9 +702,6 @@ bool intel_enable_gtt(void)
 {
 	u8 __iomem *reg;
 
-	if (INTEL_GTT_GEN >= 6)
-		return true;
-
 	if (INTEL_GTT_GEN == 2) {
 		u16 gmch_ctrl;
 
@@ -1149,85 +1070,6 @@ static void i965_write_entry(dma_addr_t addr,
 	writel(addr | pte_flags, intel_private.gtt + entry);
 }
 
-static bool gen6_check_flags(unsigned int flags)
-{
-	return true;
-}
-
-static void haswell_write_entry(dma_addr_t addr, unsigned int entry,
-				unsigned int flags)
-{
-	unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
-	unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
-	u32 pte_flags;
-
-	if (type_mask == AGP_USER_MEMORY)
-		pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID;
-	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
-		pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
-		if (gfdt)
-			pte_flags |= GEN6_PTE_GFDT;
-	} else { /* set 'normal'/'cached' to LLC by default */
-		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
-		if (gfdt)
-			pte_flags |= GEN6_PTE_GFDT;
-	}
-
-	/* gen6 has bit11-4 for physical addr bit39-32 */
-	addr |= (addr >> 28) & 0xff0;
-	writel(addr | pte_flags, intel_private.gtt + entry);
-}
-
-static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
-			     unsigned int flags)
-{
-	unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
-	unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
-	u32 pte_flags;
-
-	if (type_mask == AGP_USER_MEMORY)
-		pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
-	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
-		pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
-		if (gfdt)
-			pte_flags |= GEN6_PTE_GFDT;
-	} else { /* set 'normal'/'cached' to LLC by default */
-		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
-		if (gfdt)
-			pte_flags |= GEN6_PTE_GFDT;
-	}
-
-	/* gen6 has bit11-4 for physical addr bit39-32 */
-	addr |= (addr >> 28) & 0xff0;
-	writel(addr | pte_flags, intel_private.gtt + entry);
-}
-
-static void valleyview_write_entry(dma_addr_t addr, unsigned int entry,
-				   unsigned int flags)
-{
-	unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
-	unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
-	u32 pte_flags;
-
-	if (type_mask == AGP_USER_MEMORY)
-		pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
-	else {
-		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
-		if (gfdt)
-			pte_flags |= GEN6_PTE_GFDT;
-	}
-
-	/* gen6 has bit11-4 for physical addr bit39-32 */
-	addr |= (addr >> 28) & 0xff0;
-	writel(addr | pte_flags, intel_private.gtt + entry);
-
-	writel(1, intel_private.registers + GFX_FLSH_CNTL_VLV);
-}
-
-static void gen6_cleanup(void)
-{
-}
-
 /* Certain Gen5 chipsets require require idling the GPU before
  * unmapping anything from the GTT when VT-d is enabled.
  */
@@ -1249,41 +1091,29 @@ static inline int needs_idle_maps(void)
 
 static int i9xx_setup(void)
 {
-	u32 reg_addr;
+	u32 reg_addr, gtt_addr;
 	int size = KB(512);
 
 	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
 
 	reg_addr &= 0xfff80000;
 
-	if (INTEL_GTT_GEN >= 7)
-		size = MB(2);
-
 	intel_private.registers = ioremap(reg_addr, size);
 	if (!intel_private.registers)
 		return -ENOMEM;
 
-	if (INTEL_GTT_GEN == 3) {
-		u32 gtt_addr;
-
+	switch (INTEL_GTT_GEN) {
+	case 3:
 		pci_read_config_dword(intel_private.pcidev,
 				      I915_PTEADDR, &gtt_addr);
 		intel_private.gtt_bus_addr = gtt_addr;
-	} else {
-		u32 gtt_offset;
-
-		switch (INTEL_GTT_GEN) {
-		case 5:
-		case 6:
-		case 7:
-			gtt_offset = MB(2);
-			break;
-		case 4:
-		default:
-			gtt_offset = KB(512);
-			break;
-		}
-		intel_private.gtt_bus_addr = reg_addr + gtt_offset;
+		break;
+	case 5:
+		intel_private.gtt_bus_addr = reg_addr + MB(2);
+		break;
+	default:
+		intel_private.gtt_bus_addr = reg_addr + KB(512);
+		break;
 	}
 
 	if (needs_idle_maps())
@@ -1395,32 +1225,6 @@ static const struct intel_gtt_driver ironlake_gtt_driver = {
 	.check_flags = i830_check_flags,
 	.chipset_flush = i9xx_chipset_flush,
 };
-static const struct intel_gtt_driver sandybridge_gtt_driver = {
-	.gen = 6,
-	.setup = i9xx_setup,
-	.cleanup = gen6_cleanup,
-	.write_entry = gen6_write_entry,
-	.dma_mask_size = 40,
-	.check_flags = gen6_check_flags,
-	.chipset_flush = i9xx_chipset_flush,
-};
-static const struct intel_gtt_driver haswell_gtt_driver = {
-	.gen = 6,
-	.setup = i9xx_setup,
-	.cleanup = gen6_cleanup,
-	.write_entry = haswell_write_entry,
-	.dma_mask_size = 40,
-	.check_flags = gen6_check_flags,
-	.chipset_flush = i9xx_chipset_flush,
-};
-static const struct intel_gtt_driver valleyview_gtt_driver = {
-	.gen = 7,
-	.setup = i9xx_setup,
-	.cleanup = gen6_cleanup,
-	.write_entry = valleyview_write_entry,
-	.dma_mask_size = 40,
-	.check_flags = gen6_check_flags,
-};
 
 /* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
  * driver and gmch_driver must be non-null, and find_gmch will determine
@@ -1501,106 +1305,6 @@ static const struct intel_gtt_driver_description {
 	    "HD Graphics", &ironlake_gtt_driver },
 	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
 	    "HD Graphics", &ironlake_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
-	    "Sandybridge", &sandybridge_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
-	    "Sandybridge", &sandybridge_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
-	    "Sandybridge", &sandybridge_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
-	    "Sandybridge", &sandybridge_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
-	    "Sandybridge", &sandybridge_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
-	    "Sandybridge", &sandybridge_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
-	    "Sandybridge", &sandybridge_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG,
-	    "Ivybridge", &sandybridge_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG,
-	    "Ivybridge", &sandybridge_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG,
-	    "Ivybridge", &sandybridge_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG,
-	    "Ivybridge", &sandybridge_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
-	    "Ivybridge", &sandybridge_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
-	    "Ivybridge", &sandybridge_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
-	    "ValleyView", &valleyview_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
-	    "Haswell", &haswell_gtt_driver },
-	{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
-	    "Haswell", &haswell_gtt_driver },
 	{ 0, NULL, NULL }
 };
 
@@ -1686,7 +1390,7 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
 }
 EXPORT_SYMBOL(intel_gmch_probe);
 
-const struct intel_gtt *intel_gtt_get(void)
+struct intel_gtt *intel_gtt_get(void)
 {
 	return &intel_private.base;
 }
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 18321b68b880..983201b450f1 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -210,3 +210,5 @@ source "drivers/gpu/drm/mgag200/Kconfig"
210source "drivers/gpu/drm/cirrus/Kconfig" 210source "drivers/gpu/drm/cirrus/Kconfig"
211 211
212source "drivers/gpu/drm/shmobile/Kconfig" 212source "drivers/gpu/drm/shmobile/Kconfig"
213
214source "drivers/gpu/drm/tegra/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 2ff5cefe9ead..6f58c81cfcbc 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -8,7 +8,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
8 drm_context.o drm_dma.o \ 8 drm_context.o drm_dma.o \
9 drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \ 9 drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
10 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ 10 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
11 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ 11 drm_agpsupport.o drm_scatter.o drm_pci.o \
12 drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \ 12 drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
13 drm_crtc.o drm_modes.o drm_edid.o \ 13 drm_crtc.o drm_modes.o drm_edid.o \
14 drm_info.o drm_debugfs.o drm_encoder_slave.o \ 14 drm_info.o drm_debugfs.o drm_encoder_slave.o \
@@ -16,10 +16,11 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
16 16
17drm-$(CONFIG_COMPAT) += drm_ioc32.o 17drm-$(CONFIG_COMPAT) += drm_ioc32.o
18drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o 18drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
19drm-$(CONFIG_PCI) += ati_pcigart.o
19 20
20drm-usb-y := drm_usb.o 21drm-usb-y := drm_usb.o
21 22
22drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o 23drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_helper.o
23drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o 24drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
24drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o 25drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
25 26
@@ -48,4 +49,5 @@ obj-$(CONFIG_DRM_GMA500) += gma500/
48obj-$(CONFIG_DRM_UDL) += udl/ 49obj-$(CONFIG_DRM_UDL) += udl/
49obj-$(CONFIG_DRM_AST) += ast/ 50obj-$(CONFIG_DRM_AST) += ast/
50obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/ 51obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
52obj-$(CONFIG_DRM_TEGRA) += tegra/
51obj-y += i2c/ 53obj-y += i2c/
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 1a026ac2dfb4..3602731a6112 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -186,11 +186,11 @@ static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *
186 186
187static int ast_bo_move(struct ttm_buffer_object *bo, 187static int ast_bo_move(struct ttm_buffer_object *bo,
188 bool evict, bool interruptible, 188 bool evict, bool interruptible,
189 bool no_wait_reserve, bool no_wait_gpu, 189 bool no_wait_gpu,
190 struct ttm_mem_reg *new_mem) 190 struct ttm_mem_reg *new_mem)
191{ 191{
192 int r; 192 int r;
193 r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 193 r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
194 return r; 194 return r;
195} 195}
196 196
@@ -356,7 +356,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
356 356
357 ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size, 357 ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size,
358 ttm_bo_type_device, &astbo->placement, 358 ttm_bo_type_device, &astbo->placement,
359 align >> PAGE_SHIFT, 0, false, NULL, acc_size, 359 align >> PAGE_SHIFT, false, NULL, acc_size,
360 NULL, ast_bo_ttm_destroy); 360 NULL, ast_bo_ttm_destroy);
361 if (ret) 361 if (ret)
362 return ret; 362 return ret;
@@ -383,7 +383,7 @@ int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
383 ast_ttm_placement(bo, pl_flag); 383 ast_ttm_placement(bo, pl_flag);
384 for (i = 0; i < bo->placement.num_placement; i++) 384 for (i = 0; i < bo->placement.num_placement; i++)
385 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 385 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
386 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 386 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
387 if (ret) 387 if (ret)
388 return ret; 388 return ret;
389 389
@@ -406,7 +406,7 @@ int ast_bo_unpin(struct ast_bo *bo)
406 406
407 for (i = 0; i < bo->placement.num_placement ; i++) 407 for (i = 0; i < bo->placement.num_placement ; i++)
408 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; 408 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
409 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 409 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
410 if (ret) 410 if (ret)
411 return ret; 411 return ret;
412 412
@@ -431,7 +431,7 @@ int ast_bo_push_sysram(struct ast_bo *bo)
431 for (i = 0; i < bo->placement.num_placement ; i++) 431 for (i = 0; i < bo->placement.num_placement ; i++)
432 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 432 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
433 433
434 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 434 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
435 if (ret) { 435 if (ret) {
436 DRM_ERROR("pushing to VRAM failed\n"); 436 DRM_ERROR("pushing to VRAM failed\n");
437 return ret; 437 return ret;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 101e423c8991..dcd1a8c029eb 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -35,12 +35,15 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
35}; 35};
36 36
37 37
38static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev) 38static int cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
39{ 39{
40 struct apertures_struct *ap; 40 struct apertures_struct *ap;
41 bool primary = false; 41 bool primary = false;
42 42
43 ap = alloc_apertures(1); 43 ap = alloc_apertures(1);
44 if (!ap)
45 return -ENOMEM;
46
44 ap->ranges[0].base = pci_resource_start(pdev, 0); 47 ap->ranges[0].base = pci_resource_start(pdev, 0);
45 ap->ranges[0].size = pci_resource_len(pdev, 0); 48 ap->ranges[0].size = pci_resource_len(pdev, 0);
46 49
@@ -49,12 +52,18 @@ static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
49#endif 52#endif
50 remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary); 53 remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
51 kfree(ap); 54 kfree(ap);
55
56 return 0;
52} 57}
53 58
54static int __devinit 59static int __devinit
55cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 60cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
56{ 61{
57 cirrus_kick_out_firmware_fb(pdev); 62 int ret;
63
64 ret = cirrus_kick_out_firmware_fb(pdev);
65 if (ret)
66 return ret;
58 67
59 return drm_get_pci_dev(pdev, ent, &driver); 68 return drm_get_pci_dev(pdev, ent, &driver);
60} 69}
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index bc83f835c830..1413a26e4905 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -186,11 +186,11 @@ static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
186 186
187static int cirrus_bo_move(struct ttm_buffer_object *bo, 187static int cirrus_bo_move(struct ttm_buffer_object *bo,
188 bool evict, bool interruptible, 188 bool evict, bool interruptible,
189 bool no_wait_reserve, bool no_wait_gpu, 189 bool no_wait_gpu,
190 struct ttm_mem_reg *new_mem) 190 struct ttm_mem_reg *new_mem)
191{ 191{
192 int r; 192 int r;
193 r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 193 r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
194 return r; 194 return r;
195} 195}
196 196
@@ -361,7 +361,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
361 361
362 ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size, 362 ret = ttm_bo_init(&cirrus->ttm.bdev, &cirrusbo->bo, size,
363 ttm_bo_type_device, &cirrusbo->placement, 363 ttm_bo_type_device, &cirrusbo->placement,
364 align >> PAGE_SHIFT, 0, false, NULL, acc_size, 364 align >> PAGE_SHIFT, false, NULL, acc_size,
365 NULL, cirrus_bo_ttm_destroy); 365 NULL, cirrus_bo_ttm_destroy);
366 if (ret) 366 if (ret)
367 return ret; 367 return ret;
@@ -388,7 +388,7 @@ int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
388 cirrus_ttm_placement(bo, pl_flag); 388 cirrus_ttm_placement(bo, pl_flag);
389 for (i = 0; i < bo->placement.num_placement; i++) 389 for (i = 0; i < bo->placement.num_placement; i++)
390 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 390 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
391 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 391 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
392 if (ret) 392 if (ret)
393 return ret; 393 return ret;
394 394
@@ -411,7 +411,7 @@ int cirrus_bo_unpin(struct cirrus_bo *bo)
411 411
412 for (i = 0; i < bo->placement.num_placement ; i++) 412 for (i = 0; i < bo->placement.num_placement ; i++)
413 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; 413 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
414 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 414 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
415 if (ret) 415 if (ret)
416 return ret; 416 return ret;
417 417
@@ -436,7 +436,7 @@ int cirrus_bo_push_sysram(struct cirrus_bo *bo)
436 for (i = 0; i < bo->placement.num_placement ; i++) 436 for (i = 0; i < bo->placement.num_placement ; i++)
437 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 437 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
438 438
439 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 439 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
440 if (ret) { 440 if (ret) {
441 DRM_ERROR("pushing to VRAM failed\n"); 441 DRM_ERROR("pushing to VRAM failed\n");
442 return ret; 442 return ret;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index ef1b22144d37..f2d667b8bee2 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -470,10 +470,8 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
470{ 470{
471 struct drm_device *dev = crtc->dev; 471 struct drm_device *dev = crtc->dev;
472 472
473 if (crtc->gamma_store) { 473 kfree(crtc->gamma_store);
474 kfree(crtc->gamma_store); 474 crtc->gamma_store = NULL;
475 crtc->gamma_store = NULL;
476 }
477 475
478 drm_mode_object_put(dev, &crtc->base); 476 drm_mode_object_put(dev, &crtc->base);
479 list_del(&crtc->head); 477 list_del(&crtc->head);
@@ -555,16 +553,17 @@ int drm_connector_init(struct drm_device *dev,
555 INIT_LIST_HEAD(&connector->probed_modes); 553 INIT_LIST_HEAD(&connector->probed_modes);
556 INIT_LIST_HEAD(&connector->modes); 554 INIT_LIST_HEAD(&connector->modes);
557 connector->edid_blob_ptr = NULL; 555 connector->edid_blob_ptr = NULL;
556 connector->status = connector_status_unknown;
558 557
559 list_add_tail(&connector->head, &dev->mode_config.connector_list); 558 list_add_tail(&connector->head, &dev->mode_config.connector_list);
560 dev->mode_config.num_connector++; 559 dev->mode_config.num_connector++;
561 560
562 if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL) 561 if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
563 drm_connector_attach_property(connector, 562 drm_object_attach_property(&connector->base,
564 dev->mode_config.edid_property, 563 dev->mode_config.edid_property,
565 0); 564 0);
566 565
567 drm_connector_attach_property(connector, 566 drm_object_attach_property(&connector->base,
568 dev->mode_config.dpms_property, 0); 567 dev->mode_config.dpms_property, 0);
569 568
570 out: 569 out:
@@ -2280,13 +2279,21 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
2280 2279
2281 for (i = 0; i < num_planes; i++) { 2280 for (i = 0; i < num_planes; i++) {
2282 unsigned int width = r->width / (i != 0 ? hsub : 1); 2281 unsigned int width = r->width / (i != 0 ? hsub : 1);
2282 unsigned int height = r->height / (i != 0 ? vsub : 1);
2283 unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
2283 2284
2284 if (!r->handles[i]) { 2285 if (!r->handles[i]) {
2285 DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i); 2286 DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
2286 return -EINVAL; 2287 return -EINVAL;
2287 } 2288 }
2288 2289
2289 if (r->pitches[i] < drm_format_plane_cpp(r->pixel_format, i) * width) { 2290 if ((uint64_t) width * cpp > UINT_MAX)
2291 return -ERANGE;
2292
2293 if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX)
2294 return -ERANGE;
2295
2296 if (r->pitches[i] < width * cpp) {
2290 DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i); 2297 DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i);
2291 return -EINVAL; 2298 return -EINVAL;
2292 } 2299 }
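
The new checks above promote each multiplication to 64 bits before comparing against UINT_MAX, so plane sizes that would wrap 32-bit arithmetic now fail with -ERANGE instead of slipping past the pitch test. A standalone sketch of the failure mode being closed (the values are illustrative only, not from the patch):

#include <assert.h>
#include <limits.h>
#include <stdint.h>

int main(void)
{
	/* Illustrative values just past the 32-bit boundary. */
	uint32_t width = 0x40000001u;	/* 1073741825 pixels */
	uint32_t cpp = 4;		/* bytes per pixel */

	/* The 32-bit product wraps to 4 and would satisfy almost any
	 * pitch, which is exactly the hole the old check left open. */
	uint32_t wrapped = width * cpp;
	assert(wrapped == 4);

	/* The 64-bit promotion used by framebuffer_check() catches it. */
	assert((uint64_t)width * cpp > UINT_MAX);
	return 0;
}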
@@ -2323,6 +2330,11 @@ int drm_mode_addfb2(struct drm_device *dev,
2323 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 2330 if (!drm_core_check_feature(dev, DRIVER_MODESET))
2324 return -EINVAL; 2331 return -EINVAL;
2325 2332
2333 if (r->flags & ~DRM_MODE_FB_INTERLACED) {
2334 DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
2335 return -EINVAL;
2336 }
2337
2326 if ((config->min_width > r->width) || (r->width > config->max_width)) { 2338 if ((config->min_width > r->width) || (r->width > config->max_width)) {
2327 DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n", 2339 DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n",
2328 r->width, config->min_width, config->max_width); 2340 r->width, config->min_width, config->max_width);
@@ -2916,27 +2928,6 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
2916} 2928}
2917EXPORT_SYMBOL(drm_property_destroy); 2929EXPORT_SYMBOL(drm_property_destroy);
2918 2930
2919void drm_connector_attach_property(struct drm_connector *connector,
2920 struct drm_property *property, uint64_t init_val)
2921{
2922 drm_object_attach_property(&connector->base, property, init_val);
2923}
2924EXPORT_SYMBOL(drm_connector_attach_property);
2925
2926int drm_connector_property_set_value(struct drm_connector *connector,
2927 struct drm_property *property, uint64_t value)
2928{
2929 return drm_object_property_set_value(&connector->base, property, value);
2930}
2931EXPORT_SYMBOL(drm_connector_property_set_value);
2932
2933int drm_connector_property_get_value(struct drm_connector *connector,
2934 struct drm_property *property, uint64_t *val)
2935{
2936 return drm_object_property_get_value(&connector->base, property, val);
2937}
2938EXPORT_SYMBOL(drm_connector_property_get_value);
2939
2940void drm_object_attach_property(struct drm_mode_object *obj, 2931void drm_object_attach_property(struct drm_mode_object *obj,
2941 struct drm_property *property, 2932 struct drm_property *property,
2942 uint64_t init_val) 2933 uint64_t init_val)
@@ -3173,15 +3164,17 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
3173 /* Delete edid, when there is none. */ 3164 /* Delete edid, when there is none. */
3174 if (!edid) { 3165 if (!edid) {
3175 connector->edid_blob_ptr = NULL; 3166 connector->edid_blob_ptr = NULL;
3176 ret = drm_connector_property_set_value(connector, dev->mode_config.edid_property, 0); 3167 ret = drm_object_property_set_value(&connector->base, dev->mode_config.edid_property, 0);
3177 return ret; 3168 return ret;
3178 } 3169 }
3179 3170
3180 size = EDID_LENGTH * (1 + edid->extensions); 3171 size = EDID_LENGTH * (1 + edid->extensions);
3181 connector->edid_blob_ptr = drm_property_create_blob(connector->dev, 3172 connector->edid_blob_ptr = drm_property_create_blob(connector->dev,
3182 size, edid); 3173 size, edid);
3174 if (!connector->edid_blob_ptr)
3175 return -EINVAL;
3183 3176
3184 ret = drm_connector_property_set_value(connector, 3177 ret = drm_object_property_set_value(&connector->base,
3185 dev->mode_config.edid_property, 3178 dev->mode_config.edid_property,
3186 connector->edid_blob_ptr->base.id); 3179 connector->edid_blob_ptr->base.id);
3187 3180
@@ -3204,6 +3197,9 @@ static bool drm_property_change_is_valid(struct drm_property *property,
3204 for (i = 0; i < property->num_values; i++) 3197 for (i = 0; i < property->num_values; i++)
3205 valid_mask |= (1ULL << property->values[i]); 3198 valid_mask |= (1ULL << property->values[i]);
3206 return !(value & ~valid_mask); 3199 return !(value & ~valid_mask);
3200 } else if (property->flags & DRM_MODE_PROP_BLOB) {
3201 /* Only the driver knows */
3202 return true;
3207 } else { 3203 } else {
3208 int i; 3204 int i;
3209 for (i = 0; i < property->num_values; i++) 3205 for (i = 0; i < property->num_values; i++)
@@ -3245,7 +3241,7 @@ static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
3245 3241
3246 /* store the property value if successful */ 3242 /* store the property value if successful */
3247 if (!ret) 3243 if (!ret)
3248 drm_connector_property_set_value(connector, property, value); 3244 drm_object_property_set_value(&connector->base, property, value);
3249 return ret; 3245 return ret;
3250} 3246}
3251 3247
@@ -3656,9 +3652,12 @@ void drm_mode_config_reset(struct drm_device *dev)
3656 if (encoder->funcs->reset) 3652 if (encoder->funcs->reset)
3657 encoder->funcs->reset(encoder); 3653 encoder->funcs->reset(encoder);
3658 3654
3659 list_for_each_entry(connector, &dev->mode_config.connector_list, head) 3655 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
3656 connector->status = connector_status_unknown;
3657
3660 if (connector->funcs->reset) 3658 if (connector->funcs->reset)
3661 connector->funcs->reset(connector); 3659 connector->funcs->reset(connector);
3660 }
3662} 3661}
3663EXPORT_SYMBOL(drm_mode_config_reset); 3662EXPORT_SYMBOL(drm_mode_config_reset);
3664 3663
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 1227adf74dbc..7b2d378b2576 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -39,6 +39,35 @@
39#include <drm/drm_fb_helper.h> 39#include <drm/drm_fb_helper.h>
40#include <drm/drm_edid.h> 40#include <drm/drm_edid.h>
41 41
42/**
43 * drm_helper_move_panel_connectors_to_head() - move panels to the front in the
44 * connector list
45 * @dev: drm device to operate on
46 *
47 * Some userspace presumes that the first connected connector is the main
48 * display, where it's supposed to display e.g. the login screen. For
49 * laptops, this should be the main panel. Use this function to sort all
50 * (eDP/LVDS) panels to the front of the connector list, instead of
51 * painstakingly trying to initialize them in the right order.
52 */
53void drm_helper_move_panel_connectors_to_head(struct drm_device *dev)
54{
55 struct drm_connector *connector, *tmp;
56 struct list_head panel_list;
57
58 INIT_LIST_HEAD(&panel_list);
59
60 list_for_each_entry_safe(connector, tmp,
61 &dev->mode_config.connector_list, head) {
62 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
63 connector->connector_type == DRM_MODE_CONNECTOR_eDP)
64 list_move_tail(&connector->head, &panel_list);
65 }
66
67 list_splice(&panel_list, &dev->mode_config.connector_list);
68}
69EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
70
42static bool drm_kms_helper_poll = true; 71static bool drm_kms_helper_poll = true;
43module_param_named(poll, drm_kms_helper_poll, bool, 0600); 72module_param_named(poll, drm_kms_helper_poll, bool, 0600);
44 73
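
A minimal sketch of how a driver might call the new helper at load time; the foo_* names are hypothetical and only the drm_helper_move_panel_connectors_to_head() call itself comes from this patch:

static int foo_driver_load(struct drm_device *dev, unsigned long flags)
{
	int ret;

	ret = foo_modeset_init(dev);	/* hypothetical: registers connectors */
	if (ret)
		return ret;

	/* Sort LVDS/eDP panels to the front of the connector list so
	 * userspace treats the internal panel as the primary display. */
	drm_helper_move_panel_connectors_to_head(dev);

	return 0;
}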
@@ -64,22 +93,21 @@ static void drm_mode_validate_flag(struct drm_connector *connector,
64 93
65/** 94/**
66 * drm_helper_probe_single_connector_modes - get complete set of display modes 95 * drm_helper_probe_single_connector_modes - get complete set of display modes
67 * @dev: DRM device 96 * @connector: connector to probe
68 * @maxX: max width for modes 97 * @maxX: max width for modes
69 * @maxY: max height for modes 98 * @maxY: max height for modes
70 * 99 *
71 * LOCKING: 100 * LOCKING:
72 * Caller must hold mode config lock. 101 * Caller must hold mode config lock.
73 * 102 *
74 * Based on @dev's mode_config layout, scan all the connectors and try to detect 103 * Based on the helper callbacks implemented by @connector, try to detect all
75 * modes on them. Modes will first be added to the connector's probed_modes 104 * valid modes. Modes will first be added to the connector's probed_modes list,
76 * list, then culled (based on validity and the @maxX, @maxY parameters) and 105 * then culled (based on validity and the @maxX, @maxY parameters) and put into
77 * put into the normal modes list. 106 * the normal modes list.
78 * 107 *
79 * Intended to be used either at bootup time or when major configuration 108 * Intended to be used as a generic implementation of the ->probe() @connector
80 * changes have occurred. 109 * callback for drivers that use the crtc helpers for output mode filtering and
81 * 110 * detection.
82 * FIXME: take into account monitor limits
83 * 111 *
84 * RETURNS: 112 * RETURNS:
85 * Number of modes found on @connector. 113 * Number of modes found on @connector.
@@ -109,9 +137,14 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
109 connector->funcs->force(connector); 137 connector->funcs->force(connector);
110 } else { 138 } else {
111 connector->status = connector->funcs->detect(connector, true); 139 connector->status = connector->funcs->detect(connector, true);
112 drm_kms_helper_poll_enable(dev);
113 } 140 }
114 141
142 /* Re-enable polling in case the global poll config changed. */
143 if (drm_kms_helper_poll != dev->mode_config.poll_running)
144 drm_kms_helper_poll_enable(dev);
145
146 dev->mode_config.poll_running = drm_kms_helper_poll;
147
115 if (connector->status == connector_status_disconnected) { 148 if (connector->status == connector_status_disconnected) {
116 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n", 149 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
117 connector->base.id, drm_get_connector_name(connector)); 150 connector->base.id, drm_get_connector_name(connector));
@@ -325,17 +358,24 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
325} 358}
326 359
327/** 360/**
328 * drm_crtc_set_mode - set a mode 361 * drm_crtc_helper_set_mode - internal helper to set a mode
329 * @crtc: CRTC to program 362 * @crtc: CRTC to program
330 * @mode: mode to use 363 * @mode: mode to use
331 * @x: width of mode 364 * @x: horizontal offset into the surface
332 * @y: height of mode 365 * @y: vertical offset into the surface
366 * @old_fb: old framebuffer, for cleanup
333 * 367 *
334 * LOCKING: 368 * LOCKING:
335 * Caller must hold mode config lock. 369 * Caller must hold mode config lock.
336 * 370 *
337 * Try to set @mode on @crtc. Give @crtc and its associated connectors a chance 371 * Try to set @mode on @crtc. Give @crtc and its associated connectors a chance
338 * to fixup or reject the mode prior to trying to set it. 372 * to fixup or reject the mode prior to trying to set it. This is an internal
373 * helper that drivers could e.g. use to update properties that require the
374 * entire output pipe to be disabled and re-enabled in a new configuration. For
375 * example for changing whether audio is enabled on an HDMI link or for changing
376 * panel fitter or dither attributes. It is also called by the
377 * drm_crtc_helper_set_config() helper function to drive the mode setting
378 * sequence.
339 * 379 *
340 * RETURNS: 380 * RETURNS:
341 * True if the mode was set successfully, or false otherwise. 381 * True if the mode was set successfully, or false otherwise.
@@ -491,20 +531,19 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
491 531
492/** 532/**
493 * drm_crtc_helper_set_config - set a new config from userspace 533 * drm_crtc_helper_set_config - set a new config from userspace
494 * @crtc: CRTC to setup 534 * @set: mode set configuration
495 * @crtc_info: user provided configuration
496 * @new_mode: new mode to set
497 * @connector_set: set of connectors for the new config
498 * @fb: new framebuffer
499 * 535 *
500 * LOCKING: 536 * LOCKING:
501 * Caller must hold mode config lock. 537 * Caller must hold mode config lock.
502 * 538 *
503 * Setup a new configuration, provided by the user in @crtc_info, and enable 539 * Setup a new configuration, provided by the upper layers (either an ioctl call
504 * it. 540 * from userspace or internally e.g. from the fbdev suppport code) in @set, and
541 * enable it. This is the main helper functions for drivers that implement
542 * kernel mode setting with the crtc helper functions and the assorted
543 * ->prepare(), ->modeset() and ->commit() helper callbacks.
505 * 544 *
506 * RETURNS: 545 * RETURNS:
507 * Zero. (FIXME) 546 * Returns 0 on success, -ERRNO on failure.
508 */ 547 */
509int drm_crtc_helper_set_config(struct drm_mode_set *set) 548int drm_crtc_helper_set_config(struct drm_mode_set *set)
510{ 549{
@@ -800,12 +839,14 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
800} 839}
801 840
802/** 841/**
803 * drm_helper_connector_dpms 842 * drm_helper_connector_dpms() - connector dpms helper implementation
804 * @connector affected connector 843 * @connector: affected connector
805 * @mode DPMS mode 844 * @mode: DPMS mode
806 * 845 *
807 * Calls the low-level connector DPMS function, then 846 * This is the main helper function provided by the crtc helper framework for
808 * calls appropriate encoder and crtc DPMS functions as well 847 * implementing the DPMS connector attribute. It computes the new desired DPMS
848 * state for all encoders and crtcs in the output mesh and calls the ->dpms()
849 * callback provided by the driver appropriately.
809 */ 850 */
810void drm_helper_connector_dpms(struct drm_connector *connector, int mode) 851void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
811{ 852{
@@ -918,6 +959,15 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
918} 959}
919EXPORT_SYMBOL(drm_helper_resume_force_mode); 960EXPORT_SYMBOL(drm_helper_resume_force_mode);
920 961
962void drm_kms_helper_hotplug_event(struct drm_device *dev)
963{
964 /* send a uevent + call fbdev */
965 drm_sysfs_hotplug_event(dev);
966 if (dev->mode_config.funcs->output_poll_changed)
967 dev->mode_config.funcs->output_poll_changed(dev);
968}
969EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
970
921#define DRM_OUTPUT_POLL_PERIOD (10*HZ) 971#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
922static void output_poll_execute(struct work_struct *work) 972static void output_poll_execute(struct work_struct *work)
923{ 973{
@@ -933,20 +983,22 @@ static void output_poll_execute(struct work_struct *work)
933 mutex_lock(&dev->mode_config.mutex); 983 mutex_lock(&dev->mode_config.mutex);
934 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 984 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
935 985
936 /* if this is HPD or polled don't check it - 986 /* Ignore forced connectors. */
937 TV out for instance */ 987 if (connector->force)
938 if (!connector->polled)
939 continue; 988 continue;
940 989
941 else if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT)) 990 /* Ignore HPD capable connectors and connectors where we don't
942 repoll = true; 991 * want any hotplug detection at all for polling. */
992 if (!connector->polled || connector->polled == DRM_CONNECTOR_POLL_HPD)
993 continue;
994
995 repoll = true;
943 996
944 old_status = connector->status; 997 old_status = connector->status;
945 /* if we are connected and don't want to poll for disconnect 998 /* if we are connected and don't want to poll for disconnect
946 skip it */ 999 skip it */
947 if (old_status == connector_status_connected && 1000 if (old_status == connector_status_connected &&
948 !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT) && 1001 !(connector->polled & DRM_CONNECTOR_POLL_DISCONNECT))
949 !(connector->polled & DRM_CONNECTOR_POLL_HPD))
950 continue; 1002 continue;
951 1003
952 connector->status = connector->funcs->detect(connector, false); 1004 connector->status = connector->funcs->detect(connector, false);
@@ -960,12 +1012,8 @@ static void output_poll_execute(struct work_struct *work)
960 1012
961 mutex_unlock(&dev->mode_config.mutex); 1013 mutex_unlock(&dev->mode_config.mutex);
962 1014
963 if (changed) { 1015 if (changed)
964 /* send a uevent + call fbdev */ 1016 drm_kms_helper_hotplug_event(dev);
965 drm_sysfs_hotplug_event(dev);
966 if (dev->mode_config.funcs->output_poll_changed)
967 dev->mode_config.funcs->output_poll_changed(dev);
968 }
969 1017
970 if (repoll) 1018 if (repoll)
971 schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD); 1019 schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
@@ -988,7 +1036,8 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
988 return; 1036 return;
989 1037
990 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 1038 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
991 if (connector->polled) 1039 if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
1040 DRM_CONNECTOR_POLL_DISCONNECT))
992 poll = true; 1041 poll = true;
993 } 1042 }
994 1043
@@ -1014,12 +1063,34 @@ EXPORT_SYMBOL(drm_kms_helper_poll_fini);
1014 1063
1015void drm_helper_hpd_irq_event(struct drm_device *dev) 1064void drm_helper_hpd_irq_event(struct drm_device *dev)
1016{ 1065{
1066 struct drm_connector *connector;
1067 enum drm_connector_status old_status;
1068 bool changed = false;
1069
1017 if (!dev->mode_config.poll_enabled) 1070 if (!dev->mode_config.poll_enabled)
1018 return; 1071 return;
1019 1072
1020 /* kill timer and schedule immediate execution, this doesn't block */ 1073 mutex_lock(&dev->mode_config.mutex);
1021 cancel_delayed_work(&dev->mode_config.output_poll_work); 1074 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1022 if (drm_kms_helper_poll) 1075
1023 schedule_delayed_work(&dev->mode_config.output_poll_work, 0); 1076 /* Only handle HPD capable connectors. */
1077 if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
1078 continue;
1079
1080 old_status = connector->status;
1081
1082 connector->status = connector->funcs->detect(connector, false);
1083 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
1084 connector->base.id,
1085 drm_get_connector_name(connector),
1086 old_status, connector->status);
1087 if (old_status != connector->status)
1088 changed = true;
1089 }
1090
1091 mutex_unlock(&dev->mode_config.mutex);
1092
1093 if (changed)
1094 drm_kms_helper_hotplug_event(dev);
1024} 1095}
1025EXPORT_SYMBOL(drm_helper_hpd_irq_event); 1096EXPORT_SYMBOL(drm_helper_hpd_irq_event);
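
With the rework above, drm_helper_hpd_irq_event() re-runs ->detect() on every DRM_CONNECTOR_POLL_HPD connector and only fires the hotplug uevent when a status actually changed. A hedged sketch of the work item a driver's HPD interrupt handler might schedule (the foo_* types are hypothetical):

struct foo_device {			/* hypothetical driver private */
	struct drm_device *dev;
	struct work_struct hotplug_work;
};

static void foo_hotplug_work_func(struct work_struct *work)
{
	struct foo_device *foo = container_of(work, struct foo_device,
					      hotplug_work);

	/* Re-detects all HPD-capable connectors and sends the uevent
	 * plus the fbdev notification only on an actual change. */
	drm_helper_hpd_irq_event(foo->dev);
}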
diff --git a/drivers/gpu/drm/drm_dp_i2c_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 7f246f212457..89e196627160 100644
--- a/drivers/gpu/drm/drm_dp_i2c_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -30,6 +30,15 @@
30#include <drm/drm_dp_helper.h> 30#include <drm/drm_dp_helper.h>
31#include <drm/drmP.h> 31#include <drm/drmP.h>
32 32
33/**
34 * DOC: dp helpers
35 *
36 * These functions contain some common logic and helpers at various abstraction
37 * levels to deal with Display Port sink devices and related things like DP aux
38 * channel transfers, EDID reading over DP aux channels, decoding certain DPCD
39 * blocks, ...
40 */
41
33/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */ 42/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
34static int 43static int
35i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode, 44i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
@@ -37,7 +46,7 @@ i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
37{ 46{
38 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; 47 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
39 int ret; 48 int ret;
40 49
41 ret = (*algo_data->aux_ch)(adapter, mode, 50 ret = (*algo_data->aux_ch)(adapter, mode,
42 write_byte, read_byte); 51 write_byte, read_byte);
43 return ret; 52 return ret;
@@ -182,7 +191,6 @@ i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
182{ 191{
183 (void) i2c_algo_dp_aux_address(adapter, 0, false); 192 (void) i2c_algo_dp_aux_address(adapter, 0, false);
184 (void) i2c_algo_dp_aux_stop(adapter, false); 193 (void) i2c_algo_dp_aux_stop(adapter, false);
185
186} 194}
187 195
188static int 196static int
@@ -194,11 +202,23 @@ i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
194 return 0; 202 return 0;
195} 203}
196 204
205/**
206 * i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper
207 * @adapter: i2c adapter to register
208 *
209 * This registers an i2c adapter that uses the dp aux channel as its underlying
210 * transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure
211 * and store it in the algo_data member of the @adapter argument. This will be
212 * used by the i2c over dp aux algorithm to drive the hardware.
213 *
214 * RETURNS:
215 * 0 on success, -ERRNO on failure.
216 */
197int 217int
198i2c_dp_aux_add_bus(struct i2c_adapter *adapter) 218i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
199{ 219{
200 int error; 220 int error;
201 221
202 error = i2c_dp_aux_prepare_bus(adapter); 222 error = i2c_dp_aux_prepare_bus(adapter);
203 if (error) 223 if (error)
204 return error; 224 return error;
@@ -206,3 +226,123 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
206 return error; 226 return error;
207} 227}
208EXPORT_SYMBOL(i2c_dp_aux_add_bus); 228EXPORT_SYMBOL(i2c_dp_aux_add_bus);
229
230/* Helpers for DP link training */
231static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
232{
233 return link_status[r - DP_LANE0_1_STATUS];
234}
235
236static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
237 int lane)
238{
239 int i = DP_LANE0_1_STATUS + (lane >> 1);
240 int s = (lane & 1) * 4;
241 u8 l = dp_link_status(link_status, i);
242 return (l >> s) & 0xf;
243}
244
245bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
246 int lane_count)
247{
248 u8 lane_align;
249 u8 lane_status;
250 int lane;
251
252 lane_align = dp_link_status(link_status,
253 DP_LANE_ALIGN_STATUS_UPDATED);
254 if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
255 return false;
256 for (lane = 0; lane < lane_count; lane++) {
257 lane_status = dp_get_lane_status(link_status, lane);
258 if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
259 return false;
260 }
261 return true;
262}
263EXPORT_SYMBOL(drm_dp_channel_eq_ok);
264
265bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
266 int lane_count)
267{
268 int lane;
269 u8 lane_status;
270
271 for (lane = 0; lane < lane_count; lane++) {
272 lane_status = dp_get_lane_status(link_status, lane);
273 if ((lane_status & DP_LANE_CR_DONE) == 0)
274 return false;
275 }
276 return true;
277}
278EXPORT_SYMBOL(drm_dp_clock_recovery_ok);
279
280u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
281 int lane)
282{
283 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
284 int s = ((lane & 1) ?
285 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
286 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
287 u8 l = dp_link_status(link_status, i);
288
289 return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
290}
291EXPORT_SYMBOL(drm_dp_get_adjust_request_voltage);
292
293u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
294 int lane)
295{
296 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
297 int s = ((lane & 1) ?
298 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
299 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
300 u8 l = dp_link_status(link_status, i);
301
302 return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
303}
304EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
305
306void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
307 if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
308 udelay(100);
309 else
310 mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
311}
312EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
313
314void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
315 if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
316 udelay(400);
317 else
318 mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
319}
320EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
321
322u8 drm_dp_link_rate_to_bw_code(int link_rate)
323{
324 switch (link_rate) {
325 case 162000:
326 default:
327 return DP_LINK_BW_1_62;
328 case 270000:
329 return DP_LINK_BW_2_7;
330 case 540000:
331 return DP_LINK_BW_5_4;
332 }
333}
334EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
335
336int drm_dp_bw_code_to_link_rate(u8 link_bw)
337{
338 switch (link_bw) {
339 case DP_LINK_BW_1_62:
340 default:
341 return 162000;
342 case DP_LINK_BW_2_7:
343 return 270000;
344 case DP_LINK_BW_5_4:
345 return 540000;
346 }
347}
348EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
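
Taken together these helpers cover the sink-side bookkeeping of link training. A sketch of a clock-recovery loop built on them, assuming hypothetical foo_* driver callbacks for the DPCD accesses and drive-setting updates:

struct foo_dp {				/* hypothetical driver state */
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	int lane_count;
};

static bool foo_link_train_cr(struct foo_dp *dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	int tries;

	for (tries = 0; tries < 5; tries++) {
		drm_dp_link_train_clock_recovery_delay(dp->dpcd);

		if (!foo_read_link_status(dp, link_status))
			return false;

		if (drm_dp_clock_recovery_ok(link_status, dp->lane_count))
			return true;

		/* Feed the sink's requested drive settings back into the
		 * transmitter before the next attempt. */
		foo_set_train(dp,
			      drm_dp_get_adjust_request_voltage(link_status, 0),
			      drm_dp_get_adjust_request_pre_emphasis(link_status, 0));
	}
	return false;
}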
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index fadcd44ff196..5a3770fbd770 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -307,12 +307,9 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
307 307
308static bool drm_edid_is_zero(u8 *in_edid, int length) 308static bool drm_edid_is_zero(u8 *in_edid, int length)
309{ 309{
310 int i; 310 if (memchr_inv(in_edid, 0, length))
311 u32 *raw_edid = (u32 *)in_edid; 311 return false;
312 312
313 for (i = 0; i < length / 4; i++)
314 if (*(raw_edid + i) != 0)
315 return false;
316 return true; 313 return true;
317} 314}
318 315
@@ -1516,6 +1513,26 @@ u8 *drm_find_cea_extension(struct edid *edid)
1516} 1513}
1517EXPORT_SYMBOL(drm_find_cea_extension); 1514EXPORT_SYMBOL(drm_find_cea_extension);
1518 1515
1516/*
1517 * Looks for a CEA mode matching the given drm_display_mode.
1518 * Returns its CEA Video ID code, or 0 if not found.
1519 */
1520u8 drm_match_cea_mode(struct drm_display_mode *to_match)
1521{
1522 struct drm_display_mode *cea_mode;
1523 u8 mode;
1524
1525 for (mode = 0; mode < drm_num_cea_modes; mode++) {
1526 cea_mode = (struct drm_display_mode *)&edid_cea_modes[mode];
1527
1528 if (drm_mode_equal(to_match, cea_mode))
1529 return mode + 1;
1530 }
1531 return 0;
1532}
1533EXPORT_SYMBOL(drm_match_cea_mode);
1534
1535
1519static int 1536static int
1520do_cea_modes (struct drm_connector *connector, u8 *db, u8 len) 1537do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
1521{ 1538{
@@ -1622,7 +1639,7 @@ parse_hdmi_vsdb(struct drm_connector *connector, const u8 *db)
1622 if (len >= 12) 1639 if (len >= 12)
1623 connector->audio_latency[1] = db[12]; 1640 connector->audio_latency[1] = db[12];
1624 1641
1625 DRM_LOG_KMS("HDMI: DVI dual %d, " 1642 DRM_DEBUG_KMS("HDMI: DVI dual %d, "
1626 "max TMDS clock %d, " 1643 "max TMDS clock %d, "
1627 "latency present %d %d, " 1644 "latency present %d %d, "
1628 "video latency %d %d, " 1645 "video latency %d %d, "
@@ -2062,3 +2079,22 @@ int drm_add_modes_noedid(struct drm_connector *connector,
2062 return num_modes; 2079 return num_modes;
2063} 2080}
2064EXPORT_SYMBOL(drm_add_modes_noedid); 2081EXPORT_SYMBOL(drm_add_modes_noedid);
2082
2083/**
2084 * drm_mode_cea_vic - return the CEA-861 VIC of a given mode
2085 * @mode: mode
2086 *
2087 * RETURNS:
2088 * The VIC number, 0 in case it's not a CEA-861 mode.
2089 */
2090uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode)
2091{
2092 uint8_t i;
2093
2094 for (i = 0; i < drm_num_cea_modes; i++)
2095 if (drm_mode_equal(mode, &edid_cea_modes[i]))
2096 return i + 1;
2097
2098 return 0;
2099}
2100EXPORT_SYMBOL(drm_mode_cea_vic);
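
Note that drm_mode_cea_vic() walks the same edid_cea_modes table as the drm_match_cea_mode() helper added earlier in this file; the only difference is the const qualifier on the mode argument, so the two could plausibly be folded together later. A sketch of the intended use when packing an AVI infoframe (the frame structure is hypothetical caller state):

static void foo_set_avi_vic(struct foo_avi_infoframe *frame,	/* hypothetical */
			    const struct drm_display_mode *adjusted_mode)
{
	u8 vic = drm_mode_cea_vic(adjusted_mode);

	if (vic)
		frame->video_code = vic;	/* CEA-861 short video descriptor */
}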
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 4d58d7e6af3f..954d175bd7fa 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -27,6 +27,8 @@
27 * Dave Airlie <airlied@linux.ie> 27 * Dave Airlie <airlied@linux.ie>
28 * Jesse Barnes <jesse.barnes@intel.com> 28 * Jesse Barnes <jesse.barnes@intel.com>
29 */ 29 */
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
30#include <linux/kernel.h> 32#include <linux/kernel.h>
31#include <linux/sysrq.h> 33#include <linux/sysrq.h>
32#include <linux/slab.h> 34#include <linux/slab.h>
@@ -43,6 +45,15 @@ MODULE_LICENSE("GPL and additional rights");
43 45
44static LIST_HEAD(kernel_fb_helper_list); 46static LIST_HEAD(kernel_fb_helper_list);
45 47
48/**
49 * DOC: fbdev helpers
50 *
51 * The fb helper functions are useful to provide an fbdev on top of a drm kernel
52 * mode setting driver. They can be used mostly independently of the crtc
53 * helper functions used by many drivers to implement the kernel mode setting
54 * interfaces.
55 */
56
46/* simple single crtc case helper function */ 57/* simple single crtc case helper function */
47int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) 58int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
48{ 59{
@@ -95,10 +106,16 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
95 if (mode->force) { 106 if (mode->force) {
96 const char *s; 107 const char *s;
97 switch (mode->force) { 108 switch (mode->force) {
98 case DRM_FORCE_OFF: s = "OFF"; break; 109 case DRM_FORCE_OFF:
99 case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break; 110 s = "OFF";
111 break;
112 case DRM_FORCE_ON_DIGITAL:
113 s = "ON - dig";
114 break;
100 default: 115 default:
101 case DRM_FORCE_ON: s = "ON"; break; 116 case DRM_FORCE_ON:
117 s = "ON";
118 break;
102 } 119 }
103 120
104 DRM_INFO("forcing %s connector %s\n", 121 DRM_INFO("forcing %s connector %s\n",
@@ -265,7 +282,7 @@ int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
265 if (panic_timeout < 0) 282 if (panic_timeout < 0)
266 return 0; 283 return 0;
267 284
268 printk(KERN_ERR "panic occurred, switching back to text console\n"); 285 pr_err("panic occurred, switching back to text console\n");
269 return drm_fb_helper_force_kernel_mode(); 286 return drm_fb_helper_force_kernel_mode();
270} 287}
271EXPORT_SYMBOL(drm_fb_helper_panic); 288EXPORT_SYMBOL(drm_fb_helper_panic);
@@ -331,7 +348,7 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
331 for (j = 0; j < fb_helper->connector_count; j++) { 348 for (j = 0; j < fb_helper->connector_count; j++) {
332 connector = fb_helper->connector_info[j]->connector; 349 connector = fb_helper->connector_info[j]->connector;
333 connector->funcs->dpms(connector, dpms_mode); 350 connector->funcs->dpms(connector, dpms_mode);
334 drm_connector_property_set_value(connector, 351 drm_object_property_set_value(&connector->base,
335 dev->mode_config.dpms_property, dpms_mode); 352 dev->mode_config.dpms_property, dpms_mode);
336 } 353 }
337 } 354 }
@@ -433,7 +450,7 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
433 if (!list_empty(&fb_helper->kernel_fb_list)) { 450 if (!list_empty(&fb_helper->kernel_fb_list)) {
434 list_del(&fb_helper->kernel_fb_list); 451 list_del(&fb_helper->kernel_fb_list);
435 if (list_empty(&kernel_fb_helper_list)) { 452 if (list_empty(&kernel_fb_helper_list)) {
436 printk(KERN_INFO "drm: unregistered panic notifier\n"); 453 pr_info("drm: unregistered panic notifier\n");
437 atomic_notifier_chain_unregister(&panic_notifier_list, 454 atomic_notifier_chain_unregister(&panic_notifier_list,
438 &paniced); 455 &paniced);
439 unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); 456 unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
@@ -724,9 +741,9 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
724 741
725 /* if driver picks 8 or 16 by default use that 742 /* if driver picks 8 or 16 by default use that
726 for both depth/bpp */ 743 for both depth/bpp */
727 if (preferred_bpp != sizes.surface_bpp) { 744 if (preferred_bpp != sizes.surface_bpp)
728 sizes.surface_depth = sizes.surface_bpp = preferred_bpp; 745 sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
729 } 746
730 /* first up get a count of crtcs now in use and new min/maxes width/heights */ 747 /* first up get a count of crtcs now in use and new min/maxes width/heights */
731 for (i = 0; i < fb_helper->connector_count; i++) { 748 for (i = 0; i < fb_helper->connector_count; i++) {
732 struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i]; 749 struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
@@ -794,18 +811,16 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
794 info = fb_helper->fbdev; 811 info = fb_helper->fbdev;
795 812
796 /* set the fb pointer */ 813 /* set the fb pointer */
797 for (i = 0; i < fb_helper->crtc_count; i++) { 814 for (i = 0; i < fb_helper->crtc_count; i++)
798 fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb; 815 fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
799 }
800 816
801 if (new_fb) { 817 if (new_fb) {
802 info->var.pixclock = 0; 818 info->var.pixclock = 0;
803 if (register_framebuffer(info) < 0) { 819 if (register_framebuffer(info) < 0)
804 return -EINVAL; 820 return -EINVAL;
805 }
806 821
807 printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, 822 dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
808 info->fix.id); 823 info->node, info->fix.id);
809 824
810 } else { 825 } else {
811 drm_fb_helper_set_par(info); 826 drm_fb_helper_set_par(info);
@@ -814,7 +829,7 @@ int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
814 /* Switch back to kernel console on panic */ 829 /* Switch back to kernel console on panic */
815 /* multi card linked list maybe */ 830 /* multi card linked list maybe */
816 if (list_empty(&kernel_fb_helper_list)) { 831 if (list_empty(&kernel_fb_helper_list)) {
817 printk(KERN_INFO "drm: registered panic notifier\n"); 832 dev_info(fb_helper->dev->dev, "registered panic notifier\n");
818 atomic_notifier_chain_register(&panic_notifier_list, 833 atomic_notifier_chain_register(&panic_notifier_list,
819 &paniced); 834 &paniced);
820 register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); 835 register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
@@ -1002,11 +1017,11 @@ static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
1002{ 1017{
1003 bool enable; 1018 bool enable;
1004 1019
1005 if (strict) { 1020 if (strict)
1006 enable = connector->status == connector_status_connected; 1021 enable = connector->status == connector_status_connected;
1007 } else { 1022 else
1008 enable = connector->status != connector_status_disconnected; 1023 enable = connector->status != connector_status_disconnected;
1009 } 1024
1010 return enable; 1025 return enable;
1011} 1026}
1012 1027
@@ -1191,9 +1206,8 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
1191 for (c = 0; c < fb_helper->crtc_count; c++) { 1206 for (c = 0; c < fb_helper->crtc_count; c++) {
1192 crtc = &fb_helper->crtc_info[c]; 1207 crtc = &fb_helper->crtc_info[c];
1193 1208
1194 if ((encoder->possible_crtcs & (1 << c)) == 0) { 1209 if ((encoder->possible_crtcs & (1 << c)) == 0)
1195 continue; 1210 continue;
1196 }
1197 1211
1198 for (o = 0; o < n; o++) 1212 for (o = 0; o < n; o++)
1199 if (best_crtcs[o] == crtc) 1213 if (best_crtcs[o] == crtc)
@@ -1246,6 +1260,11 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
1246 sizeof(struct drm_display_mode *), GFP_KERNEL); 1260 sizeof(struct drm_display_mode *), GFP_KERNEL);
1247 enabled = kcalloc(dev->mode_config.num_connector, 1261 enabled = kcalloc(dev->mode_config.num_connector,
1248 sizeof(bool), GFP_KERNEL); 1262 sizeof(bool), GFP_KERNEL);
1263 if (!crtcs || !modes || !enabled) {
1264 DRM_ERROR("Memory allocation failed\n");
1265 goto out;
1266 }
1267
1249 1268
1250 drm_enable_connectors(fb_helper, enabled); 1269 drm_enable_connectors(fb_helper, enabled);
1251 1270
@@ -1284,6 +1303,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
1284 } 1303 }
1285 } 1304 }
1286 1305
1306out:
1287 kfree(crtcs); 1307 kfree(crtcs);
1288 kfree(modes); 1308 kfree(modes);
1289 kfree(enabled); 1309 kfree(enabled);
@@ -1291,12 +1311,14 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
1291 1311
1292/** 1312/**
1293 * drm_helper_initial_config - setup a sane initial connector configuration 1313 * drm_helper_initial_config - setup a sane initial connector configuration
1294 * @dev: DRM device 1314 * @fb_helper: fb_helper device struct
1315 * @bpp_sel: bpp value to use for the framebuffer configuration
1295 * 1316 *
1296 * LOCKING: 1317 * LOCKING:
1297 * Called at init time, must take mode config lock. 1318 * Called at init time by the driver to set up the @fb_helper initial
1319 * configuration, must take the mode config lock.
1298 * 1320 *
1299 * Scan the CRTCs and connectors and try to put together an initial setup. 1321 * Scans the CRTCs and connectors and tries to put together an initial setup.
1300 * At the moment, this is a cloned configuration across all heads with 1322 * At the moment, this is a cloned configuration across all heads with
1301 * a new framebuffer object as the backing store. 1323 * a new framebuffer object as the backing store.
1302 * 1324 *
@@ -1319,9 +1341,9 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
1319 /* 1341 /*
1320 * we shouldn't end up with no modes here. 1342 * we shouldn't end up with no modes here.
1321 */ 1343 */
1322 if (count == 0) { 1344 if (count == 0)
1323 printk(KERN_INFO "No connectors reported connected with modes\n"); 1345 dev_info(fb_helper->dev->dev, "No connectors reported connected with modes\n");
1324 } 1346
1325 drm_setup_crtcs(fb_helper); 1347 drm_setup_crtcs(fb_helper);
1326 1348
1327 return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); 1349 return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
@@ -1330,7 +1352,7 @@ EXPORT_SYMBOL(drm_fb_helper_initial_config);
1330 1352
1331/** 1353/**
1332 * drm_fb_helper_hotplug_event - respond to a hotplug notification by 1354 * drm_fb_helper_hotplug_event - respond to a hotplug notification by
1333 * probing all the outputs attached to the fb. 1355 * probing all the outputs attached to the fb
1334 * @fb_helper: the drm_fb_helper 1356 * @fb_helper: the drm_fb_helper
1335 * 1357 *
1336 * LOCKING: 1358 * LOCKING:
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index c3745c4d46d8..80254547a3f8 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -67,10 +67,8 @@ void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
67 hashed_key = hash_long(key, ht->order); 67 hashed_key = hash_long(key, ht->order);
68 DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key); 68 DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
69 h_list = &ht->table[hashed_key]; 69 h_list = &ht->table[hashed_key];
70 hlist_for_each(list, h_list) { 70 hlist_for_each_entry(entry, list, h_list, head)
71 entry = hlist_entry(list, struct drm_hash_item, head);
72 DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key); 71 DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
73 }
74} 72}
75 73
76static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht, 74static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
@@ -83,8 +81,7 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
83 81
84 hashed_key = hash_long(key, ht->order); 82 hashed_key = hash_long(key, ht->order);
85 h_list = &ht->table[hashed_key]; 83 h_list = &ht->table[hashed_key];
86 hlist_for_each(list, h_list) { 84 hlist_for_each_entry(entry, list, h_list, head) {
87 entry = hlist_entry(list, struct drm_hash_item, head);
88 if (entry->key == key) 85 if (entry->key == key)
89 return list; 86 return list;
90 if (entry->key > key) 87 if (entry->key > key)
@@ -93,6 +90,24 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
93 return NULL; 90 return NULL;
94} 91}
95 92
93static struct hlist_node *drm_ht_find_key_rcu(struct drm_open_hash *ht,
94 unsigned long key)
95{
96 struct drm_hash_item *entry;
97 struct hlist_head *h_list;
98 struct hlist_node *list;
99 unsigned int hashed_key;
100
101 hashed_key = hash_long(key, ht->order);
102 h_list = &ht->table[hashed_key];
103 hlist_for_each_entry_rcu(entry, list, h_list, head) {
104 if (entry->key == key)
105 return list;
106 if (entry->key > key)
107 break;
108 }
109 return NULL;
110}
96 111
97int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item) 112int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
98{ 113{
@@ -105,8 +120,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
105 hashed_key = hash_long(key, ht->order); 120 hashed_key = hash_long(key, ht->order);
106 h_list = &ht->table[hashed_key]; 121 h_list = &ht->table[hashed_key];
107 parent = NULL; 122 parent = NULL;
108 hlist_for_each(list, h_list) { 123 hlist_for_each_entry(entry, list, h_list, head) {
109 entry = hlist_entry(list, struct drm_hash_item, head);
110 if (entry->key == key) 124 if (entry->key == key)
111 return -EINVAL; 125 return -EINVAL;
112 if (entry->key > key) 126 if (entry->key > key)
@@ -114,9 +128,9 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
114 parent = list; 128 parent = list;
115 } 129 }
116 if (parent) { 130 if (parent) {
117 hlist_add_after(parent, &item->head); 131 hlist_add_after_rcu(parent, &item->head);
118 } else { 132 } else {
119 hlist_add_head(&item->head, h_list); 133 hlist_add_head_rcu(&item->head, h_list);
120 } 134 }
121 return 0; 135 return 0;
122} 136}
@@ -156,7 +170,7 @@ int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
156{ 170{
157 struct hlist_node *list; 171 struct hlist_node *list;
158 172
159 list = drm_ht_find_key(ht, key); 173 list = drm_ht_find_key_rcu(ht, key);
160 if (!list) 174 if (!list)
161 return -EINVAL; 175 return -EINVAL;
162 176
@@ -171,7 +185,7 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
171 185
172 list = drm_ht_find_key(ht, key); 186 list = drm_ht_find_key(ht, key);
173 if (list) { 187 if (list) {
174 hlist_del_init(list); 188 hlist_del_init_rcu(list);
175 return 0; 189 return 0;
176 } 190 }
177 return -EINVAL; 191 return -EINVAL;
@@ -179,7 +193,7 @@ int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
179 193
180int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item) 194int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
181{ 195{
182 hlist_del_init(&item->head); 196 hlist_del_init_rcu(&item->head);
183 return 0; 197 return 0;
184} 198}
185EXPORT_SYMBOL(drm_ht_remove_item); 199EXPORT_SYMBOL(drm_ht_remove_item);
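
The switch to the _rcu hlist variants means lookups can now run under rcu_read_lock() while insert and remove still rely on the caller's own serialization. A minimal sketch of the read side this enables (ht and key are assumed to come from the caller, who must also guarantee the item's lifetime beyond the read section):

static struct drm_hash_item *foo_lookup(struct drm_open_hash *ht,
					unsigned long key)
{
	struct drm_hash_item *item = NULL;

	rcu_read_lock();
	if (drm_ht_find_item(ht, key, &item))
		item = NULL;	/* not found */
	rcu_read_unlock();

	/* Writers (drm_ht_insert_item()/drm_ht_remove_*) must still be
	 * serialized externally; RCU only protects this lookup. */
	return item;
}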
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 23dd97506f28..e77bd8b57df2 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -287,6 +287,9 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
287 req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0; 287 req->value |= dev->driver->prime_fd_to_handle ? DRM_PRIME_CAP_IMPORT : 0;
288 req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0; 288 req->value |= dev->driver->prime_handle_to_fd ? DRM_PRIME_CAP_EXPORT : 0;
289 break; 289 break;
290 case DRM_CAP_TIMESTAMP_MONOTONIC:
291 req->value = drm_timestamp_monotonic;
292 break;
290 default: 293 default:
291 return -EINVAL; 294 return -EINVAL;
292 } 295 }
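
From userspace the new capability is queried through the usual drmGetCap() path in libdrm; a small sketch (fd is an open DRM device node):

#include <stdint.h>
#include <xf86drm.h>

/* Returns 1 when vblank timestamps use CLOCK_MONOTONIC, 0 otherwise
 * (including on kernels that predate the capability). */
int timestamps_are_monotonic(int fd)
{
	uint64_t cap = 0;

	if (drmGetCap(fd, DRM_CAP_TIMESTAMP_MONOTONIC, &cap) < 0)
		return 0;
	return cap == 1;
}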
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 3a3d0ce891b9..19c01ca3cc76 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -106,6 +106,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
106 s64 diff_ns; 106 s64 diff_ns;
107 int vblrc; 107 int vblrc;
108 struct timeval tvblank; 108 struct timeval tvblank;
109 int count = DRM_TIMESTAMP_MAXRETRIES;
109 110
110 /* Prevent vblank irq processing while disabling vblank irqs, 111 /* Prevent vblank irq processing while disabling vblank irqs,
111 * so no updates of timestamps or count can happen after we've 112 * so no updates of timestamps or count can happen after we've
@@ -131,7 +132,10 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
131 do { 132 do {
132 dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc); 133 dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
133 vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0); 134 vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
134 } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc)); 135 } while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
136
137 if (!count)
138 vblrc = 0;
135 139
136 /* Compute time difference to stored timestamp of last vblank 140 /* Compute time difference to stored timestamp of last vblank
137 * as updated by last invocation of drm_handle_vblank() in vblank irq. 141 * as updated by last invocation of drm_handle_vblank() in vblank irq.
@@ -576,7 +580,8 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
576 unsigned flags, 580 unsigned flags,
577 struct drm_crtc *refcrtc) 581 struct drm_crtc *refcrtc)
578{ 582{
579 struct timeval stime, raw_time; 583 ktime_t stime, etime, mono_time_offset;
584 struct timeval tv_etime;
580 struct drm_display_mode *mode; 585 struct drm_display_mode *mode;
581 int vbl_status, vtotal, vdisplay; 586 int vbl_status, vtotal, vdisplay;
582 int vpos, hpos, i; 587 int vpos, hpos, i;
@@ -625,13 +630,15 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
625 preempt_disable(); 630 preempt_disable();
626 631
627 /* Get system timestamp before query. */ 632 /* Get system timestamp before query. */
628 do_gettimeofday(&stime); 633 stime = ktime_get();
629 634
630 /* Get vertical and horizontal scanout pos. vpos, hpos. */ 635 /* Get vertical and horizontal scanout pos. vpos, hpos. */
631 vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos); 636 vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
632 637
633 /* Get system timestamp after query. */ 638 /* Get system timestamp after query. */
634 do_gettimeofday(&raw_time); 639 etime = ktime_get();
640 if (!drm_timestamp_monotonic)
641 mono_time_offset = ktime_get_monotonic_offset();
635 642
636 preempt_enable(); 643 preempt_enable();
637 644
@@ -642,7 +649,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
642 return -EIO; 649 return -EIO;
643 } 650 }
644 651
645 duration_ns = timeval_to_ns(&raw_time) - timeval_to_ns(&stime); 652 duration_ns = ktime_to_ns(etime) - ktime_to_ns(stime);
646 653
647 /* Accept result with < max_error nsecs timing uncertainty. */ 654 /* Accept result with < max_error nsecs timing uncertainty. */
648 if (duration_ns <= (s64) *max_error) 655 if (duration_ns <= (s64) *max_error)
@@ -689,14 +696,20 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
689 vbl_status |= 0x8; 696 vbl_status |= 0x8;
690 } 697 }
691 698
699 if (!drm_timestamp_monotonic)
700 etime = ktime_sub(etime, mono_time_offset);
701
702 /* save this only for debugging purposes */
703 tv_etime = ktime_to_timeval(etime);
692 /* Subtract time delta from raw timestamp to get final 704 /* Subtract time delta from raw timestamp to get final
693 * vblank_time timestamp for end of vblank. 705 * vblank_time timestamp for end of vblank.
694 */ 706 */
695 *vblank_time = ns_to_timeval(timeval_to_ns(&raw_time) - delta_ns); 707 etime = ktime_sub_ns(etime, delta_ns);
708 *vblank_time = ktime_to_timeval(etime);
696 709
697 DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n", 710 DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
698 crtc, (int)vbl_status, hpos, vpos, 711 crtc, (int)vbl_status, hpos, vpos,
699 (long)raw_time.tv_sec, (long)raw_time.tv_usec, 712 (long)tv_etime.tv_sec, (long)tv_etime.tv_usec,
700 (long)vblank_time->tv_sec, (long)vblank_time->tv_usec, 713 (long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
701 (int)duration_ns/1000, i); 714 (int)duration_ns/1000, i);
702 715
@@ -708,6 +721,17 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
708} 721}
709EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos); 722EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
710 723
724static struct timeval get_drm_timestamp(void)
725{
726 ktime_t now;
727
728 now = ktime_get();
729 if (!drm_timestamp_monotonic)
730 now = ktime_sub(now, ktime_get_monotonic_offset());
731
732 return ktime_to_timeval(now);
733}
734
711/** 735/**
712 * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent 736 * drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
713 * vblank interval. 737 * vblank interval.
@@ -745,9 +769,9 @@ u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
745 } 769 }
746 770
747 /* GPU high precision timestamp query unsupported or failed. 771 /* GPU high precision timestamp query unsupported or failed.
748 * Return gettimeofday timestamp as best estimate. 772 * Return current monotonic/gettimeofday timestamp as best estimate.
749 */ 773 */
750 do_gettimeofday(tvblank); 774 *tvblank = get_drm_timestamp();
751 775
752 return 0; 776 return 0;
753} 777}
@@ -802,6 +826,47 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
802} 826}
803EXPORT_SYMBOL(drm_vblank_count_and_time); 827EXPORT_SYMBOL(drm_vblank_count_and_time);
804 828
829static void send_vblank_event(struct drm_device *dev,
830 struct drm_pending_vblank_event *e,
831 unsigned long seq, struct timeval *now)
832{
833 WARN_ON_SMP(!spin_is_locked(&dev->event_lock));
834 e->event.sequence = seq;
835 e->event.tv_sec = now->tv_sec;
836 e->event.tv_usec = now->tv_usec;
837
838 list_add_tail(&e->base.link,
839 &e->base.file_priv->event_list);
840 wake_up_interruptible(&e->base.file_priv->event_wait);
841 trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
842 e->event.sequence);
843}
844
845/**
846 * drm_send_vblank_event - helper to send vblank event after pageflip
847 * @dev: DRM device
848 * @crtc: CRTC in question
849 * @e: the event to send
850 *
851 * Updates sequence # and timestamp on event, and sends it to userspace.
852 * Caller must hold event lock.
853 */
854void drm_send_vblank_event(struct drm_device *dev, int crtc,
855 struct drm_pending_vblank_event *e)
856{
857 struct timeval now;
858 unsigned int seq;
859 if (crtc >= 0) {
860 seq = drm_vblank_count_and_time(dev, crtc, &now);
861 } else {
862 seq = 0;
863
864 now = get_drm_timestamp();
865 }
866 send_vblank_event(dev, e, seq, &now);
867}
868EXPORT_SYMBOL(drm_send_vblank_event);
869
805/** 870/**
806 * drm_update_vblank_count - update the master vblank counter 871 * drm_update_vblank_count - update the master vblank counter
807 * @dev: DRM device 872 * @dev: DRM device
@@ -936,6 +1001,13 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
936} 1001}
937EXPORT_SYMBOL(drm_vblank_put); 1002EXPORT_SYMBOL(drm_vblank_put);
938 1003
1004/**
1005 * drm_vblank_off - disable vblank events on a CRTC
1006 * @dev: DRM device
1007 * @crtc: CRTC in question
1008 *
1009 * Caller must hold event lock.
1010 */
939void drm_vblank_off(struct drm_device *dev, int crtc) 1011void drm_vblank_off(struct drm_device *dev, int crtc)
940{ 1012{
941 struct drm_pending_vblank_event *e, *t; 1013 struct drm_pending_vblank_event *e, *t;
@@ -949,22 +1021,19 @@ void drm_vblank_off(struct drm_device *dev, int crtc)
949 1021
950 /* Send any queued vblank events, lest the natives grow disquiet */ 1022 /* Send any queued vblank events, lest the natives grow disquiet */
951 seq = drm_vblank_count_and_time(dev, crtc, &now); 1023 seq = drm_vblank_count_and_time(dev, crtc, &now);
1024
1025 spin_lock(&dev->event_lock);
952 list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) { 1026 list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
953 if (e->pipe != crtc) 1027 if (e->pipe != crtc)
954 continue; 1028 continue;
955 DRM_DEBUG("Sending premature vblank event on disable: \ 1029 DRM_DEBUG("Sending premature vblank event on disable: \
956 wanted %d, current %d\n", 1030 wanted %d, current %d\n",
957 e->event.sequence, seq); 1031 e->event.sequence, seq);
958 1032 list_del(&e->base.link);
959 e->event.sequence = seq;
960 e->event.tv_sec = now.tv_sec;
961 e->event.tv_usec = now.tv_usec;
962 drm_vblank_put(dev, e->pipe); 1033 drm_vblank_put(dev, e->pipe);
963 list_move_tail(&e->base.link, &e->base.file_priv->event_list); 1034 send_vblank_event(dev, e, seq, &now);
964 wake_up_interruptible(&e->base.file_priv->event_wait);
965 trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
966 e->event.sequence);
967 } 1035 }
1036 spin_unlock(&dev->event_lock);
968 1037
969 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 1038 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
970} 1039}
@@ -1107,15 +1176,9 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
1107 1176
1108 e->event.sequence = vblwait->request.sequence; 1177 e->event.sequence = vblwait->request.sequence;
1109 if ((seq - vblwait->request.sequence) <= (1 << 23)) { 1178 if ((seq - vblwait->request.sequence) <= (1 << 23)) {
1110 e->event.sequence = seq;
1111 e->event.tv_sec = now.tv_sec;
1112 e->event.tv_usec = now.tv_usec;
1113 drm_vblank_put(dev, pipe); 1179 drm_vblank_put(dev, pipe);
1114 list_add_tail(&e->base.link, &e->base.file_priv->event_list); 1180 send_vblank_event(dev, e, seq, &now);
1115 wake_up_interruptible(&e->base.file_priv->event_wait);
1116 vblwait->reply.sequence = seq; 1181 vblwait->reply.sequence = seq;
1117 trace_drm_vblank_event_delivered(current->pid, pipe,
1118 vblwait->request.sequence);
1119 } else { 1182 } else {
1120 /* drm_handle_vblank_events will call drm_vblank_put */ 1183 /* drm_handle_vblank_events will call drm_vblank_put */
1121 list_add_tail(&e->base.link, &dev->vblank_event_list); 1184 list_add_tail(&e->base.link, &dev->vblank_event_list);
@@ -1256,14 +1319,9 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
1256 DRM_DEBUG("vblank event on %d, current %d\n", 1319 DRM_DEBUG("vblank event on %d, current %d\n",
1257 e->event.sequence, seq); 1320 e->event.sequence, seq);
1258 1321
1259 e->event.sequence = seq; 1322 list_del(&e->base.link);
1260 e->event.tv_sec = now.tv_sec;
1261 e->event.tv_usec = now.tv_usec;
1262 drm_vblank_put(dev, e->pipe); 1323 drm_vblank_put(dev, e->pipe);
1263 list_move_tail(&e->base.link, &e->base.file_priv->event_list); 1324 send_vblank_event(dev, e, seq, &now);
1264 wake_up_interruptible(&e->base.file_priv->event_wait);
1265 trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
1266 e->event.sequence);
1267 } 1325 }
1268 1326
1269 spin_unlock_irqrestore(&dev->event_lock, flags); 1327 spin_unlock_irqrestore(&dev->event_lock, flags);
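The practical effect of the ktime_get() conversion above: with timestamp_monotonic enabled, the tv_sec/tv_usec fields delivered in vblank and flip-complete events sit on CLOCK_MONOTONIC, so they can be compared against clock_gettime() without being disturbed by settimeofday() or NTP jumps. A small userspace sketch under that assumption:

#include <stdint.h>
#include <time.h>
#include <sys/time.h>

/* age of a vblank/flip event in microseconds, assuming the kernel
 * reported it on CLOCK_MONOTONIC (DRM_CAP_TIMESTAMP_MONOTONIC == 1) */
static int64_t event_age_us(const struct timeval *ev)
{
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        return (int64_t)(now.tv_sec - ev->tv_sec) * 1000000 +
               (now.tv_nsec / 1000 - ev->tv_usec);
}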
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 59450f39bf96..d8da30e90db5 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -46,7 +46,7 @@
46 * 46 *
47 * Describe @mode using DRM_DEBUG. 47 * Describe @mode using DRM_DEBUG.
48 */ 48 */
49void drm_mode_debug_printmodeline(struct drm_display_mode *mode) 49void drm_mode_debug_printmodeline(const struct drm_display_mode *mode)
50{ 50{
51 DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d " 51 DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d "
52 "0x%x 0x%x\n", 52 "0x%x 0x%x\n",
@@ -558,7 +558,7 @@ EXPORT_SYMBOL(drm_mode_list_concat);
558 * RETURNS: 558 * RETURNS:
559 * @mode->hdisplay 559 * @mode->hdisplay
560 */ 560 */
561int drm_mode_width(struct drm_display_mode *mode) 561int drm_mode_width(const struct drm_display_mode *mode)
562{ 562{
563 return mode->hdisplay; 563 return mode->hdisplay;
564 564
@@ -579,7 +579,7 @@ EXPORT_SYMBOL(drm_mode_width);
579 * RETURNS: 579 * RETURNS:
580 * @mode->vdisplay 580 * @mode->vdisplay
581 */ 581 */
582int drm_mode_height(struct drm_display_mode *mode) 582int drm_mode_height(const struct drm_display_mode *mode)
583{ 583{
584 return mode->vdisplay; 584 return mode->vdisplay;
585} 585}
@@ -768,7 +768,7 @@ EXPORT_SYMBOL(drm_mode_duplicate);
768 * RETURNS: 768 * RETURNS:
769 * True if the modes are equal, false otherwise. 769 * True if the modes are equal, false otherwise.
770 */ 770 */
771bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2) 771bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2)
772{ 772{
773 /* do clock check convert to PICOS so fb modes get matched 773 /* do clock check convert to PICOS so fb modes get matched
774 * the same */ 774 * the same */
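What the added const qualifiers buy callers, sketched below: read-only mode data, such as a static table, can now flow into these helpers without casts. The table entry is the standard 1024x768@60 timing, included only for illustration:

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static const struct drm_display_mode known_modes[] = {
        { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
                   1184, 1344, 0, 768, 771, 777, 806, 0,
                   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
};

static bool mode_is_known(const struct drm_display_mode *mode)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(known_modes); i++)
                if (drm_mode_equal(mode, &known_modes[i]))
                        return true;

        return false;
}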
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index ba33144257e5..754bc96e10c7 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -470,7 +470,7 @@ int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
470{ 470{
471 struct pci_dev *root; 471 struct pci_dev *root;
472 int pos; 472 int pos;
473 u32 lnkcap, lnkcap2; 473 u32 lnkcap = 0, lnkcap2 = 0;
474 474
475 *mask = 0; 475 *mask = 0;
476 if (!dev->pdev) 476 if (!dev->pdev)
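Why the zero-initializers above matter, sketched: the config-space reads further down in this function can fail, or the capability words can be absent, in which case lnkcap/lnkcap2 previously held stack garbage that fed the returned speed mask. Zeroed, a failed read simply yields an empty mask. Shape of the consumer, with the LNKCAP2 speed-vector bits as defined by the PCIe spec:

static u32 pcie_speed_mask_sketch(struct pci_dev *root)
{
        u32 lnkcap = 0, lnkcap2 = 0, mask = 0;
        int pos = pci_pcie_cap(root);

        if (!pos)
                return 0;

        /* either read may fail; the = 0 above keeps mask well defined */
        pci_read_config_dword(root, pos + PCI_EXP_LNKCAP, &lnkcap);
        pci_read_config_dword(root, pos + PCI_EXP_LNKCAP2, &lnkcap2);

        if (lnkcap2 & 0x02)             /* 2.5 GT/s supported */
                mask |= DRM_PCIE_SPEED_25;
        if (lnkcap2 & 0x04)             /* 5.0 GT/s supported */
                mask |= DRM_PCIE_SPEED_50;

        return mask;
}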
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index c236fd27eba6..200e104f1fa0 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -46,16 +46,24 @@ EXPORT_SYMBOL(drm_vblank_offdelay);
46unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */ 46unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
47EXPORT_SYMBOL(drm_timestamp_precision); 47EXPORT_SYMBOL(drm_timestamp_precision);
48 48
49/*
 50 * Default to using monotonic timestamps for wait-for-vblank and page-flip
 51 * completion events.
52 */
53unsigned int drm_timestamp_monotonic = 1;
54
49MODULE_AUTHOR(CORE_AUTHOR); 55MODULE_AUTHOR(CORE_AUTHOR);
50MODULE_DESCRIPTION(CORE_DESC); 56MODULE_DESCRIPTION(CORE_DESC);
51MODULE_LICENSE("GPL and additional rights"); 57MODULE_LICENSE("GPL and additional rights");
52MODULE_PARM_DESC(debug, "Enable debug output"); 58MODULE_PARM_DESC(debug, "Enable debug output");
53MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]"); 59MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
54MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]"); 60MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
61MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
55 62
56module_param_named(debug, drm_debug, int, 0600); 63module_param_named(debug, drm_debug, int, 0600);
57module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600); 64module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
58module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600); 65module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
66module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
59 67
60struct idr drm_minors_idr; 68struct idr drm_minors_idr;
61 69
@@ -221,20 +229,20 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
221 if (!file_priv->master) 229 if (!file_priv->master)
222 return -EINVAL; 230 return -EINVAL;
223 231
224 if (!file_priv->minor->master && 232 if (file_priv->minor->master)
225 file_priv->minor->master != file_priv->master) { 233 return -EINVAL;
226 mutex_lock(&dev->struct_mutex); 234
227 file_priv->minor->master = drm_master_get(file_priv->master); 235 mutex_lock(&dev->struct_mutex);
228 file_priv->is_master = 1; 236 file_priv->minor->master = drm_master_get(file_priv->master);
229 if (dev->driver->master_set) { 237 file_priv->is_master = 1;
230 ret = dev->driver->master_set(dev, file_priv, false); 238 if (dev->driver->master_set) {
231 if (unlikely(ret != 0)) { 239 ret = dev->driver->master_set(dev, file_priv, false);
232 file_priv->is_master = 0; 240 if (unlikely(ret != 0)) {
233 drm_master_put(&file_priv->minor->master); 241 file_priv->is_master = 0;
234 } 242 drm_master_put(&file_priv->minor->master);
235 } 243 }
236 mutex_unlock(&dev->struct_mutex);
237 } 244 }
245 mutex_unlock(&dev->struct_mutex);
238 246
239 return 0; 247 return 0;
240} 248}
@@ -492,10 +500,7 @@ void drm_put_dev(struct drm_device *dev)
492 drm_put_minor(&dev->primary); 500 drm_put_minor(&dev->primary);
493 501
494 list_del(&dev->driver_item); 502 list_del(&dev->driver_item);
495 if (dev->devname) { 503 kfree(dev->devname);
496 kfree(dev->devname);
497 dev->devname = NULL;
498 }
499 kfree(dev); 504 kfree(dev);
500} 505}
501EXPORT_SYMBOL(drm_put_dev); 506EXPORT_SYMBOL(drm_put_dev);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 05cd8fe062af..02296653a058 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -182,7 +182,7 @@ static ssize_t dpms_show(struct device *device,
182 uint64_t dpms_status; 182 uint64_t dpms_status;
183 int ret; 183 int ret;
184 184
185 ret = drm_connector_property_get_value(connector, 185 ret = drm_object_property_get_value(&connector->base,
186 dev->mode_config.dpms_property, 186 dev->mode_config.dpms_property,
187 &dpms_status); 187 &dpms_status);
188 if (ret) 188 if (ret)
@@ -277,7 +277,7 @@ static ssize_t subconnector_show(struct device *device,
277 return 0; 277 return 0;
278 } 278 }
279 279
280 ret = drm_connector_property_get_value(connector, prop, &subconnector); 280 ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
281 if (ret) 281 if (ret)
282 return 0; 282 return 0;
283 283
@@ -318,7 +318,7 @@ static ssize_t select_subconnector_show(struct device *device,
318 return 0; 318 return 0;
319 } 319 }
320 320
321 ret = drm_connector_property_get_value(connector, prop, &subconnector); 321 ret = drm_object_property_get_value(&connector->base, prop, &subconnector);
322 if (ret) 322 if (ret)
323 return 0; 323 return 0;
324 324
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index fc345d4ebb03..1d1f1e5e33f0 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -10,6 +10,12 @@ config DRM_EXYNOS
10 Choose this option if you have a Samsung SoC EXYNOS chipset. 10 Choose this option if you have a Samsung SoC EXYNOS chipset.
11 If M is selected the module will be called exynosdrm. 11 If M is selected the module will be called exynosdrm.
12 12
13config DRM_EXYNOS_IOMMU
14 bool "EXYNOS DRM IOMMU Support"
15 depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
16 help
 17 Choose this option if you want to use the IOMMU feature for DRM.
18
13config DRM_EXYNOS_DMABUF 19config DRM_EXYNOS_DMABUF
14 bool "EXYNOS DRM DMABUF" 20 bool "EXYNOS DRM DMABUF"
15 depends on DRM_EXYNOS 21 depends on DRM_EXYNOS
@@ -39,3 +45,27 @@ config DRM_EXYNOS_G2D
39 depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D 45 depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D
40 help 46 help
41 Choose this option if you want to use Exynos G2D for DRM. 47 Choose this option if you want to use Exynos G2D for DRM.
48
49config DRM_EXYNOS_IPP
50 bool "Exynos DRM IPP"
51 depends on DRM_EXYNOS
52 help
 53 Choose this option if you want to use the IPP feature for DRM.
54
55config DRM_EXYNOS_FIMC
56 bool "Exynos DRM FIMC"
57 depends on DRM_EXYNOS_IPP
58 help
59 Choose this option if you want to use Exynos FIMC for DRM.
60
61config DRM_EXYNOS_ROTATOR
62 bool "Exynos DRM Rotator"
63 depends on DRM_EXYNOS_IPP
64 help
65 Choose this option if you want to use Exynos Rotator for DRM.
66
67config DRM_EXYNOS_GSC
68 bool "Exynos DRM GSC"
69 depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5
70 help
71 Choose this option if you want to use Exynos GSC for DRM.
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index eb651ca8e2a8..639b49e1ec05 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -8,6 +8,7 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
8 exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \ 8 exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
9 exynos_drm_plane.o 9 exynos_drm_plane.o
10 10
11exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
11exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o 12exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
12exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o 13exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
13exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \ 14exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
@@ -15,5 +16,9 @@ exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
15 exynos_drm_hdmi.o 16 exynos_drm_hdmi.o
16exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o 17exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o
17exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o 18exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o
19exynosdrm-$(CONFIG_DRM_EXYNOS_IPP) += exynos_drm_ipp.o
20exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC) += exynos_drm_fimc.o
21exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR) += exynos_drm_rotator.o
22exynosdrm-$(CONFIG_DRM_EXYNOS_GSC) += exynos_drm_gsc.o
18 23
19obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o 24obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
index 37e6ec704e1d..bef43e0342a6 100644
--- a/drivers/gpu/drm/exynos/exynos_ddc.c
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -48,6 +48,7 @@ static struct i2c_device_id ddc_idtable[] = {
48 { }, 48 { },
49}; 49};
50 50
51#ifdef CONFIG_OF
51static struct of_device_id hdmiddc_match_types[] = { 52static struct of_device_id hdmiddc_match_types[] = {
52 { 53 {
53 .compatible = "samsung,exynos5-hdmiddc", 54 .compatible = "samsung,exynos5-hdmiddc",
@@ -55,12 +56,13 @@ static struct of_device_id hdmiddc_match_types[] = {
55 /* end node */ 56 /* end node */
56 } 57 }
57}; 58};
59#endif
58 60
59struct i2c_driver ddc_driver = { 61struct i2c_driver ddc_driver = {
60 .driver = { 62 .driver = {
61 .name = "exynos-hdmiddc", 63 .name = "exynos-hdmiddc",
62 .owner = THIS_MODULE, 64 .owner = THIS_MODULE,
63 .of_match_table = hdmiddc_match_types, 65 .of_match_table = of_match_ptr(hdmiddc_match_types),
64 }, 66 },
65 .id_table = ddc_idtable, 67 .id_table = ddc_idtable,
66 .probe = s5p_ddc_probe, 68 .probe = s5p_ddc_probe,
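The #ifdef CONFIG_OF plus of_match_ptr() pairing used above is the standard idiom for keeping non-devicetree builds working: without CONFIG_OF the macro evaluates to NULL, and guarding the table avoids a defined-but-unused warning for the now-unreferenced array. Minimal shape, with hypothetical names:

#ifdef CONFIG_OF
static const struct of_device_id sketch_match[] = {
        { .compatible = "vendor,sketch-dev" },
        { /* sentinel */ }
};
#endif

static struct platform_driver sketch_driver = {
        .driver = {
                .name           = "sketch",
                .owner          = THIS_MODULE,
                .of_match_table = of_match_ptr(sketch_match), /* NULL w/o OF */
        },
};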
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 118c117b3226..9601bad47a2e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -33,89 +33,64 @@
33static int lowlevel_buffer_allocate(struct drm_device *dev, 33static int lowlevel_buffer_allocate(struct drm_device *dev,
34 unsigned int flags, struct exynos_drm_gem_buf *buf) 34 unsigned int flags, struct exynos_drm_gem_buf *buf)
35{ 35{
36 dma_addr_t start_addr;
37 unsigned int npages, i = 0;
38 struct scatterlist *sgl;
39 int ret = 0; 36 int ret = 0;
37 enum dma_attr attr;
38 unsigned int nr_pages;
40 39
41 DRM_DEBUG_KMS("%s\n", __FILE__); 40 DRM_DEBUG_KMS("%s\n", __FILE__);
42 41
43 if (IS_NONCONTIG_BUFFER(flags)) {
44 DRM_DEBUG_KMS("not support allocation type.\n");
45 return -EINVAL;
46 }
47
48 if (buf->dma_addr) { 42 if (buf->dma_addr) {
49 DRM_DEBUG_KMS("already allocated.\n"); 43 DRM_DEBUG_KMS("already allocated.\n");
50 return 0; 44 return 0;
51 } 45 }
52 46
53 if (buf->size >= SZ_1M) { 47 init_dma_attrs(&buf->dma_attrs);
54 npages = buf->size >> SECTION_SHIFT;
55 buf->page_size = SECTION_SIZE;
56 } else if (buf->size >= SZ_64K) {
57 npages = buf->size >> 16;
58 buf->page_size = SZ_64K;
59 } else {
60 npages = buf->size >> PAGE_SHIFT;
61 buf->page_size = PAGE_SIZE;
62 }
63 48
64 buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL); 49 /*
65 if (!buf->sgt) { 50 * if EXYNOS_BO_CONTIG, fully physically contiguous memory
66 DRM_ERROR("failed to allocate sg table.\n"); 51 * region will be allocated else physically contiguous
67 return -ENOMEM; 52 * as possible.
68 } 53 */
54 if (flags & EXYNOS_BO_CONTIG)
55 dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);
69 56
70 ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL); 57 /*
71 if (ret < 0) { 58 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
72 DRM_ERROR("failed to initialize sg table.\n"); 59 * else cachable mapping.
73 kfree(buf->sgt); 60 */
74 buf->sgt = NULL; 61 if (flags & EXYNOS_BO_WC || !(flags & EXYNOS_BO_CACHABLE))
75 return -ENOMEM; 62 attr = DMA_ATTR_WRITE_COMBINE;
76 } 63 else
64 attr = DMA_ATTR_NON_CONSISTENT;
77 65
78 buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size, 66 dma_set_attr(attr, &buf->dma_attrs);
79 &buf->dma_addr, GFP_KERNEL); 67 dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);
80 if (!buf->kvaddr) {
81 DRM_ERROR("failed to allocate buffer.\n");
82 ret = -ENOMEM;
83 goto err1;
84 }
85 68
86 buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL); 69 buf->pages = dma_alloc_attrs(dev->dev, buf->size,
70 &buf->dma_addr, GFP_KERNEL, &buf->dma_attrs);
87 if (!buf->pages) { 71 if (!buf->pages) {
88 DRM_ERROR("failed to allocate pages.\n"); 72 DRM_ERROR("failed to allocate buffer.\n");
89 ret = -ENOMEM; 73 return -ENOMEM;
90 goto err2;
91 } 74 }
92 75
93 sgl = buf->sgt->sgl; 76 nr_pages = buf->size >> PAGE_SHIFT;
94 start_addr = buf->dma_addr; 77 buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
95 78 if (!buf->sgt) {
96 while (i < npages) { 79 DRM_ERROR("failed to get sg table.\n");
97 buf->pages[i] = phys_to_page(start_addr); 80 ret = -ENOMEM;
98 sg_set_page(sgl, buf->pages[i], buf->page_size, 0); 81 goto err_free_attrs;
99 sg_dma_address(sgl) = start_addr;
100 start_addr += buf->page_size;
101 sgl = sg_next(sgl);
102 i++;
103 } 82 }
104 83
105 DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n", 84 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
106 (unsigned long)buf->kvaddr,
107 (unsigned long)buf->dma_addr, 85 (unsigned long)buf->dma_addr,
108 buf->size); 86 buf->size);
109 87
110 return ret; 88 return ret;
111err2: 89
112 dma_free_writecombine(dev->dev, buf->size, buf->kvaddr, 90err_free_attrs:
113 (dma_addr_t)buf->dma_addr); 91 dma_free_attrs(dev->dev, buf->size, buf->pages,
92 (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
114 buf->dma_addr = (dma_addr_t)NULL; 93 buf->dma_addr = (dma_addr_t)NULL;
115err1:
116 sg_free_table(buf->sgt);
117 kfree(buf->sgt);
118 buf->sgt = NULL;
119 94
120 return ret; 95 return ret;
121} 96}
@@ -125,23 +100,12 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
125{ 100{
126 DRM_DEBUG_KMS("%s.\n", __FILE__); 101 DRM_DEBUG_KMS("%s.\n", __FILE__);
127 102
128 /*
129 * release only physically continuous memory and
130 * non-continuous memory would be released by exynos
131 * gem framework.
132 */
133 if (IS_NONCONTIG_BUFFER(flags)) {
134 DRM_DEBUG_KMS("not support allocation type.\n");
135 return;
136 }
137
138 if (!buf->dma_addr) { 103 if (!buf->dma_addr) {
139 DRM_DEBUG_KMS("dma_addr is invalid.\n"); 104 DRM_DEBUG_KMS("dma_addr is invalid.\n");
140 return; 105 return;
141 } 106 }
142 107
143 DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n", 108 DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
144 (unsigned long)buf->kvaddr,
145 (unsigned long)buf->dma_addr, 109 (unsigned long)buf->dma_addr,
146 buf->size); 110 buf->size);
147 111
@@ -150,11 +114,8 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
150 kfree(buf->sgt); 114 kfree(buf->sgt);
151 buf->sgt = NULL; 115 buf->sgt = NULL;
152 116
153 kfree(buf->pages); 117 dma_free_attrs(dev->dev, buf->size, buf->pages,
154 buf->pages = NULL; 118 (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
155
156 dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
157 (dma_addr_t)buf->dma_addr);
158 buf->dma_addr = (dma_addr_t)NULL; 119 buf->dma_addr = (dma_addr_t)NULL;
159} 120}
160 121
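The allocation rewrite above, reduced to one self-contained shape (the 3.7-era struct dma_attrs API; note the attrs must be kept around because dma_free_attrs() has to be called with the same set, which is why the driver stores them in buf->dma_attrs):

static void *alloc_buf_sketch(struct device *dev, size_t size,
                              dma_addr_t *dma_addr, bool contig, bool wc)
{
        struct dma_attrs attrs;

        init_dma_attrs(&attrs);

        if (contig)     /* EXYNOS_BO_CONTIG: force one physical chunk */
                dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);

        dma_set_attr(wc ? DMA_ATTR_WRITE_COMBINE : DMA_ATTR_NON_CONSISTENT,
                     &attrs);

        /* no kernel mapping: on ARM the return value is an opaque
         * cookie (a page array), not a kernel virtual address */
        dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

        return dma_alloc_attrs(dev, size, dma_addr, GFP_KERNEL, &attrs);
}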
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
index 3388e4eb4ba2..25cf16285033 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -34,12 +34,12 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
34void exynos_drm_fini_buf(struct drm_device *dev, 34void exynos_drm_fini_buf(struct drm_device *dev,
35 struct exynos_drm_gem_buf *buffer); 35 struct exynos_drm_gem_buf *buffer);
36 36
37/* allocate physical memory region and setup sgt and pages. */ 37/* allocate physical memory region and setup sgt. */
38int exynos_drm_alloc_buf(struct drm_device *dev, 38int exynos_drm_alloc_buf(struct drm_device *dev,
39 struct exynos_drm_gem_buf *buf, 39 struct exynos_drm_gem_buf *buf,
40 unsigned int flags); 40 unsigned int flags);
41 41
42/* release physical memory region, sgt and pages. */ 42/* release physical memory region, and sgt. */
43void exynos_drm_free_buf(struct drm_device *dev, 43void exynos_drm_free_buf(struct drm_device *dev,
44 unsigned int flags, 44 unsigned int flags,
45 struct exynos_drm_gem_buf *buffer); 45 struct exynos_drm_gem_buf *buffer);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index fce245f64c4f..2efa4b031d73 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -236,16 +236,21 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
236 goto out; 236 goto out;
237 } 237 }
238 238
239 spin_lock_irq(&dev->event_lock);
239 list_add_tail(&event->base.link, 240 list_add_tail(&event->base.link,
240 &dev_priv->pageflip_event_list); 241 &dev_priv->pageflip_event_list);
242 spin_unlock_irq(&dev->event_lock);
241 243
242 crtc->fb = fb; 244 crtc->fb = fb;
243 ret = exynos_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y, 245 ret = exynos_drm_crtc_mode_set_base(crtc, crtc->x, crtc->y,
244 NULL); 246 NULL);
245 if (ret) { 247 if (ret) {
246 crtc->fb = old_fb; 248 crtc->fb = old_fb;
249
250 spin_lock_irq(&dev->event_lock);
247 drm_vblank_put(dev, exynos_crtc->pipe); 251 drm_vblank_put(dev, exynos_crtc->pipe);
248 list_del(&event->base.link); 252 list_del(&event->base.link);
253 spin_unlock_irq(&dev->event_lock);
249 254
250 goto out; 255 goto out;
251 } 256 }
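The locking these hunks add, in isolation: pageflip completion runs from the vblank interrupt and walks pageflip_event_list, so every process-context manipulation of that list has to hold dev->event_lock with interrupts excluded. A sketch of the pairing the patch assumes (the completion body is elided):

static void queue_flip_event(struct drm_device *dev,
                             struct exynos_drm_private *dev_priv,
                             struct drm_pending_vblank_event *event)
{
        /* process context: the plain _irq variants suffice */
        spin_lock_irq(&dev->event_lock);
        list_add_tail(&event->base.link, &dev_priv->pageflip_event_list);
        spin_unlock_irq(&dev->event_lock);
}

static void complete_flip_events(struct drm_device *dev)
{
        unsigned long flags;

        /* may run in IRQ context: save/restore variant */
        spin_lock_irqsave(&dev->event_lock, flags);
        /* ... walk pageflip_event_list, send finished events ... */
        spin_unlock_irqrestore(&dev->event_lock, flags);
}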
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index fae1f2ec886c..61d5a8402eb8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -30,70 +30,108 @@
30 30
31#include <linux/dma-buf.h> 31#include <linux/dma-buf.h>
32 32
33static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages, 33struct exynos_drm_dmabuf_attachment {
34 unsigned int page_size) 34 struct sg_table sgt;
35 enum dma_data_direction dir;
36};
37
38static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
39 struct device *dev,
40 struct dma_buf_attachment *attach)
35{ 41{
36 struct sg_table *sgt = NULL; 42 struct exynos_drm_dmabuf_attachment *exynos_attach;
37 struct scatterlist *sgl;
38 int i, ret;
39 43
40 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); 44 exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
41 if (!sgt) 45 if (!exynos_attach)
42 goto out; 46 return -ENOMEM;
43 47
44 ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL); 48 exynos_attach->dir = DMA_NONE;
45 if (ret) 49 attach->priv = exynos_attach;
46 goto err_free_sgt;
47 50
48 if (page_size < PAGE_SIZE) 51 return 0;
49 page_size = PAGE_SIZE; 52}
50 53
51 for_each_sg(sgt->sgl, sgl, nr_pages, i) 54static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
52 sg_set_page(sgl, pages[i], page_size, 0); 55 struct dma_buf_attachment *attach)
56{
57 struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
58 struct sg_table *sgt;
53 59
54 return sgt; 60 if (!exynos_attach)
61 return;
55 62
56err_free_sgt: 63 sgt = &exynos_attach->sgt;
57 kfree(sgt); 64
58 sgt = NULL; 65 if (exynos_attach->dir != DMA_NONE)
59out: 66 dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
60 return NULL; 67 exynos_attach->dir);
68
69 sg_free_table(sgt);
70 kfree(exynos_attach);
71 attach->priv = NULL;
61} 72}
62 73
63static struct sg_table * 74static struct sg_table *
64 exynos_gem_map_dma_buf(struct dma_buf_attachment *attach, 75 exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
65 enum dma_data_direction dir) 76 enum dma_data_direction dir)
66{ 77{
78 struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
67 struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv; 79 struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
68 struct drm_device *dev = gem_obj->base.dev; 80 struct drm_device *dev = gem_obj->base.dev;
69 struct exynos_drm_gem_buf *buf; 81 struct exynos_drm_gem_buf *buf;
82 struct scatterlist *rd, *wr;
70 struct sg_table *sgt = NULL; 83 struct sg_table *sgt = NULL;
71 unsigned int npages; 84 unsigned int i;
72 int nents; 85 int nents, ret;
73 86
74 DRM_DEBUG_PRIME("%s\n", __FILE__); 87 DRM_DEBUG_PRIME("%s\n", __FILE__);
75 88
76 mutex_lock(&dev->struct_mutex); 89 if (WARN_ON(dir == DMA_NONE))
90 return ERR_PTR(-EINVAL);
91
92 /* just return current sgt if already requested. */
93 if (exynos_attach->dir == dir)
94 return &exynos_attach->sgt;
95
96 /* reattaching is not allowed. */
97 if (WARN_ON(exynos_attach->dir != DMA_NONE))
98 return ERR_PTR(-EBUSY);
77 99
78 buf = gem_obj->buffer; 100 buf = gem_obj->buffer;
101 if (!buf) {
102 DRM_ERROR("buffer is null.\n");
103 return ERR_PTR(-ENOMEM);
104 }
79 105
80 /* there should always be pages allocated. */ 106 sgt = &exynos_attach->sgt;
81 if (!buf->pages) { 107
82 DRM_ERROR("pages is null.\n"); 108 ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
83 goto err_unlock; 109 if (ret) {
110 DRM_ERROR("failed to alloc sgt.\n");
111 return ERR_PTR(-ENOMEM);
84 } 112 }
85 113
86 npages = buf->size / buf->page_size; 114 mutex_lock(&dev->struct_mutex);
87 115
88 sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size); 116 rd = buf->sgt->sgl;
89 if (!sgt) { 117 wr = sgt->sgl;
90 DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n"); 118 for (i = 0; i < sgt->orig_nents; ++i) {
119 sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
120 rd = sg_next(rd);
121 wr = sg_next(wr);
122 }
123
124 nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
125 if (!nents) {
126 DRM_ERROR("failed to map sgl with iommu.\n");
127 sgt = ERR_PTR(-EIO);
91 goto err_unlock; 128 goto err_unlock;
92 } 129 }
93 nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
94 130
95 DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n", 131 exynos_attach->dir = dir;
96 npages, buf->size, buf->page_size); 132 attach->priv = exynos_attach;
133
134 DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
97 135
98err_unlock: 136err_unlock:
99 mutex_unlock(&dev->struct_mutex); 137 mutex_unlock(&dev->struct_mutex);
@@ -104,10 +142,7 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
104 struct sg_table *sgt, 142 struct sg_table *sgt,
105 enum dma_data_direction dir) 143 enum dma_data_direction dir)
106{ 144{
107 dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); 145 /* Nothing to do. */
108 sg_free_table(sgt);
109 kfree(sgt);
110 sgt = NULL;
111} 146}
112 147
113static void exynos_dmabuf_release(struct dma_buf *dmabuf) 148static void exynos_dmabuf_release(struct dma_buf *dmabuf)
@@ -169,6 +204,8 @@ static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
169} 204}
170 205
171static struct dma_buf_ops exynos_dmabuf_ops = { 206static struct dma_buf_ops exynos_dmabuf_ops = {
207 .attach = exynos_gem_attach_dma_buf,
208 .detach = exynos_gem_detach_dma_buf,
172 .map_dma_buf = exynos_gem_map_dma_buf, 209 .map_dma_buf = exynos_gem_map_dma_buf,
173 .unmap_dma_buf = exynos_gem_unmap_dma_buf, 210 .unmap_dma_buf = exynos_gem_unmap_dma_buf,
174 .kmap = exynos_gem_dmabuf_kmap, 211 .kmap = exynos_gem_dmabuf_kmap,
@@ -196,7 +233,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
196 struct scatterlist *sgl; 233 struct scatterlist *sgl;
197 struct exynos_drm_gem_obj *exynos_gem_obj; 234 struct exynos_drm_gem_obj *exynos_gem_obj;
198 struct exynos_drm_gem_buf *buffer; 235 struct exynos_drm_gem_buf *buffer;
199 struct page *page;
200 int ret; 236 int ret;
201 237
202 DRM_DEBUG_PRIME("%s\n", __FILE__); 238 DRM_DEBUG_PRIME("%s\n", __FILE__);
@@ -233,38 +269,27 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
233 goto err_unmap_attach; 269 goto err_unmap_attach;
234 } 270 }
235 271
236 buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
237 if (!buffer->pages) {
238 DRM_ERROR("failed to allocate pages.\n");
239 ret = -ENOMEM;
240 goto err_free_buffer;
241 }
242
243 exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size); 272 exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
244 if (!exynos_gem_obj) { 273 if (!exynos_gem_obj) {
245 ret = -ENOMEM; 274 ret = -ENOMEM;
246 goto err_free_pages; 275 goto err_free_buffer;
247 } 276 }
248 277
249 sgl = sgt->sgl; 278 sgl = sgt->sgl;
250 279
251 if (sgt->nents == 1) { 280 buffer->size = dma_buf->size;
252 buffer->dma_addr = sg_dma_address(sgt->sgl); 281 buffer->dma_addr = sg_dma_address(sgl);
253 buffer->size = sg_dma_len(sgt->sgl);
254 282
283 if (sgt->nents == 1) {
255 /* always physically continuous memory if sgt->nents is 1. */ 284 /* always physically continuous memory if sgt->nents is 1. */
256 exynos_gem_obj->flags |= EXYNOS_BO_CONTIG; 285 exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
257 } else { 286 } else {
258 unsigned int i = 0; 287 /*
259 288 * this case could be CONTIG or NONCONTIG type but for now
260 buffer->dma_addr = sg_dma_address(sgl); 289 * sets NONCONTIG.
261 while (i < sgt->nents) { 290 * TODO. we have to find a way that exporter can notify
262 buffer->pages[i] = sg_page(sgl); 291 * the type of its own buffer to importer.
263 buffer->size += sg_dma_len(sgl); 292 */
264 sgl = sg_next(sgl);
265 i++;
266 }
267
268 exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG; 293 exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
269 } 294 }
270 295
@@ -277,9 +302,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
277 302
278 return &exynos_gem_obj->base; 303 return &exynos_gem_obj->base;
279 304
280err_free_pages:
281 kfree(buffer->pages);
282 buffer->pages = NULL;
283err_free_buffer: 305err_free_buffer:
284 kfree(buffer); 306 kfree(buffer);
285 buffer = NULL; 307 buffer = NULL;
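The heart of the new attach/detach scheme, isolated below: each importer gets a private copy of the exporter's sg_table (the backing pages are shared, but DMA addresses are per-device), which exynos_gem_map_dma_buf() maps once and then hands back unchanged for repeated map calls in the same direction. A sketch of just the clone step:

static int clone_sgt_sketch(struct sg_table *dst, struct sg_table *src)
{
        struct scatterlist *rd, *wr;
        unsigned int i;
        int ret;

        ret = sg_alloc_table(dst, src->orig_nents, GFP_KERNEL);
        if (ret)
                return ret;

        rd = src->sgl;
        wr = dst->sgl;
        for (i = 0; i < src->orig_nents; ++i) {
                /* copy page/length/offset only; dst acquires its own
                 * dma_address later, when dma_map_sg() runs against
                 * the importer's device */
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        return 0;
}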
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 1de7baafddd0..e0a8e8024b01 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -40,6 +40,8 @@
40#include "exynos_drm_vidi.h" 40#include "exynos_drm_vidi.h"
41#include "exynos_drm_dmabuf.h" 41#include "exynos_drm_dmabuf.h"
42#include "exynos_drm_g2d.h" 42#include "exynos_drm_g2d.h"
43#include "exynos_drm_ipp.h"
44#include "exynos_drm_iommu.h"
43 45
44#define DRIVER_NAME "exynos" 46#define DRIVER_NAME "exynos"
45#define DRIVER_DESC "Samsung SoC DRM" 47#define DRIVER_DESC "Samsung SoC DRM"
@@ -49,6 +51,9 @@
49 51
50#define VBLANK_OFF_DELAY 50000 52#define VBLANK_OFF_DELAY 50000
51 53
 54/* platform device pointer for exynos drm device. */
55static struct platform_device *exynos_drm_pdev;
56
52static int exynos_drm_load(struct drm_device *dev, unsigned long flags) 57static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
53{ 58{
54 struct exynos_drm_private *private; 59 struct exynos_drm_private *private;
@@ -66,6 +71,18 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
66 INIT_LIST_HEAD(&private->pageflip_event_list); 71 INIT_LIST_HEAD(&private->pageflip_event_list);
67 dev->dev_private = (void *)private; 72 dev->dev_private = (void *)private;
68 73
74 /*
 75 * create a mapping to manage the iommu table and set a pointer to
 76 * the iommu mapping structure in iommu_mapping of the private data.
 77 * this iommu_mapping can also be used to check whether iommu is
 78 * supported or not.
79 */
80 ret = drm_create_iommu_mapping(dev);
81 if (ret < 0) {
82 DRM_ERROR("failed to create iommu mapping.\n");
83 goto err_crtc;
84 }
85
69 drm_mode_config_init(dev); 86 drm_mode_config_init(dev);
70 87
71 /* init kms poll for handling hpd */ 88 /* init kms poll for handling hpd */
@@ -80,7 +97,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
80 for (nr = 0; nr < MAX_CRTC; nr++) { 97 for (nr = 0; nr < MAX_CRTC; nr++) {
81 ret = exynos_drm_crtc_create(dev, nr); 98 ret = exynos_drm_crtc_create(dev, nr);
82 if (ret) 99 if (ret)
83 goto err_crtc; 100 goto err_release_iommu_mapping;
84 } 101 }
85 102
86 for (nr = 0; nr < MAX_PLANE; nr++) { 103 for (nr = 0; nr < MAX_PLANE; nr++) {
@@ -89,12 +106,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
89 106
90 plane = exynos_plane_init(dev, possible_crtcs, false); 107 plane = exynos_plane_init(dev, possible_crtcs, false);
91 if (!plane) 108 if (!plane)
92 goto err_crtc; 109 goto err_release_iommu_mapping;
93 } 110 }
94 111
95 ret = drm_vblank_init(dev, MAX_CRTC); 112 ret = drm_vblank_init(dev, MAX_CRTC);
96 if (ret) 113 if (ret)
97 goto err_crtc; 114 goto err_release_iommu_mapping;
98 115
99 /* 116 /*
100 * probe sub drivers such as display controller and hdmi driver, 117 * probe sub drivers such as display controller and hdmi driver,
@@ -126,6 +143,8 @@ err_drm_device:
126 exynos_drm_device_unregister(dev); 143 exynos_drm_device_unregister(dev);
127err_vblank: 144err_vblank:
128 drm_vblank_cleanup(dev); 145 drm_vblank_cleanup(dev);
146err_release_iommu_mapping:
147 drm_release_iommu_mapping(dev);
129err_crtc: 148err_crtc:
130 drm_mode_config_cleanup(dev); 149 drm_mode_config_cleanup(dev);
131 kfree(private); 150 kfree(private);
@@ -142,6 +161,8 @@ static int exynos_drm_unload(struct drm_device *dev)
142 drm_vblank_cleanup(dev); 161 drm_vblank_cleanup(dev);
143 drm_kms_helper_poll_fini(dev); 162 drm_kms_helper_poll_fini(dev);
144 drm_mode_config_cleanup(dev); 163 drm_mode_config_cleanup(dev);
164
165 drm_release_iommu_mapping(dev);
145 kfree(dev->dev_private); 166 kfree(dev->dev_private);
146 167
147 dev->dev_private = NULL; 168 dev->dev_private = NULL;
@@ -229,6 +250,14 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
229 exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH), 250 exynos_g2d_set_cmdlist_ioctl, DRM_UNLOCKED | DRM_AUTH),
230 DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, 251 DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC,
231 exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH), 252 exynos_g2d_exec_ioctl, DRM_UNLOCKED | DRM_AUTH),
253 DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_PROPERTY,
254 exynos_drm_ipp_get_property, DRM_UNLOCKED | DRM_AUTH),
255 DRM_IOCTL_DEF_DRV(EXYNOS_IPP_SET_PROPERTY,
256 exynos_drm_ipp_set_property, DRM_UNLOCKED | DRM_AUTH),
257 DRM_IOCTL_DEF_DRV(EXYNOS_IPP_QUEUE_BUF,
258 exynos_drm_ipp_queue_buf, DRM_UNLOCKED | DRM_AUTH),
259 DRM_IOCTL_DEF_DRV(EXYNOS_IPP_CMD_CTRL,
260 exynos_drm_ipp_cmd_ctrl, DRM_UNLOCKED | DRM_AUTH),
232}; 261};
233 262
234static const struct file_operations exynos_drm_driver_fops = { 263static const struct file_operations exynos_drm_driver_fops = {
@@ -279,6 +308,7 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
279{ 308{
280 DRM_DEBUG_DRIVER("%s\n", __FILE__); 309 DRM_DEBUG_DRIVER("%s\n", __FILE__);
281 310
311 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
282 exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls); 312 exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls);
283 313
284 return drm_platform_init(&exynos_drm_driver, pdev); 314 return drm_platform_init(&exynos_drm_driver, pdev);
@@ -324,6 +354,10 @@ static int __init exynos_drm_init(void)
324 ret = platform_driver_register(&exynos_drm_common_hdmi_driver); 354 ret = platform_driver_register(&exynos_drm_common_hdmi_driver);
325 if (ret < 0) 355 if (ret < 0)
326 goto out_common_hdmi; 356 goto out_common_hdmi;
357
358 ret = exynos_platform_device_hdmi_register();
359 if (ret < 0)
360 goto out_common_hdmi_dev;
327#endif 361#endif
328 362
329#ifdef CONFIG_DRM_EXYNOS_VIDI 363#ifdef CONFIG_DRM_EXYNOS_VIDI
@@ -338,24 +372,80 @@ static int __init exynos_drm_init(void)
338 goto out_g2d; 372 goto out_g2d;
339#endif 373#endif
340 374
375#ifdef CONFIG_DRM_EXYNOS_FIMC
376 ret = platform_driver_register(&fimc_driver);
377 if (ret < 0)
378 goto out_fimc;
379#endif
380
381#ifdef CONFIG_DRM_EXYNOS_ROTATOR
382 ret = platform_driver_register(&rotator_driver);
383 if (ret < 0)
384 goto out_rotator;
385#endif
386
387#ifdef CONFIG_DRM_EXYNOS_GSC
388 ret = platform_driver_register(&gsc_driver);
389 if (ret < 0)
390 goto out_gsc;
391#endif
392
393#ifdef CONFIG_DRM_EXYNOS_IPP
394 ret = platform_driver_register(&ipp_driver);
395 if (ret < 0)
396 goto out_ipp;
397#endif
398
341 ret = platform_driver_register(&exynos_drm_platform_driver); 399 ret = platform_driver_register(&exynos_drm_platform_driver);
342 if (ret < 0) 400 if (ret < 0)
401 goto out_drm;
402
403 exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
404 NULL, 0);
405 if (IS_ERR_OR_NULL(exynos_drm_pdev)) {
406 ret = PTR_ERR(exynos_drm_pdev);
343 goto out; 407 goto out;
408 }
344 409
345 return 0; 410 return 0;
346 411
347out: 412out:
413 platform_driver_unregister(&exynos_drm_platform_driver);
414
415out_drm:
416#ifdef CONFIG_DRM_EXYNOS_IPP
417 platform_driver_unregister(&ipp_driver);
418out_ipp:
419#endif
420
421#ifdef CONFIG_DRM_EXYNOS_GSC
422 platform_driver_unregister(&gsc_driver);
423out_gsc:
424#endif
425
426#ifdef CONFIG_DRM_EXYNOS_ROTATOR
427 platform_driver_unregister(&rotator_driver);
428out_rotator:
429#endif
430
431#ifdef CONFIG_DRM_EXYNOS_FIMC
432 platform_driver_unregister(&fimc_driver);
433out_fimc:
434#endif
435
348#ifdef CONFIG_DRM_EXYNOS_G2D 436#ifdef CONFIG_DRM_EXYNOS_G2D
349 platform_driver_unregister(&g2d_driver); 437 platform_driver_unregister(&g2d_driver);
350out_g2d: 438out_g2d:
351#endif 439#endif
352 440
353#ifdef CONFIG_DRM_EXYNOS_VIDI 441#ifdef CONFIG_DRM_EXYNOS_VIDI
354out_vidi:
355 platform_driver_unregister(&vidi_driver); 442 platform_driver_unregister(&vidi_driver);
443out_vidi:
356#endif 444#endif
357 445
358#ifdef CONFIG_DRM_EXYNOS_HDMI 446#ifdef CONFIG_DRM_EXYNOS_HDMI
447 exynos_platform_device_hdmi_unregister();
448out_common_hdmi_dev:
359 platform_driver_unregister(&exynos_drm_common_hdmi_driver); 449 platform_driver_unregister(&exynos_drm_common_hdmi_driver);
360out_common_hdmi: 450out_common_hdmi:
361 platform_driver_unregister(&mixer_driver); 451 platform_driver_unregister(&mixer_driver);
@@ -375,13 +465,32 @@ static void __exit exynos_drm_exit(void)
375{ 465{
376 DRM_DEBUG_DRIVER("%s\n", __FILE__); 466 DRM_DEBUG_DRIVER("%s\n", __FILE__);
377 467
468 platform_device_unregister(exynos_drm_pdev);
469
378 platform_driver_unregister(&exynos_drm_platform_driver); 470 platform_driver_unregister(&exynos_drm_platform_driver);
379 471
472#ifdef CONFIG_DRM_EXYNOS_IPP
473 platform_driver_unregister(&ipp_driver);
474#endif
475
476#ifdef CONFIG_DRM_EXYNOS_GSC
477 platform_driver_unregister(&gsc_driver);
478#endif
479
480#ifdef CONFIG_DRM_EXYNOS_ROTATOR
481 platform_driver_unregister(&rotator_driver);
482#endif
483
484#ifdef CONFIG_DRM_EXYNOS_FIMC
485 platform_driver_unregister(&fimc_driver);
486#endif
487
380#ifdef CONFIG_DRM_EXYNOS_G2D 488#ifdef CONFIG_DRM_EXYNOS_G2D
381 platform_driver_unregister(&g2d_driver); 489 platform_driver_unregister(&g2d_driver);
382#endif 490#endif
383 491
384#ifdef CONFIG_DRM_EXYNOS_HDMI 492#ifdef CONFIG_DRM_EXYNOS_HDMI
493 exynos_platform_device_hdmi_unregister();
385 platform_driver_unregister(&exynos_drm_common_hdmi_driver); 494 platform_driver_unregister(&exynos_drm_common_hdmi_driver);
386 platform_driver_unregister(&mixer_driver); 495 platform_driver_unregister(&mixer_driver);
387 platform_driver_unregister(&hdmi_driver); 496 platform_driver_unregister(&hdmi_driver);
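The registration dance added to init/exit follows the common virtual-parent-device pattern: the DRM core has no physical resource of its own, so a platform device is fabricated at module init instead of being declared in a board file or device tree. Skeleton of the pattern (platform_device_register_simple() returns ERR_PTR() and never NULL, so plain IS_ERR() would also do; the IS_ERR_OR_NULL() above is defensive):

static struct platform_device *virt_pdev;

static int __init virt_dev_init(void)
{
        virt_pdev = platform_device_register_simple("exynos-drm", -1,
                                                    NULL, 0);
        if (IS_ERR(virt_pdev))
                return PTR_ERR(virt_pdev);

        return 0;
}

static void __exit virt_dev_exit(void)
{
        platform_device_unregister(virt_pdev);
}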
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index a34231036496..f5a97745bf93 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -74,8 +74,6 @@ enum exynos_drm_output_type {
74 * @commit: apply hardware specific overlay data to registers. 74 * @commit: apply hardware specific overlay data to registers.
75 * @enable: enable hardware specific overlay. 75 * @enable: enable hardware specific overlay.
76 * @disable: disable hardware specific overlay. 76 * @disable: disable hardware specific overlay.
77 * @wait_for_vblank: wait for vblank interrupt to make sure that
78 * hardware overlay is disabled.
79 */ 77 */
80struct exynos_drm_overlay_ops { 78struct exynos_drm_overlay_ops {
81 void (*mode_set)(struct device *subdrv_dev, 79 void (*mode_set)(struct device *subdrv_dev,
@@ -83,7 +81,6 @@ struct exynos_drm_overlay_ops {
83 void (*commit)(struct device *subdrv_dev, int zpos); 81 void (*commit)(struct device *subdrv_dev, int zpos);
84 void (*enable)(struct device *subdrv_dev, int zpos); 82 void (*enable)(struct device *subdrv_dev, int zpos);
85 void (*disable)(struct device *subdrv_dev, int zpos); 83 void (*disable)(struct device *subdrv_dev, int zpos);
86 void (*wait_for_vblank)(struct device *subdrv_dev);
87}; 84};
88 85
89/* 86/*
@@ -110,7 +107,6 @@ struct exynos_drm_overlay_ops {
110 * @pixel_format: fourcc pixel format of this overlay 107 * @pixel_format: fourcc pixel format of this overlay
111 * @dma_addr: array of bus(accessed by dma) address to the memory region 108 * @dma_addr: array of bus(accessed by dma) address to the memory region
112 * allocated for a overlay. 109 * allocated for a overlay.
113 * @vaddr: array of virtual memory addresss to this overlay.
114 * @zpos: order of overlay layer(z position). 110 * @zpos: order of overlay layer(z position).
115 * @default_win: a window to be enabled. 111 * @default_win: a window to be enabled.
116 * @color_key: color key on or off. 112 * @color_key: color key on or off.
@@ -142,7 +138,6 @@ struct exynos_drm_overlay {
142 unsigned int pitch; 138 unsigned int pitch;
143 uint32_t pixel_format; 139 uint32_t pixel_format;
144 dma_addr_t dma_addr[MAX_FB_BUFFER]; 140 dma_addr_t dma_addr[MAX_FB_BUFFER];
145 void __iomem *vaddr[MAX_FB_BUFFER];
146 int zpos; 141 int zpos;
147 142
148 bool default_win; 143 bool default_win;
@@ -186,6 +181,8 @@ struct exynos_drm_display_ops {
186 * @commit: set current hw specific display mode to hw. 181 * @commit: set current hw specific display mode to hw.
187 * @enable_vblank: specific driver callback for enabling vblank interrupt. 182 * @enable_vblank: specific driver callback for enabling vblank interrupt.
188 * @disable_vblank: specific driver callback for disabling vblank interrupt. 183 * @disable_vblank: specific driver callback for disabling vblank interrupt.
184 * @wait_for_vblank: wait for vblank interrupt to make sure that
185 * hardware overlay is updated.
189 */ 186 */
190struct exynos_drm_manager_ops { 187struct exynos_drm_manager_ops {
191 void (*dpms)(struct device *subdrv_dev, int mode); 188 void (*dpms)(struct device *subdrv_dev, int mode);
@@ -200,6 +197,7 @@ struct exynos_drm_manager_ops {
200 void (*commit)(struct device *subdrv_dev); 197 void (*commit)(struct device *subdrv_dev);
201 int (*enable_vblank)(struct device *subdrv_dev); 198 int (*enable_vblank)(struct device *subdrv_dev);
202 void (*disable_vblank)(struct device *subdrv_dev); 199 void (*disable_vblank)(struct device *subdrv_dev);
200 void (*wait_for_vblank)(struct device *subdrv_dev);
203}; 201};
204 202
205/* 203/*
@@ -231,16 +229,28 @@ struct exynos_drm_g2d_private {
231 struct device *dev; 229 struct device *dev;
232 struct list_head inuse_cmdlist; 230 struct list_head inuse_cmdlist;
233 struct list_head event_list; 231 struct list_head event_list;
234 struct list_head gem_list; 232 struct list_head userptr_list;
235 unsigned int gem_nr; 233};
234
235struct exynos_drm_ipp_private {
236 struct device *dev;
237 struct list_head event_list;
236}; 238};
237 239
238struct drm_exynos_file_private { 240struct drm_exynos_file_private {
239 struct exynos_drm_g2d_private *g2d_priv; 241 struct exynos_drm_g2d_private *g2d_priv;
242 struct exynos_drm_ipp_private *ipp_priv;
240}; 243};
241 244
242/* 245/*
243 * Exynos drm private structure. 246 * Exynos drm private structure.
247 *
248 * @da_start: start address to device address space.
249 * with iommu, device address space starts from this address
250 * otherwise default one.
251 * @da_space_size: size of device address space.
252 * if 0 then default value is used for it.
253 * @da_space_order: order to device address space.
244 */ 254 */
245struct exynos_drm_private { 255struct exynos_drm_private {
246 struct drm_fb_helper *fb_helper; 256 struct drm_fb_helper *fb_helper;
@@ -255,6 +265,10 @@ struct exynos_drm_private {
255 struct drm_crtc *crtc[MAX_CRTC]; 265 struct drm_crtc *crtc[MAX_CRTC];
256 struct drm_property *plane_zpos_property; 266 struct drm_property *plane_zpos_property;
257 struct drm_property *crtc_mode_property; 267 struct drm_property *crtc_mode_property;
268
269 unsigned long da_start;
270 unsigned long da_space_size;
271 unsigned long da_space_order;
258}; 272};
259 273
260/* 274/*
@@ -318,10 +332,25 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv);
318int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file); 332int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
319void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file); 333void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
320 334
335/*
336 * this function registers exynos drm hdmi platform device. It ensures only one
337 * instance of the device is created.
338 */
339extern int exynos_platform_device_hdmi_register(void);
340
341/*
342 * this function unregisters exynos drm hdmi platform device if it exists.
343 */
344void exynos_platform_device_hdmi_unregister(void);
345
321extern struct platform_driver fimd_driver; 346extern struct platform_driver fimd_driver;
322extern struct platform_driver hdmi_driver; 347extern struct platform_driver hdmi_driver;
323extern struct platform_driver mixer_driver; 348extern struct platform_driver mixer_driver;
324extern struct platform_driver exynos_drm_common_hdmi_driver; 349extern struct platform_driver exynos_drm_common_hdmi_driver;
325extern struct platform_driver vidi_driver; 350extern struct platform_driver vidi_driver;
326extern struct platform_driver g2d_driver; 351extern struct platform_driver g2d_driver;
352extern struct platform_driver fimc_driver;
353extern struct platform_driver rotator_driver;
354extern struct platform_driver gsc_driver;
355extern struct platform_driver ipp_driver;
327#endif 356#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index f2df06c603f7..301485215a70 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -234,6 +234,32 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
234 exynos_encoder->dpms = DRM_MODE_DPMS_ON; 234 exynos_encoder->dpms = DRM_MODE_DPMS_ON;
235} 235}
236 236
237void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb)
238{
239 struct exynos_drm_encoder *exynos_encoder;
240 struct exynos_drm_manager_ops *ops;
241 struct drm_device *dev = fb->dev;
242 struct drm_encoder *encoder;
243
244 /*
245 * make sure that overlay data are updated to real hardware
246 * for all encoders.
247 */
248 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
249 exynos_encoder = to_exynos_encoder(encoder);
250 ops = exynos_encoder->manager->ops;
251
252 /*
253 * wait for vblank interrupt
254 * - this makes sure that overlay data are updated to
255 * real hardware.
256 */
257 if (ops->wait_for_vblank)
258 ops->wait_for_vblank(exynos_encoder->manager->dev);
259 }
260}
261
262
237static void exynos_drm_encoder_disable(struct drm_encoder *encoder) 263static void exynos_drm_encoder_disable(struct drm_encoder *encoder)
238{ 264{
239 struct drm_plane *plane; 265 struct drm_plane *plane;
@@ -505,14 +531,4 @@ void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data)
505 531
506 if (overlay_ops && overlay_ops->disable) 532 if (overlay_ops && overlay_ops->disable)
507 overlay_ops->disable(manager->dev, zpos); 533 overlay_ops->disable(manager->dev, zpos);
508
509 /*
510 * wait for vblank interrupt
511 * - this makes sure that hardware overlay is disabled to avoid
512 * for the dma accesses to memory after gem buffer was released
513 * because the setting for disabling the overlay will be updated
514 * at vsync.
515 */
516 if (overlay_ops && overlay_ops->wait_for_vblank)
517 overlay_ops->wait_for_vblank(manager->dev);
518} 534}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index 6470d9ddf5a1..88bb25a2a917 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -46,5 +46,6 @@ void exynos_drm_encoder_plane_mode_set(struct drm_encoder *encoder, void *data);
46void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data); 46void exynos_drm_encoder_plane_commit(struct drm_encoder *encoder, void *data);
47void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data); 47void exynos_drm_encoder_plane_enable(struct drm_encoder *encoder, void *data);
48void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data); 48void exynos_drm_encoder_plane_disable(struct drm_encoder *encoder, void *data);
49void exynos_drm_encoder_complete_scanout(struct drm_framebuffer *fb);
49 50
50#endif 51#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 4ef4cd3f9936..5426cc5a5e8d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -30,10 +30,13 @@
30#include <drm/drm_crtc.h> 30#include <drm/drm_crtc.h>
31#include <drm/drm_crtc_helper.h> 31#include <drm/drm_crtc_helper.h>
32#include <drm/drm_fb_helper.h> 32#include <drm/drm_fb_helper.h>
33#include <uapi/drm/exynos_drm.h>
33 34
34#include "exynos_drm_drv.h" 35#include "exynos_drm_drv.h"
35#include "exynos_drm_fb.h" 36#include "exynos_drm_fb.h"
36#include "exynos_drm_gem.h" 37#include "exynos_drm_gem.h"
38#include "exynos_drm_iommu.h"
39#include "exynos_drm_encoder.h"
37 40
38#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb) 41#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb)
39 42
@@ -50,6 +53,32 @@ struct exynos_drm_fb {
50 struct exynos_drm_gem_obj *exynos_gem_obj[MAX_FB_BUFFER]; 53 struct exynos_drm_gem_obj *exynos_gem_obj[MAX_FB_BUFFER];
51}; 54};
52 55
56static int check_fb_gem_memory_type(struct drm_device *drm_dev,
57 struct exynos_drm_gem_obj *exynos_gem_obj)
58{
59 unsigned int flags;
60
61 /*
62 * if exynos drm driver supports iommu then framebuffer can use
63 * all the buffer types.
64 */
65 if (is_drm_iommu_supported(drm_dev))
66 return 0;
67
68 flags = exynos_gem_obj->flags;
69
70 /*
71 * without iommu support, physically non-contiguous memory is not
72 * supported for the framebuffer.
73 */
74 if (IS_NONCONTIG_BUFFER(flags)) {
75 DRM_ERROR("cannot use this gem memory type for fb.\n");
76 return -EINVAL;
77 }
78
79 return 0;
80}
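check_fb_gem_memory_type() above ties framebuffer eligibility to the GEM allocation flags: behind an IOMMU any buffer can scan out, otherwise only physically contiguous ones pass. A minimal allocation-side sketch under that rule; the helper name alloc_fb_gem is an assumption for illustration, while EXYNOS_BO_CONTIG/EXYNOS_BO_NONCONTIG are the uapi flags the check inspects through IS_NONCONTIG_BUFFER():

	/* sketch: pick a memory type the fb check above will accept */
	static struct exynos_drm_gem_obj *alloc_fb_gem(struct drm_device *dev,
						       unsigned long size)
	{
		unsigned int flags = EXYNOS_BO_NONCONTIG; /* scattered pages */

		/* no IOMMU: scanout needs physically contiguous memory */
		if (!is_drm_iommu_supported(dev))
			flags = EXYNOS_BO_CONTIG;

		return exynos_drm_gem_create(dev, flags, size);
	}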
81
53static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) 82static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
54{ 83{
55 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); 84 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
@@ -57,6 +86,9 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
57 86
58 DRM_DEBUG_KMS("%s\n", __FILE__); 87 DRM_DEBUG_KMS("%s\n", __FILE__);
59 88
89 /* make sure that overlay data are updated before releasing fb. */
90 exynos_drm_encoder_complete_scanout(fb);
91
60 drm_framebuffer_cleanup(fb); 92 drm_framebuffer_cleanup(fb);
61 93
62 for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) { 94 for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) {
@@ -128,23 +160,32 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
128 struct drm_gem_object *obj) 160 struct drm_gem_object *obj)
129{ 161{
130 struct exynos_drm_fb *exynos_fb; 162 struct exynos_drm_fb *exynos_fb;
163 struct exynos_drm_gem_obj *exynos_gem_obj;
131 int ret; 164 int ret;
132 165
166 exynos_gem_obj = to_exynos_gem_obj(obj);
167
168 ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
169 if (ret < 0) {
170 DRM_ERROR("cannot use this gem memory type for fb.\n");
171 return ERR_PTR(-EINVAL);
172 }
173
133 exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); 174 exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
134 if (!exynos_fb) { 175 if (!exynos_fb) {
135 DRM_ERROR("failed to allocate exynos drm framebuffer\n"); 176 DRM_ERROR("failed to allocate exynos drm framebuffer\n");
136 return ERR_PTR(-ENOMEM); 177 return ERR_PTR(-ENOMEM);
137 } 178 }
138 179
180 drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
181 exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
182
139 ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs); 183 ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
140 if (ret) { 184 if (ret) {
141 DRM_ERROR("failed to initialize framebuffer\n"); 185 DRM_ERROR("failed to initialize framebuffer\n");
142 return ERR_PTR(ret); 186 return ERR_PTR(ret);
143 } 187 }
144 188
145 drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
146 exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
147
148 return &exynos_fb->fb; 189 return &exynos_fb->fb;
149} 190}
150 191
@@ -190,9 +231,8 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
190 struct drm_mode_fb_cmd2 *mode_cmd) 231 struct drm_mode_fb_cmd2 *mode_cmd)
191{ 232{
192 struct drm_gem_object *obj; 233 struct drm_gem_object *obj;
193 struct drm_framebuffer *fb;
194 struct exynos_drm_fb *exynos_fb; 234 struct exynos_drm_fb *exynos_fb;
195 int i; 235 int i, ret;
196 236
197 DRM_DEBUG_KMS("%s\n", __FILE__); 237 DRM_DEBUG_KMS("%s\n", __FILE__);
198 238
@@ -202,30 +242,56 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
202 return ERR_PTR(-ENOENT); 242 return ERR_PTR(-ENOENT);
203 } 243 }
204 244
205 fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj); 245 exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
206 if (IS_ERR(fb)) { 246 if (!exynos_fb) {
207 drm_gem_object_unreference_unlocked(obj); 247 DRM_ERROR("failed to allocate exynos drm framebuffer\n");
208 return fb; 248 return ERR_PTR(-ENOMEM);
209 } 249 }
210 250
211 exynos_fb = to_exynos_fb(fb); 251 drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
252 exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
212 exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd); 253 exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd);
213 254
214 DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt); 255 DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
215 256
216 for (i = 1; i < exynos_fb->buf_cnt; i++) { 257 for (i = 1; i < exynos_fb->buf_cnt; i++) {
258 struct exynos_drm_gem_obj *exynos_gem_obj;
259 int ret;
260
217 obj = drm_gem_object_lookup(dev, file_priv, 261 obj = drm_gem_object_lookup(dev, file_priv,
218 mode_cmd->handles[i]); 262 mode_cmd->handles[i]);
219 if (!obj) { 263 if (!obj) {
220 DRM_ERROR("failed to lookup gem object\n"); 264 DRM_ERROR("failed to lookup gem object\n");
221 exynos_drm_fb_destroy(fb); 265 kfree(exynos_fb);
222 return ERR_PTR(-ENOENT); 266 return ERR_PTR(-ENOENT);
223 } 267 }
224 268
269 exynos_gem_obj = to_exynos_gem_obj(obj);
270
271 ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
272 if (ret < 0) {
273 DRM_ERROR("cannot use this gem memory type for fb.\n");
274 kfree(exynos_fb);
275 return ERR_PTR(ret);
276 }
277
225 exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj); 278 exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
226 } 279 }
227 280
228 return fb; 281 ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
282 if (ret) {
283 for (i = 0; i < exynos_fb->buf_cnt; i++) {
284 struct exynos_drm_gem_obj *gem_obj;
285
286 gem_obj = exynos_fb->exynos_gem_obj[i];
287 drm_gem_object_unreference_unlocked(&gem_obj->base);
288 }
289
290 kfree(exynos_fb);
291 return ERR_PTR(ret);
292 }
293
294 return &exynos_fb->fb;
229} 295}
230 296
231struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb, 297struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
@@ -243,9 +309,7 @@ struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
243 if (!buffer) 309 if (!buffer)
244 return NULL; 310 return NULL;
245 311
246 DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n", 312 DRM_DEBUG_KMS("dma_addr = 0x%lx\n", (unsigned long)buffer->dma_addr);
247 (unsigned long)buffer->kvaddr,
248 (unsigned long)buffer->dma_addr);
249 313
250 return buffer; 314 return buffer;
251} 315}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index e7466c4414cb..f433eb7533a9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -46,8 +46,38 @@ struct exynos_drm_fbdev {
46 struct exynos_drm_gem_obj *exynos_gem_obj; 46 struct exynos_drm_gem_obj *exynos_gem_obj;
47}; 47};
48 48
49static int exynos_drm_fb_mmap(struct fb_info *info,
50 struct vm_area_struct *vma)
51{
52 struct drm_fb_helper *helper = info->par;
53 struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(helper);
54 struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
55 struct exynos_drm_gem_buf *buffer = exynos_gem_obj->buffer;
56 unsigned long vm_size;
57 int ret;
58
59 DRM_DEBUG_KMS("%s\n", __func__);
60
61 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
62
63 vm_size = vma->vm_end - vma->vm_start;
64
65 if (vm_size > buffer->size)
66 return -EINVAL;
67
68 ret = dma_mmap_attrs(helper->dev->dev, vma, buffer->pages,
69 buffer->dma_addr, buffer->size, &buffer->dma_attrs);
70 if (ret < 0) {
71 DRM_ERROR("failed to mmap.\n");
72 return ret;
73 }
74
75 return 0;
76}
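The new fb_mmap handler above lets userspace map the fbdev buffer through dma_mmap_attrs() rather than the generic smem_start path. A minimal userspace sketch exercising it, assuming this driver backs /dev/fb0:

	/* userspace sketch: map the fbdev buffer served by the handler above */
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <linux/fb.h>

	int main(void)
	{
		struct fb_fix_screeninfo fix;
		void *fb;
		int fd = open("/dev/fb0", O_RDWR);

		if (fd < 0)
			return 1;
		if (ioctl(fd, FBIOGET_FSCREENINFO, &fix) < 0)
			return 1;

		/* mappings larger than the buffer are rejected with -EINVAL */
		fb = mmap(NULL, fix.smem_len, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0);
		if (fb == MAP_FAILED)
			return 1;

		munmap(fb, fix.smem_len);
		close(fd);
		return 0;
	}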
77
49static struct fb_ops exynos_drm_fb_ops = { 78static struct fb_ops exynos_drm_fb_ops = {
50 .owner = THIS_MODULE, 79 .owner = THIS_MODULE,
80 .fb_mmap = exynos_drm_fb_mmap,
51 .fb_fillrect = cfb_fillrect, 81 .fb_fillrect = cfb_fillrect,
52 .fb_copyarea = cfb_copyarea, 82 .fb_copyarea = cfb_copyarea,
53 .fb_imageblit = cfb_imageblit, 83 .fb_imageblit = cfb_imageblit,
@@ -79,6 +109,17 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
79 return -EFAULT; 109 return -EFAULT;
80 } 110 }
81 111
112 /* map pages into kernel virtual address space. */
113 if (!buffer->kvaddr) {
114 unsigned int nr_pages = buffer->size >> PAGE_SHIFT;
115 buffer->kvaddr = vmap(buffer->pages, nr_pages, VM_MAP,
116 pgprot_writecombine(PAGE_KERNEL));
117 if (!buffer->kvaddr) {
118 DRM_ERROR("failed to map pages to kernel space.\n");
119 return -EIO;
120 }
121 }
122
82 /* buffer count for framebuffer is always 1 at boot time. */ 123 /* buffer count for framebuffer is always 1 at boot time. */
83 exynos_drm_fb_set_buf_cnt(fb, 1); 124 exynos_drm_fb_set_buf_cnt(fb, 1);
84 125
@@ -87,8 +128,8 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
87 128
88 dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr; 129 dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
89 fbi->screen_base = buffer->kvaddr + offset; 130 fbi->screen_base = buffer->kvaddr + offset;
90 fbi->fix.smem_start = (unsigned long)(page_to_phys(buffer->pages[0]) + 131 fbi->fix.smem_start = (unsigned long)
91 offset); 132 (page_to_phys(sg_page(buffer->sgt->sgl)) + offset);
92 fbi->screen_size = size; 133 fbi->screen_size = size;
93 fbi->fix.smem_len = size; 134 fbi->fix.smem_len = size;
94 135
@@ -134,7 +175,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
134 exynos_gem_obj = exynos_drm_gem_create(dev, 0, size); 175 exynos_gem_obj = exynos_drm_gem_create(dev, 0, size);
135 if (IS_ERR(exynos_gem_obj)) { 176 if (IS_ERR(exynos_gem_obj)) {
136 ret = PTR_ERR(exynos_gem_obj); 177 ret = PTR_ERR(exynos_gem_obj);
137 goto out; 178 goto err_release_framebuffer;
138 } 179 }
139 180
140 exynos_fbdev->exynos_gem_obj = exynos_gem_obj; 181 exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
@@ -144,7 +185,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
144 if (IS_ERR_OR_NULL(helper->fb)) { 185 if (IS_ERR_OR_NULL(helper->fb)) {
145 DRM_ERROR("failed to create drm framebuffer.\n"); 186 DRM_ERROR("failed to create drm framebuffer.\n");
146 ret = PTR_ERR(helper->fb); 187 ret = PTR_ERR(helper->fb);
147 goto out; 188 goto err_destroy_gem;
148 } 189 }
149 190
150 helper->fbdev = fbi; 191 helper->fbdev = fbi;
@@ -156,14 +197,24 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
156 ret = fb_alloc_cmap(&fbi->cmap, 256, 0); 197 ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
157 if (ret) { 198 if (ret) {
158 DRM_ERROR("failed to allocate cmap.\n"); 199 DRM_ERROR("failed to allocate cmap.\n");
159 goto out; 200 goto err_destroy_framebuffer;
160 } 201 }
161 202
162 ret = exynos_drm_fbdev_update(helper, helper->fb); 203 ret = exynos_drm_fbdev_update(helper, helper->fb);
163 if (ret < 0) { 204 if (ret < 0)
164 fb_dealloc_cmap(&fbi->cmap); 205 goto err_dealloc_cmap;
165 goto out; 206
166 } 207 mutex_unlock(&dev->struct_mutex);
208 return ret;
209
210err_dealloc_cmap:
211 fb_dealloc_cmap(&fbi->cmap);
212err_destroy_framebuffer:
213 drm_framebuffer_cleanup(helper->fb);
214err_destroy_gem:
215 exynos_drm_gem_destroy(exynos_gem_obj);
216err_release_framebuffer:
217 framebuffer_release(fbi);
167 218
168/* 219/*
169 * if failed, all resources allocated above would be released by 220 * if failed, all resources allocated above would be released by
@@ -265,8 +316,13 @@ err_init:
265static void exynos_drm_fbdev_destroy(struct drm_device *dev, 316static void exynos_drm_fbdev_destroy(struct drm_device *dev,
266 struct drm_fb_helper *fb_helper) 317 struct drm_fb_helper *fb_helper)
267{ 318{
319 struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
320 struct exynos_drm_gem_obj *exynos_gem_obj = exynos_fbd->exynos_gem_obj;
268 struct drm_framebuffer *fb; 321 struct drm_framebuffer *fb;
269 322
323 if (exynos_gem_obj->buffer->kvaddr)
324 vunmap(exynos_gem_obj->buffer->kvaddr);
325
270 /* release drm framebuffer and real buffer */ 326 /* release drm framebuffer and real buffer */
271 if (fb_helper->fb && fb_helper->fb->funcs) { 327 if (fb_helper->fb && fb_helper->fb->funcs) {
272 fb = fb_helper->fb; 328 fb = fb_helper->fb;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
new file mode 100644
index 000000000000..61ea24296b52
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -0,0 +1,2001 @@
1/*
2 * Copyright (C) 2012 Samsung Electronics Co.Ltd
3 * Authors:
4 * Eunchul Kim <chulspro.kim@samsung.com>
5 * Jinyoung Jeon <jy0.jeon@samsung.com>
6 * Sangmin Lee <lsmin.lee@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 */
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/clk.h>
18#include <linux/pm_runtime.h>
19#include <plat/map-base.h>
20
21#include <drm/drmP.h>
22#include <drm/exynos_drm.h>
23#include "regs-fimc.h"
24#include "exynos_drm_ipp.h"
25#include "exynos_drm_fimc.h"
26
27/*
28 * FIMC stands for Fully Interactive Mobile Camera and
29 * supports image scaler/rotator and input/output DMA operations.
30 * Input DMA reads image data from memory.
31 * Output DMA writes image data to memory.
32 * FIMC supports image rotation and image effect functions.
33 *
34 * M2M operation : supports crop/scale/rotation/csc and so on.
35 * Memory ----> FIMC H/W ----> Memory.
36 * Writeback operation : supports cloned screen with FIMD.
37 * FIMD ----> FIMC H/W ----> Memory.
38 * Output operation : supports direct display using local path.
39 * Memory ----> FIMC H/W ----> FIMD.
40 */
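For orientation, the three paths in the overview above correspond to the IPP command values dispatched in fimc_ippdrv_start() further down; the mapping is summarised here as an editorial note, not part of the commit:

	/*
	 * M2M operation       -> IPP_CMD_M2M    (memory to memory)
	 * Writeback operation -> IPP_CMD_WB     (FIMD writeback into memory)
	 * Output operation    -> IPP_CMD_OUTPUT (not handled by this version;
	 *                        fimc_ippdrv_start() returns -EINVAL for it)
	 */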
41
42/*
43 * TODO
44 * 1. check suspend/resume api if needed.
45 * 2. need to check use case platform_device_id.
46 * 3. check src/dst size, width and height.
47 * 4. add check_prepare api for correct register setup.
48 * 5. need to add supported list in prop_list.
49 * 6. check prescaler/scaler optimization.
50 */
51
52#define FIMC_MAX_DEVS 4
53#define FIMC_MAX_SRC 2
54#define FIMC_MAX_DST 32
55#define FIMC_SHFACTOR 10
56#define FIMC_BUF_STOP 1
57#define FIMC_BUF_START 2
58#define FIMC_REG_SZ 32
59#define FIMC_WIDTH_ITU_709 1280
60#define FIMC_REFRESH_MAX 60
61#define FIMC_REFRESH_MIN 12
62#define FIMC_CROP_MAX 8192
63#define FIMC_CROP_MIN 32
64#define FIMC_SCALE_MAX 4224
65#define FIMC_SCALE_MIN 32
66
67#define get_fimc_context(dev) platform_get_drvdata(to_platform_device(dev))
68#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
69 struct fimc_context, ippdrv);
70#define fimc_read(offset) readl(ctx->regs + (offset))
71#define fimc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
72
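fimc_read()/fimc_write() above wrap readl()/writel() on the mapped register window; note that both macros expand against a local variable named ctx, so they only work inside functions that declare one. Nearly every configuration helper below follows the same read-modify-write shape, sketched here with the real EXYNOS_CIGCTRL bit names:

	u32 cfg;

	cfg = fimc_read(EXYNOS_CIGCTRL);	/* readl(ctx->regs + off) */
	cfg &= ~EXYNOS_CIGCTRL_IRQ_LEVEL;	/* clear bits being changed */
	cfg |= EXYNOS_CIGCTRL_IRQ_ENABLE;	/* apply the new setting */
	fimc_write(cfg, EXYNOS_CIGCTRL);	/* writel() it back */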
73enum fimc_wb {
74 FIMC_WB_NONE,
75 FIMC_WB_A,
76 FIMC_WB_B,
77};
78
79/*
80 * A structure of scaler.
81 *
82 * @range: narrow, wide.
83 * @bypass: unused scaler path.
84 * @up_h: horizontal scale up.
85 * @up_v: vertical scale up.
86 * @hratio: horizontal ratio.
87 * @vratio: vertical ratio.
88 */
89struct fimc_scaler {
90 bool range;
91 bool bypass;
92 bool up_h;
93 bool up_v;
94 u32 hratio;
95 u32 vratio;
96};
97
98/*
99 * A structure of scaler capability.
100 *
101 * see user manual table 43-1.
102 * @in_hori: scaler input horizontal size.
103 * @bypass: scaler bypass mode.
104 * @dst_h_wo_rot: target horizontal size without output rotation.
105 * @dst_h_rot: target horizontal size with output rotation.
106 * @rl_w_wo_rot: real width without input rotation.
107 * @rl_h_rot: real height with output rotation.
108 */
109struct fimc_capability {
110 /* scaler */
111 u32 in_hori;
112 u32 bypass;
113 /* output rotator */
114 u32 dst_h_wo_rot;
115 u32 dst_h_rot;
116 /* input rotator */
117 u32 rl_w_wo_rot;
118 u32 rl_h_rot;
119};
120
121/*
122 * A structure of fimc driver data.
123 *
124 * @parent_clk: name of parent clock.
125 */
126struct fimc_driverdata {
127 char *parent_clk;
128};
129
130/*
131 * A structure of fimc context.
132 *
133 * @ippdrv: prepare initialization using ippdrv.
134 * @regs_res: register resources.
135 * @regs: memory mapped io registers.
136 * @lock: locking of operations.
137 * @sclk_fimc_clk: fimc source clock.
138 * @fimc_clk: fimc clock.
139 * @wb_clk: writeback A clock.
140 * @wb_b_clk: writeback B clock.
141 * @sc: scaler information.
142 * @odr: ordering of YUV.
143 * @ver: fimc version.
144 * @pol: polarity of writeback.
145 * @id: fimc id.
146 * @irq: irq number.
147 * @suspended: qos operations.
148 */
149struct fimc_context {
150 struct exynos_drm_ippdrv ippdrv;
151 struct resource *regs_res;
152 void __iomem *regs;
153 struct mutex lock;
154 struct clk *sclk_fimc_clk;
155 struct clk *fimc_clk;
156 struct clk *wb_clk;
157 struct clk *wb_b_clk;
158 struct fimc_scaler sc;
159 struct fimc_driverdata *ddata;
160 struct exynos_drm_ipp_pol pol;
161 int id;
162 int irq;
163 bool suspended;
164};
165
166static void fimc_sw_reset(struct fimc_context *ctx, bool pattern)
167{
168 u32 cfg;
169
170 DRM_DEBUG_KMS("%s:pattern[%d]\n", __func__, pattern);
171
172 cfg = fimc_read(EXYNOS_CISRCFMT);
173 cfg |= EXYNOS_CISRCFMT_ITU601_8BIT;
174 if (pattern)
175 cfg |= EXYNOS_CIGCTRL_TESTPATTERN_COLOR_BAR;
176
177 fimc_write(cfg, EXYNOS_CISRCFMT);
178
179 /* s/w reset */
180 cfg = fimc_read(EXYNOS_CIGCTRL);
181 cfg |= (EXYNOS_CIGCTRL_SWRST);
182 fimc_write(cfg, EXYNOS_CIGCTRL);
183
184 /* s/w reset complete */
185 cfg = fimc_read(EXYNOS_CIGCTRL);
186 cfg &= ~EXYNOS_CIGCTRL_SWRST;
187 fimc_write(cfg, EXYNOS_CIGCTRL);
188
189 /* reset sequence */
190 fimc_write(0x0, EXYNOS_CIFCNTSEQ);
191}
192
193static void fimc_set_camblk_fimd0_wb(struct fimc_context *ctx)
194{
195 u32 camblk_cfg;
196
197 DRM_DEBUG_KMS("%s\n", __func__);
198
199 camblk_cfg = readl(SYSREG_CAMERA_BLK);
200 camblk_cfg &= ~(SYSREG_FIMD0WB_DEST_MASK);
201 camblk_cfg |= ctx->id << (SYSREG_FIMD0WB_DEST_SHIFT);
202
203 writel(camblk_cfg, SYSREG_CAMERA_BLK);
204}
205
206static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb)
207{
208 u32 cfg;
209
210 DRM_DEBUG_KMS("%s:wb[%d]\n", __func__, wb);
211
212 cfg = fimc_read(EXYNOS_CIGCTRL);
213 cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK |
214 EXYNOS_CIGCTRL_SELCAM_ITU_MASK |
215 EXYNOS_CIGCTRL_SELCAM_MIPI_MASK |
216 EXYNOS_CIGCTRL_SELCAM_FIMC_MASK |
217 EXYNOS_CIGCTRL_SELWB_CAMIF_MASK |
218 EXYNOS_CIGCTRL_SELWRITEBACK_MASK);
219
220 switch (wb) {
221 case FIMC_WB_A:
222 cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_A |
223 EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
224 break;
225 case FIMC_WB_B:
226 cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_B |
227 EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK);
228 break;
229 case FIMC_WB_NONE:
230 default:
231 cfg |= (EXYNOS_CIGCTRL_SELCAM_ITU_A |
232 EXYNOS_CIGCTRL_SELWRITEBACK_A |
233 EXYNOS_CIGCTRL_SELCAM_MIPI_A |
234 EXYNOS_CIGCTRL_SELCAM_FIMC_ITU);
235 break;
236 }
237
238 fimc_write(cfg, EXYNOS_CIGCTRL);
239}
240
241static void fimc_set_polarity(struct fimc_context *ctx,
242 struct exynos_drm_ipp_pol *pol)
243{
244 u32 cfg;
245
246 DRM_DEBUG_KMS("%s:inv_pclk[%d]inv_vsync[%d]\n",
247 __func__, pol->inv_pclk, pol->inv_vsync);
248 DRM_DEBUG_KMS("%s:inv_href[%d]inv_hsync[%d]\n",
249 __func__, pol->inv_href, pol->inv_hsync);
250
251 cfg = fimc_read(EXYNOS_CIGCTRL);
252 cfg &= ~(EXYNOS_CIGCTRL_INVPOLPCLK | EXYNOS_CIGCTRL_INVPOLVSYNC |
253 EXYNOS_CIGCTRL_INVPOLHREF | EXYNOS_CIGCTRL_INVPOLHSYNC);
254
255 if (pol->inv_pclk)
256 cfg |= EXYNOS_CIGCTRL_INVPOLPCLK;
257 if (pol->inv_vsync)
258 cfg |= EXYNOS_CIGCTRL_INVPOLVSYNC;
259 if (pol->inv_href)
260 cfg |= EXYNOS_CIGCTRL_INVPOLHREF;
261 if (pol->inv_hsync)
262 cfg |= EXYNOS_CIGCTRL_INVPOLHSYNC;
263
264 fimc_write(cfg, EXYNOS_CIGCTRL);
265}
266
267static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
268{
269 u32 cfg;
270
271 DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
272
273 cfg = fimc_read(EXYNOS_CIGCTRL);
274 if (enable)
275 cfg |= EXYNOS_CIGCTRL_CAM_JPEG;
276 else
277 cfg &= ~EXYNOS_CIGCTRL_CAM_JPEG;
278
279 fimc_write(cfg, EXYNOS_CIGCTRL);
280}
281
282static void fimc_handle_irq(struct fimc_context *ctx, bool enable,
283 bool overflow, bool level)
284{
285 u32 cfg;
286
287 DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]level[%d]\n", __func__,
288 enable, overflow, level);
289
290 cfg = fimc_read(EXYNOS_CIGCTRL);
291 if (enable) {
292 cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_LEVEL);
293 cfg |= EXYNOS_CIGCTRL_IRQ_ENABLE;
294 if (overflow)
295 cfg |= EXYNOS_CIGCTRL_IRQ_OVFEN;
296 if (level)
297 cfg |= EXYNOS_CIGCTRL_IRQ_LEVEL;
298 } else
299 cfg &= ~(EXYNOS_CIGCTRL_IRQ_OVFEN | EXYNOS_CIGCTRL_IRQ_ENABLE);
300
301 fimc_write(cfg, EXYNOS_CIGCTRL);
302}
303
304static void fimc_clear_irq(struct fimc_context *ctx)
305{
306 u32 cfg;
307
308 DRM_DEBUG_KMS("%s\n", __func__);
309
310 cfg = fimc_read(EXYNOS_CIGCTRL);
311 cfg |= EXYNOS_CIGCTRL_IRQ_CLR;
312 fimc_write(cfg, EXYNOS_CIGCTRL);
313}
314
315static bool fimc_check_ovf(struct fimc_context *ctx)
316{
317 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
318 u32 cfg, status, flag;
319
320 status = fimc_read(EXYNOS_CISTATUS);
321 flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB |
322 EXYNOS_CISTATUS_OVFICR;
323
324 DRM_DEBUG_KMS("%s:flag[0x%x]\n", __func__, flag);
325
326 if (status & flag) {
327 cfg = fimc_read(EXYNOS_CIWDOFST);
328 cfg |= (EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
329 EXYNOS_CIWDOFST_CLROVFICR);
330
331 fimc_write(cfg, EXYNOS_CIWDOFST);
332
333 cfg = fimc_read(EXYNOS_CIWDOFST);
334 cfg &= ~(EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
335 EXYNOS_CIWDOFST_CLROVFICR);
336
337 fimc_write(cfg, EXYNOS_CIWDOFST);
338
339 dev_err(ippdrv->dev, "occured overflow at %d, status 0x%x.\n",
340 ctx->id, status);
341 return true;
342 }
343
344 return false;
345}
346
347static bool fimc_check_frame_end(struct fimc_context *ctx)
348{
349 u32 cfg;
350
351 cfg = fimc_read(EXYNOS_CISTATUS);
352
353 DRM_DEBUG_KMS("%s:cfg[0x%x]\n", __func__, cfg);
354
355 if (!(cfg & EXYNOS_CISTATUS_FRAMEEND))
356 return false;
357
358 cfg &= ~(EXYNOS_CISTATUS_FRAMEEND);
359 fimc_write(cfg, EXYNOS_CISTATUS);
360
361 return true;
362}
363
364static int fimc_get_buf_id(struct fimc_context *ctx)
365{
366 u32 cfg;
367 int frame_cnt, buf_id;
368
369 DRM_DEBUG_KMS("%s\n", __func__);
370
371 cfg = fimc_read(EXYNOS_CISTATUS2);
372 frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg);
373
374 if (frame_cnt == 0)
375 frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg);
376
377 DRM_DEBUG_KMS("%s:present[%d]before[%d]\n", __func__,
378 EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg),
379 EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg));
380
381 if (frame_cnt == 0) {
382 DRM_ERROR("failed to get frame count.\n");
383 return -EIO;
384 }
385
386 buf_id = frame_cnt - 1;
387 DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
388
389 return buf_id;
390}
391
392static void fimc_handle_lastend(struct fimc_context *ctx, bool enable)
393{
394 u32 cfg;
395
396 DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
397
398 cfg = fimc_read(EXYNOS_CIOCTRL);
399 if (enable)
400 cfg |= EXYNOS_CIOCTRL_LASTENDEN;
401 else
402 cfg &= ~EXYNOS_CIOCTRL_LASTENDEN;
403
404 fimc_write(cfg, EXYNOS_CIOCTRL);
405}
406
407
408static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
409{
410 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
411 u32 cfg;
412
413 DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
414
415 /* RGB */
416 cfg = fimc_read(EXYNOS_CISCCTRL);
417 cfg &= ~EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK;
418
419 switch (fmt) {
420 case DRM_FORMAT_RGB565:
421 cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565;
422 fimc_write(cfg, EXYNOS_CISCCTRL);
423 return 0;
424 case DRM_FORMAT_RGB888:
425 case DRM_FORMAT_XRGB8888:
426 cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB888;
427 fimc_write(cfg, EXYNOS_CISCCTRL);
428 return 0;
429 default:
430 /* bypass */
431 break;
432 }
433
434 /* YUV */
435 cfg = fimc_read(EXYNOS_MSCTRL);
436 cfg &= ~(EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK |
437 EXYNOS_MSCTRL_C_INT_IN_2PLANE |
438 EXYNOS_MSCTRL_ORDER422_YCBYCR);
439
440 switch (fmt) {
441 case DRM_FORMAT_YUYV:
442 cfg |= EXYNOS_MSCTRL_ORDER422_YCBYCR;
443 break;
444 case DRM_FORMAT_YVYU:
445 cfg |= EXYNOS_MSCTRL_ORDER422_YCRYCB;
446 break;
447 case DRM_FORMAT_UYVY:
448 cfg |= EXYNOS_MSCTRL_ORDER422_CBYCRY;
449 break;
450 case DRM_FORMAT_VYUY:
451 case DRM_FORMAT_YUV444:
452 cfg |= EXYNOS_MSCTRL_ORDER422_CRYCBY;
453 break;
454 case DRM_FORMAT_NV21:
455 case DRM_FORMAT_NV61:
456 cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CRCB |
457 EXYNOS_MSCTRL_C_INT_IN_2PLANE);
458 break;
459 case DRM_FORMAT_YUV422:
460 case DRM_FORMAT_YUV420:
461 case DRM_FORMAT_YVU420:
462 cfg |= EXYNOS_MSCTRL_C_INT_IN_3PLANE;
463 break;
464 case DRM_FORMAT_NV12:
465 case DRM_FORMAT_NV12MT:
466 case DRM_FORMAT_NV16:
467 cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CBCR |
468 EXYNOS_MSCTRL_C_INT_IN_2PLANE);
469 break;
470 default:
471 dev_err(ippdrv->dev, "inavlid source yuv order 0x%x.\n", fmt);
472 return -EINVAL;
473 }
474
475 fimc_write(cfg, EXYNOS_MSCTRL);
476
477 return 0;
478}
479
480static int fimc_src_set_fmt(struct device *dev, u32 fmt)
481{
482 struct fimc_context *ctx = get_fimc_context(dev);
483 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
484 u32 cfg;
485
486 DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
487
488 cfg = fimc_read(EXYNOS_MSCTRL);
489 cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB;
490
491 switch (fmt) {
492 case DRM_FORMAT_RGB565:
493 case DRM_FORMAT_RGB888:
494 case DRM_FORMAT_XRGB8888:
495 cfg |= EXYNOS_MSCTRL_INFORMAT_RGB;
496 break;
497 case DRM_FORMAT_YUV444:
498 cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
499 break;
500 case DRM_FORMAT_YUYV:
501 case DRM_FORMAT_YVYU:
502 case DRM_FORMAT_UYVY:
503 case DRM_FORMAT_VYUY:
504 cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE;
505 break;
506 case DRM_FORMAT_NV16:
507 case DRM_FORMAT_NV61:
508 case DRM_FORMAT_YUV422:
509 cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR422;
510 break;
511 case DRM_FORMAT_YUV420:
512 case DRM_FORMAT_YVU420:
513 case DRM_FORMAT_NV12:
514 case DRM_FORMAT_NV21:
515 case DRM_FORMAT_NV12MT:
516 cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420;
517 break;
518 default:
519 dev_err(ippdrv->dev, "inavlid source format 0x%x.\n", fmt);
520 return -EINVAL;
521 }
522
523 fimc_write(cfg, EXYNOS_MSCTRL);
524
525 cfg = fimc_read(EXYNOS_CIDMAPARAM);
526 cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK;
527
528 if (fmt == DRM_FORMAT_NV12MT)
529 cfg |= EXYNOS_CIDMAPARAM_R_MODE_64X32;
530 else
531 cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR;
532
533 fimc_write(cfg, EXYNOS_CIDMAPARAM);
534
535 return fimc_src_set_fmt_order(ctx, fmt);
536}
537
538static int fimc_src_set_transf(struct device *dev,
539 enum drm_exynos_degree degree,
540 enum drm_exynos_flip flip, bool *swap)
541{
542 struct fimc_context *ctx = get_fimc_context(dev);
543 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
544 u32 cfg1, cfg2;
545
546 DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
547 degree, flip);
548
549 cfg1 = fimc_read(EXYNOS_MSCTRL);
550 cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR |
551 EXYNOS_MSCTRL_FLIP_Y_MIRROR);
552
553 cfg2 = fimc_read(EXYNOS_CITRGFMT);
554 cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
555
556 switch (degree) {
557 case EXYNOS_DRM_DEGREE_0:
558 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
559 cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
560 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
561 cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
562 break;
563 case EXYNOS_DRM_DEGREE_90:
564 cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
565 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
566 cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR;
567 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
568 cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR;
569 break;
570 case EXYNOS_DRM_DEGREE_180:
571 cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
572 EXYNOS_MSCTRL_FLIP_Y_MIRROR);
573 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
574 cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
575 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
576 cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
577 break;
578 case EXYNOS_DRM_DEGREE_270:
579 cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR |
580 EXYNOS_MSCTRL_FLIP_Y_MIRROR);
581 cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE;
582 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
583 cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR;
584 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
585 cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR;
586 break;
587 default:
588 dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
589 return -EINVAL;
590 }
591
592 fimc_write(cfg1, EXYNOS_MSCTRL);
593 fimc_write(cfg2, EXYNOS_CITRGFMT);
594 *swap = (cfg2 & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) ? 1 : 0;
595
596 return 0;
597}
598
599static int fimc_set_window(struct fimc_context *ctx,
600 struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
601{
602 u32 cfg, h1, h2, v1, v2;
603
604 /* cropped image */
605 h1 = pos->x;
606 h2 = sz->hsize - pos->w - pos->x;
607 v1 = pos->y;
608 v2 = sz->vsize - pos->h - pos->y;
609
610 DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
611 __func__, pos->x, pos->y, pos->w, pos->h, sz->hsize, sz->vsize);
612 DRM_DEBUG_KMS("%s:h1[%d]h2[%d]v1[%d]v2[%d]\n", __func__,
613 h1, h2, v1, v2);
614
615 /*
616 * set window offset 1, 2 size
617 * check figure 43-21 in user manual
618 */
619 cfg = fimc_read(EXYNOS_CIWDOFST);
620 cfg &= ~(EXYNOS_CIWDOFST_WINHOROFST_MASK |
621 EXYNOS_CIWDOFST_WINVEROFST_MASK);
622 cfg |= (EXYNOS_CIWDOFST_WINHOROFST(h1) |
623 EXYNOS_CIWDOFST_WINVEROFST(v1));
624 cfg |= EXYNOS_CIWDOFST_WINOFSEN;
625 fimc_write(cfg, EXYNOS_CIWDOFST);
626
627 cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) |
628 EXYNOS_CIWDOFST2_WINVEROFST2(v2));
629 fimc_write(cfg, EXYNOS_CIWDOFST2);
630
631 return 0;
632}
633
634static int fimc_src_set_size(struct device *dev, int swap,
635 struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
636{
637 struct fimc_context *ctx = get_fimc_context(dev);
638 struct drm_exynos_pos img_pos = *pos;
639 struct drm_exynos_sz img_sz = *sz;
640 u32 cfg;
641
642 DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
643 __func__, swap, sz->hsize, sz->vsize);
644
645 /* original size */
646 cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) |
647 EXYNOS_ORGISIZE_VERTICAL(img_sz.vsize));
648
649 fimc_write(cfg, EXYNOS_ORGISIZE);
650
651 DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n", __func__,
652 pos->x, pos->y, pos->w, pos->h);
653
654 if (swap) {
655 img_pos.w = pos->h;
656 img_pos.h = pos->w;
657 img_sz.hsize = sz->vsize;
658 img_sz.vsize = sz->hsize;
659 }
660
661 /* set input DMA image size */
662 cfg = fimc_read(EXYNOS_CIREAL_ISIZE);
663 cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK |
664 EXYNOS_CIREAL_ISIZE_WIDTH_MASK);
665 cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(img_pos.w) |
666 EXYNOS_CIREAL_ISIZE_HEIGHT(img_pos.h));
667 fimc_write(cfg, EXYNOS_CIREAL_ISIZE);
668
669 /*
670 * set input FIFO image size
671 * for now, we support only ITU601 8 bit mode
672 */
673 cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
674 EXYNOS_CISRCFMT_SOURCEHSIZE(img_sz.hsize) |
675 EXYNOS_CISRCFMT_SOURCEVSIZE(img_sz.vsize));
676 fimc_write(cfg, EXYNOS_CISRCFMT);
677
678 /* offset Y(RGB), Cb, Cr */
679 cfg = (EXYNOS_CIIYOFF_HORIZONTAL(img_pos.x) |
680 EXYNOS_CIIYOFF_VERTICAL(img_pos.y));
681 fimc_write(cfg, EXYNOS_CIIYOFF);
682 cfg = (EXYNOS_CIICBOFF_HORIZONTAL(img_pos.x) |
683 EXYNOS_CIICBOFF_VERTICAL(img_pos.y));
684 fimc_write(cfg, EXYNOS_CIICBOFF);
685 cfg = (EXYNOS_CIICROFF_HORIZONTAL(img_pos.x) |
686 EXYNOS_CIICROFF_VERTICAL(img_pos.y));
687 fimc_write(cfg, EXYNOS_CIICROFF);
688
689 return fimc_set_window(ctx, &img_pos, &img_sz);
690}
691
692static int fimc_src_set_addr(struct device *dev,
693 struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
694 enum drm_exynos_ipp_buf_type buf_type)
695{
696 struct fimc_context *ctx = get_fimc_context(dev);
697 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
698 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
699 struct drm_exynos_ipp_property *property;
700 struct drm_exynos_ipp_config *config;
701
702 if (!c_node) {
703 DRM_ERROR("failed to get c_node.\n");
704 return -EINVAL;
705 }
706
707 property = &c_node->property;
708 if (!property) {
709 DRM_ERROR("failed to get property.\n");
710 return -EINVAL;
711 }
712
713 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
714 property->prop_id, buf_id, buf_type);
715
716 if (buf_id > FIMC_MAX_SRC) {
717 dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
718 return -ENOMEM;
719 }
720
721 /* address register set */
722 switch (buf_type) {
723 case IPP_BUF_ENQUEUE:
724 config = &property->config[EXYNOS_DRM_OPS_SRC];
725 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
726 EXYNOS_CIIYSA(buf_id));
727
728 if (config->fmt == DRM_FORMAT_YVU420) {
729 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
730 EXYNOS_CIICBSA(buf_id));
731 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
732 EXYNOS_CIICRSA(buf_id));
733 } else {
734 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
735 EXYNOS_CIICBSA(buf_id));
736 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
737 EXYNOS_CIICRSA(buf_id));
738 }
739 break;
740 case IPP_BUF_DEQUEUE:
741 fimc_write(0x0, EXYNOS_CIIYSA(buf_id));
742 fimc_write(0x0, EXYNOS_CIICBSA(buf_id));
743 fimc_write(0x0, EXYNOS_CIICRSA(buf_id));
744 break;
745 default:
746 /* bypass */
747 break;
748 }
749
750 return 0;
751}
752
753static struct exynos_drm_ipp_ops fimc_src_ops = {
754 .set_fmt = fimc_src_set_fmt,
755 .set_transf = fimc_src_set_transf,
756 .set_size = fimc_src_set_size,
757 .set_addr = fimc_src_set_addr,
758};
759
760static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
761{
762 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
763 u32 cfg;
764
765 DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
766
767 /* RGB */
768 cfg = fimc_read(EXYNOS_CISCCTRL);
769 cfg &= ~EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK;
770
771 switch (fmt) {
772 case DRM_FORMAT_RGB565:
773 cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565;
774 fimc_write(cfg, EXYNOS_CISCCTRL);
775 return 0;
776 case DRM_FORMAT_RGB888:
777 cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888;
778 fimc_write(cfg, EXYNOS_CISCCTRL);
779 return 0;
780 case DRM_FORMAT_XRGB8888:
781 cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 |
782 EXYNOS_CISCCTRL_EXTRGB_EXTENSION);
783 fimc_write(cfg, EXYNOS_CISCCTRL);
784 break;
785 default:
786 /* bypass */
787 break;
788 }
789
790 /* YUV */
791 cfg = fimc_read(EXYNOS_CIOCTRL);
792 cfg &= ~(EXYNOS_CIOCTRL_ORDER2P_MASK |
793 EXYNOS_CIOCTRL_ORDER422_MASK |
794 EXYNOS_CIOCTRL_YCBCR_PLANE_MASK);
795
796 switch (fmt) {
797 case DRM_FORMAT_XRGB8888:
798 cfg |= EXYNOS_CIOCTRL_ALPHA_OUT;
799 break;
800 case DRM_FORMAT_YUYV:
801 cfg |= EXYNOS_CIOCTRL_ORDER422_YCBYCR;
802 break;
803 case DRM_FORMAT_YVYU:
804 cfg |= EXYNOS_CIOCTRL_ORDER422_YCRYCB;
805 break;
806 case DRM_FORMAT_UYVY:
807 cfg |= EXYNOS_CIOCTRL_ORDER422_CBYCRY;
808 break;
809 case DRM_FORMAT_VYUY:
810 cfg |= EXYNOS_CIOCTRL_ORDER422_CRYCBY;
811 break;
812 case DRM_FORMAT_NV21:
813 case DRM_FORMAT_NV61:
814 cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB;
815 cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
816 break;
817 case DRM_FORMAT_YUV422:
818 case DRM_FORMAT_YUV420:
819 case DRM_FORMAT_YVU420:
820 cfg |= EXYNOS_CIOCTRL_YCBCR_3PLANE;
821 break;
822 case DRM_FORMAT_NV12:
823 case DRM_FORMAT_NV12MT:
824 case DRM_FORMAT_NV16:
825 cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR;
826 cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE;
827 break;
828 default:
829 dev_err(ippdrv->dev, "inavlid target yuv order 0x%x.\n", fmt);
830 return -EINVAL;
831 }
832
833 fimc_write(cfg, EXYNOS_CIOCTRL);
834
835 return 0;
836}
837
838static int fimc_dst_set_fmt(struct device *dev, u32 fmt)
839{
840 struct fimc_context *ctx = get_fimc_context(dev);
841 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
842 u32 cfg;
843
844 DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
845
846 cfg = fimc_read(EXYNOS_CIEXTEN);
847
848 if (fmt == DRM_FORMAT_AYUV) {
849 cfg |= EXYNOS_CIEXTEN_YUV444_OUT;
850 fimc_write(cfg, EXYNOS_CIEXTEN);
851 } else {
852 cfg &= ~EXYNOS_CIEXTEN_YUV444_OUT;
853 fimc_write(cfg, EXYNOS_CIEXTEN);
854
855 cfg = fimc_read(EXYNOS_CITRGFMT);
856 cfg &= ~EXYNOS_CITRGFMT_OUTFORMAT_MASK;
857
858 switch (fmt) {
859 case DRM_FORMAT_RGB565:
860 case DRM_FORMAT_RGB888:
861 case DRM_FORMAT_XRGB8888:
862 cfg |= EXYNOS_CITRGFMT_OUTFORMAT_RGB;
863 break;
864 case DRM_FORMAT_YUYV:
865 case DRM_FORMAT_YVYU:
866 case DRM_FORMAT_UYVY:
867 case DRM_FORMAT_VYUY:
868 cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE;
869 break;
870 case DRM_FORMAT_NV16:
871 case DRM_FORMAT_NV61:
872 case DRM_FORMAT_YUV422:
873 cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422;
874 break;
875 case DRM_FORMAT_YUV420:
876 case DRM_FORMAT_YVU420:
877 case DRM_FORMAT_NV12:
878 case DRM_FORMAT_NV12MT:
879 case DRM_FORMAT_NV21:
880 cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420;
881 break;
882 default:
883 dev_err(ippdrv->dev, "inavlid target format 0x%x.\n",
884 fmt);
885 return -EINVAL;
886 }
887
888 fimc_write(cfg, EXYNOS_CITRGFMT);
889 }
890
891 cfg = fimc_read(EXYNOS_CIDMAPARAM);
892 cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK;
893
894 if (fmt == DRM_FORMAT_NV12MT)
895 cfg |= EXYNOS_CIDMAPARAM_W_MODE_64X32;
896 else
897 cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR;
898
899 fimc_write(cfg, EXYNOS_CIDMAPARAM);
900
901 return fimc_dst_set_fmt_order(ctx, fmt);
902}
903
904static int fimc_dst_set_transf(struct device *dev,
905 enum drm_exynos_degree degree,
906 enum drm_exynos_flip flip, bool *swap)
907{
908 struct fimc_context *ctx = get_fimc_context(dev);
909 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
910 u32 cfg;
911
912 DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
913 degree, flip);
914
915 cfg = fimc_read(EXYNOS_CITRGFMT);
916 cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK;
917 cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
918
919 switch (degree) {
920 case EXYNOS_DRM_DEGREE_0:
921 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
922 cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
923 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
924 cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
925 break;
926 case EXYNOS_DRM_DEGREE_90:
927 cfg |= EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE;
928 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
929 cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR;
930 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
931 cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
932 break;
933 case EXYNOS_DRM_DEGREE_180:
934 cfg |= (EXYNOS_CITRGFMT_FLIP_X_MIRROR |
935 EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
936 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
937 cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
938 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
939 cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
940 break;
941 case EXYNOS_DRM_DEGREE_270:
942 cfg |= (EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE |
943 EXYNOS_CITRGFMT_FLIP_X_MIRROR |
944 EXYNOS_CITRGFMT_FLIP_Y_MIRROR);
945 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
946 cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR;
947 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
948 cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR;
949 break;
950 default:
951 dev_err(ippdrv->dev, "inavlid degree value %d.\n", degree);
952 return -EINVAL;
953 }
954
955 fimc_write(cfg, EXYNOS_CITRGFMT);
956 *swap = (cfg & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) ? 1 : 0;
957
958 return 0;
959}
960
961static int fimc_get_ratio_shift(u32 src, u32 dst, u32 *ratio, u32 *shift)
962{
963 DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
964
965 if (src >= dst * 64) {
966 DRM_ERROR("failed to make ratio and shift.\n");
967 return -EINVAL;
968 } else if (src >= dst * 32) {
969 *ratio = 32;
970 *shift = 5;
971 } else if (src >= dst * 16) {
972 *ratio = 16;
973 *shift = 4;
974 } else if (src >= dst * 8) {
975 *ratio = 8;
976 *shift = 3;
977 } else if (src >= dst * 4) {
978 *ratio = 4;
979 *shift = 2;
980 } else if (src >= dst * 2) {
981 *ratio = 2;
982 *shift = 1;
983 } else {
984 *ratio = 1;
985 *shift = 0;
986 }
987
988 return 0;
989}
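A worked example of the prescaler selection above, with illustrative sizes: for src = 4000 and dst = 500, src >= dst * 8 but src < dst * 16, so the function picks ratio = 8 and shift = 3; fimc_set_prescaler() below then gets pre_dst_width = 4000 / 8 = 500, and the main scaler is left with a 1:1 pass:

	u32 ratio, shift;
	int ret;

	/* illustrative values, not from the commit */
	ret = fimc_get_ratio_shift(4000, 500, &ratio, &shift);
	/* ret == 0, ratio == 8, shift == 3 */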
990
991static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
992 struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
993{
994 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
995 u32 cfg, cfg_ext, shfactor;
996 u32 pre_dst_width, pre_dst_height;
997 u32 pre_hratio, hfactor, pre_vratio, vfactor;
998 int ret = 0;
999 u32 src_w, src_h, dst_w, dst_h;
1000
1001 cfg_ext = fimc_read(EXYNOS_CITRGFMT);
1002 if (cfg_ext & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) {
1003 src_w = src->h;
1004 src_h = src->w;
1005 } else {
1006 src_w = src->w;
1007 src_h = src->h;
1008 }
1009
1010 if (cfg_ext & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) {
1011 dst_w = dst->h;
1012 dst_h = dst->w;
1013 } else {
1014 dst_w = dst->w;
1015 dst_h = dst->h;
1016 }
1017
1018 ret = fimc_get_ratio_shift(src_w, dst_w, &pre_hratio, &hfactor);
1019 if (ret) {
1020 dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
1021 return ret;
1022 }
1023
1024 ret = fimc_get_ratio_shift(src_h, dst_h, &pre_vratio, &vfactor);
1025 if (ret) {
1026 dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
1027 return ret;
1028 }
1029
1030 pre_dst_width = src_w / pre_hratio;
1031 pre_dst_height = src_h / pre_vratio;
1032 DRM_DEBUG_KMS("%s:pre_dst_width[%d]pre_dst_height[%d]\n", __func__,
1033 pre_dst_width, pre_dst_height);
1034 DRM_DEBUG_KMS("%s:pre_hratio[%d]hfactor[%d]pre_vratio[%d]vfactor[%d]\n",
1035 __func__, pre_hratio, hfactor, pre_vratio, vfactor);
1036
1037 sc->hratio = (src_w << 14) / (dst_w << hfactor);
1038 sc->vratio = (src_h << 14) / (dst_h << vfactor);
1039 sc->up_h = (dst_w >= src_w) ? true : false;
1040 sc->up_v = (dst_h >= src_h) ? true : false;
1041 DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n",
1042 __func__, sc->hratio, sc->vratio, sc->up_h, sc->up_v);
1043
1044 shfactor = FIMC_SHFACTOR - (hfactor + vfactor);
1045 DRM_DEBUG_KMS("%s:shfactor[%d]\n", __func__, shfactor);
1046
1047 cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) |
1048 EXYNOS_CISCPRERATIO_PREHORRATIO(pre_hratio) |
1049 EXYNOS_CISCPRERATIO_PREVERRATIO(pre_vratio));
1050 fimc_write(cfg, EXYNOS_CISCPRERATIO);
1051
1052 cfg = (EXYNOS_CISCPREDST_PREDSTWIDTH(pre_dst_width) |
1053 EXYNOS_CISCPREDST_PREDSTHEIGHT(pre_dst_height));
1054 fimc_write(cfg, EXYNOS_CISCPREDST);
1055
1056 return ret;
1057}
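The main ratios computed above are Q14 fixed point. Continuing with illustrative numbers src_w = 1920, dst_w = 1280 and hfactor = 0:

	/*
	 * hratio = (1920 << 14) / (1280 << 0) = 24576   (1.5 in Q14)
	 *
	 * fimc_set_scaler() below then splits the value across registers:
	 *   EXYNOS_CISCCTRL_MAINHORRATIO    <- 24576 >> 6
	 *   EXYNOS_CIEXTEN_MAINHORRATIO_EXT <- low-order bits of 24576
	 */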
1058
1059static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
1060{
1061 u32 cfg, cfg_ext;
1062
1063 DRM_DEBUG_KMS("%s:range[%d]bypass[%d]up_h[%d]up_v[%d]\n",
1064 __func__, sc->range, sc->bypass, sc->up_h, sc->up_v);
1065 DRM_DEBUG_KMS("%s:hratio[%d]vratio[%d]\n",
1066 __func__, sc->hratio, sc->vratio);
1067
1068 cfg = fimc_read(EXYNOS_CISCCTRL);
1069 cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS |
1070 EXYNOS_CISCCTRL_SCALEUP_H | EXYNOS_CISCCTRL_SCALEUP_V |
1071 EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK |
1072 EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK |
1073 EXYNOS_CISCCTRL_CSCR2Y_WIDE |
1074 EXYNOS_CISCCTRL_CSCY2R_WIDE);
1075
1076 if (sc->range)
1077 cfg |= (EXYNOS_CISCCTRL_CSCR2Y_WIDE |
1078 EXYNOS_CISCCTRL_CSCY2R_WIDE);
1079 if (sc->bypass)
1080 cfg |= EXYNOS_CISCCTRL_SCALERBYPASS;
1081 if (sc->up_h)
1082 cfg |= EXYNOS_CISCCTRL_SCALEUP_H;
1083 if (sc->up_v)
1084 cfg |= EXYNOS_CISCCTRL_SCALEUP_V;
1085
1086 cfg |= (EXYNOS_CISCCTRL_MAINHORRATIO((sc->hratio >> 6)) |
1087 EXYNOS_CISCCTRL_MAINVERRATIO((sc->vratio >> 6)));
1088 fimc_write(cfg, EXYNOS_CISCCTRL);
1089
1090 cfg_ext = fimc_read(EXYNOS_CIEXTEN);
1091 cfg_ext &= ~EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK;
1092 cfg_ext &= ~EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK;
1093 cfg_ext |= (EXYNOS_CIEXTEN_MAINHORRATIO_EXT(sc->hratio) |
1094 EXYNOS_CIEXTEN_MAINVERRATIO_EXT(sc->vratio));
1095 fimc_write(cfg_ext, EXYNOS_CIEXTEN);
1096}
1097
1098static int fimc_dst_set_size(struct device *dev, int swap,
1099 struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
1100{
1101 struct fimc_context *ctx = get_fimc_context(dev);
1102 struct drm_exynos_pos img_pos = *pos;
1103 struct drm_exynos_sz img_sz = *sz;
1104 u32 cfg;
1105
1106 DRM_DEBUG_KMS("%s:swap[%d]hsize[%d]vsize[%d]\n",
1107 __func__, swap, sz->hsize, sz->vsize);
1108
1109 /* original size */
1110 cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) |
1111 EXYNOS_ORGOSIZE_VERTICAL(img_sz.vsize));
1112
1113 fimc_write(cfg, EXYNOS_ORGOSIZE);
1114
1115 DRM_DEBUG_KMS("%s:x[%d]y[%d]w[%d]h[%d]\n",
1116 __func__, pos->x, pos->y, pos->w, pos->h);
1117
1118 /* CSC ITU */
1119 cfg = fimc_read(EXYNOS_CIGCTRL);
1120 cfg &= ~EXYNOS_CIGCTRL_CSC_MASK;
1121
1122 if (sz->hsize >= FIMC_WIDTH_ITU_709)
1123 cfg |= EXYNOS_CIGCTRL_CSC_ITU709;
1124 else
1125 cfg |= EXYNOS_CIGCTRL_CSC_ITU601;
1126
1127 fimc_write(cfg, EXYNOS_CIGCTRL);
1128
1129 if (swap) {
1130 img_pos.w = pos->h;
1131 img_pos.h = pos->w;
1132 img_sz.hsize = sz->vsize;
1133 img_sz.vsize = sz->hsize;
1134 }
1135
1136 /* target image size */
1137 cfg = fimc_read(EXYNOS_CITRGFMT);
1138 cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK |
1139 EXYNOS_CITRGFMT_TARGETV_MASK);
1140 cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(img_pos.w) |
1141 EXYNOS_CITRGFMT_TARGETVSIZE(img_pos.h));
1142 fimc_write(cfg, EXYNOS_CITRGFMT);
1143
1144 /* target area */
1145 cfg = EXYNOS_CITAREA_TARGET_AREA(img_pos.w * img_pos.h);
1146 fimc_write(cfg, EXYNOS_CITAREA);
1147
1148 /* offset Y(RGB), Cb, Cr */
1149 cfg = (EXYNOS_CIOYOFF_HORIZONTAL(img_pos.x) |
1150 EXYNOS_CIOYOFF_VERTICAL(img_pos.y));
1151 fimc_write(cfg, EXYNOS_CIOYOFF);
1152 cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(img_pos.x) |
1153 EXYNOS_CIOCBOFF_VERTICAL(img_pos.y));
1154 fimc_write(cfg, EXYNOS_CIOCBOFF);
1155 cfg = (EXYNOS_CIOCROFF_HORIZONTAL(img_pos.x) |
1156 EXYNOS_CIOCROFF_VERTICAL(img_pos.y));
1157 fimc_write(cfg, EXYNOS_CIOCROFF);
1158
1159 return 0;
1160}
1161
1162static int fimc_dst_get_buf_seq(struct fimc_context *ctx)
1163{
1164 u32 cfg, i, buf_num = 0;
1165 u32 mask = 0x00000001;
1166
1167 cfg = fimc_read(EXYNOS_CIFCNTSEQ);
1168
1169 for (i = 0; i < FIMC_REG_SZ; i++)
1170 if (cfg & (mask << i))
1171 buf_num++;
1172
1173 DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
1174
1175 return buf_num;
1176}
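fimc_dst_get_buf_seq() above is a population count over the 32-bit buffer-sequence mask. An equivalent sketch using the kernel's hweight32() from <linux/bitops.h>, shown only to make the intent explicit (not the committed code):

	static int fimc_dst_get_buf_seq_alt(struct fimc_context *ctx)
	{
		/* count enqueued buffers = set bits in CIFCNTSEQ */
		return hweight32(fimc_read(EXYNOS_CIFCNTSEQ));
	}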
1177
1178static int fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
1179 enum drm_exynos_ipp_buf_type buf_type)
1180{
1181 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1182 bool enable;
1183 u32 cfg;
1184 u32 mask = 0x00000001 << buf_id;
1185 int ret = 0;
1186
1187 DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
1188 buf_id, buf_type);
1189
1190 mutex_lock(&ctx->lock);
1191
1192 /* mask register set */
1193 cfg = fimc_read(EXYNOS_CIFCNTSEQ);
1194
1195 switch (buf_type) {
1196 case IPP_BUF_ENQUEUE:
1197 enable = true;
1198 break;
1199 case IPP_BUF_DEQUEUE:
1200 enable = false;
1201 break;
1202 default:
1203 dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
1204 ret = -EINVAL;
1205 goto err_unlock;
1206 }
1207
1208 /* sequence id */
1209 cfg &= (~mask);
1210 cfg |= (enable << buf_id);
1211 fimc_write(cfg, EXYNOS_CIFCNTSEQ);
1212
1213 /* interrupt enable */
1214 if (buf_type == IPP_BUF_ENQUEUE &&
1215 fimc_dst_get_buf_seq(ctx) >= FIMC_BUF_START)
1216 fimc_handle_irq(ctx, true, false, true);
1217
1218 /* interrupt disable */
1219 if (buf_type == IPP_BUF_DEQUEUE &&
1220 fimc_dst_get_buf_seq(ctx) <= FIMC_BUF_STOP)
1221 fimc_handle_irq(ctx, false, false, true);
1222
1223err_unlock:
1224 mutex_unlock(&ctx->lock);
1225 return ret;
1226}
1227
1228static int fimc_dst_set_addr(struct device *dev,
1229 struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
1230 enum drm_exynos_ipp_buf_type buf_type)
1231{
1232 struct fimc_context *ctx = get_fimc_context(dev);
1233 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1234 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
1235 struct drm_exynos_ipp_property *property;
1236 struct drm_exynos_ipp_config *config;
1237
1238 if (!c_node) {
1239 DRM_ERROR("failed to get c_node.\n");
1240 return -EINVAL;
1241 }
1242
1243 property = &c_node->property;
1244 if (!property) {
1245 DRM_ERROR("failed to get property.\n");
1246 return -EINVAL;
1247 }
1248
1249 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
1250 property->prop_id, buf_id, buf_type);
1251
1252 if (buf_id > FIMC_MAX_DST) {
1253 dev_info(ippdrv->dev, "inavlid buf_id %d.\n", buf_id);
1254 return -ENOMEM;
1255 }
1256
1257 /* address register set */
1258 switch (buf_type) {
1259 case IPP_BUF_ENQUEUE:
1260 config = &property->config[EXYNOS_DRM_OPS_DST];
1261
1262 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
1263 EXYNOS_CIOYSA(buf_id));
1264
1265 if (config->fmt == DRM_FORMAT_YVU420) {
1266 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
1267 EXYNOS_CIOCBSA(buf_id));
1268 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
1269 EXYNOS_CIOCRSA(buf_id));
1270 } else {
1271 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
1272 EXYNOS_CIOCBSA(buf_id));
1273 fimc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
1274 EXYNOS_CIOCRSA(buf_id));
1275 }
1276 break;
1277 case IPP_BUF_DEQUEUE:
1278 fimc_write(0x0, EXYNOS_CIOYSA(buf_id));
1279 fimc_write(0x0, EXYNOS_CIOCBSA(buf_id));
1280 fimc_write(0x0, EXYNOS_CIOCRSA(buf_id));
1281 break;
1282 default:
1283 /* bypass */
1284 break;
1285 }
1286
1287 return fimc_dst_set_buf_seq(ctx, buf_id, buf_type);
1288}
1289
1290static struct exynos_drm_ipp_ops fimc_dst_ops = {
1291 .set_fmt = fimc_dst_set_fmt,
1292 .set_transf = fimc_dst_set_transf,
1293 .set_size = fimc_dst_set_size,
1294 .set_addr = fimc_dst_set_addr,
1295};
1296
1297static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
1298{
1299 DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
1300
1301 if (enable) {
1302 clk_enable(ctx->sclk_fimc_clk);
1303 clk_enable(ctx->fimc_clk);
1304 clk_enable(ctx->wb_clk);
1305 ctx->suspended = false;
1306 } else {
1307 clk_disable(ctx->sclk_fimc_clk);
1308 clk_disable(ctx->fimc_clk);
1309 clk_disable(ctx->wb_clk);
1310 ctx->suspended = true;
1311 }
1312
1313 return 0;
1314}
1315
1316static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
1317{
1318 struct fimc_context *ctx = dev_id;
1319 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1320 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
1321 struct drm_exynos_ipp_event_work *event_work =
1322 c_node->event_work;
1323 int buf_id;
1324
1325 DRM_DEBUG_KMS("%s:fimc id[%d]\n", __func__, ctx->id);
1326
1327 fimc_clear_irq(ctx);
1328 if (fimc_check_ovf(ctx))
1329 return IRQ_NONE;
1330
1331 if (!fimc_check_frame_end(ctx))
1332 return IRQ_NONE;
1333
1334 buf_id = fimc_get_buf_id(ctx);
1335 if (buf_id < 0)
1336 return IRQ_HANDLED;
1337
1338 DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, buf_id);
1339
1340 if (fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE) < 0) {
1341 DRM_ERROR("failed to dequeue.\n");
1342 return IRQ_HANDLED;
1343 }
1344
1345 event_work->ippdrv = ippdrv;
1346 event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
1347 queue_work(ippdrv->event_workq, (struct work_struct *)event_work);
1348
1349 return IRQ_HANDLED;
1350}
1351
1352static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
1353{
1354 struct drm_exynos_ipp_prop_list *prop_list;
1355
1356 DRM_DEBUG_KMS("%s\n", __func__);
1357
1358 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
1359 if (!prop_list) {
1360 DRM_ERROR("failed to alloc property list.\n");
1361 return -ENOMEM;
1362 }
1363
1364 prop_list->version = 1;
1365 prop_list->writeback = 1;
1366 prop_list->refresh_min = FIMC_REFRESH_MIN;
1367 prop_list->refresh_max = FIMC_REFRESH_MAX;
1368 prop_list->flip = (1 << EXYNOS_DRM_FLIP_NONE) |
1369 (1 << EXYNOS_DRM_FLIP_VERTICAL) |
1370 (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
1371 prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
1372 (1 << EXYNOS_DRM_DEGREE_90) |
1373 (1 << EXYNOS_DRM_DEGREE_180) |
1374 (1 << EXYNOS_DRM_DEGREE_270);
1375 prop_list->csc = 1;
1376 prop_list->crop = 1;
1377 prop_list->crop_max.hsize = FIMC_CROP_MAX;
1378 prop_list->crop_max.vsize = FIMC_CROP_MAX;
1379 prop_list->crop_min.hsize = FIMC_CROP_MIN;
1380 prop_list->crop_min.vsize = FIMC_CROP_MIN;
1381 prop_list->scale = 1;
1382 prop_list->scale_max.hsize = FIMC_SCALE_MAX;
1383 prop_list->scale_max.vsize = FIMC_SCALE_MAX;
1384 prop_list->scale_min.hsize = FIMC_SCALE_MIN;
1385 prop_list->scale_min.vsize = FIMC_SCALE_MIN;
1386
1387 ippdrv->prop_list = prop_list;
1388
1389 return 0;
1390}
1391
1392static inline bool fimc_check_drm_flip(enum drm_exynos_flip flip)
1393{
1394 switch (flip) {
1395 case EXYNOS_DRM_FLIP_NONE:
1396 case EXYNOS_DRM_FLIP_VERTICAL:
1397 case EXYNOS_DRM_FLIP_HORIZONTAL:
1398 return true;
1399 default:
1400 DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
1401 return false;
1402 }
1403}
1404
1405static int fimc_ippdrv_check_property(struct device *dev,
1406 struct drm_exynos_ipp_property *property)
1407{
1408 struct fimc_context *ctx = get_fimc_context(dev);
1409 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1410 struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
1411 struct drm_exynos_ipp_config *config;
1412 struct drm_exynos_pos *pos;
1413 struct drm_exynos_sz *sz;
1414 bool swap;
1415 int i;
1416
1417 DRM_DEBUG_KMS("%s\n", __func__);
1418
1419 for_each_ipp_ops(i) {
1420 if ((i == EXYNOS_DRM_OPS_SRC) &&
1421 (property->cmd == IPP_CMD_WB))
1422 continue;
1423
1424 config = &property->config[i];
1425 pos = &config->pos;
1426 sz = &config->sz;
1427
1428 /* check for flip */
1429 if (!fimc_check_drm_flip(config->flip)) {
1430 DRM_ERROR("invalid flip.\n");
1431 goto err_property;
1432 }
1433
1434 /* check for degree */
1435 switch (config->degree) {
1436 case EXYNOS_DRM_DEGREE_90:
1437 case EXYNOS_DRM_DEGREE_270:
1438 swap = true;
1439 break;
1440 case EXYNOS_DRM_DEGREE_0:
1441 case EXYNOS_DRM_DEGREE_180:
1442 swap = false;
1443 break;
1444 default:
1445 DRM_ERROR("invalid degree.\n");
1446 goto err_property;
1447 }
1448
1449 /* check for buffer bound */
1450 if ((pos->x + pos->w > sz->hsize) ||
1451 (pos->y + pos->h > sz->vsize)) {
1452 DRM_ERROR("out of buf bound.\n");
1453 goto err_property;
1454 }
1455
1456 /* check for crop */
1457 if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
1458 if (swap) {
1459 if ((pos->h < pp->crop_min.hsize) ||
1460 (sz->vsize > pp->crop_max.hsize) ||
1461 (pos->w < pp->crop_min.vsize) ||
1462 (sz->hsize > pp->crop_max.vsize)) {
1463 DRM_ERROR("out of crop size.\n");
1464 goto err_property;
1465 }
1466 } else {
1467 if ((pos->w < pp->crop_min.hsize) ||
1468 (sz->hsize > pp->crop_max.hsize) ||
1469 (pos->h < pp->crop_min.vsize) ||
1470 (sz->vsize > pp->crop_max.vsize)) {
1471 DRM_ERROR("out of crop size.\n");
1472 goto err_property;
1473 }
1474 }
1475 }
1476
1477 /* check for scale */
1478 if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
1479 if (swap) {
1480 if ((pos->h < pp->scale_min.hsize) ||
1481 (sz->vsize > pp->scale_max.hsize) ||
1482 (pos->w < pp->scale_min.vsize) ||
1483 (sz->hsize > pp->scale_max.vsize)) {
1484 DRM_ERROR("out of scale size.\n");
1485 goto err_property;
1486 }
1487 } else {
1488 if ((pos->w < pp->scale_min.hsize) ||
1489 (sz->hsize > pp->scale_max.hsize) ||
1490 (pos->h < pp->scale_min.vsize) ||
1491 (sz->vsize > pp->scale_max.vsize)) {
1492 DRM_ERROR("out of scale size.\n");
1493 goto err_property;
1494 }
1495 }
1496 }
1497 }
1498
1499 return 0;
1500
1501err_property:
1502 for_each_ipp_ops(i) {
1503 if ((i == EXYNOS_DRM_OPS_SRC) &&
1504 (property->cmd == IPP_CMD_WB))
1505 continue;
1506
1507 config = &property->config[i];
1508 pos = &config->pos;
1509 sz = &config->sz;
1510
1511 DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
1512 i ? "dst" : "src", config->flip, config->degree,
1513 pos->x, pos->y, pos->w, pos->h,
1514 sz->hsize, sz->vsize);
1515 }
1516
1517 return -EINVAL;
1518}
1519
1520static void fimc_clear_addr(struct fimc_context *ctx)
1521{
1522 int i;
1523
1524 DRM_DEBUG_KMS("%s:\n", __func__);
1525
1526 for (i = 0; i < FIMC_MAX_SRC; i++) {
1527 fimc_write(0, EXYNOS_CIIYSA(i));
1528 fimc_write(0, EXYNOS_CIICBSA(i));
1529 fimc_write(0, EXYNOS_CIICRSA(i));
1530 }
1531
1532 for (i = 0; i < FIMC_MAX_DST; i++) {
1533 fimc_write(0, EXYNOS_CIOYSA(i));
1534 fimc_write(0, EXYNOS_CIOCBSA(i));
1535 fimc_write(0, EXYNOS_CIOCRSA(i));
1536 }
1537}
1538
1539static int fimc_ippdrv_reset(struct device *dev)
1540{
1541 struct fimc_context *ctx = get_fimc_context(dev);
1542
1543 DRM_DEBUG_KMS("%s\n", __func__);
1544
1545 /* reset h/w block */
1546 fimc_sw_reset(ctx, false);
1547
1548 /* reset scaler capability */
1549 memset(&ctx->sc, 0x0, sizeof(ctx->sc));
1550
1551 fimc_clear_addr(ctx);
1552
1553 return 0;
1554}
1555
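/*
 * Start one operation: program the prescaler and scaler from the
 * validated property, route the input path (memory for M2M, FIMD
 * writeback for WB), then start the scaler and the capture engine.
 * M2M additionally kicks the memory input DMA via ENVID.
 */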
1556static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1557{
1558 struct fimc_context *ctx = get_fimc_context(dev);
1559 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1560 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
1561 struct drm_exynos_ipp_property *property;
1562 struct drm_exynos_ipp_config *config;
1563 struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX];
1564 struct drm_exynos_ipp_set_wb set_wb;
1565 int ret, i;
1566 u32 cfg0, cfg1;
1567
1568 DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
1569
1570 if (!c_node) {
1571 DRM_ERROR("failed to get c_node.\n");
1572 return -EINVAL;
1573 }
1574
1575 property = &c_node->property;
1580
1581 fimc_handle_irq(ctx, true, false, true);
1582
1583 for_each_ipp_ops(i) {
1584 config = &property->config[i];
1585 img_pos[i] = config->pos;
1586 }
1587
1588 ret = fimc_set_prescaler(ctx, &ctx->sc,
1589 &img_pos[EXYNOS_DRM_OPS_SRC],
1590 &img_pos[EXYNOS_DRM_OPS_DST]);
1591 if (ret) {
1592 dev_err(dev, "failed to set prescaler.\n");
1593 return ret;
1594 }
1595
1596 /* if set to true, the screen contents can be captured and saved as jpeg */
1597 fimc_handle_jpeg(ctx, false);
1598 fimc_set_scaler(ctx, &ctx->sc);
1599 fimc_set_polarity(ctx, &ctx->pol);
1600
1601 switch (cmd) {
1602 case IPP_CMD_M2M:
1603 fimc_set_type_ctrl(ctx, FIMC_WB_NONE);
1604 fimc_handle_lastend(ctx, false);
1605
1606 /* setup dma */
1607 cfg0 = fimc_read(EXYNOS_MSCTRL);
1608 cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK;
1609 cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY;
1610 fimc_write(cfg0, EXYNOS_MSCTRL);
1611 break;
1612 case IPP_CMD_WB:
1613 fimc_set_type_ctrl(ctx, FIMC_WB_A);
1614 fimc_handle_lastend(ctx, true);
1615
1616 /* setup FIMD */
1617 fimc_set_camblk_fimd0_wb(ctx);
1618
1619 set_wb.enable = 1;
1620 set_wb.refresh = property->refresh_rate;
1621 exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
1622 break;
1623 case IPP_CMD_OUTPUT:
1624 default:
1625 ret = -EINVAL;
1626 dev_err(dev, "invalid operations.\n");
1627 return ret;
1628 }
1629
1630 /* Reset status */
1631 fimc_write(0x0, EXYNOS_CISTATUS);
1632
1633 cfg0 = fimc_read(EXYNOS_CIIMGCPT);
1634 cfg0 &= ~EXYNOS_CIIMGCPT_IMGCPTEN_SC;
1635 cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN_SC;
1636
1637 /* Scaler */
1638 cfg1 = fimc_read(EXYNOS_CISCCTRL);
1639 cfg1 &= ~EXYNOS_CISCCTRL_SCAN_MASK;
1640 cfg1 |= (EXYNOS_CISCCTRL_PROGRESSIVE |
1641 EXYNOS_CISCCTRL_SCALERSTART);
1642
1643 fimc_write(cfg1, EXYNOS_CISCCTRL);
1644
1645 /* Enable image capture*/
1646 cfg0 |= EXYNOS_CIIMGCPT_IMGCPTEN;
1647 fimc_write(cfg0, EXYNOS_CIIMGCPT);
1648
1649 /* Enable frame end irq */
1650 cfg0 = fimc_read(EXYNOS_CIGCTRL);
1651 cfg0 &= ~EXYNOS_CIGCTRL_IRQ_END_DISABLE;
1652 fimc_write(cfg0, EXYNOS_CIGCTRL);
1653
1654 cfg0 = fimc_read(EXYNOS_CIOCTRL);
1655 cfg0 &= ~EXYNOS_CIOCTRL_WEAVE_MASK;
1656 fimc_write(cfg0, EXYNOS_CIOCTRL);
1657
1658 if (cmd == IPP_CMD_M2M) {
1659 cfg0 = fimc_read(EXYNOS_MSCTRL);
1660 cfg0 |= EXYNOS_MSCTRL_ENVID;
1661 fimc_write(cfg0, EXYNOS_MSCTRL);
1666 }
1667
1668 return 0;
1669}
1670
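/*
 * Stop the engine: tear down the input path (or disable FIMD
 * writeback), mask the interrupt, stop the scaler and the capture
 * engine, and disable the frame end irq.
 */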
1671static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1672{
1673 struct fimc_context *ctx = get_fimc_context(dev);
1674 struct drm_exynos_ipp_set_wb set_wb = {0, 0};
1675 u32 cfg;
1676
1677 DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
1678
1679 switch (cmd) {
1680 case IPP_CMD_M2M:
1681 /* Source clear */
1682 cfg = fimc_read(EXYNOS_MSCTRL);
1683 cfg &= ~EXYNOS_MSCTRL_INPUT_MASK;
1684 cfg &= ~EXYNOS_MSCTRL_ENVID;
1685 fimc_write(cfg, EXYNOS_MSCTRL);
1686 break;
1687 case IPP_CMD_WB:
1688 exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
1689 break;
1690 case IPP_CMD_OUTPUT:
1691 default:
1692 dev_err(dev, "invalid operations.\n");
1693 break;
1694 }
1695
1696 fimc_handle_irq(ctx, false, false, true);
1697
1698 /* reset sequence */
1699 fimc_write(0x0, EXYNOS_CIFCNTSEQ);
1700
1701 /* Scaler disable */
1702 cfg = fimc_read(EXYNOS_CISCCTRL);
1703 cfg &= ~EXYNOS_CISCCTRL_SCALERSTART;
1704 fimc_write(cfg, EXYNOS_CISCCTRL);
1705
1706 /* Disable image capture */
1707 cfg = fimc_read(EXYNOS_CIIMGCPT);
1708 cfg &= ~(EXYNOS_CIIMGCPT_IMGCPTEN_SC | EXYNOS_CIIMGCPT_IMGCPTEN);
1709 fimc_write(cfg, EXYNOS_CIIMGCPT);
1710
1711 /* Disable frame end irq */
1712 cfg = fimc_read(EXYNOS_CIGCTRL);
1713 cfg |= EXYNOS_CIGCTRL_IRQ_END_DISABLE;
1714 fimc_write(cfg, EXYNOS_CIGCTRL);
1715}
1716
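/*
 * Probe order: platform data, context allocation, clock setup
 * (sclk_fimc is reparented to the SoC-specific clock named in the
 * driver data and rated from platform data), register mapping,
 * threaded irq, property list and ippdrv registration.  The clocks
 * come from plain clk_get(), so every error path releases the ones
 * acquired so far by hand.
 */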
1717static int __devinit fimc_probe(struct platform_device *pdev)
1718{
1719 struct device *dev = &pdev->dev;
1720 struct fimc_context *ctx;
1721 struct clk *parent_clk;
1722 struct resource *res;
1723 struct exynos_drm_ippdrv *ippdrv;
1724 struct exynos_drm_fimc_pdata *pdata;
1725 struct fimc_driverdata *ddata;
1726 int ret;
1727
1728 pdata = pdev->dev.platform_data;
1729 if (!pdata) {
1730 dev_err(dev, "no platform data specified.\n");
1731 return -EINVAL;
1732 }
1733
1734 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
1735 if (!ctx)
1736 return -ENOMEM;
1737
1738 ddata = (struct fimc_driverdata *)
1739 platform_get_device_id(pdev)->driver_data;
1740
1741 /* clock control */
1742 ctx->sclk_fimc_clk = clk_get(dev, "sclk_fimc");
1743 if (IS_ERR(ctx->sclk_fimc_clk)) {
1744 dev_err(dev, "failed to get src fimc clock.\n");
1745 ret = PTR_ERR(ctx->sclk_fimc_clk);
1746 goto err_ctx;
1747 }
1748 clk_enable(ctx->sclk_fimc_clk);
1749
1750 ctx->fimc_clk = clk_get(dev, "fimc");
1751 if (IS_ERR(ctx->fimc_clk)) {
1752 dev_err(dev, "failed to get fimc clock.\n");
1753 ret = PTR_ERR(ctx->fimc_clk);
1754 clk_disable(ctx->sclk_fimc_clk);
1755 clk_put(ctx->sclk_fimc_clk);
1756 goto err_ctx;
1757 }
1758
1759 ctx->wb_clk = clk_get(dev, "pxl_async0");
1760 if (IS_ERR(ctx->wb_clk)) {
1761 dev_err(dev, "failed to get writeback a clock.\n");
1762 ret = PTR_ERR(ctx->wb_clk);
1763 clk_disable(ctx->sclk_fimc_clk);
1764 clk_put(ctx->sclk_fimc_clk);
1765 clk_put(ctx->fimc_clk);
1766 goto err_ctx;
1767 }
1768
1769 ctx->wb_b_clk = clk_get(dev, "pxl_async1");
1770 if (IS_ERR(ctx->wb_b_clk)) {
1771 dev_err(dev, "failed to get writeback b clock.\n");
1772 ret = PTR_ERR(ctx->wb_b_clk);
1773 clk_disable(ctx->sclk_fimc_clk);
1774 clk_put(ctx->sclk_fimc_clk);
1775 clk_put(ctx->fimc_clk);
1776 clk_put(ctx->wb_clk);
1777 goto err_ctx;
1778 }
1779
1780 parent_clk = clk_get(dev, ddata->parent_clk);
1781
1782 if (IS_ERR(parent_clk)) {
1783 dev_err(dev, "failed to get parent clock.\n");
1784 ret = PTR_ERR(parent_clk);
1785 clk_disable(ctx->sclk_fimc_clk);
1786 clk_put(ctx->sclk_fimc_clk);
1787 clk_put(ctx->fimc_clk);
1788 clk_put(ctx->wb_clk);
1789 clk_put(ctx->wb_b_clk);
1790 goto err_ctx;
1791 }
1792
1793 if (clk_set_parent(ctx->sclk_fimc_clk, parent_clk)) {
1794 dev_err(dev, "failed to set parent.\n");
1795 ret = -EINVAL;
1796 clk_put(parent_clk);
1797 clk_disable(ctx->sclk_fimc_clk);
1798 clk_put(ctx->sclk_fimc_clk);
1799 clk_put(ctx->fimc_clk);
1800 clk_put(ctx->wb_clk);
1801 clk_put(ctx->wb_b_clk);
1802 goto err_ctx;
1803 }
1804
1805 clk_put(parent_clk);
1806 clk_set_rate(ctx->sclk_fimc_clk, pdata->clk_rate);
1807
1808 /* resource memory */
1809 ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1810 if (!ctx->regs_res) {
1811 dev_err(dev, "failed to find registers.\n");
1812 ret = -ENOENT;
1813 goto err_clk;
1814 }
1815
1816 ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res);
1817 if (!ctx->regs) {
1818 dev_err(dev, "failed to map registers.\n");
1819 ret = -ENXIO;
1820 goto err_clk;
1821 }
1822
1823 /* resource irq */
1824 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1825 if (!res) {
1826 dev_err(dev, "failed to request irq resource.\n");
1827 ret = -ENOENT;
1828 goto err_get_regs;
1829 }
1830
1831 ctx->irq = res->start;
1832 ret = request_threaded_irq(ctx->irq, NULL, fimc_irq_handler,
1833 IRQF_ONESHOT, "drm_fimc", ctx);
1834 if (ret < 0) {
1835 dev_err(dev, "failed to request irq.\n");
1836 goto err_get_regs;
1837 }
1838
1839 /* context initialization */
1840 ctx->id = pdev->id;
1841 ctx->pol = pdata->pol;
1842 ctx->ddata = ddata;
1843
1844 ippdrv = &ctx->ippdrv;
1845 ippdrv->dev = dev;
1846 ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops;
1847 ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops;
1848 ippdrv->check_property = fimc_ippdrv_check_property;
1849 ippdrv->reset = fimc_ippdrv_reset;
1850 ippdrv->start = fimc_ippdrv_start;
1851 ippdrv->stop = fimc_ippdrv_stop;
1852 ret = fimc_init_prop_list(ippdrv);
1853 if (ret < 0) {
1854 dev_err(dev, "failed to init property list.\n");
1855 goto err_get_irq;
1856 }
1857
1858 DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
1859 (int)ippdrv);
1860
1861 mutex_init(&ctx->lock);
1862 platform_set_drvdata(pdev, ctx);
1863
1864 pm_runtime_set_active(dev);
1865 pm_runtime_enable(dev);
1866
1867 ret = exynos_drm_ippdrv_register(ippdrv);
1868 if (ret < 0) {
1869 dev_err(dev, "failed to register drm fimc device.\n");
1870 goto err_ippdrv_register;
1871 }
1872
1873 dev_info(&pdev->dev, "drm fimc registered successfully.\n");
1874
1875 return 0;
1876
1877err_ippdrv_register:
1878 devm_kfree(dev, ippdrv->prop_list);
1879 pm_runtime_disable(dev);
1880err_get_irq:
1881 free_irq(ctx->irq, ctx);
1882err_get_regs:
1883 devm_iounmap(dev, ctx->regs);
1884err_clk:
1885 clk_put(ctx->sclk_fimc_clk);
1886 clk_put(ctx->fimc_clk);
1887 clk_put(ctx->wb_clk);
1888 clk_put(ctx->wb_b_clk);
1889err_ctx:
1890 devm_kfree(dev, ctx);
1891 return ret;
1892}
1893
1894static int __devexit fimc_remove(struct platform_device *pdev)
1895{
1896 struct device *dev = &pdev->dev;
1897 struct fimc_context *ctx = get_fimc_context(dev);
1898 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1899
1900 devm_kfree(dev, ippdrv->prop_list);
1901 exynos_drm_ippdrv_unregister(ippdrv);
1902 mutex_destroy(&ctx->lock);
1903
1904 pm_runtime_set_suspended(dev);
1905 pm_runtime_disable(dev);
1906
1907 free_irq(ctx->irq, ctx);
1908 devm_iounmap(dev, ctx->regs);
1909
1910 clk_put(ctx->sclk_fimc_clk);
1911 clk_put(ctx->fimc_clk);
1912 clk_put(ctx->wb_clk);
1913 clk_put(ctx->wb_b_clk);
1914
1915 devm_kfree(dev, ctx);
1916
1917 return 0;
1918}
1919
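/*
 * System sleep defers to runtime PM: the clocks are gated or ungated
 * here only when the device is not already runtime suspended, while
 * the runtime PM callbacks below gate them unconditionally.
 */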
1920#ifdef CONFIG_PM_SLEEP
1921static int fimc_suspend(struct device *dev)
1922{
1923 struct fimc_context *ctx = get_fimc_context(dev);
1924
1925 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1926
1927 if (pm_runtime_suspended(dev))
1928 return 0;
1929
1930 return fimc_clk_ctrl(ctx, false);
1931}
1932
1933static int fimc_resume(struct device *dev)
1934{
1935 struct fimc_context *ctx = get_fimc_context(dev);
1936
1937 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1938
1939 if (!pm_runtime_suspended(dev))
1940 return fimc_clk_ctrl(ctx, true);
1941
1942 return 0;
1943}
1944#endif
1945
1946#ifdef CONFIG_PM_RUNTIME
1947static int fimc_runtime_suspend(struct device *dev)
1948{
1949 struct fimc_context *ctx = get_fimc_context(dev);
1950
1951 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1952
1953 return fimc_clk_ctrl(ctx, false);
1954}
1955
1956static int fimc_runtime_resume(struct device *dev)
1957{
1958 struct fimc_context *ctx = get_fimc_context(dev);
1959
1960 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1961
1962 return fimc_clk_ctrl(ctx, true);
1963}
1964#endif
1965
1966static struct fimc_driverdata exynos4210_fimc_data = {
1967 .parent_clk = "mout_mpll",
1968};
1969
1970static struct fimc_driverdata exynos4412_fimc_data = {
1971 .parent_clk = "mout_mpll_user",
1972};
1973
1974static struct platform_device_id fimc_driver_ids[] = {
1975 {
1976 .name = "exynos4210-fimc",
1977 .driver_data = (unsigned long)&exynos4210_fimc_data,
1978 }, {
1979 .name = "exynos4412-fimc",
1980 .driver_data = (unsigned long)&exynos4412_fimc_data,
1981 },
1982 {},
1983};
1984MODULE_DEVICE_TABLE(platform, fimc_driver_ids);
1985
1986static const struct dev_pm_ops fimc_pm_ops = {
1987 SET_SYSTEM_SLEEP_PM_OPS(fimc_suspend, fimc_resume)
1988 SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
1989};
1990
1991struct platform_driver fimc_driver = {
1992 .probe = fimc_probe,
1993 .remove = __devexit_p(fimc_remove),
1994 .id_table = fimc_driver_ids,
1995 .driver = {
1996 .name = "exynos-drm-fimc",
1997 .owner = THIS_MODULE,
1998 .pm = &fimc_pm_ops,
1999 },
2000};
2001
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.h b/drivers/gpu/drm/exynos/exynos_drm_fimc.h
new file mode 100644
index 000000000000..dc970fa0d888
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.h
@@ -0,0 +1,37 @@
1/*
2 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
3 *
4 * Authors:
5 * Eunchul Kim <chulspro.kim@samsung.com>
6 * Jinyoung Jeon <jy0.jeon@samsung.com>
7 * Sangmin Lee <lsmin.lee@samsung.com>
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */
28
29#ifndef _EXYNOS_DRM_FIMC_H_
30#define _EXYNOS_DRM_FIMC_H_
31
32/*
33 * TODO
34 * FIMD output interface notifier callback.
35 */
36
37#endif /* _EXYNOS_DRM_FIMC_H_ */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index e08478f19f1a..bf0d9baca2bc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -17,6 +17,7 @@
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/clk.h> 19#include <linux/clk.h>
20#include <linux/of_device.h>
20#include <linux/pm_runtime.h> 21#include <linux/pm_runtime.h>
21 22
22#include <video/samsung_fimd.h> 23#include <video/samsung_fimd.h>
@@ -25,6 +26,7 @@
25#include "exynos_drm_drv.h" 26#include "exynos_drm_drv.h"
26#include "exynos_drm_fbdev.h" 27#include "exynos_drm_fbdev.h"
27#include "exynos_drm_crtc.h" 28#include "exynos_drm_crtc.h"
29#include "exynos_drm_iommu.h"
28 30
29/* 31/*
30 * FIMD stands for Fully Interactive Mobile Display and 32 * FIMD stands for Fully Interactive Mobile Display and
@@ -78,10 +80,10 @@ struct fimd_win_data {
78 unsigned int fb_height; 80 unsigned int fb_height;
79 unsigned int bpp; 81 unsigned int bpp;
80 dma_addr_t dma_addr; 82 dma_addr_t dma_addr;
81 void __iomem *vaddr;
82 unsigned int buf_offsize; 83 unsigned int buf_offsize;
83 unsigned int line_size; /* bytes */ 84 unsigned int line_size; /* bytes */
84 bool enabled; 85 bool enabled;
86 bool resume;
85}; 87};
86 88
87struct fimd_context { 89struct fimd_context {
@@ -99,13 +101,34 @@ struct fimd_context {
99 u32 vidcon1; 101 u32 vidcon1;
100 bool suspended; 102 bool suspended;
101 struct mutex lock; 103 struct mutex lock;
104 wait_queue_head_t wait_vsync_queue;
105 atomic_t wait_vsync_event;
102 106
103 struct exynos_drm_panel_info *panel; 107 struct exynos_drm_panel_info *panel;
104}; 108};
105 109
110#ifdef CONFIG_OF
111static const struct of_device_id fimd_driver_dt_match[] = {
112 { .compatible = "samsung,exynos4-fimd",
113 .data = &exynos4_fimd_driver_data },
114 { .compatible = "samsung,exynos5-fimd",
115 .data = &exynos5_fimd_driver_data },
116 {},
117};
118MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
119#endif
120
106static inline struct fimd_driver_data *drm_fimd_get_driver_data( 121static inline struct fimd_driver_data *drm_fimd_get_driver_data(
107 struct platform_device *pdev) 122 struct platform_device *pdev)
108{ 123{
124#ifdef CONFIG_OF
125 const struct of_device_id *of_id =
126 of_match_device(fimd_driver_dt_match, &pdev->dev);
127
128 if (of_id)
129 return (struct fimd_driver_data *)of_id->data;
130#endif
131
109 return (struct fimd_driver_data *) 132 return (struct fimd_driver_data *)
110 platform_get_device_id(pdev)->driver_data; 133 platform_get_device_id(pdev)->driver_data;
111} 134}
@@ -240,7 +263,9 @@ static void fimd_commit(struct device *dev)
240 263
241 /* setup horizontal and vertical display size. */ 264 /* setup horizontal and vertical display size. */
242 val = VIDTCON2_LINEVAL(timing->yres - 1) | 265 val = VIDTCON2_LINEVAL(timing->yres - 1) |
243 VIDTCON2_HOZVAL(timing->xres - 1); 266 VIDTCON2_HOZVAL(timing->xres - 1) |
267 VIDTCON2_LINEVAL_E(timing->yres - 1) |
268 VIDTCON2_HOZVAL_E(timing->xres - 1);
244 writel(val, ctx->regs + driver_data->timing_base + VIDTCON2); 269 writel(val, ctx->regs + driver_data->timing_base + VIDTCON2);
245 270
246 /* setup clock source, clock divider, enable dma. */ 271 /* setup clock source, clock divider, enable dma. */
@@ -307,12 +332,32 @@ static void fimd_disable_vblank(struct device *dev)
307 } 332 }
308} 333}
309 334
335static void fimd_wait_for_vblank(struct device *dev)
336{
337 struct fimd_context *ctx = get_fimd_context(dev);
338
339 if (ctx->suspended)
340 return;
341
342 atomic_set(&ctx->wait_vsync_event, 1);
343
344 /*
345 * wait for FIMD to signal the VSYNC interrupt, or return after
346 * a 50ms timeout (one frame at a 20Hz refresh rate).
347 */
348 if (!wait_event_timeout(ctx->wait_vsync_queue,
349 !atomic_read(&ctx->wait_vsync_event),
350 DRM_HZ/20))
351 DRM_DEBUG_KMS("vblank wait timed out.\n");
352}
353
310static struct exynos_drm_manager_ops fimd_manager_ops = { 354static struct exynos_drm_manager_ops fimd_manager_ops = {
311 .dpms = fimd_dpms, 355 .dpms = fimd_dpms,
312 .apply = fimd_apply, 356 .apply = fimd_apply,
313 .commit = fimd_commit, 357 .commit = fimd_commit,
314 .enable_vblank = fimd_enable_vblank, 358 .enable_vblank = fimd_enable_vblank,
315 .disable_vblank = fimd_disable_vblank, 359 .disable_vblank = fimd_disable_vblank,
360 .wait_for_vblank = fimd_wait_for_vblank,
316}; 361};
317 362
318static void fimd_win_mode_set(struct device *dev, 363static void fimd_win_mode_set(struct device *dev,
@@ -351,7 +396,6 @@ static void fimd_win_mode_set(struct device *dev,
351 win_data->fb_width = overlay->fb_width; 396 win_data->fb_width = overlay->fb_width;
352 win_data->fb_height = overlay->fb_height; 397 win_data->fb_height = overlay->fb_height;
353 win_data->dma_addr = overlay->dma_addr[0] + offset; 398 win_data->dma_addr = overlay->dma_addr[0] + offset;
354 win_data->vaddr = overlay->vaddr[0] + offset;
355 win_data->bpp = overlay->bpp; 399 win_data->bpp = overlay->bpp;
356 win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) * 400 win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
357 (overlay->bpp >> 3); 401 (overlay->bpp >> 3);
@@ -361,9 +405,7 @@ static void fimd_win_mode_set(struct device *dev,
361 win_data->offset_x, win_data->offset_y); 405 win_data->offset_x, win_data->offset_y);
362 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", 406 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
363 win_data->ovl_width, win_data->ovl_height); 407 win_data->ovl_width, win_data->ovl_height);
364 DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n", 408 DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
365 (unsigned long)win_data->dma_addr,
366 (unsigned long)win_data->vaddr);
367 DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n", 409 DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
368 overlay->fb_width, overlay->crtc_width); 410 overlay->fb_width, overlay->crtc_width);
369} 411}
@@ -451,6 +493,8 @@ static void fimd_win_commit(struct device *dev, int zpos)
451 struct fimd_win_data *win_data; 493 struct fimd_win_data *win_data;
452 int win = zpos; 494 int win = zpos;
453 unsigned long val, alpha, size; 495 unsigned long val, alpha, size;
496 unsigned int last_x;
497 unsigned int last_y;
454 498
455 DRM_DEBUG_KMS("%s\n", __FILE__); 499 DRM_DEBUG_KMS("%s\n", __FILE__);
456 500
@@ -496,24 +540,32 @@ static void fimd_win_commit(struct device *dev, int zpos)
496 540
497 /* buffer size */ 541 /* buffer size */
498 val = VIDW_BUF_SIZE_OFFSET(win_data->buf_offsize) | 542 val = VIDW_BUF_SIZE_OFFSET(win_data->buf_offsize) |
499 VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size); 543 VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size) |
544 VIDW_BUF_SIZE_OFFSET_E(win_data->buf_offsize) |
545 VIDW_BUF_SIZE_PAGEWIDTH_E(win_data->line_size);
500 writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0)); 546 writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0));
501 547
502 /* OSD position */ 548 /* OSD position */
503 val = VIDOSDxA_TOPLEFT_X(win_data->offset_x) | 549 val = VIDOSDxA_TOPLEFT_X(win_data->offset_x) |
504 VIDOSDxA_TOPLEFT_Y(win_data->offset_y); 550 VIDOSDxA_TOPLEFT_Y(win_data->offset_y) |
551 VIDOSDxA_TOPLEFT_X_E(win_data->offset_x) |
552 VIDOSDxA_TOPLEFT_Y_E(win_data->offset_y);
505 writel(val, ctx->regs + VIDOSD_A(win)); 553 writel(val, ctx->regs + VIDOSD_A(win));
506 554
507 val = VIDOSDxB_BOTRIGHT_X(win_data->offset_x + 555 last_x = win_data->offset_x + win_data->ovl_width;
508 win_data->ovl_width - 1) | 556 if (last_x)
509 VIDOSDxB_BOTRIGHT_Y(win_data->offset_y + 557 last_x--;
510 win_data->ovl_height - 1); 558 last_y = win_data->offset_y + win_data->ovl_height;
559 if (last_y)
560 last_y--;
561
562 val = VIDOSDxB_BOTRIGHT_X(last_x) | VIDOSDxB_BOTRIGHT_Y(last_y) |
563 VIDOSDxB_BOTRIGHT_X_E(last_x) | VIDOSDxB_BOTRIGHT_Y_E(last_y);
564
511 writel(val, ctx->regs + VIDOSD_B(win)); 565 writel(val, ctx->regs + VIDOSD_B(win));
512 566
513 DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n", 567 DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
514 win_data->offset_x, win_data->offset_y, 568 win_data->offset_x, win_data->offset_y, last_x, last_y);
515 win_data->offset_x + win_data->ovl_width - 1,
516 win_data->offset_y + win_data->ovl_height - 1);
517 569
518 /* hardware window 0 doesn't support alpha channel. */ 570 /* hardware window 0 doesn't support alpha channel. */
519 if (win != 0) { 571 if (win != 0) {
@@ -573,6 +625,12 @@ static void fimd_win_disable(struct device *dev, int zpos)
573 625
574 win_data = &ctx->win_data[win]; 626 win_data = &ctx->win_data[win];
575 627
628 if (ctx->suspended) {
629 /* do not resume this window */
630 win_data->resume = false;
631 return;
632 }
633
576 /* protect windows */ 634 /* protect windows */
577 val = readl(ctx->regs + SHADOWCON); 635 val = readl(ctx->regs + SHADOWCON);
578 val |= SHADOWCON_WINx_PROTECT(win); 636 val |= SHADOWCON_WINx_PROTECT(win);
@@ -592,22 +650,10 @@ static void fimd_win_disable(struct device *dev, int zpos)
592 win_data->enabled = false; 650 win_data->enabled = false;
593} 651}
594 652
595static void fimd_wait_for_vblank(struct device *dev)
596{
597 struct fimd_context *ctx = get_fimd_context(dev);
598 int ret;
599
600 ret = wait_for((__raw_readl(ctx->regs + VIDCON1) &
601 VIDCON1_VSTATUS_VSYNC), 50);
602 if (ret < 0)
603 DRM_DEBUG_KMS("vblank wait timed out.\n");
604}
605
606static struct exynos_drm_overlay_ops fimd_overlay_ops = { 653static struct exynos_drm_overlay_ops fimd_overlay_ops = {
607 .mode_set = fimd_win_mode_set, 654 .mode_set = fimd_win_mode_set,
608 .commit = fimd_win_commit, 655 .commit = fimd_win_commit,
609 .disable = fimd_win_disable, 656 .disable = fimd_win_disable,
610 .wait_for_vblank = fimd_wait_for_vblank,
611}; 657};
612 658
613static struct exynos_drm_manager fimd_manager = { 659static struct exynos_drm_manager fimd_manager = {
@@ -623,7 +669,6 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
623 struct drm_pending_vblank_event *e, *t; 669 struct drm_pending_vblank_event *e, *t;
624 struct timeval now; 670 struct timeval now;
625 unsigned long flags; 671 unsigned long flags;
626 bool is_checked = false;
627 672
628 spin_lock_irqsave(&drm_dev->event_lock, flags); 673 spin_lock_irqsave(&drm_dev->event_lock, flags);
629 674
@@ -633,8 +678,6 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
633 if (crtc != e->pipe) 678 if (crtc != e->pipe)
634 continue; 679 continue;
635 680
636 is_checked = true;
637
638 do_gettimeofday(&now); 681 do_gettimeofday(&now);
639 e->event.sequence = 0; 682 e->event.sequence = 0;
640 e->event.tv_sec = now.tv_sec; 683 e->event.tv_sec = now.tv_sec;
@@ -642,22 +685,7 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
642 685
643 list_move_tail(&e->base.link, &e->base.file_priv->event_list); 686 list_move_tail(&e->base.link, &e->base.file_priv->event_list);
644 wake_up_interruptible(&e->base.file_priv->event_wait); 687 wake_up_interruptible(&e->base.file_priv->event_wait);
645 } 688 drm_vblank_put(drm_dev, crtc);
646
647 if (is_checked) {
648 /*
649 * call drm_vblank_put only in case that drm_vblank_get was
650 * called.
651 */
652 if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
653 drm_vblank_put(drm_dev, crtc);
654
655 /*
656 * don't off vblank if vblank_disable_allowed is 1,
657 * because vblank would be off by timer handler.
658 */
659 if (!drm_dev->vblank_disable_allowed)
660 drm_vblank_off(drm_dev, crtc);
661 } 689 }
662 690
663 spin_unlock_irqrestore(&drm_dev->event_lock, flags); 691 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
@@ -684,6 +712,11 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
684 drm_handle_vblank(drm_dev, manager->pipe); 712 drm_handle_vblank(drm_dev, manager->pipe);
685 fimd_finish_pageflip(drm_dev, manager->pipe); 713 fimd_finish_pageflip(drm_dev, manager->pipe);
686 714
715 /* set wait vsync event to zero and wake up queue. */
716 if (atomic_read(&ctx->wait_vsync_event)) {
717 atomic_set(&ctx->wait_vsync_event, 0);
718 DRM_WAKEUP(&ctx->wait_vsync_queue);
719 }
687out: 720out:
688 return IRQ_HANDLED; 721 return IRQ_HANDLED;
689} 722}
@@ -709,6 +742,10 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
709 */ 742 */
710 drm_dev->vblank_disable_allowed = 1; 743 drm_dev->vblank_disable_allowed = 1;
711 744
745 /* attach this sub driver to iommu mapping if supported. */
746 if (is_drm_iommu_supported(drm_dev))
747 drm_iommu_attach_device(drm_dev, dev);
748
712 return 0; 749 return 0;
713} 750}
714 751
@@ -716,7 +753,9 @@ static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
716{ 753{
717 DRM_DEBUG_KMS("%s\n", __FILE__); 754 DRM_DEBUG_KMS("%s\n", __FILE__);
718 755
719 /* TODO. */ 756 /* detach this sub driver from iommu mapping if supported. */
757 if (is_drm_iommu_supported(drm_dev))
758 drm_iommu_detach_device(drm_dev, dev);
720} 759}
721 760
722static int fimd_calc_clkdiv(struct fimd_context *ctx, 761static int fimd_calc_clkdiv(struct fimd_context *ctx,
@@ -805,11 +844,38 @@ static int fimd_clock(struct fimd_context *ctx, bool enable)
805 return 0; 844 return 0;
806} 845}
807 846
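/*
 * Save each window's enabled state in its resume flag, disable all
 * windows and wait for a vblank, presumably so the disables take
 * effect before the display clocks are gated.
 */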
847static void fimd_window_suspend(struct device *dev)
848{
849 struct fimd_context *ctx = get_fimd_context(dev);
850 struct fimd_win_data *win_data;
851 int i;
852
853 for (i = 0; i < WINDOWS_NR; i++) {
854 win_data = &ctx->win_data[i];
855 win_data->resume = win_data->enabled;
856 fimd_win_disable(dev, i);
857 }
858 fimd_wait_for_vblank(dev);
859}
860
861static void fimd_window_resume(struct device *dev)
862{
863 struct fimd_context *ctx = get_fimd_context(dev);
864 struct fimd_win_data *win_data;
865 int i;
866
867 for (i = 0; i < WINDOWS_NR; i++) {
868 win_data = &ctx->win_data[i];
869 win_data->enabled = win_data->resume;
870 win_data->resume = false;
871 }
872}
873
808static int fimd_activate(struct fimd_context *ctx, bool enable) 874static int fimd_activate(struct fimd_context *ctx, bool enable)
809{ 875{
876 struct device *dev = ctx->subdrv.dev;
810 if (enable) { 877 if (enable) {
811 int ret; 878 int ret;
812 struct device *dev = ctx->subdrv.dev;
813 879
814 ret = fimd_clock(ctx, true); 880 ret = fimd_clock(ctx, true);
815 if (ret < 0) 881 if (ret < 0)
@@ -820,7 +886,11 @@ static int fimd_activate(struct fimd_context *ctx, bool enable)
820 /* if vblank was enabled status, enable it again. */ 886 /* if vblank was enabled status, enable it again. */
821 if (test_and_clear_bit(0, &ctx->irq_flags)) 887 if (test_and_clear_bit(0, &ctx->irq_flags))
822 fimd_enable_vblank(dev); 888 fimd_enable_vblank(dev);
889
890 fimd_window_resume(dev);
823 } else { 891 } else {
892 fimd_window_suspend(dev);
893
824 fimd_clock(ctx, false); 894 fimd_clock(ctx, false);
825 ctx->suspended = true; 895 ctx->suspended = true;
826 } 896 }
@@ -857,18 +927,16 @@ static int __devinit fimd_probe(struct platform_device *pdev)
857 if (!ctx) 927 if (!ctx)
858 return -ENOMEM; 928 return -ENOMEM;
859 929
860 ctx->bus_clk = clk_get(dev, "fimd"); 930 ctx->bus_clk = devm_clk_get(dev, "fimd");
861 if (IS_ERR(ctx->bus_clk)) { 931 if (IS_ERR(ctx->bus_clk)) {
862 dev_err(dev, "failed to get bus clock\n"); 932 dev_err(dev, "failed to get bus clock\n");
863 ret = PTR_ERR(ctx->bus_clk); 933 return PTR_ERR(ctx->bus_clk);
864 goto err_clk_get;
865 } 934 }
866 935
867 ctx->lcd_clk = clk_get(dev, "sclk_fimd"); 936 ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
868 if (IS_ERR(ctx->lcd_clk)) { 937 if (IS_ERR(ctx->lcd_clk)) {
869 dev_err(dev, "failed to get lcd clock\n"); 938 dev_err(dev, "failed to get lcd clock\n");
870 ret = PTR_ERR(ctx->lcd_clk); 939 return PTR_ERR(ctx->lcd_clk);
871 goto err_bus_clk;
872 } 940 }
873 941
874 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 942 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -876,14 +944,13 @@ static int __devinit fimd_probe(struct platform_device *pdev)
876 ctx->regs = devm_request_and_ioremap(&pdev->dev, res); 944 ctx->regs = devm_request_and_ioremap(&pdev->dev, res);
877 if (!ctx->regs) { 945 if (!ctx->regs) {
878 dev_err(dev, "failed to map registers\n"); 946 dev_err(dev, "failed to map registers\n");
879 ret = -ENXIO; 947 return -ENXIO;
880 goto err_clk;
881 } 948 }
882 949
883 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 950 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
884 if (!res) { 951 if (!res) {
885 dev_err(dev, "irq request failed.\n"); 952 dev_err(dev, "irq request failed.\n");
886 goto err_clk; 953 return -ENXIO;
887 } 954 }
888 955
889 ctx->irq = res->start; 956 ctx->irq = res->start;
@@ -892,13 +959,15 @@ static int __devinit fimd_probe(struct platform_device *pdev)
892 0, "drm_fimd", ctx); 959 0, "drm_fimd", ctx);
893 if (ret) { 960 if (ret) {
894 dev_err(dev, "irq request failed.\n"); 961 dev_err(dev, "irq request failed.\n");
895 goto err_clk; 962 return ret;
896 } 963 }
897 964
898 ctx->vidcon0 = pdata->vidcon0; 965 ctx->vidcon0 = pdata->vidcon0;
899 ctx->vidcon1 = pdata->vidcon1; 966 ctx->vidcon1 = pdata->vidcon1;
900 ctx->default_win = pdata->default_win; 967 ctx->default_win = pdata->default_win;
901 ctx->panel = panel; 968 ctx->panel = panel;
969 DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
970 atomic_set(&ctx->wait_vsync_event, 0);
902 971
903 subdrv = &ctx->subdrv; 972 subdrv = &ctx->subdrv;
904 973
@@ -926,17 +995,6 @@ static int __devinit fimd_probe(struct platform_device *pdev)
926 exynos_drm_subdrv_register(subdrv); 995 exynos_drm_subdrv_register(subdrv);
927 996
928 return 0; 997 return 0;
929
930err_clk:
931 clk_disable(ctx->lcd_clk);
932 clk_put(ctx->lcd_clk);
933
934err_bus_clk:
935 clk_disable(ctx->bus_clk);
936 clk_put(ctx->bus_clk);
937
938err_clk_get:
939 return ret;
940} 998}
941 999
942static int __devexit fimd_remove(struct platform_device *pdev) 1000static int __devexit fimd_remove(struct platform_device *pdev)
@@ -960,9 +1018,6 @@ static int __devexit fimd_remove(struct platform_device *pdev)
960out: 1018out:
961 pm_runtime_disable(dev); 1019 pm_runtime_disable(dev);
962 1020
963 clk_put(ctx->lcd_clk);
964 clk_put(ctx->bus_clk);
965
966 return 0; 1021 return 0;
967} 1022}
968 1023
@@ -1056,5 +1111,6 @@ struct platform_driver fimd_driver = {
1056 .name = "exynos4-fb", 1111 .name = "exynos4-fb",
1057 .owner = THIS_MODULE, 1112 .owner = THIS_MODULE,
1058 .pm = &fimd_pm_ops, 1113 .pm = &fimd_pm_ops,
1114 .of_match_table = of_match_ptr(fimd_driver_dt_match),
1059 }, 1115 },
1060}; 1116};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index f7aab24ea46c..6ffa0763c078 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -17,11 +17,14 @@
17#include <linux/pm_runtime.h> 17#include <linux/pm_runtime.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/workqueue.h> 19#include <linux/workqueue.h>
20#include <linux/dma-mapping.h>
21#include <linux/dma-attrs.h>
20 22
21#include <drm/drmP.h> 23#include <drm/drmP.h>
22#include <drm/exynos_drm.h> 24#include <drm/exynos_drm.h>
23#include "exynos_drm_drv.h" 25#include "exynos_drm_drv.h"
24#include "exynos_drm_gem.h" 26#include "exynos_drm_gem.h"
27#include "exynos_drm_iommu.h"
25 28
26#define G2D_HW_MAJOR_VER 4 29#define G2D_HW_MAJOR_VER 4
27#define G2D_HW_MINOR_VER 1 30#define G2D_HW_MINOR_VER 1
@@ -92,11 +95,21 @@
92#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM) 95#define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
93#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2) 96#define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
94 97
98#define MAX_BUF_ADDR_NR 6
99
100/* maximum buffer pool size of userptr is 64MB as default */
101#define MAX_POOL (64 * 1024 * 1024)
102
103enum {
104 BUF_TYPE_GEM = 1,
105 BUF_TYPE_USERPTR,
106};
107
95/* cmdlist data structure */ 108/* cmdlist data structure */
96struct g2d_cmdlist { 109struct g2d_cmdlist {
97 u32 head; 110 u32 head;
98 u32 data[G2D_CMDLIST_DATA_NUM]; 111 unsigned long data[G2D_CMDLIST_DATA_NUM];
99 u32 last; /* last data offset */ 112 u32 last; /* last data offset */
100}; 113};
101 114
102struct drm_exynos_pending_g2d_event { 115struct drm_exynos_pending_g2d_event {
@@ -104,15 +117,26 @@ struct drm_exynos_pending_g2d_event {
104 struct drm_exynos_g2d_event event; 117 struct drm_exynos_g2d_event event;
105}; 118};
106 119
107struct g2d_gem_node { 120struct g2d_cmdlist_userptr {
108 struct list_head list; 121 struct list_head list;
109 unsigned int handle; 122 dma_addr_t dma_addr;
123 unsigned long userptr;
124 unsigned long size;
125 struct page **pages;
126 unsigned int npages;
127 struct sg_table *sgt;
128 struct vm_area_struct *vma;
129 atomic_t refcount;
130 bool in_pool;
131 bool out_of_list;
110}; 132};
111 133
112struct g2d_cmdlist_node { 134struct g2d_cmdlist_node {
113 struct list_head list; 135 struct list_head list;
114 struct g2d_cmdlist *cmdlist; 136 struct g2d_cmdlist *cmdlist;
115 unsigned int gem_nr; 137 unsigned int map_nr;
138 unsigned long handles[MAX_BUF_ADDR_NR];
139 unsigned int obj_type[MAX_BUF_ADDR_NR];
116 dma_addr_t dma_addr; 140 dma_addr_t dma_addr;
117 141
118 struct drm_exynos_pending_g2d_event *event; 142 struct drm_exynos_pending_g2d_event *event;
@@ -122,6 +146,7 @@ struct g2d_runqueue_node {
122 struct list_head list; 146 struct list_head list;
123 struct list_head run_cmdlist; 147 struct list_head run_cmdlist;
124 struct list_head event_list; 148 struct list_head event_list;
149 struct drm_file *filp;
125 pid_t pid; 150 pid_t pid;
126 struct completion complete; 151 struct completion complete;
127 int async; 152 int async;
@@ -143,23 +168,33 @@ struct g2d_data {
143 struct mutex cmdlist_mutex; 168 struct mutex cmdlist_mutex;
144 dma_addr_t cmdlist_pool; 169 dma_addr_t cmdlist_pool;
145 void *cmdlist_pool_virt; 170 void *cmdlist_pool_virt;
171 struct dma_attrs cmdlist_dma_attrs;
146 172
147 /* runqueue*/ 173 /* runqueue*/
148 struct g2d_runqueue_node *runqueue_node; 174 struct g2d_runqueue_node *runqueue_node;
149 struct list_head runqueue; 175 struct list_head runqueue;
150 struct mutex runqueue_mutex; 176 struct mutex runqueue_mutex;
151 struct kmem_cache *runqueue_slab; 177 struct kmem_cache *runqueue_slab;
178
179 unsigned long current_pool;
180 unsigned long max_pool;
152}; 181};
153 182
154static int g2d_init_cmdlist(struct g2d_data *g2d) 183static int g2d_init_cmdlist(struct g2d_data *g2d)
155{ 184{
156 struct device *dev = g2d->dev; 185 struct device *dev = g2d->dev;
157 struct g2d_cmdlist_node *node = g2d->cmdlist_node; 186 struct g2d_cmdlist_node *node = g2d->cmdlist_node;
187 struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
158 int nr; 188 int nr;
159 int ret; 189 int ret;
160 190
161 g2d->cmdlist_pool_virt = dma_alloc_coherent(dev, G2D_CMDLIST_POOL_SIZE, 191 init_dma_attrs(&g2d->cmdlist_dma_attrs);
162 &g2d->cmdlist_pool, GFP_KERNEL); 192 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
193
194 g2d->cmdlist_pool_virt = dma_alloc_attrs(subdrv->drm_dev->dev,
195 G2D_CMDLIST_POOL_SIZE,
196 &g2d->cmdlist_pool, GFP_KERNEL,
197 &g2d->cmdlist_dma_attrs);
163 if (!g2d->cmdlist_pool_virt) { 198 if (!g2d->cmdlist_pool_virt) {
164 dev_err(dev, "failed to allocate dma memory\n"); 199 dev_err(dev, "failed to allocate dma memory\n");
165 return -ENOMEM; 200 return -ENOMEM;
@@ -184,18 +219,20 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
184 return 0; 219 return 0;
185 220
186err: 221err:
187 dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt, 222 dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
188 g2d->cmdlist_pool); 223 g2d->cmdlist_pool_virt,
224 g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
189 return ret; 225 return ret;
190} 226}
191 227
192static void g2d_fini_cmdlist(struct g2d_data *g2d) 228static void g2d_fini_cmdlist(struct g2d_data *g2d)
193{ 229{
194 struct device *dev = g2d->dev; 230 struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
195 231
196 kfree(g2d->cmdlist_node); 232 kfree(g2d->cmdlist_node);
197 dma_free_coherent(dev, G2D_CMDLIST_POOL_SIZE, g2d->cmdlist_pool_virt, 233 dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
198 g2d->cmdlist_pool); 234 g2d->cmdlist_pool_virt,
235 g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
199} 236}
200 237
201static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d) 238static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
@@ -245,62 +282,300 @@ add_to_list:
245 list_add_tail(&node->event->base.link, &g2d_priv->event_list); 282 list_add_tail(&node->event->base.link, &g2d_priv->event_list);
246} 283}
247 284
248static int g2d_get_cmdlist_gem(struct drm_device *drm_dev, 285static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
249 struct drm_file *file, 286 unsigned long obj,
250 struct g2d_cmdlist_node *node) 287 bool force)
251{ 288{
252 struct drm_exynos_file_private *file_priv = file->driver_priv; 289 struct g2d_cmdlist_userptr *g2d_userptr =
290 (struct g2d_cmdlist_userptr *)obj;
291
292 if (!obj)
293 return;
294
295 if (force)
296 goto out;
297
298 atomic_dec(&g2d_userptr->refcount);
299
300 if (atomic_read(&g2d_userptr->refcount) > 0)
301 return;
302
303 if (g2d_userptr->in_pool)
304 return;
305
306out:
307 exynos_gem_unmap_sgt_from_dma(drm_dev, g2d_userptr->sgt,
308 DMA_BIDIRECTIONAL);
309
310 exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
311 g2d_userptr->npages,
312 g2d_userptr->vma);
313
314 if (!g2d_userptr->out_of_list)
315 list_del_init(&g2d_userptr->list);
316
317 sg_free_table(g2d_userptr->sgt);
318 kfree(g2d_userptr->sgt);
319 g2d_userptr->sgt = NULL;
320
321 kfree(g2d_userptr->pages);
322 g2d_userptr->pages = NULL;
323 kfree(g2d_userptr);
324 g2d_userptr = NULL;
325}
326
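/*
 * Look up or create a dma mapping for a userspace buffer: an entry
 * from userptr_list is reused when both address and size match,
 * otherwise the pages are pinned, collected into an sg table and
 * mapped for bidirectional dma.  Mappings that fit the remaining pool
 * budget stay cached ("in_pool") until the file is closed.
 */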
327dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
328 unsigned long userptr,
329 unsigned long size,
330 struct drm_file *filp,
331 unsigned long *obj)
332{
333 struct drm_exynos_file_private *file_priv = filp->driver_priv;
334 struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
335 struct g2d_cmdlist_userptr *g2d_userptr;
336 struct g2d_data *g2d;
337 struct page **pages;
338 struct sg_table *sgt;
339 struct vm_area_struct *vma;
340 unsigned long start, end;
341 unsigned int npages, offset;
342 int ret;
343
344 if (!size) {
345 DRM_ERROR("invalid userptr size.\n");
346 return ERR_PTR(-EINVAL);
347 }
348
349 g2d = dev_get_drvdata(g2d_priv->dev);
350
351 /* check if userptr already exists in userptr_list. */
352 list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) {
353 if (g2d_userptr->userptr == userptr) {
354 /*
355 * also check size because there could be same address
356 * and different size.
357 */
358 if (g2d_userptr->size == size) {
359 atomic_inc(&g2d_userptr->refcount);
360 *obj = (unsigned long)g2d_userptr;
361
362 return &g2d_userptr->dma_addr;
363 }
364
365 /*
366 * the g2d dma engine may still be accessing this
367 * g2d_userptr memory region, so remove the object
368 * from userptr_list so it cannot be looked up again
369 * and exclude it from the userptr pool so that it is
370 * released once the dma access completes.
371 */
372 g2d_userptr->out_of_list = true;
373 g2d_userptr->in_pool = false;
374 list_del_init(&g2d_userptr->list);
375
376 break;
377 }
378 }
379
380 g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
381 if (!g2d_userptr) {
382 DRM_ERROR("failed to allocate g2d_userptr.\n");
383 return ERR_PTR(-ENOMEM);
384 }
385
386 atomic_set(&g2d_userptr->refcount, 1);
387
388 start = userptr & PAGE_MASK;
389 offset = userptr & ~PAGE_MASK;
390 end = PAGE_ALIGN(userptr + size);
391 npages = (end - start) >> PAGE_SHIFT;
392 g2d_userptr->npages = npages;
393
394 pages = kzalloc(npages * sizeof(struct page *), GFP_KERNEL);
395 if (!pages) {
396 DRM_ERROR("failed to allocate pages.\n");
397 kfree(g2d_userptr);
398 return ERR_PTR(-ENOMEM);
399 }
400
401 vma = find_vma(current->mm, userptr);
402 if (!vma) {
403 DRM_ERROR("failed to get vm region.\n");
404 ret = -EFAULT;
405 goto err_free_pages;
406 }
407
408 if (vma->vm_end < userptr + size) {
409 DRM_ERROR("vma is too small.\n");
410 ret = -EFAULT;
411 goto err_free_pages;
412 }
413
414 g2d_userptr->vma = exynos_gem_get_vma(vma);
415 if (!g2d_userptr->vma) {
416 DRM_ERROR("failed to copy vma.\n");
417 ret = -ENOMEM;
418 goto err_free_pages;
419 }
420
421 g2d_userptr->size = size;
422
423 ret = exynos_gem_get_pages_from_userptr(start & PAGE_MASK,
424 npages, pages, vma);
425 if (ret < 0) {
426 DRM_ERROR("failed to get user pages from userptr.\n");
427 goto err_put_vma;
428 }
429
430 g2d_userptr->pages = pages;
431
432 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
433 if (!sgt) {
434 DRM_ERROR("failed to allocate sg table.\n");
435 ret = -ENOMEM;
436 goto err_free_userptr;
437 }
438
439 ret = sg_alloc_table_from_pages(sgt, pages, npages, offset,
440 size, GFP_KERNEL);
441 if (ret < 0) {
442 DRM_ERROR("failed to get sgt from pages.\n");
443 goto err_free_sgt;
444 }
445
446 g2d_userptr->sgt = sgt;
447
448 ret = exynos_gem_map_sgt_with_dma(drm_dev, g2d_userptr->sgt,
449 DMA_BIDIRECTIONAL);
450 if (ret < 0) {
451 DRM_ERROR("failed to map sgt with dma region.\n");
452 goto err_free_sgt;
453 }
454
455 g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
456 g2d_userptr->userptr = userptr;
457
458 list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list);
459
460 if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
461 g2d->current_pool += npages << PAGE_SHIFT;
462 g2d_userptr->in_pool = true;
463 }
464
465 *obj = (unsigned long)g2d_userptr;
466
467 return &g2d_userptr->dma_addr;
468
469err_free_sgt:
470 sg_free_table(sgt);
471 kfree(sgt);
472 sgt = NULL;
473
474err_free_userptr:
475 exynos_gem_put_pages_to_userptr(g2d_userptr->pages,
476 g2d_userptr->npages,
477 g2d_userptr->vma);
478
479err_put_vma:
480 exynos_gem_put_vma(g2d_userptr->vma);
481
482err_free_pages:
483 kfree(pages);
484 kfree(g2d_userptr);
485 pages = NULL;
486 g2d_userptr = NULL;
487
488 return ERR_PTR(ret);
489}
490
491static void g2d_userptr_free_all(struct drm_device *drm_dev,
492 struct g2d_data *g2d,
493 struct drm_file *filp)
494{
495 struct drm_exynos_file_private *file_priv = filp->driver_priv;
253 struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv; 496 struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
497 struct g2d_cmdlist_userptr *g2d_userptr, *n;
498
499 list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list)
500 if (g2d_userptr->in_pool)
501 g2d_userptr_put_dma_addr(drm_dev,
502 (unsigned long)g2d_userptr,
503 true);
504
505 g2d->current_pool = 0;
506}
507
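/*
 * Resolve every buffer reference recorded in the cmdlist: GEM handles
 * go through exynos_drm_gem_get_dma_addr(), while userptr descriptors
 * are copied from userspace and pinned via g2d_userptr_get_dma_addr().
 * The resulting dma address is patched into the cmdlist in place.
 */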
508static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
509 struct g2d_cmdlist_node *node,
510 struct drm_device *drm_dev,
511 struct drm_file *file)
512{
254 struct g2d_cmdlist *cmdlist = node->cmdlist; 513 struct g2d_cmdlist *cmdlist = node->cmdlist;
255 dma_addr_t *addr;
256 int offset; 514 int offset;
257 int i; 515 int i;
258 516
259 for (i = 0; i < node->gem_nr; i++) { 517 for (i = 0; i < node->map_nr; i++) {
260 struct g2d_gem_node *gem_node; 518 unsigned long handle;
261 519 dma_addr_t *addr;
262 gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
263 if (!gem_node) {
264 dev_err(g2d_priv->dev, "failed to allocate gem node\n");
265 return -ENOMEM;
266 }
267 520
268 offset = cmdlist->last - (i * 2 + 1); 521 offset = cmdlist->last - (i * 2 + 1);
269 gem_node->handle = cmdlist->data[offset]; 522 handle = cmdlist->data[offset];
270 523
271 addr = exynos_drm_gem_get_dma_addr(drm_dev, gem_node->handle, 524 if (node->obj_type[i] == BUF_TYPE_GEM) {
272 file); 525 addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
273 if (IS_ERR(addr)) { 526 file);
274 node->gem_nr = i; 527 if (IS_ERR(addr)) {
275 kfree(gem_node); 528 node->map_nr = i;
276 return PTR_ERR(addr); 529 return -EFAULT;
530 }
531 } else {
532 struct drm_exynos_g2d_userptr g2d_userptr;
533
534 if (copy_from_user(&g2d_userptr, (void __user *)handle,
535 sizeof(struct drm_exynos_g2d_userptr))) {
536 node->map_nr = i;
537 return -EFAULT;
538 }
539
540 addr = g2d_userptr_get_dma_addr(drm_dev,
541 g2d_userptr.userptr,
542 g2d_userptr.size,
543 file,
544 &handle);
545 if (IS_ERR(addr)) {
546 node->map_nr = i;
547 return -EFAULT;
548 }
277 } 549 }
278 550
279 cmdlist->data[offset] = *addr; 551 cmdlist->data[offset] = *addr;
280 list_add_tail(&gem_node->list, &g2d_priv->gem_list); 552 node->handles[i] = handle;
281 g2d_priv->gem_nr++;
282 } 553 }
283 554
284 return 0; 555 return 0;
285} 556}
286 557
287static void g2d_put_cmdlist_gem(struct drm_device *drm_dev, 558static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
288 struct drm_file *file, 559 struct g2d_cmdlist_node *node,
289 unsigned int nr) 560 struct drm_file *filp)
290{ 561{
291 struct drm_exynos_file_private *file_priv = file->driver_priv; 562 struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
292 struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv; 563 int i;
293 struct g2d_gem_node *node, *n;
294 564
295 list_for_each_entry_safe_reverse(node, n, &g2d_priv->gem_list, list) { 565 for (i = 0; i < node->map_nr; i++) {
296 if (!nr) 566 unsigned long handle = node->handles[i];
297 break;
298 567
299 exynos_drm_gem_put_dma_addr(drm_dev, node->handle, file); 568 if (node->obj_type[i] == BUF_TYPE_GEM)
300 list_del_init(&node->list); 569 exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
301 kfree(node); 570 filp);
302 nr--; 571 else
572 g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
573 false);
574
575 node->handles[i] = 0;
303 } 576 }
577
578 node->map_nr = 0;
304} 579}
305 580
306static void g2d_dma_start(struct g2d_data *g2d, 581static void g2d_dma_start(struct g2d_data *g2d,
@@ -337,10 +612,18 @@ static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
337static void g2d_free_runqueue_node(struct g2d_data *g2d, 612static void g2d_free_runqueue_node(struct g2d_data *g2d,
338 struct g2d_runqueue_node *runqueue_node) 613 struct g2d_runqueue_node *runqueue_node)
339{ 614{
615 struct g2d_cmdlist_node *node;
616
340 if (!runqueue_node) 617 if (!runqueue_node)
341 return; 618 return;
342 619
343 mutex_lock(&g2d->cmdlist_mutex); 620 mutex_lock(&g2d->cmdlist_mutex);
621 /*
622 * commands in run_cmdlist have been completed so unmap all gem
623 * objects in each command node so that they are unreferenced.
624 */
625 list_for_each_entry(node, &runqueue_node->run_cmdlist, list)
626 g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp);
344 list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist); 627 list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
345 mutex_unlock(&g2d->cmdlist_mutex); 628 mutex_unlock(&g2d->cmdlist_mutex);
346 629
@@ -430,15 +713,28 @@ static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
430 return IRQ_HANDLED; 713 return IRQ_HANDLED;
431} 714}
432 715
433static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist, 716static int g2d_check_reg_offset(struct device *dev,
717 struct g2d_cmdlist_node *node,
434 int nr, bool for_addr) 718 int nr, bool for_addr)
435{ 719{
720 struct g2d_cmdlist *cmdlist = node->cmdlist;
436 int reg_offset; 721 int reg_offset;
437 int index; 722 int index;
438 int i; 723 int i;
439 724
440 for (i = 0; i < nr; i++) { 725 for (i = 0; i < nr; i++) {
441 index = cmdlist->last - 2 * (i + 1); 726 index = cmdlist->last - 2 * (i + 1);
727
728 if (for_addr) {
729 /* check userptr buffer type. */
730 reg_offset = (cmdlist->data[index] &
731 ~0x7fffffff) >> 31;
732 if (reg_offset) {
733 node->obj_type[i] = BUF_TYPE_USERPTR;
734 cmdlist->data[index] &= ~G2D_BUF_USERPTR;
735 }
736 }
737
442 reg_offset = cmdlist->data[index] & ~0xfffff000; 738 reg_offset = cmdlist->data[index] & ~0xfffff000;
443 739
444 if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END) 740 if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
@@ -455,6 +751,9 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
455 case G2D_MSK_BASE_ADDR: 751 case G2D_MSK_BASE_ADDR:
456 if (!for_addr) 752 if (!for_addr)
457 goto err; 753 goto err;
754
755 if (node->obj_type[i] != BUF_TYPE_USERPTR)
756 node->obj_type[i] = BUF_TYPE_GEM;
458 break; 757 break;
459 default: 758 default:
460 if (for_addr) 759 if (for_addr)
@@ -466,7 +765,7 @@ static int g2d_check_reg_offset(struct device *dev, struct g2d_cmdlist *cmdlist,
466 return 0; 765 return 0;
467 766
468err: 767err:
469 dev_err(dev, "Bad register offset: 0x%x\n", cmdlist->data[index]); 768 dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
470 return -EINVAL; 769 return -EINVAL;
471} 770}
472 771
@@ -566,7 +865,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
566 } 865 }
567 866
568 /* Check size of cmdlist: the last 2 entries are for G2D_BITBLT_START */ 867 /* Check size of cmdlist: the last 2 entries are for G2D_BITBLT_START */
569 size = cmdlist->last + req->cmd_nr * 2 + req->cmd_gem_nr * 2 + 2; 868 size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
570 if (size > G2D_CMDLIST_DATA_NUM) { 869 if (size > G2D_CMDLIST_DATA_NUM) {
571 dev_err(dev, "cmdlist size is too big\n"); 870 dev_err(dev, "cmdlist size is too big\n");
572 ret = -EINVAL; 871 ret = -EINVAL;
@@ -583,29 +882,29 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
583 } 882 }
584 cmdlist->last += req->cmd_nr * 2; 883 cmdlist->last += req->cmd_nr * 2;
585 884
586 ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_nr, false); 885 ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false);
587 if (ret < 0) 886 if (ret < 0)
588 goto err_free_event; 887 goto err_free_event;
589 888
590 node->gem_nr = req->cmd_gem_nr; 889 node->map_nr = req->cmd_buf_nr;
591 if (req->cmd_gem_nr) { 890 if (req->cmd_buf_nr) {
592 struct drm_exynos_g2d_cmd *cmd_gem; 891 struct drm_exynos_g2d_cmd *cmd_buf;
593 892
594 cmd_gem = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_gem; 893 cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf;
595 894
596 if (copy_from_user(cmdlist->data + cmdlist->last, 895 if (copy_from_user(cmdlist->data + cmdlist->last,
597 (void __user *)cmd_gem, 896 (void __user *)cmd_buf,
598 sizeof(*cmd_gem) * req->cmd_gem_nr)) { 897 sizeof(*cmd_buf) * req->cmd_buf_nr)) {
599 ret = -EFAULT; 898 ret = -EFAULT;
600 goto err_free_event; 899 goto err_free_event;
601 } 900 }
602 cmdlist->last += req->cmd_gem_nr * 2; 901 cmdlist->last += req->cmd_buf_nr * 2;
603 902
604 ret = g2d_check_reg_offset(dev, cmdlist, req->cmd_gem_nr, true); 903 ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, true);
605 if (ret < 0) 904 if (ret < 0)
606 goto err_free_event; 905 goto err_free_event;
607 906
608 ret = g2d_get_cmdlist_gem(drm_dev, file, node); 907 ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file);
609 if (ret < 0) 908 if (ret < 0)
610 goto err_unmap; 909 goto err_unmap;
611 } 910 }
@@ -624,7 +923,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
624 return 0; 923 return 0;
625 924
626err_unmap: 925err_unmap:
627 g2d_put_cmdlist_gem(drm_dev, file, node->gem_nr); 926 g2d_unmap_cmdlist_gem(g2d, node, file);
628err_free_event: 927err_free_event:
629 if (node->event) { 928 if (node->event) {
630 spin_lock_irqsave(&drm_dev->event_lock, flags); 929 spin_lock_irqsave(&drm_dev->event_lock, flags);
@@ -680,6 +979,7 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
680 979
681 mutex_lock(&g2d->runqueue_mutex); 980 mutex_lock(&g2d->runqueue_mutex);
682 runqueue_node->pid = current->pid; 981 runqueue_node->pid = current->pid;
982 runqueue_node->filp = file;
683 list_add_tail(&runqueue_node->list, &g2d->runqueue); 983 list_add_tail(&runqueue_node->list, &g2d->runqueue);
684 if (!g2d->runqueue_node) 984 if (!g2d->runqueue_node)
685 g2d_exec_runqueue(g2d); 985 g2d_exec_runqueue(g2d);
@@ -696,6 +996,43 @@ out:
696} 996}
697EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl); 997EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
698 998
999static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1000{
1001 struct g2d_data *g2d;
1002 int ret;
1003
1004 g2d = dev_get_drvdata(dev);
1005 if (!g2d)
1006 return -EFAULT;
1007
1008 /* allocate dma-aware cmdlist buffer. */
1009 ret = g2d_init_cmdlist(g2d);
1010 if (ret < 0) {
1011 dev_err(dev, "cmdlist init failed\n");
1012 return ret;
1013 }
1014
1015 if (!is_drm_iommu_supported(drm_dev))
1016 return 0;
1017
1018 ret = drm_iommu_attach_device(drm_dev, dev);
1019 if (ret < 0) {
1020 dev_err(dev, "failed to enable iommu.\n");
1021 g2d_fini_cmdlist(g2d);
1022 }
1023
1024 return ret;
1025
1026}
1027
1028static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
1029{
1030 if (!is_drm_iommu_supported(drm_dev))
1031 return;
1032
1033 drm_iommu_detach_device(drm_dev, dev);
1034}
1035
699static int g2d_open(struct drm_device *drm_dev, struct device *dev, 1036static int g2d_open(struct drm_device *drm_dev, struct device *dev,
700 struct drm_file *file) 1037 struct drm_file *file)
701{ 1038{
@@ -713,7 +1050,7 @@ static int g2d_open(struct drm_device *drm_dev, struct device *dev,
713 1050
714 INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist); 1051 INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
715 INIT_LIST_HEAD(&g2d_priv->event_list); 1052 INIT_LIST_HEAD(&g2d_priv->event_list);
716 INIT_LIST_HEAD(&g2d_priv->gem_list); 1053 INIT_LIST_HEAD(&g2d_priv->userptr_list);
717 1054
718 return 0; 1055 return 0;
719} 1056}
@@ -734,11 +1071,21 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev,
734 return; 1071 return;
735 1072
736 mutex_lock(&g2d->cmdlist_mutex); 1073 mutex_lock(&g2d->cmdlist_mutex);
737 list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) 1074 list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) {
1075 /*
1076 * unmap all gem objects that have not completed.
1077 *
1078 * note: if the current process was terminated forcibly,
1079 * some commands may still sit in inuse_cmdlist, so unmap
1080 * them here.
1081 */
1082 g2d_unmap_cmdlist_gem(g2d, node, file);
738 list_move_tail(&node->list, &g2d->free_cmdlist); 1083 list_move_tail(&node->list, &g2d->free_cmdlist);
1084 }
739 mutex_unlock(&g2d->cmdlist_mutex); 1085 mutex_unlock(&g2d->cmdlist_mutex);
740 1086
741 g2d_put_cmdlist_gem(drm_dev, file, g2d_priv->gem_nr); 1087 /* release all g2d_userptr objects in the pool. */
1088 g2d_userptr_free_all(drm_dev, g2d, file);
742 1089
743 kfree(file_priv->g2d_priv); 1090 kfree(file_priv->g2d_priv);
744} 1091}
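[Editor's note] The close path above walks the in-use list with the _safe iterator because each node is moved to the free list inside the loop. A generic sketch of that recycle pattern follows (example_node and the cleanup hook are illustrative):

#include <linux/list.h>
#include <linux/mutex.h>

struct example_node {
	struct list_head list;
};

static void example_recycle_all(struct mutex *lock, struct list_head *inuse,
				struct list_head *free)
{
	struct example_node *node, *n;

	mutex_lock(lock);
	/* _safe variant: 'n' keeps the walk valid while nodes are moved. */
	list_for_each_entry_safe(node, n, inuse, list) {
		/* per-node cleanup (e.g. unmapping gem objects) would go here. */
		list_move_tail(&node->list, free);
	}
	mutex_unlock(lock);
}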
@@ -778,15 +1125,11 @@ static int __devinit g2d_probe(struct platform_device *pdev)
778 mutex_init(&g2d->cmdlist_mutex); 1125 mutex_init(&g2d->cmdlist_mutex);
779 mutex_init(&g2d->runqueue_mutex); 1126 mutex_init(&g2d->runqueue_mutex);
780 1127
781 ret = g2d_init_cmdlist(g2d); 1128 g2d->gate_clk = devm_clk_get(dev, "fimg2d");
782 if (ret < 0)
783 goto err_destroy_workqueue;
784
785 g2d->gate_clk = clk_get(dev, "fimg2d");
786 if (IS_ERR(g2d->gate_clk)) { 1129 if (IS_ERR(g2d->gate_clk)) {
787 dev_err(dev, "failed to get gate clock\n"); 1130 dev_err(dev, "failed to get gate clock\n");
788 ret = PTR_ERR(g2d->gate_clk); 1131 ret = PTR_ERR(g2d->gate_clk);
789 goto err_fini_cmdlist; 1132 goto err_destroy_workqueue;
790 } 1133 }
791 1134
792 pm_runtime_enable(dev); 1135 pm_runtime_enable(dev);
@@ -814,10 +1157,14 @@ static int __devinit g2d_probe(struct platform_device *pdev)
814 goto err_put_clk; 1157 goto err_put_clk;
815 } 1158 }
816 1159
1160 g2d->max_pool = MAX_POOL;
1161
817 platform_set_drvdata(pdev, g2d); 1162 platform_set_drvdata(pdev, g2d);
818 1163
819 subdrv = &g2d->subdrv; 1164 subdrv = &g2d->subdrv;
820 subdrv->dev = dev; 1165 subdrv->dev = dev;
1166 subdrv->probe = g2d_subdrv_probe;
1167 subdrv->remove = g2d_subdrv_remove;
821 subdrv->open = g2d_open; 1168 subdrv->open = g2d_open;
822 subdrv->close = g2d_close; 1169 subdrv->close = g2d_close;
823 1170
@@ -834,9 +1181,6 @@ static int __devinit g2d_probe(struct platform_device *pdev)
834 1181
835err_put_clk: 1182err_put_clk:
836 pm_runtime_disable(dev); 1183 pm_runtime_disable(dev);
837 clk_put(g2d->gate_clk);
838err_fini_cmdlist:
839 g2d_fini_cmdlist(g2d);
840err_destroy_workqueue: 1184err_destroy_workqueue:
841 destroy_workqueue(g2d->g2d_workq); 1185 destroy_workqueue(g2d->g2d_workq);
842err_destroy_slab: 1186err_destroy_slab:
@@ -857,7 +1201,6 @@ static int __devexit g2d_remove(struct platform_device *pdev)
857 } 1201 }
858 1202
859 pm_runtime_disable(&pdev->dev); 1203 pm_runtime_disable(&pdev->dev);
860 clk_put(g2d->gate_clk);
861 1204
862 g2d_fini_cmdlist(g2d); 1205 g2d_fini_cmdlist(g2d);
863 destroy_workqueue(g2d->g2d_workq); 1206 destroy_workqueue(g2d->g2d_workq);
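[Editor's note] With clk_get()/clk_put() replaced by devm_clk_get(), the clock reference is released automatically when the device is unbound, which is why both clk_put() calls and the err_fini_cmdlist label disappear above. A minimal sketch of the pattern (probe body trimmed to the clock handling):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_probe(struct device *dev)
{
	struct clk *gate_clk;

	gate_clk = devm_clk_get(dev, "fimg2d");
	if (IS_ERR(gate_clk))
		return PTR_ERR(gate_clk);	/* no clk_put() needed on error */

	/* ... rest of probe; unwind paths need no clock release either. */
	return 0;
}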
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index d2545560664f..d48183e7e056 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -83,157 +83,40 @@ static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
83 83
84static unsigned long roundup_gem_size(unsigned long size, unsigned int flags) 84static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
85{ 85{
86 if (!IS_NONCONTIG_BUFFER(flags)) { 86 /* TODO */
87 if (size >= SZ_1M)
88 return roundup(size, SECTION_SIZE);
89 else if (size >= SZ_64K)
90 return roundup(size, SZ_64K);
91 else
92 goto out;
93 }
94out:
95 return roundup(size, PAGE_SIZE);
96}
97
98struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
99 gfp_t gfpmask)
100{
101 struct page *p, **pages;
102 int i, npages;
103
104 npages = obj->size >> PAGE_SHIFT;
105
106 pages = drm_malloc_ab(npages, sizeof(struct page *));
107 if (pages == NULL)
108 return ERR_PTR(-ENOMEM);
109
110 for (i = 0; i < npages; i++) {
111 p = alloc_page(gfpmask);
112 if (IS_ERR(p))
113 goto fail;
114 pages[i] = p;
115 }
116
117 return pages;
118
119fail:
120 while (--i)
121 __free_page(pages[i]);
122
123 drm_free_large(pages);
124 return ERR_CAST(p);
125}
126
127static void exynos_gem_put_pages(struct drm_gem_object *obj,
128 struct page **pages)
129{
130 int npages;
131
132 npages = obj->size >> PAGE_SHIFT;
133
134 while (--npages >= 0)
135 __free_page(pages[npages]);
136 87
137 drm_free_large(pages); 88 return roundup(size, PAGE_SIZE);
138} 89}
139 90
140static int exynos_drm_gem_map_pages(struct drm_gem_object *obj, 91static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
141 struct vm_area_struct *vma, 92 struct vm_area_struct *vma,
142 unsigned long f_vaddr, 93 unsigned long f_vaddr,
143 pgoff_t page_offset) 94 pgoff_t page_offset)
144{ 95{
145 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); 96 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
146 struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer; 97 struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
98 struct scatterlist *sgl;
147 unsigned long pfn; 99 unsigned long pfn;
100 int i;
148 101
149 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { 102 if (!buf->sgt)
150 if (!buf->pages) 103 return -EINTR;
151 return -EINTR;
152
153 pfn = page_to_pfn(buf->pages[page_offset++]);
154 } else
155 pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
156
157 return vm_insert_mixed(vma, f_vaddr, pfn);
158}
159 104
160static int exynos_drm_gem_get_pages(struct drm_gem_object *obj) 105 if (page_offset >= (buf->size >> PAGE_SHIFT)) {
161{ 106 DRM_ERROR("invalid page offset\n");
162 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
163 struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
164 struct scatterlist *sgl;
165 struct page **pages;
166 unsigned int npages, i = 0;
167 int ret;
168
169 if (buf->pages) {
170 DRM_DEBUG_KMS("already allocated.\n");
171 return -EINVAL; 107 return -EINVAL;
172 } 108 }
173 109
174 pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
175 if (IS_ERR(pages)) {
176 DRM_ERROR("failed to get pages.\n");
177 return PTR_ERR(pages);
178 }
179
180 npages = obj->size >> PAGE_SHIFT;
181 buf->page_size = PAGE_SIZE;
182
183 buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
184 if (!buf->sgt) {
185 DRM_ERROR("failed to allocate sg table.\n");
186 ret = -ENOMEM;
187 goto err;
188 }
189
190 ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
191 if (ret < 0) {
192 DRM_ERROR("failed to initialize sg table.\n");
193 ret = -EFAULT;
194 goto err1;
195 }
196
197 sgl = buf->sgt->sgl; 110 sgl = buf->sgt->sgl;
198 111 for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
199 /* set all pages to sg list. */ 112 if (page_offset < (sgl->length >> PAGE_SHIFT))
200 while (i < npages) { 113 break;
201 sg_set_page(sgl, pages[i], PAGE_SIZE, 0); 114 page_offset -= (sgl->length >> PAGE_SHIFT);
202 sg_dma_address(sgl) = page_to_phys(pages[i]);
203 i++;
204 sgl = sg_next(sgl);
205 } 115 }
206 116
207 /* add some codes for UNCACHED type here. TODO */ 117 pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
208
209 buf->pages = pages;
210 return ret;
211err1:
212 kfree(buf->sgt);
213 buf->sgt = NULL;
214err:
215 exynos_gem_put_pages(obj, pages);
216 return ret;
217
218}
219
220static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
221{
222 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
223 struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
224
225 /*
226 * if buffer typs is EXYNOS_BO_NONCONTIG then release all pages
227 * allocated at gem fault handler.
228 */
229 sg_free_table(buf->sgt);
230 kfree(buf->sgt);
231 buf->sgt = NULL;
232
233 exynos_gem_put_pages(obj, buf->pages);
234 buf->pages = NULL;
235 118
236 /* add some codes for UNCACHED type here. TODO */ 119 return vm_insert_mixed(vma, f_vaddr, pfn);
237} 120}
238 121
239static int exynos_drm_gem_handle_create(struct drm_gem_object *obj, 122static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
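[Editor's note] The new exynos_drm_gem_map_buf() above resolves a fault offset by walking the buffer's sg_table until it reaches the entry containing the faulting page, then computes the pfn inside that entry. A standalone sketch of the same walk (the example_ name is illustrative; __phys_to_pfn() is the ARM helper this driver relies on):

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <asm/memory.h>

static unsigned long example_offset_to_pfn(struct sg_table *sgt,
					   pgoff_t page_offset)
{
	struct scatterlist *sgl;
	int i;

	for_each_sg(sgt->sgl, sgl, sgt->nents, i) {
		unsigned int nr = sgl->length >> PAGE_SHIFT;

		/* found the entry covering the requested page. */
		if (page_offset < nr)
			return __phys_to_pfn(sg_phys(sgl)) + page_offset;
		page_offset -= nr;
	}

	return 0;	/* offset past the end of the table */
}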
@@ -270,9 +153,6 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
270 153
271 DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count)); 154 DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
272 155
273 if (!buf->pages)
274 return;
275
276 /* 156 /*
277 * do not release memory region from exporter. 157 * do not release memory region from exporter.
278 * 158 *
@@ -282,10 +162,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
282 if (obj->import_attach) 162 if (obj->import_attach)
283 goto out; 163 goto out;
284 164
285 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) 165 exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
286 exynos_drm_gem_put_pages(obj);
287 else
288 exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
289 166
290out: 167out:
291 exynos_drm_fini_buf(obj->dev, buf); 168 exynos_drm_fini_buf(obj->dev, buf);
@@ -364,22 +241,10 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
364 /* set memory type and cache attribute from user side. */ 241 /* set memory type and cache attribute from user side. */
365 exynos_gem_obj->flags = flags; 242 exynos_gem_obj->flags = flags;
366 243
367 /* 244 ret = exynos_drm_alloc_buf(dev, buf, flags);
368 * allocate all pages as desired size if user wants to allocate 245 if (ret < 0) {
369 * physically non-continuous memory. 246 drm_gem_object_release(&exynos_gem_obj->base);
370 */ 247 goto err_fini_buf;
371 if (flags & EXYNOS_BO_NONCONTIG) {
372 ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
373 if (ret < 0) {
374 drm_gem_object_release(&exynos_gem_obj->base);
375 goto err_fini_buf;
376 }
377 } else {
378 ret = exynos_drm_alloc_buf(dev, buf, flags);
379 if (ret < 0) {
380 drm_gem_object_release(&exynos_gem_obj->base);
381 goto err_fini_buf;
382 }
383 } 248 }
384 249
385 return exynos_gem_obj; 250 return exynos_gem_obj;
@@ -412,14 +277,14 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
412 return 0; 277 return 0;
413} 278}
414 279
415void *exynos_drm_gem_get_dma_addr(struct drm_device *dev, 280dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
416 unsigned int gem_handle, 281 unsigned int gem_handle,
417 struct drm_file *file_priv) 282 struct drm_file *filp)
418{ 283{
419 struct exynos_drm_gem_obj *exynos_gem_obj; 284 struct exynos_drm_gem_obj *exynos_gem_obj;
420 struct drm_gem_object *obj; 285 struct drm_gem_object *obj;
421 286
422 obj = drm_gem_object_lookup(dev, file_priv, gem_handle); 287 obj = drm_gem_object_lookup(dev, filp, gem_handle);
423 if (!obj) { 288 if (!obj) {
424 DRM_ERROR("failed to lookup gem object.\n"); 289 DRM_ERROR("failed to lookup gem object.\n");
425 return ERR_PTR(-EINVAL); 290 return ERR_PTR(-EINVAL);
@@ -427,25 +292,17 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
427 292
428 exynos_gem_obj = to_exynos_gem_obj(obj); 293 exynos_gem_obj = to_exynos_gem_obj(obj);
429 294
430 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
431 DRM_DEBUG_KMS("not support NONCONTIG type.\n");
432 drm_gem_object_unreference_unlocked(obj);
433
434 /* TODO */
435 return ERR_PTR(-EINVAL);
436 }
437
438 return &exynos_gem_obj->buffer->dma_addr; 295 return &exynos_gem_obj->buffer->dma_addr;
439} 296}
440 297
441void exynos_drm_gem_put_dma_addr(struct drm_device *dev, 298void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
442 unsigned int gem_handle, 299 unsigned int gem_handle,
443 struct drm_file *file_priv) 300 struct drm_file *filp)
444{ 301{
445 struct exynos_drm_gem_obj *exynos_gem_obj; 302 struct exynos_drm_gem_obj *exynos_gem_obj;
446 struct drm_gem_object *obj; 303 struct drm_gem_object *obj;
447 304
448 obj = drm_gem_object_lookup(dev, file_priv, gem_handle); 305 obj = drm_gem_object_lookup(dev, filp, gem_handle);
449 if (!obj) { 306 if (!obj) {
450 DRM_ERROR("failed to lookup gem object.\n"); 307 DRM_ERROR("failed to lookup gem object.\n");
451 return; 308 return;
@@ -453,14 +310,6 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
453 310
454 exynos_gem_obj = to_exynos_gem_obj(obj); 311 exynos_gem_obj = to_exynos_gem_obj(obj);
455 312
456 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
457 DRM_DEBUG_KMS("not support NONCONTIG type.\n");
458 drm_gem_object_unreference_unlocked(obj);
459
460 /* TODO */
461 return;
462 }
463
464 drm_gem_object_unreference_unlocked(obj); 313 drm_gem_object_unreference_unlocked(obj);
465 314
466 /* 315 /*
@@ -489,22 +338,57 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
489 &args->offset); 338 &args->offset);
490} 339}
491 340
341static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
342 struct file *filp)
343{
344 struct drm_file *file_priv;
345
346 mutex_lock(&drm_dev->struct_mutex);
347
348 /* find current process's drm_file from filelist. */
349 list_for_each_entry(file_priv, &drm_dev->filelist, lhead) {
350 if (file_priv->filp == filp) {
351 mutex_unlock(&drm_dev->struct_mutex);
352 return file_priv;
353 }
354 }
355
356 mutex_unlock(&drm_dev->struct_mutex);
357 WARN_ON(1);
358
359 return ERR_PTR(-EFAULT);
360}
361
492static int exynos_drm_gem_mmap_buffer(struct file *filp, 362static int exynos_drm_gem_mmap_buffer(struct file *filp,
493 struct vm_area_struct *vma) 363 struct vm_area_struct *vma)
494{ 364{
495 struct drm_gem_object *obj = filp->private_data; 365 struct drm_gem_object *obj = filp->private_data;
496 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); 366 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
367 struct drm_device *drm_dev = obj->dev;
497 struct exynos_drm_gem_buf *buffer; 368 struct exynos_drm_gem_buf *buffer;
498 unsigned long pfn, vm_size, usize, uaddr = vma->vm_start; 369 struct drm_file *file_priv;
370 unsigned long vm_size;
499 int ret; 371 int ret;
500 372
501 DRM_DEBUG_KMS("%s\n", __FILE__); 373 DRM_DEBUG_KMS("%s\n", __FILE__);
502 374
503 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; 375 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
376 vma->vm_private_data = obj;
377 vma->vm_ops = drm_dev->driver->gem_vm_ops;
378
379 /* restore it to driver's fops. */
380 filp->f_op = fops_get(drm_dev->driver->fops);
381
382 file_priv = exynos_drm_find_drm_file(drm_dev, filp);
383 if (IS_ERR(file_priv))
384 return PTR_ERR(file_priv);
385
386 /* restore it to drm_file. */
387 filp->private_data = file_priv;
504 388
505 update_vm_cache_attr(exynos_gem_obj, vma); 389 update_vm_cache_attr(exynos_gem_obj, vma);
506 390
507 vm_size = usize = vma->vm_end - vma->vm_start; 391 vm_size = vma->vm_end - vma->vm_start;
508 392
509 /* 393 /*
510 * a buffer contains information to physically contiguous memory 394
@@ -516,40 +400,23 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
516 if (vm_size > buffer->size) 400 if (vm_size > buffer->size)
517 return -EINVAL; 401 return -EINVAL;
518 402
519 if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { 403 ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
520 int i = 0; 404 buffer->dma_addr, buffer->size,
521 405 &buffer->dma_attrs);
522 if (!buffer->pages) 406 if (ret < 0) {
523 return -EINVAL; 407 DRM_ERROR("failed to mmap.\n");
408 return ret;
409 }
524 410
525 vma->vm_flags |= VM_MIXEDMAP; 411 /*
 412 * take a reference to this mapping of the object; the reference
 413 * is dropped by the corresponding vm_close call.
414 */
415 drm_gem_object_reference(obj);
526 416
527 do { 417 mutex_lock(&drm_dev->struct_mutex);
528 ret = vm_insert_page(vma, uaddr, buffer->pages[i++]); 418 drm_vm_open_locked(drm_dev, vma);
529 if (ret) { 419 mutex_unlock(&drm_dev->struct_mutex);
530 DRM_ERROR("failed to remap user space.\n");
531 return ret;
532 }
533
534 uaddr += PAGE_SIZE;
535 usize -= PAGE_SIZE;
536 } while (usize > 0);
537 } else {
538 /*
539 * get page frame number to physical memory to be mapped
540 * to user space.
541 */
542 pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
543 PAGE_SHIFT;
544
545 DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
546
547 if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
548 vma->vm_page_prot)) {
549 DRM_ERROR("failed to remap pfn range.\n");
550 return -EAGAIN;
551 }
552 }
553 420
554 return 0; 421 return 0;
555} 422}
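[Editor's note] The rewritten mmap path delegates to dma_mmap_attrs(), so the old NONCONTIG/contiguous split disappears: the DMA-mapping backend picks the right insertion method from the attributes used at allocation time. A minimal sketch of that call, assuming cpu_addr/dma_addr/attrs all come from the matching dma_alloc_attrs():

#include <linux/dma-mapping.h>

static int example_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			struct dma_attrs *attrs)
{
	/* refuse mappings larger than the backing allocation. */
	if (vma->vm_end - vma->vm_start > size)
		return -EINVAL;

	/* attrs must match those passed to dma_alloc_attrs(). */
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
}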
@@ -578,16 +445,29 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
578 return -EINVAL; 445 return -EINVAL;
579 } 446 }
580 447
581 obj->filp->f_op = &exynos_drm_gem_fops; 448 /*
582 obj->filp->private_data = obj; 449 * Set specific mmper's fops. And it will be restored by
450 * exynos_drm_gem_mmap_buffer to dev->driver->fops.
451 * This is used to call specific mapper temporarily.
452 */
453 file_priv->filp->f_op = &exynos_drm_gem_fops;
583 454
584 addr = vm_mmap(obj->filp, 0, args->size, 455 /*
 456 * Set the gem object to private_data so that the specific mapper
 457 * can get the gem object; it will be restored by
 458 * exynos_drm_gem_mmap_buffer to the drm_file.
459 */
460 file_priv->filp->private_data = obj;
461
462 addr = vm_mmap(file_priv->filp, 0, args->size,
585 PROT_READ | PROT_WRITE, MAP_SHARED, 0); 463 PROT_READ | PROT_WRITE, MAP_SHARED, 0);
586 464
587 drm_gem_object_unreference_unlocked(obj); 465 drm_gem_object_unreference_unlocked(obj);
588 466
589 if (IS_ERR((void *)addr)) 467 if (IS_ERR((void *)addr)) {
468 file_priv->filp->private_data = file_priv;
590 return PTR_ERR((void *)addr); 469 return PTR_ERR((void *)addr);
470 }
591 471
592 args->mapped = addr; 472 args->mapped = addr;
593 473
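[Editor's note] The ioctl above relies on a temporary f_op swap: the DRM file's fops and private_data are redirected so that vm_mmap() lands in the GEM-specific mmap handler, which then restores both (as exynos_drm_gem_mmap_buffer does). A condensed sketch of the trick, with illustrative names and error handling trimmed:

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>

static unsigned long example_mmap_via_custom_fops(struct file *filp,
				const struct file_operations *custom_fops,
				void *obj, size_t size)
{
	/* redirect so custom_fops->mmap runs under vm_mmap(). */
	filp->f_op = custom_fops;
	filp->private_data = obj;

	/*
	 * the custom mmap handler must restore f_op (via fops_get() on
	 * the driver fops) and private_data before returning.
	 */
	return vm_mmap(filp, 0, size, PROT_READ | PROT_WRITE, MAP_SHARED, 0);
}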
@@ -622,6 +502,129 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
622 return 0; 502 return 0;
623} 503}
624 504
505struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
506{
507 struct vm_area_struct *vma_copy;
508
509 vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
510 if (!vma_copy)
511 return NULL;
512
513 if (vma->vm_ops && vma->vm_ops->open)
514 vma->vm_ops->open(vma);
515
516 if (vma->vm_file)
517 get_file(vma->vm_file);
518
519 memcpy(vma_copy, vma, sizeof(*vma));
520
521 vma_copy->vm_mm = NULL;
522 vma_copy->vm_next = NULL;
523 vma_copy->vm_prev = NULL;
524
525 return vma_copy;
526}
527
528void exynos_gem_put_vma(struct vm_area_struct *vma)
529{
530 if (!vma)
531 return;
532
533 if (vma->vm_ops && vma->vm_ops->close)
534 vma->vm_ops->close(vma);
535
536 if (vma->vm_file)
537 fput(vma->vm_file);
538
539 kfree(vma);
540}
541
542int exynos_gem_get_pages_from_userptr(unsigned long start,
543 unsigned int npages,
544 struct page **pages,
545 struct vm_area_struct *vma)
546{
547 int get_npages;
548
 549 /* the memory region mapped with VM_PFNMAP. */
550 if (vma_is_io(vma)) {
551 unsigned int i;
552
553 for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
554 unsigned long pfn;
555 int ret = follow_pfn(vma, start, &pfn);
556 if (ret)
557 return ret;
558
559 pages[i] = pfn_to_page(pfn);
560 }
561
562 if (i != npages) {
563 DRM_ERROR("failed to get user_pages.\n");
564 return -EINVAL;
565 }
566
567 return 0;
568 }
569
570 get_npages = get_user_pages(current, current->mm, start,
571 npages, 1, 1, pages, NULL);
572 get_npages = max(get_npages, 0);
573 if (get_npages != npages) {
574 DRM_ERROR("failed to get user_pages.\n");
575 while (get_npages)
576 put_page(pages[--get_npages]);
577 return -EFAULT;
578 }
579
580 return 0;
581}
582
583void exynos_gem_put_pages_to_userptr(struct page **pages,
584 unsigned int npages,
585 struct vm_area_struct *vma)
586{
587 if (!vma_is_io(vma)) {
588 unsigned int i;
589
590 for (i = 0; i < npages; i++) {
591 set_page_dirty_lock(pages[i]);
592
593 /*
594 * undo the reference we took when populating
595 * the table.
596 */
597 put_page(pages[i]);
598 }
599 }
600}
601
602int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
603 struct sg_table *sgt,
604 enum dma_data_direction dir)
605{
606 int nents;
607
608 mutex_lock(&drm_dev->struct_mutex);
609
610 nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
611 if (!nents) {
612 DRM_ERROR("failed to map sgl with dma.\n");
613 mutex_unlock(&drm_dev->struct_mutex);
614 return nents;
615 }
616
617 mutex_unlock(&drm_dev->struct_mutex);
618 return 0;
619}
620
621void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
622 struct sg_table *sgt,
623 enum dma_data_direction dir)
624{
625 dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
626}
627
625int exynos_drm_gem_init_object(struct drm_gem_object *obj) 628int exynos_drm_gem_init_object(struct drm_gem_object *obj)
626{ 629{
627 DRM_DEBUG_KMS("%s\n", __FILE__); 630 DRM_DEBUG_KMS("%s\n", __FILE__);
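[Editor's note] exynos_gem_get_pages_from_userptr() above handles two cases: VM_PFNMAP regions are resolved pfn by pfn via follow_pfn(), and normal mappings are pinned with get_user_pages(). A sketch of the pinning half under the 3.x-era signature, taking mmap_sem locally for self-containment (the example_ name is illustrative):

#include <linux/mm.h>
#include <linux/sched.h>

static int example_pin_user_range(unsigned long start, unsigned int npages,
				  struct page **pages)
{
	int got;

	down_read(&current->mm->mmap_sem);
	/* write=1, force=1 matches the driver's usage above. */
	got = get_user_pages(current, current->mm, start, npages,
			     1, 1, pages, NULL);
	up_read(&current->mm->mmap_sem);

	if (got < 0)
		return got;

	if (got != npages) {
		/* partial pin: drop what we got and fail, as the driver does. */
		while (got)
			put_page(pages[--got]);
		return -EFAULT;
	}

	return 0;
}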
@@ -753,9 +756,9 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
753 756
754 mutex_lock(&dev->struct_mutex); 757 mutex_lock(&dev->struct_mutex);
755 758
756 ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset); 759 ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
757 if (ret < 0) 760 if (ret < 0)
758 DRM_ERROR("failed to map pages.\n"); 761 DRM_ERROR("failed to map a buffer with user.\n");
759 762
760 mutex_unlock(&dev->struct_mutex); 763 mutex_unlock(&dev->struct_mutex);
761 764
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 085b2a5d5f70..f11f2afd5bfc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -35,21 +35,27 @@
35 * exynos drm gem buffer structure. 35 * exynos drm gem buffer structure.
36 * 36 *
37 * @kvaddr: kernel virtual address to allocated memory region. 37 * @kvaddr: kernel virtual address to allocated memory region.
 38 * @userptr: user space address.
38 * @dma_addr: bus address(accessed by dma) to allocated memory region. 39 * @dma_addr: bus address(accessed by dma) to allocated memory region.
39 * - this address could be physical address without IOMMU and 40 * - this address could be physical address without IOMMU and
40 * device address with IOMMU. 41 * device address with IOMMU.
42 * @write: whether pages will be written to by the caller.
43 * @pages: Array of backing pages.
41 * @sgt: sg table to transfer page data. 44 * @sgt: sg table to transfer page data.
42 * @pages: contain all pages to allocated memory region.
43 * @page_size: could be 4K, 64K or 1MB.
44 * @size: size of allocated memory region. 45 * @size: size of allocated memory region.
 46 * @pfnmap: indicates whether the memory region from userptr is mapped
 47 * with VM_PFNMAP or not.
45 */ 48 */
46struct exynos_drm_gem_buf { 49struct exynos_drm_gem_buf {
47 void __iomem *kvaddr; 50 void __iomem *kvaddr;
51 unsigned long userptr;
48 dma_addr_t dma_addr; 52 dma_addr_t dma_addr;
49 struct sg_table *sgt; 53 struct dma_attrs dma_attrs;
54 unsigned int write;
50 struct page **pages; 55 struct page **pages;
51 unsigned long page_size; 56 struct sg_table *sgt;
52 unsigned long size; 57 unsigned long size;
58 bool pfnmap;
53}; 59};
54 60
55/* 61/*
@@ -65,6 +71,7 @@ struct exynos_drm_gem_buf {
65 * or at framebuffer creation. 71 * or at framebuffer creation.
66 * @size: size requested from user, in bytes and this size is aligned 72 * @size: size requested from user, in bytes and this size is aligned
67 * in page unit. 73 * in page unit.
74 * @vma: a pointer to vm_area.
 68 * @flags: indicates the memory type and cache attribute of the allocated buffer. 75
69 * 76 *
 70 * P.S. this object would be transferred to user as kms_bo.handle so 77
@@ -74,6 +81,7 @@ struct exynos_drm_gem_obj {
74 struct drm_gem_object base; 81 struct drm_gem_object base;
75 struct exynos_drm_gem_buf *buffer; 82 struct exynos_drm_gem_buf *buffer;
76 unsigned long size; 83 unsigned long size;
84 struct vm_area_struct *vma;
77 unsigned int flags; 85 unsigned int flags;
78}; 86};
79 87
@@ -104,9 +112,9 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
104 * other drivers such as 2d/3d acceleration drivers. 112 * other drivers such as 2d/3d acceleration drivers.
105 * with this function call, gem object reference count would be increased. 113 * with this function call, gem object reference count would be increased.
106 */ 114 */
107void *exynos_drm_gem_get_dma_addr(struct drm_device *dev, 115dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
108 unsigned int gem_handle, 116 unsigned int gem_handle,
109 struct drm_file *file_priv); 117 struct drm_file *filp);
110 118
111/* 119/*
112 * put dma address from gem handle and this function could be used for 120 * put dma address from gem handle and this function could be used for
@@ -115,7 +123,7 @@ void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
115 */ 123 */
116void exynos_drm_gem_put_dma_addr(struct drm_device *dev, 124void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
117 unsigned int gem_handle, 125 unsigned int gem_handle,
118 struct drm_file *file_priv); 126 struct drm_file *filp);
119 127
120/* get buffer offset to map to user space. */ 128/* get buffer offset to map to user space. */
121int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data, 129int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
@@ -128,6 +136,10 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
128int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data, 136int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
129 struct drm_file *file_priv); 137 struct drm_file *file_priv);
130 138
 139/* map user space memory allocated by malloc to pages. */
140int exynos_drm_gem_userptr_ioctl(struct drm_device *dev, void *data,
141 struct drm_file *file_priv);
142
131/* get buffer information to memory region allocated by gem. */ 143/* get buffer information to memory region allocated by gem. */
132int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data, 144int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
133 struct drm_file *file_priv); 145 struct drm_file *file_priv);
@@ -163,4 +175,36 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
163/* set vm_flags and we can change the vm attribute to other one at here. */ 175/* set vm_flags and we can change the vm attribute to other one at here. */
164int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); 176int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
165 177
178static inline int vma_is_io(struct vm_area_struct *vma)
179{
180 return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
181}
182
183/* get a copy of a virtual memory region. */
184struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma);
185
186/* release a userspace virtual memory area. */
187void exynos_gem_put_vma(struct vm_area_struct *vma);
188
189/* get pages from user space. */
190int exynos_gem_get_pages_from_userptr(unsigned long start,
191 unsigned int npages,
192 struct page **pages,
193 struct vm_area_struct *vma);
194
195/* drop the reference to pages. */
196void exynos_gem_put_pages_to_userptr(struct page **pages,
197 unsigned int npages,
198 struct vm_area_struct *vma);
199
200/* map sgt with dma region. */
201int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
202 struct sg_table *sgt,
203 enum dma_data_direction dir);
204
205/* unmap sgt from dma region. */
206void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
207 struct sg_table *sgt,
208 enum dma_data_direction dir);
209
166#endif 210#endif
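[Editor's note] An end-to-end sketch of how a consumer (e.g. the g2d userptr path) is expected to use the helpers declared above: pin the user pages, wrap them in an sg_table, and map that table for DMA. This assumes the helpers behave as documented and that the map helper returns a negative errno on failure; error handling is trimmed to the essentials.

#include <drm/drmP.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/dma-direction.h>
#include "exynos_drm_gem.h"

static struct sg_table *example_map_userptr(struct drm_device *drm_dev,
					    struct vm_area_struct *vma,
					    unsigned long start,
					    unsigned int npages,
					    struct page **pages)
{
	struct sg_table *sgt;

	if (exynos_gem_get_pages_from_userptr(start, npages, pages, vma))
		return NULL;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		goto err_put_pages;

	/* build one sg_table over the pinned pages. */
	if (sg_alloc_table_from_pages(sgt, pages, npages, 0,
				      (unsigned long)npages << PAGE_SHIFT,
				      GFP_KERNEL))
		goto err_free_sgt;

	if (exynos_gem_map_sgt_with_dma(drm_dev, sgt, DMA_BIDIRECTIONAL) < 0)
		goto err_free_table;

	return sgt;

err_free_table:
	sg_free_table(sgt);
err_free_sgt:
	kfree(sgt);
err_put_pages:
	exynos_gem_put_pages_to_userptr(pages, npages, vma);
	return NULL;
}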
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
new file mode 100644
index 000000000000..5639353d47b9
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -0,0 +1,1870 @@
1/*
2 * Copyright (C) 2012 Samsung Electronics Co.Ltd
3 * Authors:
4 * Eunchul Kim <chulspro.kim@samsung.com>
5 * Jinyoung Jeon <jy0.jeon@samsung.com>
6 * Sangmin Lee <lsmin.lee@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 */
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/clk.h>
18#include <linux/pm_runtime.h>
19#include <plat/map-base.h>
20
21#include <drm/drmP.h>
22#include <drm/exynos_drm.h>
23#include "regs-gsc.h"
24#include "exynos_drm_ipp.h"
25#include "exynos_drm_gsc.h"
26
27/*
 28 * GSC stands for General SCaler and
 29 * supports image scaling/rotation and input/output DMA operations.
 30 * The input DMA reads image data from memory.
 31 * The output DMA writes image data to memory.
32 * GSC supports image rotation and image effect functions.
33 *
34 * M2M operation : supports crop/scale/rotation/csc so on.
35 * Memory ----> GSC H/W ----> Memory.
36 * Writeback operation : supports cloned screen with FIMD.
37 * FIMD ----> GSC H/W ----> Memory.
38 * Output operation : supports direct display using local path.
39 * Memory ----> GSC H/W ----> FIMD, Mixer.
40 */
41
42/*
43 * TODO
44 * 1. check suspend/resume api if needed.
45 * 2. need to check use case platform_device_id.
 46 * 3. check src/dst size width, height.
 47 * 4. add a check_prepare api for correct register setup.
48 * 5. need to add supported list in prop_list.
49 * 6. check prescaler/scaler optimization.
50 */
51
52#define GSC_MAX_DEVS 4
53#define GSC_MAX_SRC 4
54#define GSC_MAX_DST 16
55#define GSC_RESET_TIMEOUT 50
56#define GSC_BUF_STOP 1
57#define GSC_BUF_START 2
58#define GSC_REG_SZ 16
59#define GSC_WIDTH_ITU_709 1280
60#define GSC_SC_UP_MAX_RATIO 65536
61#define GSC_SC_DOWN_RATIO_7_8 74898
62#define GSC_SC_DOWN_RATIO_6_8 87381
63#define GSC_SC_DOWN_RATIO_5_8 104857
64#define GSC_SC_DOWN_RATIO_4_8 131072
65#define GSC_SC_DOWN_RATIO_3_8 174762
66#define GSC_SC_DOWN_RATIO_2_8 262144
67#define GSC_REFRESH_MIN 12
68#define GSC_REFRESH_MAX 60
69#define GSC_CROP_MAX 8192
70#define GSC_CROP_MIN 32
71#define GSC_SCALE_MAX 4224
72#define GSC_SCALE_MIN 32
73#define GSC_COEF_RATIO 7
74#define GSC_COEF_PHASE 9
75#define GSC_COEF_ATTR 16
76#define GSC_COEF_H_8T 8
77#define GSC_COEF_V_4T 4
78#define GSC_COEF_DEPTH 3
79
80#define get_gsc_context(dev) platform_get_drvdata(to_platform_device(dev))
81#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
82 struct gsc_context, ippdrv);
83#define gsc_read(offset) readl(ctx->regs + (offset))
84#define gsc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
85
86/*
87 * A structure of scaler.
88 *
89 * @range: narrow, wide.
 90 * @pre_shfactor: pre scaler shift factor.
91 * @pre_hratio: horizontal ratio of the prescaler.
92 * @pre_vratio: vertical ratio of the prescaler.
93 * @main_hratio: the main scaler's horizontal ratio.
94 * @main_vratio: the main scaler's vertical ratio.
95 */
96struct gsc_scaler {
97 bool range;
98 u32 pre_shfactor;
99 u32 pre_hratio;
100 u32 pre_vratio;
101 unsigned long main_hratio;
102 unsigned long main_vratio;
103};
104
105/*
106 * A structure of scaler capability.
107 *
 108 * see user manual section 49.2 (features).
109 * @tile_w: tile mode or rotation width.
110 * @tile_h: tile mode or rotation height.
111 * @w: other cases width.
112 * @h: other cases height.
113 */
114struct gsc_capability {
115 /* tile or rotation */
116 u32 tile_w;
117 u32 tile_h;
118 /* other cases */
119 u32 w;
120 u32 h;
121};
122
123/*
124 * A structure of gsc context.
125 *
126 * @ippdrv: prepare initialization using ippdrv.
127 * @regs_res: register resources.
128 * @regs: memory mapped io registers.
129 * @lock: locking of operations.
130 * @gsc_clk: gsc gate clock.
 131 * @sc: scaler information.
132 * @id: gsc id.
133 * @irq: irq number.
134 * @rotation: supports rotation of src.
 135 * @suspended: suspend state (used for qos operations).
136 */
137struct gsc_context {
138 struct exynos_drm_ippdrv ippdrv;
139 struct resource *regs_res;
140 void __iomem *regs;
141 struct mutex lock;
142 struct clk *gsc_clk;
143 struct gsc_scaler sc;
144 int id;
145 int irq;
146 bool rotation;
147 bool suspended;
148};
149
150/* 8-tap Filter Coefficient */
151static const int h_coef_8t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_H_8T] = {
152 { /* Ratio <= 65536 (~8:8) */
153 { 0, 0, 0, 128, 0, 0, 0, 0 },
154 { -1, 2, -6, 127, 7, -2, 1, 0 },
155 { -1, 4, -12, 125, 16, -5, 1, 0 },
156 { -1, 5, -15, 120, 25, -8, 2, 0 },
157 { -1, 6, -18, 114, 35, -10, 3, -1 },
158 { -1, 6, -20, 107, 46, -13, 4, -1 },
159 { -2, 7, -21, 99, 57, -16, 5, -1 },
160 { -1, 6, -20, 89, 68, -18, 5, -1 },
161 { -1, 6, -20, 79, 79, -20, 6, -1 },
162 { -1, 5, -18, 68, 89, -20, 6, -1 },
163 { -1, 5, -16, 57, 99, -21, 7, -2 },
164 { -1, 4, -13, 46, 107, -20, 6, -1 },
165 { -1, 3, -10, 35, 114, -18, 6, -1 },
166 { 0, 2, -8, 25, 120, -15, 5, -1 },
167 { 0, 1, -5, 16, 125, -12, 4, -1 },
168 { 0, 1, -2, 7, 127, -6, 2, -1 }
169 }, { /* 65536 < Ratio <= 74898 (~8:7) */
170 { 3, -8, 14, 111, 13, -8, 3, 0 },
171 { 2, -6, 7, 112, 21, -10, 3, -1 },
172 { 2, -4, 1, 110, 28, -12, 4, -1 },
173 { 1, -2, -3, 106, 36, -13, 4, -1 },
174 { 1, -1, -7, 103, 44, -15, 4, -1 },
175 { 1, 1, -11, 97, 53, -16, 4, -1 },
176 { 0, 2, -13, 91, 61, -16, 4, -1 },
177 { 0, 3, -15, 85, 69, -17, 4, -1 },
178 { 0, 3, -16, 77, 77, -16, 3, 0 },
179 { -1, 4, -17, 69, 85, -15, 3, 0 },
180 { -1, 4, -16, 61, 91, -13, 2, 0 },
181 { -1, 4, -16, 53, 97, -11, 1, 1 },
182 { -1, 4, -15, 44, 103, -7, -1, 1 },
183 { -1, 4, -13, 36, 106, -3, -2, 1 },
184 { -1, 4, -12, 28, 110, 1, -4, 2 },
185 { -1, 3, -10, 21, 112, 7, -6, 2 }
186 }, { /* 74898 < Ratio <= 87381 (~8:6) */
187 { 2, -11, 25, 96, 25, -11, 2, 0 },
188 { 2, -10, 19, 96, 31, -12, 2, 0 },
189 { 2, -9, 14, 94, 37, -12, 2, 0 },
190 { 2, -8, 10, 92, 43, -12, 1, 0 },
191 { 2, -7, 5, 90, 49, -12, 1, 0 },
192 { 2, -5, 1, 86, 55, -12, 0, 1 },
193 { 2, -4, -2, 82, 61, -11, -1, 1 },
194 { 1, -3, -5, 77, 67, -9, -1, 1 },
195 { 1, -2, -7, 72, 72, -7, -2, 1 },
196 { 1, -1, -9, 67, 77, -5, -3, 1 },
197 { 1, -1, -11, 61, 82, -2, -4, 2 },
198 { 1, 0, -12, 55, 86, 1, -5, 2 },
199 { 0, 1, -12, 49, 90, 5, -7, 2 },
200 { 0, 1, -12, 43, 92, 10, -8, 2 },
201 { 0, 2, -12, 37, 94, 14, -9, 2 },
202 { 0, 2, -12, 31, 96, 19, -10, 2 }
203 }, { /* 87381 < Ratio <= 104857 (~8:5) */
204 { -1, -8, 33, 80, 33, -8, -1, 0 },
205 { -1, -8, 28, 80, 37, -7, -2, 1 },
206 { 0, -8, 24, 79, 41, -7, -2, 1 },
207 { 0, -8, 20, 78, 46, -6, -3, 1 },
208 { 0, -8, 16, 76, 50, -4, -3, 1 },
209 { 0, -7, 13, 74, 54, -3, -4, 1 },
210 { 1, -7, 10, 71, 58, -1, -5, 1 },
211 { 1, -6, 6, 68, 62, 1, -5, 1 },
212 { 1, -6, 4, 65, 65, 4, -6, 1 },
213 { 1, -5, 1, 62, 68, 6, -6, 1 },
214 { 1, -5, -1, 58, 71, 10, -7, 1 },
215 { 1, -4, -3, 54, 74, 13, -7, 0 },
216 { 1, -3, -4, 50, 76, 16, -8, 0 },
217 { 1, -3, -6, 46, 78, 20, -8, 0 },
218 { 1, -2, -7, 41, 79, 24, -8, 0 },
219 { 1, -2, -7, 37, 80, 28, -8, -1 }
220 }, { /* 104857 < Ratio <= 131072 (~8:4) */
221 { -3, 0, 35, 64, 35, 0, -3, 0 },
222 { -3, -1, 32, 64, 38, 1, -3, 0 },
223 { -2, -2, 29, 63, 41, 2, -3, 0 },
224 { -2, -3, 27, 63, 43, 4, -4, 0 },
225 { -2, -3, 24, 61, 46, 6, -4, 0 },
226 { -2, -3, 21, 60, 49, 7, -4, 0 },
227 { -1, -4, 19, 59, 51, 9, -4, -1 },
228 { -1, -4, 16, 57, 53, 12, -4, -1 },
229 { -1, -4, 14, 55, 55, 14, -4, -1 },
230 { -1, -4, 12, 53, 57, 16, -4, -1 },
231 { -1, -4, 9, 51, 59, 19, -4, -1 },
232 { 0, -4, 7, 49, 60, 21, -3, -2 },
233 { 0, -4, 6, 46, 61, 24, -3, -2 },
234 { 0, -4, 4, 43, 63, 27, -3, -2 },
235 { 0, -3, 2, 41, 63, 29, -2, -2 },
236 { 0, -3, 1, 38, 64, 32, -1, -3 }
237 }, { /* 131072 < Ratio <= 174762 (~8:3) */
238 { -1, 8, 33, 48, 33, 8, -1, 0 },
239 { -1, 7, 31, 49, 35, 9, -1, -1 },
240 { -1, 6, 30, 49, 36, 10, -1, -1 },
241 { -1, 5, 28, 48, 38, 12, -1, -1 },
242 { -1, 4, 26, 48, 39, 13, 0, -1 },
243 { -1, 3, 24, 47, 41, 15, 0, -1 },
244 { -1, 2, 23, 47, 42, 16, 0, -1 },
245 { -1, 2, 21, 45, 43, 18, 1, -1 },
246 { -1, 1, 19, 45, 45, 19, 1, -1 },
247 { -1, 1, 18, 43, 45, 21, 2, -1 },
248 { -1, 0, 16, 42, 47, 23, 2, -1 },
249 { -1, 0, 15, 41, 47, 24, 3, -1 },
250 { -1, 0, 13, 39, 48, 26, 4, -1 },
251 { -1, -1, 12, 38, 48, 28, 5, -1 },
252 { -1, -1, 10, 36, 49, 30, 6, -1 },
253 { -1, -1, 9, 35, 49, 31, 7, -1 }
254 }, { /* 174762 < Ratio <= 262144 (~8:2) */
255 { 2, 13, 30, 38, 30, 13, 2, 0 },
256 { 2, 12, 29, 38, 30, 14, 3, 0 },
257 { 2, 11, 28, 38, 31, 15, 3, 0 },
258 { 2, 10, 26, 38, 32, 16, 4, 0 },
259 { 1, 10, 26, 37, 33, 17, 4, 0 },
260 { 1, 9, 24, 37, 34, 18, 5, 0 },
261 { 1, 8, 24, 37, 34, 19, 5, 0 },
262 { 1, 7, 22, 36, 35, 20, 6, 1 },
263 { 1, 6, 21, 36, 36, 21, 6, 1 },
264 { 1, 6, 20, 35, 36, 22, 7, 1 },
265 { 0, 5, 19, 34, 37, 24, 8, 1 },
266 { 0, 5, 18, 34, 37, 24, 9, 1 },
267 { 0, 4, 17, 33, 37, 26, 10, 1 },
268 { 0, 4, 16, 32, 38, 26, 10, 2 },
269 { 0, 3, 15, 31, 38, 28, 11, 2 },
270 { 0, 3, 14, 30, 38, 29, 12, 2 }
271 }
272};
273
274/* 4-tap Filter Coefficient */
275static const int v_coef_4t[GSC_COEF_RATIO][GSC_COEF_ATTR][GSC_COEF_V_4T] = {
276 { /* Ratio <= 65536 (~8:8) */
277 { 0, 128, 0, 0 },
278 { -4, 127, 5, 0 },
279 { -6, 124, 11, -1 },
280 { -8, 118, 19, -1 },
281 { -8, 111, 27, -2 },
282 { -8, 102, 37, -3 },
283 { -8, 92, 48, -4 },
284 { -7, 81, 59, -5 },
285 { -6, 70, 70, -6 },
286 { -5, 59, 81, -7 },
287 { -4, 48, 92, -8 },
288 { -3, 37, 102, -8 },
289 { -2, 27, 111, -8 },
290 { -1, 19, 118, -8 },
291 { -1, 11, 124, -6 },
292 { 0, 5, 127, -4 }
293 }, { /* 65536 < Ratio <= 74898 (~8:7) */
294 { 8, 112, 8, 0 },
295 { 4, 111, 14, -1 },
296 { 1, 109, 20, -2 },
297 { -2, 105, 27, -2 },
298 { -3, 100, 34, -3 },
299 { -5, 93, 43, -3 },
300 { -5, 86, 51, -4 },
301 { -5, 77, 60, -4 },
302 { -5, 69, 69, -5 },
303 { -4, 60, 77, -5 },
304 { -4, 51, 86, -5 },
305 { -3, 43, 93, -5 },
306 { -3, 34, 100, -3 },
307 { -2, 27, 105, -2 },
308 { -2, 20, 109, 1 },
309 { -1, 14, 111, 4 }
310 }, { /* 74898 < Ratio <= 87381 (~8:6) */
311 { 16, 96, 16, 0 },
312 { 12, 97, 21, -2 },
313 { 8, 96, 26, -2 },
314 { 5, 93, 32, -2 },
315 { 2, 89, 39, -2 },
316 { 0, 84, 46, -2 },
317 { -1, 79, 53, -3 },
318 { -2, 73, 59, -2 },
319 { -2, 66, 66, -2 },
320 { -2, 59, 73, -2 },
321 { -3, 53, 79, -1 },
322 { -2, 46, 84, 0 },
323 { -2, 39, 89, 2 },
324 { -2, 32, 93, 5 },
325 { -2, 26, 96, 8 },
326 { -2, 21, 97, 12 }
327 }, { /* 87381 < Ratio <= 104857 (~8:5) */
328 { 22, 84, 22, 0 },
329 { 18, 85, 26, -1 },
330 { 14, 84, 31, -1 },
331 { 11, 82, 36, -1 },
332 { 8, 79, 42, -1 },
333 { 6, 76, 47, -1 },
334 { 4, 72, 52, 0 },
335 { 2, 68, 58, 0 },
336 { 1, 63, 63, 1 },
337 { 0, 58, 68, 2 },
338 { 0, 52, 72, 4 },
339 { -1, 47, 76, 6 },
340 { -1, 42, 79, 8 },
341 { -1, 36, 82, 11 },
342 { -1, 31, 84, 14 },
343 { -1, 26, 85, 18 }
344 }, { /* 104857 < Ratio <= 131072 (~8:4) */
345 { 26, 76, 26, 0 },
346 { 22, 76, 30, 0 },
347 { 19, 75, 34, 0 },
348 { 16, 73, 38, 1 },
349 { 13, 71, 43, 1 },
350 { 10, 69, 47, 2 },
351 { 8, 66, 51, 3 },
352 { 6, 63, 55, 4 },
353 { 5, 59, 59, 5 },
354 { 4, 55, 63, 6 },
355 { 3, 51, 66, 8 },
356 { 2, 47, 69, 10 },
357 { 1, 43, 71, 13 },
358 { 1, 38, 73, 16 },
359 { 0, 34, 75, 19 },
360 { 0, 30, 76, 22 }
361 }, { /* 131072 < Ratio <= 174762 (~8:3) */
362 { 29, 70, 29, 0 },
363 { 26, 68, 32, 2 },
364 { 23, 67, 36, 2 },
365 { 20, 66, 39, 3 },
366 { 17, 65, 43, 3 },
367 { 15, 63, 46, 4 },
368 { 12, 61, 50, 5 },
369 { 10, 58, 53, 7 },
370 { 8, 56, 56, 8 },
371 { 7, 53, 58, 10 },
372 { 5, 50, 61, 12 },
373 { 4, 46, 63, 15 },
374 { 3, 43, 65, 17 },
375 { 3, 39, 66, 20 },
376 { 2, 36, 67, 23 },
377 { 2, 32, 68, 26 }
378 }, { /* 174762 < Ratio <= 262144 (~8:2) */
379 { 32, 64, 32, 0 },
380 { 28, 63, 34, 3 },
381 { 25, 62, 37, 4 },
382 { 22, 62, 40, 4 },
383 { 19, 61, 43, 5 },
384 { 17, 59, 46, 6 },
385 { 15, 58, 48, 7 },
386 { 13, 55, 51, 9 },
387 { 11, 53, 53, 11 },
388 { 9, 51, 55, 13 },
389 { 7, 48, 58, 15 },
390 { 6, 46, 59, 17 },
391 { 5, 43, 61, 19 },
392 { 4, 40, 62, 22 },
393 { 4, 37, 62, 25 },
394 { 3, 34, 63, 28 }
395 }
396};
397
398static int gsc_sw_reset(struct gsc_context *ctx)
399{
400 u32 cfg;
401 int count = GSC_RESET_TIMEOUT;
402
403 DRM_DEBUG_KMS("%s\n", __func__);
404
405 /* s/w reset */
406 cfg = (GSC_SW_RESET_SRESET);
407 gsc_write(cfg, GSC_SW_RESET);
408
 409 /* wait for s/w reset to complete */
410 while (count--) {
411 cfg = gsc_read(GSC_SW_RESET);
412 if (!cfg)
413 break;
414 usleep_range(1000, 2000);
415 }
416
417 if (cfg) {
418 DRM_ERROR("failed to reset gsc h/w.\n");
419 return -EBUSY;
420 }
421
422 /* reset sequence */
423 cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
424 cfg |= (GSC_IN_BASE_ADDR_MASK |
425 GSC_IN_BASE_ADDR_PINGPONG(0));
426 gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
427 gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
428 gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
429
430 cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
431 cfg |= (GSC_OUT_BASE_ADDR_MASK |
432 GSC_OUT_BASE_ADDR_PINGPONG(0));
433 gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
434 gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
435 gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
436
437 return 0;
438}
439
440static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable)
441{
442 u32 gscblk_cfg;
443
444 DRM_DEBUG_KMS("%s\n", __func__);
445
446 gscblk_cfg = readl(SYSREG_GSCBLK_CFG1);
447
448 if (enable)
449 gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) |
450 GSC_BLK_GSCL_WB_IN_SRC_SEL(ctx->id) |
451 GSC_BLK_SW_RESET_WB_DEST(ctx->id);
452 else
453 gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id);
454
455 writel(gscblk_cfg, SYSREG_GSCBLK_CFG1);
456}
457
458static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
459 bool overflow, bool done)
460{
461 u32 cfg;
462
 463 DRM_DEBUG_KMS("%s:enable[%d]overflow[%d]done[%d]\n", __func__,
464 enable, overflow, done);
465
466 cfg = gsc_read(GSC_IRQ);
467 cfg |= (GSC_IRQ_OR_MASK | GSC_IRQ_FRMDONE_MASK);
468
469 if (enable)
470 cfg |= GSC_IRQ_ENABLE;
471 else
472 cfg &= ~GSC_IRQ_ENABLE;
473
474 if (overflow)
475 cfg &= ~GSC_IRQ_OR_MASK;
476 else
477 cfg |= GSC_IRQ_OR_MASK;
478
479 if (done)
480 cfg &= ~GSC_IRQ_FRMDONE_MASK;
481 else
482 cfg |= GSC_IRQ_FRMDONE_MASK;
483
484 gsc_write(cfg, GSC_IRQ);
485}
486
487
488static int gsc_src_set_fmt(struct device *dev, u32 fmt)
489{
490 struct gsc_context *ctx = get_gsc_context(dev);
491 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
492 u32 cfg;
493
494 DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
495
496 cfg = gsc_read(GSC_IN_CON);
497 cfg &= ~(GSC_IN_RGB_TYPE_MASK | GSC_IN_YUV422_1P_ORDER_MASK |
498 GSC_IN_CHROMA_ORDER_MASK | GSC_IN_FORMAT_MASK |
499 GSC_IN_TILE_TYPE_MASK | GSC_IN_TILE_MODE |
500 GSC_IN_CHROM_STRIDE_SEL_MASK | GSC_IN_RB_SWAP_MASK);
501
502 switch (fmt) {
503 case DRM_FORMAT_RGB565:
504 cfg |= GSC_IN_RGB565;
505 break;
506 case DRM_FORMAT_XRGB8888:
507 cfg |= GSC_IN_XRGB8888;
508 break;
509 case DRM_FORMAT_BGRX8888:
510 cfg |= (GSC_IN_XRGB8888 | GSC_IN_RB_SWAP);
511 break;
512 case DRM_FORMAT_YUYV:
513 cfg |= (GSC_IN_YUV422_1P |
514 GSC_IN_YUV422_1P_ORDER_LSB_Y |
515 GSC_IN_CHROMA_ORDER_CBCR);
516 break;
517 case DRM_FORMAT_YVYU:
518 cfg |= (GSC_IN_YUV422_1P |
519 GSC_IN_YUV422_1P_ORDER_LSB_Y |
520 GSC_IN_CHROMA_ORDER_CRCB);
521 break;
522 case DRM_FORMAT_UYVY:
523 cfg |= (GSC_IN_YUV422_1P |
524 GSC_IN_YUV422_1P_OEDER_LSB_C |
525 GSC_IN_CHROMA_ORDER_CBCR);
526 break;
527 case DRM_FORMAT_VYUY:
528 cfg |= (GSC_IN_YUV422_1P |
529 GSC_IN_YUV422_1P_OEDER_LSB_C |
530 GSC_IN_CHROMA_ORDER_CRCB);
531 break;
532 case DRM_FORMAT_NV21:
533 case DRM_FORMAT_NV61:
534 cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
535 GSC_IN_YUV420_2P);
536 break;
537 case DRM_FORMAT_YUV422:
538 cfg |= GSC_IN_YUV422_3P;
539 break;
540 case DRM_FORMAT_YUV420:
541 case DRM_FORMAT_YVU420:
542 cfg |= GSC_IN_YUV420_3P;
543 break;
544 case DRM_FORMAT_NV12:
545 case DRM_FORMAT_NV16:
546 cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
547 GSC_IN_YUV420_2P);
548 break;
549 case DRM_FORMAT_NV12MT:
550 cfg |= (GSC_IN_TILE_C_16x8 | GSC_IN_TILE_MODE);
551 break;
552 default:
 553 dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
554 return -EINVAL;
555 }
556
557 gsc_write(cfg, GSC_IN_CON);
558
559 return 0;
560}
561
562static int gsc_src_set_transf(struct device *dev,
563 enum drm_exynos_degree degree,
564 enum drm_exynos_flip flip, bool *swap)
565{
566 struct gsc_context *ctx = get_gsc_context(dev);
567 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
568 u32 cfg;
569
570 DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
571 degree, flip);
572
573 cfg = gsc_read(GSC_IN_CON);
574 cfg &= ~GSC_IN_ROT_MASK;
575
576 switch (degree) {
577 case EXYNOS_DRM_DEGREE_0:
578 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
579 cfg |= GSC_IN_ROT_XFLIP;
580 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
581 cfg |= GSC_IN_ROT_YFLIP;
582 break;
583 case EXYNOS_DRM_DEGREE_90:
584 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
585 cfg |= GSC_IN_ROT_90_XFLIP;
586 else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
587 cfg |= GSC_IN_ROT_90_YFLIP;
588 else
589 cfg |= GSC_IN_ROT_90;
590 break;
591 case EXYNOS_DRM_DEGREE_180:
592 cfg |= GSC_IN_ROT_180;
593 break;
594 case EXYNOS_DRM_DEGREE_270:
595 cfg |= GSC_IN_ROT_270;
596 break;
597 default:
 598 dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
599 return -EINVAL;
600 }
601
602 gsc_write(cfg, GSC_IN_CON);
603
604 ctx->rotation = cfg &
605 (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
606 *swap = ctx->rotation;
607
608 return 0;
609}
610
611static int gsc_src_set_size(struct device *dev, int swap,
612 struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
613{
614 struct gsc_context *ctx = get_gsc_context(dev);
615 struct drm_exynos_pos img_pos = *pos;
616 struct gsc_scaler *sc = &ctx->sc;
617 u32 cfg;
618
619 DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
620 __func__, swap, pos->x, pos->y, pos->w, pos->h);
621
622 if (swap) {
623 img_pos.w = pos->h;
624 img_pos.h = pos->w;
625 }
626
627 /* pixel offset */
628 cfg = (GSC_SRCIMG_OFFSET_X(img_pos.x) |
629 GSC_SRCIMG_OFFSET_Y(img_pos.y));
630 gsc_write(cfg, GSC_SRCIMG_OFFSET);
631
632 /* cropped size */
633 cfg = (GSC_CROPPED_WIDTH(img_pos.w) |
634 GSC_CROPPED_HEIGHT(img_pos.h));
635 gsc_write(cfg, GSC_CROPPED_SIZE);
636
637 DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
638 __func__, sz->hsize, sz->vsize);
639
640 /* original size */
641 cfg = gsc_read(GSC_SRCIMG_SIZE);
642 cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
643 GSC_SRCIMG_WIDTH_MASK);
644
645 cfg |= (GSC_SRCIMG_WIDTH(sz->hsize) |
646 GSC_SRCIMG_HEIGHT(sz->vsize));
647
648 gsc_write(cfg, GSC_SRCIMG_SIZE);
649
650 cfg = gsc_read(GSC_IN_CON);
651 cfg &= ~GSC_IN_RGB_TYPE_MASK;
652
653 DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
654 __func__, pos->w, sc->range);
655
656 if (pos->w >= GSC_WIDTH_ITU_709)
657 if (sc->range)
658 cfg |= GSC_IN_RGB_HD_WIDE;
659 else
660 cfg |= GSC_IN_RGB_HD_NARROW;
661 else
662 if (sc->range)
663 cfg |= GSC_IN_RGB_SD_WIDE;
664 else
665 cfg |= GSC_IN_RGB_SD_NARROW;
666
667 gsc_write(cfg, GSC_IN_CON);
668
669 return 0;
670}
671
672static int gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
673 enum drm_exynos_ipp_buf_type buf_type)
674{
675 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
676 bool masked;
677 u32 cfg;
678 u32 mask = 0x00000001 << buf_id;
679
680 DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
681 buf_id, buf_type);
682
683 /* mask register set */
684 cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
685
686 switch (buf_type) {
687 case IPP_BUF_ENQUEUE:
688 masked = false;
689 break;
690 case IPP_BUF_DEQUEUE:
691 masked = true;
692 break;
693 default:
694 dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
695 return -EINVAL;
696 }
697
698 /* sequence id */
699 cfg &= ~mask;
700 cfg |= masked << buf_id;
701 gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK);
702 gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK);
703 gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK);
704
705 return 0;
706}
707
708static int gsc_src_set_addr(struct device *dev,
709 struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
710 enum drm_exynos_ipp_buf_type buf_type)
711{
712 struct gsc_context *ctx = get_gsc_context(dev);
713 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
714 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
715 struct drm_exynos_ipp_property *property;
716
717 if (!c_node) {
718 DRM_ERROR("failed to get c_node.\n");
719 return -EFAULT;
720 }
721
722 property = &c_node->property;
723 if (!property) {
724 DRM_ERROR("failed to get property.\n");
725 return -EFAULT;
726 }
727
728 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
729 property->prop_id, buf_id, buf_type);
730
731 if (buf_id > GSC_MAX_SRC) {
 732 dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
733 return -EINVAL;
734 }
735
736 /* address register set */
737 switch (buf_type) {
738 case IPP_BUF_ENQUEUE:
739 gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
740 GSC_IN_BASE_ADDR_Y(buf_id));
741 gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
742 GSC_IN_BASE_ADDR_CB(buf_id));
743 gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
744 GSC_IN_BASE_ADDR_CR(buf_id));
745 break;
746 case IPP_BUF_DEQUEUE:
747 gsc_write(0x0, GSC_IN_BASE_ADDR_Y(buf_id));
748 gsc_write(0x0, GSC_IN_BASE_ADDR_CB(buf_id));
749 gsc_write(0x0, GSC_IN_BASE_ADDR_CR(buf_id));
750 break;
751 default:
752 /* bypass */
753 break;
754 }
755
756 return gsc_src_set_buf_seq(ctx, buf_id, buf_type);
757}
758
759static struct exynos_drm_ipp_ops gsc_src_ops = {
760 .set_fmt = gsc_src_set_fmt,
761 .set_transf = gsc_src_set_transf,
762 .set_size = gsc_src_set_size,
763 .set_addr = gsc_src_set_addr,
764};
765
766static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
767{
768 struct gsc_context *ctx = get_gsc_context(dev);
769 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
770 u32 cfg;
771
772 DRM_DEBUG_KMS("%s:fmt[0x%x]\n", __func__, fmt);
773
774 cfg = gsc_read(GSC_OUT_CON);
775 cfg &= ~(GSC_OUT_RGB_TYPE_MASK | GSC_OUT_YUV422_1P_ORDER_MASK |
776 GSC_OUT_CHROMA_ORDER_MASK | GSC_OUT_FORMAT_MASK |
777 GSC_OUT_CHROM_STRIDE_SEL_MASK | GSC_OUT_RB_SWAP_MASK |
778 GSC_OUT_GLOBAL_ALPHA_MASK);
779
780 switch (fmt) {
781 case DRM_FORMAT_RGB565:
782 cfg |= GSC_OUT_RGB565;
783 break;
784 case DRM_FORMAT_XRGB8888:
785 cfg |= GSC_OUT_XRGB8888;
786 break;
787 case DRM_FORMAT_BGRX8888:
788 cfg |= (GSC_OUT_XRGB8888 | GSC_OUT_RB_SWAP);
789 break;
790 case DRM_FORMAT_YUYV:
791 cfg |= (GSC_OUT_YUV422_1P |
792 GSC_OUT_YUV422_1P_ORDER_LSB_Y |
793 GSC_OUT_CHROMA_ORDER_CBCR);
794 break;
795 case DRM_FORMAT_YVYU:
796 cfg |= (GSC_OUT_YUV422_1P |
797 GSC_OUT_YUV422_1P_ORDER_LSB_Y |
798 GSC_OUT_CHROMA_ORDER_CRCB);
799 break;
800 case DRM_FORMAT_UYVY:
801 cfg |= (GSC_OUT_YUV422_1P |
802 GSC_OUT_YUV422_1P_OEDER_LSB_C |
803 GSC_OUT_CHROMA_ORDER_CBCR);
804 break;
805 case DRM_FORMAT_VYUY:
806 cfg |= (GSC_OUT_YUV422_1P |
807 GSC_OUT_YUV422_1P_OEDER_LSB_C |
808 GSC_OUT_CHROMA_ORDER_CRCB);
809 break;
810 case DRM_FORMAT_NV21:
811 case DRM_FORMAT_NV61:
812 cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
813 break;
814 case DRM_FORMAT_YUV422:
815 case DRM_FORMAT_YUV420:
816 case DRM_FORMAT_YVU420:
817 cfg |= GSC_OUT_YUV420_3P;
818 break;
819 case DRM_FORMAT_NV12:
820 case DRM_FORMAT_NV16:
821 cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
822 GSC_OUT_YUV420_2P);
823 break;
824 case DRM_FORMAT_NV12MT:
825 cfg |= (GSC_OUT_TILE_C_16x8 | GSC_OUT_TILE_MODE);
826 break;
827 default:
 828 dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
829 return -EINVAL;
830 }
831
832 gsc_write(cfg, GSC_OUT_CON);
833
834 return 0;
835}
836
837static int gsc_dst_set_transf(struct device *dev,
838 enum drm_exynos_degree degree,
839 enum drm_exynos_flip flip, bool *swap)
840{
841 struct gsc_context *ctx = get_gsc_context(dev);
842 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
843 u32 cfg;
844
845 DRM_DEBUG_KMS("%s:degree[%d]flip[0x%x]\n", __func__,
846 degree, flip);
847
848 cfg = gsc_read(GSC_IN_CON);
849 cfg &= ~GSC_IN_ROT_MASK;
850
851 switch (degree) {
852 case EXYNOS_DRM_DEGREE_0:
853 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
854 cfg |= GSC_IN_ROT_XFLIP;
855 if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
856 cfg |= GSC_IN_ROT_YFLIP;
857 break;
858 case EXYNOS_DRM_DEGREE_90:
859 if (flip & EXYNOS_DRM_FLIP_VERTICAL)
860 cfg |= GSC_IN_ROT_90_XFLIP;
861 else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL)
862 cfg |= GSC_IN_ROT_90_YFLIP;
863 else
864 cfg |= GSC_IN_ROT_90;
865 break;
866 case EXYNOS_DRM_DEGREE_180:
867 cfg |= GSC_IN_ROT_180;
868 break;
869 case EXYNOS_DRM_DEGREE_270:
870 cfg |= GSC_IN_ROT_270;
871 break;
872 default:
 873 dev_err(ippdrv->dev, "invalid degree value %d.\n", degree);
874 return -EINVAL;
875 }
876
877 gsc_write(cfg, GSC_IN_CON);
878
879 ctx->rotation = cfg &
880 (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
881 *swap = ctx->rotation;
882
883 return 0;
884}
885
886static int gsc_get_ratio_shift(u32 src, u32 dst, u32 *ratio)
887{
888 DRM_DEBUG_KMS("%s:src[%d]dst[%d]\n", __func__, src, dst);
889
890 if (src >= dst * 8) {
891 DRM_ERROR("failed to make ratio and shift.\n");
892 return -EINVAL;
893 } else if (src >= dst * 4)
894 *ratio = 4;
895 else if (src >= dst * 2)
896 *ratio = 2;
897 else
898 *ratio = 1;
899
900 return 0;
901}
902
903static void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *shfactor)
904{
905 if (hratio == 4 && vratio == 4)
906 *shfactor = 4;
907 else if ((hratio == 4 && vratio == 2) ||
908 (hratio == 2 && vratio == 4))
909 *shfactor = 3;
910 else if ((hratio == 4 && vratio == 1) ||
911 (hratio == 1 && vratio == 4) ||
912 (hratio == 2 && vratio == 2))
913 *shfactor = 2;
914 else if (hratio == 1 && vratio == 1)
915 *shfactor = 0;
916 else
917 *shfactor = 1;
918}
919
920static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc,
921 struct drm_exynos_pos *src, struct drm_exynos_pos *dst)
922{
923 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
924 u32 cfg;
925 u32 src_w, src_h, dst_w, dst_h;
926 int ret = 0;
927
928 src_w = src->w;
929 src_h = src->h;
930
931 if (ctx->rotation) {
932 dst_w = dst->h;
933 dst_h = dst->w;
934 } else {
935 dst_w = dst->w;
936 dst_h = dst->h;
937 }
938
939 ret = gsc_get_ratio_shift(src_w, dst_w, &sc->pre_hratio);
940 if (ret) {
941 dev_err(ippdrv->dev, "failed to get ratio horizontal.\n");
942 return ret;
943 }
944
945 ret = gsc_get_ratio_shift(src_h, dst_h, &sc->pre_vratio);
946 if (ret) {
947 dev_err(ippdrv->dev, "failed to get ratio vertical.\n");
948 return ret;
949 }
950
951 DRM_DEBUG_KMS("%s:pre_hratio[%d]pre_vratio[%d]\n",
952 __func__, sc->pre_hratio, sc->pre_vratio);
953
954 sc->main_hratio = (src_w << 16) / dst_w;
955 sc->main_vratio = (src_h << 16) / dst_h;
956
957 DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
958 __func__, sc->main_hratio, sc->main_vratio);
959
960 gsc_get_prescaler_shfactor(sc->pre_hratio, sc->pre_vratio,
961 &sc->pre_shfactor);
962
963 DRM_DEBUG_KMS("%s:pre_shfactor[%d]\n", __func__,
964 sc->pre_shfactor);
965
966 cfg = (GSC_PRESC_SHFACTOR(sc->pre_shfactor) |
967 GSC_PRESC_H_RATIO(sc->pre_hratio) |
968 GSC_PRESC_V_RATIO(sc->pre_vratio));
969 gsc_write(cfg, GSC_PRE_SCALE_RATIO);
970
971 return ret;
972}
973
974static void gsc_set_h_coef(struct gsc_context *ctx, unsigned long main_hratio)
975{
976 int i, j, k, sc_ratio;
977
978 if (main_hratio <= GSC_SC_UP_MAX_RATIO)
979 sc_ratio = 0;
980 else if (main_hratio <= GSC_SC_DOWN_RATIO_7_8)
981 sc_ratio = 1;
982 else if (main_hratio <= GSC_SC_DOWN_RATIO_6_8)
983 sc_ratio = 2;
984 else if (main_hratio <= GSC_SC_DOWN_RATIO_5_8)
985 sc_ratio = 3;
986 else if (main_hratio <= GSC_SC_DOWN_RATIO_4_8)
987 sc_ratio = 4;
988 else if (main_hratio <= GSC_SC_DOWN_RATIO_3_8)
989 sc_ratio = 5;
990 else
991 sc_ratio = 6;
992
993 for (i = 0; i < GSC_COEF_PHASE; i++)
994 for (j = 0; j < GSC_COEF_H_8T; j++)
995 for (k = 0; k < GSC_COEF_DEPTH; k++)
996 gsc_write(h_coef_8t[sc_ratio][i][j],
997 GSC_HCOEF(i, j, k));
998}
999
1000static void gsc_set_v_coef(struct gsc_context *ctx, unsigned long main_vratio)
1001{
1002 int i, j, k, sc_ratio;
1003
1004 if (main_vratio <= GSC_SC_UP_MAX_RATIO)
1005 sc_ratio = 0;
1006 else if (main_vratio <= GSC_SC_DOWN_RATIO_7_8)
1007 sc_ratio = 1;
1008 else if (main_vratio <= GSC_SC_DOWN_RATIO_6_8)
1009 sc_ratio = 2;
1010 else if (main_vratio <= GSC_SC_DOWN_RATIO_5_8)
1011 sc_ratio = 3;
1012 else if (main_vratio <= GSC_SC_DOWN_RATIO_4_8)
1013 sc_ratio = 4;
1014 else if (main_vratio <= GSC_SC_DOWN_RATIO_3_8)
1015 sc_ratio = 5;
1016 else
1017 sc_ratio = 6;
1018
1019 for (i = 0; i < GSC_COEF_PHASE; i++)
1020 for (j = 0; j < GSC_COEF_V_4T; j++)
1021 for (k = 0; k < GSC_COEF_DEPTH; k++)
1022 gsc_write(v_coef_4t[sc_ratio][i][j],
1023 GSC_VCOEF(i, j, k));
1024}
1025
1026static void gsc_set_scaler(struct gsc_context *ctx, struct gsc_scaler *sc)
1027{
1028 u32 cfg;
1029
1030 DRM_DEBUG_KMS("%s:main_hratio[%ld]main_vratio[%ld]\n",
1031 __func__, sc->main_hratio, sc->main_vratio);
1032
1033 gsc_set_h_coef(ctx, sc->main_hratio);
1034 cfg = GSC_MAIN_H_RATIO_VALUE(sc->main_hratio);
1035 gsc_write(cfg, GSC_MAIN_H_RATIO);
1036
1037 gsc_set_v_coef(ctx, sc->main_vratio);
1038 cfg = GSC_MAIN_V_RATIO_VALUE(sc->main_vratio);
1039 gsc_write(cfg, GSC_MAIN_V_RATIO);
1040}
1041
1042static int gsc_dst_set_size(struct device *dev, int swap,
1043 struct drm_exynos_pos *pos, struct drm_exynos_sz *sz)
1044{
1045 struct gsc_context *ctx = get_gsc_context(dev);
1046 struct drm_exynos_pos img_pos = *pos;
1047 struct gsc_scaler *sc = &ctx->sc;
1048 u32 cfg;
1049
1050 DRM_DEBUG_KMS("%s:swap[%d]x[%d]y[%d]w[%d]h[%d]\n",
1051 __func__, swap, pos->x, pos->y, pos->w, pos->h);
1052
1053 if (swap) {
1054 img_pos.w = pos->h;
1055 img_pos.h = pos->w;
1056 }
1057
1058 /* pixel offset */
1059 cfg = (GSC_DSTIMG_OFFSET_X(pos->x) |
1060 GSC_DSTIMG_OFFSET_Y(pos->y));
1061 gsc_write(cfg, GSC_DSTIMG_OFFSET);
1062
1063 /* scaled size */
1064 cfg = (GSC_SCALED_WIDTH(img_pos.w) | GSC_SCALED_HEIGHT(img_pos.h));
1065 gsc_write(cfg, GSC_SCALED_SIZE);
1066
1067 DRM_DEBUG_KMS("%s:hsize[%d]vsize[%d]\n",
1068 __func__, sz->hsize, sz->vsize);
1069
1070 /* original size */
1071 cfg = gsc_read(GSC_DSTIMG_SIZE);
1072 cfg &= ~(GSC_DSTIMG_HEIGHT_MASK |
1073 GSC_DSTIMG_WIDTH_MASK);
1074 cfg |= (GSC_DSTIMG_WIDTH(sz->hsize) |
1075 GSC_DSTIMG_HEIGHT(sz->vsize));
1076 gsc_write(cfg, GSC_DSTIMG_SIZE);
1077
1078 cfg = gsc_read(GSC_OUT_CON);
1079 cfg &= ~GSC_OUT_RGB_TYPE_MASK;
1080
1081 DRM_DEBUG_KMS("%s:width[%d]range[%d]\n",
1082 __func__, pos->w, sc->range);
1083
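	/*
	 * Select the RGB conversion mode: widths at or above
	 * GSC_WIDTH_ITU_709 use the HD (ITU-R 709) coefficients, smaller
	 * widths the SD ones; sc->range picks wide vs. narrow range.
	 */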
1084 if (pos->w >= GSC_WIDTH_ITU_709)
1085 if (sc->range)
1086 cfg |= GSC_OUT_RGB_HD_WIDE;
1087 else
1088 cfg |= GSC_OUT_RGB_HD_NARROW;
1089 else
1090 if (sc->range)
1091 cfg |= GSC_OUT_RGB_SD_WIDE;
1092 else
1093 cfg |= GSC_OUT_RGB_SD_NARROW;
1094
1095 gsc_write(cfg, GSC_OUT_CON);
1096
1097 return 0;
1098}
1099
1100static int gsc_dst_get_buf_seq(struct gsc_context *ctx)
1101{
1102 u32 cfg, i, buf_num = GSC_REG_SZ;
1103 u32 mask = 0x00000001;
1104
1105 cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
1106
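	/*
	 * Each set bit in the mask register disables one output buffer
	 * slot, so the enqueued-buffer count is GSC_REG_SZ minus the
	 * number of set bits.
	 */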
1107 for (i = 0; i < GSC_REG_SZ; i++)
1108 if (cfg & (mask << i))
1109 buf_num--;
1110
1111 DRM_DEBUG_KMS("%s:buf_num[%d]\n", __func__, buf_num);
1112
1113 return buf_num;
1114}
1115
1116static int gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id,
1117 enum drm_exynos_ipp_buf_type buf_type)
1118{
1119 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1120 bool masked;
1121 u32 cfg;
1122 u32 mask = 0x00000001 << buf_id;
1123 int ret = 0;
1124
1125 DRM_DEBUG_KMS("%s:buf_id[%d]buf_type[%d]\n", __func__,
1126 buf_id, buf_type);
1127
1128 mutex_lock(&ctx->lock);
1129
1130 /* mask register set */
1131 cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
1132
1133 switch (buf_type) {
1134 case IPP_BUF_ENQUEUE:
1135 masked = false;
1136 break;
1137 case IPP_BUF_DEQUEUE:
1138 masked = true;
1139 break;
1140 default:
1141 dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n");
1142 ret = -EINVAL;
1143 goto err_unlock;
1144 }
1145
1146 /* sequence id */
1147 cfg &= ~mask;
1148 cfg |= masked << buf_id;
1149 gsc_write(cfg, GSC_OUT_BASE_ADDR_Y_MASK);
1150 gsc_write(cfg, GSC_OUT_BASE_ADDR_CB_MASK);
1151 gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK);
1152
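	/*
	 * Keep the frame-done interrupt armed only while enough buffers
	 * are queued: enable it once the queue reaches GSC_BUF_START and
	 * disable it when the queue drains to GSC_BUF_STOP.
	 */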
1153 /* interrupt enable */
1154 if (buf_type == IPP_BUF_ENQUEUE &&
1155 gsc_dst_get_buf_seq(ctx) >= GSC_BUF_START)
1156 gsc_handle_irq(ctx, true, false, true);
1157
1158 /* interrupt disable */
1159 if (buf_type == IPP_BUF_DEQUEUE &&
1160 gsc_dst_get_buf_seq(ctx) <= GSC_BUF_STOP)
1161 gsc_handle_irq(ctx, false, false, true);
1162
1163err_unlock:
1164 mutex_unlock(&ctx->lock);
1165 return ret;
1166}
1167
1168static int gsc_dst_set_addr(struct device *dev,
1169 struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
1170 enum drm_exynos_ipp_buf_type buf_type)
1171{
1172 struct gsc_context *ctx = get_gsc_context(dev);
1173 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1174 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
1175 struct drm_exynos_ipp_property *property;
1176
1177 if (!c_node) {
1178 DRM_ERROR("failed to get c_node.\n");
1179 return -EFAULT;
1180 }
1181
1182 property = &c_node->property;
1183 if (!property) {
1184 DRM_ERROR("failed to get property.\n");
1185 return -EFAULT;
1186 }
1187
1188 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]buf_type[%d]\n", __func__,
1189 property->prop_id, buf_id, buf_type);
1190
1191	if (buf_id >= GSC_MAX_DST) {
1192	dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id);
1193 return -EINVAL;
1194 }
1195
1196 /* address register set */
1197 switch (buf_type) {
1198 case IPP_BUF_ENQUEUE:
1199 gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y],
1200 GSC_OUT_BASE_ADDR_Y(buf_id));
1201 gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB],
1202 GSC_OUT_BASE_ADDR_CB(buf_id));
1203 gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR],
1204 GSC_OUT_BASE_ADDR_CR(buf_id));
1205 break;
1206 case IPP_BUF_DEQUEUE:
1207 gsc_write(0x0, GSC_OUT_BASE_ADDR_Y(buf_id));
1208 gsc_write(0x0, GSC_OUT_BASE_ADDR_CB(buf_id));
1209 gsc_write(0x0, GSC_OUT_BASE_ADDR_CR(buf_id));
1210 break;
1211 default:
1212 /* bypass */
1213 break;
1214 }
1215
1216 return gsc_dst_set_buf_seq(ctx, buf_id, buf_type);
1217}
1218
1219static struct exynos_drm_ipp_ops gsc_dst_ops = {
1220 .set_fmt = gsc_dst_set_fmt,
1221 .set_transf = gsc_dst_set_transf,
1222 .set_size = gsc_dst_set_size,
1223 .set_addr = gsc_dst_set_addr,
1224};
1225
1226static int gsc_clk_ctrl(struct gsc_context *ctx, bool enable)
1227{
1228 DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
1229
1230 if (enable) {
1231 clk_enable(ctx->gsc_clk);
1232 ctx->suspended = false;
1233 } else {
1234 clk_disable(ctx->gsc_clk);
1235 ctx->suspended = true;
1236 }
1237
1238 return 0;
1239}
1240
1241static int gsc_get_src_buf_index(struct gsc_context *ctx)
1242{
1243 u32 cfg, curr_index, i;
1244 u32 buf_id = GSC_MAX_SRC;
1245 int ret;
1246
1247 DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
1248
1249 cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
1250 curr_index = GSC_IN_CURR_GET_INDEX(cfg);
1251
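	/*
	 * Scan forward from the slot the hardware is currently reading;
	 * the first bit that is still clear (still enqueued) is the
	 * buffer that just completed and can be dequeued.
	 */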
1252 for (i = curr_index; i < GSC_MAX_SRC; i++) {
1253 if (!((cfg >> i) & 0x1)) {
1254 buf_id = i;
1255 break;
1256 }
1257 }
1258
1259 if (buf_id == GSC_MAX_SRC) {
1260 DRM_ERROR("failed to get in buffer index.\n");
1261 return -EINVAL;
1262 }
1263
1264 ret = gsc_src_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
1265 if (ret < 0) {
1266 DRM_ERROR("failed to dequeue.\n");
1267 return ret;
1268 }
1269
1270 DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
1271 curr_index, buf_id);
1272
1273 return buf_id;
1274}
1275
1276static int gsc_get_dst_buf_index(struct gsc_context *ctx)
1277{
1278 u32 cfg, curr_index, i;
1279 u32 buf_id = GSC_MAX_DST;
1280 int ret;
1281
1282 DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
1283
1284 cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
1285 curr_index = GSC_OUT_CURR_GET_INDEX(cfg);
1286
1287 for (i = curr_index; i < GSC_MAX_DST; i++) {
1288 if (!((cfg >> i) & 0x1)) {
1289 buf_id = i;
1290 break;
1291 }
1292 }
1293
1294 if (buf_id == GSC_MAX_DST) {
1295 DRM_ERROR("failed to get out buffer index.\n");
1296 return -EINVAL;
1297 }
1298
1299 ret = gsc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE);
1300 if (ret < 0) {
1301 DRM_ERROR("failed to dequeue.\n");
1302 return ret;
1303 }
1304
1305 DRM_DEBUG_KMS("%s:cfg[0x%x]curr_index[%d]buf_id[%d]\n", __func__, cfg,
1306 curr_index, buf_id);
1307
1308 return buf_id;
1309}
1310
1311static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
1312{
1313 struct gsc_context *ctx = dev_id;
1314 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1315 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
1316 struct drm_exynos_ipp_event_work *event_work =
1317 c_node->event_work;
1318 u32 status;
1319 int buf_id[EXYNOS_DRM_OPS_MAX];
1320
1321 DRM_DEBUG_KMS("%s:gsc id[%d]\n", __func__, ctx->id);
1322
1323 status = gsc_read(GSC_IRQ);
1324 if (status & GSC_IRQ_STATUS_OR_IRQ) {
1325 dev_err(ippdrv->dev, "overflow occurred at %d, status 0x%x.\n",
1326 ctx->id, status);
1327 return IRQ_NONE;
1328 }
1329
1330 if (status & GSC_IRQ_STATUS_OR_FRM_DONE) {
1331 dev_dbg(ippdrv->dev, "frame done occurred at %d, status 0x%x.\n",
1332 ctx->id, status);
1333
1334 buf_id[EXYNOS_DRM_OPS_SRC] = gsc_get_src_buf_index(ctx);
1335 if (buf_id[EXYNOS_DRM_OPS_SRC] < 0)
1336 return IRQ_HANDLED;
1337
1338 buf_id[EXYNOS_DRM_OPS_DST] = gsc_get_dst_buf_index(ctx);
1339 if (buf_id[EXYNOS_DRM_OPS_DST] < 0)
1340 return IRQ_HANDLED;
1341
1342 DRM_DEBUG_KMS("%s:buf_id_src[%d]buf_id_dst[%d]\n", __func__,
1343 buf_id[EXYNOS_DRM_OPS_SRC], buf_id[EXYNOS_DRM_OPS_DST]);
1344
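		/*
		 * Event delivery to userspace is deferred to the ippdrv
		 * event workqueue; the interrupt handler only records
		 * which src/dst buffer pair completed.
		 */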
1345 event_work->ippdrv = ippdrv;
1346 event_work->buf_id[EXYNOS_DRM_OPS_SRC] =
1347 buf_id[EXYNOS_DRM_OPS_SRC];
1348 event_work->buf_id[EXYNOS_DRM_OPS_DST] =
1349 buf_id[EXYNOS_DRM_OPS_DST];
1350 queue_work(ippdrv->event_workq,
1351 (struct work_struct *)event_work);
1352 }
1353
1354 return IRQ_HANDLED;
1355}
1356
1357static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
1358{
1359 struct drm_exynos_ipp_prop_list *prop_list;
1360
1361 DRM_DEBUG_KMS("%s\n", __func__);
1362
1363 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
1364 if (!prop_list) {
1365 DRM_ERROR("failed to alloc property list.\n");
1366 return -ENOMEM;
1367 }
1368
1369 prop_list->version = 1;
1370 prop_list->writeback = 1;
1371 prop_list->refresh_min = GSC_REFRESH_MIN;
1372 prop_list->refresh_max = GSC_REFRESH_MAX;
1373 prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
1374 (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
1375 prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
1376 (1 << EXYNOS_DRM_DEGREE_90) |
1377 (1 << EXYNOS_DRM_DEGREE_180) |
1378 (1 << EXYNOS_DRM_DEGREE_270);
1379 prop_list->csc = 1;
1380 prop_list->crop = 1;
1381 prop_list->crop_max.hsize = GSC_CROP_MAX;
1382 prop_list->crop_max.vsize = GSC_CROP_MAX;
1383 prop_list->crop_min.hsize = GSC_CROP_MIN;
1384 prop_list->crop_min.vsize = GSC_CROP_MIN;
1385 prop_list->scale = 1;
1386 prop_list->scale_max.hsize = GSC_SCALE_MAX;
1387 prop_list->scale_max.vsize = GSC_SCALE_MAX;
1388 prop_list->scale_min.hsize = GSC_SCALE_MIN;
1389 prop_list->scale_min.vsize = GSC_SCALE_MIN;
1390
1391 ippdrv->prop_list = prop_list;
1392
1393 return 0;
1394}
1395
1396static inline bool gsc_check_drm_flip(enum drm_exynos_flip flip)
1397{
1398 switch (flip) {
1399 case EXYNOS_DRM_FLIP_NONE:
1400 case EXYNOS_DRM_FLIP_VERTICAL:
1401 case EXYNOS_DRM_FLIP_HORIZONTAL:
1402 case EXYNOS_DRM_FLIP_VERTICAL | EXYNOS_DRM_FLIP_HORIZONTAL:
1403 return true;
1404 default:
1405 DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
1406 return false;
1407 }
1408}
1409
1410static int gsc_ippdrv_check_property(struct device *dev,
1411 struct drm_exynos_ipp_property *property)
1412{
1413 struct gsc_context *ctx = get_gsc_context(dev);
1414 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1415 struct drm_exynos_ipp_prop_list *pp = ippdrv->prop_list;
1416 struct drm_exynos_ipp_config *config;
1417 struct drm_exynos_pos *pos;
1418 struct drm_exynos_sz *sz;
1419 bool swap;
1420 int i;
1421
1422 DRM_DEBUG_KMS("%s\n", __func__);
1423
1424 for_each_ipp_ops(i) {
1425 if ((i == EXYNOS_DRM_OPS_SRC) &&
1426 (property->cmd == IPP_CMD_WB))
1427 continue;
1428
1429 config = &property->config[i];
1430 pos = &config->pos;
1431 sz = &config->sz;
1432
1433 /* check for flip */
1434 if (!gsc_check_drm_flip(config->flip)) {
1435 DRM_ERROR("invalid flip.\n");
1436 goto err_property;
1437 }
1438
1439 /* check for degree */
1440 switch (config->degree) {
1441 case EXYNOS_DRM_DEGREE_90:
1442 case EXYNOS_DRM_DEGREE_270:
1443 swap = true;
1444 break;
1445 case EXYNOS_DRM_DEGREE_0:
1446 case EXYNOS_DRM_DEGREE_180:
1447 swap = false;
1448 break;
1449 default:
1450 DRM_ERROR("invalid degree.\n");
1451 goto err_property;
1452 }
1453
1454 /* check for buffer bound */
1455 if ((pos->x + pos->w > sz->hsize) ||
1456 (pos->y + pos->h > sz->vsize)) {
1457 DRM_ERROR("out of buf bound.\n");
1458 goto err_property;
1459 }
1460
1461 /* check for crop */
1462 if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) {
1463 if (swap) {
1464 if ((pos->h < pp->crop_min.hsize) ||
1465 (sz->vsize > pp->crop_max.hsize) ||
1466 (pos->w < pp->crop_min.vsize) ||
1467 (sz->hsize > pp->crop_max.vsize)) {
1468 DRM_ERROR("out of crop size.\n");
1469 goto err_property;
1470 }
1471 } else {
1472 if ((pos->w < pp->crop_min.hsize) ||
1473 (sz->hsize > pp->crop_max.hsize) ||
1474 (pos->h < pp->crop_min.vsize) ||
1475 (sz->vsize > pp->crop_max.vsize)) {
1476 DRM_ERROR("out of crop size.\n");
1477 goto err_property;
1478 }
1479 }
1480 }
1481
1482 /* check for scale */
1483 if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) {
1484 if (swap) {
1485 if ((pos->h < pp->scale_min.hsize) ||
1486 (sz->vsize > pp->scale_max.hsize) ||
1487 (pos->w < pp->scale_min.vsize) ||
1488 (sz->hsize > pp->scale_max.vsize)) {
1489 DRM_ERROR("out of scale size.\n");
1490 goto err_property;
1491 }
1492 } else {
1493 if ((pos->w < pp->scale_min.hsize) ||
1494 (sz->hsize > pp->scale_max.hsize) ||
1495 (pos->h < pp->scale_min.vsize) ||
1496 (sz->vsize > pp->scale_max.vsize)) {
1497 DRM_ERROR("out of scale size.\n");
1498 goto err_property;
1499 }
1500 }
1501 }
1502 }
1503
1504 return 0;
1505
1506err_property:
1507 for_each_ipp_ops(i) {
1508 if ((i == EXYNOS_DRM_OPS_SRC) &&
1509 (property->cmd == IPP_CMD_WB))
1510 continue;
1511
1512 config = &property->config[i];
1513 pos = &config->pos;
1514 sz = &config->sz;
1515
1516 DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n",
1517 i ? "dst" : "src", config->flip, config->degree,
1518 pos->x, pos->y, pos->w, pos->h,
1519 sz->hsize, sz->vsize);
1520 }
1521
1522 return -EINVAL;
1523}
1524
1525
1526static int gsc_ippdrv_reset(struct device *dev)
1527{
1528 struct gsc_context *ctx = get_gsc_context(dev);
1529 struct gsc_scaler *sc = &ctx->sc;
1530 int ret;
1531
1532 DRM_DEBUG_KMS("%s\n", __func__);
1533
1534 /* reset h/w block */
1535 ret = gsc_sw_reset(ctx);
1536 if (ret < 0) {
1537 dev_err(dev, "failed to reset hardware.\n");
1538 return ret;
1539 }
1540
1541 /* scaler setting */
1542 memset(&ctx->sc, 0x0, sizeof(ctx->sc));
1543 sc->range = true;
1544
1545 return 0;
1546}
1547
1548static int gsc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1549{
1550 struct gsc_context *ctx = get_gsc_context(dev);
1551 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1552 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
1553 struct drm_exynos_ipp_property *property;
1554 struct drm_exynos_ipp_config *config;
1555 struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX];
1556 struct drm_exynos_ipp_set_wb set_wb;
1557 u32 cfg;
1558 int ret, i;
1559
1560 DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
1561
1562 if (!c_node) {
1563 DRM_ERROR("failed to get c_node.\n");
1564 return -EINVAL;
1565 }
1566
1567 property = &c_node->property;
1568 if (!property) {
1569 DRM_ERROR("failed to get property.\n");
1570 return -EINVAL;
1571 }
1572
1573 gsc_handle_irq(ctx, true, false, true);
1574
1575 for_each_ipp_ops(i) {
1576 config = &property->config[i];
1577 img_pos[i] = config->pos;
1578 }
1579
1580 switch (cmd) {
1581 case IPP_CMD_M2M:
1582 /* enable one shot */
1583 cfg = gsc_read(GSC_ENABLE);
1584 cfg &= ~(GSC_ENABLE_ON_CLEAR_MASK |
1585 GSC_ENABLE_CLK_GATE_MODE_MASK);
1586 cfg |= GSC_ENABLE_ON_CLEAR_ONESHOT;
1587 gsc_write(cfg, GSC_ENABLE);
1588
1589 /* src dma memory */
1590 cfg = gsc_read(GSC_IN_CON);
1591 cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
1592 cfg |= GSC_IN_PATH_MEMORY;
1593 gsc_write(cfg, GSC_IN_CON);
1594
1595 /* dst dma memory */
1596 cfg = gsc_read(GSC_OUT_CON);
1597 cfg |= GSC_OUT_PATH_MEMORY;
1598 gsc_write(cfg, GSC_OUT_CON);
1599 break;
1600 case IPP_CMD_WB:
1601 set_wb.enable = 1;
1602 set_wb.refresh = property->refresh_rate;
1603 gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
1604 exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
1605
1606 /* src local path */
1607 cfg = gsc_read(GSC_IN_CON);
1608 cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
1609 cfg |= (GSC_IN_PATH_LOCAL | GSC_IN_LOCAL_FIMD_WB);
1610 gsc_write(cfg, GSC_IN_CON);
1611
1612 /* dst dma memory */
1613 cfg = gsc_read(GSC_OUT_CON);
1614 cfg |= GSC_OUT_PATH_MEMORY;
1615 gsc_write(cfg, GSC_OUT_CON);
1616 break;
1617 case IPP_CMD_OUTPUT:
1618 /* src dma memory */
1619 cfg = gsc_read(GSC_IN_CON);
1620 cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK);
1621 cfg |= GSC_IN_PATH_MEMORY;
1622 gsc_write(cfg, GSC_IN_CON);
1623
1624 /* dst dma memory */
1625 cfg = gsc_read(GSC_OUT_CON);
1626 cfg |= GSC_OUT_PATH_MEMORY;
1627 gsc_write(cfg, GSC_OUT_CON);
1628 break;
1629 default:
1630 ret = -EINVAL;
1631 dev_err(dev, "invalid operations.\n");
1632 return ret;
1633 }
1634
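	/*
	 * The pre-scaler works in coarse power-of-two steps (via the
	 * ratio shifts computed in gsc_set_prescaler()), while the main
	 * scaler applies the fractional 16.16 ratio; both must be
	 * programmed before the engine is enabled below.
	 */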
1635 ret = gsc_set_prescaler(ctx, &ctx->sc,
1636 &img_pos[EXYNOS_DRM_OPS_SRC],
1637 &img_pos[EXYNOS_DRM_OPS_DST]);
1638 if (ret) {
1639 dev_err(dev, "failed to set prescaler.\n");
1640 return ret;
1641 }
1642
1643 gsc_set_scaler(ctx, &ctx->sc);
1644
1645 cfg = gsc_read(GSC_ENABLE);
1646 cfg |= GSC_ENABLE_ON;
1647 gsc_write(cfg, GSC_ENABLE);
1648
1649 return 0;
1650}
1651
1652static void gsc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd)
1653{
1654 struct gsc_context *ctx = get_gsc_context(dev);
1655 struct drm_exynos_ipp_set_wb set_wb = {0, 0};
1656 u32 cfg;
1657
1658 DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, cmd);
1659
1660 switch (cmd) {
1661 case IPP_CMD_M2M:
1662 /* bypass */
1663 break;
1664 case IPP_CMD_WB:
1665 gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
1666 exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb);
1667 break;
1668 case IPP_CMD_OUTPUT:
1669 default:
1670 dev_err(dev, "invalid operations.\n");
1671 break;
1672 }
1673
1674 gsc_handle_irq(ctx, false, false, true);
1675
1676 /* reset sequence */
1677 gsc_write(0xff, GSC_OUT_BASE_ADDR_Y_MASK);
1678 gsc_write(0xff, GSC_OUT_BASE_ADDR_CB_MASK);
1679 gsc_write(0xff, GSC_OUT_BASE_ADDR_CR_MASK);
1680
1681 cfg = gsc_read(GSC_ENABLE);
1682 cfg &= ~GSC_ENABLE_ON;
1683 gsc_write(cfg, GSC_ENABLE);
1684}
1685
1686static int __devinit gsc_probe(struct platform_device *pdev)
1687{
1688 struct device *dev = &pdev->dev;
1689 struct gsc_context *ctx;
1690 struct resource *res;
1691 struct exynos_drm_ippdrv *ippdrv;
1692 int ret;
1693
1694 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
1695 if (!ctx)
1696 return -ENOMEM;
1697
1698 /* clock control */
1699 ctx->gsc_clk = clk_get(dev, "gscl");
1700 if (IS_ERR(ctx->gsc_clk)) {
1701 dev_err(dev, "failed to get gsc clock.\n");
1702 ret = PTR_ERR(ctx->gsc_clk);
1703 goto err_ctx;
1704 }
1705
1706 /* resource memory */
1707 ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1708 if (!ctx->regs_res) {
1709 dev_err(dev, "failed to find registers.\n");
1710 ret = -ENOENT;
1711 goto err_clk;
1712 }
1713
1714 ctx->regs = devm_request_and_ioremap(dev, ctx->regs_res);
1715 if (!ctx->regs) {
1716 dev_err(dev, "failed to map registers.\n");
1717 ret = -ENXIO;
1718 goto err_clk;
1719 }
1720
1721 /* resource irq */
1722 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1723 if (!res) {
1724 dev_err(dev, "failed to request irq resource.\n");
1725 ret = -ENOENT;
1726 goto err_get_regs;
1727 }
1728
1729 ctx->irq = res->start;
1730 ret = request_threaded_irq(ctx->irq, NULL, gsc_irq_handler,
1731 IRQF_ONESHOT, "drm_gsc", ctx);
1732 if (ret < 0) {
1733 dev_err(dev, "failed to request irq.\n");
1734 goto err_get_regs;
1735 }
1736
1737 /* context initialization */
1738 ctx->id = pdev->id;
1739
1740 ippdrv = &ctx->ippdrv;
1741 ippdrv->dev = dev;
1742 ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &gsc_src_ops;
1743 ippdrv->ops[EXYNOS_DRM_OPS_DST] = &gsc_dst_ops;
1744 ippdrv->check_property = gsc_ippdrv_check_property;
1745 ippdrv->reset = gsc_ippdrv_reset;
1746 ippdrv->start = gsc_ippdrv_start;
1747 ippdrv->stop = gsc_ippdrv_stop;
1748 ret = gsc_init_prop_list(ippdrv);
1749 if (ret < 0) {
1750 dev_err(dev, "failed to init property list.\n");
1751 goto err_get_irq;
1752 }
1753
1754 DRM_DEBUG_KMS("%s:id[%d]ippdrv[0x%x]\n", __func__, ctx->id,
1755 (int)ippdrv);
1756
1757 mutex_init(&ctx->lock);
1758 platform_set_drvdata(pdev, ctx);
1759
1760 pm_runtime_set_active(dev);
1761 pm_runtime_enable(dev);
1762
1763 ret = exynos_drm_ippdrv_register(ippdrv);
1764 if (ret < 0) {
1765 dev_err(dev, "failed to register drm gsc device.\n");
1766 goto err_ippdrv_register;
1767 }
1768
1769 dev_info(&pdev->dev, "drm gsc registered successfully.\n");
1770
1771 return 0;
1772
1773err_ippdrv_register:
1774 devm_kfree(dev, ippdrv->prop_list);
1775 pm_runtime_disable(dev);
1776err_get_irq:
1777 free_irq(ctx->irq, ctx);
1778err_get_regs:
1779 devm_iounmap(dev, ctx->regs);
1780err_clk:
1781 clk_put(ctx->gsc_clk);
1782err_ctx:
1783 devm_kfree(dev, ctx);
1784 return ret;
1785}
1786
1787static int __devexit gsc_remove(struct platform_device *pdev)
1788{
1789 struct device *dev = &pdev->dev;
1790 struct gsc_context *ctx = get_gsc_context(dev);
1791 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv;
1792
1793 devm_kfree(dev, ippdrv->prop_list);
1794 exynos_drm_ippdrv_unregister(ippdrv);
1795 mutex_destroy(&ctx->lock);
1796
1797 pm_runtime_set_suspended(dev);
1798 pm_runtime_disable(dev);
1799
1800 free_irq(ctx->irq, ctx);
1801 devm_iounmap(dev, ctx->regs);
1802
1803 clk_put(ctx->gsc_clk);
1804
1805 devm_kfree(dev, ctx);
1806
1807 return 0;
1808}
1809
1810#ifdef CONFIG_PM_SLEEP
1811static int gsc_suspend(struct device *dev)
1812{
1813 struct gsc_context *ctx = get_gsc_context(dev);
1814
1815 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1816
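	/*
	 * If the device is already runtime-suspended the clock is off
	 * and there is nothing left to do for system sleep.
	 */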
1817 if (pm_runtime_suspended(dev))
1818 return 0;
1819
1820 return gsc_clk_ctrl(ctx, false);
1821}
1822
1823static int gsc_resume(struct device *dev)
1824{
1825 struct gsc_context *ctx = get_gsc_context(dev);
1826
1827 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1828
1829 if (!pm_runtime_suspended(dev))
1830 return gsc_clk_ctrl(ctx, true);
1831
1832 return 0;
1833}
1834#endif
1835
1836#ifdef CONFIG_PM_RUNTIME
1837static int gsc_runtime_suspend(struct device *dev)
1838{
1839 struct gsc_context *ctx = get_gsc_context(dev);
1840
1841 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1842
1843 return gsc_clk_ctrl(ctx, false);
1844}
1845
1846static int gsc_runtime_resume(struct device *dev)
1847{
1848 struct gsc_context *ctx = get_gsc_context(dev);
1849
1850 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, ctx->id);
1851
1852 return gsc_clk_ctrl(ctx, true);
1853}
1854#endif
1855
1856static const struct dev_pm_ops gsc_pm_ops = {
1857 SET_SYSTEM_SLEEP_PM_OPS(gsc_suspend, gsc_resume)
1858 SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL)
1859};
1860
1861struct platform_driver gsc_driver = {
1862 .probe = gsc_probe,
1863 .remove = __devexit_p(gsc_remove),
1864 .driver = {
1865 .name = "exynos-drm-gsc",
1866 .owner = THIS_MODULE,
1867 .pm = &gsc_pm_ops,
1868 },
1869};
1870
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.h b/drivers/gpu/drm/exynos/exynos_drm_gsc.h
new file mode 100644
index 000000000000..b3c3bc618c0f
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.h
@@ -0,0 +1,38 @@
1/*
2 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
3 *
4 * Authors:
5 * Eunchul Kim <chulspro.kim@samsung.com>
6 * Jinyoung Jeon <jy0.jeon@samsung.com>
7 * Sangmin Lee <lsmin.lee@samsung.com>
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */
28
29#ifndef _EXYNOS_DRM_GSC_H_
30#define _EXYNOS_DRM_GSC_H_
31
32/*
33 * TODO
34 * FIMD output interface notifier callback.
35 * Mixer output interface notifier callback.
36 */
37
38#endif /* _EXYNOS_DRM_GSC_H_ */
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index c3b9e2b45185..55793c46e3c2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -29,6 +29,9 @@
 #define get_ctx_from_subdrv(subdrv)	container_of(subdrv,\
 		struct drm_hdmi_context, subdrv);
 
+/* platform device pointer for common drm hdmi device. */
+static struct platform_device *exynos_drm_hdmi_pdev;
+
 /* Common hdmi subdrv needs to access the hdmi and mixer though context.
 * These should be initialied by the repective drivers */
 static struct exynos_drm_hdmi_context *hdmi_ctx;
@@ -46,6 +49,25 @@ struct drm_hdmi_context {
 	bool enabled[MIXER_WIN_NR];
 };
 
+int exynos_platform_device_hdmi_register(void)
+{
+	if (exynos_drm_hdmi_pdev)
+		return -EEXIST;
+
+	exynos_drm_hdmi_pdev = platform_device_register_simple(
+			"exynos-drm-hdmi", -1, NULL, 0);
+	if (IS_ERR_OR_NULL(exynos_drm_hdmi_pdev))
+		return PTR_ERR(exynos_drm_hdmi_pdev);
+
+	return 0;
+}
+
+void exynos_platform_device_hdmi_unregister(void)
+{
+	if (exynos_drm_hdmi_pdev)
+		platform_device_unregister(exynos_drm_hdmi_pdev);
+}
+
 void exynos_hdmi_drv_attach(struct exynos_drm_hdmi_context *ctx)
 {
 	if (ctx)
@@ -157,6 +179,16 @@ static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
 	return mixer_ops->disable_vblank(ctx->mixer_ctx->ctx);
 }
 
+static void drm_hdmi_wait_for_vblank(struct device *subdrv_dev)
+{
+	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (mixer_ops && mixer_ops->wait_for_vblank)
+		mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
+}
+
 static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
 				struct drm_connector *connector,
 				const struct drm_display_mode *mode,
@@ -238,6 +270,7 @@ static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
 	.apply = drm_hdmi_apply,
 	.enable_vblank = drm_hdmi_enable_vblank,
 	.disable_vblank = drm_hdmi_disable_vblank,
+	.wait_for_vblank = drm_hdmi_wait_for_vblank,
 	.mode_fixup = drm_hdmi_mode_fixup,
 	.mode_set = drm_hdmi_mode_set,
 	.get_max_resol = drm_hdmi_get_max_resol,
@@ -291,21 +324,10 @@ static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
 	ctx->enabled[win] = false;
 }
 
-static void drm_mixer_wait_for_vblank(struct device *subdrv_dev)
-{
-	struct drm_hdmi_context *ctx = to_context(subdrv_dev);
-
-	DRM_DEBUG_KMS("%s\n", __FILE__);
-
-	if (mixer_ops && mixer_ops->wait_for_vblank)
-		mixer_ops->wait_for_vblank(ctx->mixer_ctx->ctx);
-}
-
 static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
 	.mode_set = drm_mixer_mode_set,
 	.commit = drm_mixer_commit,
 	.disable = drm_mixer_disable,
-	.wait_for_vblank = drm_mixer_wait_for_vblank,
 };
 
 static struct exynos_drm_manager hdmi_manager = {
@@ -346,9 +368,23 @@ static int hdmi_subdrv_probe(struct drm_device *drm_dev,
 	ctx->hdmi_ctx->drm_dev = drm_dev;
 	ctx->mixer_ctx->drm_dev = drm_dev;
 
+	if (mixer_ops->iommu_on)
+		mixer_ops->iommu_on(ctx->mixer_ctx->ctx, true);
+
 	return 0;
 }
 
+static void hdmi_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
+{
+	struct drm_hdmi_context *ctx;
+	struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
+
+	ctx = get_ctx_from_subdrv(subdrv);
+
+	if (mixer_ops->iommu_on)
+		mixer_ops->iommu_on(ctx->mixer_ctx->ctx, false);
+}
+
 static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
@@ -368,6 +404,7 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
 	subdrv->dev = dev;
 	subdrv->manager = &hdmi_manager;
 	subdrv->probe = hdmi_subdrv_probe;
+	subdrv->remove = hdmi_subdrv_remove;
 
 	platform_set_drvdata(pdev, subdrv);
 
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index 2da5ffd3a059..fcc3093ec8fe 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -62,12 +62,13 @@ struct exynos_hdmi_ops {
 
 struct exynos_mixer_ops {
 	/* manager */
+	int (*iommu_on)(void *ctx, bool enable);
 	int (*enable_vblank)(void *ctx, int pipe);
 	void (*disable_vblank)(void *ctx);
+	void (*wait_for_vblank)(void *ctx);
 	void (*dpms)(void *ctx, int mode);
 
 	/* overlay */
-	void (*wait_for_vblank)(void *ctx);
 	void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
 	void (*win_commit)(void *ctx, int zpos);
 	void (*win_disable)(void *ctx, int zpos);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
new file mode 100644
index 000000000000..2482b7f96341
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -0,0 +1,150 @@
1/* exynos_drm_iommu.c
2 *
3 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
4 * Author: Inki Dae <inki.dae@samsung.com>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26#include <drmP.h>
27#include <drm/exynos_drm.h>
28
29#include <linux/dma-mapping.h>
30#include <linux/iommu.h>
31#include <linux/kref.h>
32
33#include <asm/dma-iommu.h>
34
35#include "exynos_drm_drv.h"
36#include "exynos_drm_iommu.h"
37
38/*
39 * drm_create_iommu_mapping - create a mapping structure
40 *
41 * @drm_dev: DRM device
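 *
 * Allocates an ARM dma-iommu mapping for the device address range set
 * in exynos_drm_private (defaulting to the EXYNOS_DEV_ADDR_* values)
 * and stores it in dev->archdata.mapping for later attaches.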
42 */
43int drm_create_iommu_mapping(struct drm_device *drm_dev)
44{
45 struct dma_iommu_mapping *mapping = NULL;
46 struct exynos_drm_private *priv = drm_dev->dev_private;
47 struct device *dev = drm_dev->dev;
48
49 if (!priv->da_start)
50 priv->da_start = EXYNOS_DEV_ADDR_START;
51 if (!priv->da_space_size)
52 priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
53 if (!priv->da_space_order)
54 priv->da_space_order = EXYNOS_DEV_ADDR_ORDER;
55
56 mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
57 priv->da_space_size,
58 priv->da_space_order);
59 if (IS_ERR(mapping))
60 return PTR_ERR(mapping);
61
62 dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
63 GFP_KERNEL);
64 dma_set_max_seg_size(dev, 0xffffffffu);
65 dev->archdata.mapping = mapping;
66
67 return 0;
68}
69
70/*
71 * drm_release_iommu_mapping - release iommu mapping structure
72 *
73 * @drm_dev: DRM device
74 *
 75 * If mapping->kref drops to 0, everything related to the iommu
 76 * mapping is released.
77 */
78void drm_release_iommu_mapping(struct drm_device *drm_dev)
79{
80 struct device *dev = drm_dev->dev;
81
82 arm_iommu_release_mapping(dev->archdata.mapping);
83}
84
85/*
86 * drm_iommu_attach_device- attach device to iommu mapping
87 *
88 * @drm_dev: DRM device
 89 * @subdrv_dev: device to be attached
90 *
91 * This function should be called by sub drivers to attach it to iommu
92 * mapping.
93 */
94int drm_iommu_attach_device(struct drm_device *drm_dev,
95 struct device *subdrv_dev)
96{
97 struct device *dev = drm_dev->dev;
98 int ret;
99
100 if (!dev->archdata.mapping) {
101 DRM_ERROR("iommu_mapping is null.\n");
102 return -EFAULT;
103 }
104
105 subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
106 sizeof(*subdrv_dev->dma_parms),
107 GFP_KERNEL);
108 dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
109
110 ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);
111 if (ret < 0) {
112 DRM_DEBUG_KMS("failed iommu attach.\n");
113 return ret;
114 }
115
116	/*
117	 * Set dma_ops on the drm_device just once.
118	 *
119	 * The dma mapping api needs a device object and is used to
120	 * allocate physical memory and map it through the iommu table.
121	 * Once the iommu attach succeeds, the sub driver has iommu
122	 * dma_ops, and all sub drivers share the same dma_ops.
123	 */
124 if (!dev->archdata.dma_ops)
125 dev->archdata.dma_ops = subdrv_dev->archdata.dma_ops;
126
127 return 0;
128}
129
130/*
131 * drm_iommu_detach_device - detach device address space mapping from device
132 *
133 * @drm_dev: DRM device
134 * @subdrv_dev: device to be detached
135 *
136 * This function should be called by sub drivers to detach it from iommu
137 * mapping
138 */
139void drm_iommu_detach_device(struct drm_device *drm_dev,
140 struct device *subdrv_dev)
141{
142 struct device *dev = drm_dev->dev;
143 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
144
145 if (!mapping || !mapping->domain)
146 return;
147
148 iommu_detach_device(mapping->domain, subdrv_dev);
149 drm_release_iommu_mapping(drm_dev);
150}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
new file mode 100644
index 000000000000..18a0ca190b98
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -0,0 +1,85 @@
1/* exynos_drm_iommu.h
2 *
3 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 4 * Author: Inki Dae <inki.dae@samsung.com>
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 */
25
26#ifndef _EXYNOS_DRM_IOMMU_H_
27#define _EXYNOS_DRM_IOMMU_H_
28
29#define EXYNOS_DEV_ADDR_START 0x20000000
30#define EXYNOS_DEV_ADDR_SIZE 0x40000000
31#define EXYNOS_DEV_ADDR_ORDER 0x4
32
33#ifdef CONFIG_DRM_EXYNOS_IOMMU
34
35int drm_create_iommu_mapping(struct drm_device *drm_dev);
36
37void drm_release_iommu_mapping(struct drm_device *drm_dev);
38
39int drm_iommu_attach_device(struct drm_device *drm_dev,
40 struct device *subdrv_dev);
41
42void drm_iommu_detach_device(struct drm_device *dev_dev,
43 struct device *subdrv_dev);
44
45static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
46{
47#ifdef CONFIG_ARM_DMA_USE_IOMMU
48 struct device *dev = drm_dev->dev;
49
50 return dev->archdata.mapping ? true : false;
51#else
52 return false;
53#endif
54}
55
56#else
57
58struct dma_iommu_mapping;
59static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
60{
61 return 0;
62}
63
64static inline void drm_release_iommu_mapping(struct drm_device *drm_dev)
65{
66}
67
68static inline int drm_iommu_attach_device(struct drm_device *drm_dev,
69 struct device *subdrv_dev)
70{
71 return 0;
72}
73
74static inline void drm_iommu_detach_device(struct drm_device *drm_dev,
75 struct device *subdrv_dev)
76{
77}
78
79static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
80{
81 return false;
82}
83
84#endif
85#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
new file mode 100644
index 000000000000..49eebe948ed2
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -0,0 +1,2060 @@
1/*
2 * Copyright (C) 2012 Samsung Electronics Co.Ltd
3 * Authors:
4 * Eunchul Kim <chulspro.kim@samsung.com>
5 * Jinyoung Jeon <jy0.jeon@samsung.com>
6 * Sangmin Lee <lsmin.lee@samsung.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 */
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/types.h>
18#include <linux/clk.h>
19#include <linux/pm_runtime.h>
20#include <plat/map-base.h>
21
22#include <drm/drmP.h>
23#include <drm/exynos_drm.h>
24#include "exynos_drm_drv.h"
25#include "exynos_drm_gem.h"
26#include "exynos_drm_ipp.h"
27#include "exynos_drm_iommu.h"
28
29/*
 30 * IPP stands for Image Post Processing and
 31 * supports image scaler/rotator and input/output DMA operations
 32 * using FIMC, GSC, Rotator and so on.
 33 * IPP is an integrated device driver for hardware with these attributes.
34 */
35
36/*
37 * TODO
38 * 1. expand command control id.
39 * 2. integrate property and config.
 40 * 3. remove send_event id check routine.
 41 * 4. compare send_event id if needed.
 42 * 5. free subdrv_remove notifier callback list if needed.
 43 * 6. check subdrv_open behaviour for multi-open.
 44 * 7. implement power_on for power and sysmmu control.
45 */
46
47#define get_ipp_context(dev) platform_get_drvdata(to_platform_device(dev))
48#define ipp_is_m2m_cmd(c) (c == IPP_CMD_M2M)
49
50/*
51 * A structure of event.
52 *
53 * @base: base of event.
54 * @event: ipp event.
55 */
56struct drm_exynos_ipp_send_event {
57 struct drm_pending_event base;
58 struct drm_exynos_ipp_event event;
59};
60
61/*
62 * A structure of memory node.
63 *
64 * @list: list head to memory queue information.
65 * @ops_id: id of operations.
66 * @prop_id: id of property.
67 * @buf_id: id of buffer.
68 * @buf_info: gem objects and dma address, size.
69 * @filp: a pointer to drm_file.
70 */
71struct drm_exynos_ipp_mem_node {
72 struct list_head list;
73 enum drm_exynos_ops_id ops_id;
74 u32 prop_id;
75 u32 buf_id;
76 struct drm_exynos_ipp_buf_info buf_info;
77 struct drm_file *filp;
78};
79
80/*
81 * A structure of ipp context.
82 *
83 * @subdrv: prepare initialization using subdrv.
84 * @ipp_lock: lock for synchronization of access to ipp_idr.
85 * @prop_lock: lock for synchronization of access to prop_idr.
86 * @ipp_idr: ipp driver idr.
87 * @prop_idr: property idr.
88 * @event_workq: event work queue.
89 * @cmd_workq: command work queue.
90 */
91struct ipp_context {
92 struct exynos_drm_subdrv subdrv;
93 struct mutex ipp_lock;
94 struct mutex prop_lock;
95 struct idr ipp_idr;
96 struct idr prop_idr;
97 struct workqueue_struct *event_workq;
98 struct workqueue_struct *cmd_workq;
99};
100
101static LIST_HEAD(exynos_drm_ippdrv_list);
102static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
103static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);
104
105int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
106{
107 DRM_DEBUG_KMS("%s\n", __func__);
108
109 if (!ippdrv)
110 return -EINVAL;
111
112 mutex_lock(&exynos_drm_ippdrv_lock);
113 list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
114 mutex_unlock(&exynos_drm_ippdrv_lock);
115
116 return 0;
117}
118
119int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
120{
121 DRM_DEBUG_KMS("%s\n", __func__);
122
123 if (!ippdrv)
124 return -EINVAL;
125
126 mutex_lock(&exynos_drm_ippdrv_lock);
127 list_del(&ippdrv->drv_list);
128 mutex_unlock(&exynos_drm_ippdrv_lock);
129
130 return 0;
131}
132
133static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj,
134 u32 *idp)
135{
136 int ret;
137
138 DRM_DEBUG_KMS("%s\n", __func__);
139
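	/*
	 * Old two-step idr API: idr_pre_get() preallocates memory, then
	 * the id is taken under the lock; retry when a concurrent
	 * allocator consumed the preallocation (-EAGAIN).
	 */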
140again:
141 /* ensure there is space available to allocate a handle */
142 if (idr_pre_get(id_idr, GFP_KERNEL) == 0) {
143 DRM_ERROR("failed to get idr.\n");
144 return -ENOMEM;
145 }
146
147 /* do the allocation under our mutexlock */
148 mutex_lock(lock);
149 ret = idr_get_new_above(id_idr, obj, 1, (int *)idp);
150 mutex_unlock(lock);
151 if (ret == -EAGAIN)
152 goto again;
153
154 return ret;
155}
156
157static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
158{
159 void *obj;
160
161 DRM_DEBUG_KMS("%s:id[%d]\n", __func__, id);
162
163 mutex_lock(lock);
164
165 /* find object using handle */
166 obj = idr_find(id_idr, id);
167 if (!obj) {
168 DRM_ERROR("failed to find object.\n");
169 mutex_unlock(lock);
170 return ERR_PTR(-ENODEV);
171 }
172
173 mutex_unlock(lock);
174
175 return obj;
176}
177
178static inline bool ipp_check_dedicated(struct exynos_drm_ippdrv *ippdrv,
179 enum drm_exynos_ipp_cmd cmd)
180{
181 /*
182	 * A driver counts as busy if it is marked dedicated, or if a
183	 * WB or OUTPUT operation is running with the device powered on.
184 */
185 if (ippdrv->dedicated || (!ipp_is_m2m_cmd(cmd) &&
186 !pm_runtime_suspended(ippdrv->dev)))
187 return true;
188
189 return false;
190}
191
192static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
193 struct drm_exynos_ipp_property *property)
194{
195 struct exynos_drm_ippdrv *ippdrv;
196 u32 ipp_id = property->ipp_id;
197
198 DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, ipp_id);
199
200 if (ipp_id) {
201 /* find ipp driver using idr */
202 ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
203 ipp_id);
204 if (IS_ERR_OR_NULL(ippdrv)) {
205 DRM_ERROR("ipp%d driver not found.\n", ipp_id);
206 return ippdrv;
207 }
208
209 /*
210		 * WB and OUTPUT operations do not support multi-operation,
211		 * so mark the driver dedicated in the set property ioctl;
212		 * the flag is cleared when the ipp driver finishes its operations.
213 */
214 if (ipp_check_dedicated(ippdrv, property->cmd)) {
215 DRM_ERROR("chosen device is already in use.\n");
216 return ERR_PTR(-EBUSY);
217 }
218
219 /*
220		 * This check is necessary to find the correct device:
221		 * ipp drivers have different capabilities, so the requested
222		 * property must be validated against this driver.
223 */
224 if (ippdrv->check_property &&
225 ippdrv->check_property(ippdrv->dev, property)) {
226 DRM_ERROR("property not supported.\n");
227 return ERR_PTR(-EINVAL);
228 }
229
230 return ippdrv;
231 } else {
232 /*
233		 * The user application did not set an ipp_id, so search
234		 * the whole driver list for a free driver that
235		 * supports the requested property.
236 */
237 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
238 if (ipp_check_dedicated(ippdrv, property->cmd)) {
239 DRM_DEBUG_KMS("%s:used device.\n", __func__);
240 continue;
241 }
242
243 if (ippdrv->check_property &&
244 ippdrv->check_property(ippdrv->dev, property)) {
245 DRM_DEBUG_KMS("%s:not support property.\n",
246 __func__);
247 continue;
248 }
249
250 return ippdrv;
251 }
252
253 DRM_ERROR("no ipp driver supports the requested operations.\n");
254 }
255
256 return ERR_PTR(-ENODEV);
257}
258
259static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
260{
261 struct exynos_drm_ippdrv *ippdrv;
262 struct drm_exynos_ipp_cmd_node *c_node;
263 int count = 0;
264
265 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
266
267 if (list_empty(&exynos_drm_ippdrv_list)) {
268 DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
269 return ERR_PTR(-ENODEV);
270 }
271
272 /*
273	 * Search for the ipp driver that owns this prop_id handle;
274	 * the ipp subsystem looks up drivers by prop_id in paths such
275	 * as the PAUSE state, queue buf and command control.
276 */
277 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
278 DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n", __func__,
279 count++, (int)ippdrv);
280
281 if (!list_empty(&ippdrv->cmd_list)) {
282 list_for_each_entry(c_node, &ippdrv->cmd_list, list)
283 if (c_node->property.prop_id == prop_id)
284 return ippdrv;
285 }
286 }
287
288 return ERR_PTR(-ENODEV);
289}
290
291int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
292 struct drm_file *file)
293{
294 struct drm_exynos_file_private *file_priv = file->driver_priv;
295 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
296 struct device *dev = priv->dev;
297 struct ipp_context *ctx = get_ipp_context(dev);
298 struct drm_exynos_ipp_prop_list *prop_list = data;
299 struct exynos_drm_ippdrv *ippdrv;
300 int count = 0;
301
302 DRM_DEBUG_KMS("%s\n", __func__);
303
304 if (!ctx) {
305 DRM_ERROR("invalid context.\n");
306 return -EINVAL;
307 }
308
309 if (!prop_list) {
310 DRM_ERROR("invalid property parameter.\n");
311 return -EINVAL;
312 }
313
314 DRM_DEBUG_KMS("%s:ipp_id[%d]\n", __func__, prop_list->ipp_id);
315
316 if (!prop_list->ipp_id) {
317 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
318 count++;
319 /*
320		 * Report the ippdrv count to the user application:
321		 * the first step queries the number of drivers, and the
322		 * second step queries each ippdrv capability by ipp_id.
323 */
324 prop_list->count = count;
325 } else {
326 /*
327		 * Report the ippdrv capability selected by ipp_id.
328		 * Some devices do not support the wb or output interface,
329		 * so the user application uses this ioctl to detect the
330		 * correct ipp driver.
331 */
332 ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
333 prop_list->ipp_id);
334		if (IS_ERR_OR_NULL(ippdrv)) {
335			DRM_ERROR("ipp%d driver not found.\n",
336					prop_list->ipp_id);
337 return -EINVAL;
338 }
339
340		*prop_list = *ippdrv->prop_list;
341 }
342
343 return 0;
344}
345
346static void ipp_print_property(struct drm_exynos_ipp_property *property,
347 int idx)
348{
349 struct drm_exynos_ipp_config *config = &property->config[idx];
350 struct drm_exynos_pos *pos = &config->pos;
351 struct drm_exynos_sz *sz = &config->sz;
352
353 DRM_DEBUG_KMS("%s:prop_id[%d]ops[%s]fmt[0x%x]\n",
354 __func__, property->prop_id, idx ? "dst" : "src", config->fmt);
355
356 DRM_DEBUG_KMS("%s:pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
357 __func__, pos->x, pos->y, pos->w, pos->h,
358 sz->hsize, sz->vsize, config->flip, config->degree);
359}
360
361static int ipp_find_and_set_property(struct drm_exynos_ipp_property *property)
362{
363 struct exynos_drm_ippdrv *ippdrv;
364 struct drm_exynos_ipp_cmd_node *c_node;
365 u32 prop_id = property->prop_id;
366
367 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, prop_id);
368
369 ippdrv = ipp_find_drv_by_handle(prop_id);
370 if (IS_ERR_OR_NULL(ippdrv)) {
371 DRM_ERROR("failed to get ipp driver.\n");
372 return -EINVAL;
373 }
374
375 /*
376	 * Find the command node in the ippdrv command list using
377	 * prop_id, and store the new property information in that
378	 * command node.
379 */
380 list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
381 if ((c_node->property.prop_id == prop_id) &&
382 (c_node->state == IPP_STATE_STOP)) {
383 DRM_DEBUG_KMS("%s:found cmd[%d]ippdrv[0x%x]\n",
384 __func__, property->cmd, (int)ippdrv);
385
386 c_node->property = *property;
387 return 0;
388 }
389 }
390
391 DRM_ERROR("failed to search property.\n");
392
393 return -EINVAL;
394}
395
396static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
397{
398 struct drm_exynos_ipp_cmd_work *cmd_work;
399
400 DRM_DEBUG_KMS("%s\n", __func__);
401
402 cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
403 if (!cmd_work) {
404 DRM_ERROR("failed to alloc cmd_work.\n");
405 return ERR_PTR(-ENOMEM);
406 }
407
408 INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
409
410 return cmd_work;
411}
412
413static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
414{
415 struct drm_exynos_ipp_event_work *event_work;
416
417 DRM_DEBUG_KMS("%s\n", __func__);
418
419 event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
420 if (!event_work) {
421 DRM_ERROR("failed to alloc event_work.\n");
422 return ERR_PTR(-ENOMEM);
423 }
424
425 INIT_WORK((struct work_struct *)event_work, ipp_sched_event);
426
427 return event_work;
428}
429
430int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
431 struct drm_file *file)
432{
433 struct drm_exynos_file_private *file_priv = file->driver_priv;
434 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
435 struct device *dev = priv->dev;
436 struct ipp_context *ctx = get_ipp_context(dev);
437 struct drm_exynos_ipp_property *property = data;
438 struct exynos_drm_ippdrv *ippdrv;
439 struct drm_exynos_ipp_cmd_node *c_node;
440 int ret, i;
441
442 DRM_DEBUG_KMS("%s\n", __func__);
443
444 if (!ctx) {
445 DRM_ERROR("invalid context.\n");
446 return -EINVAL;
447 }
448
449 if (!property) {
450 DRM_ERROR("invalid property parameter.\n");
451 return -EINVAL;
452 }
453
454 /*
455	 * Log the property values that the user application
456	 * configured for each operation.
457 */
458 for_each_ipp_ops(i)
459 ipp_print_property(property, i);
460
461 /*
462	 * The set property ioctl normally generates a new prop_id,
463	 * but a prop_id may already be assigned by an earlier set
464	 * property call (e.g. PAUSE state); in that case find the
465	 * current prop_id and reuse it instead of allocating one.
466 */
467 if (property->prop_id) {
468 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
469 return ipp_find_and_set_property(property);
470 }
471
472 /* find ipp driver using ipp id */
473 ippdrv = ipp_find_driver(ctx, property);
474 if (IS_ERR_OR_NULL(ippdrv)) {
475 DRM_ERROR("failed to get ipp driver.\n");
476 return -EINVAL;
477 }
478
479 /* allocate command node */
480 c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
481 if (!c_node) {
482 DRM_ERROR("failed to allocate command node.\n");
483 return -ENOMEM;
484 }
485
486 /* create property id */
487 ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
488 &property->prop_id);
489 if (ret) {
490 DRM_ERROR("failed to create id.\n");
491 goto err_clear;
492 }
493
494 DRM_DEBUG_KMS("%s:created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
495 __func__, property->prop_id, property->cmd, (int)ippdrv);
496
497 /* stored property information and ippdrv in private data */
498 c_node->priv = priv;
499 c_node->property = *property;
500 c_node->state = IPP_STATE_IDLE;
501
502 c_node->start_work = ipp_create_cmd_work();
503 if (IS_ERR_OR_NULL(c_node->start_work)) {
504 DRM_ERROR("failed to create start work.\n");
505 goto err_clear;
506 }
507
508 c_node->stop_work = ipp_create_cmd_work();
509 if (IS_ERR_OR_NULL(c_node->stop_work)) {
510 DRM_ERROR("failed to create stop work.\n");
511 goto err_free_start;
512 }
513
514 c_node->event_work = ipp_create_event_work();
515 if (IS_ERR_OR_NULL(c_node->event_work)) {
516 DRM_ERROR("failed to create event work.\n");
517 goto err_free_stop;
518 }
519
520 mutex_init(&c_node->cmd_lock);
521 mutex_init(&c_node->mem_lock);
522 mutex_init(&c_node->event_lock);
523
524 init_completion(&c_node->start_complete);
525 init_completion(&c_node->stop_complete);
526
527 for_each_ipp_ops(i)
528 INIT_LIST_HEAD(&c_node->mem_list[i]);
529
530 INIT_LIST_HEAD(&c_node->event_list);
531 list_splice_init(&priv->event_list, &c_node->event_list);
532 list_add_tail(&c_node->list, &ippdrv->cmd_list);
533
534 /* make dedicated state without m2m */
535 if (!ipp_is_m2m_cmd(property->cmd))
536 ippdrv->dedicated = true;
537
538 return 0;
539
540err_free_stop:
541 kfree(c_node->stop_work);
542err_free_start:
543 kfree(c_node->start_work);
544err_clear:
545 kfree(c_node);
546 return ret;
547}
548
549static void ipp_clean_cmd_node(struct drm_exynos_ipp_cmd_node *c_node)
550{
551 DRM_DEBUG_KMS("%s\n", __func__);
552
553 /* delete list */
554 list_del(&c_node->list);
555
556 /* destroy mutex */
557 mutex_destroy(&c_node->cmd_lock);
558 mutex_destroy(&c_node->mem_lock);
559 mutex_destroy(&c_node->event_lock);
560
561 /* free command node */
562 kfree(c_node->start_work);
563 kfree(c_node->stop_work);
564 kfree(c_node->event_work);
565 kfree(c_node);
566}
567
568static int ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
569{
570 struct drm_exynos_ipp_property *property = &c_node->property;
571 struct drm_exynos_ipp_mem_node *m_node;
572 struct list_head *head;
573 int ret, i, count[EXYNOS_DRM_OPS_MAX] = { 0, };
574
575 DRM_DEBUG_KMS("%s\n", __func__);
576
577 mutex_lock(&c_node->mem_lock);
578
579 for_each_ipp_ops(i) {
580 /* source/destination memory list */
581 head = &c_node->mem_list[i];
582
583 if (list_empty(head)) {
584 DRM_DEBUG_KMS("%s:%s memory empty.\n", __func__,
585 i ? "dst" : "src");
586 continue;
587 }
588
589 /* find memory node entry */
590 list_for_each_entry(m_node, head, list) {
591 DRM_DEBUG_KMS("%s:%s,count[%d]m_node[0x%x]\n", __func__,
592 i ? "dst" : "src", count[i], (int)m_node);
593 count[i]++;
594 }
595 }
596
597 DRM_DEBUG_KMS("%s:min[%d]max[%d]\n", __func__,
598 min(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]),
599 max(count[EXYNOS_DRM_OPS_SRC], count[EXYNOS_DRM_OPS_DST]));
600
601 /*
602	 * M2M operations need paired src/dst memory addresses, so
603	 * the minimum of the src and dst counts is used; other cases
604	 * do not use paired memory, so the maximum count is used.
605 */
606 if (ipp_is_m2m_cmd(property->cmd))
607 ret = min(count[EXYNOS_DRM_OPS_SRC],
608 count[EXYNOS_DRM_OPS_DST]);
609 else
610 ret = max(count[EXYNOS_DRM_OPS_SRC],
611 count[EXYNOS_DRM_OPS_DST]);
612
613 mutex_unlock(&c_node->mem_lock);
614
615 return ret;
616}
617
618static struct drm_exynos_ipp_mem_node
619 *ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
620 struct drm_exynos_ipp_queue_buf *qbuf)
621{
622 struct drm_exynos_ipp_mem_node *m_node;
623 struct list_head *head;
624 int count = 0;
625
626 DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__, qbuf->buf_id);
627
628 /* source/destination memory list */
629 head = &c_node->mem_list[qbuf->ops_id];
630
631 /* find memory node from memory list */
632 list_for_each_entry(m_node, head, list) {
633 DRM_DEBUG_KMS("%s:count[%d]m_node[0x%x]\n",
634 __func__, count++, (int)m_node);
635
636 /* compare buffer id */
637 if (m_node->buf_id == qbuf->buf_id)
638 return m_node;
639 }
640
641 return NULL;
642}
643
644static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
645 struct drm_exynos_ipp_cmd_node *c_node,
646 struct drm_exynos_ipp_mem_node *m_node)
647{
648 struct exynos_drm_ipp_ops *ops = NULL;
649 int ret = 0;
650
651 DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
652
653 if (!m_node) {
654 DRM_ERROR("invalid queue node.\n");
655 return -EFAULT;
656 }
657
658 mutex_lock(&c_node->mem_lock);
659
660 DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
661
662 /* get operations callback */
663 ops = ippdrv->ops[m_node->ops_id];
664 if (!ops) {
665 DRM_ERROR("ops not supported.\n");
666 ret = -EFAULT;
667 goto err_unlock;
668 }
669
670 /* set address and enable irq */
671 if (ops->set_addr) {
672 ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
673 m_node->buf_id, IPP_BUF_ENQUEUE);
674 if (ret) {
675 DRM_ERROR("failed to set addr.\n");
676 goto err_unlock;
677 }
678 }
679
680err_unlock:
681 mutex_unlock(&c_node->mem_lock);
682 return ret;
683}
684
685static struct drm_exynos_ipp_mem_node
686 *ipp_get_mem_node(struct drm_device *drm_dev,
687 struct drm_file *file,
688 struct drm_exynos_ipp_cmd_node *c_node,
689 struct drm_exynos_ipp_queue_buf *qbuf)
690{
691 struct drm_exynos_ipp_mem_node *m_node;
692 struct drm_exynos_ipp_buf_info buf_info;
693 void *addr;
694 int i;
695
696 DRM_DEBUG_KMS("%s\n", __func__);
697
698 mutex_lock(&c_node->mem_lock);
699
700 m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
701 if (!m_node) {
702 DRM_ERROR("failed to allocate queue node.\n");
703 goto err_unlock;
704 }
705
706 /* clear base address for error handling */
707 memset(&buf_info, 0x0, sizeof(buf_info));
708
709 /* operations, buffer id */
710 m_node->ops_id = qbuf->ops_id;
711 m_node->prop_id = qbuf->prop_id;
712 m_node->buf_id = qbuf->buf_id;
713
714 DRM_DEBUG_KMS("%s:m_node[0x%x]ops_id[%d]\n", __func__,
715 (int)m_node, qbuf->ops_id);
716 DRM_DEBUG_KMS("%s:prop_id[%d]buf_id[%d]\n", __func__,
717 qbuf->prop_id, m_node->buf_id);
718
719 for_each_ipp_planar(i) {
720 DRM_DEBUG_KMS("%s:i[%d]handle[0x%x]\n", __func__,
721 i, qbuf->handle[i]);
722
723 /* get dma address by handle */
724 if (qbuf->handle[i]) {
725 addr = exynos_drm_gem_get_dma_addr(drm_dev,
726 qbuf->handle[i], file);
727 if (IS_ERR(addr)) {
728 DRM_ERROR("failed to get addr.\n");
729 goto err_clear;
730 }
731
732 buf_info.handles[i] = qbuf->handle[i];
733 buf_info.base[i] = *(dma_addr_t *) addr;
734 DRM_DEBUG_KMS("%s:i[%d]base[0x%x]hd[0x%x]\n",
735 __func__, i, buf_info.base[i],
736 (int)buf_info.handles[i]);
737 }
738 }
739
740 m_node->filp = file;
741 m_node->buf_info = buf_info;
742 list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
743
744 mutex_unlock(&c_node->mem_lock);
745 return m_node;
746
747err_clear:
748 kfree(m_node);
749err_unlock:
750 mutex_unlock(&c_node->mem_lock);
751 return ERR_PTR(-EFAULT);
752}
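
/*
 * For illustration, a minimal userspace sketch of the enqueue path that
 * ends up in ipp_get_mem_node(). The ioctl number is assumed to come
 * from the exynos uapi header, and "gem_handle"/"prop_id" from earlier
 * GEM and SET_PROPERTY calls; error handling is omitted.
 *
 *	struct drm_exynos_ipp_queue_buf qbuf = {
 *		.ops_id   = EXYNOS_DRM_OPS_SRC,
 *		.buf_type = IPP_BUF_ENQUEUE,
 *		.prop_id  = prop_id,
 *		.buf_id   = 0,
 *		.handle   = { gem_handle, 0, 0 },
 *	};
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
 */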
753
754static int ipp_put_mem_node(struct drm_device *drm_dev,
755 struct drm_exynos_ipp_cmd_node *c_node,
756 struct drm_exynos_ipp_mem_node *m_node)
757{
758 int i;
759
760 DRM_DEBUG_KMS("%s:node[0x%x]\n", __func__, (int)m_node);
761
762 if (!m_node) {
763 DRM_ERROR("invalid dequeue node.\n");
764 return -EFAULT;
765 }
766
767 if (list_empty(&m_node->list)) {
768 DRM_ERROR("empty memory node.\n");
769 return -ENOMEM;
770 }
771
772 mutex_lock(&c_node->mem_lock);
773
774 DRM_DEBUG_KMS("%s:ops_id[%d]\n", __func__, m_node->ops_id);
775
776 /* put gem buffer */
777 for_each_ipp_planar(i) {
778 unsigned long handle = m_node->buf_info.handles[i];
779 if (handle)
780 exynos_drm_gem_put_dma_addr(drm_dev, handle,
781 m_node->filp);
782 }
783
784 /* delete list in queue */
785 list_del(&m_node->list);
786 kfree(m_node);
787
788 mutex_unlock(&c_node->mem_lock);
789
790 return 0;
791}
792
793static void ipp_free_event(struct drm_pending_event *event)
794{
795 kfree(event);
796}
797
798static int ipp_get_event(struct drm_device *drm_dev,
799 struct drm_file *file,
800 struct drm_exynos_ipp_cmd_node *c_node,
801 struct drm_exynos_ipp_queue_buf *qbuf)
802{
803 struct drm_exynos_ipp_send_event *e;
804 unsigned long flags;
805
806 DRM_DEBUG_KMS("%s:ops_id[%d]buf_id[%d]\n", __func__,
807 qbuf->ops_id, qbuf->buf_id);
808
809 e = kzalloc(sizeof(*e), GFP_KERNEL);
810
811 if (!e) {
812 DRM_ERROR("failed to allocate event.\n");
813 spin_lock_irqsave(&drm_dev->event_lock, flags);
814 file->event_space += sizeof(e->event);
815 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
816 return -ENOMEM;
817 }
818
819 /* make event */
820 e->event.base.type = DRM_EXYNOS_IPP_EVENT;
821 e->event.base.length = sizeof(e->event);
822 e->event.user_data = qbuf->user_data;
823 e->event.prop_id = qbuf->prop_id;
824 e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
825 e->base.event = &e->event.base;
826 e->base.file_priv = file;
827 e->base.destroy = ipp_free_event;
828 list_add_tail(&e->base.link, &c_node->event_list);
829
830 return 0;
831}
832
833static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
834 struct drm_exynos_ipp_queue_buf *qbuf)
835{
836 struct drm_exynos_ipp_send_event *e, *te;
837 int count = 0;
838
839 DRM_DEBUG_KMS("%s\n", __func__);
840
841 if (list_empty(&c_node->event_list)) {
842 DRM_DEBUG_KMS("%s:event_list is empty.\n", __func__);
843 return;
844 }
845
846 list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
847 DRM_DEBUG_KMS("%s:count[%d]e[0x%x]\n",
848 __func__, count++, (int)e);
849
850 /*
851 * qbuf == NULL means delete all events:
852 * stop operations want to flush the whole event list.
853 * Otherwise delete only the event with the matching buf id.
854 */
855 if (!qbuf) {
856 /* delete list */
857 list_del(&e->base.link);
858 kfree(e);
859 }
860
861 /* compare buffer id */
862 if (qbuf && (qbuf->buf_id ==
863 e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
864 /* delete list */
865 list_del(&e->base.link);
866 kfree(e);
867 return;
868 }
869 }
870}
871
872void ipp_handle_cmd_work(struct device *dev,
873 struct exynos_drm_ippdrv *ippdrv,
874 struct drm_exynos_ipp_cmd_work *cmd_work,
875 struct drm_exynos_ipp_cmd_node *c_node)
876{
877 struct ipp_context *ctx = get_ipp_context(dev);
878
879 cmd_work->ippdrv = ippdrv;
880 cmd_work->c_node = c_node;
881 queue_work(ctx->cmd_workq, (struct work_struct *)cmd_work);
882}
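
/*
 * Note on the cast above: it relies on "work" being the first member of
 * struct drm_exynos_ipp_cmd_work, so the two pointers coincide. Inside a
 * handler the reverse direction is usually spelled with container_of(),
 * e.g. (equivalent sketch):
 *
 *	struct drm_exynos_ipp_cmd_work *cmd_work =
 *		container_of(work, struct drm_exynos_ipp_cmd_work, work);
 */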
883
884static int ipp_queue_buf_with_run(struct device *dev,
885 struct drm_exynos_ipp_cmd_node *c_node,
886 struct drm_exynos_ipp_mem_node *m_node,
887 struct drm_exynos_ipp_queue_buf *qbuf)
888{
889 struct exynos_drm_ippdrv *ippdrv;
890 struct drm_exynos_ipp_property *property;
891 struct exynos_drm_ipp_ops *ops;
892 int ret;
893
894 DRM_DEBUG_KMS("%s\n", __func__);
895
896 ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
897 if (IS_ERR_OR_NULL(ippdrv)) {
898 DRM_ERROR("failed to get ipp driver.\n");
899 return -EFAULT;
900 }
901
902 ops = ippdrv->ops[qbuf->ops_id];
903 if (!ops) {
904 DRM_ERROR("failed to get ops.\n");
905 return -EFAULT;
906 }
907
908 property = &c_node->property;
909
910 if (c_node->state != IPP_STATE_START) {
911 DRM_DEBUG_KMS("%s:bypass for invalid state.\n", __func__);
912 return 0;
913 }
914
915 if (!ipp_check_mem_list(c_node)) {
916 DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
917 return 0;
918 }
919
920 /*
921 * If the destination buffer is set and the clock is enabled,
922 * then M2M operations need to start here at queue_buf time.
923 */
924 if (ipp_is_m2m_cmd(property->cmd)) {
925 struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;
926
927 cmd_work->ctrl = IPP_CTRL_PLAY;
928 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
929 } else {
930 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
931 if (ret) {
932 DRM_ERROR("failed to set m node.\n");
933 return ret;
934 }
935 }
936
937 return 0;
938}
939
940static void ipp_clean_queue_buf(struct drm_device *drm_dev,
941 struct drm_exynos_ipp_cmd_node *c_node,
942 struct drm_exynos_ipp_queue_buf *qbuf)
943{
944 struct drm_exynos_ipp_mem_node *m_node, *tm_node;
945
946 DRM_DEBUG_KMS("%s\n", __func__);
947
948 if (!list_empty(&c_node->mem_list[qbuf->ops_id])) {
949 /* delete list */
950 list_for_each_entry_safe(m_node, tm_node,
951 &c_node->mem_list[qbuf->ops_id], list) {
952 if (m_node->buf_id == qbuf->buf_id &&
953 m_node->ops_id == qbuf->ops_id)
954 ipp_put_mem_node(drm_dev, c_node, m_node);
955 }
956 }
957}
958
959int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
960 struct drm_file *file)
961{
962 struct drm_exynos_file_private *file_priv = file->driver_priv;
963 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
964 struct device *dev = priv->dev;
965 struct ipp_context *ctx = get_ipp_context(dev);
966 struct drm_exynos_ipp_queue_buf *qbuf = data;
967 struct drm_exynos_ipp_cmd_node *c_node;
968 struct drm_exynos_ipp_mem_node *m_node;
969 int ret;
970
971 DRM_DEBUG_KMS("%s\n", __func__);
972
973 if (!qbuf) {
974 DRM_ERROR("invalid buf parameter.\n");
975 return -EINVAL;
976 }
977
978 if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
979 DRM_ERROR("invalid ops parameter.\n");
980 return -EINVAL;
981 }
982
983 DRM_DEBUG_KMS("%s:prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
984 __func__, qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
985 qbuf->buf_id, qbuf->buf_type);
986
987 /* find command node */
988 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
989 qbuf->prop_id);
990 if (!c_node) {
991 DRM_ERROR("failed to get command node.\n");
992 return -EFAULT;
993 }
994
995 /* buffer control */
996 switch (qbuf->buf_type) {
997 case IPP_BUF_ENQUEUE:
998 /* get memory node */
999 m_node = ipp_get_mem_node(drm_dev, file, c_node, qbuf);
1000 if (IS_ERR(m_node)) {
1001 DRM_ERROR("failed to get m_node.\n");
1002 return PTR_ERR(m_node);
1003 }
1004
1005 /*
1006 * First step: get an event for the destination buffer.
1007 * Second step: in the M2M case, run with the destination buffer
1008 * if needed.
1009 */
1010 if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
1011 /* get event for destination buffer */
1012 ret = ipp_get_event(drm_dev, file, c_node, qbuf);
1013 if (ret) {
1014 DRM_ERROR("failed to get event.\n");
1015 goto err_clean_node;
1016 }
1017
1018 /*
1019 * The M2M case runs play control for the streaming feature.
1020 * Other cases set the address and wait.
1021 */
1022 ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
1023 if (ret) {
1024 DRM_ERROR("failed to run command.\n");
1025 goto err_clean_node;
1026 }
1027 }
1028 break;
1029 case IPP_BUF_DEQUEUE:
1030 mutex_lock(&c_node->cmd_lock);
1031
1032 /* put event for destination buffer */
1033 if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
1034 ipp_put_event(c_node, qbuf);
1035
1036 ipp_clean_queue_buf(drm_dev, c_node, qbuf);
1037
1038 mutex_unlock(&c_node->cmd_lock);
1039 break;
1040 default:
1041 DRM_ERROR("invalid buffer control.\n");
1042 return -EINVAL;
1043 }
1044
1045 return 0;
1046
1047err_clean_node:
1048 DRM_ERROR("clean memory nodes.\n");
1049
1050 ipp_clean_queue_buf(drm_dev, c_node, qbuf);
1051 return ret;
1052}
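
/*
 * Illustrative userspace flow for the ioctl above (sketch only; the
 * ioctl number and event struct are assumed from the exynos uapi
 * header): enqueue a destination buffer, wait for the completion event
 * on the DRM fd, then dequeue the same buffer.
 *
 *	qbuf.ops_id   = EXYNOS_DRM_OPS_DST;
 *	qbuf.buf_type = IPP_BUF_ENQUEUE;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
 *	// poll(fd) and read() a struct drm_exynos_ipp_event here
 *	qbuf.buf_type = IPP_BUF_DEQUEUE;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
 */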
1053
1054static bool exynos_drm_ipp_check_valid(struct device *dev,
1055 enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
1056{
1057 DRM_DEBUG_KMS("%s\n", __func__);
1058
1059 if (ctrl != IPP_CTRL_PLAY) {
1060 if (pm_runtime_suspended(dev)) {
1061 DRM_ERROR("pm:runtime_suspended.\n");
1062 goto err_status;
1063 }
1064 }
1065
1066 switch (ctrl) {
1067 case IPP_CTRL_PLAY:
1068 if (state != IPP_STATE_IDLE)
1069 goto err_status;
1070 break;
1071 case IPP_CTRL_STOP:
1072 if (state == IPP_STATE_STOP)
1073 goto err_status;
1074 break;
1075 case IPP_CTRL_PAUSE:
1076 if (state != IPP_STATE_START)
1077 goto err_status;
1078 break;
1079 case IPP_CTRL_RESUME:
1080 if (state != IPP_STATE_STOP)
1081 goto err_status;
1082 break;
1083 default:
1084 DRM_ERROR("invalid state.\n");
1085 goto err_status;
1086 break;
1087 }
1088
1089 return true;
1090
1091err_status:
1092 DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
1093 return false;
1094}
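
/*
 * The transitions permitted by the checks above, summarized for
 * reference (non-PLAY controls are also rejected while the device is
 * runtime suspended):
 *
 *	ctrl		allowed current state
 *	PLAY		IDLE
 *	STOP		anything but STOP
 *	PAUSE		START
 *	RESUME		STOP
 */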
1095
1096int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
1097 struct drm_file *file)
1098{
1099 struct drm_exynos_file_private *file_priv = file->driver_priv;
1100 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
1101 struct exynos_drm_ippdrv *ippdrv = NULL;
1102 struct device *dev = priv->dev;
1103 struct ipp_context *ctx = get_ipp_context(dev);
1104 struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
1105 struct drm_exynos_ipp_cmd_work *cmd_work;
1106 struct drm_exynos_ipp_cmd_node *c_node;
1107
1108 DRM_DEBUG_KMS("%s\n", __func__);
1109
1110 if (!ctx) {
1111 DRM_ERROR("invalid context.\n");
1112 return -EINVAL;
1113 }
1114
1115 if (!cmd_ctrl) {
1116 DRM_ERROR("invalid control parameter.\n");
1117 return -EINVAL;
1118 }
1119
1120 DRM_DEBUG_KMS("%s:ctrl[%d]prop_id[%d]\n", __func__,
1121 cmd_ctrl->ctrl, cmd_ctrl->prop_id);
1122
1123 ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
1124 if (IS_ERR(ippdrv)) {
1125 DRM_ERROR("failed to get ipp driver.\n");
1126 return PTR_ERR(ippdrv);
1127 }
1128
1129 c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
1130 cmd_ctrl->prop_id);
1131 if (!c_node) {
1132 DRM_ERROR("invalid command node list.\n");
1133 return -EINVAL;
1134 }
1135
1136 if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
1137 c_node->state)) {
1138 DRM_ERROR("invalid state.\n");
1139 return -EINVAL;
1140 }
1141
1142 switch (cmd_ctrl->ctrl) {
1143 case IPP_CTRL_PLAY:
1144 if (pm_runtime_suspended(ippdrv->dev))
1145 pm_runtime_get_sync(ippdrv->dev);
1146 c_node->state = IPP_STATE_START;
1147
1148 cmd_work = c_node->start_work;
1149 cmd_work->ctrl = cmd_ctrl->ctrl;
1150 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1152 break;
1153 case IPP_CTRL_STOP:
1154 cmd_work = c_node->stop_work;
1155 cmd_work->ctrl = cmd_ctrl->ctrl;
1156 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1157
1158 if (!wait_for_completion_timeout(&c_node->stop_complete,
1159 msecs_to_jiffies(300))) {
1160 DRM_ERROR("timeout stop:prop_id[%d]\n",
1161 c_node->property.prop_id);
1162 }
1163
1164 c_node->state = IPP_STATE_STOP;
1165 ippdrv->dedicated = false;
1166 ipp_clean_cmd_node(c_node);
1167
1168 if (list_empty(&ippdrv->cmd_list))
1169 pm_runtime_put_sync(ippdrv->dev);
1170 break;
1171 case IPP_CTRL_PAUSE:
1172 cmd_work = c_node->stop_work;
1173 cmd_work->ctrl = cmd_ctrl->ctrl;
1174 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1175
1176 if (!wait_for_completion_timeout(&c_node->stop_complete,
1177 msecs_to_jiffies(200))) {
1178 DRM_ERROR("timeout stop:prop_id[%d]\n",
1179 c_node->property.prop_id);
1180 }
1181
1182 c_node->state = IPP_STATE_STOP;
1183 break;
1184 case IPP_CTRL_RESUME:
1185 c_node->state = IPP_STATE_START;
1186 cmd_work = c_node->start_work;
1187 cmd_work->ctrl = cmd_ctrl->ctrl;
1188 ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
1189 break;
1190 default:
1191 DRM_ERROR("unsupported control type.\n");
1192 return -EINVAL;
1193 }
1194
1195 DRM_DEBUG_KMS("%s:done ctrl[%d]prop_id[%d]\n", __func__,
1196 cmd_ctrl->ctrl, cmd_ctrl->prop_id);
1197
1198 return 0;
1199}
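
/*
 * Minimal userspace sketch of driving the ioctl above (illustration
 * only; the ioctl number is assumed from the exynos uapi header):
 *
 *	struct drm_exynos_ipp_cmd_ctrl cmd_ctrl = {
 *		.prop_id = prop_id,
 *		.ctrl    = IPP_CTRL_PLAY,
 *	};
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &cmd_ctrl);
 *	// ... queue buffers, consume events ...
 *	cmd_ctrl.ctrl = IPP_CTRL_STOP;
 *	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &cmd_ctrl);
 */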
1200
1201int exynos_drm_ippnb_register(struct notifier_block *nb)
1202{
1203 return blocking_notifier_chain_register(
1204 &exynos_drm_ippnb_list, nb);
1205}
1206
1207int exynos_drm_ippnb_unregister(struct notifier_block *nb)
1208{
1209 return blocking_notifier_chain_unregister(
1210 &exynos_drm_ippnb_list, nb);
1211}
1212
1213int exynos_drm_ippnb_send_event(unsigned long val, void *v)
1214{
1215 return blocking_notifier_call_chain(
1216 &exynos_drm_ippnb_list, val, v);
1217}
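
/*
 * A hypothetical consumer of the notifier chain above (names invented
 * for illustration): a display driver can subscribe to IPP writeback
 * notifications like this.
 *
 *	static int my_ipp_notify(struct notifier_block *nb,
 *			unsigned long val, void *data)
 *	{
 *		// e.g. val == IPP_SET_WRITEBACK,
 *		//      data == struct drm_exynos_ipp_set_wb *
 *		return NOTIFY_DONE;
 *	}
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_ipp_notify,
 *	};
 *	exynos_drm_ippnb_register(&my_nb);
 */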
1218
1219static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
1220 struct drm_exynos_ipp_property *property)
1221{
1222 struct exynos_drm_ipp_ops *ops = NULL;
1223 bool swap = false;
1224 int ret, i;
1225
1226 if (!property) {
1227 DRM_ERROR("invalid property parameter.\n");
1228 return -EINVAL;
1229 }
1230
1231 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
1232
1233 /* reset h/w block */
1234 if (ippdrv->reset &&
1235 ippdrv->reset(ippdrv->dev)) {
1236 DRM_ERROR("failed to reset.\n");
1237 return -EINVAL;
1238 }
1239
1240 /* set source,destination operations */
1241 for_each_ipp_ops(i) {
1242 struct drm_exynos_ipp_config *config =
1243 &property->config[i];
1244
1245 ops = ippdrv->ops[i];
1246 if (!ops || !config) {
1247 DRM_ERROR("not support ops and config.\n");
1248 return -EINVAL;
1249 }
1250
1251 /* set format */
1252 if (ops->set_fmt) {
1253 ret = ops->set_fmt(ippdrv->dev, config->fmt);
1254 if (ret) {
1255 DRM_ERROR("not support format.\n");
1256 return ret;
1257 }
1258 }
1259
1260 /* set transform for rotation, flip */
1261 if (ops->set_transf) {
1262 ret = ops->set_transf(ippdrv->dev, config->degree,
1263 config->flip, &swap);
1264 if (ret) {
1265 DRM_ERROR("not support transform.\n");
1266 return -EINVAL;
1267 }
1268 }
1269
1270 /* set size */
1271 if (ops->set_size) {
1272 ret = ops->set_size(ippdrv->dev, swap, &config->pos,
1273 &config->sz);
1274 if (ret) {
1275 DRM_ERROR("not support size.\n");
1276 return ret;
1277 }
1278 }
1279 }
1280
1281 return 0;
1282}
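
/*
 * In short, for each of the src and dst slots ipp_set_property()
 * programs the hardware in three steps: set_fmt(), then set_transf()
 * (which may report a width/height swap for 90/270 degree rotation),
 * then set_size() with the possibly swapped geometry.
 */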
1283
1284static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
1285 struct drm_exynos_ipp_cmd_node *c_node)
1286{
1287 struct drm_exynos_ipp_mem_node *m_node;
1288 struct drm_exynos_ipp_property *property = &c_node->property;
1289 struct list_head *head;
1290 int ret, i;
1291
1292 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
1293
1294 /* store command info in ippdrv */
1295 ippdrv->cmd = c_node;
1296
1297 if (!ipp_check_mem_list(c_node)) {
1298 DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
1299 return -ENOMEM;
1300 }
1301
1302 /* set current property in ippdrv */
1303 ret = ipp_set_property(ippdrv, property);
1304 if (ret) {
1305 DRM_ERROR("failed to set property.\n");
1306 ippdrv->cmd = NULL;
1307 return ret;
1308 }
1309
1310 /* check command */
1311 switch (property->cmd) {
1312 case IPP_CMD_M2M:
1313 for_each_ipp_ops(i) {
1314 /* source/destination memory list */
1315 head = &c_node->mem_list[i];
1316
1317 m_node = list_first_entry(head,
1318 struct drm_exynos_ipp_mem_node, list);
1319 if (!m_node) {
1320 DRM_ERROR("failed to get node.\n");
1321 ret = -EFAULT;
1322 return ret;
1323 }
1324
1325 DRM_DEBUG_KMS("%s:m_node[0x%x]\n",
1326 __func__, (int)m_node);
1327
1328 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1329 if (ret) {
1330 DRM_ERROR("failed to set m node.\n");
1331 return ret;
1332 }
1333 }
1334 break;
1335 case IPP_CMD_WB:
1336 /* destination memory list */
1337 head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
1338
1339 list_for_each_entry(m_node, head, list) {
1340 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1341 if (ret) {
1342 DRM_ERROR("failed to set m node.\n");
1343 return ret;
1344 }
1345 }
1346 break;
1347 case IPP_CMD_OUTPUT:
1348 /* source memory list */
1349 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1350
1351 list_for_each_entry(m_node, head, list) {
1352 ret = ipp_set_mem_node(ippdrv, c_node, m_node);
1353 if (ret) {
1354 DRM_ERROR("failed to set m node.\n");
1355 return ret;
1356 }
1357 }
1358 break;
1359 default:
1360 DRM_ERROR("invalid operations.\n");
1361 return -EINVAL;
1362 }
1363
1364 DRM_DEBUG_KMS("%s:cmd[%d]\n", __func__, property->cmd);
1365
1366 /* start operations */
1367 if (ippdrv->start) {
1368 ret = ippdrv->start(ippdrv->dev, property->cmd);
1369 if (ret) {
1370 DRM_ERROR("failed to start ops.\n");
1371 return ret;
1372 }
1373 }
1374
1375 return 0;
1376}
1377
1378static int ipp_stop_property(struct drm_device *drm_dev,
1379 struct exynos_drm_ippdrv *ippdrv,
1380 struct drm_exynos_ipp_cmd_node *c_node)
1381{
1382 struct drm_exynos_ipp_mem_node *m_node, *tm_node;
1383 struct drm_exynos_ipp_property *property = &c_node->property;
1384 struct list_head *head;
1385 int ret = 0, i;
1386
1387 DRM_DEBUG_KMS("%s:prop_id[%d]\n", __func__, property->prop_id);
1388
1389 /* put event */
1390 ipp_put_event(c_node, NULL);
1391
1392 /* check command */
1393 switch (property->cmd) {
1394 case IPP_CMD_M2M:
1395 for_each_ipp_ops(i) {
1396 /* source/destination memory list */
1397 head = &c_node->mem_list[i];
1398
1399 if (list_empty(head)) {
1400 DRM_DEBUG_KMS("%s:mem_list is empty.\n",
1401 __func__);
1402 break;
1403 }
1404
1405 list_for_each_entry_safe(m_node, tm_node,
1406 head, list) {
1407 ret = ipp_put_mem_node(drm_dev, c_node,
1408 m_node);
1409 if (ret) {
1410 DRM_ERROR("failed to put m_node.\n");
1411 goto err_clear;
1412 }
1413 }
1414 }
1415 break;
1416 case IPP_CMD_WB:
1417 /* destination memory list */
1418 head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];
1419
1420 if (list_empty(head)) {
1421 DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
1422 break;
1423 }
1424
1425 list_for_each_entry_safe(m_node, tm_node, head, list) {
1426 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1427 if (ret) {
1428 DRM_ERROR("failed to put m_node.\n");
1429 goto err_clear;
1430 }
1431 }
1432 break;
1433 case IPP_CMD_OUTPUT:
1434 /* source memory list */
1435 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1436
1437 if (list_empty(head)) {
1438 DRM_DEBUG_KMS("%s:mem_list is empty.\n", __func__);
1439 break;
1440 }
1441
1442 list_for_each_entry_safe(m_node, tm_node, head, list) {
1443 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1444 if (ret) {
1445 DRM_ERROR("failed to put m_node.\n");
1446 goto err_clear;
1447 }
1448 }
1449 break;
1450 default:
1451 DRM_ERROR("invalid operations.\n");
1452 ret = -EINVAL;
1453 goto err_clear;
1454 }
1455
1456err_clear:
1457 /* stop operations */
1458 if (ippdrv->stop)
1459 ippdrv->stop(ippdrv->dev, property->cmd);
1460
1461 return ret;
1462}
1463
1464void ipp_sched_cmd(struct work_struct *work)
1465{
1466 struct drm_exynos_ipp_cmd_work *cmd_work =
1467 (struct drm_exynos_ipp_cmd_work *)work;
1468 struct exynos_drm_ippdrv *ippdrv;
1469 struct drm_exynos_ipp_cmd_node *c_node;
1470 struct drm_exynos_ipp_property *property;
1471 int ret;
1472
1473 DRM_DEBUG_KMS("%s\n", __func__);
1474
1475 ippdrv = cmd_work->ippdrv;
1476 if (!ippdrv) {
1477 DRM_ERROR("invalid ippdrv list.\n");
1478 return;
1479 }
1480
1481 c_node = cmd_work->c_node;
1482 if (!c_node) {
1483 DRM_ERROR("invalid command node list.\n");
1484 return;
1485 }
1486
1487 mutex_lock(&c_node->cmd_lock);
1488
1489 property = &c_node->property;
1490 if (!property) {
1491 DRM_ERROR("failed to get property:prop_id[%d]\n",
1492 c_node->property.prop_id);
1493 goto err_unlock;
1494 }
1495
1496 switch (cmd_work->ctrl) {
1497 case IPP_CTRL_PLAY:
1498 case IPP_CTRL_RESUME:
1499 ret = ipp_start_property(ippdrv, c_node);
1500 if (ret) {
1501 DRM_ERROR("failed to start property:prop_id[%d]\n",
1502 c_node->property.prop_id);
1503 goto err_unlock;
1504 }
1505
1506 /*
1507 * The M2M case waits for completion of the transfer,
1508 * because M2M performs a single unit of operation at a
1509 * time over multiple queued buffers.
1510 * So M2M must wait for each data transfer to complete.
1511 */
1512 if (ipp_is_m2m_cmd(property->cmd)) {
1513 if (!wait_for_completion_timeout
1514 (&c_node->start_complete, msecs_to_jiffies(200))) {
1515 DRM_ERROR("timeout event:prop_id[%d]\n",
1516 c_node->property.prop_id);
1517 goto err_unlock;
1518 }
1519 }
1520 break;
1521 case IPP_CTRL_STOP:
1522 case IPP_CTRL_PAUSE:
1523 ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
1524 c_node);
1525 if (ret) {
1526 DRM_ERROR("failed to stop property.\n");
1527 goto err_unlock;
1528 }
1529
1530 complete(&c_node->stop_complete);
1531 break;
1532 default:
1533 DRM_ERROR("unknown control type\n");
1534 break;
1535 }
1536
1537 DRM_DEBUG_KMS("%s:ctrl[%d] done.\n", __func__, cmd_work->ctrl);
1538
1539err_unlock:
1540 mutex_unlock(&c_node->cmd_lock);
1541}
1542
1543static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
1544 struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
1545{
1546 struct drm_device *drm_dev = ippdrv->drm_dev;
1547 struct drm_exynos_ipp_property *property = &c_node->property;
1548 struct drm_exynos_ipp_mem_node *m_node;
1549 struct drm_exynos_ipp_queue_buf qbuf;
1550 struct drm_exynos_ipp_send_event *e;
1551 struct list_head *head;
1552 struct timeval now;
1553 unsigned long flags;
1554 u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
1555 int ret, i;
1556
1557 for_each_ipp_ops(i)
1558 DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
1559 i ? "dst" : "src", buf_id[i]);
1560
1561 if (!drm_dev) {
1562 DRM_ERROR("failed to get drm_dev.\n");
1563 return -EINVAL;
1564 }
1565
1566 if (!property) {
1567 DRM_ERROR("failed to get property.\n");
1568 return -EINVAL;
1569 }
1570
1571 if (list_empty(&c_node->event_list)) {
1572 DRM_DEBUG_KMS("%s:event list is empty.\n", __func__);
1573 return 0;
1574 }
1575
1576 if (!ipp_check_mem_list(c_node)) {
1577 DRM_DEBUG_KMS("%s:empty memory.\n", __func__);
1578 return 0;
1579 }
1580
1581 /* check command */
1582 switch (property->cmd) {
1583 case IPP_CMD_M2M:
1584 for_each_ipp_ops(i) {
1585 /* source/destination memory list */
1586 head = &c_node->mem_list[i];
1587
1588 m_node = list_first_entry(head,
1589 struct drm_exynos_ipp_mem_node, list);
1590 if (!m_node) {
1591 DRM_ERROR("empty memory node.\n");
1592 return -ENOMEM;
1593 }
1594
1595 tbuf_id[i] = m_node->buf_id;
1596 DRM_DEBUG_KMS("%s:%s buf_id[%d]\n", __func__,
1597 i ? "dst" : "src", tbuf_id[i]);
1598
1599 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1600 if (ret)
1601 DRM_ERROR("failed to put m_node.\n");
1602 }
1603 break;
1604 case IPP_CMD_WB:
1605 /* clear buf for finding */
1606 memset(&qbuf, 0x0, sizeof(qbuf));
1607 qbuf.ops_id = EXYNOS_DRM_OPS_DST;
1608 qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];
1609
1610 /* get memory node entry */
1611 m_node = ipp_find_mem_node(c_node, &qbuf);
1612 if (!m_node) {
1613 DRM_ERROR("empty memory node.\n");
1614 return -ENOMEM;
1615 }
1616
1617 tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;
1618
1619 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1620 if (ret)
1621 DRM_ERROR("failed to put m_node.\n");
1622 break;
1623 case IPP_CMD_OUTPUT:
1624 /* source memory list */
1625 head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];
1626
1627 m_node = list_first_entry(head,
1628 struct drm_exynos_ipp_mem_node, list);
1629 if (!m_node) {
1630 DRM_ERROR("empty memory node.\n");
1631 return -ENOMEM;
1632 }
1633
1634 tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;
1635
1636 ret = ipp_put_mem_node(drm_dev, c_node, m_node);
1637 if (ret)
1638 DRM_ERROR("failed to put m_node.\n");
1639 break;
1640 default:
1641 DRM_ERROR("invalid operations.\n");
1642 return -EINVAL;
1643 }
1644
1645 if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
1646 DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
1647 tbuf_id[1], buf_id[1], property->prop_id);
1648
1649 /*
1650 * The command node keeps an event list for destination buffers.
1651 * When a destination buffer is enqueued on the mem list,
1652 * an event is created and linked to the tail of the event list,
1653 * so the first event corresponds to the first enqueued buffer.
1654 */
1655 e = list_first_entry(&c_node->event_list,
1656 struct drm_exynos_ipp_send_event, base.link);
1657
1658 if (!e) {
1659 DRM_ERROR("empty event.\n");
1660 return -EINVAL;
1661 }
1662
1663 do_gettimeofday(&now);
1664 DRM_DEBUG_KMS("%s:tv_sec[%ld]tv_usec[%ld]\n"
1665 , __func__, now.tv_sec, now.tv_usec);
1666 e->event.tv_sec = now.tv_sec;
1667 e->event.tv_usec = now.tv_usec;
1668 e->event.prop_id = property->prop_id;
1669
1670 /* set buffer id about source destination */
1671 for_each_ipp_ops(i)
1672 e->event.buf_id[i] = tbuf_id[i];
1673
1674 spin_lock_irqsave(&drm_dev->event_lock, flags);
1675 list_move_tail(&e->base.link, &e->base.file_priv->event_list);
1676 wake_up_interruptible(&e->base.file_priv->event_wait);
1677 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
1678
1679 DRM_DEBUG_KMS("%s:done cmd[%d]prop_id[%d]buf_id[%d]\n", __func__,
1680 property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);
1681
1682 return 0;
1683}
1684
1685void ipp_sched_event(struct work_struct *work)
1686{
1687 struct drm_exynos_ipp_event_work *event_work =
1688 (struct drm_exynos_ipp_event_work *)work;
1689 struct exynos_drm_ippdrv *ippdrv;
1690 struct drm_exynos_ipp_cmd_node *c_node;
1691 int ret;
1692
1693 if (!event_work) {
1694 DRM_ERROR("failed to get event_work.\n");
1695 return;
1696 }
1697
1698 DRM_DEBUG_KMS("%s:buf_id[%d]\n", __func__,
1699 event_work->buf_id[EXYNOS_DRM_OPS_DST]);
1700
1701 ippdrv = event_work->ippdrv;
1702 if (!ippdrv) {
1703 DRM_ERROR("failed to get ipp driver.\n");
1704 return;
1705 }
1706
1707 c_node = ippdrv->cmd;
1708 if (!c_node) {
1709 DRM_ERROR("failed to get command node.\n");
1710 return;
1711 }
1712
1713 /*
1714 * IPP synchronizes the command thread and the event thread.
1715 * If userland closes IPP immediately, the event thread must
1716 * synchronize with the command thread, so just signal the
1717 * completion here instead of sending the event.
1718 */
1719 if (c_node->state != IPP_STATE_START) {
1720 DRM_DEBUG_KMS("%s:bypass state[%d]prop_id[%d]\n",
1721 __func__, c_node->state, c_node->property.prop_id);
1722 goto err_completion;
1723 }
1724
1725 mutex_lock(&c_node->event_lock);
1726
1727 ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
1728 if (ret) {
1729 DRM_ERROR("failed to send event.\n");
1730 goto err_completion;
1731 }
1732
1733err_completion:
1734 if (ipp_is_m2m_cmd(c_node->property.cmd))
1735 complete(&c_node->start_complete);
1736
1737 mutex_unlock(&c_node->event_lock);
1738}
1739
1740static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
1741{
1742 struct ipp_context *ctx = get_ipp_context(dev);
1743 struct exynos_drm_ippdrv *ippdrv;
1744 int ret, count = 0;
1745
1746 DRM_DEBUG_KMS("%s\n", __func__);
1747
1748 /* get ipp driver entry */
1749 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1750 ippdrv->drm_dev = drm_dev;
1751
1752 ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv,
1753 &ippdrv->ipp_id);
1754 if (ret) {
1755 DRM_ERROR("failed to create id.\n");
1756 goto err_idr;
1757 }
1758
1759 DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]ipp_id[%d]\n", __func__,
1760 count++, (int)ippdrv, ippdrv->ipp_id);
1761
1762 if (ippdrv->ipp_id == 0) {
1763 DRM_ERROR("failed to get ipp_id[%d]\n",
1764 ippdrv->ipp_id);
1765 goto err_idr;
1766 }
1767
1768 /* store parent device for node */
1769 ippdrv->parent_dev = dev;
1770
1771 /* store event work queue and handler */
1772 ippdrv->event_workq = ctx->event_workq;
1773 ippdrv->sched_event = ipp_sched_event;
1774 INIT_LIST_HEAD(&ippdrv->cmd_list);
1775
1776 if (is_drm_iommu_supported(drm_dev)) {
1777 ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
1778 if (ret) {
1779 DRM_ERROR("failed to activate iommu\n");
1780 goto err_iommu;
1781 }
1782 }
1783 }
1784
1785 return 0;
1786
1787err_iommu:
1788 /* get ipp driver entry */
1789 list_for_each_entry_reverse(ippdrv, &exynos_drm_ippdrv_list, drv_list)
1790 if (is_drm_iommu_supported(drm_dev))
1791 drm_iommu_detach_device(drm_dev, ippdrv->dev);
1792
1793err_idr:
1794 idr_remove_all(&ctx->ipp_idr);
1795 idr_remove_all(&ctx->prop_idr);
1796 idr_destroy(&ctx->ipp_idr);
1797 idr_destroy(&ctx->prop_idr);
1798 return ret;
1799}
1800
1801static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
1802{
1803 struct exynos_drm_ippdrv *ippdrv;
1804
1805 DRM_DEBUG_KMS("%s\n", __func__);
1806
1807 /* get ipp driver entry */
1808 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1809 if (is_drm_iommu_supported(drm_dev))
1810 drm_iommu_detach_device(drm_dev, ippdrv->dev);
1811
1812 ippdrv->drm_dev = NULL;
1813 exynos_drm_ippdrv_unregister(ippdrv);
1814 }
1815}
1816
1817static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
1818 struct drm_file *file)
1819{
1820 struct drm_exynos_file_private *file_priv = file->driver_priv;
1821 struct exynos_drm_ipp_private *priv;
1822
1823 DRM_DEBUG_KMS("%s\n", __func__);
1824
1825 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1826 if (!priv) {
1827 DRM_ERROR("failed to allocate priv.\n");
1828 return -ENOMEM;
1829 }
1830 priv->dev = dev;
1831 file_priv->ipp_priv = priv;
1832
1833 INIT_LIST_HEAD(&priv->event_list);
1834
1835 DRM_DEBUG_KMS("%s:done priv[0x%x]\n", __func__, (int)priv);
1836
1837 return 0;
1838}
1839
1840static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
1841 struct drm_file *file)
1842{
1843 struct drm_exynos_file_private *file_priv = file->driver_priv;
1844 struct exynos_drm_ipp_private *priv = file_priv->ipp_priv;
1845 struct exynos_drm_ippdrv *ippdrv = NULL;
1846 struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
1847 int count = 0;
1848
1849 DRM_DEBUG_KMS("%s:for priv[0x%x]\n", __func__, (int)priv);
1850
1851 if (list_empty(&exynos_drm_ippdrv_list)) {
1852 DRM_DEBUG_KMS("%s:ippdrv_list is empty.\n", __func__);
1853 goto err_clear;
1854 }
1855
1856 list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
1857 if (list_empty(&ippdrv->cmd_list))
1858 continue;
1859
1860 list_for_each_entry_safe(c_node, tc_node,
1861 &ippdrv->cmd_list, list) {
1862 DRM_DEBUG_KMS("%s:count[%d]ippdrv[0x%x]\n",
1863 __func__, count++, (int)ippdrv);
1864
1865 if (c_node->priv == priv) {
1866 /*
1867 * Userland went into an abnormal state: the process was
1868 * killed and the file was closed without issuing the
1869 * stop cmd ctrl, so IPP never received a stop command.
1870 * Perform the stop operation here instead.
1871 */
1872 if (c_node->state == IPP_STATE_START) {
1873 ipp_stop_property(drm_dev, ippdrv,
1874 c_node);
1875 c_node->state = IPP_STATE_STOP;
1876 }
1877
1878 ippdrv->dedicated = false;
1879 ipp_clean_cmd_node(c_node);
1880 if (list_empty(&ippdrv->cmd_list))
1881 pm_runtime_put_sync(ippdrv->dev);
1882 }
1883 }
1884 }
1885
1886err_clear:
1887 kfree(priv);
1888 return;
1889}
1890
1891static int __devinit ipp_probe(struct platform_device *pdev)
1892{
1893 struct device *dev = &pdev->dev;
1894 struct ipp_context *ctx;
1895 struct exynos_drm_subdrv *subdrv;
1896 int ret;
1897
1898 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1899 if (!ctx)
1900 return -ENOMEM;
1901
1902 DRM_DEBUG_KMS("%s\n", __func__);
1903
1904 mutex_init(&ctx->ipp_lock);
1905 mutex_init(&ctx->prop_lock);
1906
1907 idr_init(&ctx->ipp_idr);
1908 idr_init(&ctx->prop_idr);
1909
1910 /*
1911 * Create a single-threaded workqueue for IPP events.
1912 * IPP provides an event thread for the IPP drivers:
1913 * a driver sends its event_work to this thread, and the
1914 * event thread delivers the event to the user process.
1915 */
1916 ctx->event_workq = create_singlethread_workqueue("ipp_event");
1917 if (!ctx->event_workq) {
1918 dev_err(dev, "failed to create event workqueue\n");
1919 ret = -EINVAL;
1920 goto err_clear;
1921 }
1922
1923 /*
1924 * Create a single-threaded workqueue for IPP commands.
1925 * IPP provides a command thread for user processes:
1926 * a user process creates a command node with the set
1927 * property ioctl, builds a start_work and sends it to the
1928 * command thread, which then starts the property.
1929 */
1930 ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
1931 if (!ctx->cmd_workq) {
1932 dev_err(dev, "failed to create cmd workqueue\n");
1933 ret = -EINVAL;
1934 goto err_event_workq;
1935 }
1936
1937 /* set sub driver information */
1938 subdrv = &ctx->subdrv;
1939 subdrv->dev = dev;
1940 subdrv->probe = ipp_subdrv_probe;
1941 subdrv->remove = ipp_subdrv_remove;
1942 subdrv->open = ipp_subdrv_open;
1943 subdrv->close = ipp_subdrv_close;
1944
1945 platform_set_drvdata(pdev, ctx);
1946
1947 ret = exynos_drm_subdrv_register(subdrv);
1948 if (ret < 0) {
1949 DRM_ERROR("failed to register drm ipp device.\n");
1950 goto err_cmd_workq;
1951 }
1952
1953 dev_info(&pdev->dev, "drm ipp registered successfully.\n");
1954
1955 return 0;
1956
1957err_cmd_workq:
1958 destroy_workqueue(ctx->cmd_workq);
1959err_event_workq:
1960 destroy_workqueue(ctx->event_workq);
1961err_clear:
1962 kfree(ctx);
1963 return ret;
1964}
1965
1966static int __devexit ipp_remove(struct platform_device *pdev)
1967{
1968 struct ipp_context *ctx = platform_get_drvdata(pdev);
1969
1970 DRM_DEBUG_KMS("%s\n", __func__);
1971
1972 /* unregister sub driver */
1973 exynos_drm_subdrv_unregister(&ctx->subdrv);
1974
1975 /* remove and destroy ipp idrs */
1976 idr_remove_all(&ctx->ipp_idr);
1977 idr_remove_all(&ctx->prop_idr);
1978 idr_destroy(&ctx->ipp_idr);
1979 idr_destroy(&ctx->prop_idr);
1980
1981 mutex_destroy(&ctx->ipp_lock);
1982 mutex_destroy(&ctx->prop_lock);
1983
1984 /* destroy command, event work queue */
1985 destroy_workqueue(ctx->cmd_workq);
1986 destroy_workqueue(ctx->event_workq);
1987
1988 kfree(ctx);
1989
1990 return 0;
1991}
1992
1993static int ipp_power_ctrl(struct ipp_context *ctx, bool enable)
1994{
1995 DRM_DEBUG_KMS("%s:enable[%d]\n", __func__, enable);
1996
1997 return 0;
1998}
1999
2000#ifdef CONFIG_PM_SLEEP
2001static int ipp_suspend(struct device *dev)
2002{
2003 struct ipp_context *ctx = get_ipp_context(dev);
2004
2005 DRM_DEBUG_KMS("%s\n", __func__);
2006
2007 if (pm_runtime_suspended(dev))
2008 return 0;
2009
2010 return ipp_power_ctrl(ctx, false);
2011}
2012
2013static int ipp_resume(struct device *dev)
2014{
2015 struct ipp_context *ctx = get_ipp_context(dev);
2016
2017 DRM_DEBUG_KMS("%s\n", __func__);
2018
2019 if (!pm_runtime_suspended(dev))
2020 return ipp_power_ctrl(ctx, true);
2021
2022 return 0;
2023}
2024#endif
2025
2026#ifdef CONFIG_PM_RUNTIME
2027static int ipp_runtime_suspend(struct device *dev)
2028{
2029 struct ipp_context *ctx = get_ipp_context(dev);
2030
2031 DRM_DEBUG_KMS("%s\n", __func__);
2032
2033 return ipp_power_ctrl(ctx, false);
2034}
2035
2036static int ipp_runtime_resume(struct device *dev)
2037{
2038 struct ipp_context *ctx = get_ipp_context(dev);
2039
2040 DRM_DEBUG_KMS("%s\n", __func__);
2041
2042 return ipp_power_ctrl(ctx, true);
2043}
2044#endif
2045
2046static const struct dev_pm_ops ipp_pm_ops = {
2047 SET_SYSTEM_SLEEP_PM_OPS(ipp_suspend, ipp_resume)
2048 SET_RUNTIME_PM_OPS(ipp_runtime_suspend, ipp_runtime_resume, NULL)
2049};
2050
2051struct platform_driver ipp_driver = {
2052 .probe = ipp_probe,
2053 .remove = __devexit_p(ipp_remove),
2054 .driver = {
2055 .name = "exynos-drm-ipp",
2056 .owner = THIS_MODULE,
2057 .pm = &ipp_pm_ops,
2058 },
2059};
2060
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
new file mode 100644
index 000000000000..28ffac95386c
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -0,0 +1,266 @@
1/*
2 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
3 *
4 * Authors:
5 * Eunchul Kim <chulspro.kim@samsung.com>
6 * Jinyoung Jeon <jy0.jeon@samsung.com>
7 * Sangmin Lee <lsmin.lee@samsung.com>
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
26 * OTHER DEALINGS IN THE SOFTWARE.
27 */
28
29#ifndef _EXYNOS_DRM_IPP_H_
30#define _EXYNOS_DRM_IPP_H_
31
32#define for_each_ipp_ops(pos) \
33 for (pos = 0; pos < EXYNOS_DRM_OPS_MAX; pos++)
34#define for_each_ipp_planar(pos) \
35 for (pos = 0; pos < EXYNOS_DRM_PLANAR_MAX; pos++)
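
/*
 * For illustration (not part of the original header): the iterators
 * above simply walk the fixed source/destination and planar slots, e.g.
 *
 *	int i;
 *	for_each_ipp_ops(i)
 *		pr_info("ops slot: %s\n", i ? "dst" : "src");
 */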
36
37#define IPP_GET_LCD_WIDTH _IOR('F', 302, int)
38#define IPP_GET_LCD_HEIGHT _IOR('F', 303, int)
39#define IPP_SET_WRITEBACK _IOW('F', 304, u32)
40
41/* definition of state */
42enum drm_exynos_ipp_state {
43 IPP_STATE_IDLE,
44 IPP_STATE_START,
45 IPP_STATE_STOP,
46};
47
48/*
49 * A structure of command work information.
50 * @work: work structure.
51 * @ippdrv: current work ippdrv.
52 * @c_node: command node information.
53 * @ctrl: command control.
54 */
55struct drm_exynos_ipp_cmd_work {
56 struct work_struct work;
57 struct exynos_drm_ippdrv *ippdrv;
58 struct drm_exynos_ipp_cmd_node *c_node;
59 enum drm_exynos_ipp_ctrl ctrl;
60};
61
62/*
63 * A structure of command node.
64 *
65 * @priv: IPP private information.
66 * @list: list head to command queue information.
67 * @event_list: list head of event.
68 * @mem_list: list head to source,destination memory queue information.
69 * @cmd_lock: lock for synchronization of access to ioctl.
70 * @mem_lock: lock for synchronization of access to memory nodes.
71 * @event_lock: lock for synchronization of access to scheduled event.
72 * @start_complete: completion of start of command.
73 * @stop_complete: completion of stop of command.
74 * @property: property information.
75 * @start_work: start command work structure.
76 * @stop_work: stop command work structure.
77 * @event_work: event work structure.
78 * @state: state of command node.
79 */
80struct drm_exynos_ipp_cmd_node {
81 struct exynos_drm_ipp_private *priv;
82 struct list_head list;
83 struct list_head event_list;
84 struct list_head mem_list[EXYNOS_DRM_OPS_MAX];
85 struct mutex cmd_lock;
86 struct mutex mem_lock;
87 struct mutex event_lock;
88 struct completion start_complete;
89 struct completion stop_complete;
90 struct drm_exynos_ipp_property property;
91 struct drm_exynos_ipp_cmd_work *start_work;
92 struct drm_exynos_ipp_cmd_work *stop_work;
93 struct drm_exynos_ipp_event_work *event_work;
94 enum drm_exynos_ipp_state state;
95};
96
97/*
98 * A structure of buffer information.
99 *
100 * @handles: Y, Cb, Cr each gem handle.
101 * @base: Y, Cb, Cr each planar address.
102 */
103struct drm_exynos_ipp_buf_info {
104 unsigned long handles[EXYNOS_DRM_PLANAR_MAX];
105 dma_addr_t base[EXYNOS_DRM_PLANAR_MAX];
106};
107
108/*
109 * A structure of wb setting information.
110 *
111 * @enable: enable flag for wb.
112 * @refresh: HZ of the refresh rate.
113 */
114struct drm_exynos_ipp_set_wb {
115 __u32 enable;
116 __u32 refresh;
117};
118
119/*
120 * A structure of event work information.
121 *
122 * @work: work structure.
123 * @ippdrv: current work ippdrv.
124 * @buf_id: id of src, dst buffer.
125 */
126struct drm_exynos_ipp_event_work {
127 struct work_struct work;
128 struct exynos_drm_ippdrv *ippdrv;
129 u32 buf_id[EXYNOS_DRM_OPS_MAX];
130};
131
132/*
133 * A structure of source,destination operations.
134 *
135 * @set_fmt: set format of image.
136 * @set_transf: set transform(rotations, flip).
137 * @set_size: set size of region.
138 * @set_addr: set address for dma.
139 */
140struct exynos_drm_ipp_ops {
141 int (*set_fmt)(struct device *dev, u32 fmt);
142 int (*set_transf)(struct device *dev,
143 enum drm_exynos_degree degree,
144 enum drm_exynos_flip flip, bool *swap);
145 int (*set_size)(struct device *dev, int swap,
146 struct drm_exynos_pos *pos, struct drm_exynos_sz *sz);
147 int (*set_addr)(struct device *dev,
148 struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id,
149 enum drm_exynos_ipp_buf_type buf_type);
150};
151
152/*
153 * A structure of ipp driver.
154 *
155 * @drv_list: list head for registered sub driver information.
156 * @parent_dev: parent device information.
157 * @dev: platform device.
158 * @drm_dev: drm device.
159 * @ipp_id: id of ipp driver.
160 * @dedicated: dedicated ipp device.
161 * @ops: source, destination operations.
162 * @event_workq: event work queue.
163 * @cmd: current command information.
164 * @cmd_list: list head for command information.
165 * @prop_list: property information of the current ipp driver.
166 * @check_property: check property about format, size, buffer.
167 * @reset: reset ipp block.
168 * @start: start operation of each ipp device.
169 * @stop: stop operation of each ipp device.
170 * @sched_event: work schedule handler.
171 */
172struct exynos_drm_ippdrv {
173 struct list_head drv_list;
174 struct device *parent_dev;
175 struct device *dev;
176 struct drm_device *drm_dev;
177 u32 ipp_id;
178 bool dedicated;
179 struct exynos_drm_ipp_ops *ops[EXYNOS_DRM_OPS_MAX];
180 struct workqueue_struct *event_workq;
181 struct drm_exynos_ipp_cmd_node *cmd;
182 struct list_head cmd_list;
183 struct drm_exynos_ipp_prop_list *prop_list;
184
185 int (*check_property)(struct device *dev,
186 struct drm_exynos_ipp_property *property);
187 int (*reset)(struct device *dev);
188 int (*start)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
189 void (*stop)(struct device *dev, enum drm_exynos_ipp_cmd cmd);
190 void (*sched_event)(struct work_struct *work);
191};
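
/*
 * A hypothetical driver fills this structure roughly as follows (sketch
 * only; see exynos_drm_rotator.c for a real instance):
 *
 *	static struct exynos_drm_ippdrv my_ippdrv = {
 *		.ops[EXYNOS_DRM_OPS_SRC] = &my_src_ops,
 *		.ops[EXYNOS_DRM_OPS_DST] = &my_dst_ops,
 *		.check_property = my_check_property,
 *		.reset = my_reset,
 *		.start = my_start,
 *		.stop = my_stop,
 *	};
 *	...
 *	exynos_drm_ippdrv_register(&my_ippdrv);
 */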
192
193#ifdef CONFIG_DRM_EXYNOS_IPP
194extern int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv);
195extern int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv);
196extern int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
197 struct drm_file *file);
198extern int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
199 struct drm_file *file);
200extern int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
201 struct drm_file *file);
202extern int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
203 struct drm_file *file);
204extern int exynos_drm_ippnb_register(struct notifier_block *nb);
205extern int exynos_drm_ippnb_unregister(struct notifier_block *nb);
206extern int exynos_drm_ippnb_send_event(unsigned long val, void *v);
207extern void ipp_sched_cmd(struct work_struct *work);
208extern void ipp_sched_event(struct work_struct *work);
209
210#else
211static inline int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
212{
213 return -ENODEV;
214}
215
216static inline int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
217{
218 return -ENODEV;
219}
220
221static inline int exynos_drm_ipp_get_property(struct drm_device *drm_dev,
222 void *data,
223 struct drm_file *file_priv)
224{
225 return -ENOTTY;
226}
227
228static inline int exynos_drm_ipp_set_property(struct drm_device *drm_dev,
229 void *data,
230 struct drm_file *file_priv)
231{
232 return -ENOTTY;
233}
234
235static inline int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev,
236 void *data,
237 struct drm_file *file)
238{
239 return -ENOTTY;
240}
241
242static inline int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev,
243 void *data,
244 struct drm_file *file)
245{
246 return -ENOTTY;
247}
248
249static inline int exynos_drm_ippnb_register(struct notifier_block *nb)
250{
251 return -ENODEV;
252}
253
254static inline int exynos_drm_ippnb_unregister(struct notifier_block *nb)
255{
256 return -ENODEV;
257}
258
259static inline int exynos_drm_ippnb_send_event(unsigned long val, void *v)
260{
261 return -ENOTTY;
262}
263#endif
264
265#endif /* _EXYNOS_DRM_IPP_H_ */
266
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 862ca1eb2102..83efc662d65a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -40,7 +40,7 @@ static const uint32_t formats[] = {
40 * CRTC ---------------- 40 * CRTC ----------------
41 * ^ start ^ end 41 * ^ start ^ end
42 * 42 *
43 * There are six cases from a to b. 43 * There are six cases from a to f.
44 * 44 *
45 * <----- SCREEN -----> 45 * <----- SCREEN ----->
46 * 0 last 46 * 0 last
@@ -93,11 +93,9 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
93 } 93 }
94 94
95 overlay->dma_addr[i] = buffer->dma_addr; 95 overlay->dma_addr[i] = buffer->dma_addr;
96 overlay->vaddr[i] = buffer->kvaddr;
97 96
98 DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n", 97 DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
99 i, (unsigned long)overlay->vaddr[i], 98 i, (unsigned long)overlay->dma_addr[i]);
100 (unsigned long)overlay->dma_addr[i]);
101 } 99 }
102 100
103 actual_w = exynos_plane_get_size(crtc_x, crtc_w, crtc->mode.hdisplay); 101 actual_w = exynos_plane_get_size(crtc_x, crtc_w, crtc->mode.hdisplay);
@@ -106,16 +104,12 @@ int exynos_plane_mode_set(struct drm_plane *plane, struct drm_crtc *crtc,
106 if (crtc_x < 0) { 104 if (crtc_x < 0) {
107 if (actual_w) 105 if (actual_w)
108 src_x -= crtc_x; 106 src_x -= crtc_x;
109 else
110 src_x += crtc_w;
111 crtc_x = 0; 107 crtc_x = 0;
112 } 108 }
113 109
114 if (crtc_y < 0) { 110 if (crtc_y < 0) {
115 if (actual_h) 111 if (actual_h)
116 src_y -= crtc_y; 112 src_y -= crtc_y;
117 else
118 src_y += crtc_h;
119 crtc_y = 0; 113 crtc_y = 0;
120 } 114 }
121 115
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
new file mode 100644
index 000000000000..1c2366083c70
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -0,0 +1,855 @@
1/*
2 * Copyright (C) 2012 Samsung Electronics Co.Ltd
3 * Authors:
4 * YoungJun Cho <yj44.cho@samsung.com>
5 * Eunchul Kim <chulspro.kim@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/err.h>
15#include <linux/interrupt.h>
16#include <linux/io.h>
17#include <linux/platform_device.h>
18#include <linux/clk.h>
19#include <linux/pm_runtime.h>
20
21#include <drm/drmP.h>
22#include <drm/exynos_drm.h>
23#include "regs-rotator.h"
24#include "exynos_drm.h"
25#include "exynos_drm_ipp.h"
26
27/*
28 * Rotator supports image crop/rotation and input/output DMA operations.
29 * Input DMA reads image data from memory.
30 * Output DMA writes image data to memory.
31 *
32 * M2M operation : supports crop/scale/rotation/csc and so on.
33 * Memory ----> Rotator H/W ----> Memory.
34 */
35
36/*
37 * TODO
38 * 1. check suspend/resume api if needed.
39 * 2. need to check use case platform_device_id.
40 * 3. check src/dst size width, height.
41 * 4. need to add supported list in prop_list.
42 */
43
44#define get_rot_context(dev) platform_get_drvdata(to_platform_device(dev))
45#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\
46 struct rot_context, ippdrv)
47#define rot_read(offset) readl(rot->regs + (offset))
48#define rot_write(cfg, offset) writel(cfg, rot->regs + (offset))
49
50enum rot_irq_status {
51 ROT_IRQ_STATUS_COMPLETE = 8,
52 ROT_IRQ_STATUS_ILLEGAL = 9,
53};
54
55/*
56 * A structure of limitation.
57 *
58 * @min_w: minimum width.
59 * @min_h: minimum height.
60 * @max_w: maximum width.
61 * @max_h: maximum height.
62 * @align: align size.
63 */
64struct rot_limit {
65 u32 min_w;
66 u32 min_h;
67 u32 max_w;
68 u32 max_h;
69 u32 align;
70};
71
72/*
73 * A structure of limitation table.
74 *
75 * @ycbcr420_2p: case of YUV.
76 * @rgb888: case of RGB.
77 */
78struct rot_limit_table {
79 struct rot_limit ycbcr420_2p;
80 struct rot_limit rgb888;
81};
82
83/*
84 * A structure of rotator context.
85 * @ippdrv: ipp driver instance for this rotator.
86 * @regs_res: register resources.
87 * @regs: memory mapped io registers.
88 * @clock: rotator gate clock.
89 * @limit_tbl: limitation of rotator.
90 * @irq: irq number.
91 * @cur_buf_id: current operation buffer id.
92 * @suspended: suspended state.
93 */
94struct rot_context {
95 struct exynos_drm_ippdrv ippdrv;
96 struct resource *regs_res;
97 void __iomem *regs;
98 struct clk *clock;
99 struct rot_limit_table *limit_tbl;
100 int irq;
101 int cur_buf_id[EXYNOS_DRM_OPS_MAX];
102 bool suspended;
103};
104
105static void rotator_reg_set_irq(struct rot_context *rot, bool enable)
106{
107 u32 val = rot_read(ROT_CONFIG);
108
109 if (enable)
110 val |= ROT_CONFIG_IRQ;
111 else
112 val &= ~ROT_CONFIG_IRQ;
113
114 rot_write(val, ROT_CONFIG);
115}
116
117static u32 rotator_reg_get_fmt(struct rot_context *rot)
118{
119 u32 val = rot_read(ROT_CONTROL);
120
121 val &= ROT_CONTROL_FMT_MASK;
122
123 return val;
124}
125
126static enum rot_irq_status rotator_reg_get_irq_status(struct rot_context *rot)
127{
128 u32 val = rot_read(ROT_STATUS);
129
130 val = ROT_STATUS_IRQ(val);
131
132 if (val == ROT_STATUS_IRQ_VAL_COMPLETE)
133 return ROT_IRQ_STATUS_COMPLETE;
134
135 return ROT_IRQ_STATUS_ILLEGAL;
136}
137
138static irqreturn_t rotator_irq_handler(int irq, void *arg)
139{
140 struct rot_context *rot = arg;
141 struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
142 struct drm_exynos_ipp_cmd_node *c_node = ippdrv->cmd;
143 struct drm_exynos_ipp_event_work *event_work = c_node->event_work;
144 enum rot_irq_status irq_status;
145 u32 val;
146
147 /* Get execution result */
148 irq_status = rotator_reg_get_irq_status(rot);
149
150 /* clear status */
151 val = rot_read(ROT_STATUS);
152 val |= ROT_STATUS_IRQ_PENDING((u32)irq_status);
153 rot_write(val, ROT_STATUS);
154
155 if (irq_status == ROT_IRQ_STATUS_COMPLETE) {
156 event_work->ippdrv = ippdrv;
157 event_work->buf_id[EXYNOS_DRM_OPS_DST] =
158 rot->cur_buf_id[EXYNOS_DRM_OPS_DST];
159 queue_work(ippdrv->event_workq,
160 (struct work_struct *)event_work);
161 } else
162 DRM_ERROR("the SFR is set illegally\n");
163
164 return IRQ_HANDLED;
165}
166
167static void rotator_align_size(struct rot_context *rot, u32 fmt, u32 *hsize,
168 u32 *vsize)
169{
170 struct rot_limit_table *limit_tbl = rot->limit_tbl;
171 struct rot_limit *limit;
172 u32 mask, val;
173
174 /* Get size limit */
175 if (fmt == ROT_CONTROL_FMT_RGB888)
176 limit = &limit_tbl->rgb888;
177 else
178 limit = &limit_tbl->ycbcr420_2p;
179
180 /* Get mask for rounding to nearest aligned val */
181 mask = ~((1 << limit->align) - 1);
182
183 /* Set aligned width */
184 val = ROT_ALIGN(*hsize, limit->align, mask);
185 if (val < limit->min_w)
186 *hsize = ROT_MIN(limit->min_w, mask);
187 else if (val > limit->max_w)
188 *hsize = ROT_MAX(limit->max_w, mask);
189 else
190 *hsize = val;
191
192 /* Set aligned height */
193 val = ROT_ALIGN(*vsize, limit->align, mask);
194 if (val < limit->min_h)
195 *vsize = ROT_MIN(limit->min_h, mask);
196 else if (val > limit->max_h)
197 *vsize = ROT_MAX(limit->max_h, mask);
198 else
199 *vsize = val;
200}
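
/*
 * Worked example of the alignment above (illustration): with align = 4
 * the mask keeps multiples of 16, so a requested hsize is rounded to
 * the nearest multiple of 16 and then clamped between the aligned
 * minimum and maximum for the current format.
 */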
201
202static int rotator_src_set_fmt(struct device *dev, u32 fmt)
203{
204 struct rot_context *rot = dev_get_drvdata(dev);
205 u32 val;
206
207 val = rot_read(ROT_CONTROL);
208 val &= ~ROT_CONTROL_FMT_MASK;
209
210 switch (fmt) {
211 case DRM_FORMAT_NV12:
212 val |= ROT_CONTROL_FMT_YCBCR420_2P;
213 break;
214 case DRM_FORMAT_XRGB8888:
215 val |= ROT_CONTROL_FMT_RGB888;
216 break;
217 default:
218 DRM_ERROR("invalid image format\n");
219 return -EINVAL;
220 }
221
222 rot_write(val, ROT_CONTROL);
223
224 return 0;
225}
226
227static inline bool rotator_check_reg_fmt(u32 fmt)
228{
229 if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) ||
230 (fmt == ROT_CONTROL_FMT_RGB888))
231 return true;
232
233 return false;
234}
235
236static int rotator_src_set_size(struct device *dev, int swap,
237 struct drm_exynos_pos *pos,
238 struct drm_exynos_sz *sz)
239{
240 struct rot_context *rot = dev_get_drvdata(dev);
241 u32 fmt, hsize, vsize;
242 u32 val;
243
244 /* Get format */
245 fmt = rotator_reg_get_fmt(rot);
246 if (!rotator_check_reg_fmt(fmt)) {
247 DRM_ERROR("%s:invalid format.\n", __func__);
248 return -EINVAL;
249 }
250
251 /* Align buffer size */
252 hsize = sz->hsize;
253 vsize = sz->vsize;
254 rotator_align_size(rot, fmt, &hsize, &vsize);
255
256 /* Set buffer size configuration */
257 val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
258 rot_write(val, ROT_SRC_BUF_SIZE);
259
260 /* Set crop image position configuration */
261 val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
262 rot_write(val, ROT_SRC_CROP_POS);
263 val = ROT_SRC_CROP_SIZE_H(pos->h) | ROT_SRC_CROP_SIZE_W(pos->w);
264 rot_write(val, ROT_SRC_CROP_SIZE);
265
266 return 0;
267}
268
269static int rotator_src_set_addr(struct device *dev,
270 struct drm_exynos_ipp_buf_info *buf_info,
271 u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
272{
273 struct rot_context *rot = dev_get_drvdata(dev);
274 dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
275 u32 val, fmt, hsize, vsize;
276 int i;
277
278 /* Set current buf_id */
279 rot->cur_buf_id[EXYNOS_DRM_OPS_SRC] = buf_id;
280
281 switch (buf_type) {
282 case IPP_BUF_ENQUEUE:
283 /* Set address configuration */
284 for_each_ipp_planar(i)
285 addr[i] = buf_info->base[i];
286
287 /* Get format */
288 fmt = rotator_reg_get_fmt(rot);
289 if (!rotator_check_reg_fmt(fmt)) {
290 DRM_ERROR("%s:invalid format.\n", __func__);
291 return -EINVAL;
292 }
293
294 /* Re-set cb planar for NV12 format */
295 if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
296 !addr[EXYNOS_DRM_PLANAR_CB]) {
297
298 val = rot_read(ROT_SRC_BUF_SIZE);
299 hsize = ROT_GET_BUF_SIZE_W(val);
300 vsize = ROT_GET_BUF_SIZE_H(val);
301
302 /* Set cb planar */
303 addr[EXYNOS_DRM_PLANAR_CB] =
304 addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
305 }
306
307 for_each_ipp_planar(i)
308 rot_write(addr[i], ROT_SRC_BUF_ADDR(i));
309 break;
310 case IPP_BUF_DEQUEUE:
311 for_each_ipp_planar(i)
312 rot_write(0x0, ROT_SRC_BUF_ADDR(i));
313 break;
314 default:
315 /* Nothing to do */
316 break;
317 }
318
319 return 0;
320}
321
322static int rotator_dst_set_transf(struct device *dev,
323 enum drm_exynos_degree degree,
324 enum drm_exynos_flip flip, bool *swap)
325{
326 struct rot_context *rot = dev_get_drvdata(dev);
327 u32 val;
328
329 /* Set transform configuration */
330 val = rot_read(ROT_CONTROL);
331 val &= ~ROT_CONTROL_FLIP_MASK;
332
333 switch (flip) {
334 case EXYNOS_DRM_FLIP_VERTICAL:
335 val |= ROT_CONTROL_FLIP_VERTICAL;
336 break;
337 case EXYNOS_DRM_FLIP_HORIZONTAL:
338 val |= ROT_CONTROL_FLIP_HORIZONTAL;
339 break;
340 default:
341 /* Flip None */
342 break;
343 }
344
345 val &= ~ROT_CONTROL_ROT_MASK;
346
347 switch (degree) {
348 case EXYNOS_DRM_DEGREE_90:
349 val |= ROT_CONTROL_ROT_90;
350 break;
351 case EXYNOS_DRM_DEGREE_180:
352 val |= ROT_CONTROL_ROT_180;
353 break;
354 case EXYNOS_DRM_DEGREE_270:
355 val |= ROT_CONTROL_ROT_270;
356 break;
357 default:
358 /* Rotation 0 Degree */
359 break;
360 }
361
362 rot_write(val, ROT_CONTROL);
363
364 /* Check degree for setting buffer size swap */
365 if ((degree == EXYNOS_DRM_DEGREE_90) ||
366 (degree == EXYNOS_DRM_DEGREE_270))
367 *swap = true;
368 else
369 *swap = false;
370
371 return 0;
372}
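A worked example of the control-word composition performed above (a sketch; it assumes the flip and rotation fields are independent, as the separate masks imply):

/*
 * Requesting EXYNOS_DRM_FLIP_VERTICAL with EXYNOS_DRM_DEGREE_90:
 *
 *   val &= ~ROT_CONTROL_FLIP_MASK;  val |= ROT_CONTROL_FLIP_VERTICAL;
 *   val &= ~ROT_CONTROL_ROT_MASK;   val |= ROT_CONTROL_ROT_90;
 *
 * Since 90/270-degree rotation exchanges width and height, *swap is set
 * to true so the caller sizes the destination as src_h x src_w.
 */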
373
374static int rotator_dst_set_size(struct device *dev, int swap,
375 struct drm_exynos_pos *pos,
376 struct drm_exynos_sz *sz)
377{
378 struct rot_context *rot = dev_get_drvdata(dev);
379 u32 val, fmt, hsize, vsize;
380
381 /* Get format */
382 fmt = rotator_reg_get_fmt(rot);
383 if (!rotator_check_reg_fmt(fmt)) {
384 DRM_ERROR("%s:invalid format.\n", __func__);
385 return -EINVAL;
386 }
387
388 /* Align buffer size */
389 hsize = sz->hsize;
390 vsize = sz->vsize;
391 rotator_align_size(rot, fmt, &hsize, &vsize);
392
393 /* Set buffer size configuration */
394 val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize);
395 rot_write(val, ROT_DST_BUF_SIZE);
396
397 /* Set crop image position configuration */
398 val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x);
399 rot_write(val, ROT_DST_CROP_POS);
400
401 return 0;
402}
403
404static int rotator_dst_set_addr(struct device *dev,
405 struct drm_exynos_ipp_buf_info *buf_info,
406 u32 buf_id, enum drm_exynos_ipp_buf_type buf_type)
407{
408 struct rot_context *rot = dev_get_drvdata(dev);
409 dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX];
410 u32 val, fmt, hsize, vsize;
411 int i;
412
413 /* Set current buf_id */
414 rot->cur_buf_id[EXYNOS_DRM_OPS_DST] = buf_id;
415
416 switch (buf_type) {
417 case IPP_BUF_ENQUEUE:
418 /* Set address configuration */
419 for_each_ipp_planar(i)
420 addr[i] = buf_info->base[i];
421
422 /* Get format */
423 fmt = rotator_reg_get_fmt(rot);
424 if (!rotator_check_reg_fmt(fmt)) {
425 DRM_ERROR("%s:invalid format.\n", __func__);
426 return -EINVAL;
427 }
428
429 /* Re-set cb planar for NV12 format */
430 if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) &&
431 !addr[EXYNOS_DRM_PLANAR_CB]) {
432 /* Get buf size */
433 val = rot_read(ROT_DST_BUF_SIZE);
434
435 hsize = ROT_GET_BUF_SIZE_W(val);
436 vsize = ROT_GET_BUF_SIZE_H(val);
437
438 /* Set cb planar */
439 addr[EXYNOS_DRM_PLANAR_CB] =
440 addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize;
441 }
442
443 for_each_ipp_planar(i)
444 rot_write(addr[i], ROT_DST_BUF_ADDR(i));
445 break;
446 case IPP_BUF_DEQUEUE:
447 for_each_ipp_planar(i)
448 rot_write(0x0, ROT_DST_BUF_ADDR(i));
449 break;
450 default:
451 /* Nothing to do */
452 break;
453 }
454
455 return 0;
456}
457
458static struct exynos_drm_ipp_ops rot_src_ops = {
459 .set_fmt = rotator_src_set_fmt,
460 .set_size = rotator_src_set_size,
461 .set_addr = rotator_src_set_addr,
462};
463
464static struct exynos_drm_ipp_ops rot_dst_ops = {
465 .set_transf = rotator_dst_set_transf,
466 .set_size = rotator_dst_set_size,
467 .set_addr = rotator_dst_set_addr,
468};
469
470static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
471{
472 struct drm_exynos_ipp_prop_list *prop_list;
473
474 DRM_DEBUG_KMS("%s\n", __func__);
475
476 prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
477 if (!prop_list) {
478 DRM_ERROR("failed to alloc property list.\n");
479 return -ENOMEM;
480 }
481
482 prop_list->version = 1;
483 prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
484 (1 << EXYNOS_DRM_FLIP_HORIZONTAL);
485 prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) |
486 (1 << EXYNOS_DRM_DEGREE_90) |
487 (1 << EXYNOS_DRM_DEGREE_180) |
488 (1 << EXYNOS_DRM_DEGREE_270);
489 prop_list->csc = 0;
490 prop_list->crop = 0;
491 prop_list->scale = 0;
492
493 ippdrv->prop_list = prop_list;
494
495 return 0;
496}
497
498static inline bool rotator_check_drm_fmt(u32 fmt)
499{
500 switch (fmt) {
501 case DRM_FORMAT_XRGB8888:
502 case DRM_FORMAT_NV12:
503 return true;
504 default:
505 DRM_DEBUG_KMS("%s:unsupported format\n", __func__);
506 return false;
507 }
508}
509
510static inline bool rotator_check_drm_flip(enum drm_exynos_flip flip)
511{
512 switch (flip) {
513 case EXYNOS_DRM_FLIP_NONE:
514 case EXYNOS_DRM_FLIP_VERTICAL:
515 case EXYNOS_DRM_FLIP_HORIZONTAL:
516 return true;
517 default:
518 DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
519 return false;
520 }
521}
522
523static int rotator_ippdrv_check_property(struct device *dev,
524 struct drm_exynos_ipp_property *property)
525{
526 struct drm_exynos_ipp_config *src_config =
527 &property->config[EXYNOS_DRM_OPS_SRC];
528 struct drm_exynos_ipp_config *dst_config =
529 &property->config[EXYNOS_DRM_OPS_DST];
530 struct drm_exynos_pos *src_pos = &src_config->pos;
531 struct drm_exynos_pos *dst_pos = &dst_config->pos;
532 struct drm_exynos_sz *src_sz = &src_config->sz;
533 struct drm_exynos_sz *dst_sz = &dst_config->sz;
534 bool swap = false;
535
536 /* Check format configuration */
537 if (src_config->fmt != dst_config->fmt) {
538 DRM_DEBUG_KMS("%s:csc feature is not supported\n", __func__);
539 return -EINVAL;
540 }
541
542 if (!rotator_check_drm_fmt(dst_config->fmt)) {
543 DRM_DEBUG_KMS("%s:invalid format\n", __func__);
544 return -EINVAL;
545 }
546
547 /* Check transform configuration */
548 if (src_config->degree != EXYNOS_DRM_DEGREE_0) {
549 DRM_DEBUG_KMS("%s:source-side rotation is not supported\n",
550 __func__);
551 return -EINVAL;
552 }
553
554 switch (dst_config->degree) {
555 case EXYNOS_DRM_DEGREE_90:
556 case EXYNOS_DRM_DEGREE_270:
557 swap = true; /* fall through */
558 case EXYNOS_DRM_DEGREE_0:
559 case EXYNOS_DRM_DEGREE_180:
560 /* No problem */
561 break;
562 default:
563 DRM_DEBUG_KMS("%s:invalid degree\n", __func__);
564 return -EINVAL;
565 }
566
567 if (src_config->flip != EXYNOS_DRM_FLIP_NONE) {
568 DRM_DEBUG_KMS("%s:source-side flip is not supported\n", __func__);
569 return -EINVAL;
570 }
571
572 if (!rotator_check_drm_flip(dst_config->flip)) {
573 DRM_DEBUG_KMS("%s:invalid flip\n", __func__);
574 return -EINVAL;
575 }
576
577 /* Check size configuration */
578 if ((src_pos->x + src_pos->w > src_sz->hsize) ||
579 (src_pos->y + src_pos->h > src_sz->vsize)) {
580 DRM_DEBUG_KMS("%s:out of source buffer bound\n", __func__);
581 return -EINVAL;
582 }
583
584 if (swap) {
585 if ((dst_pos->x + dst_pos->h > dst_sz->vsize) ||
586 (dst_pos->y + dst_pos->w > dst_sz->hsize)) {
587 DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
588 __func__);
589 return -EINVAL;
590 }
591
592 if ((src_pos->w != dst_pos->h) || (src_pos->h != dst_pos->w)) {
593 DRM_DEBUG_KMS("%s:scaling is not supported\n",
594 __func__);
595 return -EINVAL;
596 }
597 } else {
598 if ((dst_pos->x + dst_pos->w > dst_sz->hsize) ||
599 (dst_pos->y + dst_pos->h > dst_sz->vsize)) {
600 DRM_DEBUG_KMS("%s:out of destination buffer bound\n",
601 __func__);
602 return -EINVAL;
603 }
604
605 if ((src_pos->w != dst_pos->w) || (src_pos->h != dst_pos->h)) {
606 DRM_DEBUG_KMS("%s:scaling is not supported\n",
607 __func__);
608 return -EINVAL;
609 }
610 }
611
612 return 0;
613}
614
615static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
616{
617 struct rot_context *rot = dev_get_drvdata(dev);
618 u32 val;
619
620 if (rot->suspended) {
621 DRM_ERROR("suspended state\n");
622 return -EPERM;
623 }
624
625 if (cmd != IPP_CMD_M2M) {
626 DRM_ERROR("unsupported cmd: %d\n", cmd);
627 return -EINVAL;
628 }
629
630 /* Set interrupt enable */
631 rotator_reg_set_irq(rot, true);
632
633 val = rot_read(ROT_CONTROL);
634 val |= ROT_CONTROL_START;
635
636 rot_write(val, ROT_CONTROL);
637
638 return 0;
639}
640
641static int __devinit rotator_probe(struct platform_device *pdev)
642{
643 struct device *dev = &pdev->dev;
644 struct rot_context *rot;
645 struct exynos_drm_ippdrv *ippdrv;
646 int ret;
647
648 rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL);
649 if (!rot) {
650 dev_err(dev, "failed to allocate rot\n");
651 return -ENOMEM;
652 }
653
654 rot->limit_tbl = (struct rot_limit_table *)
655 platform_get_device_id(pdev)->driver_data;
656
657 rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
658 if (!rot->regs_res) {
659 dev_err(dev, "failed to find registers\n");
660 ret = -ENOENT;
661 goto err_get_resource;
662 }
663
664 rot->regs = devm_request_and_ioremap(dev, rot->regs_res);
665 if (!rot->regs) {
666 dev_err(dev, "failed to map registers\n");
667 ret = -ENXIO;
668 goto err_get_resource;
669 }
670
671 rot->irq = platform_get_irq(pdev, 0);
672 if (rot->irq < 0) {
673 dev_err(dev, "failed to get irq\n");
674 ret = rot->irq;
675 goto err_get_irq;
676 }
677
678 ret = request_threaded_irq(rot->irq, NULL, rotator_irq_handler,
679 IRQF_ONESHOT, "drm_rotator", rot);
680 if (ret < 0) {
681 dev_err(dev, "failed to request irq\n");
682 goto err_get_irq;
683 }
684
685 rot->clock = clk_get(dev, "rotator");
686 if (IS_ERR_OR_NULL(rot->clock)) {
687 dev_err(dev, "failed to get clock\n");
688 ret = PTR_ERR(rot->clock);
689 goto err_clk_get;
690 }
691
692 pm_runtime_enable(dev);
693
694 ippdrv = &rot->ippdrv;
695 ippdrv->dev = dev;
696 ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &rot_src_ops;
697 ippdrv->ops[EXYNOS_DRM_OPS_DST] = &rot_dst_ops;
698 ippdrv->check_property = rotator_ippdrv_check_property;
699 ippdrv->start = rotator_ippdrv_start;
700 ret = rotator_init_prop_list(ippdrv);
701 if (ret < 0) {
702 dev_err(dev, "failed to init property list.\n");
703 goto err_ippdrv_register;
704 }
705
706 DRM_DEBUG_KMS("%s:ippdrv[%p]\n", __func__, ippdrv);
707
708 platform_set_drvdata(pdev, rot);
709
710 ret = exynos_drm_ippdrv_register(ippdrv);
711 if (ret < 0) {
712 dev_err(dev, "failed to register drm rotator device\n");
713 goto err_ippdrv_register;
714 }
715
716 dev_info(dev, "exynos rotator probed successfully\n");
717
718 return 0;
719
720err_ippdrv_register:
721 devm_kfree(dev, ippdrv->prop_list);
722 pm_runtime_disable(dev);
723 clk_put(rot->clock);
724err_clk_get:
725 free_irq(rot->irq, rot);
726err_get_irq:
727 devm_iounmap(dev, rot->regs);
728err_get_resource:
729 devm_kfree(dev, rot);
730 return ret;
731}
732
733static int __devexit rotator_remove(struct platform_device *pdev)
734{
735 struct device *dev = &pdev->dev;
736 struct rot_context *rot = dev_get_drvdata(dev);
737 struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv;
738
739 devm_kfree(dev, ippdrv->prop_list);
740 exynos_drm_ippdrv_unregister(ippdrv);
741
742 pm_runtime_disable(dev);
743 clk_put(rot->clock);
744
745 free_irq(rot->irq, rot);
746 devm_iounmap(dev, rot->regs);
747
748 devm_kfree(dev, rot);
749
750 return 0;
751}
752
753struct rot_limit_table rot_limit_tbl = {
754 .ycbcr420_2p = {
755 .min_w = 32,
756 .min_h = 32,
757 .max_w = SZ_32K,
758 .max_h = SZ_32K,
759 .align = 3,
760 },
761 .rgb888 = {
762 .min_w = 8,
763 .min_h = 8,
764 .max_w = SZ_8K,
765 .max_h = SZ_8K,
766 .align = 2,
767 },
768};
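The align fields act as power-of-two shifts when rotator_align_size() rounds buffer dimensions (an assumption inferred from the values chosen here; the helper's body is outside this hunk):

/*
 * .align = 3: NV12 widths/heights snap to 1 << 3 = 8-pixel boundaries.
 * .align = 2: RGB888 widths/heights snap to 1 << 2 = 4-pixel boundaries.
 */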
769
770struct platform_device_id rotator_driver_ids[] = {
771 {
772 .name = "exynos-rot",
773 .driver_data = (unsigned long)&rot_limit_tbl,
774 },
775 {},
776};
777
778static int rotator_clk_ctrl(struct rot_context *rot, bool enable)
779{
780 DRM_DEBUG_KMS("%s\n", __func__);
781
782 if (enable) {
783 clk_enable(rot->clock);
784 rot->suspended = false;
785 } else {
786 clk_disable(rot->clock);
787 rot->suspended = true;
788 }
789
790 return 0;
791}
792
793
794#ifdef CONFIG_PM_SLEEP
795static int rotator_suspend(struct device *dev)
796{
797 struct rot_context *rot = dev_get_drvdata(dev);
798
799 DRM_DEBUG_KMS("%s\n", __func__);
800
801 if (pm_runtime_suspended(dev))
802 return 0;
803
804 return rotator_clk_ctrl(rot, false);
805}
806
807static int rotator_resume(struct device *dev)
808{
809 struct rot_context *rot = dev_get_drvdata(dev);
810
811 DRM_DEBUG_KMS("%s\n", __func__);
812
813 if (!pm_runtime_suspended(dev))
814 return rotator_clk_ctrl(rot, true);
815
816 return 0;
817}
818#endif
819
820#ifdef CONFIG_PM_RUNTIME
821static int rotator_runtime_suspend(struct device *dev)
822{
823 struct rot_context *rot = dev_get_drvdata(dev);
824
825 DRM_DEBUG_KMS("%s\n", __func__);
826
827 return rotator_clk_ctrl(rot, false);
828}
829
830static int rotator_runtime_resume(struct device *dev)
831{
832 struct rot_context *rot = dev_get_drvdata(dev);
833
834 DRM_DEBUG_KMS("%s\n", __func__);
835
836 return rotator_clk_ctrl(rot, true);
837}
838#endif
839
840static const struct dev_pm_ops rotator_pm_ops = {
841 SET_SYSTEM_SLEEP_PM_OPS(rotator_suspend, rotator_resume)
842 SET_RUNTIME_PM_OPS(rotator_runtime_suspend, rotator_runtime_resume,
843 NULL)
844};
845
846struct platform_driver rotator_driver = {
847 .probe = rotator_probe,
848 .remove = __devexit_p(rotator_remove),
849 .id_table = rotator_driver_ids,
850 .driver = {
851 .name = "exynos-rot",
852 .owner = THIS_MODULE,
853 .pm = &rotator_pm_ops,
854 },
855};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.h b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
new file mode 100644
index 000000000000..a2d7a14a52b6
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
@@ -0,0 +1,33 @@
1/*
2 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
3 *
4 * Authors:
5 * YoungJun Cho <yj44.cho@samsung.com>
6 * Eunchul Kim <chulspro.kim@samsung.com>
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 */
27
28#ifndef _EXYNOS_DRM_ROTATOR_H_
29#define _EXYNOS_DRM_ROTATOR_H_
30
31/* TODO */
32
33#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index e4b8a8f741f7..99bfc38dfaa2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -39,7 +39,6 @@ struct vidi_win_data {
39 unsigned int fb_height; 39 unsigned int fb_height;
40 unsigned int bpp; 40 unsigned int bpp;
41 dma_addr_t dma_addr; 41 dma_addr_t dma_addr;
42 void __iomem *vaddr;
43 unsigned int buf_offsize; 42 unsigned int buf_offsize;
44 unsigned int line_size; /* bytes */ 43 unsigned int line_size; /* bytes */
45 bool enabled; 44 bool enabled;
@@ -294,7 +293,6 @@ static void vidi_win_mode_set(struct device *dev,
294 win_data->fb_width = overlay->fb_width; 293 win_data->fb_width = overlay->fb_width;
295 win_data->fb_height = overlay->fb_height; 294 win_data->fb_height = overlay->fb_height;
296 win_data->dma_addr = overlay->dma_addr[0] + offset; 295 win_data->dma_addr = overlay->dma_addr[0] + offset;
297 win_data->vaddr = overlay->vaddr[0] + offset;
298 win_data->bpp = overlay->bpp; 296 win_data->bpp = overlay->bpp;
299 win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) * 297 win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
300 (overlay->bpp >> 3); 298 (overlay->bpp >> 3);
@@ -309,9 +307,7 @@ static void vidi_win_mode_set(struct device *dev,
309 win_data->offset_x, win_data->offset_y); 307 win_data->offset_x, win_data->offset_y);
310 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", 308 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
311 win_data->ovl_width, win_data->ovl_height); 309 win_data->ovl_width, win_data->ovl_height);
312 DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n", 310 DRM_DEBUG_KMS("paddr = 0x%lx\n", (unsigned long)win_data->dma_addr);
313 (unsigned long)win_data->dma_addr,
314 (unsigned long)win_data->vaddr);
315 DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n", 311 DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
316 overlay->fb_width, overlay->crtc_width); 312 overlay->fb_width, overlay->crtc_width);
317} 313}
@@ -382,7 +378,6 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
382 struct drm_pending_vblank_event *e, *t; 378 struct drm_pending_vblank_event *e, *t;
383 struct timeval now; 379 struct timeval now;
384 unsigned long flags; 380 unsigned long flags;
385 bool is_checked = false;
386 381
387 spin_lock_irqsave(&drm_dev->event_lock, flags); 382 spin_lock_irqsave(&drm_dev->event_lock, flags);
388 383
@@ -392,8 +387,6 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
392 if (crtc != e->pipe) 387 if (crtc != e->pipe)
393 continue; 388 continue;
394 389
395 is_checked = true;
396
397 do_gettimeofday(&now); 390 do_gettimeofday(&now);
398 e->event.sequence = 0; 391 e->event.sequence = 0;
399 e->event.tv_sec = now.tv_sec; 392 e->event.tv_sec = now.tv_sec;
@@ -401,22 +394,7 @@ static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
401 394
402 list_move_tail(&e->base.link, &e->base.file_priv->event_list); 395 list_move_tail(&e->base.link, &e->base.file_priv->event_list);
403 wake_up_interruptible(&e->base.file_priv->event_wait); 396 wake_up_interruptible(&e->base.file_priv->event_wait);
404 } 397 drm_vblank_put(drm_dev, crtc);
405
406 if (is_checked) {
407 /*
408 * call drm_vblank_put only in case that drm_vblank_get was
409 * called.
410 */
411 if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
412 drm_vblank_put(drm_dev, crtc);
413
414 /*
415 * don't off vblank if vblank_disable_allowed is 1,
416 * because vblank would be off by timer handler.
417 */
418 if (!drm_dev->vblank_disable_allowed)
419 drm_vblank_off(drm_dev, crtc);
420 } 398 }
421 399
422 spin_unlock_irqrestore(&drm_dev->event_lock, flags); 400 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 2c115f8a62a3..2c46b6c0b82c 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -50,6 +50,29 @@
50#define MAX_HEIGHT 1080 50#define MAX_HEIGHT 1080
51#define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev)) 51#define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev))
52 52
53/* AVI header and aspect ratio */
54#define HDMI_AVI_VERSION 0x02
55#define HDMI_AVI_LENGTH 0x0D
56#define AVI_PIC_ASPECT_RATIO_16_9 (2 << 4)
57#define AVI_SAME_AS_PIC_ASPECT_RATIO 8
58
59/* AUI header info */
60#define HDMI_AUI_VERSION 0x01
61#define HDMI_AUI_LENGTH 0x0A
62
63/* HDMI infoframe to configure HDMI out packet header, AUI and AVI */
64enum HDMI_PACKET_TYPE {
65 /* refer to Table 5-8 Packet Type in HDMI specification v1.4a */
66 /* InfoFrame packet type */
67 HDMI_PACKET_TYPE_INFOFRAME = 0x80,
68 /* Vendor-Specific InfoFrame */
69 HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1,
70 /* Auxiliary Video information InfoFrame */
71 HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2,
72 /* Audio information InfoFrame */
73 HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4
74};
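For reference, the header bytes these constants produce on the wire, per CEA-861/HDMI 1.4a (a worked example using only the definitions above):

/*
 * AVI InfoFrame header:
 *   HB0 = HDMI_PACKET_TYPE_AVI = 0x80 + 2 = 0x82
 *   HB1 = HDMI_AVI_VERSION     = 0x02
 *   HB2 = HDMI_AVI_LENGTH      = 0x0d  (13 payload bytes)
 *
 * Audio (AUI) InfoFrame header:
 *   HB0 = HDMI_PACKET_TYPE_AUI = 0x80 + 4 = 0x84
 *   HB1 = HDMI_AUI_VERSION     = 0x01
 *   HB2 = HDMI_AUI_LENGTH      = 0x0a  (10 payload bytes)
 */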
75
53enum hdmi_type { 76enum hdmi_type {
54 HDMI_TYPE13, 77 HDMI_TYPE13,
55 HDMI_TYPE14, 78 HDMI_TYPE14,
@@ -74,6 +97,7 @@ struct hdmi_context {
74 struct mutex hdmi_mutex; 97 struct mutex hdmi_mutex;
75 98
76 void __iomem *regs; 99 void __iomem *regs;
100 void *parent_ctx;
77 int external_irq; 101 int external_irq;
78 int internal_irq; 102 int internal_irq;
79 103
@@ -84,7 +108,6 @@ struct hdmi_context {
84 int cur_conf; 108 int cur_conf;
85 109
86 struct hdmi_resources res; 110 struct hdmi_resources res;
87 void *parent_ctx;
88 111
89 int hpd_gpio; 112 int hpd_gpio;
90 113
@@ -182,6 +205,7 @@ struct hdmi_v13_conf {
182 int height; 205 int height;
183 int vrefresh; 206 int vrefresh;
184 bool interlace; 207 bool interlace;
208 int cea_video_id;
185 const u8 *hdmiphy_data; 209 const u8 *hdmiphy_data;
186 const struct hdmi_v13_preset_conf *conf; 210 const struct hdmi_v13_preset_conf *conf;
187}; 211};
@@ -353,15 +377,20 @@ static const struct hdmi_v13_preset_conf hdmi_v13_conf_1080p60 = {
353}; 377};
354 378
355static const struct hdmi_v13_conf hdmi_v13_confs[] = { 379static const struct hdmi_v13_conf hdmi_v13_confs[] = {
356 { 1280, 720, 60, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 }, 380 { 1280, 720, 60, false, 4, hdmiphy_v13_conf74_25,
357 { 1280, 720, 50, false, hdmiphy_v13_conf74_25, &hdmi_v13_conf_720p60 }, 381 &hdmi_v13_conf_720p60 },
358 { 720, 480, 60, false, hdmiphy_v13_conf27_027, &hdmi_v13_conf_480p }, 382 { 1280, 720, 50, false, 19, hdmiphy_v13_conf74_25,
359 { 1920, 1080, 50, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i50 }, 383 &hdmi_v13_conf_720p60 },
360 { 1920, 1080, 50, false, hdmiphy_v13_conf148_5, 384 { 720, 480, 60, false, 3, hdmiphy_v13_conf27_027,
361 &hdmi_v13_conf_1080p50 }, 385 &hdmi_v13_conf_480p },
362 { 1920, 1080, 60, true, hdmiphy_v13_conf74_25, &hdmi_v13_conf_1080i60 }, 386 { 1920, 1080, 50, true, 20, hdmiphy_v13_conf74_25,
363 { 1920, 1080, 60, false, hdmiphy_v13_conf148_5, 387 &hdmi_v13_conf_1080i50 },
364 &hdmi_v13_conf_1080p60 }, 388 { 1920, 1080, 50, false, 31, hdmiphy_v13_conf148_5,
389 &hdmi_v13_conf_1080p50 },
390 { 1920, 1080, 60, true, 5, hdmiphy_v13_conf74_25,
391 &hdmi_v13_conf_1080i60 },
392 { 1920, 1080, 60, false, 16, hdmiphy_v13_conf148_5,
393 &hdmi_v13_conf_1080p60 },
365}; 394};
366 395
367/* HDMI Version 1.4 */ 396/* HDMI Version 1.4 */
@@ -479,6 +508,7 @@ struct hdmi_conf {
479 int height; 508 int height;
480 int vrefresh; 509 int vrefresh;
481 bool interlace; 510 bool interlace;
511 int cea_video_id;
482 const u8 *hdmiphy_data; 512 const u8 *hdmiphy_data;
483 const struct hdmi_preset_conf *conf; 513 const struct hdmi_preset_conf *conf;
484}; 514};
@@ -934,16 +964,21 @@ static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
934}; 964};
935 965
936static const struct hdmi_conf hdmi_confs[] = { 966static const struct hdmi_conf hdmi_confs[] = {
937 { 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p60 }, 967 { 720, 480, 60, false, 3, hdmiphy_conf27_027, &hdmi_conf_480p60 },
938 { 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p50 }, 968 { 1280, 720, 50, false, 19, hdmiphy_conf74_25, &hdmi_conf_720p50 },
939 { 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 }, 969 { 1280, 720, 60, false, 4, hdmiphy_conf74_25, &hdmi_conf_720p60 },
940 { 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 }, 970 { 1920, 1080, 50, true, 20, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
941 { 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 }, 971 { 1920, 1080, 60, true, 5, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
942 { 1920, 1080, 30, false, hdmiphy_conf74_176, &hdmi_conf_1080p30 }, 972 { 1920, 1080, 30, false, 34, hdmiphy_conf74_176, &hdmi_conf_1080p30 },
943 { 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 }, 973 { 1920, 1080, 50, false, 31, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
944 { 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 }, 974 { 1920, 1080, 60, false, 16, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
945}; 975};
946 976
977struct hdmi_infoframe {
978 enum HDMI_PACKET_TYPE type;
979 u8 ver;
980 u8 len;
981};
947 982
948static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id) 983static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
949{ 984{
@@ -1267,6 +1302,88 @@ static int hdmi_conf_index(struct hdmi_context *hdata,
1267 return hdmi_v14_conf_index(mode); 1302 return hdmi_v14_conf_index(mode);
1268} 1303}
1269 1304
1305static u8 hdmi_chksum(struct hdmi_context *hdata,
1306 u32 start, u8 len, u32 hdr_sum)
1307{
1308 int i;
1309
1310 /* hdr_sum : header0 + header1 + header2
1311 * start : start address of packet byte1
1312 * len : packet bytes - 1 */
1313 for (i = 0; i < len; ++i)
1314 hdr_sum += 0xff & hdmi_reg_read(hdata, start + i * 4);
1315
1316 /* return 2's complement of 8 bit hdr_sum */
1317 return (u8)(~(hdr_sum & 0xff) + 1);
1318}
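A worked instance of the checksum above, using the AVI constants from this file (the payload sum is illustrative):

/*
 * hdr_sum = 0x82 + 0x02 + 0x0d = 0x91. If the 13 payload bytes sum to
 * 0x5a, the total is 0xeb and the returned checksum is
 *
 *   (u8)(~(0xeb) + 1) = 0x15,
 *
 * so (header + payload + checksum) & 0xff == 0, as the InfoFrame
 * checksum rule requires.
 */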
1319
1320static void hdmi_reg_infoframe(struct hdmi_context *hdata,
1321 struct hdmi_infoframe *infoframe)
1322{
1323 u32 hdr_sum;
1324 u8 chksum;
1325 u32 aspect_ratio;
1326 u32 mod;
1327 u32 vic;
1328
1329 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1330
1331 mod = hdmi_reg_read(hdata, HDMI_MODE_SEL);
1332 if (hdata->dvi_mode) {
1333 hdmi_reg_writeb(hdata, HDMI_VSI_CON,
1334 HDMI_VSI_CON_DO_NOT_TRANSMIT);
1335 hdmi_reg_writeb(hdata, HDMI_AVI_CON,
1336 HDMI_AVI_CON_DO_NOT_TRANSMIT);
1337 hdmi_reg_writeb(hdata, HDMI_AUI_CON, HDMI_AUI_CON_NO_TRAN);
1338 return;
1339 }
1340
1341 switch (infoframe->type) {
1342 case HDMI_PACKET_TYPE_AVI:
1343 hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
1344 hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type);
1345 hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver);
1346 hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len);
1347 hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
1348
1349 /* Output format (RGB/YCbCr selection) hardcoded to zero */
1350 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
1351 AVI_ACTIVE_FORMAT_VALID |
1352 AVI_UNDERSCANNED_DISPLAY_VALID);
1353
1354 aspect_ratio = AVI_PIC_ASPECT_RATIO_16_9;
1355
1356 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(2), aspect_ratio |
1357 AVI_SAME_AS_PIC_ASPECT_RATIO);
1358
1359 if (hdata->type == HDMI_TYPE13)
1360 vic = hdmi_v13_confs[hdata->cur_conf].cea_video_id;
1361 else
1362 vic = hdmi_confs[hdata->cur_conf].cea_video_id;
1363
1364 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
1365
1366 chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
1367 infoframe->len, hdr_sum);
1368 DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
1369 hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
1370 break;
1371 case HDMI_PACKET_TYPE_AUI:
1372 hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
1373 hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type);
1374 hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver);
1375 hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len);
1376 hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
1377 chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
1378 infoframe->len, hdr_sum);
1379 DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
1380 hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
1381 break;
1382 default:
1383 break;
1384 }
1385}
1386
1270static bool hdmi_is_connected(void *ctx) 1387static bool hdmi_is_connected(void *ctx)
1271{ 1388{
1272 struct hdmi_context *hdata = ctx; 1389 struct hdmi_context *hdata = ctx;
@@ -1293,6 +1410,7 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
1293 DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n", 1410 DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
1294 (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"), 1411 (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
1295 raw_edid->width_cm, raw_edid->height_cm); 1412 raw_edid->width_cm, raw_edid->height_cm);
1413 kfree(raw_edid);
1296 } else { 1414 } else {
1297 return -ENODEV; 1415 return -ENODEV;
1298 } 1416 }
@@ -1541,6 +1659,8 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
1541 1659
1542static void hdmi_conf_init(struct hdmi_context *hdata) 1660static void hdmi_conf_init(struct hdmi_context *hdata)
1543{ 1661{
1662 struct hdmi_infoframe infoframe;
1663
1544 /* disable HPD interrupts */ 1664 /* disable HPD interrupts */
1545 hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL | 1665 hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
1546 HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG); 1666 HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
@@ -1575,9 +1695,17 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
1575 hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02); 1695 hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
1576 hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04); 1696 hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
1577 } else { 1697 } else {
1698 infoframe.type = HDMI_PACKET_TYPE_AVI;
1699 infoframe.ver = HDMI_AVI_VERSION;
1700 infoframe.len = HDMI_AVI_LENGTH;
1701 hdmi_reg_infoframe(hdata, &infoframe);
1702
1703 infoframe.type = HDMI_PACKET_TYPE_AUI;
1704 infoframe.ver = HDMI_AUI_VERSION;
1705 infoframe.len = HDMI_AUI_LENGTH;
1706 hdmi_reg_infoframe(hdata, &infoframe);
1707
1578 /* enable AVI packet every vsync, fixes purple line problem */ 1708 /* enable AVI packet every vsync, fixes purple line problem */
1579 hdmi_reg_writeb(hdata, HDMI_AVI_CON, 0x02);
1580 hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 2 << 5);
1581 hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5); 1709 hdmi_reg_writemask(hdata, HDMI_CON_1, 2, 3 << 5);
1582 } 1710 }
1583} 1711}
@@ -1875,6 +2003,24 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
1875 mdelay(10); 2003 mdelay(10);
1876} 2004}
1877 2005
2006static void hdmiphy_poweron(struct hdmi_context *hdata)
2007{
2008 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2009
2010 if (hdata->type == HDMI_TYPE14)
2011 hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, 0,
2012 HDMI_PHY_POWER_OFF_EN);
2013}
2014
2015static void hdmiphy_poweroff(struct hdmi_context *hdata)
2016{
2017 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2018
2019 if (hdata->type == HDMI_TYPE14)
2020 hdmi_reg_writemask(hdata, HDMI_PHY_CON_0, ~0,
2021 HDMI_PHY_POWER_OFF_EN);
2022}
2023
1878static void hdmiphy_conf_apply(struct hdmi_context *hdata) 2024static void hdmiphy_conf_apply(struct hdmi_context *hdata)
1879{ 2025{
1880 const u8 *hdmiphy_data; 2026 const u8 *hdmiphy_data;
@@ -1978,9 +2124,18 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
1978 index = hdmi_v14_conf_index(m); 2124 index = hdmi_v14_conf_index(m);
1979 2125
1980 if (index >= 0) { 2126 if (index >= 0) {
2127 struct drm_mode_object base;
2128 struct list_head head;
2129
1981 DRM_INFO("desired mode doesn't exist;\n"); 2130
1982 DRM_INFO("using the most suitable mode available.\n"); 2131
2132
2133 /* preserve display mode header while copying. */
2134 head = adjusted_mode->head;
2135 base = adjusted_mode->base;
1983 memcpy(adjusted_mode, m, sizeof(*m)); 2136 memcpy(adjusted_mode, m, sizeof(*m));
2137 adjusted_mode->head = head;
2138 adjusted_mode->base = base;
1984 break; 2139 break;
1985 } 2140 }
1986 } 2141 }
@@ -2034,12 +2189,12 @@ static void hdmi_poweron(struct hdmi_context *hdata)
2034 2189
2035 mutex_unlock(&hdata->hdmi_mutex); 2190 mutex_unlock(&hdata->hdmi_mutex);
2036 2191
2037 pm_runtime_get_sync(hdata->dev);
2038
2039 regulator_bulk_enable(res->regul_count, res->regul_bulk); 2192 regulator_bulk_enable(res->regul_count, res->regul_bulk);
2040 clk_enable(res->hdmiphy); 2193 clk_enable(res->hdmiphy);
2041 clk_enable(res->hdmi); 2194 clk_enable(res->hdmi);
2042 clk_enable(res->sclk_hdmi); 2195 clk_enable(res->sclk_hdmi);
2196
2197 hdmiphy_poweron(hdata);
2043} 2198}
2044 2199
2045static void hdmi_poweroff(struct hdmi_context *hdata) 2200static void hdmi_poweroff(struct hdmi_context *hdata)
@@ -2058,14 +2213,13 @@ static void hdmi_poweroff(struct hdmi_context *hdata)
2058 * its reset state seems to meet the condition. 2213 * its reset state seems to meet the condition.
2059 */ 2214 */
2060 hdmiphy_conf_reset(hdata); 2215 hdmiphy_conf_reset(hdata);
2216 hdmiphy_poweroff(hdata);
2061 2217
2062 clk_disable(res->sclk_hdmi); 2218 clk_disable(res->sclk_hdmi);
2063 clk_disable(res->hdmi); 2219 clk_disable(res->hdmi);
2064 clk_disable(res->hdmiphy); 2220 clk_disable(res->hdmiphy);
2065 regulator_bulk_disable(res->regul_count, res->regul_bulk); 2221 regulator_bulk_disable(res->regul_count, res->regul_bulk);
2066 2222
2067 pm_runtime_put_sync(hdata->dev);
2068
2069 mutex_lock(&hdata->hdmi_mutex); 2223 mutex_lock(&hdata->hdmi_mutex);
2070 2224
2071 hdata->powered = false; 2225 hdata->powered = false;
@@ -2078,16 +2232,18 @@ static void hdmi_dpms(void *ctx, int mode)
2078{ 2232{
2079 struct hdmi_context *hdata = ctx; 2233 struct hdmi_context *hdata = ctx;
2080 2234
2081 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 2235 DRM_DEBUG_KMS("[%d] %s mode %d\n", __LINE__, __func__, mode);
2082 2236
2083 switch (mode) { 2237 switch (mode) {
2084 case DRM_MODE_DPMS_ON: 2238 case DRM_MODE_DPMS_ON:
2085 hdmi_poweron(hdata); 2239 if (pm_runtime_suspended(hdata->dev))
2240 pm_runtime_get_sync(hdata->dev);
2086 break; 2241 break;
2087 case DRM_MODE_DPMS_STANDBY: 2242 case DRM_MODE_DPMS_STANDBY:
2088 case DRM_MODE_DPMS_SUSPEND: 2243 case DRM_MODE_DPMS_SUSPEND:
2089 case DRM_MODE_DPMS_OFF: 2244 case DRM_MODE_DPMS_OFF:
2090 hdmi_poweroff(hdata); 2245 if (!pm_runtime_suspended(hdata->dev))
2246 pm_runtime_put_sync(hdata->dev);
2091 break; 2247 break;
2092 default: 2248 default:
2093 DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode); 2249 DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
@@ -2166,27 +2322,27 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
2166 memset(res, 0, sizeof(*res)); 2322 memset(res, 0, sizeof(*res));
2167 2323
2168 /* get clocks, power */ 2324 /* get clocks, power */
2169 res->hdmi = clk_get(dev, "hdmi"); 2325 res->hdmi = devm_clk_get(dev, "hdmi");
2170 if (IS_ERR_OR_NULL(res->hdmi)) { 2326 if (IS_ERR_OR_NULL(res->hdmi)) {
2171 DRM_ERROR("failed to get clock 'hdmi'\n"); 2327 DRM_ERROR("failed to get clock 'hdmi'\n");
2172 goto fail; 2328 goto fail;
2173 } 2329 }
2174 res->sclk_hdmi = clk_get(dev, "sclk_hdmi"); 2330 res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
2175 if (IS_ERR_OR_NULL(res->sclk_hdmi)) { 2331 if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
2176 DRM_ERROR("failed to get clock 'sclk_hdmi'\n"); 2332 DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
2177 goto fail; 2333 goto fail;
2178 } 2334 }
2179 res->sclk_pixel = clk_get(dev, "sclk_pixel"); 2335 res->sclk_pixel = devm_clk_get(dev, "sclk_pixel");
2180 if (IS_ERR_OR_NULL(res->sclk_pixel)) { 2336 if (IS_ERR_OR_NULL(res->sclk_pixel)) {
2181 DRM_ERROR("failed to get clock 'sclk_pixel'\n"); 2337 DRM_ERROR("failed to get clock 'sclk_pixel'\n");
2182 goto fail; 2338 goto fail;
2183 } 2339 }
2184 res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy"); 2340 res->sclk_hdmiphy = devm_clk_get(dev, "sclk_hdmiphy");
2185 if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) { 2341 if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
2186 DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n"); 2342 DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
2187 goto fail; 2343 goto fail;
2188 } 2344 }
2189 res->hdmiphy = clk_get(dev, "hdmiphy"); 2345 res->hdmiphy = devm_clk_get(dev, "hdmiphy");
2190 if (IS_ERR_OR_NULL(res->hdmiphy)) { 2346 if (IS_ERR_OR_NULL(res->hdmiphy)) {
2191 DRM_ERROR("failed to get clock 'hdmiphy'\n"); 2347 DRM_ERROR("failed to get clock 'hdmiphy'\n");
2192 goto fail; 2348 goto fail;
@@ -2194,7 +2350,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
2194 2350
2195 clk_set_parent(res->sclk_hdmi, res->sclk_pixel); 2351 clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
2196 2352
2197 res->regul_bulk = kzalloc(ARRAY_SIZE(supply) * 2353 res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) *
2198 sizeof(res->regul_bulk[0]), GFP_KERNEL); 2354 sizeof(res->regul_bulk[0]), GFP_KERNEL);
2199 if (!res->regul_bulk) { 2355 if (!res->regul_bulk) {
2200 DRM_ERROR("failed to get memory for regulators\n"); 2356 DRM_ERROR("failed to get memory for regulators\n");
@@ -2204,7 +2360,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
2204 res->regul_bulk[i].supply = supply[i]; 2360 res->regul_bulk[i].supply = supply[i];
2205 res->regul_bulk[i].consumer = NULL; 2361 res->regul_bulk[i].consumer = NULL;
2206 } 2362 }
2207 ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk); 2363 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
2208 if (ret) { 2364 if (ret) {
2209 DRM_ERROR("failed to get regulators\n"); 2365 DRM_ERROR("failed to get regulators\n");
2210 goto fail; 2366 goto fail;
@@ -2217,28 +2373,6 @@ fail:
2217 return -ENODEV; 2373 return -ENODEV;
2218} 2374}
2219 2375
2220static int hdmi_resources_cleanup(struct hdmi_context *hdata)
2221{
2222 struct hdmi_resources *res = &hdata->res;
2223
2224 regulator_bulk_free(res->regul_count, res->regul_bulk);
2225 /* kfree is NULL-safe */
2226 kfree(res->regul_bulk);
2227 if (!IS_ERR_OR_NULL(res->hdmiphy))
2228 clk_put(res->hdmiphy);
2229 if (!IS_ERR_OR_NULL(res->sclk_hdmiphy))
2230 clk_put(res->sclk_hdmiphy);
2231 if (!IS_ERR_OR_NULL(res->sclk_pixel))
2232 clk_put(res->sclk_pixel);
2233 if (!IS_ERR_OR_NULL(res->sclk_hdmi))
2234 clk_put(res->sclk_hdmi);
2235 if (!IS_ERR_OR_NULL(res->hdmi))
2236 clk_put(res->hdmi);
2237 memset(res, 0, sizeof(*res));
2238
2239 return 0;
2240}
2241
2242static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy; 2376static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;
2243 2377
2244void hdmi_attach_ddc_client(struct i2c_client *ddc) 2378void hdmi_attach_ddc_client(struct i2c_client *ddc)
@@ -2306,6 +2440,7 @@ static struct platform_device_id hdmi_driver_types[] = {
2306 } 2440 }
2307}; 2441};
2308 2442
2443#ifdef CONFIG_OF
2309static struct of_device_id hdmi_match_types[] = { 2444static struct of_device_id hdmi_match_types[] = {
2310 { 2445 {
2311 .compatible = "samsung,exynos5-hdmi", 2446 .compatible = "samsung,exynos5-hdmi",
@@ -2314,6 +2449,7 @@ static struct of_device_id hdmi_match_types[] = {
2314 /* end node */ 2449 /* end node */
2315 } 2450 }
2316}; 2451};
2452#endif
2317 2453
2318static int __devinit hdmi_probe(struct platform_device *pdev) 2454static int __devinit hdmi_probe(struct platform_device *pdev)
2319{ 2455{
@@ -2366,6 +2502,8 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
2366 const struct of_device_id *match; 2502 const struct of_device_id *match;
2367 match = of_match_node(of_match_ptr(hdmi_match_types), 2503 match = of_match_node(of_match_ptr(hdmi_match_types),
2368 pdev->dev.of_node); 2504 pdev->dev.of_node);
2505 if (match == NULL)
2506 return -ENODEV;
2369 hdata->type = (enum hdmi_type)match->data; 2507 hdata->type = (enum hdmi_type)match->data;
2370 } else { 2508 } else {
2371 hdata->type = (enum hdmi_type)platform_get_device_id 2509 hdata->type = (enum hdmi_type)platform_get_device_id
@@ -2378,36 +2516,32 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
2378 ret = hdmi_resources_init(hdata); 2516 ret = hdmi_resources_init(hdata);
2379 2517
2380 if (ret) { 2518 if (ret) {
2381 ret = -EINVAL;
2382 DRM_ERROR("hdmi_resources_init failed\n"); 2519 DRM_ERROR("hdmi_resources_init failed\n");
2383 goto err_data; 2520 return -EINVAL;
2384 } 2521 }
2385 2522
2386 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2523 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2387 if (!res) { 2524 if (!res) {
2388 DRM_ERROR("failed to find registers\n"); 2525 DRM_ERROR("failed to find registers\n");
2389 ret = -ENOENT; 2526 return -ENOENT;
2390 goto err_resource;
2391 } 2527 }
2392 2528
2393 hdata->regs = devm_request_and_ioremap(&pdev->dev, res); 2529 hdata->regs = devm_request_and_ioremap(&pdev->dev, res);
2394 if (!hdata->regs) { 2530 if (!hdata->regs) {
2395 DRM_ERROR("failed to map registers\n"); 2531 DRM_ERROR("failed to map registers\n");
2396 ret = -ENXIO; 2532 return -ENXIO;
2397 goto err_resource;
2398 } 2533 }
2399 2534
2400 ret = gpio_request(hdata->hpd_gpio, "HPD"); 2535 ret = devm_gpio_request(&pdev->dev, hdata->hpd_gpio, "HPD");
2401 if (ret) { 2536 if (ret) {
2402 DRM_ERROR("failed to request HPD gpio\n"); 2537 DRM_ERROR("failed to request HPD gpio\n");
2403 goto err_resource; 2538 return ret;
2404 } 2539 }
2405 2540
2406 /* DDC i2c driver */ 2541 /* DDC i2c driver */
2407 if (i2c_add_driver(&ddc_driver)) { 2542 if (i2c_add_driver(&ddc_driver)) {
2408 DRM_ERROR("failed to register ddc i2c driver\n"); 2543 DRM_ERROR("failed to register ddc i2c driver\n");
2409 ret = -ENOENT; 2544 return -ENOENT;
2410 goto err_gpio;
2411 } 2545 }
2412 2546
2413 hdata->ddc_port = hdmi_ddc; 2547 hdata->ddc_port = hdmi_ddc;
@@ -2470,11 +2604,6 @@ err_hdmiphy:
2470 i2c_del_driver(&hdmiphy_driver); 2604 i2c_del_driver(&hdmiphy_driver);
2471err_ddc: 2605err_ddc:
2472 i2c_del_driver(&ddc_driver); 2606 i2c_del_driver(&ddc_driver);
2473err_gpio:
2474 gpio_free(hdata->hpd_gpio);
2475err_resource:
2476 hdmi_resources_cleanup(hdata);
2477err_data:
2478 return ret; 2607 return ret;
2479} 2608}
2480 2609
@@ -2491,9 +2620,6 @@ static int __devexit hdmi_remove(struct platform_device *pdev)
2491 free_irq(hdata->internal_irq, hdata); 2620 free_irq(hdata->internal_irq, hdata);
2492 free_irq(hdata->external_irq, hdata); 2621 free_irq(hdata->external_irq, hdata);
2493 2622
2494 gpio_free(hdata->hpd_gpio);
2495
2496 hdmi_resources_cleanup(hdata);
2497 2623
2498 /* hdmiphy i2c driver */ 2624 /* hdmiphy i2c driver */
2499 i2c_del_driver(&hdmiphy_driver); 2625 i2c_del_driver(&hdmiphy_driver);
@@ -2509,6 +2635,8 @@ static int hdmi_suspend(struct device *dev)
2509 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev); 2635 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
2510 struct hdmi_context *hdata = ctx->ctx; 2636 struct hdmi_context *hdata = ctx->ctx;
2511 2637
2638 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2639
2512 disable_irq(hdata->internal_irq); 2640 disable_irq(hdata->internal_irq);
2513 disable_irq(hdata->external_irq); 2641 disable_irq(hdata->external_irq);
2514 2642
@@ -2516,6 +2644,11 @@ static int hdmi_suspend(struct device *dev)
2516 if (ctx->drm_dev) 2644 if (ctx->drm_dev)
2517 drm_helper_hpd_irq_event(ctx->drm_dev); 2645 drm_helper_hpd_irq_event(ctx->drm_dev);
2518 2646
2647 if (pm_runtime_suspended(dev)) {
2648 DRM_DEBUG_KMS("%s : Already suspended\n", __func__);
2649 return 0;
2650 }
2651
2519 hdmi_poweroff(hdata); 2652 hdmi_poweroff(hdata);
2520 2653
2521 return 0; 2654 return 0;
@@ -2526,13 +2659,52 @@ static int hdmi_resume(struct device *dev)
2526 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev); 2659 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
2527 struct hdmi_context *hdata = ctx->ctx; 2660 struct hdmi_context *hdata = ctx->ctx;
2528 2661
2662 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2663
2664 hdata->hpd = gpio_get_value(hdata->hpd_gpio);
2665
2529 enable_irq(hdata->external_irq); 2666 enable_irq(hdata->external_irq);
2530 enable_irq(hdata->internal_irq); 2667 enable_irq(hdata->internal_irq);
2668
2669 if (!pm_runtime_suspended(dev)) {
2670 DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
2671 return 0;
2672 }
2673
2674 hdmi_poweron(hdata);
2675
2531 return 0; 2676 return 0;
2532} 2677}
2533#endif 2678#endif
2534 2679
2535static SIMPLE_DEV_PM_OPS(hdmi_pm_ops, hdmi_suspend, hdmi_resume); 2680#ifdef CONFIG_PM_RUNTIME
2681static int hdmi_runtime_suspend(struct device *dev)
2682{
2683 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
2684 struct hdmi_context *hdata = ctx->ctx;
2685 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2686
2687 hdmi_poweroff(hdata);
2688
2689 return 0;
2690}
2691
2692static int hdmi_runtime_resume(struct device *dev)
2693{
2694 struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
2695 struct hdmi_context *hdata = ctx->ctx;
2696 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2697
2698 hdmi_poweron(hdata);
2699
2700 return 0;
2701}
2702#endif
2703
2704static const struct dev_pm_ops hdmi_pm_ops = {
2705 SET_SYSTEM_SLEEP_PM_OPS(hdmi_suspend, hdmi_resume)
2706 SET_RUNTIME_PM_OPS(hdmi_runtime_suspend, hdmi_runtime_resume, NULL)
2707};
2536 2708
2537struct platform_driver hdmi_driver = { 2709struct platform_driver hdmi_driver = {
2538 .probe = hdmi_probe, 2710 .probe = hdmi_probe,
@@ -2542,6 +2714,6 @@ struct platform_driver hdmi_driver = {
2542 .name = "exynos-hdmi", 2714 .name = "exynos-hdmi",
2543 .owner = THIS_MODULE, 2715 .owner = THIS_MODULE,
2544 .pm = &hdmi_pm_ops, 2716 .pm = &hdmi_pm_ops,
2545 .of_match_table = hdmi_match_types, 2717 .of_match_table = of_match_ptr(hdmi_match_types),
2546 }, 2718 },
2547}; 2719};
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
index 27d1720f1bbd..6206056f4a33 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -46,6 +46,7 @@ static const struct i2c_device_id hdmiphy_id[] = {
46 { }, 46 { },
47}; 47};
48 48
49#ifdef CONFIG_OF
49static struct of_device_id hdmiphy_match_types[] = { 50static struct of_device_id hdmiphy_match_types[] = {
50 { 51 {
51 .compatible = "samsung,exynos5-hdmiphy", 52 .compatible = "samsung,exynos5-hdmiphy",
@@ -53,12 +54,13 @@ static struct of_device_id hdmiphy_match_types[] = {
53 /* end node */ 54 /* end node */
54 } 55 }
55}; 56};
57#endif
56 58
57struct i2c_driver hdmiphy_driver = { 59struct i2c_driver hdmiphy_driver = {
58 .driver = { 60 .driver = {
59 .name = "exynos-hdmiphy", 61 .name = "exynos-hdmiphy",
60 .owner = THIS_MODULE, 62 .owner = THIS_MODULE,
61 .of_match_table = hdmiphy_match_types, 63 .of_match_table = of_match_ptr(hdmiphy_match_types),
62 }, 64 },
63 .id_table = hdmiphy_id, 65 .id_table = hdmiphy_id,
64 .probe = hdmiphy_probe, 66 .probe = hdmiphy_probe,
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index e7fbb823fd8e..21db89530fc7 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -36,14 +36,13 @@
36 36
37#include "exynos_drm_drv.h" 37#include "exynos_drm_drv.h"
38#include "exynos_drm_hdmi.h" 38#include "exynos_drm_hdmi.h"
39#include "exynos_drm_iommu.h"
39 40
40#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev)) 41#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev))
41 42
42struct hdmi_win_data { 43struct hdmi_win_data {
43 dma_addr_t dma_addr; 44 dma_addr_t dma_addr;
44 void __iomem *vaddr;
45 dma_addr_t chroma_dma_addr; 45 dma_addr_t chroma_dma_addr;
46 void __iomem *chroma_vaddr;
47 uint32_t pixel_format; 46 uint32_t pixel_format;
48 unsigned int bpp; 47 unsigned int bpp;
49 unsigned int crtc_x; 48 unsigned int crtc_x;
@@ -59,6 +58,8 @@ struct hdmi_win_data {
59 unsigned int mode_width; 58 unsigned int mode_width;
60 unsigned int mode_height; 59 unsigned int mode_height;
61 unsigned int scan_flags; 60 unsigned int scan_flags;
61 bool enabled;
62 bool resume;
62}; 63};
63 64
64struct mixer_resources { 65struct mixer_resources {
@@ -80,6 +81,7 @@ enum mixer_version_id {
80 81
81struct mixer_context { 82struct mixer_context {
82 struct device *dev; 83 struct device *dev;
84 struct drm_device *drm_dev;
83 int pipe; 85 int pipe;
84 bool interlace; 86 bool interlace;
85 bool powered; 87 bool powered;
@@ -90,6 +92,9 @@ struct mixer_context {
90 struct mixer_resources mixer_res; 92 struct mixer_resources mixer_res;
91 struct hdmi_win_data win_data[MIXER_WIN_NR]; 93 struct hdmi_win_data win_data[MIXER_WIN_NR];
92 enum mixer_version_id mxr_ver; 94 enum mixer_version_id mxr_ver;
95 void *parent_ctx;
96 wait_queue_head_t wait_vsync_queue;
97 atomic_t wait_vsync_event;
93}; 98};
94 99
95struct mixer_drv_data { 100struct mixer_drv_data {
@@ -665,58 +670,22 @@ static void mixer_win_reset(struct mixer_context *ctx)
665 spin_unlock_irqrestore(&res->reg_slock, flags); 670 spin_unlock_irqrestore(&res->reg_slock, flags);
666} 671}
667 672
668static void mixer_poweron(struct mixer_context *ctx) 673static int mixer_iommu_on(void *ctx, bool enable)
669{
670 struct mixer_resources *res = &ctx->mixer_res;
671
672 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
673
674 mutex_lock(&ctx->mixer_mutex);
675 if (ctx->powered) {
676 mutex_unlock(&ctx->mixer_mutex);
677 return;
678 }
679 ctx->powered = true;
680 mutex_unlock(&ctx->mixer_mutex);
681
682 pm_runtime_get_sync(ctx->dev);
683
684 clk_enable(res->mixer);
685 if (ctx->vp_enabled) {
686 clk_enable(res->vp);
687 clk_enable(res->sclk_mixer);
688 }
689
690 mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
691 mixer_win_reset(ctx);
692}
693
694static void mixer_poweroff(struct mixer_context *ctx)
695{ 674{
696 struct mixer_resources *res = &ctx->mixer_res; 675 struct exynos_drm_hdmi_context *drm_hdmi_ctx;
697 676 struct mixer_context *mdata = ctx;
698 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 677 struct drm_device *drm_dev;
699 678
700 mutex_lock(&ctx->mixer_mutex); 679 drm_hdmi_ctx = mdata->parent_ctx;
701 if (!ctx->powered) 680 drm_dev = drm_hdmi_ctx->drm_dev;
702 goto out;
703 mutex_unlock(&ctx->mixer_mutex);
704 681
705 ctx->int_en = mixer_reg_read(res, MXR_INT_EN); 682 if (is_drm_iommu_supported(drm_dev)) {
683 if (enable)
684 return drm_iommu_attach_device(drm_dev, mdata->dev);
706 685
707 clk_disable(res->mixer); 686 drm_iommu_detach_device(drm_dev, mdata->dev);
708 if (ctx->vp_enabled) {
709 clk_disable(res->vp);
710 clk_disable(res->sclk_mixer);
711 } 687 }
712 688 return 0;
713 pm_runtime_put_sync(ctx->dev);
714
715 mutex_lock(&ctx->mixer_mutex);
716 ctx->powered = false;
717
718out:
719 mutex_unlock(&ctx->mixer_mutex);
720} 689}
721 690
722static int mixer_enable_vblank(void *ctx, int pipe) 691static int mixer_enable_vblank(void *ctx, int pipe)
@@ -746,39 +715,6 @@ static void mixer_disable_vblank(void *ctx)
746 mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC); 715 mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
747} 716}
748 717
749static void mixer_dpms(void *ctx, int mode)
750{
751 struct mixer_context *mixer_ctx = ctx;
752
753 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
754
755 switch (mode) {
756 case DRM_MODE_DPMS_ON:
757 mixer_poweron(mixer_ctx);
758 break;
759 case DRM_MODE_DPMS_STANDBY:
760 case DRM_MODE_DPMS_SUSPEND:
761 case DRM_MODE_DPMS_OFF:
762 mixer_poweroff(mixer_ctx);
763 break;
764 default:
765 DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
766 break;
767 }
768}
769
770static void mixer_wait_for_vblank(void *ctx)
771{
772 struct mixer_context *mixer_ctx = ctx;
773 struct mixer_resources *res = &mixer_ctx->mixer_res;
774 int ret;
775
776 ret = wait_for((mixer_reg_read(res, MXR_INT_STATUS) &
777 MXR_INT_STATUS_VSYNC), 50);
778 if (ret < 0)
779 DRM_DEBUG_KMS("vblank wait timed out.\n");
780}
781
782static void mixer_win_mode_set(void *ctx, 718static void mixer_win_mode_set(void *ctx,
783 struct exynos_drm_overlay *overlay) 719 struct exynos_drm_overlay *overlay)
784{ 720{
@@ -811,9 +747,7 @@ static void mixer_win_mode_set(void *ctx,
811 win_data = &mixer_ctx->win_data[win]; 747 win_data = &mixer_ctx->win_data[win];
812 748
813 win_data->dma_addr = overlay->dma_addr[0]; 749 win_data->dma_addr = overlay->dma_addr[0];
814 win_data->vaddr = overlay->vaddr[0];
815 win_data->chroma_dma_addr = overlay->dma_addr[1]; 750 win_data->chroma_dma_addr = overlay->dma_addr[1];
816 win_data->chroma_vaddr = overlay->vaddr[1];
817 win_data->pixel_format = overlay->pixel_format; 751 win_data->pixel_format = overlay->pixel_format;
818 win_data->bpp = overlay->bpp; 752 win_data->bpp = overlay->bpp;
819 753
@@ -845,6 +779,8 @@ static void mixer_win_commit(void *ctx, int win)
845 vp_video_buffer(mixer_ctx, win); 779 vp_video_buffer(mixer_ctx, win);
846 else 780 else
847 mixer_graph_buffer(mixer_ctx, win); 781 mixer_graph_buffer(mixer_ctx, win);
782
783 mixer_ctx->win_data[win].enabled = true;
848} 784}
849 785
850static void mixer_win_disable(void *ctx, int win) 786static void mixer_win_disable(void *ctx, int win)
@@ -855,6 +791,14 @@ static void mixer_win_disable(void *ctx, int win)
855 791
856 DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win); 792 DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
857 793
794 mutex_lock(&mixer_ctx->mixer_mutex);
795 if (!mixer_ctx->powered) {
796 mutex_unlock(&mixer_ctx->mixer_mutex);
797 mixer_ctx->win_data[win].resume = false;
798 return;
799 }
800 mutex_unlock(&mixer_ctx->mixer_mutex);
801
858 spin_lock_irqsave(&res->reg_slock, flags); 802 spin_lock_irqsave(&res->reg_slock, flags);
859 mixer_vsync_set_update(mixer_ctx, false); 803 mixer_vsync_set_update(mixer_ctx, false);
860 804
@@ -862,16 +806,144 @@ static void mixer_win_disable(void *ctx, int win)
862 806
863 mixer_vsync_set_update(mixer_ctx, true); 807 mixer_vsync_set_update(mixer_ctx, true);
864 spin_unlock_irqrestore(&res->reg_slock, flags); 808 spin_unlock_irqrestore(&res->reg_slock, flags);
809
810 mixer_ctx->win_data[win].enabled = false;
811}
812
813static void mixer_wait_for_vblank(void *ctx)
814{
815 struct mixer_context *mixer_ctx = ctx;
816
817 mutex_lock(&mixer_ctx->mixer_mutex);
818 if (!mixer_ctx->powered) {
819 mutex_unlock(&mixer_ctx->mixer_mutex);
820 return;
821 }
822 mutex_unlock(&mixer_ctx->mixer_mutex);
823
824 atomic_set(&mixer_ctx->wait_vsync_event, 1);
825
826 /*
827 * wait for MIXER to signal VSYNC interrupt or return after
828 * a 50ms timeout (one frame period at a 20Hz refresh rate).
829 */
830 if (!wait_event_timeout(mixer_ctx->wait_vsync_queue,
831 !atomic_read(&mixer_ctx->wait_vsync_event),
832 DRM_HZ/20))
833 DRM_DEBUG_KMS("vblank wait timed out.\n");
834}
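The timeout arithmetic above, spelled out (assuming DRM_HZ equals the kernel's HZ, as drmP.h defines it):

/*
 * DRM_HZ/20 jiffies is 1/20 s = 50 ms independent of the configured HZ
 * (HZ=100 -> 5 jiffies, HZ=1000 -> 50 jiffies), i.e. one frame period
 * at a 20 Hz refresh rate.
 */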
835
836static void mixer_window_suspend(struct mixer_context *ctx)
837{
838 struct hdmi_win_data *win_data;
839 int i;
840
841 for (i = 0; i < MIXER_WIN_NR; i++) {
842 win_data = &ctx->win_data[i];
843 win_data->resume = win_data->enabled;
844 mixer_win_disable(ctx, i);
845 }
846 mixer_wait_for_vblank(ctx);
847}
848
849static void mixer_window_resume(struct mixer_context *ctx)
850{
851 struct hdmi_win_data *win_data;
852 int i;
853
854 for (i = 0; i < MIXER_WIN_NR; i++) {
855 win_data = &ctx->win_data[i];
856 win_data->enabled = win_data->resume;
857 win_data->resume = false;
858 }
859}
860
861static void mixer_poweron(struct mixer_context *ctx)
862{
863 struct mixer_resources *res = &ctx->mixer_res;
864
865 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
866
867 mutex_lock(&ctx->mixer_mutex);
868 if (ctx->powered) {
869 mutex_unlock(&ctx->mixer_mutex);
870 return;
871 }
872 ctx->powered = true;
873 mutex_unlock(&ctx->mixer_mutex);
874
875 clk_enable(res->mixer);
876 if (ctx->vp_enabled) {
877 clk_enable(res->vp);
878 clk_enable(res->sclk_mixer);
879 }
880
881 mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
882 mixer_win_reset(ctx);
883
884 mixer_window_resume(ctx);
885}
886
887static void mixer_poweroff(struct mixer_context *ctx)
888{
889 struct mixer_resources *res = &ctx->mixer_res;
890
891 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
892
893 mutex_lock(&ctx->mixer_mutex);
894 if (!ctx->powered)
895 goto out;
896 mutex_unlock(&ctx->mixer_mutex);
897
898 mixer_window_suspend(ctx);
899
900 ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
901
902 clk_disable(res->mixer);
903 if (ctx->vp_enabled) {
904 clk_disable(res->vp);
905 clk_disable(res->sclk_mixer);
906 }
907
908 mutex_lock(&ctx->mixer_mutex);
909 ctx->powered = false;
910
911out:
912 mutex_unlock(&ctx->mixer_mutex);
913}
914
915static void mixer_dpms(void *ctx, int mode)
916{
917 struct mixer_context *mixer_ctx = ctx;
918
919 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
920
921 switch (mode) {
922 case DRM_MODE_DPMS_ON:
923 if (pm_runtime_suspended(mixer_ctx->dev))
924 pm_runtime_get_sync(mixer_ctx->dev);
925 break;
926 case DRM_MODE_DPMS_STANDBY:
927 case DRM_MODE_DPMS_SUSPEND:
928 case DRM_MODE_DPMS_OFF:
929 if (!pm_runtime_suspended(mixer_ctx->dev))
930 pm_runtime_put_sync(mixer_ctx->dev);
931 break;
932 default:
933 DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
934 break;
935 }
865} 936}
866 937
867static struct exynos_mixer_ops mixer_ops = { 938static struct exynos_mixer_ops mixer_ops = {
868 /* manager */ 939 /* manager */
940 .iommu_on = mixer_iommu_on,
869 .enable_vblank = mixer_enable_vblank, 941 .enable_vblank = mixer_enable_vblank,
870 .disable_vblank = mixer_disable_vblank, 942 .disable_vblank = mixer_disable_vblank,
943 .wait_for_vblank = mixer_wait_for_vblank,
871 .dpms = mixer_dpms, 944 .dpms = mixer_dpms,
872 945
873 /* overlay */ 946 /* overlay */
874 .wait_for_vblank = mixer_wait_for_vblank,
875 .win_mode_set = mixer_win_mode_set, 947 .win_mode_set = mixer_win_mode_set,
876 .win_commit = mixer_win_commit, 948 .win_commit = mixer_win_commit,
877 .win_disable = mixer_win_disable, 949 .win_disable = mixer_win_disable,
@@ -884,7 +956,6 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
884 struct drm_pending_vblank_event *e, *t; 956 struct drm_pending_vblank_event *e, *t;
885 struct timeval now; 957 struct timeval now;
886 unsigned long flags; 958 unsigned long flags;
887 bool is_checked = false;
888 959
889 spin_lock_irqsave(&drm_dev->event_lock, flags); 960 spin_lock_irqsave(&drm_dev->event_lock, flags);
890 961
@@ -894,7 +965,6 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
894 if (crtc != e->pipe) 965 if (crtc != e->pipe)
895 continue; 966 continue;
896 967
897 is_checked = true;
898 do_gettimeofday(&now); 968 do_gettimeofday(&now);
899 e->event.sequence = 0; 969 e->event.sequence = 0;
900 e->event.tv_sec = now.tv_sec; 970 e->event.tv_sec = now.tv_sec;
@@ -902,16 +972,9 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
902 972
903 list_move_tail(&e->base.link, &e->base.file_priv->event_list); 973 list_move_tail(&e->base.link, &e->base.file_priv->event_list);
904 wake_up_interruptible(&e->base.file_priv->event_wait); 974 wake_up_interruptible(&e->base.file_priv->event_wait);
975 drm_vblank_put(drm_dev, crtc);
905 } 976 }
906 977
907 if (is_checked)
908 /*
909 * call drm_vblank_put only in case that drm_vblank_get was
910 * called.
911 */
912 if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
913 drm_vblank_put(drm_dev, crtc);
914
915 spin_unlock_irqrestore(&drm_dev->event_lock, flags); 978 spin_unlock_irqrestore(&drm_dev->event_lock, flags);
916} 979}
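Note: with the is_checked bookkeeping gone, each completed flip releases exactly one vblank reference, which presumes the reference was taken when the flip was queued. A sketch of that queueing side (function and field usage illustrative, not from this patch):

static int example_queue_flip(struct drm_device *dev, int pipe,
			      struct drm_pending_vblank_event *e)
{
	int ret;

	ret = drm_vblank_get(dev, pipe);	/* paired with drm_vblank_put() above */
	if (ret)
		return ret;
	e->pipe = pipe;
	/* program the new framebuffer address, then let the IRQ complete it */
	return 0;
}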
917 980
@@ -944,6 +1007,12 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
944 1007
945 drm_handle_vblank(drm_hdmi_ctx->drm_dev, ctx->pipe); 1008 drm_handle_vblank(drm_hdmi_ctx->drm_dev, ctx->pipe);
946 mixer_finish_pageflip(drm_hdmi_ctx->drm_dev, ctx->pipe); 1009 mixer_finish_pageflip(drm_hdmi_ctx->drm_dev, ctx->pipe);
1010
1011 /* set wait vsync event to zero and wake up queue. */
1012 if (atomic_read(&ctx->wait_vsync_event)) {
1013 atomic_set(&ctx->wait_vsync_event, 0);
1014 DRM_WAKEUP(&ctx->wait_vsync_queue);
1015 }
947 } 1016 }
948 1017
949out: 1018out:
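Note: the wake-up added to the IRQ handler pairs with a blocking wait elsewhere in the driver. A sketch of what the matching mixer_wait_for_vblank() is expected to look like under these assumptions (DRM_HZ is the DRM wrapper for HZ; the timeout value is illustrative):

static void example_wait_for_vblank(struct mixer_context *ctx)
{
	atomic_set(&ctx->wait_vsync_event, 1);

	/* Returns when the IRQ handler zeroes wait_vsync_event and calls
	 * DRM_WAKEUP(), or after roughly 100 ms if vsync never arrives. */
	if (!wait_event_timeout(ctx->wait_vsync_queue,
				!atomic_read(&ctx->wait_vsync_event),
				DRM_HZ/10))
		DRM_DEBUG_KMS("vblank wait timed out.\n");
}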
@@ -971,57 +1040,45 @@ static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
971 1040
972 spin_lock_init(&mixer_res->reg_slock); 1041 spin_lock_init(&mixer_res->reg_slock);
973 1042
974 mixer_res->mixer = clk_get(dev, "mixer"); 1043 mixer_res->mixer = devm_clk_get(dev, "mixer");
975 if (IS_ERR_OR_NULL(mixer_res->mixer)) { 1044 if (IS_ERR_OR_NULL(mixer_res->mixer)) {
976 dev_err(dev, "failed to get clock 'mixer'\n"); 1045 dev_err(dev, "failed to get clock 'mixer'\n");
977 ret = -ENODEV; 1046 return -ENODEV;
978 goto fail;
979 } 1047 }
980 1048
981 mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi"); 1049 mixer_res->sclk_hdmi = devm_clk_get(dev, "sclk_hdmi");
982 if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) { 1050 if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) {
983 dev_err(dev, "failed to get clock 'sclk_hdmi'\n"); 1051 dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
984 ret = -ENODEV; 1052 return -ENODEV;
985 goto fail;
986 } 1053 }
987 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1054 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
988 if (res == NULL) { 1055 if (res == NULL) {
989 dev_err(dev, "get memory resource failed.\n"); 1056 dev_err(dev, "get memory resource failed.\n");
990 ret = -ENXIO; 1057 return -ENXIO;
991 goto fail;
992 } 1058 }
993 1059
994 mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start, 1060 mixer_res->mixer_regs = devm_ioremap(&pdev->dev, res->start,
995 resource_size(res)); 1061 resource_size(res));
996 if (mixer_res->mixer_regs == NULL) { 1062 if (mixer_res->mixer_regs == NULL) {
997 dev_err(dev, "register mapping failed.\n"); 1063 dev_err(dev, "register mapping failed.\n");
998 ret = -ENXIO; 1064 return -ENXIO;
999 goto fail;
1000 } 1065 }
1001 1066
1002 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1067 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1003 if (res == NULL) { 1068 if (res == NULL) {
1004 dev_err(dev, "get interrupt resource failed.\n"); 1069 dev_err(dev, "get interrupt resource failed.\n");
1005 ret = -ENXIO; 1070 return -ENXIO;
1006 goto fail;
1007 } 1071 }
1008 1072
1009 ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler, 1073 ret = devm_request_irq(&pdev->dev, res->start, mixer_irq_handler,
1010 0, "drm_mixer", ctx); 1074 0, "drm_mixer", ctx);
1011 if (ret) { 1075 if (ret) {
1012 dev_err(dev, "request interrupt failed.\n"); 1076 dev_err(dev, "request interrupt failed.\n");
1013 goto fail; 1077 return ret;
1014 } 1078 }
1015 mixer_res->irq = res->start; 1079 mixer_res->irq = res->start;
1016 1080
1017 return 0; 1081 return 0;
1018
1019fail:
1020 if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi))
1021 clk_put(mixer_res->sclk_hdmi);
1022 if (!IS_ERR_OR_NULL(mixer_res->mixer))
1023 clk_put(mixer_res->mixer);
1024 return ret;
1025} 1082}
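Note: the switch to devm_clk_get() is what lets the fail: unwind label disappear; managed resources are released by the driver core when probe fails or the device is unbound, so every error path can return directly. The general shape, as a sketch:

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = devm_clk_get(&pdev->dev, "mixer");
	if (IS_ERR(clk))
		return PTR_ERR(clk);	/* nothing to unwind; devm frees it */

	/* each further devm_* acquisition can likewise just return on error */
	return 0;
}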
1026 1083
1027static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx, 1084static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
@@ -1031,25 +1088,21 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
1031 struct device *dev = &pdev->dev; 1088 struct device *dev = &pdev->dev;
1032 struct mixer_resources *mixer_res = &mixer_ctx->mixer_res; 1089 struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
1033 struct resource *res; 1090 struct resource *res;
1034 int ret;
1035 1091
1036 mixer_res->vp = clk_get(dev, "vp"); 1092 mixer_res->vp = devm_clk_get(dev, "vp");
1037 if (IS_ERR_OR_NULL(mixer_res->vp)) { 1093 if (IS_ERR_OR_NULL(mixer_res->vp)) {
1038 dev_err(dev, "failed to get clock 'vp'\n"); 1094 dev_err(dev, "failed to get clock 'vp'\n");
1039 ret = -ENODEV; 1095 return -ENODEV;
1040 goto fail;
1041 } 1096 }
1042 mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer"); 1097 mixer_res->sclk_mixer = devm_clk_get(dev, "sclk_mixer");
1043 if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) { 1098 if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) {
1044 dev_err(dev, "failed to get clock 'sclk_mixer'\n"); 1099 dev_err(dev, "failed to get clock 'sclk_mixer'\n");
1045 ret = -ENODEV; 1100 return -ENODEV;
1046 goto fail;
1047 } 1101 }
1048 mixer_res->sclk_dac = clk_get(dev, "sclk_dac"); 1102 mixer_res->sclk_dac = devm_clk_get(dev, "sclk_dac");
1049 if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) { 1103 if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) {
1050 dev_err(dev, "failed to get clock 'sclk_dac'\n"); 1104 dev_err(dev, "failed to get clock 'sclk_dac'\n");
1051 ret = -ENODEV; 1105 return -ENODEV;
1052 goto fail;
1053 } 1106 }
1054 1107
1055 if (mixer_res->sclk_hdmi) 1108 if (mixer_res->sclk_hdmi)
@@ -1058,28 +1111,17 @@ static int __devinit vp_resources_init(struct exynos_drm_hdmi_context *ctx,
1058 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1111 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1059 if (res == NULL) { 1112 if (res == NULL) {
1060 dev_err(dev, "get memory resource failed.\n"); 1113 dev_err(dev, "get memory resource failed.\n");
1061 ret = -ENXIO; 1114 return -ENXIO;
1062 goto fail;
1063 } 1115 }
1064 1116
1065 mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start, 1117 mixer_res->vp_regs = devm_ioremap(&pdev->dev, res->start,
1066 resource_size(res)); 1118 resource_size(res));
1067 if (mixer_res->vp_regs == NULL) { 1119 if (mixer_res->vp_regs == NULL) {
1068 dev_err(dev, "register mapping failed.\n"); 1120 dev_err(dev, "register mapping failed.\n");
1069 ret = -ENXIO; 1121 return -ENXIO;
1070 goto fail;
1071 } 1122 }
1072 1123
1073 return 0; 1124 return 0;
1074
1075fail:
1076 if (!IS_ERR_OR_NULL(mixer_res->sclk_dac))
1077 clk_put(mixer_res->sclk_dac);
1078 if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer))
1079 clk_put(mixer_res->sclk_mixer);
1080 if (!IS_ERR_OR_NULL(mixer_res->vp))
1081 clk_put(mixer_res->vp);
1082 return ret;
1083} 1125}
1084 1126
1085static struct mixer_drv_data exynos5_mxr_drv_data = { 1127static struct mixer_drv_data exynos5_mxr_drv_data = {
@@ -1149,9 +1191,12 @@ static int __devinit mixer_probe(struct platform_device *pdev)
1149 } 1191 }
1150 1192
1151 ctx->dev = &pdev->dev; 1193 ctx->dev = &pdev->dev;
1194 ctx->parent_ctx = (void *)drm_hdmi_ctx;
1152 drm_hdmi_ctx->ctx = (void *)ctx; 1195 drm_hdmi_ctx->ctx = (void *)ctx;
1153 ctx->vp_enabled = drv->is_vp_enabled; 1196 ctx->vp_enabled = drv->is_vp_enabled;
1154 ctx->mxr_ver = drv->version; 1197 ctx->mxr_ver = drv->version;
1198 DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
1199 atomic_set(&ctx->wait_vsync_event, 0);
1155 1200
1156 platform_set_drvdata(pdev, drm_hdmi_ctx); 1201 platform_set_drvdata(pdev, drm_hdmi_ctx);
1157 1202
@@ -1202,13 +1247,66 @@ static int mixer_suspend(struct device *dev)
1202 struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev); 1247 struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
1203 struct mixer_context *ctx = drm_hdmi_ctx->ctx; 1248 struct mixer_context *ctx = drm_hdmi_ctx->ctx;
1204 1249
1250 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1251
1252 if (pm_runtime_suspended(dev)) {
1253 DRM_DEBUG_KMS("%s : Already suspended\n", __func__);
1254 return 0;
1255 }
1256
1205 mixer_poweroff(ctx); 1257 mixer_poweroff(ctx);
1206 1258
1207 return 0; 1259 return 0;
1208} 1260}
1261
1262static int mixer_resume(struct device *dev)
1263{
1264 struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
1265 struct mixer_context *ctx = drm_hdmi_ctx->ctx;
1266
1267 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1268
1269 if (!pm_runtime_suspended(dev)) {
1270 DRM_DEBUG_KMS("%s : Already resumed\n", __func__);
1271 return 0;
1272 }
1273
1274 mixer_poweron(ctx);
1275
1276 return 0;
1277}
1209#endif 1278#endif
1210 1279
1211static SIMPLE_DEV_PM_OPS(mixer_pm_ops, mixer_suspend, NULL); 1280#ifdef CONFIG_PM_RUNTIME
1281static int mixer_runtime_suspend(struct device *dev)
1282{
1283 struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
1284 struct mixer_context *ctx = drm_hdmi_ctx->ctx;
1285
1286 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1287
1288 mixer_poweroff(ctx);
1289
1290 return 0;
1291}
1292
1293static int mixer_runtime_resume(struct device *dev)
1294{
1295 struct exynos_drm_hdmi_context *drm_hdmi_ctx = get_mixer_context(dev);
1296 struct mixer_context *ctx = drm_hdmi_ctx->ctx;
1297
1298 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1299
1300 mixer_poweron(ctx);
1301
1302 return 0;
1303}
1304#endif
1305
1306static const struct dev_pm_ops mixer_pm_ops = {
1307 SET_SYSTEM_SLEEP_PM_OPS(mixer_suspend, mixer_resume)
1308 SET_RUNTIME_PM_OPS(mixer_runtime_suspend, mixer_runtime_resume, NULL)
1309};
1212 1310
1213struct platform_driver mixer_driver = { 1311struct platform_driver mixer_driver = {
1214 .driver = { 1312 .driver = {
diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h
new file mode 100644
index 000000000000..b4f9ca1fd851
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-fimc.h
@@ -0,0 +1,669 @@
1/* drivers/gpu/drm/exynos/regs-fimc.h
2 *
3 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
6 * Register definition file for Samsung Camera Interface (FIMC) driver
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#ifndef EXYNOS_REGS_FIMC_H
14#define EXYNOS_REGS_FIMC_H
15
16/*
17 * Register part
18*/
19/* Input source format */
20#define EXYNOS_CISRCFMT (0x00)
21/* Window offset */
22#define EXYNOS_CIWDOFST (0x04)
23/* Global control */
24#define EXYNOS_CIGCTRL (0x08)
25/* Window offset 2 */
26#define EXYNOS_CIWDOFST2 (0x14)
27/* Y 1st frame start address for output DMA */
28#define EXYNOS_CIOYSA1 (0x18)
29/* Y 2nd frame start address for output DMA */
30#define EXYNOS_CIOYSA2 (0x1c)
31/* Y 3rd frame start address for output DMA */
32#define EXYNOS_CIOYSA3 (0x20)
33/* Y 4th frame start address for output DMA */
34#define EXYNOS_CIOYSA4 (0x24)
35/* Cb 1st frame start address for output DMA */
36#define EXYNOS_CIOCBSA1 (0x28)
37/* Cb 2nd frame start address for output DMA */
38#define EXYNOS_CIOCBSA2 (0x2c)
39/* Cb 3rd frame start address for output DMA */
40#define EXYNOS_CIOCBSA3 (0x30)
41/* Cb 4th frame start address for output DMA */
42#define EXYNOS_CIOCBSA4 (0x34)
43/* Cr 1st frame start address for output DMA */
44#define EXYNOS_CIOCRSA1 (0x38)
45/* Cr 2nd frame start address for output DMA */
46#define EXYNOS_CIOCRSA2 (0x3c)
47/* Cr 3rd frame start address for output DMA */
48#define EXYNOS_CIOCRSA3 (0x40)
49/* Cr 4th frame start address for output DMA */
50#define EXYNOS_CIOCRSA4 (0x44)
51/* Target image format */
52#define EXYNOS_CITRGFMT (0x48)
53/* Output DMA control */
54#define EXYNOS_CIOCTRL (0x4c)
55/* Pre-scaler control 1 */
56#define EXYNOS_CISCPRERATIO (0x50)
57/* Pre-scaler control 2 */
58#define EXYNOS_CISCPREDST (0x54)
59/* Main scaler control */
60#define EXYNOS_CISCCTRL (0x58)
61/* Target area */
62#define EXYNOS_CITAREA (0x5c)
63/* Status */
64#define EXYNOS_CISTATUS (0x64)
65/* Status2 */
66#define EXYNOS_CISTATUS2 (0x68)
67/* Image capture enable command */
68#define EXYNOS_CIIMGCPT (0xc0)
69/* Capture sequence */
70#define EXYNOS_CICPTSEQ (0xc4)
71/* Image effects */
72#define EXYNOS_CIIMGEFF (0xd0)
73/* Y frame start address for input DMA */
74#define EXYNOS_CIIYSA0 (0xd4)
75/* Cb frame start address for input DMA */
76#define EXYNOS_CIICBSA0 (0xd8)
77/* Cr frame start address for input DMA */
78#define EXYNOS_CIICRSA0 (0xdc)
79/* Input DMA Y Line Skip */
80#define EXYNOS_CIILINESKIP_Y (0xec)
81/* Input DMA Cb Line Skip */
82#define EXYNOS_CIILINESKIP_CB (0xf0)
83/* Input DMA Cr Line Skip */
84#define EXYNOS_CIILINESKIP_CR (0xf4)
85/* Real input DMA image size */
86#define EXYNOS_CIREAL_ISIZE (0xf8)
87/* Input DMA control */
88#define EXYNOS_MSCTRL (0xfc)
89/* Y frame start address for input DMA */
90#define EXYNOS_CIIYSA1 (0x144)
91/* Cb frame start address for input DMA */
92#define EXYNOS_CIICBSA1 (0x148)
93/* Cr frame start address for input DMA */
94#define EXYNOS_CIICRSA1 (0x14c)
95/* Output DMA Y offset */
96#define EXYNOS_CIOYOFF (0x168)
97/* Output DMA CB offset */
98#define EXYNOS_CIOCBOFF (0x16c)
99/* Output DMA CR offset */
100#define EXYNOS_CIOCROFF (0x170)
101/* Input DMA Y offset */
102#define EXYNOS_CIIYOFF (0x174)
103/* Input DMA CB offset */
104#define EXYNOS_CIICBOFF (0x178)
105/* Input DMA CR offset */
106#define EXYNOS_CIICROFF (0x17c)
107/* Input DMA original image size */
108#define EXYNOS_ORGISIZE (0x180)
109/* Output DMA original image size */
110#define EXYNOS_ORGOSIZE (0x184)
111/* Real output DMA image size */
112#define EXYNOS_CIEXTEN (0x188)
113/* DMA parameter */
114#define EXYNOS_CIDMAPARAM (0x18c)
115/* MIPI CSI image format */
116#define EXYNOS_CSIIMGFMT (0x194)
117/* FIMC Clock Source Select */
118#define EXYNOS_MISC_FIMC (0x198)
119
120/* Added in FIMC v5.1 */

121/* Output Frame Buffer Sequence */
122#define EXYNOS_CIFCNTSEQ (0x1fc)
123/* Y 5th frame start address for output DMA */
124#define EXYNOS_CIOYSA5 (0x200)
125/* Y 6th frame start address for output DMA */
126#define EXYNOS_CIOYSA6 (0x204)
127/* Y 7th frame start address for output DMA */
128#define EXYNOS_CIOYSA7 (0x208)
129/* Y 8th frame start address for output DMA */
130#define EXYNOS_CIOYSA8 (0x20c)
131/* Y 9th frame start address for output DMA */
132#define EXYNOS_CIOYSA9 (0x210)
133/* Y 10th frame start address for output DMA */
134#define EXYNOS_CIOYSA10 (0x214)
135/* Y 11th frame start address for output DMA */
136#define EXYNOS_CIOYSA11 (0x218)
137/* Y 12th frame start address for output DMA */
138#define EXYNOS_CIOYSA12 (0x21c)
139/* Y 13th frame start address for output DMA */
140#define EXYNOS_CIOYSA13 (0x220)
141/* Y 14th frame start address for output DMA */
142#define EXYNOS_CIOYSA14 (0x224)
143/* Y 15th frame start address for output DMA */
144#define EXYNOS_CIOYSA15 (0x228)
145/* Y 16th frame start address for output DMA */
146#define EXYNOS_CIOYSA16 (0x22c)
147/* Y 17th frame start address for output DMA */
148#define EXYNOS_CIOYSA17 (0x230)
149/* Y 18th frame start address for output DMA */
150#define EXYNOS_CIOYSA18 (0x234)
151/* Y 19th frame start address for output DMA */
152#define EXYNOS_CIOYSA19 (0x238)
153/* Y 20th frame start address for output DMA */
154#define EXYNOS_CIOYSA20 (0x23c)
155/* Y 21st frame start address for output DMA */
156#define EXYNOS_CIOYSA21			(0x240)
157/* Y 22nd frame start address for output DMA */
158#define EXYNOS_CIOYSA22			(0x244)
159/* Y 23rd frame start address for output DMA */
160#define EXYNOS_CIOYSA23 (0x248)
161/* Y 24th frame start address for output DMA */
162#define EXYNOS_CIOYSA24 (0x24c)
163/* Y 25th frame start address for output DMA */
164#define EXYNOS_CIOYSA25 (0x250)
165/* Y 26th frame start address for output DMA */
166#define EXYNOS_CIOYSA26 (0x254)
167/* Y 27th frame start address for output DMA */
168#define EXYNOS_CIOYSA27 (0x258)
169/* Y 28th frame start address for output DMA */
170#define EXYNOS_CIOYSA28 (0x25c)
171/* Y 29th frame start address for output DMA */
172#define EXYNOS_CIOYSA29 (0x260)
173/* Y 30th frame start address for output DMA */
174#define EXYNOS_CIOYSA30 (0x264)
175/* Y 31st frame start address for output DMA */
176#define EXYNOS_CIOYSA31			(0x268)
177/* Y 32nd frame start address for output DMA */
178#define EXYNOS_CIOYSA32 (0x26c)
179
180/* CB 5th frame start address for output DMA */
181#define EXYNOS_CIOCBSA5 (0x270)
182/* CB 6th frame start address for output DMA */
183#define EXYNOS_CIOCBSA6 (0x274)
184/* CB 7th frame start address for output DMA */
185#define EXYNOS_CIOCBSA7 (0x278)
186/* CB 8th frame start address for output DMA */
187#define EXYNOS_CIOCBSA8 (0x27c)
188/* CB 9th frame start address for output DMA */
189#define EXYNOS_CIOCBSA9 (0x280)
190/* CB 10th frame start address for output DMA */
191#define EXYNOS_CIOCBSA10 (0x284)
192/* CB 11th frame start address for output DMA */
193#define EXYNOS_CIOCBSA11 (0x288)
194/* CB 12th frame start address for output DMA */
195#define EXYNOS_CIOCBSA12 (0x28c)
196/* CB 13th frame start address for output DMA */
197#define EXYNOS_CIOCBSA13 (0x290)
198/* CB 14th frame start address for output DMA */
199#define EXYNOS_CIOCBSA14 (0x294)
200/* CB 15th frame start address for output DMA */
201#define EXYNOS_CIOCBSA15 (0x298)
202/* CB 16th frame start address for output DMA */
203#define EXYNOS_CIOCBSA16 (0x29c)
204/* CB 17th frame start address for output DMA */
205#define EXYNOS_CIOCBSA17 (0x2a0)
206/* CB 18th frame start address for output DMA */
207#define EXYNOS_CIOCBSA18 (0x2a4)
208/* CB 19th frame start address for output DMA */
209#define EXYNOS_CIOCBSA19 (0x2a8)
210/* CB 20th frame start address for output DMA */
211#define EXYNOS_CIOCBSA20 (0x2ac)
212/* CB 21st frame start address for output DMA */
213#define EXYNOS_CIOCBSA21		(0x2b0)
214/* CB 22nd frame start address for output DMA */
215#define EXYNOS_CIOCBSA22		(0x2b4)
216/* CB 23rd frame start address for output DMA */
217#define EXYNOS_CIOCBSA23 (0x2b8)
218/* CB 24th frame start address for output DMA */
219#define EXYNOS_CIOCBSA24 (0x2bc)
220/* CB 25th frame start address for output DMA */
221#define EXYNOS_CIOCBSA25 (0x2c0)
222/* CB 26th frame start address for output DMA */
223#define EXYNOS_CIOCBSA26 (0x2c4)
224/* CB 27th frame start address for output DMA */
225#define EXYNOS_CIOCBSA27 (0x2c8)
226/* CB 28th frame start address for output DMA */
227#define EXYNOS_CIOCBSA28 (0x2cc)
228/* CB 29th frame start address for output DMA */
229#define EXYNOS_CIOCBSA29 (0x2d0)
230/* CB 30th frame start address for output DMA */
231#define EXYNOS_CIOCBSA30 (0x2d4)
232/* CB 31st frame start address for output DMA */
233#define EXYNOS_CIOCBSA31		(0x2d8)
234/* CB 32nd frame start address for output DMA */
235#define EXYNOS_CIOCBSA32 (0x2dc)
236
237/* CR 5th frame start address for output DMA */
238#define EXYNOS_CIOCRSA5 (0x2e0)
239/* CR 6th frame start address for output DMA */
240#define EXYNOS_CIOCRSA6 (0x2e4)
241/* CR 7th frame start address for output DMA */
242#define EXYNOS_CIOCRSA7 (0x2e8)
243/* CR 8th frame start address for output DMA */
244#define EXYNOS_CIOCRSA8 (0x2ec)
245/* CR 9th frame start address for output DMA */
246#define EXYNOS_CIOCRSA9 (0x2f0)
247/* CR 10th frame start address for output DMA */
248#define EXYNOS_CIOCRSA10 (0x2f4)
249/* CR 11th frame start address for output DMA */
250#define EXYNOS_CIOCRSA11 (0x2f8)
251/* CR 12th frame start address for output DMA */
252#define EXYNOS_CIOCRSA12 (0x2fc)
253/* CR 13th frame start address for output DMA */
254#define EXYNOS_CIOCRSA13 (0x300)
255/* CR 14th frame start address for output DMA */
256#define EXYNOS_CIOCRSA14 (0x304)
257/* CR 15th frame start address for output DMA */
258#define EXYNOS_CIOCRSA15 (0x308)
259/* CR 16th frame start address for output DMA */
260#define EXYNOS_CIOCRSA16 (0x30c)
261/* CR 17th frame start address for output DMA */
262#define EXYNOS_CIOCRSA17 (0x310)
263/* CR 18th frame start address for output DMA */
264#define EXYNOS_CIOCRSA18 (0x314)
265/* CR 19th frame start address for output DMA */
266#define EXYNOS_CIOCRSA19 (0x318)
267/* CR 20th frame start address for output DMA */
268#define EXYNOS_CIOCRSA20 (0x31c)
269/* CR 21st frame start address for output DMA */
270#define EXYNOS_CIOCRSA21		(0x320)
271/* CR 22nd frame start address for output DMA */
272#define EXYNOS_CIOCRSA22		(0x324)
273/* CR 23rd frame start address for output DMA */
274#define EXYNOS_CIOCRSA23 (0x328)
275/* CR 24th frame start address for output DMA */
276#define EXYNOS_CIOCRSA24 (0x32c)
277/* CR 25th frame start address for output DMA */
278#define EXYNOS_CIOCRSA25 (0x330)
279/* CR 26th frame start address for output DMA */
280#define EXYNOS_CIOCRSA26 (0x334)
281/* CR 27th frame start address for output DMA */
282#define EXYNOS_CIOCRSA27 (0x338)
283/* CR 28th frame start address for output DMA */
284#define EXYNOS_CIOCRSA28 (0x33c)
285/* CR 29th frame start address for output DMA */
286#define EXYNOS_CIOCRSA29 (0x340)
287/* CR 30th frame start address for output DMA */
288#define EXYNOS_CIOCRSA30 (0x344)
289/* CR 31st frame start address for output DMA */
290#define EXYNOS_CIOCRSA31		(0x348)
291/* CR 32nd frame start address for output DMA */
292#define EXYNOS_CIOCRSA32 (0x34c)
293
294/*
295 * Macro part
296*/
297/* frame start address 1 ~ 4, 5 ~ 32 */
298/* Number of Default PingPong Memory (output DMA) */
299#define DEF_PP 4
300#define EXYNOS_CIOYSA(__x) \
301 (((__x) < DEF_PP) ? \
302 (EXYNOS_CIOYSA1 + (__x) * 4) : \
303 (EXYNOS_CIOYSA5 + ((__x) - DEF_PP) * 4))
304#define EXYNOS_CIOCBSA(__x) \
305 (((__x) < DEF_PP) ? \
306 (EXYNOS_CIOCBSA1 + (__x) * 4) : \
307 (EXYNOS_CIOCBSA5 + ((__x) - DEF_PP) * 4))
308#define EXYNOS_CIOCRSA(__x) \
309 (((__x) < DEF_PP) ? \
310 (EXYNOS_CIOCRSA1 + (__x) * 4) : \
311 (EXYNOS_CIOCRSA5 + ((__x) - DEF_PP) * 4))
312/* Number of Default PingPong Memory (input DMA) */
313#define DEF_IPP 1
314#define EXYNOS_CIIYSA(__x) \
315 (((__x) < DEF_IPP) ? \
316 (EXYNOS_CIIYSA0) : (EXYNOS_CIIYSA1))
317#define EXYNOS_CIICBSA(__x) \
318 (((__x) < DEF_IPP) ? \
319 (EXYNOS_CIICBSA0) : (EXYNOS_CIICBSA1))
320#define EXYNOS_CIICRSA(__x) \
321 (((__x) < DEF_IPP) ? \
322 (EXYNOS_CIICRSA0) : (EXYNOS_CIICRSA1))
323
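Note: the EXYNOS_CIO*SA()/EXYNOS_CII*SA() helpers above hide the split register map: output buffers 0-3 live at the original offsets and 4-31 in the v5.1 block, while input has only two slots. A sketch of programming all 32 output Y addresses uniformly (the function and buffer are assumptions, not driver code):

static void example_set_output_bufs(void __iomem *regs, dma_addr_t *buf)
{
	int i;

	/* indices 0-3 resolve to CIOYSA1..4, 4-31 to the v5.1 CIOYSA5..32 range */
	for (i = 0; i < 32; i++)
		writel(buf[i], regs + EXYNOS_CIOYSA(i));
}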
324#define EXYNOS_CISRCFMT_SOURCEHSIZE(x) ((x) << 16)
325#define EXYNOS_CISRCFMT_SOURCEVSIZE(x) ((x) << 0)
326
327#define EXYNOS_CIWDOFST_WINHOROFST(x) ((x) << 16)
328#define EXYNOS_CIWDOFST_WINVEROFST(x) ((x) << 0)
329
330#define EXYNOS_CIWDOFST2_WINHOROFST2(x) ((x) << 16)
331#define EXYNOS_CIWDOFST2_WINVEROFST2(x) ((x) << 0)
332
333#define EXYNOS_CITRGFMT_TARGETHSIZE(x) (((x) & 0x1fff) << 16)
334#define EXYNOS_CITRGFMT_TARGETVSIZE(x) (((x) & 0x1fff) << 0)
335
336#define EXYNOS_CISCPRERATIO_SHFACTOR(x) ((x) << 28)
337#define EXYNOS_CISCPRERATIO_PREHORRATIO(x) ((x) << 16)
338#define EXYNOS_CISCPRERATIO_PREVERRATIO(x) ((x) << 0)
339
340#define EXYNOS_CISCPREDST_PREDSTWIDTH(x) ((x) << 16)
341#define EXYNOS_CISCPREDST_PREDSTHEIGHT(x) ((x) << 0)
342
343#define EXYNOS_CISCCTRL_MAINHORRATIO(x) ((x) << 16)
344#define EXYNOS_CISCCTRL_MAINVERRATIO(x) ((x) << 0)
345
346#define EXYNOS_CITAREA_TARGET_AREA(x) ((x) << 0)
347
348#define EXYNOS_CISTATUS_GET_FRAME_COUNT(x) (((x) >> 26) & 0x3)
349#define EXYNOS_CISTATUS_GET_FRAME_END(x) (((x) >> 17) & 0x1)
350#define EXYNOS_CISTATUS_GET_LAST_CAPTURE_END(x) (((x) >> 16) & 0x1)
351#define EXYNOS_CISTATUS_GET_LCD_STATUS(x) (((x) >> 9) & 0x1)
352#define EXYNOS_CISTATUS_GET_ENVID_STATUS(x) (((x) >> 8) & 0x1)
353
354#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(x) (((x) >> 7) & 0x3f)
355#define EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(x) ((x) & 0x3f)
356
357#define EXYNOS_CIIMGEFF_FIN(x) ((x & 0x7) << 26)
358#define EXYNOS_CIIMGEFF_PAT_CB(x) ((x) << 13)
359#define EXYNOS_CIIMGEFF_PAT_CR(x) ((x) << 0)
360
361#define EXYNOS_CIILINESKIP(x) (((x) & 0xf) << 24)
362
363#define EXYNOS_CIREAL_ISIZE_HEIGHT(x) ((x) << 16)
364#define EXYNOS_CIREAL_ISIZE_WIDTH(x) ((x) << 0)
365
366#define EXYNOS_MSCTRL_SUCCESSIVE_COUNT(x) ((x) << 24)
367#define EXYNOS_MSCTRL_GET_INDMA_STATUS(x) ((x) & 0x1)
368
369#define EXYNOS_CIOYOFF_VERTICAL(x) ((x) << 16)
370#define EXYNOS_CIOYOFF_HORIZONTAL(x) ((x) << 0)
371
372#define EXYNOS_CIOCBOFF_VERTICAL(x) ((x) << 16)
373#define EXYNOS_CIOCBOFF_HORIZONTAL(x) ((x) << 0)
374
375#define EXYNOS_CIOCROFF_VERTICAL(x) ((x) << 16)
376#define EXYNOS_CIOCROFF_HORIZONTAL(x) ((x) << 0)
377
378#define EXYNOS_CIIYOFF_VERTICAL(x) ((x) << 16)
379#define EXYNOS_CIIYOFF_HORIZONTAL(x) ((x) << 0)
380
381#define EXYNOS_CIICBOFF_VERTICAL(x) ((x) << 16)
382#define EXYNOS_CIICBOFF_HORIZONTAL(x) ((x) << 0)
383
384#define EXYNOS_CIICROFF_VERTICAL(x) ((x) << 16)
385#define EXYNOS_CIICROFF_HORIZONTAL(x) ((x) << 0)
386
387#define EXYNOS_ORGISIZE_VERTICAL(x) ((x) << 16)
388#define EXYNOS_ORGISIZE_HORIZONTAL(x) ((x) << 0)
389
390#define EXYNOS_ORGOSIZE_VERTICAL(x) ((x) << 16)
391#define EXYNOS_ORGOSIZE_HORIZONTAL(x) ((x) << 0)
392
393#define EXYNOS_CIEXTEN_TARGETH_EXT(x) ((((x) & 0x2000) >> 13) << 26)
394#define EXYNOS_CIEXTEN_TARGETV_EXT(x) ((((x) & 0x2000) >> 13) << 24)
395#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT(x) (((x) & 0x3F) << 10)
396#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT(x) ((x) & 0x3F)
397
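Note: the CIEXTEN macros exist because target sizes outgrew the 13-bit fields in CITRGFMT: bit 13 of a width or height spills into the extension register, so programming a large size is a two-register update. Sketch (function name and mapped base are assumptions):

static void example_set_target_width(void __iomem *regs, u32 width)
{
	u32 fmt = readl(regs + EXYNOS_CITRGFMT) & ~EXYNOS_CITRGFMT_TARGETH_MASK;
	u32 ext = readl(regs + EXYNOS_CIEXTEN) & ~EXYNOS_CIEXTEN_TARGETH_EXT_MASK;

	writel(fmt | EXYNOS_CITRGFMT_TARGETHSIZE(width), regs + EXYNOS_CITRGFMT);	/* bits 0-12 */
	writel(ext | EXYNOS_CIEXTEN_TARGETH_EXT(width), regs + EXYNOS_CIEXTEN);	/* bit 13 */
}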
398/*
399 * Bit definition part
400*/
401/* Source format register */
402#define EXYNOS_CISRCFMT_ITU601_8BIT (1 << 31)
403#define EXYNOS_CISRCFMT_ITU656_8BIT (0 << 31)
404#define EXYNOS_CISRCFMT_ITU601_16BIT (1 << 29)
405#define EXYNOS_CISRCFMT_ORDER422_YCBYCR (0 << 14)
406#define EXYNOS_CISRCFMT_ORDER422_YCRYCB (1 << 14)
407#define EXYNOS_CISRCFMT_ORDER422_CBYCRY (2 << 14)
408#define EXYNOS_CISRCFMT_ORDER422_CRYCBY (3 << 14)
409/* ITU601 16bit only */
410#define EXYNOS_CISRCFMT_ORDER422_Y4CBCRCBCR (0 << 14)
411/* ITU601 16bit only */
412#define EXYNOS_CISRCFMT_ORDER422_Y4CRCBCRCB (1 << 14)
413
414/* Window offset register */
415#define EXYNOS_CIWDOFST_WINOFSEN (1 << 31)
416#define EXYNOS_CIWDOFST_CLROVFIY (1 << 30)
417#define EXYNOS_CIWDOFST_CLROVRLB (1 << 29)
418#define EXYNOS_CIWDOFST_WINHOROFST_MASK (0x7ff << 16)
419#define EXYNOS_CIWDOFST_CLROVFICB (1 << 15)
420#define EXYNOS_CIWDOFST_CLROVFICR (1 << 14)
421#define EXYNOS_CIWDOFST_WINVEROFST_MASK (0xfff << 0)
422
423/* Global control register */
424#define EXYNOS_CIGCTRL_SWRST (1 << 31)
425#define EXYNOS_CIGCTRL_CAMRST_A (1 << 30)
426#define EXYNOS_CIGCTRL_SELCAM_ITU_B (0 << 29)
427#define EXYNOS_CIGCTRL_SELCAM_ITU_A (1 << 29)
428#define EXYNOS_CIGCTRL_SELCAM_ITU_MASK (1 << 29)
429#define EXYNOS_CIGCTRL_TESTPATTERN_NORMAL (0 << 27)
430#define EXYNOS_CIGCTRL_TESTPATTERN_COLOR_BAR (1 << 27)
431#define EXYNOS_CIGCTRL_TESTPATTERN_HOR_INC (2 << 27)
432#define EXYNOS_CIGCTRL_TESTPATTERN_VER_INC (3 << 27)
433#define EXYNOS_CIGCTRL_TESTPATTERN_MASK (3 << 27)
434#define EXYNOS_CIGCTRL_TESTPATTERN_SHIFT (27)
435#define EXYNOS_CIGCTRL_INVPOLPCLK (1 << 26)
436#define EXYNOS_CIGCTRL_INVPOLVSYNC (1 << 25)
437#define EXYNOS_CIGCTRL_INVPOLHREF (1 << 24)
438#define EXYNOS_CIGCTRL_IRQ_OVFEN (1 << 22)
439#define EXYNOS_CIGCTRL_HREF_MASK (1 << 21)
440#define EXYNOS_CIGCTRL_IRQ_EDGE (0 << 20)
441#define EXYNOS_CIGCTRL_IRQ_LEVEL (1 << 20)
442#define EXYNOS_CIGCTRL_IRQ_CLR (1 << 19)
443#define EXYNOS_CIGCTRL_IRQ_END_DISABLE (1 << 18)
444#define EXYNOS_CIGCTRL_IRQ_DISABLE (0 << 16)
445#define EXYNOS_CIGCTRL_IRQ_ENABLE (1 << 16)
446#define EXYNOS_CIGCTRL_SHADOW_DISABLE (1 << 12)
447#define EXYNOS_CIGCTRL_CAM_JPEG (1 << 8)
448#define EXYNOS_CIGCTRL_SELCAM_MIPI_B (0 << 7)
449#define EXYNOS_CIGCTRL_SELCAM_MIPI_A (1 << 7)
450#define EXYNOS_CIGCTRL_SELCAM_MIPI_MASK (1 << 7)
451#define EXYNOS_CIGCTRL_SELWB_CAMIF_CAMERA (0 << 6)
452#define EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK (1 << 6)
453#define EXYNOS_CIGCTRL_SELWRITEBACK_MASK (1 << 10)
454#define EXYNOS_CIGCTRL_SELWRITEBACK_A (1 << 10)
455#define EXYNOS_CIGCTRL_SELWRITEBACK_B (0 << 10)
456#define EXYNOS_CIGCTRL_SELWB_CAMIF_MASK (1 << 6)
457#define EXYNOS_CIGCTRL_CSC_ITU601 (0 << 5)
458#define EXYNOS_CIGCTRL_CSC_ITU709 (1 << 5)
459#define EXYNOS_CIGCTRL_CSC_MASK (1 << 5)
460#define EXYNOS_CIGCTRL_INVPOLHSYNC (1 << 4)
461#define EXYNOS_CIGCTRL_SELCAM_FIMC_ITU (0 << 3)
462#define EXYNOS_CIGCTRL_SELCAM_FIMC_MIPI (1 << 3)
463#define EXYNOS_CIGCTRL_SELCAM_FIMC_MASK (1 << 3)
464#define EXYNOS_CIGCTRL_PROGRESSIVE (0 << 0)
465#define EXYNOS_CIGCTRL_INTERLACE (1 << 0)
466
467/* Window offset2 register */
468#define EXYNOS_CIWDOFST_WINHOROFST2_MASK (0xfff << 16)
469#define EXYNOS_CIWDOFST_WINVEROFST2_MASK	(0xfff << 0)
470
471/* Target format register */
472#define EXYNOS_CITRGFMT_INROT90_CLOCKWISE (1 << 31)
473#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420 (0 << 29)
474#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422 (1 << 29)
475#define EXYNOS_CITRGFMT_OUTFORMAT_YCBCR422_1PLANE (2 << 29)
476#define EXYNOS_CITRGFMT_OUTFORMAT_RGB (3 << 29)
477#define EXYNOS_CITRGFMT_OUTFORMAT_MASK (3 << 29)
478#define EXYNOS_CITRGFMT_FLIP_SHIFT (14)
479#define EXYNOS_CITRGFMT_FLIP_NORMAL (0 << 14)
480#define EXYNOS_CITRGFMT_FLIP_X_MIRROR (1 << 14)
481#define EXYNOS_CITRGFMT_FLIP_Y_MIRROR (2 << 14)
482#define EXYNOS_CITRGFMT_FLIP_180 (3 << 14)
483#define EXYNOS_CITRGFMT_FLIP_MASK (3 << 14)
484#define EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE (1 << 13)
485#define EXYNOS_CITRGFMT_TARGETV_MASK (0x1fff << 0)
486#define EXYNOS_CITRGFMT_TARGETH_MASK (0x1fff << 16)
487
488/* Output DMA control register */
489#define EXYNOS_CIOCTRL_WEAVE_OUT (1 << 31)
490#define EXYNOS_CIOCTRL_WEAVE_MASK (1 << 31)
491#define EXYNOS_CIOCTRL_LASTENDEN (1 << 30)
492#define EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR (0 << 24)
493#define EXYNOS_CIOCTRL_ORDER2P_LSB_CRCB (1 << 24)
494#define EXYNOS_CIOCTRL_ORDER2P_MSB_CRCB (2 << 24)
495#define EXYNOS_CIOCTRL_ORDER2P_MSB_CBCR (3 << 24)
496#define EXYNOS_CIOCTRL_ORDER2P_SHIFT (24)
497#define EXYNOS_CIOCTRL_ORDER2P_MASK (3 << 24)
498#define EXYNOS_CIOCTRL_YCBCR_3PLANE (0 << 3)
499#define EXYNOS_CIOCTRL_YCBCR_2PLANE (1 << 3)
500#define EXYNOS_CIOCTRL_YCBCR_PLANE_MASK (1 << 3)
501#define EXYNOS_CIOCTRL_LASTIRQ_ENABLE (1 << 2)
502#define EXYNOS_CIOCTRL_ALPHA_OUT (0xff << 4)
503#define EXYNOS_CIOCTRL_ORDER422_YCBYCR (0 << 0)
504#define EXYNOS_CIOCTRL_ORDER422_YCRYCB (1 << 0)
505#define EXYNOS_CIOCTRL_ORDER422_CBYCRY (2 << 0)
506#define EXYNOS_CIOCTRL_ORDER422_CRYCBY (3 << 0)
507#define EXYNOS_CIOCTRL_ORDER422_MASK (3 << 0)
508
509/* Main scaler control register */
510#define EXYNOS_CISCCTRL_SCALERBYPASS (1 << 31)
511#define EXYNOS_CISCCTRL_SCALEUP_H (1 << 30)
512#define EXYNOS_CISCCTRL_SCALEUP_V (1 << 29)
513#define EXYNOS_CISCCTRL_CSCR2Y_NARROW (0 << 28)
514#define EXYNOS_CISCCTRL_CSCR2Y_WIDE (1 << 28)
515#define EXYNOS_CISCCTRL_CSCY2R_NARROW (0 << 27)
516#define EXYNOS_CISCCTRL_CSCY2R_WIDE (1 << 27)
517#define EXYNOS_CISCCTRL_LCDPATHEN_FIFO (1 << 26)
518#define EXYNOS_CISCCTRL_PROGRESSIVE (0 << 25)
519#define EXYNOS_CISCCTRL_INTERLACE (1 << 25)
520#define EXYNOS_CISCCTRL_SCAN_MASK (1 << 25)
521#define EXYNOS_CISCCTRL_SCALERSTART (1 << 15)
522#define EXYNOS_CISCCTRL_INRGB_FMT_RGB565 (0 << 13)
523#define EXYNOS_CISCCTRL_INRGB_FMT_RGB666 (1 << 13)
524#define EXYNOS_CISCCTRL_INRGB_FMT_RGB888 (2 << 13)
525#define EXYNOS_CISCCTRL_INRGB_FMT_RGB_MASK (3 << 13)
526#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565 (0 << 11)
527#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB666 (1 << 11)
528#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 (2 << 11)
529#define EXYNOS_CISCCTRL_OUTRGB_FMT_RGB_MASK (3 << 11)
530#define EXYNOS_CISCCTRL_EXTRGB_NORMAL (0 << 10)
531#define EXYNOS_CISCCTRL_EXTRGB_EXTENSION (1 << 10)
532#define EXYNOS_CISCCTRL_ONE2ONE (1 << 9)
533#define EXYNOS_CISCCTRL_MAIN_V_RATIO_MASK (0x1ff << 0)
534#define EXYNOS_CISCCTRL_MAIN_H_RATIO_MASK (0x1ff << 16)
535
536/* Status register */
537#define EXYNOS_CISTATUS_OVFIY (1 << 31)
538#define EXYNOS_CISTATUS_OVFICB (1 << 30)
539#define EXYNOS_CISTATUS_OVFICR (1 << 29)
540#define EXYNOS_CISTATUS_VSYNC (1 << 28)
541#define EXYNOS_CISTATUS_SCALERSTART (1 << 26)
542#define EXYNOS_CISTATUS_WINOFSTEN (1 << 25)
543#define EXYNOS_CISTATUS_IMGCPTEN (1 << 22)
544#define EXYNOS_CISTATUS_IMGCPTENSC (1 << 21)
545#define EXYNOS_CISTATUS_VSYNC_A (1 << 20)
546#define EXYNOS_CISTATUS_VSYNC_B (1 << 19)
547#define EXYNOS_CISTATUS_OVRLB (1 << 18)
548#define EXYNOS_CISTATUS_FRAMEEND (1 << 17)
549#define EXYNOS_CISTATUS_LASTCAPTUREEND (1 << 16)
550#define EXYNOS_CISTATUS_VVALID_A (1 << 15)
551#define EXYNOS_CISTATUS_VVALID_B (1 << 14)
552
553/* Image capture enable register */
554#define EXYNOS_CIIMGCPT_IMGCPTEN (1 << 31)
555#define EXYNOS_CIIMGCPT_IMGCPTEN_SC (1 << 30)
556#define EXYNOS_CIIMGCPT_CPT_FREN_ENABLE (1 << 25)
557#define EXYNOS_CIIMGCPT_CPT_FRMOD_EN (0 << 18)
558#define EXYNOS_CIIMGCPT_CPT_FRMOD_CNT (1 << 18)
559
560/* Image effects register */
561#define EXYNOS_CIIMGEFF_IE_DISABLE (0 << 30)
562#define EXYNOS_CIIMGEFF_IE_ENABLE (1 << 30)
563#define EXYNOS_CIIMGEFF_IE_SC_BEFORE (0 << 29)
564#define EXYNOS_CIIMGEFF_IE_SC_AFTER (1 << 29)
565#define EXYNOS_CIIMGEFF_FIN_BYPASS (0 << 26)
566#define EXYNOS_CIIMGEFF_FIN_ARBITRARY (1 << 26)
567#define EXYNOS_CIIMGEFF_FIN_NEGATIVE (2 << 26)
568#define EXYNOS_CIIMGEFF_FIN_ARTFREEZE (3 << 26)
569#define EXYNOS_CIIMGEFF_FIN_EMBOSSING (4 << 26)
570#define EXYNOS_CIIMGEFF_FIN_SILHOUETTE (5 << 26)
571#define EXYNOS_CIIMGEFF_FIN_MASK (7 << 26)
572#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK	((0xff << 13) | (0xff << 0))
573
574/* Real input DMA size register */
575#define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE (1 << 31)
576#define EXYNOS_CIREAL_ISIZE_ADDR_CH_DISABLE (1 << 30)
577#define EXYNOS_CIREAL_ISIZE_HEIGHT_MASK (0x3FFF << 16)
578#define EXYNOS_CIREAL_ISIZE_WIDTH_MASK (0x3FFF << 0)
579
580/* Input DMA control register */
581#define EXYNOS_MSCTRL_FIELD_MASK (1 << 31)
582#define EXYNOS_MSCTRL_FIELD_WEAVE (1 << 31)
583#define EXYNOS_MSCTRL_FIELD_NORMAL (0 << 31)
584#define EXYNOS_MSCTRL_BURST_CNT (24)
585#define EXYNOS_MSCTRL_BURST_CNT_MASK (0xf << 24)
586#define EXYNOS_MSCTRL_ORDER2P_LSB_CBCR (0 << 16)
587#define EXYNOS_MSCTRL_ORDER2P_LSB_CRCB (1 << 16)
588#define EXYNOS_MSCTRL_ORDER2P_MSB_CRCB (2 << 16)
589#define EXYNOS_MSCTRL_ORDER2P_MSB_CBCR (3 << 16)
590#define EXYNOS_MSCTRL_ORDER2P_SHIFT (16)
591#define EXYNOS_MSCTRL_ORDER2P_SHIFT_MASK (0x3 << 16)
592#define EXYNOS_MSCTRL_C_INT_IN_3PLANE (0 << 15)
593#define EXYNOS_MSCTRL_C_INT_IN_2PLANE (1 << 15)
594#define EXYNOS_MSCTRL_FLIP_SHIFT (13)
595#define EXYNOS_MSCTRL_FLIP_NORMAL (0 << 13)
596#define EXYNOS_MSCTRL_FLIP_X_MIRROR (1 << 13)
597#define EXYNOS_MSCTRL_FLIP_Y_MIRROR (2 << 13)
598#define EXYNOS_MSCTRL_FLIP_180 (3 << 13)
599#define EXYNOS_MSCTRL_FLIP_MASK (3 << 13)
600#define EXYNOS_MSCTRL_ORDER422_CRYCBY (0 << 4)
601#define EXYNOS_MSCTRL_ORDER422_YCRYCB (1 << 4)
602#define EXYNOS_MSCTRL_ORDER422_CBYCRY (2 << 4)
603#define EXYNOS_MSCTRL_ORDER422_YCBYCR (3 << 4)
604#define EXYNOS_MSCTRL_INPUT_EXTCAM (0 << 3)
605#define EXYNOS_MSCTRL_INPUT_MEMORY (1 << 3)
606#define EXYNOS_MSCTRL_INPUT_MASK (1 << 3)
607#define EXYNOS_MSCTRL_INFORMAT_YCBCR420 (0 << 1)
608#define EXYNOS_MSCTRL_INFORMAT_YCBCR422 (1 << 1)
609#define EXYNOS_MSCTRL_INFORMAT_YCBCR422_1PLANE (2 << 1)
610#define EXYNOS_MSCTRL_INFORMAT_RGB (3 << 1)
611#define EXYNOS_MSCTRL_ENVID (1 << 0)
612
613/* DMA parameter register */
614#define EXYNOS_CIDMAPARAM_R_MODE_LINEAR (0 << 29)
615#define EXYNOS_CIDMAPARAM_R_MODE_CONFTILE (1 << 29)
616#define EXYNOS_CIDMAPARAM_R_MODE_16X16 (2 << 29)
617#define EXYNOS_CIDMAPARAM_R_MODE_64X32 (3 << 29)
618#define EXYNOS_CIDMAPARAM_R_MODE_MASK (3 << 29)
619#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_64 (0 << 24)
620#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_128 (1 << 24)
621#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_256 (2 << 24)
622#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_512 (3 << 24)
623#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_1024 (4 << 24)
624#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_2048 (5 << 24)
625#define EXYNOS_CIDMAPARAM_R_TILE_HSIZE_4096 (6 << 24)
626#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_1 (0 << 20)
627#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_2 (1 << 20)
628#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_4 (2 << 20)
629#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_8 (3 << 20)
630#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_16 (4 << 20)
631#define EXYNOS_CIDMAPARAM_R_TILE_VSIZE_32 (5 << 20)
632#define EXYNOS_CIDMAPARAM_W_MODE_LINEAR (0 << 13)
633#define EXYNOS_CIDMAPARAM_W_MODE_CONFTILE (1 << 13)
634#define EXYNOS_CIDMAPARAM_W_MODE_16X16 (2 << 13)
635#define EXYNOS_CIDMAPARAM_W_MODE_64X32 (3 << 13)
636#define EXYNOS_CIDMAPARAM_W_MODE_MASK (3 << 13)
637#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_64 (0 << 8)
638#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_128 (1 << 8)
639#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_256 (2 << 8)
640#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_512 (3 << 8)
641#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_1024 (4 << 8)
642#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_2048 (5 << 8)
643#define EXYNOS_CIDMAPARAM_W_TILE_HSIZE_4096 (6 << 8)
644#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_1 (0 << 4)
645#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_2 (1 << 4)
646#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_4 (2 << 4)
647#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_8 (3 << 4)
648#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_16 (4 << 4)
649#define EXYNOS_CIDMAPARAM_W_TILE_VSIZE_32 (5 << 4)
650
651/* Gathering Extension register */
652#define EXYNOS_CIEXTEN_TARGETH_EXT_MASK (1 << 26)
653#define EXYNOS_CIEXTEN_TARGETV_EXT_MASK (1 << 24)
654#define EXYNOS_CIEXTEN_MAINHORRATIO_EXT_MASK (0x3F << 10)
655#define EXYNOS_CIEXTEN_MAINVERRATIO_EXT_MASK (0x3F)
656#define EXYNOS_CIEXTEN_YUV444_OUT (1 << 22)
657
658/* FIMC Clock Source Select register */
659#define EXYNOS_CLKSRC_HCLK (0 << 1)
660#define EXYNOS_CLKSRC_HCLK_MASK (1 << 1)
661#define EXYNOS_CLKSRC_SCLK (1 << 1)
662
663/* SYSREG for FIMC writeback */
664#define SYSREG_CAMERA_BLK (S3C_VA_SYS + 0x0218)
665#define SYSREG_ISP_BLK (S3C_VA_SYS + 0x020c)
666#define SYSREG_FIMD0WB_DEST_MASK (0x3 << 23)
667#define SYSREG_FIMD0WB_DEST_SHIFT 23
668
669#endif /* EXYNOS_REGS_FIMC_H */
diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h
new file mode 100644
index 000000000000..9ad592707aaf
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-gsc.h
@@ -0,0 +1,284 @@
1/* linux/drivers/gpu/drm/exynos/regs-gsc.h
2 *
3 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Register definition file for Samsung G-Scaler driver
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#ifndef EXYNOS_REGS_GSC_H_
14#define EXYNOS_REGS_GSC_H_
15
16/* G-Scaler enable */
17#define GSC_ENABLE 0x00
18#define GSC_ENABLE_PP_UPDATE_TIME_MASK (1 << 9)
19#define GSC_ENABLE_PP_UPDATE_TIME_CURR (0 << 9)
20#define GSC_ENABLE_PP_UPDATE_TIME_EOPAS (1 << 9)
21#define GSC_ENABLE_CLK_GATE_MODE_MASK (1 << 8)
22#define GSC_ENABLE_CLK_GATE_MODE_FREE (1 << 8)
23#define GSC_ENABLE_IPC_MODE_MASK (1 << 7)
24#define GSC_ENABLE_NORM_MODE (0 << 7)
25#define GSC_ENABLE_IPC_MODE (1 << 7)
26#define GSC_ENABLE_PP_UPDATE_MODE_MASK (1 << 6)
27#define GSC_ENABLE_PP_UPDATE_FIRE_MODE (1 << 6)
28#define GSC_ENABLE_IN_PP_UPDATE (1 << 5)
29#define GSC_ENABLE_ON_CLEAR_MASK (1 << 4)
30#define GSC_ENABLE_ON_CLEAR_ONESHOT (1 << 4)
31#define GSC_ENABLE_QOS_ENABLE (1 << 3)
32#define GSC_ENABLE_OP_STATUS (1 << 2)
33#define GSC_ENABLE_SFR_UPDATE (1 << 1)
34#define GSC_ENABLE_ON (1 << 0)
35
36/* G-Scaler S/W reset */
37#define GSC_SW_RESET 0x04
38#define GSC_SW_RESET_SRESET (1 << 0)
39
40/* G-Scaler IRQ */
41#define GSC_IRQ 0x08
42#define GSC_IRQ_STATUS_OR_IRQ (1 << 17)
43#define GSC_IRQ_STATUS_OR_FRM_DONE (1 << 16)
44#define GSC_IRQ_OR_MASK (1 << 2)
45#define GSC_IRQ_FRMDONE_MASK (1 << 1)
46#define GSC_IRQ_ENABLE (1 << 0)
47
48/* G-Scaler input control */
49#define GSC_IN_CON 0x10
50#define GSC_IN_CHROM_STRIDE_SEL_MASK (1 << 20)
51#define GSC_IN_CHROM_STRIDE_SEPAR (1 << 20)
52#define GSC_IN_RB_SWAP_MASK (1 << 19)
53#define GSC_IN_RB_SWAP (1 << 19)
54#define GSC_IN_ROT_MASK (7 << 16)
55#define GSC_IN_ROT_270 (7 << 16)
56#define GSC_IN_ROT_90_YFLIP (6 << 16)
57#define GSC_IN_ROT_90_XFLIP (5 << 16)
58#define GSC_IN_ROT_90 (4 << 16)
59#define GSC_IN_ROT_180 (3 << 16)
60#define GSC_IN_ROT_YFLIP (2 << 16)
61#define GSC_IN_ROT_XFLIP (1 << 16)
62#define GSC_IN_RGB_TYPE_MASK (3 << 14)
63#define GSC_IN_RGB_HD_WIDE (3 << 14)
64#define GSC_IN_RGB_HD_NARROW (2 << 14)
65#define GSC_IN_RGB_SD_WIDE (1 << 14)
66#define GSC_IN_RGB_SD_NARROW (0 << 14)
67#define GSC_IN_YUV422_1P_ORDER_MASK (1 << 13)
68#define GSC_IN_YUV422_1P_ORDER_LSB_Y (0 << 13)
69#define GSC_IN_YUV422_1P_ORDER_LSB_C	(1 << 13)
70#define GSC_IN_CHROMA_ORDER_MASK (1 << 12)
71#define GSC_IN_CHROMA_ORDER_CBCR (0 << 12)
72#define GSC_IN_CHROMA_ORDER_CRCB (1 << 12)
73#define GSC_IN_FORMAT_MASK (7 << 8)
74#define GSC_IN_XRGB8888 (0 << 8)
75#define GSC_IN_RGB565 (1 << 8)
76#define GSC_IN_YUV420_2P (2 << 8)
77#define GSC_IN_YUV420_3P (3 << 8)
78#define GSC_IN_YUV422_1P (4 << 8)
79#define GSC_IN_YUV422_2P (5 << 8)
80#define GSC_IN_YUV422_3P (6 << 8)
81#define GSC_IN_TILE_TYPE_MASK (1 << 4)
82#define GSC_IN_TILE_C_16x8 (0 << 4)
83#define GSC_IN_TILE_C_16x16 (1 << 4)
84#define GSC_IN_TILE_MODE (1 << 3)
85#define GSC_IN_LOCAL_SEL_MASK (3 << 1)
86#define GSC_IN_LOCAL_CAM3 (3 << 1)
87#define GSC_IN_LOCAL_FIMD_WB (2 << 1)
88#define GSC_IN_LOCAL_CAM1 (1 << 1)
89#define GSC_IN_LOCAL_CAM0 (0 << 1)
90#define GSC_IN_PATH_MASK (1 << 0)
91#define GSC_IN_PATH_LOCAL (1 << 0)
92#define GSC_IN_PATH_MEMORY (0 << 0)
93
94/* G-Scaler source image size */
95#define GSC_SRCIMG_SIZE 0x14
96#define GSC_SRCIMG_HEIGHT_MASK (0x1fff << 16)
97#define GSC_SRCIMG_HEIGHT(x) ((x) << 16)
98#define GSC_SRCIMG_WIDTH_MASK (0x3fff << 0)
99#define GSC_SRCIMG_WIDTH(x) ((x) << 0)
100
101/* G-Scaler source image offset */
102#define GSC_SRCIMG_OFFSET 0x18
103#define GSC_SRCIMG_OFFSET_Y_MASK (0x1fff << 16)
104#define GSC_SRCIMG_OFFSET_Y(x) ((x) << 16)
105#define GSC_SRCIMG_OFFSET_X_MASK (0x1fff << 0)
106#define GSC_SRCIMG_OFFSET_X(x) ((x) << 0)
107
108/* G-Scaler cropped source image size */
109#define GSC_CROPPED_SIZE 0x1C
110#define GSC_CROPPED_HEIGHT_MASK (0x1fff << 16)
111#define GSC_CROPPED_HEIGHT(x) ((x) << 16)
112#define GSC_CROPPED_WIDTH_MASK (0x1fff << 0)
113#define GSC_CROPPED_WIDTH(x) ((x) << 0)
114
115/* G-Scaler output control */
116#define GSC_OUT_CON 0x20
117#define GSC_OUT_GLOBAL_ALPHA_MASK (0xff << 24)
118#define GSC_OUT_GLOBAL_ALPHA(x) ((x) << 24)
119#define GSC_OUT_CHROM_STRIDE_SEL_MASK (1 << 13)
120#define GSC_OUT_CHROM_STRIDE_SEPAR (1 << 13)
121#define GSC_OUT_RB_SWAP_MASK (1 << 12)
122#define GSC_OUT_RB_SWAP (1 << 12)
123#define GSC_OUT_RGB_TYPE_MASK (3 << 10)
124#define GSC_OUT_RGB_HD_NARROW (3 << 10)
125#define GSC_OUT_RGB_HD_WIDE (2 << 10)
126#define GSC_OUT_RGB_SD_NARROW (1 << 10)
127#define GSC_OUT_RGB_SD_WIDE (0 << 10)
128#define GSC_OUT_YUV422_1P_ORDER_MASK (1 << 9)
129#define GSC_OUT_YUV422_1P_ORDER_LSB_Y (0 << 9)
130#define GSC_OUT_YUV422_1P_ORDER_LSB_C	(1 << 9)
131#define GSC_OUT_CHROMA_ORDER_MASK (1 << 8)
132#define GSC_OUT_CHROMA_ORDER_CBCR (0 << 8)
133#define GSC_OUT_CHROMA_ORDER_CRCB (1 << 8)
134#define GSC_OUT_FORMAT_MASK (7 << 4)
135#define GSC_OUT_XRGB8888 (0 << 4)
136#define GSC_OUT_RGB565 (1 << 4)
137#define GSC_OUT_YUV420_2P (2 << 4)
138#define GSC_OUT_YUV420_3P (3 << 4)
139#define GSC_OUT_YUV422_1P (4 << 4)
140#define GSC_OUT_YUV422_2P (5 << 4)
141#define GSC_OUT_YUV444 (7 << 4)
142#define GSC_OUT_TILE_TYPE_MASK (1 << 2)
143#define GSC_OUT_TILE_C_16x8 (0 << 2)
144#define GSC_OUT_TILE_C_16x16 (1 << 2)
145#define GSC_OUT_TILE_MODE (1 << 1)
146#define GSC_OUT_PATH_MASK (1 << 0)
147#define GSC_OUT_PATH_LOCAL (1 << 0)
148#define GSC_OUT_PATH_MEMORY (0 << 0)
149
150/* G-Scaler scaled destination image size */
151#define GSC_SCALED_SIZE 0x24
152#define GSC_SCALED_HEIGHT_MASK (0x1fff << 16)
153#define GSC_SCALED_HEIGHT(x) ((x) << 16)
154#define GSC_SCALED_WIDTH_MASK (0x1fff << 0)
155#define GSC_SCALED_WIDTH(x) ((x) << 0)
156
157/* G-Scaler pre scale ratio */
158#define GSC_PRE_SCALE_RATIO 0x28
159#define GSC_PRESC_SHFACTOR_MASK (7 << 28)
160#define GSC_PRESC_SHFACTOR(x) ((x) << 28)
161#define GSC_PRESC_V_RATIO_MASK (7 << 16)
162#define GSC_PRESC_V_RATIO(x) ((x) << 16)
163#define GSC_PRESC_H_RATIO_MASK (7 << 0)
164#define GSC_PRESC_H_RATIO(x) ((x) << 0)
165
166/* G-Scaler main scale horizontal ratio */
167#define GSC_MAIN_H_RATIO 0x2C
168#define GSC_MAIN_H_RATIO_MASK (0xfffff << 0)
169#define GSC_MAIN_H_RATIO_VALUE(x) ((x) << 0)
170
171/* G-Scaler main scale vertical ratio */
172#define GSC_MAIN_V_RATIO 0x30
173#define GSC_MAIN_V_RATIO_MASK (0xfffff << 0)
174#define GSC_MAIN_V_RATIO_VALUE(x) ((x) << 0)
175
176/* G-Scaler input chrominance stride */
177#define GSC_IN_CHROM_STRIDE 0x3C
178#define GSC_IN_CHROM_STRIDE_MASK (0x3fff << 0)
179#define GSC_IN_CHROM_STRIDE_VALUE(x) ((x) << 0)
180
181/* G-Scaler destination image size */
182#define GSC_DSTIMG_SIZE 0x40
183#define GSC_DSTIMG_HEIGHT_MASK (0x1fff << 16)
184#define GSC_DSTIMG_HEIGHT(x) ((x) << 16)
185#define GSC_DSTIMG_WIDTH_MASK (0x1fff << 0)
186#define GSC_DSTIMG_WIDTH(x) ((x) << 0)
187
188/* G-Scaler destination image offset */
189#define GSC_DSTIMG_OFFSET 0x44
190#define GSC_DSTIMG_OFFSET_Y_MASK (0x1fff << 16)
191#define GSC_DSTIMG_OFFSET_Y(x) ((x) << 16)
192#define GSC_DSTIMG_OFFSET_X_MASK (0x1fff << 0)
193#define GSC_DSTIMG_OFFSET_X(x) ((x) << 0)
194
195/* G-Scaler output chrominance stride */
196#define GSC_OUT_CHROM_STRIDE 0x48
197#define GSC_OUT_CHROM_STRIDE_MASK (0x3fff << 0)
198#define GSC_OUT_CHROM_STRIDE_VALUE(x) ((x) << 0)
199
200/* G-Scaler input y address mask */
201#define GSC_IN_BASE_ADDR_Y_MASK 0x4C
202/* G-Scaler input y base address */
203#define GSC_IN_BASE_ADDR_Y(n) (0x50 + (n) * 0x4)
204/* G-Scaler input y base current address */
205#define GSC_IN_BASE_ADDR_Y_CUR(n) (0x60 + (n) * 0x4)
206
207/* G-Scaler input cb address mask */
208#define GSC_IN_BASE_ADDR_CB_MASK 0x7C
209/* G-Scaler input cb base address */
210#define GSC_IN_BASE_ADDR_CB(n) (0x80 + (n) * 0x4)
211/* G-Scaler input cb base current address */
212#define GSC_IN_BASE_ADDR_CB_CUR(n) (0x90 + (n) * 0x4)
213
214/* G-Scaler input cr address mask */
215#define GSC_IN_BASE_ADDR_CR_MASK 0xAC
216/* G-Scaler input cr base address */
217#define GSC_IN_BASE_ADDR_CR(n) (0xB0 + (n) * 0x4)
218/* G-Scaler input cr base current address */
219#define GSC_IN_BASE_ADDR_CR_CUR(n) (0xC0 + (n) * 0x4)
220
221/* G-Scaler input address mask */
222#define GSC_IN_CURR_ADDR_INDEX (0xf << 24)
223#define GSC_IN_CURR_GET_INDEX(x) ((x) >> 24)
224#define GSC_IN_BASE_ADDR_PINGPONG(x) ((x) << 16)
225#define GSC_IN_BASE_ADDR_MASK (0xff << 0)
226
227/* G-Scaler output y address mask */
228#define GSC_OUT_BASE_ADDR_Y_MASK 0x10C
229/* G-Scaler output y base address */
230#define GSC_OUT_BASE_ADDR_Y(n) (0x110 + (n) * 0x4)
231
232/* G-Scaler output cb address mask */
233#define GSC_OUT_BASE_ADDR_CB_MASK 0x15C
234/* G-Scaler output cb base address */
235#define GSC_OUT_BASE_ADDR_CB(n) (0x160 + (n) * 0x4)
236
237/* G-Scaler output cr address mask */
238#define GSC_OUT_BASE_ADDR_CR_MASK 0x1AC
239/* G-Scaler output cr base address */
240#define GSC_OUT_BASE_ADDR_CR(n) (0x1B0 + (n) * 0x4)
241
242/* G-Scaler output address mask */
243#define GSC_OUT_CURR_ADDR_INDEX (0xf << 24)
244#define GSC_OUT_CURR_GET_INDEX(x) ((x) >> 24)
245#define GSC_OUT_BASE_ADDR_PINGPONG(x) ((x) << 16)
246#define GSC_OUT_BASE_ADDR_MASK (0xffff << 0)
247
248/* G-Scaler horizontal scaling filter */
249#define GSC_HCOEF(n, s, x) (0x300 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
250
251/* G-Scaler vertical scaling filter */
252#define GSC_VCOEF(n, s, x) (0x200 + (n) * 0x4 + (s) * 0x30 + (x) * 0x300)
253
254/* G-Scaler BUS control */
255#define GSC_BUSCON 0xA78
256#define GSC_BUSCON_INT_TIME_MASK (1 << 8)
257#define GSC_BUSCON_INT_DATA_TRANS (0 << 8)
258#define GSC_BUSCON_INT_AXI_RESPONSE (1 << 8)
259#define GSC_BUSCON_AWCACHE(x) ((x) << 4)
260#define GSC_BUSCON_ARCACHE(x) ((x) << 0)
261
262/* G-Scaler V position */
263#define GSC_VPOSITION 0xA7C
264#define GSC_VPOS_F(x) ((x) << 0)
265
266
267/* G-Scaler clock initial count */
268#define GSC_CLK_INIT_COUNT 0xC00
269#define GSC_CLK_GATE_MODE_INIT_CNT(x) ((x) << 0)
270
271/* G-Scaler clock snoop count */
272#define GSC_CLK_SNOOP_COUNT 0xC04
273#define GSC_CLK_GATE_MODE_SNOOP_CNT(x) ((x) << 0)
274
275/* SYSCON. GSCBLK_CFG */
276#define SYSREG_GSCBLK_CFG1 (S3C_VA_SYS + 0x0224)
277#define GSC_BLK_DISP1WB_DEST(x)		((x) << 10)
278#define GSC_BLK_SW_RESET_WB_DEST(x)	(1 << (18 + (x)))
279#define GSC_BLK_PXLASYNC_LO_MASK_WB(x)	(0 << (14 + (x)))
280#define GSC_BLK_GSCL_WB_IN_SRC_SEL(x)	(1 << (2 * (x)))
281#define SYSREG_GSCBLK_CFG2 (S3C_VA_SYS + 0x2000)
282#define PXLASYNC_LO_MASK_CAMIF_GSCL(x) (1 << (x))
283
284#endif /* EXYNOS_REGS_GSC_H_ */
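Note: most multi-bit fields above come with a *_MASK companion so they can be updated read-modify-write. For example, selecting 90-degree input rotation (the helper is a sketch; 'regs' is assumed to be the mapped G-Scaler base):

static void example_set_rotation_90(void __iomem *regs)
{
	u32 cfg = readl(regs + GSC_IN_CON);

	cfg &= ~GSC_IN_ROT_MASK;	/* clear the 3-bit rotation/flip field */
	cfg |= GSC_IN_ROT_90;
	writel(cfg, regs + GSC_IN_CON);
}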
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
index 9cc7c5e9718c..ef1b3eb3ba6e 100644
--- a/drivers/gpu/drm/exynos/regs-hdmi.h
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -176,6 +176,11 @@
176#define HDMI_PHY_CMU HDMI_CTRL_BASE(0x007C) 176#define HDMI_PHY_CMU HDMI_CTRL_BASE(0x007C)
177#define HDMI_CORE_RSTOUT HDMI_CTRL_BASE(0x0080) 177#define HDMI_CORE_RSTOUT HDMI_CTRL_BASE(0x0080)
178 178
179/* PHY Control bit definition */
180
181/* HDMI_PHY_CON_0 */
182#define HDMI_PHY_POWER_OFF_EN (1 << 0)
183
179/* Video related registers */ 184/* Video related registers */
180#define HDMI_YMAX HDMI_CORE_BASE(0x0060) 185#define HDMI_YMAX HDMI_CORE_BASE(0x0060)
181#define HDMI_YMIN HDMI_CORE_BASE(0x0064) 186#define HDMI_YMIN HDMI_CORE_BASE(0x0064)
@@ -298,14 +303,14 @@
298#define HDMI_AVI_HEADER1 HDMI_CORE_BASE(0x0714) 303#define HDMI_AVI_HEADER1 HDMI_CORE_BASE(0x0714)
299#define HDMI_AVI_HEADER2 HDMI_CORE_BASE(0x0718) 304#define HDMI_AVI_HEADER2 HDMI_CORE_BASE(0x0718)
300#define HDMI_AVI_CHECK_SUM HDMI_CORE_BASE(0x071C) 305#define HDMI_AVI_CHECK_SUM HDMI_CORE_BASE(0x071C)
301#define HDMI_AVI_BYTE(n)		HDMI_CORE_BASE(0x0720 + 4 * (n)) 306#define HDMI_AVI_BYTE(n)		HDMI_CORE_BASE(0x0720 + 4 * ((n) - 1))
302 307
303#define HDMI_AUI_CON HDMI_CORE_BASE(0x0800) 308#define HDMI_AUI_CON HDMI_CORE_BASE(0x0800)
304#define HDMI_AUI_HEADER0 HDMI_CORE_BASE(0x0810) 309#define HDMI_AUI_HEADER0 HDMI_CORE_BASE(0x0810)
305#define HDMI_AUI_HEADER1 HDMI_CORE_BASE(0x0814) 310#define HDMI_AUI_HEADER1 HDMI_CORE_BASE(0x0814)
306#define HDMI_AUI_HEADER2 HDMI_CORE_BASE(0x0818) 311#define HDMI_AUI_HEADER2 HDMI_CORE_BASE(0x0818)
307#define HDMI_AUI_CHECK_SUM HDMI_CORE_BASE(0x081C) 312#define HDMI_AUI_CHECK_SUM HDMI_CORE_BASE(0x081C)
308#define HDMI_AUI_BYTE(n)		HDMI_CORE_BASE(0x0820 + 4 * (n)) 313#define HDMI_AUI_BYTE(n)		HDMI_CORE_BASE(0x0820 + 4 * ((n) - 1))
309 314
310#define HDMI_MPG_CON HDMI_CORE_BASE(0x0900) 315#define HDMI_MPG_CON HDMI_CORE_BASE(0x0900)
311#define HDMI_MPG_CHECK_SUM HDMI_CORE_BASE(0x091C) 316#define HDMI_MPG_CHECK_SUM HDMI_CORE_BASE(0x091C)
@@ -338,6 +343,19 @@
338#define HDMI_AN_SEED_2 HDMI_CORE_BASE(0x0E60) 343#define HDMI_AN_SEED_2 HDMI_CORE_BASE(0x0E60)
339#define HDMI_AN_SEED_3 HDMI_CORE_BASE(0x0E64) 344#define HDMI_AN_SEED_3 HDMI_CORE_BASE(0x0E64)
340 345
346/* AVI bit definition */
347#define HDMI_AVI_CON_DO_NOT_TRANSMIT (0 << 1)
348#define HDMI_AVI_CON_EVERY_VSYNC (1 << 1)
349
350#define AVI_ACTIVE_FORMAT_VALID (1 << 4)
351#define AVI_UNDERSCANNED_DISPLAY_VALID (1 << 1)
352
353/* AUI bit definition */
354#define HDMI_AUI_CON_NO_TRAN (0 << 0)
355
356/* VSI bit definition */
357#define HDMI_VSI_CON_DO_NOT_TRANSMIT (0 << 0)
358
341/* HDCP related registers */ 359/* HDCP related registers */
342#define HDMI_HDCP_SHA1(n) HDMI_CORE_BASE(0x7000 + 4 * (n)) 360#define HDMI_HDCP_SHA1(n) HDMI_CORE_BASE(0x7000 + 4 * (n))
343#define HDMI_HDCP_KSV_LIST(n) HDMI_CORE_BASE(0x7050 + 4 * (n)) 361#define HDMI_HDCP_KSV_LIST(n) HDMI_CORE_BASE(0x7050 + 4 * (n))
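Note: the HDMI_AVI_BYTE()/HDMI_AUI_BYTE() change above shifts the argument to the 1-based data-byte numbering used by the CEA-861 infoframe layout, so HDMI_AVI_BYTE(1) now resolves to offset 0x0720. Assumed usage (function, buffer, and byte-wide access are illustrative):

static void example_write_avi_bytes(void __iomem *regs, const u8 *frame)
{
	int i;

	/* frame[] assumed 1-based, matching the spec's DB1..DB13 naming */
	for (i = 1; i <= 13; i++)
		writeb(frame[i], regs + HDMI_AVI_BYTE(i));
}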
diff --git a/drivers/gpu/drm/exynos/regs-rotator.h b/drivers/gpu/drm/exynos/regs-rotator.h
new file mode 100644
index 000000000000..a09ac6e180da
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-rotator.h
@@ -0,0 +1,73 @@
1/* drivers/gpu/drm/exynos/regs-rotator.h
2 *
3 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
6 * Register definition file for Samsung Rotator Interface (Rotator) driver
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#ifndef EXYNOS_REGS_ROTATOR_H
14#define EXYNOS_REGS_ROTATOR_H
15
16/* Configuration */
17#define ROT_CONFIG 0x00
18#define ROT_CONFIG_IRQ (3 << 8)
19
20/* Image Control */
21#define ROT_CONTROL 0x10
22#define ROT_CONTROL_PATTERN_WRITE (1 << 16)
23#define ROT_CONTROL_FMT_YCBCR420_2P (1 << 8)
24#define ROT_CONTROL_FMT_RGB888 (6 << 8)
25#define ROT_CONTROL_FMT_MASK (7 << 8)
26#define ROT_CONTROL_FLIP_VERTICAL (2 << 6)
27#define ROT_CONTROL_FLIP_HORIZONTAL (3 << 6)
28#define ROT_CONTROL_FLIP_MASK (3 << 6)
29#define ROT_CONTROL_ROT_90 (1 << 4)
30#define ROT_CONTROL_ROT_180 (2 << 4)
31#define ROT_CONTROL_ROT_270 (3 << 4)
32#define ROT_CONTROL_ROT_MASK (3 << 4)
33#define ROT_CONTROL_START (1 << 0)
34
35/* Status */
36#define ROT_STATUS 0x20
37#define ROT_STATUS_IRQ_PENDING(x) (1 << (x))
38#define ROT_STATUS_IRQ(x) (((x) >> 8) & 0x3)
39#define ROT_STATUS_IRQ_VAL_COMPLETE 1
40#define ROT_STATUS_IRQ_VAL_ILLEGAL 2
41
42/* Buffer Address */
43#define ROT_SRC_BUF_ADDR(n) (0x30 + ((n) << 2))
44#define ROT_DST_BUF_ADDR(n) (0x50 + ((n) << 2))
45
46/* Buffer Size */
47#define ROT_SRC_BUF_SIZE 0x3c
48#define ROT_DST_BUF_SIZE 0x5c
49#define ROT_SET_BUF_SIZE_H(x) ((x) << 16)
50#define ROT_SET_BUF_SIZE_W(x) ((x) << 0)
51#define ROT_GET_BUF_SIZE_H(x) ((x) >> 16)
52#define ROT_GET_BUF_SIZE_W(x) ((x) & 0xffff)
53
54/* Crop Position */
55#define ROT_SRC_CROP_POS 0x40
56#define ROT_DST_CROP_POS 0x60
57#define ROT_CROP_POS_Y(x) ((x) << 16)
58#define ROT_CROP_POS_X(x) ((x) << 0)
59
60/* Source Crop Size */
61#define ROT_SRC_CROP_SIZE 0x44
62#define ROT_SRC_CROP_SIZE_H(x) ((x) << 16)
63#define ROT_SRC_CROP_SIZE_W(x) ((x) << 0)
64
65/* Round to nearest aligned value */
66#define ROT_ALIGN(x, align, mask) (((x) + (1 << ((align) - 1))) & (mask))
67/* Minimum limit value */
68#define ROT_MIN(min, mask) (((min) + ~(mask)) & (mask))
69/* Maximum limit value */
70#define ROT_MAX(max, mask) ((max) & (mask))
71
72#endif /* EXYNOS_REGS_ROTATOR_H */
73
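Note: ROT_ALIGN() rounds to the nearest multiple of 2^align by adding half a step before masking, while ROT_MIN()/ROT_MAX() align up and down respectively. Worked through with 4-byte alignment (align = 2, mask = ~0x3):

/* ROT_ALIGN(5, 2, ~0x3) = (5 + 2) & ~0x3 = 4    -- rounds down to 4
 * ROT_ALIGN(7, 2, ~0x3) = (7 + 2) & ~0x3 = 8    -- rounds up to 8
 * ROT_MIN(5, ~0x3)      = (5 + 3) & ~0x3 = 8    -- smallest aligned value >= 5 in steps of 4
 * ROT_MAX(7, ~0x3)      = 7 & ~0x3       = 4    -- largest aligned value <= 7
 */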
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 1ceca3d13b65..23e14e93991f 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -523,7 +523,7 @@ void cdv_intel_attach_force_audio_property(struct drm_connector *connector)
523 523
524 dev_priv->force_audio_property = prop; 524 dev_priv->force_audio_property = prop;
525 } 525 }
526 drm_connector_attach_property(connector, prop, 0); 526 drm_object_attach_property(&connector->base, prop, 0);
527} 527}
528 528
529 529
@@ -553,7 +553,7 @@ void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector)
553 dev_priv->broadcast_rgb_property = prop; 553 dev_priv->broadcast_rgb_property = prop;
554 } 554 }
555 555
556 drm_connector_attach_property(connector, prop, 0); 556 drm_object_attach_property(&connector->base, prop, 0);
557} 557}
558 558
559/* Cedarview */ 559/* Cedarview */
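Note: the gma500 hunks from here on are mechanical fallout of a DRM core rename: the drm_connector_property_* helpers became drm_object_property_* functions operating on the embedded struct drm_mode_object, so every call now passes &connector->base. Before and after, side by side:

	drm_connector_attach_property(connector, prop, 0);	/* old helper, removed */
	drm_object_attach_property(&connector->base, prop, 0);	/* replacement */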
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index e3a3978cf320..51044cc55cf2 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -1650,7 +1650,7 @@ cdv_intel_dp_set_property(struct drm_connector *connector,
1650 struct cdv_intel_dp *intel_dp = encoder->dev_priv; 1650 struct cdv_intel_dp *intel_dp = encoder->dev_priv;
1651 int ret; 1651 int ret;
1652 1652
1653 ret = drm_connector_property_set_value(connector, property, val); 1653 ret = drm_object_property_set_value(&connector->base, property, val);
1654 if (ret) 1654 if (ret)
1655 return ret; 1655 return ret;
1656 1656
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 7272a461edfe..e223b500022e 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -185,14 +185,14 @@ static int cdv_hdmi_set_property(struct drm_connector *connector,
185 return -1; 185 return -1;
186 } 186 }
187 187
188 if (drm_connector_property_get_value(connector, 188 if (drm_object_property_get_value(&connector->base,
189 property, &curValue)) 189 property, &curValue))
190 return -1; 190 return -1;
191 191
192 if (curValue == value) 192 if (curValue == value)
193 return 0; 193 return 0;
194 194
195 if (drm_connector_property_set_value(connector, 195 if (drm_object_property_set_value(&connector->base,
196 property, value)) 196 property, value))
197 return -1; 197 return -1;
198 198
@@ -341,7 +341,7 @@ void cdv_hdmi_init(struct drm_device *dev,
341 connector->interlace_allowed = false; 341 connector->interlace_allowed = false;
342 connector->doublescan_allowed = false; 342 connector->doublescan_allowed = false;
343 343
344 drm_connector_attach_property(connector, 344 drm_object_attach_property(&connector->base,
345 dev->mode_config.scaling_mode_property, 345 dev->mode_config.scaling_mode_property,
346 DRM_MODE_SCALE_FULLSCREEN); 346 DRM_MODE_SCALE_FULLSCREEN);
347 347
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index b362dd39bf5a..d81dbc3368f0 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -479,7 +479,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
479 return -1; 479 return -1;
480 } 480 }
481 481
482 if (drm_connector_property_get_value(connector, 482 if (drm_object_property_get_value(&connector->base,
483 property, 483 property,
484 &curValue)) 484 &curValue))
485 return -1; 485 return -1;
@@ -487,7 +487,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
487 if (curValue == value) 487 if (curValue == value)
488 return 0; 488 return 0;
489 489
490 if (drm_connector_property_set_value(connector, 490 if (drm_object_property_set_value(&connector->base,
491 property, 491 property,
492 value)) 492 value))
493 return -1; 493 return -1;
@@ -502,7 +502,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
502 return -1; 502 return -1;
503 } 503 }
504 } else if (!strcmp(property->name, "backlight") && encoder) { 504 } else if (!strcmp(property->name, "backlight") && encoder) {
505 if (drm_connector_property_set_value(connector, 505 if (drm_object_property_set_value(&connector->base,
506 property, 506 property,
507 value)) 507 value))
508 return -1; 508 return -1;
@@ -671,10 +671,10 @@ void cdv_intel_lvds_init(struct drm_device *dev,
671 connector->doublescan_allowed = false; 671 connector->doublescan_allowed = false;
672 672
673 /*Attach connector properties*/ 673 /*Attach connector properties*/
674 drm_connector_attach_property(connector, 674 drm_object_attach_property(&connector->base,
675 dev->mode_config.scaling_mode_property, 675 dev->mode_config.scaling_mode_property,
676 DRM_MODE_SCALE_FULLSCREEN); 676 DRM_MODE_SCALE_FULLSCREEN);
677 drm_connector_attach_property(connector, 677 drm_object_attach_property(&connector->base,
678 dev_priv->backlight_property, 678 dev_priv->backlight_property,
679 BRIGHTNESS_MAX_LEVEL); 679 BRIGHTNESS_MAX_LEVEL);
680 680
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 32dba2ab53e1..2d4ab48f07a2 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -265,13 +265,13 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
265 goto set_prop_error; 265 goto set_prop_error;
266 } 266 }
267 267
268 if (drm_connector_property_get_value(connector, property, &val)) 268 if (drm_object_property_get_value(&connector->base, property, &val))
269 goto set_prop_error; 269 goto set_prop_error;
270 270
271 if (val == value) 271 if (val == value)
272 goto set_prop_done; 272 goto set_prop_done;
273 273
274 if (drm_connector_property_set_value(connector, 274 if (drm_object_property_set_value(&connector->base,
275 property, value)) 275 property, value))
276 goto set_prop_error; 276 goto set_prop_error;
277 277
@@ -296,7 +296,7 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
296 } 296 }
297 } 297 }
298 } else if (!strcmp(property->name, "backlight") && encoder) { 298 } else if (!strcmp(property->name, "backlight") && encoder) {
299 if (drm_connector_property_set_value(connector, property, 299 if (drm_object_property_set_value(&connector->base, property,
300 value)) 300 value))
301 goto set_prop_error; 301 goto set_prop_error;
302 else 302 else
@@ -506,7 +506,7 @@ void mdfld_dsi_output_init(struct drm_device *dev,
506 506
507 dev_dbg(dev->dev, "init DSI output on pipe %d\n", pipe); 507 dev_dbg(dev->dev, "init DSI output on pipe %d\n", pipe);
508 508
509 if (!dev || ((pipe != 0) && (pipe != 2))) { 509 if (pipe != 0 && pipe != 2) {
510 DRM_ERROR("Invalid parameter\n"); 510 DRM_ERROR("Invalid parameter\n");
511 return; 511 return;
512 } 512 }
@@ -572,10 +572,10 @@ void mdfld_dsi_output_init(struct drm_device *dev,
572 connector->doublescan_allowed = false; 572 connector->doublescan_allowed = false;
573 573
574 /*attach properties*/ 574 /*attach properties*/
575 drm_connector_attach_property(connector, 575 drm_object_attach_property(&connector->base,
576 dev->mode_config.scaling_mode_property, 576 dev->mode_config.scaling_mode_property,
577 DRM_MODE_SCALE_FULLSCREEN); 577 DRM_MODE_SCALE_FULLSCREEN);
578 drm_connector_attach_property(connector, 578 drm_object_attach_property(&connector->base,
579 dev_priv->backlight_property, 579 dev_priv->backlight_property,
580 MDFLD_DSI_BRIGHTNESS_MAX_LEVEL); 580 MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
581 581
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index dec6a9aea3c6..74485dc43945 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -820,7 +820,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
820 REG_WRITE(map->pos, 0); 820 REG_WRITE(map->pos, 0);
821 821
822 if (psb_intel_encoder) 822 if (psb_intel_encoder)
823 drm_connector_property_get_value(connector, 823 drm_object_property_get_value(&connector->base,
824 dev->mode_config.scaling_mode_property, &scalingType); 824 dev->mode_config.scaling_mode_property, &scalingType);
825 825
826 if (scalingType == DRM_MODE_SCALE_NO_SCALE) { 826 if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
diff --git a/drivers/gpu/drm/gma500/oaktrail.h b/drivers/gpu/drm/gma500/oaktrail.h
index f2f9f38a5362..30adbbe23024 100644
--- a/drivers/gpu/drm/gma500/oaktrail.h
+++ b/drivers/gpu/drm/gma500/oaktrail.h
@@ -249,3 +249,9 @@ extern void oaktrail_hdmi_i2c_exit(struct pci_dev *dev);
249extern void oaktrail_hdmi_save(struct drm_device *dev); 249extern void oaktrail_hdmi_save(struct drm_device *dev);
250extern void oaktrail_hdmi_restore(struct drm_device *dev); 250extern void oaktrail_hdmi_restore(struct drm_device *dev);
251extern void oaktrail_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev); 251extern void oaktrail_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev);
252extern int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
253 struct drm_display_mode *adjusted_mode, int x, int y,
254 struct drm_framebuffer *old_fb);
255extern void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode);
256
257
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index cdafd2acc72f..3071526bc3c1 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -168,6 +168,11 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
168 const struct psb_offset *map = &dev_priv->regmap[pipe]; 168 const struct psb_offset *map = &dev_priv->regmap[pipe];
169 u32 temp; 169 u32 temp;
170 170
171 if (pipe == 1) {
172 oaktrail_crtc_hdmi_dpms(crtc, mode);
173 return;
174 }
175
171 if (!gma_power_begin(dev, true)) 176 if (!gma_power_begin(dev, true))
172 return; 177 return;
173 178
@@ -302,6 +307,9 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
302 uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN; 307 uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
303 struct drm_connector *connector; 308 struct drm_connector *connector;
304 309
310 if (pipe == 1)
311 return oaktrail_crtc_hdmi_mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
312
305 if (!gma_power_begin(dev, true)) 313 if (!gma_power_begin(dev, true))
306 return 0; 314 return 0;
307 315
@@ -343,7 +351,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
343 (mode->crtc_vdisplay - 1)); 351 (mode->crtc_vdisplay - 1));
344 352
345 if (psb_intel_encoder) 353 if (psb_intel_encoder)
346 drm_connector_property_get_value(connector, 354 drm_object_property_get_value(&connector->base,
347 dev->mode_config.scaling_mode_property, &scalingType); 355 dev->mode_config.scaling_mode_property, &scalingType);
348 356
349 if (scalingType == DRM_MODE_SCALE_NO_SCALE) { 357 if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 010b820744a5..08747fd7105c 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -544,7 +544,7 @@ const struct psb_ops oaktrail_chip_ops = {
544 .accel_2d = 1, 544 .accel_2d = 1,
545 .pipes = 2, 545 .pipes = 2,
546 .crtcs = 2, 546 .crtcs = 2,
547 .hdmi_mask = (1 << 0), 547 .hdmi_mask = (1 << 1),
548 .lvds_mask = (1 << 0), 548 .lvds_mask = (1 << 0),
549 .cursor_needs_phys = 0, 549 .cursor_needs_phys = 0,
550 .sgx_offset = MRST_SGX_OFFSET, 550 .sgx_offset = MRST_SGX_OFFSET,
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 69e51e903f35..f036f1fc161e 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -155,6 +155,345 @@ static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
155 HDMI_READ(HDMI_HCR); 155 HDMI_READ(HDMI_HCR);
156} 156}
157 157
158static void wait_for_vblank(struct drm_device *dev)
159{
160 /* Wait for 20ms, i.e. one cycle at 50Hz. */
161 mdelay(20);
162}
163
164static unsigned int htotal_calculate(struct drm_display_mode *mode)
165{
166 u32 htotal, new_crtc_htotal;
167
168 htotal = (mode->crtc_hdisplay - 1) | ((mode->crtc_htotal - 1) << 16);
169
170 /*
171 * 1024 x 768 new_crtc_htotal = 0x1024;
172 * 1280 x 1024 new_crtc_htotal = 0x0c34;
173 */
174 new_crtc_htotal = (mode->crtc_htotal - 1) * 200 * 1000 / mode->clock;
175
176 DRM_DEBUG_KMS("new crtc htotal 0x%04x\n", new_crtc_htotal);
177 return (mode->crtc_hdisplay - 1) | (new_crtc_htotal << 16);
178}
179
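/*
 * Worked check of the table in htotal_calculate() above, assuming the
 * standard VESA timings for those modes: 1024x768@60 has crtc_htotal =
 * 1344 and a 65000 kHz clock, so 1343 * 200 * 1000 / 65000 = 4132 =
 * 0x1024; 1280x1024@60 (crtc_htotal = 1688, clock = 108000) gives
 * 1687 * 200 * 1000 / 108000 = 3124 = 0x0c34.
 */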
180static void oaktrail_hdmi_find_dpll(struct drm_crtc *crtc, int target,
181 int refclk, struct oaktrail_hdmi_clock *best_clock)
182{
183 int np_min, np_max, nr_min, nr_max;
184 int np, nr, nf;
185
186 np_min = DIV_ROUND_UP(oaktrail_hdmi_limit.vco.min, target * 10);
187 np_max = oaktrail_hdmi_limit.vco.max / (target * 10);
188 if (np_min < oaktrail_hdmi_limit.np.min)
189 np_min = oaktrail_hdmi_limit.np.min;
190 if (np_max > oaktrail_hdmi_limit.np.max)
191 np_max = oaktrail_hdmi_limit.np.max;
192
193 nr_min = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_max));
194 nr_max = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_min));
195 if (nr_min < oaktrail_hdmi_limit.nr.min)
196 nr_min = oaktrail_hdmi_limit.nr.min;
197 if (nr_max > oaktrail_hdmi_limit.nr.max)
198 nr_max = oaktrail_hdmi_limit.nr.max;
199
200 np = DIV_ROUND_UP((refclk * 1000), (target * 10 * nr_max));
201 nr = DIV_ROUND_UP((refclk * 1000), (target * 10 * np));
202 nf = DIV_ROUND_CLOSEST((target * 10 * np * nr), refclk);
203 DRM_DEBUG_KMS("np, nr, nf %d %d %d\n", np, nr, nf);
204
205 /*
206 * 1024 x 768 np = 1; nr = 0x26; nf = 0x0fd8000;
207 * 1280 x 1024 np = 1; nr = 0x17; nf = 0x1034000;
208 */
209 best_clock->np = np;
210 best_clock->nr = nr - 1;
211 best_clock->nf = (nf << 14);
212}
213
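/*
 * Sanity check against the values quoted above, taking refclk = 25000
 * as programmed by the mode-set path below: for 1024x768 (target =
 * 65000, np = 1), nr = DIV_ROUND_UP(25000000, 650000) = 39, stored as
 * nr - 1 = 0x26, and nf = DIV_ROUND_CLOSEST(650000 * 39, 25000) = 1014,
 * stored as 1014 << 14 = 0x0fd8000 -- exactly the numbers above.
 */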
214static void scu_busy_loop(void __iomem *scu_base)
215{
216 u32 status = 0;
217 u32 loop_count = 0;
218
219 status = readl(scu_base + 0x04);
220 while (status & 1) {
221 udelay(1); /* SCU processing time is a few microseconds */
222 status = readl(scu_base + 0x04);
223 loop_count++;
224 /* give up if the SCU hasn't cleared the busy bit after many retries */
225 if (loop_count > 1000) {
226 DRM_DEBUG_KMS("SCU IPC timed out\n");
227 return;
228 }
229 }
230}
231
232/*
233 * You don't want to know, you really really don't want to know....
234 *
235 * This is magic. However, it's safe magic because of the way the platform
236 * works, and it is necessary magic.
237 */
238static void oaktrail_hdmi_reset(struct drm_device *dev)
239{
240 void __iomem *base;
241 unsigned long scu_ipc_mmio = 0xff11c000UL;
242 int scu_len = 1024;
243
244 base = ioremap((resource_size_t)scu_ipc_mmio, scu_len);
245 if (base == NULL) {
246 DRM_ERROR("failed to map scu mmio\n");
247 return;
248 }
249
250 /* scu ipc: assert hdmi controller reset */
251 writel(0xff11d118, base + 0x0c);
252 writel(0x7fffffdf, base + 0x80);
253 writel(0x42005, base + 0x0);
254 scu_busy_loop(base);
255
256 /* scu ipc: de-assert hdmi controller reset */
257 writel(0xff11d118, base + 0x0c);
258 writel(0x7fffffff, base + 0x80);
259 writel(0x42005, base + 0x0);
260 scu_busy_loop(base);
261
262 iounmap(base);
263}
264
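/*
 * Tentative decoding of the magic, inferred purely from the access
 * pattern above (no public SCU documentation confirms it): 0x0c looks
 * like the IPC address register, 0x80 the data register and 0x00 the
 * command register, with the two data words differing only in bit 5 --
 * presumably the HDMI controller reset line behind address 0xff11d118.
 */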
265int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
266 struct drm_display_mode *mode,
267 struct drm_display_mode *adjusted_mode,
268 int x, int y,
269 struct drm_framebuffer *old_fb)
270{
271 struct drm_device *dev = crtc->dev;
272 struct drm_psb_private *dev_priv = dev->dev_private;
273 struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
274 int pipe = 1;
275 int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
276 int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
277 int hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
278 int vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
279 int vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
280 int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
281 int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
282 int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
283 int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
284 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
285 int refclk;
286 struct oaktrail_hdmi_clock clock;
287 u32 dspcntr, pipeconf, dpll, temp;
288 int dspcntr_reg = DSPBCNTR;
289
290 if (!gma_power_begin(dev, true))
291 return 0;
292
293 /* Disable the VGA plane that we never use */
294 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
295
296 /* Disable dpll if necessary */
297 dpll = REG_READ(DPLL_CTRL);
298 if ((dpll & DPLL_PWRDN) == 0) {
299 REG_WRITE(DPLL_CTRL, dpll | (DPLL_PWRDN | DPLL_RESET));
300 REG_WRITE(DPLL_DIV_CTRL, 0x00000000);
301 REG_WRITE(DPLL_STATUS, 0x1);
302 }
303 udelay(150);
304
305 /* Reset controller */
306 oaktrail_hdmi_reset(dev);
307
308 /* program and enable dpll */
309 refclk = 25000;
310 oaktrail_hdmi_find_dpll(crtc, adjusted_mode->clock, refclk, &clock);
311
312 /* Set the DPLL */
313 dpll = REG_READ(DPLL_CTRL);
314 dpll &= ~DPLL_PDIV_MASK;
315 dpll &= ~(DPLL_PWRDN | DPLL_RESET);
316 REG_WRITE(DPLL_CTRL, 0x00000008);
317 REG_WRITE(DPLL_DIV_CTRL, ((clock.nf << 6) | clock.nr));
318 REG_WRITE(DPLL_ADJUST, ((clock.nf >> 14) - 1));
319 REG_WRITE(DPLL_CTRL, (dpll | (clock.np << DPLL_PDIV_SHIFT) | DPLL_ENSTAT | DPLL_DITHEN));
320 REG_WRITE(DPLL_UPDATE, 0x80000000);
321 REG_WRITE(DPLL_CLK_ENABLE, 0x80050102);
322 udelay(150);
323
324 /* configure HDMI */
325 HDMI_WRITE(0x1004, 0x1fd);
326 HDMI_WRITE(0x2000, 0x1);
327 HDMI_WRITE(0x2008, 0x0);
328 HDMI_WRITE(0x3130, 0x8);
329 HDMI_WRITE(0x101c, 0x1800810);
330
331 temp = htotal_calculate(adjusted_mode);
332 REG_WRITE(htot_reg, temp);
333 REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
334 REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
335 REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
336 REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
337 REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
338 REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
339
340 REG_WRITE(PCH_HTOTAL_B, (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
341 REG_WRITE(PCH_HBLANK_B, (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
342 REG_WRITE(PCH_HSYNC_B, (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
343 REG_WRITE(PCH_VTOTAL_B, (adjusted_mode->crtc_vdisplay - 1) | ((adjusted_mode->crtc_vtotal - 1) << 16));
344 REG_WRITE(PCH_VBLANK_B, (adjusted_mode->crtc_vblank_start - 1) | ((adjusted_mode->crtc_vblank_end - 1) << 16));
345 REG_WRITE(PCH_VSYNC_B, (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
346 REG_WRITE(PCH_PIPEBSRC, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
347
348 temp = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
349 HDMI_WRITE(HDMI_HBLANK_A, ((adjusted_mode->crtc_hdisplay - 1) << 16) | temp);
350
351 REG_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1));
352 REG_WRITE(dsppos_reg, 0);
353
354 /* Flush the plane changes */
355 {
356 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
357 crtc_funcs->mode_set_base(crtc, x, y, old_fb);
358 }
359
360 /* Set up the display plane register */
361 dspcntr = REG_READ(dspcntr_reg);
362 dspcntr |= DISPPLANE_GAMMA_ENABLE;
363 dspcntr |= DISPPLANE_SEL_PIPE_B;
364 dspcntr |= DISPLAY_PLANE_ENABLE;
365
366 /* setup pipeconf */
367 pipeconf = REG_READ(pipeconf_reg);
368 pipeconf |= PIPEACONF_ENABLE;
369
370 REG_WRITE(pipeconf_reg, pipeconf);
371 REG_READ(pipeconf_reg);
372
373 REG_WRITE(PCH_PIPEBCONF, pipeconf);
374 REG_READ(PCH_PIPEBCONF);
375 wait_for_vblank(dev);
376
377 REG_WRITE(dspcntr_reg, dspcntr);
378 wait_for_vblank(dev);
379
380 gma_power_end(dev);
381
382 return 0;
383}
384
385void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
386{
387 struct drm_device *dev = crtc->dev;
388 u32 temp;
389
390 DRM_DEBUG_KMS("%s %d\n", __func__, mode);
391
392 switch (mode) {
393 case DRM_MODE_DPMS_OFF:
394 REG_WRITE(VGACNTRL, 0x80000000);
395
396 /* Disable plane */
397 temp = REG_READ(DSPBCNTR);
398 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
399 REG_WRITE(DSPBCNTR, temp & ~DISPLAY_PLANE_ENABLE);
400 REG_READ(DSPBCNTR);
401 /* Flush the plane changes */
402 REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
403 REG_READ(DSPBSURF);
404 }
405
406 /* Disable pipe B */
407 temp = REG_READ(PIPEBCONF);
408 if ((temp & PIPEACONF_ENABLE) != 0) {
409 REG_WRITE(PIPEBCONF, temp & ~PIPEACONF_ENABLE);
410 REG_READ(PIPEBCONF);
411 }
412
413 /* Disable LNW Pipes, etc */
414 temp = REG_READ(PCH_PIPEBCONF);
415 if ((temp & PIPEACONF_ENABLE) != 0) {
416 REG_WRITE(PCH_PIPEBCONF, temp & ~PIPEACONF_ENABLE);
417 REG_READ(PCH_PIPEBCONF);
418 }
419
420 /* wait for pipe off */
421 udelay(150);
422
423 /* Disable dpll */
424 temp = REG_READ(DPLL_CTRL);
425 if ((temp & DPLL_PWRDN) == 0) {
426 REG_WRITE(DPLL_CTRL, temp | (DPLL_PWRDN | DPLL_RESET));
427 REG_WRITE(DPLL_STATUS, 0x1);
428 }
429
430 /* wait for dpll off */
431 udelay(150);
432
433 break;
434 case DRM_MODE_DPMS_ON:
435 case DRM_MODE_DPMS_STANDBY:
436 case DRM_MODE_DPMS_SUSPEND:
437 /* Enable dpll */
438 temp = REG_READ(DPLL_CTRL);
439 if ((temp & DPLL_PWRDN) != 0) {
440 REG_WRITE(DPLL_CTRL, temp & ~(DPLL_PWRDN | DPLL_RESET));
441 temp = REG_READ(DPLL_CLK_ENABLE);
442 REG_WRITE(DPLL_CLK_ENABLE, temp | DPLL_EN_DISP | DPLL_SEL_HDMI | DPLL_EN_HDMI);
443 REG_READ(DPLL_CLK_ENABLE);
444 }
445 /* wait for dpll warm up */
446 udelay(150);
447
448 /* Enable pipe B */
449 temp = REG_READ(PIPEBCONF);
450 if ((temp & PIPEACONF_ENABLE) == 0) {
451 REG_WRITE(PIPEBCONF, temp | PIPEACONF_ENABLE);
452 REG_READ(PIPEBCONF);
453 }
454
455 /* Enable LNW Pipe B */
456 temp = REG_READ(PCH_PIPEBCONF);
457 if ((temp & PIPEACONF_ENABLE) == 0) {
458 REG_WRITE(PCH_PIPEBCONF, temp | PIPEACONF_ENABLE);
459 REG_READ(PCH_PIPEBCONF);
460 }
461
462 wait_for_vblank(dev);
463
464 /* Enable plane */
465 temp = REG_READ(DSPBCNTR);
466 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
467 REG_WRITE(DSPBCNTR, temp | DISPLAY_PLANE_ENABLE);
468 /* Flush the plane changes */
469 REG_WRITE(DSPBSURF, REG_READ(DSPBSURF));
470 REG_READ(DSPBSURF);
471 }
472
473 psb_intel_crtc_load_lut(crtc);
474 }
475
476 /* DSPARB */
477 REG_WRITE(DSPARB, 0x00003fbf);
478
479 /* FW1 */
480 REG_WRITE(0x70034, 0x3f880a0a);
481
482 /* FW2 */
483 REG_WRITE(0x70038, 0x0b060808);
484
485 /* FW4 */
486 REG_WRITE(0x70050, 0x08030404);
487
488 /* FW5 */
489 REG_WRITE(0x70054, 0x04040404);
490
491 /* LNC Chicken Bits - Squawk! */
492 REG_WRITE(0x70400, 0x4000);
493
494 return;
495}
496
158static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode) 497static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
159{ 498{
160 static int dpms_mode = -1; 499 static int dpms_mode = -1;
@@ -233,13 +572,15 @@ static const unsigned char raw_edid[] = {
233 572
234static int oaktrail_hdmi_get_modes(struct drm_connector *connector) 573static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
235{ 574{
236 struct drm_device *dev = connector->dev;
237 struct drm_psb_private *dev_priv = dev->dev_private;
238 struct i2c_adapter *i2c_adap; 575 struct i2c_adapter *i2c_adap;
239 struct edid *edid; 576 struct edid *edid;
240 struct drm_display_mode *mode, *t; 577 int ret = 0;
241 int i = 0, ret = 0;
242 578
579 /*
580 * FIXME: We need to figure this lot out. In theory we can
581 * read the EDID somehow but I've yet to find working reference
582 * code.
583 */
243 i2c_adap = i2c_get_adapter(3); 584 i2c_adap = i2c_get_adapter(3);
244 if (i2c_adap == NULL) { 585 if (i2c_adap == NULL) {
245 DRM_ERROR("No ddc adapter available!\n"); 586 DRM_ERROR("No ddc adapter available!\n");
@@ -253,17 +594,7 @@ static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
253 drm_mode_connector_update_edid_property(connector, edid); 594 drm_mode_connector_update_edid_property(connector, edid);
254 ret = drm_add_edid_modes(connector, edid); 595 ret = drm_add_edid_modes(connector, edid);
255 } 596 }
256 597 return ret;
257 /*
258 * prune modes that require frame buffer bigger than stolen mem
259 */
260 list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
261 if ((mode->hdisplay * mode->vdisplay * 4) >= dev_priv->vram_stolen_size) {
262 i++;
263 drm_mode_remove(connector, mode);
264 }
265 }
266 return ret - i;
267} 598}
268 599
269static void oaktrail_hdmi_mode_set(struct drm_encoder *encoder, 600static void oaktrail_hdmi_mode_set(struct drm_encoder *encoder,
@@ -349,6 +680,7 @@ void oaktrail_hdmi_init(struct drm_device *dev,
349 connector->interlace_allowed = false; 680 connector->interlace_allowed = false;
350 connector->doublescan_allowed = false; 681 connector->doublescan_allowed = false;
351 drm_sysfs_connector_add(connector); 682 drm_sysfs_connector_add(connector);
683 dev_info(dev->dev, "HDMI initialised.\n");
352 684
353 return; 685 return;
354 686
@@ -403,6 +735,9 @@ void oaktrail_hdmi_setup(struct drm_device *dev)
403 735
404 dev_priv->hdmi_priv = hdmi_dev; 736 dev_priv->hdmi_priv = hdmi_dev;
405 oaktrail_hdmi_audio_disable(dev); 737 oaktrail_hdmi_audio_disable(dev);
738
739 dev_info(dev->dev, "HDMI hardware present.\n");
740
406 return; 741 return;
407 742
408free: 743free:
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 558c77fb55ec..325013a9c48c 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -133,8 +133,8 @@ static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
133 return; 133 return;
134 } 134 }
135 135
136 drm_connector_property_get_value( 136 drm_object_property_get_value(
137 connector, 137 &connector->base,
138 dev->mode_config.scaling_mode_property, 138 dev->mode_config.scaling_mode_property,
139 &v); 139 &v);
140 140
@@ -363,10 +363,10 @@ void oaktrail_lvds_init(struct drm_device *dev,
363 connector->interlace_allowed = false; 363 connector->interlace_allowed = false;
364 connector->doublescan_allowed = false; 364 connector->doublescan_allowed = false;
365 365
366 drm_connector_attach_property(connector, 366 drm_object_attach_property(&connector->base,
367 dev->mode_config.scaling_mode_property, 367 dev->mode_config.scaling_mode_property,
368 DRM_MODE_SCALE_FULLSCREEN); 368 DRM_MODE_SCALE_FULLSCREEN);
369 drm_connector_attach_property(connector, 369 drm_object_attach_property(&connector->base,
370 dev_priv->backlight_property, 370 dev_priv->backlight_property,
371 BRIGHTNESS_MAX_LEVEL); 371 BRIGHTNESS_MAX_LEVEL);
372 372
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 2a4c3a9e33e3..9fa5fa2e6192 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -603,7 +603,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
603 goto set_prop_error; 603 goto set_prop_error;
604 } 604 }
605 605
606 if (drm_connector_property_get_value(connector, 606 if (drm_object_property_get_value(&connector->base,
607 property, 607 property,
608 &curval)) 608 &curval))
609 goto set_prop_error; 609 goto set_prop_error;
@@ -611,7 +611,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
611 if (curval == value) 611 if (curval == value)
612 goto set_prop_done; 612 goto set_prop_done;
613 613
614 if (drm_connector_property_set_value(connector, 614 if (drm_object_property_set_value(&connector->base,
615 property, 615 property,
616 value)) 616 value))
617 goto set_prop_error; 617 goto set_prop_error;
@@ -626,7 +626,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
626 goto set_prop_error; 626 goto set_prop_error;
627 } 627 }
628 } else if (!strcmp(property->name, "backlight")) { 628 } else if (!strcmp(property->name, "backlight")) {
629 if (drm_connector_property_set_value(connector, 629 if (drm_object_property_set_value(&connector->base,
630 property, 630 property,
631 value)) 631 value))
632 goto set_prop_error; 632 goto set_prop_error;
@@ -746,10 +746,10 @@ void psb_intel_lvds_init(struct drm_device *dev,
746 connector->doublescan_allowed = false; 746 connector->doublescan_allowed = false;
747 747
748 /*Attach connector properties*/ 748 /*Attach connector properties*/
749 drm_connector_attach_property(connector, 749 drm_object_attach_property(&connector->base,
750 dev->mode_config.scaling_mode_property, 750 dev->mode_config.scaling_mode_property,
751 DRM_MODE_SCALE_FULLSCREEN); 751 DRM_MODE_SCALE_FULLSCREEN);
752 drm_connector_attach_property(connector, 752 drm_object_attach_property(&connector->base,
753 dev_priv->backlight_property, 753 dev_priv->backlight_property,
754 BRIGHTNESS_MAX_LEVEL); 754 BRIGHTNESS_MAX_LEVEL);
755 755
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index fc9292705dbf..a4cc777ab7a6 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1694,7 +1694,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
1694 uint8_t cmd; 1694 uint8_t cmd;
1695 int ret; 1695 int ret;
1696 1696
1697 ret = drm_connector_property_set_value(connector, property, val); 1697 ret = drm_object_property_set_value(&connector->base, property, val);
1698 if (ret) 1698 if (ret)
1699 return ret; 1699 return ret;
1700 1700
@@ -1749,7 +1749,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
1749 } else if (IS_TV_OR_LVDS(psb_intel_sdvo_connector)) { 1749 } else if (IS_TV_OR_LVDS(psb_intel_sdvo_connector)) {
1750 temp_value = val; 1750 temp_value = val;
1751 if (psb_intel_sdvo_connector->left == property) { 1751 if (psb_intel_sdvo_connector->left == property) {
1752 drm_connector_property_set_value(connector, 1752 drm_object_property_set_value(&connector->base,
1753 psb_intel_sdvo_connector->right, val); 1753 psb_intel_sdvo_connector->right, val);
1754 if (psb_intel_sdvo_connector->left_margin == temp_value) 1754 if (psb_intel_sdvo_connector->left_margin == temp_value)
1755 return 0; 1755 return 0;
@@ -1761,7 +1761,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
1761 cmd = SDVO_CMD_SET_OVERSCAN_H; 1761 cmd = SDVO_CMD_SET_OVERSCAN_H;
1762 goto set_value; 1762 goto set_value;
1763 } else if (psb_intel_sdvo_connector->right == property) { 1763 } else if (psb_intel_sdvo_connector->right == property) {
1764 drm_connector_property_set_value(connector, 1764 drm_object_property_set_value(&connector->base,
1765 psb_intel_sdvo_connector->left, val); 1765 psb_intel_sdvo_connector->left, val);
1766 if (psb_intel_sdvo_connector->right_margin == temp_value) 1766 if (psb_intel_sdvo_connector->right_margin == temp_value)
1767 return 0; 1767 return 0;
@@ -1773,7 +1773,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
1773 cmd = SDVO_CMD_SET_OVERSCAN_H; 1773 cmd = SDVO_CMD_SET_OVERSCAN_H;
1774 goto set_value; 1774 goto set_value;
1775 } else if (psb_intel_sdvo_connector->top == property) { 1775 } else if (psb_intel_sdvo_connector->top == property) {
1776 drm_connector_property_set_value(connector, 1776 drm_object_property_set_value(&connector->base,
1777 psb_intel_sdvo_connector->bottom, val); 1777 psb_intel_sdvo_connector->bottom, val);
1778 if (psb_intel_sdvo_connector->top_margin == temp_value) 1778 if (psb_intel_sdvo_connector->top_margin == temp_value)
1779 return 0; 1779 return 0;
@@ -1785,7 +1785,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
1785 cmd = SDVO_CMD_SET_OVERSCAN_V; 1785 cmd = SDVO_CMD_SET_OVERSCAN_V;
1786 goto set_value; 1786 goto set_value;
1787 } else if (psb_intel_sdvo_connector->bottom == property) { 1787 } else if (psb_intel_sdvo_connector->bottom == property) {
1788 drm_connector_property_set_value(connector, 1788 drm_object_property_set_value(&connector->base,
1789 psb_intel_sdvo_connector->top, val); 1789 psb_intel_sdvo_connector->top, val);
1790 if (psb_intel_sdvo_connector->bottom_margin == temp_value) 1790 if (psb_intel_sdvo_connector->bottom_margin == temp_value)
1791 return 0; 1791 return 0;
@@ -2286,7 +2286,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s
2286 i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]); 2286 i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]);
2287 2287
2288 psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0]; 2288 psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0];
2289 drm_connector_attach_property(&psb_intel_sdvo_connector->base.base, 2289 drm_object_attach_property(&psb_intel_sdvo_connector->base.base.base,
2290 psb_intel_sdvo_connector->tv_format, 0); 2290 psb_intel_sdvo_connector->tv_format, 0);
2291 return true; 2291 return true;
2292 2292
@@ -2302,7 +2302,7 @@ static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_s
2302 psb_intel_sdvo_connector->name = \ 2302 psb_intel_sdvo_connector->name = \
2303 drm_property_create_range(dev, 0, #name, 0, data_value[0]); \ 2303 drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
2304 if (!psb_intel_sdvo_connector->name) return false; \ 2304 if (!psb_intel_sdvo_connector->name) return false; \
2305 drm_connector_attach_property(connector, \ 2305 drm_object_attach_property(&connector->base, \
2306 psb_intel_sdvo_connector->name, \ 2306 psb_intel_sdvo_connector->name, \
2307 psb_intel_sdvo_connector->cur_##name); \ 2307 psb_intel_sdvo_connector->cur_##name); \
2308 DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \ 2308 DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
@@ -2339,7 +2339,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
2339 if (!psb_intel_sdvo_connector->left) 2339 if (!psb_intel_sdvo_connector->left)
2340 return false; 2340 return false;
2341 2341
2342 drm_connector_attach_property(connector, 2342 drm_object_attach_property(&connector->base,
2343 psb_intel_sdvo_connector->left, 2343 psb_intel_sdvo_connector->left,
2344 psb_intel_sdvo_connector->left_margin); 2344 psb_intel_sdvo_connector->left_margin);
2345 2345
@@ -2348,7 +2348,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
2348 if (!psb_intel_sdvo_connector->right) 2348 if (!psb_intel_sdvo_connector->right)
2349 return false; 2349 return false;
2350 2350
2351 drm_connector_attach_property(connector, 2351 drm_object_attach_property(&connector->base,
2352 psb_intel_sdvo_connector->right, 2352 psb_intel_sdvo_connector->right,
2353 psb_intel_sdvo_connector->right_margin); 2353 psb_intel_sdvo_connector->right_margin);
2354 DRM_DEBUG_KMS("h_overscan: max %d, " 2354 DRM_DEBUG_KMS("h_overscan: max %d, "
@@ -2375,7 +2375,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
2375 if (!psb_intel_sdvo_connector->top) 2375 if (!psb_intel_sdvo_connector->top)
2376 return false; 2376 return false;
2377 2377
2378 drm_connector_attach_property(connector, 2378 drm_object_attach_property(&connector->base,
2379 psb_intel_sdvo_connector->top, 2379 psb_intel_sdvo_connector->top,
2380 psb_intel_sdvo_connector->top_margin); 2380 psb_intel_sdvo_connector->top_margin);
2381 2381
@@ -2384,7 +2384,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
2384 if (!psb_intel_sdvo_connector->bottom) 2384 if (!psb_intel_sdvo_connector->bottom)
2385 return false; 2385 return false;
2386 2386
2387 drm_connector_attach_property(connector, 2387 drm_object_attach_property(&connector->base,
2388 psb_intel_sdvo_connector->bottom, 2388 psb_intel_sdvo_connector->bottom,
2389 psb_intel_sdvo_connector->bottom_margin); 2389 psb_intel_sdvo_connector->bottom_margin);
2390 DRM_DEBUG_KMS("v_overscan: max %d, " 2390 DRM_DEBUG_KMS("v_overscan: max %d, "
@@ -2416,7 +2416,7 @@ psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
2416 if (!psb_intel_sdvo_connector->dot_crawl) 2416 if (!psb_intel_sdvo_connector->dot_crawl)
2417 return false; 2417 return false;
2418 2418
2419 drm_connector_attach_property(connector, 2419 drm_object_attach_property(&connector->base,
2420 psb_intel_sdvo_connector->dot_crawl, 2420 psb_intel_sdvo_connector->dot_crawl,
2421 psb_intel_sdvo_connector->cur_dot_crawl); 2421 psb_intel_sdvo_connector->cur_dot_crawl);
2422 DRM_DEBUG_KMS("dot crawl: current %d\n", response); 2422 DRM_DEBUG_KMS("dot crawl: current %d\n", response);
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 599099fe76e3..b865d0728e28 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -214,7 +214,7 @@ static enum drm_connector_status ch7006_encoder_detect(struct drm_encoder *encod
214 else 214 else
215 priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown; 215 priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
216 216
217 drm_connector_property_set_value(connector, 217 drm_object_property_set_value(&connector->base,
218 encoder->dev->mode_config.tv_subconnector_property, 218 encoder->dev->mode_config.tv_subconnector_property,
219 priv->subconnector); 219 priv->subconnector);
220 220
@@ -254,23 +254,23 @@ static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
254 254
255 priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2); 255 priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2);
256 256
257 drm_connector_attach_property(connector, conf->tv_select_subconnector_property, 257 drm_object_attach_property(&connector->base, conf->tv_select_subconnector_property,
258 priv->select_subconnector); 258 priv->select_subconnector);
259 drm_connector_attach_property(connector, conf->tv_subconnector_property, 259 drm_object_attach_property(&connector->base, conf->tv_subconnector_property,
260 priv->subconnector); 260 priv->subconnector);
261 drm_connector_attach_property(connector, conf->tv_left_margin_property, 261 drm_object_attach_property(&connector->base, conf->tv_left_margin_property,
262 priv->hmargin); 262 priv->hmargin);
263 drm_connector_attach_property(connector, conf->tv_bottom_margin_property, 263 drm_object_attach_property(&connector->base, conf->tv_bottom_margin_property,
264 priv->vmargin); 264 priv->vmargin);
265 drm_connector_attach_property(connector, conf->tv_mode_property, 265 drm_object_attach_property(&connector->base, conf->tv_mode_property,
266 priv->norm); 266 priv->norm);
267 drm_connector_attach_property(connector, conf->tv_brightness_property, 267 drm_object_attach_property(&connector->base, conf->tv_brightness_property,
268 priv->brightness); 268 priv->brightness);
269 drm_connector_attach_property(connector, conf->tv_contrast_property, 269 drm_object_attach_property(&connector->base, conf->tv_contrast_property,
270 priv->contrast); 270 priv->contrast);
271 drm_connector_attach_property(connector, conf->tv_flicker_reduction_property, 271 drm_object_attach_property(&connector->base, conf->tv_flicker_reduction_property,
272 priv->flicker); 272 priv->flicker);
273 drm_connector_attach_property(connector, priv->scale_property, 273 drm_object_attach_property(&connector->base, priv->scale_property,
274 priv->scale); 274 priv->scale);
275 275
276 return 0; 276 return 0;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index dde8b505bf7f..e6a11ca85eaf 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -317,7 +317,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
317 seq_printf(m, "No flip due on pipe %c (plane %c)\n", 317 seq_printf(m, "No flip due on pipe %c (plane %c)\n",
318 pipe, plane); 318 pipe, plane);
319 } else { 319 } else {
320 if (!work->pending) { 320 if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
321 seq_printf(m, "Flip queued on pipe %c (plane %c)\n", 321 seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
322 pipe, plane); 322 pipe, plane);
323 } else { 323 } else {
@@ -328,7 +328,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
328 seq_printf(m, "Stall check enabled, "); 328 seq_printf(m, "Stall check enabled, ");
329 else 329 else
330 seq_printf(m, "Stall check waiting for page flip ioctl, "); 330 seq_printf(m, "Stall check waiting for page flip ioctl, ");
331 seq_printf(m, "%d prepares\n", work->pending); 331 seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
332 332
333 if (work->old_fb_obj) { 333 if (work->old_fb_obj) {
334 struct drm_i915_gem_object *obj = work->old_fb_obj; 334 struct drm_i915_gem_object *obj = work->old_fb_obj;
@@ -655,10 +655,12 @@ static void i915_ring_error_state(struct seq_file *m,
655 if (INTEL_INFO(dev)->gen >= 6) { 655 if (INTEL_INFO(dev)->gen >= 6) {
656 seq_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]); 656 seq_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
657 seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]); 657 seq_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
658 seq_printf(m, " SYNC_0: 0x%08x\n", 658 seq_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
659 error->semaphore_mboxes[ring][0]); 659 error->semaphore_mboxes[ring][0],
660 seq_printf(m, " SYNC_1: 0x%08x\n", 660 error->semaphore_seqno[ring][0]);
661 error->semaphore_mboxes[ring][1]); 661 seq_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
662 error->semaphore_mboxes[ring][1],
663 error->semaphore_seqno[ring][1]);
662 } 664 }
663 seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]); 665 seq_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
664 seq_printf(m, " waiting: %s\n", yesno(error->waiting[ring])); 666 seq_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
@@ -1068,7 +1070,7 @@ static int gen6_drpc_info(struct seq_file *m)
1068 struct drm_info_node *node = (struct drm_info_node *) m->private; 1070 struct drm_info_node *node = (struct drm_info_node *) m->private;
1069 struct drm_device *dev = node->minor->dev; 1071 struct drm_device *dev = node->minor->dev;
1070 struct drm_i915_private *dev_priv = dev->dev_private; 1072 struct drm_i915_private *dev_priv = dev->dev_private;
1071 u32 rpmodectl1, gt_core_status, rcctl1; 1073 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
1072 unsigned forcewake_count; 1074 unsigned forcewake_count;
1073 int count=0, ret; 1075 int count=0, ret;
1074 1076
@@ -1097,6 +1099,9 @@ static int gen6_drpc_info(struct seq_file *m)
1097 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1099 rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1098 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1100 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1099 mutex_unlock(&dev->struct_mutex); 1101 mutex_unlock(&dev->struct_mutex);
1102 mutex_lock(&dev_priv->rps.hw_lock);
1103 sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
1104 mutex_unlock(&dev_priv->rps.hw_lock);
1100 1105
1101 seq_printf(m, "Video Turbo Mode: %s\n", 1106 seq_printf(m, "Video Turbo Mode: %s\n",
1102 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO)); 1107 yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
@@ -1148,6 +1153,12 @@ static int gen6_drpc_info(struct seq_file *m)
1148 seq_printf(m, "RC6++ residency since boot: %u\n", 1153 seq_printf(m, "RC6++ residency since boot: %u\n",
1149 I915_READ(GEN6_GT_GFX_RC6pp)); 1154 I915_READ(GEN6_GT_GFX_RC6pp));
1150 1155
1156 seq_printf(m, "RC6 voltage: %dmV\n",
1157 GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
1158 seq_printf(m, "RC6+ voltage: %dmV\n",
1159 GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
1160 seq_printf(m, "RC6++ voltage: %dmV\n",
1161 GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
1151 return 0; 1162 return 0;
1152} 1163}
1153 1164
@@ -1273,7 +1284,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
1273 return 0; 1284 return 0;
1274 } 1285 }
1275 1286
1276 ret = mutex_lock_interruptible(&dev->struct_mutex); 1287 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1277 if (ret) 1288 if (ret)
1278 return ret; 1289 return ret;
1279 1290
@@ -1282,19 +1293,14 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
1282 for (gpu_freq = dev_priv->rps.min_delay; 1293 for (gpu_freq = dev_priv->rps.min_delay;
1283 gpu_freq <= dev_priv->rps.max_delay; 1294 gpu_freq <= dev_priv->rps.max_delay;
1284 gpu_freq++) { 1295 gpu_freq++) {
1285 I915_WRITE(GEN6_PCODE_DATA, gpu_freq); 1296 ia_freq = gpu_freq;
1286 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | 1297 sandybridge_pcode_read(dev_priv,
1287 GEN6_PCODE_READ_MIN_FREQ_TABLE); 1298 GEN6_PCODE_READ_MIN_FREQ_TABLE,
1288 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & 1299 &ia_freq);
1289 GEN6_PCODE_READY) == 0, 10)) {
1290 DRM_ERROR("pcode read of freq table timed out\n");
1291 continue;
1292 }
1293 ia_freq = I915_READ(GEN6_PCODE_DATA);
1294 seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100); 1300 seq_printf(m, "%d\t\t%d\n", gpu_freq * GT_FREQUENCY_MULTIPLIER, ia_freq * 100);
1295 } 1301 }
1296 1302
1297 mutex_unlock(&dev->struct_mutex); 1303 mutex_unlock(&dev_priv->rps.hw_lock);
1298 1304
1299 return 0; 1305 return 0;
1300} 1306}
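sandybridge_pcode_read() itself is not in this diff; judging from the open-coded mailbox sequence it replaces here, a sketch would look roughly like the following (the name and registers are taken from the removed code, but the body, timeout and error handling are reconstructions):

	static int sandybridge_pcode_read(struct drm_i915_private *dev_priv,
					  u8 mbox, u32 *val)
	{
		/* hand the request to the pcode firmware, wait for READY to clear */
		I915_WRITE(GEN6_PCODE_DATA, *val);
		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
		if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
			      GEN6_PCODE_READY) == 0, 500))
			return -ETIMEDOUT;	/* timeout value is a guess */
		*val = I915_READ(GEN6_PCODE_DATA);
		return 0;
	}

Note that every caller in these hunks now takes dev_priv->rps.hw_lock around it instead of struct_mutex.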
@@ -1398,15 +1404,15 @@ static int i915_context_status(struct seq_file *m, void *unused)
1398 if (ret) 1404 if (ret)
1399 return ret; 1405 return ret;
1400 1406
1401 if (dev_priv->pwrctx) { 1407 if (dev_priv->ips.pwrctx) {
1402 seq_printf(m, "power context "); 1408 seq_printf(m, "power context ");
1403 describe_obj(m, dev_priv->pwrctx); 1409 describe_obj(m, dev_priv->ips.pwrctx);
1404 seq_printf(m, "\n"); 1410 seq_printf(m, "\n");
1405 } 1411 }
1406 1412
1407 if (dev_priv->renderctx) { 1413 if (dev_priv->ips.renderctx) {
1408 seq_printf(m, "render context "); 1414 seq_printf(m, "render context ");
1409 describe_obj(m, dev_priv->renderctx); 1415 describe_obj(m, dev_priv->ips.renderctx);
1410 seq_printf(m, "\n"); 1416 seq_printf(m, "\n");
1411 } 1417 }
1412 1418
@@ -1711,13 +1717,13 @@ i915_max_freq_read(struct file *filp,
1711 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1717 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1712 return -ENODEV; 1718 return -ENODEV;
1713 1719
1714 ret = mutex_lock_interruptible(&dev->struct_mutex); 1720 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1715 if (ret) 1721 if (ret)
1716 return ret; 1722 return ret;
1717 1723
1718 len = snprintf(buf, sizeof(buf), 1724 len = snprintf(buf, sizeof(buf),
1719 "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER); 1725 "max freq: %d\n", dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER);
1720 mutex_unlock(&dev->struct_mutex); 1726 mutex_unlock(&dev_priv->rps.hw_lock);
1721 1727
1722 if (len > sizeof(buf)) 1728 if (len > sizeof(buf))
1723 len = sizeof(buf); 1729 len = sizeof(buf);
@@ -1752,7 +1758,7 @@ i915_max_freq_write(struct file *filp,
1752 1758
1753 DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val); 1759 DRM_DEBUG_DRIVER("Manually setting max freq to %d\n", val);
1754 1760
1755 ret = mutex_lock_interruptible(&dev->struct_mutex); 1761 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1756 if (ret) 1762 if (ret)
1757 return ret; 1763 return ret;
1758 1764
@@ -1762,7 +1768,7 @@ i915_max_freq_write(struct file *filp,
1762 dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER; 1768 dev_priv->rps.max_delay = val / GT_FREQUENCY_MULTIPLIER;
1763 1769
1764 gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER); 1770 gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
1765 mutex_unlock(&dev->struct_mutex); 1771 mutex_unlock(&dev_priv->rps.hw_lock);
1766 1772
1767 return cnt; 1773 return cnt;
1768} 1774}
@@ -1787,13 +1793,13 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
1787 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 1793 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
1788 return -ENODEV; 1794 return -ENODEV;
1789 1795
1790 ret = mutex_lock_interruptible(&dev->struct_mutex); 1796 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1791 if (ret) 1797 if (ret)
1792 return ret; 1798 return ret;
1793 1799
1794 len = snprintf(buf, sizeof(buf), 1800 len = snprintf(buf, sizeof(buf),
1795 "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER); 1801 "min freq: %d\n", dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER);
1796 mutex_unlock(&dev->struct_mutex); 1802 mutex_unlock(&dev_priv->rps.hw_lock);
1797 1803
1798 if (len > sizeof(buf)) 1804 if (len > sizeof(buf))
1799 len = sizeof(buf); 1805 len = sizeof(buf);
@@ -1826,7 +1832,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
1826 1832
1827 DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val); 1833 DRM_DEBUG_DRIVER("Manually setting min freq to %d\n", val);
1828 1834
1829 ret = mutex_lock_interruptible(&dev->struct_mutex); 1835 ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
1830 if (ret) 1836 if (ret)
1831 return ret; 1837 return ret;
1832 1838
@@ -1836,7 +1842,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
1836 dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER; 1842 dev_priv->rps.min_delay = val / GT_FREQUENCY_MULTIPLIER;
1837 1843
1838 gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER); 1844 gen6_set_rps(dev, val / GT_FREQUENCY_MULTIPLIER);
1839 mutex_unlock(&dev->struct_mutex); 1845 mutex_unlock(&dev_priv->rps.hw_lock);
1840 1846
1841 return cnt; 1847 return cnt;
1842} 1848}
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 61ae104dca8c..8f63cd5de4b4 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -104,32 +104,6 @@ static void i915_write_hws_pga(struct drm_device *dev)
104} 104}
105 105
106/** 106/**
107 * Sets up the hardware status page for devices that need a physical address
108 * in the register.
109 */
110static int i915_init_phys_hws(struct drm_device *dev)
111{
112 drm_i915_private_t *dev_priv = dev->dev_private;
113
114 /* Program Hardware Status Page */
115 dev_priv->status_page_dmah =
116 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
117
118 if (!dev_priv->status_page_dmah) {
119 DRM_ERROR("Can not allocate hardware status page\n");
120 return -ENOMEM;
121 }
122
123 memset_io((void __force __iomem *)dev_priv->status_page_dmah->vaddr,
124 0, PAGE_SIZE);
125
126 i915_write_hws_pga(dev);
127
128 DRM_DEBUG_DRIVER("Enabled hardware status page\n");
129 return 0;
130}
131
132/**
133 * Frees the hardware status page, whether it's a physical address or a virtual 107 * Frees the hardware status page, whether it's a physical address or a virtual
134 * address set up by the X Server. 108 * address set up by the X Server.
135 */ 109 */
@@ -167,7 +141,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
167 141
168 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; 142 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
169 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; 143 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
170 ring->space = ring->head - (ring->tail + 8); 144 ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
171 if (ring->space < 0) 145 if (ring->space < 0)
172 ring->space += ring->size; 146 ring->space += ring->size;
173 147
@@ -451,16 +425,16 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
451 drm_i915_private_t *dev_priv = dev->dev_private; 425 drm_i915_private_t *dev_priv = dev->dev_private;
452 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 426 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
453 427
454 dev_priv->counter++; 428 dev_priv->dri1.counter++;
455 if (dev_priv->counter > 0x7FFFFFFFUL) 429 if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
456 dev_priv->counter = 0; 430 dev_priv->dri1.counter = 0;
457 if (master_priv->sarea_priv) 431 if (master_priv->sarea_priv)
458 master_priv->sarea_priv->last_enqueue = dev_priv->counter; 432 master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
459 433
460 if (BEGIN_LP_RING(4) == 0) { 434 if (BEGIN_LP_RING(4) == 0) {
461 OUT_RING(MI_STORE_DWORD_INDEX); 435 OUT_RING(MI_STORE_DWORD_INDEX);
462 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 436 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
463 OUT_RING(dev_priv->counter); 437 OUT_RING(dev_priv->dri1.counter);
464 OUT_RING(0); 438 OUT_RING(0);
465 ADVANCE_LP_RING(); 439 ADVANCE_LP_RING();
466 } 440 }
@@ -602,12 +576,12 @@ static int i915_dispatch_flip(struct drm_device * dev)
602 576
603 ADVANCE_LP_RING(); 577 ADVANCE_LP_RING();
604 578
605 master_priv->sarea_priv->last_enqueue = dev_priv->counter++; 579 master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;
606 580
607 if (BEGIN_LP_RING(4) == 0) { 581 if (BEGIN_LP_RING(4) == 0) {
608 OUT_RING(MI_STORE_DWORD_INDEX); 582 OUT_RING(MI_STORE_DWORD_INDEX);
609 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 583 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
610 OUT_RING(dev_priv->counter); 584 OUT_RING(dev_priv->dri1.counter);
611 OUT_RING(0); 585 OUT_RING(0);
612 ADVANCE_LP_RING(); 586 ADVANCE_LP_RING();
613 } 587 }
@@ -618,10 +592,8 @@ static int i915_dispatch_flip(struct drm_device * dev)
618 592
619static int i915_quiescent(struct drm_device *dev) 593static int i915_quiescent(struct drm_device *dev)
620{ 594{
621 struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
622
623 i915_kernel_lost_context(dev); 595 i915_kernel_lost_context(dev);
624 return intel_wait_ring_idle(ring); 596 return intel_ring_idle(LP_RING(dev->dev_private));
625} 597}
626 598
627static int i915_flush_ioctl(struct drm_device *dev, void *data, 599static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -775,21 +747,21 @@ static int i915_emit_irq(struct drm_device * dev)
775 747
776 DRM_DEBUG_DRIVER("\n"); 748 DRM_DEBUG_DRIVER("\n");
777 749
778 dev_priv->counter++; 750 dev_priv->dri1.counter++;
779 if (dev_priv->counter > 0x7FFFFFFFUL) 751 if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
780 dev_priv->counter = 1; 752 dev_priv->dri1.counter = 1;
781 if (master_priv->sarea_priv) 753 if (master_priv->sarea_priv)
782 master_priv->sarea_priv->last_enqueue = dev_priv->counter; 754 master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
783 755
784 if (BEGIN_LP_RING(4) == 0) { 756 if (BEGIN_LP_RING(4) == 0) {
785 OUT_RING(MI_STORE_DWORD_INDEX); 757 OUT_RING(MI_STORE_DWORD_INDEX);
786 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 758 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
787 OUT_RING(dev_priv->counter); 759 OUT_RING(dev_priv->dri1.counter);
788 OUT_RING(MI_USER_INTERRUPT); 760 OUT_RING(MI_USER_INTERRUPT);
789 ADVANCE_LP_RING(); 761 ADVANCE_LP_RING();
790 } 762 }
791 763
792 return dev_priv->counter; 764 return dev_priv->dri1.counter;
793} 765}
794 766
795static int i915_wait_irq(struct drm_device * dev, int irq_nr) 767static int i915_wait_irq(struct drm_device * dev, int irq_nr)
@@ -820,7 +792,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
820 792
821 if (ret == -EBUSY) { 793 if (ret == -EBUSY) {
822 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", 794 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
823 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); 795 READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
824 } 796 }
825 797
826 return ret; 798 return ret;
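For context on the DRI1 breadcrumb counter these hunks keep renaming: i915_emit_irq() has the ring store the counter into the hardware status page (MI_STORE_DWORD_INDEX at I915_BREADCRUMB_INDEX), and the wait side polls it back, along the lines of:

	/* sketch: READ_BREADCRUMB() reads the stored counter back from the status page */
	if (READ_BREADCRUMB(dev_priv) >= irq_nr)
		return 0;	/* the GPU already passed this breadcrumb */

so the move from dev_priv->counter to dev_priv->dri1.counter is pure namespacing, not a behaviour change.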
@@ -1014,6 +986,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
1014 case I915_PARAM_HAS_PRIME_VMAP_FLUSH: 986 case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
1015 value = 1; 987 value = 1;
1016 break; 988 break;
989 case I915_PARAM_HAS_SECURE_BATCHES:
990 value = capable(CAP_SYS_ADMIN);
991 break;
1017 default: 992 default:
1018 DRM_DEBUG_DRIVER("Unknown parameter %d\n", 993 DRM_DEBUG_DRIVER("Unknown parameter %d\n",
1019 param->param); 994 param->param);
@@ -1068,7 +1043,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
1068{ 1043{
1069 drm_i915_private_t *dev_priv = dev->dev_private; 1044 drm_i915_private_t *dev_priv = dev->dev_private;
1070 drm_i915_hws_addr_t *hws = data; 1045 drm_i915_hws_addr_t *hws = data;
1071 struct intel_ring_buffer *ring = LP_RING(dev_priv); 1046 struct intel_ring_buffer *ring;
1072 1047
1073 if (drm_core_check_feature(dev, DRIVER_MODESET)) 1048 if (drm_core_check_feature(dev, DRIVER_MODESET))
1074 return -ENODEV; 1049 return -ENODEV;
@@ -1088,6 +1063,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
1088 1063
1089 DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr); 1064 DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
1090 1065
1066 ring = LP_RING(dev_priv);
1091 ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12); 1067 ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
1092 1068
1093 dev_priv->dri1.gfx_hws_cpu_addr = 1069 dev_priv->dri1.gfx_hws_cpu_addr =
@@ -1326,6 +1302,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
1326 1302
1327 intel_modeset_gem_init(dev); 1303 intel_modeset_gem_init(dev);
1328 1304
1305 INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);
1306
1329 ret = drm_irq_install(dev); 1307 ret = drm_irq_install(dev);
1330 if (ret) 1308 if (ret)
1331 goto cleanup_gem; 1309 goto cleanup_gem;
@@ -1491,19 +1469,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1491 goto free_priv; 1469 goto free_priv;
1492 } 1470 }
1493 1471
1494 ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL); 1472 ret = i915_gem_gtt_init(dev);
1495 if (!ret) { 1473 if (ret)
1496 DRM_ERROR("failed to set up gmch\n");
1497 ret = -EIO;
1498 goto put_bridge; 1474 goto put_bridge;
1499 }
1500
1501 dev_priv->mm.gtt = intel_gtt_get();
1502 if (!dev_priv->mm.gtt) {
1503 DRM_ERROR("Failed to initialize GTT\n");
1504 ret = -ENODEV;
1505 goto put_gmch;
1506 }
1507 1475
1508 if (drm_core_check_feature(dev, DRIVER_MODESET)) 1476 if (drm_core_check_feature(dev, DRIVER_MODESET))
1509 i915_kick_out_firmware_fb(dev_priv); 1477 i915_kick_out_firmware_fb(dev_priv);
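i915_gem_gtt_init() is introduced elsewhere in the series; from the inline code it replaces, it presumably wraps the intel_gmch_probe()/intel_gtt_get() pair, roughly:

	/* sketch reconstructed from the removed inline code; error values assumed */
	int i915_gem_gtt_init(struct drm_device *dev)
	{
		struct drm_i915_private *dev_priv = dev->dev_private;

		if (!intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL)) {
			DRM_ERROR("failed to set up gmch\n");
			return -EIO;
		}

		dev_priv->mm.gtt = intel_gtt_get();
		if (!dev_priv->mm.gtt) {
			DRM_ERROR("Failed to initialize GTT\n");
			intel_gmch_remove();
			return -ENODEV;
		}
		return 0;
	}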
@@ -1590,18 +1558,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1590 intel_setup_gmbus(dev); 1558 intel_setup_gmbus(dev);
1591 intel_opregion_setup(dev); 1559 intel_opregion_setup(dev);
1592 1560
1593 /* Make sure the bios did its job and set up vital registers */
1594 intel_setup_bios(dev); 1561 intel_setup_bios(dev);
1595 1562
1596 i915_gem_load(dev); 1563 i915_gem_load(dev);
1597 1564
1598 /* Init HWS */
1599 if (!I915_NEED_GFX_HWS(dev)) {
1600 ret = i915_init_phys_hws(dev);
1601 if (ret)
1602 goto out_gem_unload;
1603 }
1604
1605 /* On the 945G/GM, the chipset reports the MSI capability on the 1565 /* On the 945G/GM, the chipset reports the MSI capability on the
1606 * integrated graphics even though the support isn't actually there 1566 * integrated graphics even though the support isn't actually there
1607 * according to the published specs. It doesn't appear to function 1567 * according to the published specs. It doesn't appear to function
@@ -1621,6 +1581,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	spin_lock_init(&dev_priv->rps.lock);
 	spin_lock_init(&dev_priv->dpio_lock);
 
+	mutex_init(&dev_priv->rps.hw_lock);
+
 	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
 		dev_priv->num_pipe = 3;
 	else if (IS_MOBILE(dev) || !IS_GEN2(dev))
@@ -1678,7 +1640,7 @@ out_mtrrfree:
 out_rmmap:
 	pci_iounmap(dev->pdev, dev_priv->regs);
 put_gmch:
-	intel_gmch_remove();
+	i915_gem_gtt_fini(dev);
 put_bridge:
 	pci_dev_put(dev_priv->bridge_dev);
 free_priv:
@@ -1721,6 +1683,7 @@ int i915_driver_unload(struct drm_device *dev)
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		intel_fbdev_fini(dev);
 		intel_modeset_cleanup(dev);
+		cancel_work_sync(&dev_priv->console_resume_work);
 
 		/*
 		 * free the memory space allocated for the child device
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 6770ee6084b4..530db83ef320 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -47,11 +47,11 @@ MODULE_PARM_DESC(modeset,
 unsigned int i915_fbpercrtc __always_unused = 0;
 module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 
-int i915_panel_ignore_lid __read_mostly = 0;
+int i915_panel_ignore_lid __read_mostly = 1;
 module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
 MODULE_PARM_DESC(panel_ignore_lid,
-		"Override lid status (0=autodetect [default], 1=lid open, "
-		"-1=lid closed)");
+		"Override lid status (0=autodetect, 1=autodetect disabled [default], "
+		"-1=force lid closed, -2=force lid open)");
 
 unsigned int i915_powersave __read_mostly = 1;
 module_param_named(powersave, i915_powersave, int, 0600);
@@ -396,12 +396,6 @@ static const struct pci_device_id pciidlist[] = { /* aka */
 MODULE_DEVICE_TABLE(pci, pciidlist);
 #endif
 
-#define INTEL_PCH_DEVICE_ID_MASK	0xff00
-#define INTEL_PCH_IBX_DEVICE_ID_TYPE	0x3b00
-#define INTEL_PCH_CPT_DEVICE_ID_TYPE	0x1c00
-#define INTEL_PCH_PPT_DEVICE_ID_TYPE	0x1e00
-#define INTEL_PCH_LPT_DEVICE_ID_TYPE	0x8c00
-
 void intel_detect_pch(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -416,26 +410,36 @@ void intel_detect_pch(struct drm_device *dev)
 	pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
 	if (pch) {
 		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
-			int id;
+			unsigned short id;
 			id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+			dev_priv->pch_id = id;
 
 			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_IBX;
 				dev_priv->num_pch_pll = 2;
 				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
+				WARN_ON(!IS_GEN5(dev));
 			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_CPT;
 				dev_priv->num_pch_pll = 2;
 				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
+				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
 			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
 				/* PantherPoint is CPT compatible */
 				dev_priv->pch_type = PCH_CPT;
 				dev_priv->num_pch_pll = 2;
 				DRM_DEBUG_KMS("Found PatherPoint PCH\n");
+				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
 			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
 				dev_priv->pch_type = PCH_LPT;
 				dev_priv->num_pch_pll = 0;
 				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
+				WARN_ON(!IS_HASWELL(dev));
+			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+				dev_priv->pch_type = PCH_LPT;
+				dev_priv->num_pch_pll = 0;
+				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
+				WARN_ON(!IS_HASWELL(dev));
 			}
 			BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
 		}
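The detection above keys entirely off the upper byte of the ISA bridge's PCI device ID. For reference, the id-to-PCH mapping this hunk now covers, including the new LynxPoint LP entry, can be summarized as a table (an illustrative sketch only; the driver itself keeps these ids as the #defines added to i915_drv.h later in this diff):

/* Sketch: (pch->device & 0xff00) -> PCH type reported by intel_detect_pch(). */
static const struct {
	unsigned short id;
	const char *name;
} pch_id_table[] = {
	{ 0x3b00, "Ibex Peak" },	/* PCH_IBX, expected on gen5 */
	{ 0x1c00, "CougarPoint" },	/* PCH_CPT, gen6/Ivybridge */
	{ 0x1e00, "PantherPoint" },	/* treated as CPT-compatible */
	{ 0x8c00, "LynxPoint" },	/* PCH_LPT, Haswell */
	{ 0x9c00, "LynxPoint LP" },	/* PCH_LPT, Haswell ULT */
};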
@@ -477,6 +481,8 @@ static int i915_drm_freeze(struct drm_device *dev)
 			return error;
 	}
 
+	cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
+
 	intel_modeset_disable(dev);
 
 	drm_irq_uninstall(dev);
@@ -526,24 +532,29 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
 	return 0;
 }
 
-static int i915_drm_thaw(struct drm_device *dev)
+void intel_console_resume(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, struct drm_i915_private,
+			     console_resume_work);
+	struct drm_device *dev = dev_priv->dev;
+
+	console_lock();
+	intel_fbdev_set_suspend(dev, 0);
+	console_unlock();
+}
+
+static int __i915_drm_thaw(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int error = 0;
 
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		mutex_lock(&dev->struct_mutex);
-		i915_gem_restore_gtt_mappings(dev);
-		mutex_unlock(&dev->struct_mutex);
-	}
-
 	i915_restore_state(dev);
 	intel_opregion_setup(dev);
 
 	/* KMS EnterVT equivalent */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
-			ironlake_init_pch_refclk(dev);
+		intel_init_pch_refclk(dev);
 
 		mutex_lock(&dev->struct_mutex);
 		dev_priv->mm.suspended = 0;
@@ -552,8 +563,7 @@ static int i915_drm_thaw(struct drm_device *dev)
 		mutex_unlock(&dev->struct_mutex);
 
 		intel_modeset_init_hw(dev);
-		intel_modeset_setup_hw_state(dev);
-		drm_mode_config_reset(dev);
+		intel_modeset_setup_hw_state(dev, false);
 		drm_irq_install(dev);
 	}
 
@@ -561,14 +571,41 @@ static int i915_drm_thaw(struct drm_device *dev)
 
 	dev_priv->modeset_on_lid = 0;
 
-	console_lock();
-	intel_fbdev_set_suspend(dev, 0);
-	console_unlock();
+	/*
+	 * The console lock can be pretty contented on resume due
+	 * to all the printk activity. Try to keep it out of the hot
+	 * path of resume if possible.
+	 */
+	if (console_trylock()) {
+		intel_fbdev_set_suspend(dev, 0);
+		console_unlock();
+	} else {
+		schedule_work(&dev_priv->console_resume_work);
+	}
+
+	return error;
+}
+
+static int i915_drm_thaw(struct drm_device *dev)
+{
+	int error = 0;
+
+	intel_gt_reset(dev);
+
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		mutex_lock(&dev->struct_mutex);
+		i915_gem_restore_gtt_mappings(dev);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	__i915_drm_thaw(dev);
+
 	return error;
 }
 
 int i915_resume(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
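The thaw path above illustrates a general pattern: take the console lock opportunistically and fall back to a workqueue when it is contended, so resume never sleeps waiting on printk traffic. A self-contained sketch of the same idea, assuming only the standard console and workqueue APIs (the restore step is a placeholder, not a driver function):

#include <linux/console.h>
#include <linux/workqueue.h>

static void console_work_fn(struct work_struct *work)
{
	console_lock();		/* process context: sleeping here is fine */
	/* ... placeholder for the slow console/fbdev restore ... */
	console_unlock();
}

static DECLARE_WORK(console_work, console_work_fn);

static void resume_fast_path(void)
{
	if (console_trylock()) {
		/* lock was free: do the work inline */
		console_unlock();
	} else {
		/* contended: defer it off the resume hot path */
		schedule_work(&console_work);
	}
}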
@@ -579,7 +616,20 @@ int i915_resume(struct drm_device *dev)
 
 	pci_set_master(dev->pdev);
 
-	ret = i915_drm_thaw(dev);
+	intel_gt_reset(dev);
+
+	/*
+	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
+	 * earlier) need this since the BIOS might clear all our scratch PTEs.
+	 */
+	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
+	    !dev_priv->opregion.header) {
+		mutex_lock(&dev->struct_mutex);
+		i915_gem_restore_gtt_mappings(dev);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	ret = __i915_drm_thaw(dev);
 	if (ret)
 		return ret;
 
@@ -833,7 +883,7 @@ i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	struct intel_device_info *intel_info =
 		(struct intel_device_info *) ent->driver_data;
 
-	if (intel_info->is_haswell || intel_info->is_valleyview)
+	if (intel_info->is_valleyview)
 		if(!i915_preliminary_hw_support) {
 			DRM_ERROR("Preliminary hardware support disabled\n");
 			return -ENODEV;
@@ -1140,12 +1190,40 @@ static bool IS_DISPLAYREG(u32 reg)
 	if (reg == GEN6_GDRST)
 		return false;
 
+	switch (reg) {
+	case _3D_CHICKEN3:
+	case IVB_CHICKEN3:
+	case GEN7_COMMON_SLICE_CHICKEN1:
+	case GEN7_L3CNTLREG1:
+	case GEN7_L3_CHICKEN_MODE_REGISTER:
+	case GEN7_ROW_CHICKEN2:
+	case GEN7_L3SQCREG4:
+	case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG:
+	case GEN7_HALF_SLICE_CHICKEN1:
+	case GEN6_MBCTL:
+	case GEN6_UCGCTL2:
+		return false;
+	default:
+		break;
+	}
+
 	return true;
 }
 
+static void
+ilk_dummy_write(struct drm_i915_private *dev_priv)
+{
+	/* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the
+	 * chip from rc6 before touching it for real. MI_MODE is masked, hence
+	 * harmless to write 0 into. */
+	I915_WRITE_NOTRACE(MI_MODE, 0);
+}
+
 #define __i915_read(x, y) \
 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
 	u##x val = 0; \
+	if (IS_GEN5(dev_priv->dev)) \
+		ilk_dummy_write(dev_priv); \
 	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
 		unsigned long irqflags; \
 		spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
@@ -1177,6 +1255,12 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
 	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
 		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
 	} \
+	if (IS_GEN5(dev_priv->dev)) \
+		ilk_dummy_write(dev_priv); \
+	if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
+		DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
+		I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
+	} \
 	if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
 		write##y(val, dev_priv->regs + reg + 0x180000); \
 	} else { \
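Unrolled from the macro, the write-side guards added here amount to the following sequence (a readability sketch; the helper name is made up, and the driver keeps this logic inside __i915_write so forcewake handling and tracing stay shared across all access sizes):

/* Sketch: per-write guards, unrolled from the __i915_write macro above. */
static void i915_guarded_write32(struct drm_i915_private *dev_priv,
				 u32 reg, u32 val)
{
	/* Gen5: a dummy write to a masked register wakes the GT from RC6
	 * so the real access below is not dropped. */
	if (IS_GEN5(dev_priv->dev))
		ilk_dummy_write(dev_priv);

	/* Haswell: a set ERR_INT_MMIO_UNCLAIMED bit at this point belongs
	 * to some earlier access; report and clear it so the next check
	 * attributes new errors to this write. */
	if (IS_HASWELL(dev_priv->dev) &&
	    (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) {
		DRM_ERROR("Unclaimed register before writing to %x\n", reg);
		I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED);
	}

	writel(val, dev_priv->regs + reg);
}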
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f511fa2f4168..557843dd4b2e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -58,6 +58,14 @@ enum pipe {
 };
 #define pipe_name(p) ((p) + 'A')
 
+enum transcoder {
+	TRANSCODER_A = 0,
+	TRANSCODER_B,
+	TRANSCODER_C,
+	TRANSCODER_EDP = 0xF,
+};
+#define transcoder_name(t) ((t) + 'A')
+
 enum plane {
 	PLANE_A = 0,
 	PLANE_B,
@@ -93,6 +101,12 @@ struct intel_pch_pll {
 };
 #define I915_NUM_PLLS 2
 
+struct intel_ddi_plls {
+	int spll_refcount;
+	int wrpll1_refcount;
+	int wrpll2_refcount;
+};
+
 /* Interface history:
  *
  * 1.1: Original.
@@ -123,14 +137,6 @@ struct drm_i915_gem_phys_object {
 	struct drm_i915_gem_object *cur_obj;
 };
 
-struct mem_block {
-	struct mem_block *next;
-	struct mem_block *prev;
-	int start;
-	int size;
-	struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
-};
-
 struct opregion_header;
 struct opregion_acpi;
 struct opregion_swsci;
@@ -191,6 +197,7 @@ struct drm_i915_error_state {
 	u32 instdone[I915_NUM_RINGS];
 	u32 acthd[I915_NUM_RINGS];
 	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
+	u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
 	u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
 	/* our own tracking of ring head and tail */
 	u32 cpu_ring_head[I915_NUM_RINGS];
@@ -251,6 +258,7 @@ struct drm_i915_display_funcs {
 			  uint32_t sprite_width, int pixel_size);
 	void (*update_linetime_wm)(struct drm_device *dev, int pipe,
 				 struct drm_display_mode *mode);
+	void (*modeset_global_resources)(struct drm_device *dev);
 	int (*crtc_mode_set)(struct drm_crtc *crtc,
 			     struct drm_display_mode *mode,
 			     struct drm_display_mode *adjusted_mode,
@@ -263,7 +271,6 @@ struct drm_i915_display_funcs {
 			  struct drm_crtc *crtc);
 	void (*fdi_link_train)(struct drm_crtc *crtc);
 	void (*init_clock_gating)(struct drm_device *dev);
-	void (*init_pch_clock_gating)(struct drm_device *dev);
 	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
 			  struct drm_framebuffer *fb,
 			  struct drm_i915_gem_object *obj);
@@ -338,6 +345,7 @@ struct intel_device_info {
 #define I915_PPGTT_PD_ENTRIES 512
 #define I915_PPGTT_PT_ENTRIES 1024
 struct i915_hw_ppgtt {
+	struct drm_device *dev;
 	unsigned num_pd_entries;
 	struct page **pt_pages;
 	uint32_t pd_offset;
@@ -374,6 +382,11 @@ enum intel_pch {
 	PCH_LPT,	/* Lynxpoint PCH */
 };
 
+enum intel_sbi_destination {
+	SBI_ICLK,
+	SBI_MPHY,
+};
+
 #define QUIRK_PIPEA_FORCE (1<<0)
 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
 #define QUIRK_INVERT_BRIGHTNESS (1<<2)
@@ -383,154 +396,18 @@ struct intel_fbc_work;
 
 struct intel_gmbus {
 	struct i2c_adapter adapter;
-	bool force_bit;
+	u32 force_bit;
 	u32 reg0;
 	u32 gpio_reg;
 	struct i2c_algo_bit_data bit_algo;
 	struct drm_i915_private *dev_priv;
 };
 
-typedef struct drm_i915_private {
-	struct drm_device *dev;
-
-	const struct intel_device_info *info;
-
-	int relative_constants_mode;
-
-	void __iomem *regs;
-
-	struct drm_i915_gt_funcs gt;
-	/** gt_fifo_count and the subsequent register write are synchronized
-	 * with dev->struct_mutex. */
-	unsigned gt_fifo_count;
-	/** forcewake_count is protected by gt_lock */
-	unsigned forcewake_count;
-	/** gt_lock is also taken in irq contexts. */
-	struct spinlock gt_lock;
-
-	struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
-
-	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
-	 * controller on different i2c buses. */
-	struct mutex gmbus_mutex;
-
-	/**
-	 * Base address of the gmbus and gpio block.
-	 */
-	uint32_t gpio_mmio_base;
-
-	struct pci_dev *bridge_dev;
-	struct intel_ring_buffer ring[I915_NUM_RINGS];
-	uint32_t next_seqno;
-
-	drm_dma_handle_t *status_page_dmah;
-	uint32_t counter;
-	struct drm_i915_gem_object *pwrctx;
-	struct drm_i915_gem_object *renderctx;
-
-	struct resource mch_res;
-
-	atomic_t irq_received;
-
-	/* protects the irq masks */
-	spinlock_t irq_lock;
-
-	/* DPIO indirect register protection */
-	spinlock_t dpio_lock;
-
-	/** Cached value of IMR to avoid reads in updating the bitfield */
-	u32 pipestat[2];
-	u32 irq_mask;
-	u32 gt_irq_mask;
-	u32 pch_irq_mask;
-
-	u32 hotplug_supported_mask;
-	struct work_struct hotplug_work;
-
-	int num_pipe;
-	int num_pch_pll;
-
-	/* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
-	struct timer_list hangcheck_timer;
-	int hangcheck_count;
-	uint32_t last_acthd[I915_NUM_RINGS];
-	uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
-
-	unsigned int stop_rings;
-
-	unsigned long cfb_size;
-	unsigned int cfb_fb;
-	enum plane cfb_plane;
-	int cfb_y;
-	struct intel_fbc_work *fbc_work;
-
-	struct intel_opregion opregion;
-
-	/* overlay */
-	struct intel_overlay *overlay;
-	bool sprite_scaling_enabled;
-
-	/* LVDS info */
-	int backlight_level; /* restore backlight to this value */
-	bool backlight_enabled;
-	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
-	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
-
-	/* Feature bits from the VBIOS */
-	unsigned int int_tv_support:1;
-	unsigned int lvds_dither:1;
-	unsigned int lvds_vbt:1;
-	unsigned int int_crt_support:1;
-	unsigned int lvds_use_ssc:1;
-	unsigned int display_clock_mode:1;
-	int lvds_ssc_freq;
-	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
-	unsigned int lvds_val; /* used for checking LVDS channel mode */
-	struct {
-		int rate;
-		int lanes;
-		int preemphasis;
-		int vswing;
-
-		bool initialized;
-		bool support;
-		int bpp;
-		struct edp_power_seq pps;
-	} edp;
-	bool no_aux_handshake;
-
-	struct notifier_block lid_notifier;
-
-	int crt_ddc_pin;
-	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
-	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
-	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
-
-	unsigned int fsb_freq, mem_freq, is_ddr3;
-
-	spinlock_t error_lock;
-	/* Protected by dev->error_lock. */
-	struct drm_i915_error_state *first_error;
-	struct work_struct error_work;
-	struct completion error_completion;
-	struct workqueue_struct *wq;
-
-	/* Display functions */
-	struct drm_i915_display_funcs display;
-
-	/* PCH chipset type */
-	enum intel_pch pch_type;
-
-	unsigned long quirks;
-
-	/* Register state */
-	bool modeset_on_lid;
+struct i915_suspend_saved_registers {
 	u8 saveLBB;
 	u32 saveDSPACNTR;
 	u32 saveDSPBCNTR;
 	u32 saveDSPARB;
-	u32 saveHWS;
 	u32 savePIPEACONF;
 	u32 savePIPEBCONF;
 	u32 savePIPEASRC;
@@ -676,10 +553,206 @@ typedef struct drm_i915_private {
 	u32 savePIPEB_LINK_N1;
 	u32 saveMCHBAR_RENDER_STANDBY;
 	u32 savePCH_PORT_HOTPLUG;
+};
+
+struct intel_gen6_power_mgmt {
+	struct work_struct work;
+	u32 pm_iir;
+	/* lock - irqsave spinlock that protectects the work_struct and
+	 * pm_iir. */
+	spinlock_t lock;
+
+	/* The below variables an all the rps hw state are protected by
+	 * dev->struct mutext. */
+	u8 cur_delay;
+	u8 min_delay;
+	u8 max_delay;
+
+	struct delayed_work delayed_resume_work;
+
+	/*
+	 * Protects RPS/RC6 register access and PCU communication.
+	 * Must be taken after struct_mutex if nested.
+	 */
+	struct mutex hw_lock;
+};
+
+struct intel_ilk_power_mgmt {
+	u8 cur_delay;
+	u8 min_delay;
+	u8 max_delay;
+	u8 fmax;
+	u8 fstart;
+
+	u64 last_count1;
+	unsigned long last_time1;
+	unsigned long chipset_power;
+	u64 last_count2;
+	struct timespec last_time2;
+	unsigned long gfx_power;
+	u8 corr;
+
+	int c_m;
+	int r_t;
+
+	struct drm_i915_gem_object *pwrctx;
+	struct drm_i915_gem_object *renderctx;
+};
+
+struct i915_dri1_state {
+	unsigned allow_batchbuffer : 1;
+	u32 __iomem *gfx_hws_cpu_addr;
+
+	unsigned int cpp;
+	int back_offset;
+	int front_offset;
+	int current_page;
+	int page_flipping;
+
+	uint32_t counter;
+};
+
+struct intel_l3_parity {
+	u32 *remap_info;
+	struct work_struct error_work;
+};
+
+typedef struct drm_i915_private {
+	struct drm_device *dev;
+
+	const struct intel_device_info *info;
+
+	int relative_constants_mode;
+
+	void __iomem *regs;
+
+	struct drm_i915_gt_funcs gt;
+	/** gt_fifo_count and the subsequent register write are synchronized
+	 * with dev->struct_mutex. */
+	unsigned gt_fifo_count;
+	/** forcewake_count is protected by gt_lock */
+	unsigned forcewake_count;
+	/** gt_lock is also taken in irq contexts. */
+	struct spinlock gt_lock;
+
+	struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
+
+	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
+	 * controller on different i2c buses. */
+	struct mutex gmbus_mutex;
+
+	/**
+	 * Base address of the gmbus and gpio block.
+	 */
+	uint32_t gpio_mmio_base;
+
+	struct pci_dev *bridge_dev;
+	struct intel_ring_buffer ring[I915_NUM_RINGS];
+	uint32_t next_seqno;
+
+	drm_dma_handle_t *status_page_dmah;
+	struct resource mch_res;
+
+	atomic_t irq_received;
+
+	/* protects the irq masks */
+	spinlock_t irq_lock;
+
+	/* DPIO indirect register protection */
+	spinlock_t dpio_lock;
+
+	/** Cached value of IMR to avoid reads in updating the bitfield */
+	u32 pipestat[2];
+	u32 irq_mask;
+	u32 gt_irq_mask;
+	u32 pch_irq_mask;
+
+	u32 hotplug_supported_mask;
+	struct work_struct hotplug_work;
+
+	int num_pipe;
+	int num_pch_pll;
+
+	/* For hangcheck timer */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
+#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
+	struct timer_list hangcheck_timer;
+	int hangcheck_count;
+	uint32_t last_acthd[I915_NUM_RINGS];
+	uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
+
+	unsigned int stop_rings;
+
+	unsigned long cfb_size;
+	unsigned int cfb_fb;
+	enum plane cfb_plane;
+	int cfb_y;
+	struct intel_fbc_work *fbc_work;
+
+	struct intel_opregion opregion;
+
+	/* overlay */
+	struct intel_overlay *overlay;
+	bool sprite_scaling_enabled;
+
+	/* LVDS info */
+	int backlight_level; /* restore backlight to this value */
+	bool backlight_enabled;
+	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
+	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
+
+	/* Feature bits from the VBIOS */
+	unsigned int int_tv_support:1;
+	unsigned int lvds_dither:1;
+	unsigned int lvds_vbt:1;
+	unsigned int int_crt_support:1;
+	unsigned int lvds_use_ssc:1;
+	unsigned int display_clock_mode:1;
+	int lvds_ssc_freq;
+	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
+	unsigned int lvds_val; /* used for checking LVDS channel mode */
+	struct {
+		int rate;
+		int lanes;
+		int preemphasis;
+		int vswing;
+
+		bool initialized;
+		bool support;
+		int bpp;
+		struct edp_power_seq pps;
+	} edp;
+	bool no_aux_handshake;
+
+	int crt_ddc_pin;
+	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
+	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
+	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
+
+	unsigned int fsb_freq, mem_freq, is_ddr3;
+
+	spinlock_t error_lock;
+	/* Protected by dev->error_lock. */
+	struct drm_i915_error_state *first_error;
+	struct work_struct error_work;
+	struct completion error_completion;
+	struct workqueue_struct *wq;
+
+	/* Display functions */
+	struct drm_i915_display_funcs display;
+
+	/* PCH chipset type */
+	enum intel_pch pch_type;
+	unsigned short pch_id;
+
+	unsigned long quirks;
+
+	/* Register state */
+	bool modeset_on_lid;
 
 	struct {
 		/** Bridge to intel-gtt-ko */
-		const struct intel_gtt *gtt;
+		struct intel_gtt *gtt;
 		/** Memory allocator for GTT stolen memory */
 		struct drm_mm stolen;
 		/** Memory allocator for GTT */
@@ -706,8 +779,6 @@ typedef struct drm_i915_private {
 		/** PPGTT used for aliasing the PPGTT with the GTT */
 		struct i915_hw_ppgtt *aliasing_ppgtt;
 
-		u32 *l3_remap_info;
-
 		struct shrinker inactive_shrinker;
 
 		/**
@@ -785,19 +856,6 @@ typedef struct drm_i915_private {
 		u32 object_count;
 	} mm;
 
-	/* Old dri1 support infrastructure, beware the dragons ya fools entering
-	 * here! */
-	struct {
-		unsigned allow_batchbuffer : 1;
-		u32 __iomem *gfx_hws_cpu_addr;
-
-		unsigned int cpp;
-		int back_offset;
-		int front_offset;
-		int current_page;
-		int page_flipping;
-	} dri1;
-
 	/* Kernel Modesetting */
 
 	struct sdvo_device_mapping sdvo_mappings[2];
@@ -811,6 +869,7 @@ typedef struct drm_i915_private {
 	wait_queue_head_t pending_flip_queue;
 
 	struct intel_pch_pll pch_plls[I915_NUM_PLLS];
+	struct intel_ddi_plls ddi_plls;
 
 	/* Reclocking support */
 	bool render_reclock_avail;
@@ -820,46 +879,17 @@ typedef struct drm_i915_private {
 	u16 orig_clock;
 	int child_dev_num;
 	struct child_device_config *child_dev;
-	struct drm_connector *int_lvds_connector;
-	struct drm_connector *int_edp_connector;
 
 	bool mchbar_need_disable;
 
+	struct intel_l3_parity l3_parity;
+
 	/* gen6+ rps state */
-	struct {
-		struct work_struct work;
-		u32 pm_iir;
-		/* lock - irqsave spinlock that protectects the work_struct and
-		 * pm_iir. */
-		spinlock_t lock;
-
-		/* The below variables an all the rps hw state are protected by
-		 * dev->struct mutext. */
-		u8 cur_delay;
-		u8 min_delay;
-		u8 max_delay;
-	} rps;
+	struct intel_gen6_power_mgmt rps;
 
 	/* ilk-only ips/rps state. Everything in here is protected by the global
 	 * mchdev_lock in intel_pm.c */
-	struct {
-		u8 cur_delay;
-		u8 min_delay;
-		u8 max_delay;
-		u8 fmax;
-		u8 fstart;
-
-		u64 last_count1;
-		unsigned long last_time1;
-		unsigned long chipset_power;
-		u64 last_count2;
-		struct timespec last_time2;
-		unsigned long gfx_power;
-		u8 corr;
-
-		int c_m;
-		int r_t;
-	} ips;
+	struct intel_ilk_power_mgmt ips;
 
 	enum no_fbc_reason no_fbc_reason;
 
@@ -871,14 +901,27 @@ typedef struct drm_i915_private {
 	/* list of fbdev register on this device */
 	struct intel_fbdev *fbdev;
 
+	/*
+	 * The console may be contended at resume, but we don't
+	 * want it to block on it.
+	 */
+	struct work_struct console_resume_work;
+
 	struct backlight_device *backlight;
 
 	struct drm_property *broadcast_rgb_property;
 	struct drm_property *force_audio_property;
 
-	struct work_struct parity_error_work;
 	bool hw_contexts_disabled;
 	uint32_t hw_context_size;
+
+	bool fdi_rx_polarity_reversed;
+
+	struct i915_suspend_saved_registers regfile;
+
+	/* Old dri1 support infrastructure, beware the dragons ya fools entering
+	 * here! */
+	struct i915_dri1_state dri1;
 } drm_i915_private_t;
 
 /* Iterate over initialised rings */
@@ -1120,9 +1163,14 @@ struct drm_i915_file_private {
 #define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
 #define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
 #define IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
+#define IS_IVB_GT1(dev)		((dev)->pci_device == 0x0156 || \
+				 (dev)->pci_device == 0x0152 || \
+				 (dev)->pci_device == 0x015a)
 #define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
 #define IS_HASWELL(dev)	(INTEL_INFO(dev)->is_haswell)
 #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
+#define IS_ULT(dev)		(IS_HASWELL(dev) && \
+				 ((dev)->pci_device & 0xFF00) == 0x0A00)
 
 /*
  * The genX designation typically refers to the render engine, so render
@@ -1168,6 +1216,13 @@ struct drm_i915_file_private {
 
 #define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
 
+#define INTEL_PCH_DEVICE_ID_MASK	0xff00
+#define INTEL_PCH_IBX_DEVICE_ID_TYPE	0x3b00
+#define INTEL_PCH_CPT_DEVICE_ID_TYPE	0x1c00
+#define INTEL_PCH_PPT_DEVICE_ID_TYPE	0x1e00
+#define INTEL_PCH_LPT_DEVICE_ID_TYPE	0x8c00
+#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE	0x9c00
+
 #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
@@ -1250,6 +1305,7 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 
+extern void intel_console_resume(struct work_struct *work);
 
 /* i915_irq.c */
 void i915_hangcheck_elapsed(unsigned long data);
@@ -1257,6 +1313,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged);
 
 extern void intel_irq_init(struct drm_device *dev);
 extern void intel_gt_init(struct drm_device *dev);
+extern void intel_gt_reset(struct drm_device *dev);
 
 void i915_error_state_free(struct kref *error_ref);
 
@@ -1368,8 +1425,7 @@ int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
 			 struct intel_ring_buffer *to);
 void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-				    struct intel_ring_buffer *ring,
-				    u32 seqno);
+				    struct intel_ring_buffer *ring);
 
 int i915_gem_dumb_create(struct drm_file *file_priv,
 			 struct drm_device *dev,
@@ -1387,7 +1443,7 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 	return (int32_t)(seq1 - seq2) >= 0;
 }
 
-u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
+extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
 
 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
@@ -1499,6 +1555,14 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
 			      unsigned long start,
 			      unsigned long mappable_end,
 			      unsigned long end);
+int i915_gem_gtt_init(struct drm_device *dev);
+void i915_gem_gtt_fini(struct drm_device *dev);
+static inline void i915_gem_chipset_flush(struct drm_device *dev)
+{
+	if (INTEL_INFO(dev)->gen < 6)
+		intel_gtt_chipset_flush();
+}
+
 
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
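The new inline makes the legacy GMCH flush conditional, which is what lets the call sites in i915_gem.c further down switch over blindly: on gen6+ the wrapper compiles to nothing. The converted call-site shape looks roughly like this (a sketch; the helper name flush_object_for_gpu is hypothetical, not a driver function):

static void flush_object_for_gpu(struct drm_device *dev,
				 struct drm_i915_gem_object *obj)
{
	i915_gem_clflush_object(obj);	/* push CPU caches to memory */
	i915_gem_chipset_flush(dev);	/* GMCH write buffer; no-op on gen6+ */
}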
@@ -1595,11 +1659,12 @@ extern void intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_gem_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
-extern void intel_modeset_setup_hw_state(struct drm_device *dev);
+extern void intel_modeset_setup_hw_state(struct drm_device *dev,
+					 bool force_restore);
 extern bool intel_fbc_enabled(struct drm_device *dev);
 extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
-extern void ironlake_init_pch_refclk(struct drm_device *dev);
+extern void intel_init_pch_refclk(struct drm_device *dev);
 extern void gen6_set_rps(struct drm_device *dev, u8 val);
 extern void intel_detect_pch(struct drm_device *dev);
 extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
@@ -1628,6 +1693,9 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
 void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
 int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
 
+int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
+
 #define __i915_read(x, y) \
 	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9b285da4449b..742206e45103 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -845,12 +845,12 @@ out:
 		 * domain anymore. */
 		if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
 			i915_gem_clflush_object(obj);
-			intel_gtt_chipset_flush();
+			i915_gem_chipset_flush(dev);
 		}
 	}
 
 	if (needs_clflush_after)
-		intel_gtt_chipset_flush();
+		i915_gem_chipset_flush(dev);
 
 	return ret;
 }
@@ -1345,30 +1345,17 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	trace_i915_gem_object_fault(obj, page_offset, true, write);
 
 	/* Now bind it into the GTT if needed */
-	if (!obj->map_and_fenceable) {
-		ret = i915_gem_object_unbind(obj);
-		if (ret)
-			goto unlock;
-	}
-	if (!obj->gtt_space) {
-		ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);
-		if (ret)
-			goto unlock;
-
-		ret = i915_gem_object_set_to_gtt_domain(obj, write);
-		if (ret)
-			goto unlock;
-	}
+	ret = i915_gem_object_pin(obj, 0, true, false);
+	if (ret)
+		goto unlock;
 
-	if (!obj->has_global_gtt_mapping)
-		i915_gem_gtt_bind_object(obj, obj->cache_level);
+	ret = i915_gem_object_set_to_gtt_domain(obj, write);
+	if (ret)
+		goto unpin;
 
 	ret = i915_gem_object_get_fence(obj);
 	if (ret)
-		goto unlock;
-
-	if (i915_gem_object_is_inactive(obj))
-		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+		goto unpin;
 
 	obj->fault_mappable = true;
 
@@ -1377,6 +1364,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 	/* Finally, remap it using the new GTT offset */
 	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+unpin:
+	i915_gem_object_unpin(obj);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 out:
@@ -1707,10 +1696,14 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 	if (obj->pages_pin_count)
 		return -EBUSY;
 
+	/* ->put_pages might need to allocate memory for the bit17 swizzle
+	 * array, hence protect them from being reaped by removing them from gtt
+	 * lists early. */
+	list_del(&obj->gtt_list);
+
 	ops->put_pages(obj);
 	obj->pages = NULL;
 
-	list_del(&obj->gtt_list);
 	if (i915_gem_object_is_purgeable(obj))
 		i915_gem_object_truncate(obj);
 
@@ -1868,11 +1861,11 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 
 void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-			       struct intel_ring_buffer *ring,
-			       u32 seqno)
+			       struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 seqno = intel_ring_get_seqno(ring);
 
 	BUG_ON(ring == NULL);
 	obj->ring = ring;
@@ -1933,26 +1926,54 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 	WARN_ON(i915_verify_lists(dev));
 }
 
-static u32
-i915_gem_get_seqno(struct drm_device *dev)
+static int
+i915_gem_handle_seqno_wrap(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 seqno = dev_priv->next_seqno;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_ring_buffer *ring;
+	int ret, i, j;
 
-	/* reserve 0 for non-seqno */
-	if (++dev_priv->next_seqno == 0)
-		dev_priv->next_seqno = 1;
+	/* The hardware uses various monotonic 32-bit counters, if we
+	 * detect that they will wraparound we need to idle the GPU
+	 * and reset those counters.
+	 */
+	ret = 0;
+	for_each_ring(ring, dev_priv, i) {
+		for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
+			ret |= ring->sync_seqno[j] != 0;
+	}
+	if (ret == 0)
+		return ret;
+
+	ret = i915_gpu_idle(dev);
+	if (ret)
+		return ret;
+
+	i915_gem_retire_requests(dev);
+	for_each_ring(ring, dev_priv, i) {
+		for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
+			ring->sync_seqno[j] = 0;
+	}
 
-	return seqno;
+	return 0;
 }
 
-u32
-i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
+int
+i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
 {
-	if (ring->outstanding_lazy_request == 0)
-		ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* reserve 0 for non-seqno */
+	if (dev_priv->next_seqno == 0) {
+		int ret = i915_gem_handle_seqno_wrap(dev);
+		if (ret)
+			return ret;
 
-	return ring->outstanding_lazy_request;
+		dev_priv->next_seqno = 1;
+	}
+
+	*seqno = dev_priv->next_seqno++;
+	return 0;
 }
 
 int
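The wrap handling above works because seqno comparisons elsewhere in the driver are already wraparound-safe: i915_seqno_passed() (visible earlier in this diff) compares via signed 32-bit subtraction, so ordering survives the counter rolling over as long as outstanding seqnos stay within 2^31 of each other. A self-contained illustration of that comparison, using the same expression:

#include <assert.h>
#include <stdint.h>

/* Same test the driver uses: a has passed b iff the signed difference
 * of the two unsigned counters is non-negative. */
static int seqno_passed(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) >= 0;
}

int main(void)
{
	assert(seqno_passed(100, 50));		/* plain ordering */
	assert(!seqno_passed(50, 100));
	assert(seqno_passed(5, 0xfffffff0u));	/* 5 is "after" a pre-wrap seqno */
	assert(!seqno_passed(0xfffffff0u, 5));
	return 0;
}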
@@ -1963,7 +1984,6 @@ i915_add_request(struct intel_ring_buffer *ring,
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	struct drm_i915_gem_request *request;
 	u32 request_ring_position;
-	u32 seqno;
 	int was_empty;
 	int ret;
 
@@ -1982,7 +2002,6 @@ i915_add_request(struct intel_ring_buffer *ring,
 	if (request == NULL)
 		return -ENOMEM;
 
-	seqno = i915_gem_next_request_seqno(ring);
 
 	/* Record the position of the start of the request so that
 	 * should we detect the updated seqno part-way through the
@@ -1991,15 +2010,13 @@ i915_add_request(struct intel_ring_buffer *ring,
 	 */
 	request_ring_position = intel_ring_get_tail(ring);
 
-	ret = ring->add_request(ring, &seqno);
+	ret = ring->add_request(ring);
 	if (ret) {
 		kfree(request);
 		return ret;
 	}
 
-	trace_i915_gem_request_add(ring, seqno);
-
-	request->seqno = seqno;
+	request->seqno = intel_ring_get_seqno(ring);
 	request->ring = ring;
 	request->tail = request_ring_position;
 	request->emitted_jiffies = jiffies;
@@ -2017,23 +2034,24 @@ i915_add_request(struct intel_ring_buffer *ring,
 		spin_unlock(&file_priv->mm.lock);
 	}
 
+	trace_i915_gem_request_add(ring, request->seqno);
 	ring->outstanding_lazy_request = 0;
 
 	if (!dev_priv->mm.suspended) {
 		if (i915_enable_hangcheck) {
 			mod_timer(&dev_priv->hangcheck_timer,
-				  jiffies +
-				  msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+				  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
 		}
 		if (was_empty) {
 			queue_delayed_work(dev_priv->wq,
-					   &dev_priv->mm.retire_work, HZ);
+					   &dev_priv->mm.retire_work,
+					   round_jiffies_up_relative(HZ));
 			intel_mark_busy(dev_priv->dev);
 		}
 	}
 
 	if (out_seqno)
-		*out_seqno = seqno;
+		*out_seqno = request->seqno;
 	return 0;
 }
 
@@ -2131,7 +2149,6 @@ void
 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 {
 	uint32_t seqno;
-	int i;
 
 	if (list_empty(&ring->request_list))
 		return;
@@ -2140,10 +2157,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 
 	seqno = ring->get_seqno(ring, true);
 
-	for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
-		if (seqno >= ring->sync_seqno[i])
-			ring->sync_seqno[i] = 0;
-
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 
@@ -2218,7 +2231,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
 
 	/* Come back later if the device is busy... */
 	if (!mutex_trylock(&dev->struct_mutex)) {
-		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
+				   round_jiffies_up_relative(HZ));
 		return;
 	}
 
@@ -2236,7 +2250,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	}
 
 	if (!dev_priv->mm.suspended && !idle)
-		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
+		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
+				   round_jiffies_up_relative(HZ));
 	if (idle)
 		intel_mark_idle(dev);
 
@@ -2386,7 +2401,11 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 
 	ret = to->sync_to(to, from, seqno);
 	if (!ret)
-		from->sync_seqno[idx] = seqno;
+		/* We use last_read_seqno because sync_to()
+		 * might have just caused seqno wrap under
+		 * the radar.
+		 */
+		from->sync_seqno[idx] = obj->last_read_seqno;
 
 	return ret;
 }
@@ -2469,14 +2488,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	return 0;
 }
 
-static int i915_ring_idle(struct intel_ring_buffer *ring)
-{
-	if (list_empty(&ring->active_list))
-		return 0;
-
-	return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
-}
-
 int i915_gpu_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2489,7 +2500,7 @@ int i915_gpu_idle(struct drm_device *dev)
 		if (ret)
 			return ret;
 
-		ret = i915_ring_idle(ring);
+		ret = intel_ring_idle(ring);
 		if (ret)
 			return ret;
 	}
@@ -2923,13 +2934,14 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
+	i915_gem_object_pin_pages(obj);
+
  search_free:
 	if (map_and_fenceable)
-		free_space =
-			drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
-							  size, alignment, obj->cache_level,
-							  0, dev_priv->mm.gtt_mappable_end,
-							  false);
+		free_space = drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
+							       size, alignment, obj->cache_level,
+							       0, dev_priv->mm.gtt_mappable_end,
+							       false);
 	else
 		free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
 						      size, alignment, obj->cache_level,
@@ -2937,60 +2949,60 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
 	if (free_space != NULL) {
 		if (map_and_fenceable)
-			obj->gtt_space =
+			free_space =
 				drm_mm_get_block_range_generic(free_space,
 							       size, alignment, obj->cache_level,
 							       0, dev_priv->mm.gtt_mappable_end,
 							       false);
 		else
-			obj->gtt_space =
+			free_space =
 				drm_mm_get_block_generic(free_space,
 							 size, alignment, obj->cache_level,
 							 false);
 	}
-	if (obj->gtt_space == NULL) {
+	if (free_space == NULL) {
 		ret = i915_gem_evict_something(dev, size, alignment,
 					       obj->cache_level,
 					       map_and_fenceable,
 					       nonblocking);
-		if (ret)
+		if (ret) {
+			i915_gem_object_unpin_pages(obj);
 			return ret;
+		}
 
 		goto search_free;
 	}
 	if (WARN_ON(!i915_gem_valid_gtt_space(dev,
-					      obj->gtt_space,
+					      free_space,
 					      obj->cache_level))) {
-		drm_mm_put_block(obj->gtt_space);
-		obj->gtt_space = NULL;
+		i915_gem_object_unpin_pages(obj);
+		drm_mm_put_block(free_space);
 		return -EINVAL;
 	}
 
-
 	ret = i915_gem_gtt_prepare_object(obj);
 	if (ret) {
-		drm_mm_put_block(obj->gtt_space);
-		obj->gtt_space = NULL;
+		i915_gem_object_unpin_pages(obj);
+		drm_mm_put_block(free_space);
 		return ret;
 	}
 
-	if (!dev_priv->mm.aliasing_ppgtt)
-		i915_gem_gtt_bind_object(obj, obj->cache_level);
-
 	list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
 	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
-	obj->gtt_offset = obj->gtt_space->start;
+	obj->gtt_space = free_space;
+	obj->gtt_offset = free_space->start;
 
 	fenceable =
-		obj->gtt_space->size == fence_size &&
-		(obj->gtt_space->start & (fence_alignment - 1)) == 0;
+		free_space->size == fence_size &&
+		(free_space->start & (fence_alignment - 1)) == 0;
 
 	mappable =
 		obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
 
 	obj->map_and_fenceable = mappable && fenceable;
 
+	i915_gem_object_unpin_pages(obj);
 	trace_i915_gem_object_bind(obj, map_and_fenceable);
 	i915_gem_verify_gtt(dev);
 	return 0;
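The rewritten binding path above brackets the whole allocation with pin_pages/unpin_pages so the object's backing pages cannot be reaped by the shrinker while the drm_mm node is being set up, which is why every early-exit branch gains an unpin. The shape of the guard, reduced to its essentials (a sketch; allocate_gtt_node() is a placeholder for the drm_mm search/get dance, not a real function):

/* Sketch: hold a pin across the allocation; drop it on every exit path. */
i915_gem_object_pin_pages(obj);

node = allocate_gtt_node(obj);		/* placeholder for the drm_mm calls */
if (node == NULL) {
	i915_gem_object_unpin_pages(obj);
	return -ENOSPC;
}

obj->gtt_space = node;
i915_gem_object_unpin_pages(obj);	/* bound: the GTT mapping now holds it */
return 0;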
@@ -3059,7 +3071,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 		return;
 
 	i915_gem_clflush_object(obj);
-	intel_gtt_chipset_flush();
+	i915_gem_chipset_flush(obj->base.dev);
 	old_write_domain = obj->base.write_domain;
 	obj->base.write_domain = 0;
 
@@ -3454,11 +3466,16 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	}
 
 	if (obj->gtt_space == NULL) {
+		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+
 		ret = i915_gem_object_bind_to_gtt(obj, alignment,
 						  map_and_fenceable,
 						  nonblocking);
 		if (ret)
 			return ret;
+
+		if (!dev_priv->mm.aliasing_ppgtt)
+			i915_gem_gtt_bind_object(obj, obj->cache_level);
 	}
 
 	if (!obj->has_global_gtt_mapping && map_and_fenceable)
@@ -3832,7 +3849,7 @@ void i915_gem_l3_remap(struct drm_device *dev)
 	if (!IS_IVYBRIDGE(dev))
 		return;
 
-	if (!dev_priv->mm.l3_remap_info)
+	if (!dev_priv->l3_parity.remap_info)
 		return;
 
 	misccpctl = I915_READ(GEN7_MISCCPCTL);
@@ -3841,12 +3858,12 @@ void i915_gem_l3_remap(struct drm_device *dev)
 
 	for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
 		u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
-		if (remap && remap != dev_priv->mm.l3_remap_info[i/4])
+		if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
 			DRM_DEBUG("0x%x was already programmed to %x\n",
 				  GEN7_L3LOG_BASE + i, remap);
-		if (remap && !dev_priv->mm.l3_remap_info[i/4])
+		if (remap && !dev_priv->l3_parity.remap_info[i/4])
 			DRM_DEBUG_DRIVER("Clearing remapped register\n");
-		I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]);
+		I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
 	}
 
 	/* Make sure all the writes land before disabling dop clock gating */
@@ -3876,68 +3893,6 @@ void i915_gem_init_swizzling(struct drm_device *dev)
3876 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); 3893 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
3877} 3894}
3878 3895
3879void i915_gem_init_ppgtt(struct drm_device *dev)
3880{
3881 drm_i915_private_t *dev_priv = dev->dev_private;
3882 uint32_t pd_offset;
3883 struct intel_ring_buffer *ring;
3884 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
3885 uint32_t __iomem *pd_addr;
3886 uint32_t pd_entry;
3887 int i;
3888
3889 if (!dev_priv->mm.aliasing_ppgtt)
3890 return;
3891
3892
3893 pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
3894 for (i = 0; i < ppgtt->num_pd_entries; i++) {
3895 dma_addr_t pt_addr;
3896
3897 if (dev_priv->mm.gtt->needs_dmar)
3898 pt_addr = ppgtt->pt_dma_addr[i];
3899 else
3900 pt_addr = page_to_phys(ppgtt->pt_pages[i]);
3901
3902 pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
3903 pd_entry |= GEN6_PDE_VALID;
3904
3905 writel(pd_entry, pd_addr + i);
3906 }
3907 readl(pd_addr);
3908
3909 pd_offset = ppgtt->pd_offset;
3910 pd_offset /= 64; /* in cachelines, */
3911 pd_offset <<= 16;
3912
3913 if (INTEL_INFO(dev)->gen == 6) {
3914 uint32_t ecochk, gab_ctl, ecobits;
3915
3916 ecobits = I915_READ(GAC_ECO_BITS);
3917 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
3918
3919 gab_ctl = I915_READ(GAB_CTL);
3920 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
3921
3922 ecochk = I915_READ(GAM_ECOCHK);
3923 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
3924 ECOCHK_PPGTT_CACHE64B);
3925 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
3926 } else if (INTEL_INFO(dev)->gen >= 7) {
3927 I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
3928 /* GFX_MODE is per-ring on gen7+ */
3929 }
3930
3931 for_each_ring(ring, dev_priv, i) {
3932 if (INTEL_INFO(dev)->gen >= 7)
3933 I915_WRITE(RING_MODE_GEN7(ring),
3934 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
3935
3936 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
3937 I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
3938 }
3939}
3940
3941static bool 3896static bool
3942intel_enable_blt(struct drm_device *dev) 3897intel_enable_blt(struct drm_device *dev)
3943{ 3898{
@@ -3960,7 +3915,7 @@ i915_gem_init_hw(struct drm_device *dev)
3960 drm_i915_private_t *dev_priv = dev->dev_private; 3915 drm_i915_private_t *dev_priv = dev->dev_private;
3961 int ret; 3916 int ret;
3962 3917
3963 if (!intel_enable_gtt()) 3918 if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
3964 return -EIO; 3919 return -EIO;
3965 3920
3966 if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1)) 3921 if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
@@ -4295,7 +4250,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
4295 page_cache_release(page); 4250 page_cache_release(page);
4296 } 4251 }
4297 } 4252 }
4298 intel_gtt_chipset_flush(); 4253 i915_gem_chipset_flush(dev);
4299 4254
4300 obj->phys_obj->cur_obj = NULL; 4255 obj->phys_obj->cur_obj = NULL;
4301 obj->phys_obj = NULL; 4256 obj->phys_obj = NULL;
@@ -4382,7 +4337,7 @@ i915_gem_phys_pwrite(struct drm_device *dev,
4382 return -EFAULT; 4337 return -EFAULT;
4383 } 4338 }
4384 4339
4385 intel_gtt_chipset_flush(); 4340 i915_gem_chipset_flush(dev);
4386 return 0; 4341 return 0;
4387} 4342}
4388 4343
@@ -4407,6 +4362,19 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4407 spin_unlock(&file_priv->mm.lock); 4362 spin_unlock(&file_priv->mm.lock);
4408} 4363}
4409 4364
4365static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
4366{
4367 if (!mutex_is_locked(mutex))
4368 return false;
4369
4370#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
4371 return mutex->owner == task;
4372#else
4373 /* Since UP may be pre-empted, we cannot assume that we own the lock */
4374 return false;
4375#endif
4376}
4377
4410static int 4378static int
4411i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) 4379i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4412{ 4380{
@@ -4417,10 +4385,15 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4417 struct drm_device *dev = dev_priv->dev; 4385 struct drm_device *dev = dev_priv->dev;
4418 struct drm_i915_gem_object *obj; 4386 struct drm_i915_gem_object *obj;
4419 int nr_to_scan = sc->nr_to_scan; 4387 int nr_to_scan = sc->nr_to_scan;
4388 bool unlock = true;
4420 int cnt; 4389 int cnt;
4421 4390
4422 if (!mutex_trylock(&dev->struct_mutex)) 4391 if (!mutex_trylock(&dev->struct_mutex)) {
4423 return 0; 4392 if (!mutex_is_locked_by(&dev->struct_mutex, current))
4393 return 0;
4394
4395 unlock = false;
4396 }
4424 4397
4425 if (nr_to_scan) { 4398 if (nr_to_scan) {
4426 nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan); 4399 nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
@@ -4436,6 +4409,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
4436 if (obj->pin_count == 0 && obj->pages_pin_count == 0) 4409 if (obj->pin_count == 0 && obj->pages_pin_count == 0)
4437 cnt += obj->base.size >> PAGE_SHIFT; 4410 cnt += obj->base.size >> PAGE_SHIFT;
4438 4411
4439 mutex_unlock(&dev->struct_mutex); 4412 if (unlock)
4413 mutex_unlock(&dev->struct_mutex);
4440 return cnt; 4414 return cnt;
4441} 4415}
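
The unlock bookkeeping added above exists because the shrinker can be entered from an allocation made while struct_mutex is already held by the current task; mutex_is_locked_by() (only reliable when the mutex owner is tracked, i.e. SMP or CONFIG_DEBUG_MUTEXES builds) lets that re-entrant case proceed without self-deadlocking. Condensed, the pattern is:

	bool unlock = true;

	if (!mutex_trylock(&dev->struct_mutex)) {
		if (!mutex_is_locked_by(&dev->struct_mutex, current))
			return 0;	/* held by someone else: back off */
		unlock = false;		/* re-entered under our own lock */
	}

	/* ... purge and count pages as above ... */

	if (unlock)
		mutex_unlock(&dev->struct_mutex);
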
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 05ed42f203d7..a3f06bcad551 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -146,7 +146,7 @@ create_hw_context(struct drm_device *dev,
146 struct i915_hw_context *ctx; 146 struct i915_hw_context *ctx;
147 int ret, id; 147 int ret, id;
148 148
149 ctx = kzalloc(sizeof(struct drm_i915_file_private), GFP_KERNEL); 149 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
150 if (ctx == NULL) 150 if (ctx == NULL)
151 return ERR_PTR(-ENOMEM); 151 return ERR_PTR(-ENOMEM);
152 152
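
The one-line fix above replaces an allocation sized by an unrelated struct (drm_i915_file_private) with the self-sizing idiom, which cannot drift if the pointer's type ever changes:

	struct i915_hw_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);	/* sized from the pointee */
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);
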
@@ -410,9 +410,8 @@ static int do_switch(struct i915_hw_context *to)
410 * MI_SET_CONTEXT instead of when the next seqno has completed. 410 * MI_SET_CONTEXT instead of when the next seqno has completed.
411 */ 411 */
412 if (from_obj != NULL) { 412 if (from_obj != NULL) {
413 u32 seqno = i915_gem_next_request_seqno(ring);
414 from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; 413 from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
415 i915_gem_object_move_to_active(from_obj, ring, seqno); 414 i915_gem_object_move_to_active(from_obj, ring);
416 /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the 415 /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
417 * whole damn pipeline, we don't need to explicitly mark the 416 * whole damn pipeline, we don't need to explicitly mark the
418 * object dirty. The only exception is that the context must be 417 * object dirty. The only exception is that the context must be
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3eea143749f6..ee8f97f0539e 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -128,15 +128,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
128 target_i915_obj->cache_level); 128 target_i915_obj->cache_level);
129 } 129 }
130 130
131 /* The target buffer should have appeared before us in the
132 * exec_object list, so it should have a GTT space bound by now.
133 */
134 if (unlikely(target_offset == 0)) {
135 DRM_DEBUG("No GTT space found for object %d\n",
136 reloc->target_handle);
137 return ret;
138 }
139
140 /* Validate that the target is in a valid r/w GPU domain */ 131 /* Validate that the target is in a valid r/w GPU domain */
141 if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) { 132 if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
142 DRM_DEBUG("reloc with multiple write domains: " 133 DRM_DEBUG("reloc with multiple write domains: "
@@ -672,7 +663,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
672 } 663 }
673 664
674 if (flush_domains & I915_GEM_DOMAIN_CPU) 665 if (flush_domains & I915_GEM_DOMAIN_CPU)
675 intel_gtt_chipset_flush(); 666 i915_gem_chipset_flush(ring->dev);
676 667
677 if (flush_domains & I915_GEM_DOMAIN_GTT) 668 if (flush_domains & I915_GEM_DOMAIN_GTT)
678 wmb(); 669 wmb();
@@ -722,8 +713,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
722 713
723static void 714static void
724i915_gem_execbuffer_move_to_active(struct list_head *objects, 715i915_gem_execbuffer_move_to_active(struct list_head *objects,
725 struct intel_ring_buffer *ring, 716 struct intel_ring_buffer *ring)
726 u32 seqno)
727{ 717{
728 struct drm_i915_gem_object *obj; 718 struct drm_i915_gem_object *obj;
729 719
@@ -735,10 +725,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
735 obj->base.write_domain = obj->base.pending_write_domain; 725 obj->base.write_domain = obj->base.pending_write_domain;
736 obj->fenced_gpu_access = obj->pending_fenced_gpu_access; 726 obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
737 727
738 i915_gem_object_move_to_active(obj, ring, seqno); 728 i915_gem_object_move_to_active(obj, ring);
739 if (obj->base.write_domain) { 729 if (obj->base.write_domain) {
740 obj->dirty = 1; 730 obj->dirty = 1;
741 obj->last_write_seqno = seqno; 731 obj->last_write_seqno = intel_ring_get_seqno(ring);
742 if (obj->pin_count) /* check for potential scanout */ 732 if (obj->pin_count) /* check for potential scanout */
743 intel_mark_fb_busy(obj); 733 intel_mark_fb_busy(obj);
744 } 734 }
@@ -798,8 +788,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
798 struct intel_ring_buffer *ring; 788 struct intel_ring_buffer *ring;
799 u32 ctx_id = i915_execbuffer2_get_context_id(*args); 789 u32 ctx_id = i915_execbuffer2_get_context_id(*args);
800 u32 exec_start, exec_len; 790 u32 exec_start, exec_len;
801 u32 seqno;
802 u32 mask; 791 u32 mask;
792 u32 flags;
803 int ret, mode, i; 793 int ret, mode, i;
804 794
805 if (!i915_gem_check_execbuffer(args)) { 795 if (!i915_gem_check_execbuffer(args)) {
@@ -811,6 +801,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
811 if (ret) 801 if (ret)
812 return ret; 802 return ret;
813 803
804 flags = 0;
805 if (args->flags & I915_EXEC_SECURE) {
806 if (!file->is_master || !capable(CAP_SYS_ADMIN))
807 return -EPERM;
808
809 flags |= I915_DISPATCH_SECURE;
810 }
811
814 switch (args->flags & I915_EXEC_RING_MASK) { 812 switch (args->flags & I915_EXEC_RING_MASK) {
815 case I915_EXEC_DEFAULT: 813 case I915_EXEC_DEFAULT:
816 case I915_EXEC_RENDER: 814 case I915_EXEC_RENDER:
@@ -983,26 +981,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
983 } 981 }
984 batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND; 982 batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
985 983
984 /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
985 * batch" bit. Hence we need to pin secure batches into the global gtt.
986 * hsw should have this fixed, but let's be paranoid and do it
987 * unconditionally for now. */
988 if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
989 i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
990
986 ret = i915_gem_execbuffer_move_to_gpu(ring, &objects); 991 ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
987 if (ret) 992 if (ret)
988 goto err; 993 goto err;
989 994
990 seqno = i915_gem_next_request_seqno(ring);
991 for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++) {
992 if (seqno < ring->sync_seqno[i]) {
993 /* The GPU can not handle its semaphore value wrapping,
994 * so every billion or so execbuffers, we need to stall
995 * the GPU in order to reset the counters.
996 */
997 ret = i915_gpu_idle(dev);
998 if (ret)
999 goto err;
1000 i915_gem_retire_requests(dev);
1001
1002 BUG_ON(ring->sync_seqno[i]);
1003 }
1004 }
1005
1006 ret = i915_switch_context(ring, file, ctx_id); 995 ret = i915_switch_context(ring, file, ctx_id);
1007 if (ret) 996 if (ret)
1008 goto err; 997 goto err;
@@ -1028,8 +1017,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1028 goto err; 1017 goto err;
1029 } 1018 }
1030 1019
1031 trace_i915_gem_ring_dispatch(ring, seqno);
1032
1033 exec_start = batch_obj->gtt_offset + args->batch_start_offset; 1020 exec_start = batch_obj->gtt_offset + args->batch_start_offset;
1034 exec_len = args->batch_len; 1021 exec_len = args->batch_len;
1035 if (cliprects) { 1022 if (cliprects) {
@@ -1040,17 +1027,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1040 goto err; 1027 goto err;
1041 1028
1042 ret = ring->dispatch_execbuffer(ring, 1029 ret = ring->dispatch_execbuffer(ring,
1043 exec_start, exec_len); 1030 exec_start, exec_len,
1031 flags);
1044 if (ret) 1032 if (ret)
1045 goto err; 1033 goto err;
1046 } 1034 }
1047 } else { 1035 } else {
1048 ret = ring->dispatch_execbuffer(ring, exec_start, exec_len); 1036 ret = ring->dispatch_execbuffer(ring,
1037 exec_start, exec_len,
1038 flags);
1049 if (ret) 1039 if (ret)
1050 goto err; 1040 goto err;
1051 } 1041 }
1052 1042
1053 i915_gem_execbuffer_move_to_active(&objects, ring, seqno); 1043 trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
1044
1045 i915_gem_execbuffer_move_to_active(&objects, ring);
1054 i915_gem_execbuffer_retire_commands(dev, file, ring); 1046 i915_gem_execbuffer_retire_commands(dev, file, ring);
1055 1047
1056err: 1048err:
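
Taken together, the execbuffer changes add an I915_EXEC_SECURE flag (refused with -EPERM unless the caller is DRM master and has CAP_SYS_ADMIN), pin such batches into the global GTT on snb/ivb/vlv where the non-secure bit doubles as the "batch in ppgtt" bit, and replace the per-execbuf seqno bookkeeping with intel_ring_get_seqno(). From userspace the new flag would be requested roughly as follows (a hedged sketch; the fd and the exec-object array setup are assumed):

	struct drm_i915_gem_execbuffer2 execbuf = {
		.buffers_ptr  = (uintptr_t)exec_objects,
		.buffer_count = num_objects,
		.batch_len    = batch_len,
		.flags        = I915_EXEC_RENDER | I915_EXEC_SECURE,
	};

	/* -EPERM unless DRM master with CAP_SYS_ADMIN. */
	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
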
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index df470b5e8d36..2c150dee78a7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -28,19 +28,67 @@
28#include "i915_trace.h" 28#include "i915_trace.h"
29#include "intel_drv.h" 29#include "intel_drv.h"
30 30
31typedef uint32_t gtt_pte_t;
32
33/* PPGTT stuff */
34#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
35
36#define GEN6_PDE_VALID (1 << 0)
37/* gen6+ has bit 11-4 for physical addr bit 39-32 */
38#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
39
40#define GEN6_PTE_VALID (1 << 0)
41#define GEN6_PTE_UNCACHED (1 << 1)
42#define HSW_PTE_UNCACHED (0)
43#define GEN6_PTE_CACHE_LLC (2 << 1)
44#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
45#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
46
47static inline gtt_pte_t pte_encode(struct drm_device *dev,
48 dma_addr_t addr,
49 enum i915_cache_level level)
50{
51 gtt_pte_t pte = GEN6_PTE_VALID;
52 pte |= GEN6_PTE_ADDR_ENCODE(addr);
53
54 switch (level) {
55 case I915_CACHE_LLC_MLC:
56 /* Haswell doesn't set L3 this way */
57 if (IS_HASWELL(dev))
58 pte |= GEN6_PTE_CACHE_LLC;
59 else
60 pte |= GEN6_PTE_CACHE_LLC_MLC;
61 break;
62 case I915_CACHE_LLC:
63 pte |= GEN6_PTE_CACHE_LLC;
64 break;
65 case I915_CACHE_NONE:
66 if (IS_HASWELL(dev))
67 pte |= HSW_PTE_UNCACHED;
68 else
69 pte |= GEN6_PTE_UNCACHED;
70 break;
71 default:
72 BUG();
73 }
74
75
76 return pte;
77}
78
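
pte_encode() combines the cache-control bits with GEN6_GTT_ADDR_ENCODE(), which folds a 40-bit physical address into a 32-bit PTE: bits 31:12 stay in place and bits 39:32 are shifted down into PTE bits 11:4. A worked example (address chosen purely for illustration):

	/* addr = 0x12_3456_7000: bits 39:32 = 0x12, page-aligned low bits.
	 *   addr >> 28           = 0x123
	 *   (addr >> 28) & 0xff0 = 0x120	(bits 39:32 now at 11:4)
	 *   addr | 0x120, as u32 = 0x34567120
	 */
	gtt_pte_t pte = GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC |
			GEN6_GTT_ADDR_ENCODE(0x1234567000ULL);
	/* pte == 0x34567120 | 0x1 | 0x4 == 0x34567125 */
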
31/* PPGTT support for Sandybridge/Gen6 and later */ 79/* PPGTT support for Sandybridge/Gen6 and later */
32static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, 80static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
33 unsigned first_entry, 81 unsigned first_entry,
34 unsigned num_entries) 82 unsigned num_entries)
35{ 83{
36 uint32_t *pt_vaddr; 84 gtt_pte_t *pt_vaddr;
37 uint32_t scratch_pte; 85 gtt_pte_t scratch_pte;
38 unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; 86 unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
39 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; 87 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
40 unsigned last_pte, i; 88 unsigned last_pte, i;
41 89
42 scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr); 90 scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr,
43 scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC; 91 I915_CACHE_LLC);
44 92
45 while (num_entries) { 93 while (num_entries) {
46 last_pte = first_pte + num_entries; 94 last_pte = first_pte + num_entries;
@@ -77,6 +125,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
77 if (!ppgtt) 125 if (!ppgtt)
78 return ret; 126 return ret;
79 127
128 ppgtt->dev = dev;
80 ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES; 129 ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
81 ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries, 130 ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
82 GFP_KERNEL); 131 GFP_KERNEL);
@@ -118,7 +167,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
118 i915_ppgtt_clear_range(ppgtt, 0, 167 i915_ppgtt_clear_range(ppgtt, 0,
119 ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES); 168 ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
120 169
121 ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t); 170 ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);
122 171
123 dev_priv->mm.aliasing_ppgtt = ppgtt; 172 dev_priv->mm.aliasing_ppgtt = ppgtt;
124 173
@@ -168,9 +217,9 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
168static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt, 217static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
169 const struct sg_table *pages, 218 const struct sg_table *pages,
170 unsigned first_entry, 219 unsigned first_entry,
171 uint32_t pte_flags) 220 enum i915_cache_level cache_level)
172{ 221{
173 uint32_t *pt_vaddr, pte; 222 gtt_pte_t *pt_vaddr;
174 unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; 223 unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
175 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; 224 unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
176 unsigned i, j, m, segment_len; 225 unsigned i, j, m, segment_len;
@@ -188,8 +237,8 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
188 237
189 for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) { 238 for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
190 page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT); 239 page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
191 pte = GEN6_PTE_ADDR_ENCODE(page_addr); 240 pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr,
192 pt_vaddr[j] = pte | pte_flags; 241 cache_level);
193 242
194 /* grab the next page */ 243 /* grab the next page */
195 if (++m == segment_len) { 244 if (++m == segment_len) {
@@ -213,29 +262,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
213 struct drm_i915_gem_object *obj, 262 struct drm_i915_gem_object *obj,
214 enum i915_cache_level cache_level) 263 enum i915_cache_level cache_level)
215{ 264{
216 uint32_t pte_flags = GEN6_PTE_VALID;
217
218 switch (cache_level) {
219 case I915_CACHE_LLC_MLC:
220 pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
221 break;
222 case I915_CACHE_LLC:
223 pte_flags |= GEN6_PTE_CACHE_LLC;
224 break;
225 case I915_CACHE_NONE:
226 if (IS_HASWELL(obj->base.dev))
227 pte_flags |= HSW_PTE_UNCACHED;
228 else
229 pte_flags |= GEN6_PTE_UNCACHED;
230 break;
231 default:
232 BUG();
233 }
234
235 i915_ppgtt_insert_sg_entries(ppgtt, 265 i915_ppgtt_insert_sg_entries(ppgtt,
236 obj->pages, 266 obj->pages,
237 obj->gtt_space->start >> PAGE_SHIFT, 267 obj->gtt_space->start >> PAGE_SHIFT,
238 pte_flags); 268 cache_level);
239} 269}
240 270
241void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, 271void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
@@ -246,23 +276,65 @@ void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
246 obj->base.size >> PAGE_SHIFT); 276 obj->base.size >> PAGE_SHIFT);
247} 277}
248 278
249/* XXX kill agp_type! */ 279void i915_gem_init_ppgtt(struct drm_device *dev)
250static unsigned int cache_level_to_agp_type(struct drm_device *dev,
251 enum i915_cache_level cache_level)
252{ 280{
253 switch (cache_level) { 281 drm_i915_private_t *dev_priv = dev->dev_private;
254 case I915_CACHE_LLC_MLC: 282 uint32_t pd_offset;
255 if (INTEL_INFO(dev)->gen >= 6) 283 struct intel_ring_buffer *ring;
256 return AGP_USER_CACHED_MEMORY_LLC_MLC; 284 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
257 /* Older chipsets do not have this extra level of CPU 285 uint32_t __iomem *pd_addr;
258 * caching, so fallthrough and request the PTE simply 286 uint32_t pd_entry;
259 * as cached. 287 int i;
260 */ 288
261 case I915_CACHE_LLC: 289 if (!dev_priv->mm.aliasing_ppgtt)
262 return AGP_USER_CACHED_MEMORY; 290 return;
263 default: 291
264 case I915_CACHE_NONE: 292
265 return AGP_USER_MEMORY; 293 pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
294 for (i = 0; i < ppgtt->num_pd_entries; i++) {
295 dma_addr_t pt_addr;
296
297 if (dev_priv->mm.gtt->needs_dmar)
298 pt_addr = ppgtt->pt_dma_addr[i];
299 else
300 pt_addr = page_to_phys(ppgtt->pt_pages[i]);
301
302 pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
303 pd_entry |= GEN6_PDE_VALID;
304
305 writel(pd_entry, pd_addr + i);
306 }
307 readl(pd_addr);
308
309 pd_offset = ppgtt->pd_offset;
310 pd_offset /= 64; /* in cachelines, */
311 pd_offset <<= 16;
312
313 if (INTEL_INFO(dev)->gen == 6) {
314 uint32_t ecochk, gab_ctl, ecobits;
315
316 ecobits = I915_READ(GAC_ECO_BITS);
317 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
318
319 gab_ctl = I915_READ(GAB_CTL);
320 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
321
322 ecochk = I915_READ(GAM_ECOCHK);
323 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
324 ECOCHK_PPGTT_CACHE64B);
325 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
326 } else if (INTEL_INFO(dev)->gen >= 7) {
327 I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
328 /* GFX_MODE is per-ring on gen7+ */
329 }
330
331 for_each_ring(ring, dev_priv, i) {
332 if (INTEL_INFO(dev)->gen >= 7)
333 I915_WRITE(RING_MODE_GEN7(ring),
334 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
335
336 I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
337 I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
266 } 338 }
267} 339}
268 340
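
The pd_offset arithmetic in i915_gem_init_ppgtt() converts the page-directory offset into the format RING_PP_DIR_BASE expects: a count of 64-byte cachelines placed in the upper half of the register. For a hypothetical offset:

	uint32_t pd_offset = 0x40000;	/* PD offset in bytes (example value) */

	pd_offset /= 64;	/* 0x1000 cachelines */
	pd_offset <<= 16;	/* 0x10000000, the value written per ring */
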
@@ -288,13 +360,40 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
288 dev_priv->mm.interruptible = interruptible; 360 dev_priv->mm.interruptible = interruptible;
289} 361}
290 362
363
364static void i915_ggtt_clear_range(struct drm_device *dev,
365 unsigned first_entry,
366 unsigned num_entries)
367{
368 struct drm_i915_private *dev_priv = dev->dev_private;
369 gtt_pte_t scratch_pte;
370 gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
371 const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
372 int i;
373
374 if (INTEL_INFO(dev)->gen < 6) {
375 intel_gtt_clear_range(first_entry, num_entries);
376 return;
377 }
378
379 if (WARN(num_entries > max_entries,
380 "First entry = %d; Num entries = %d (max=%d)\n",
381 first_entry, num_entries, max_entries))
382 num_entries = max_entries;
383
384 scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
385 for (i = 0; i < num_entries; i++)
386 iowrite32(scratch_pte, &gtt_base[i]);
387 readl(gtt_base);
388}
389
291void i915_gem_restore_gtt_mappings(struct drm_device *dev) 390void i915_gem_restore_gtt_mappings(struct drm_device *dev)
292{ 391{
293 struct drm_i915_private *dev_priv = dev->dev_private; 392 struct drm_i915_private *dev_priv = dev->dev_private;
294 struct drm_i915_gem_object *obj; 393 struct drm_i915_gem_object *obj;
295 394
296 /* First fill our portion of the GTT with scratch pages */ 395 /* First fill our portion of the GTT with scratch pages */
297 intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE, 396 i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE,
298 (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); 397 (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
299 398
300 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { 399 list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
@@ -302,7 +401,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
302 i915_gem_gtt_bind_object(obj, obj->cache_level); 401 i915_gem_gtt_bind_object(obj, obj->cache_level);
303 } 402 }
304 403
305 intel_gtt_chipset_flush(); 404 i915_gem_chipset_flush(dev);
306} 405}
307 406
308int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) 407int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
@@ -318,21 +417,76 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
318 return 0; 417 return 0;
319} 418}
320 419
420/*
421 * Binds an object into the global gtt with the specified cache level. The object
422 * will be accessible to the GPU via commands whose operands reference offsets
423 * within the global GTT as well as accessible by the CPU through the GMADR
424 * mapped BAR (dev_priv->mm.gtt->gtt).
425 */
426static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
427 enum i915_cache_level level)
428{
429 struct drm_device *dev = obj->base.dev;
430 struct drm_i915_private *dev_priv = dev->dev_private;
431 struct sg_table *st = obj->pages;
432 struct scatterlist *sg = st->sgl;
433 const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
434 const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
435 gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
436 int unused, i = 0;
437 unsigned int len, m = 0;
438 dma_addr_t addr;
439
440 for_each_sg(st->sgl, sg, st->nents, unused) {
441 len = sg_dma_len(sg) >> PAGE_SHIFT;
442 for (m = 0; m < len; m++) {
443 addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
444 iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]);
445 i++;
446 }
447 }
448
449 BUG_ON(i > max_entries);
450 BUG_ON(i != obj->base.size / PAGE_SIZE);
451
452 /* XXX: This serves as a posting read to make sure that the PTE has
453 * actually been updated. There is some concern that even though
454 * registers and PTEs are within the same BAR, they may still be subject
455 * to NUMA access patterns. Therefore, even with the way we assume
456 * hardware should work, we must keep this posting read for paranoia.
457 */
458 if (i != 0)
459 WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level));
460
461 /* This next bit makes the above posting read even more important. We
462 * want to flush the TLBs only after we're certain all the PTE updates
463 * have finished.
464 */
465 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
466 POSTING_READ(GFX_FLSH_CNTL_GEN6);
467}
468
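
Because the GEN6+ PTEs are written through a write-combined ioremap (set up below in i915_gem_gtt_init()), gen6_ggtt_bind_object() ends with the classic write/post/flush ordering: read back the last PTE to drain the WC buffer, and only then trigger (and post) the chipset TLB flush. Reduced to its skeleton:

	iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]);	/* WC write */
	(void)readl(&gtt_entries[i]);	/* posting read: PTE updates visible */

	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);	/* flush TLBs */
	POSTING_READ(GFX_FLSH_CNTL_GEN6);	/* post that write as well */
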
321void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, 469void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
322 enum i915_cache_level cache_level) 470 enum i915_cache_level cache_level)
323{ 471{
324 struct drm_device *dev = obj->base.dev; 472 struct drm_device *dev = obj->base.dev;
325 unsigned int agp_type = cache_level_to_agp_type(dev, cache_level); 473 if (INTEL_INFO(dev)->gen < 6) {
474 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
475 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
476 intel_gtt_insert_sg_entries(obj->pages,
477 obj->gtt_space->start >> PAGE_SHIFT,
478 flags);
479 } else {
480 gen6_ggtt_bind_object(obj, cache_level);
481 }
326 482
327 intel_gtt_insert_sg_entries(obj->pages,
328 obj->gtt_space->start >> PAGE_SHIFT,
329 agp_type);
330 obj->has_global_gtt_mapping = 1; 483 obj->has_global_gtt_mapping = 1;
331} 484}
332 485
333void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) 486void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
334{ 487{
335 intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, 488 i915_ggtt_clear_range(obj->base.dev,
489 obj->gtt_space->start >> PAGE_SHIFT,
336 obj->base.size >> PAGE_SHIFT); 490 obj->base.size >> PAGE_SHIFT);
337 491
338 obj->has_global_gtt_mapping = 0; 492 obj->has_global_gtt_mapping = 0;
@@ -390,5 +544,165 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
390 dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start; 544 dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
391 545
392 /* ... but ensure that we clear the entire range. */ 546 /* ... but ensure that we clear the entire range. */
393 intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE); 547 i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
548}
549
550static int setup_scratch_page(struct drm_device *dev)
551{
552 struct drm_i915_private *dev_priv = dev->dev_private;
553 struct page *page;
554 dma_addr_t dma_addr;
555
556 page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
557 if (page == NULL)
558 return -ENOMEM;
559 get_page(page);
560 set_pages_uc(page, 1);
561
562#ifdef CONFIG_INTEL_IOMMU
563 dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
564 PCI_DMA_BIDIRECTIONAL);
565 if (pci_dma_mapping_error(dev->pdev, dma_addr))
566 return -EINVAL;
567#else
568 dma_addr = page_to_phys(page);
569#endif
570 dev_priv->mm.gtt->scratch_page = page;
571 dev_priv->mm.gtt->scratch_page_dma = dma_addr;
572
573 return 0;
574}
575
576static void teardown_scratch_page(struct drm_device *dev)
577{
578 struct drm_i915_private *dev_priv = dev->dev_private;
579 set_pages_wb(dev_priv->mm.gtt->scratch_page, 1);
580 pci_unmap_page(dev->pdev, dev_priv->mm.gtt->scratch_page_dma,
581 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
582 put_page(dev_priv->mm.gtt->scratch_page);
583 __free_page(dev_priv->mm.gtt->scratch_page);
584}
585
586static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
587{
588 snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
589 snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
590 return snb_gmch_ctl << 20;
591}
592
593static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl)
594{
595 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
596 snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
597 return snb_gmch_ctl << 25; /* 32 MB units */
598}
599
600static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
601{
602 static const int stolen_decoder[] = {
603 0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
604 snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
605 snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
606 return stolen_decoder[snb_gmch_ctl] << 20;
607}
608
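
These helpers decode the SNB_GMCH_CTRL config word: GGMS (bits 9:8) gives the size of the GTT's PTE space in 1 MB units, and GMS gives the stolen-memory size, in 32 MB units on gen6 and via a lookup table on gen7. A worked example with a hypothetical register value:

	u16 snb_gmch_ctl = 0x0211;	/* example value, for illustration only */

	/* GGMS: (0x0211 >> 8) & 0x3 = 2 -> 2 MiB of PTEs,
	 * i.e. 512Ki entries mapping a 2 GiB GTT. */
	unsigned int gtt_bytes = gen6_get_total_gtt_size(snb_gmch_ctl);

	/* GMS (gen6): (0x0211 >> 3) & 0x1f = 2 -> 2 * 32 MiB = 64 MiB stolen. */
	unsigned int stolen_bytes = gen6_get_stolen_size(snb_gmch_ctl);
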
609int i915_gem_gtt_init(struct drm_device *dev)
610{
611 struct drm_i915_private *dev_priv = dev->dev_private;
612 phys_addr_t gtt_bus_addr;
613 u16 snb_gmch_ctl;
614 int ret;
615
616 /* On modern platforms we need not worry ourselves with the legacy
617 * hostbridge query stuff. Skip it entirely.
618 */
619 if (INTEL_INFO(dev)->gen < 6) {
620 ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
621 if (!ret) {
622 DRM_ERROR("failed to set up gmch\n");
623 return -EIO;
624 }
625
626 dev_priv->mm.gtt = intel_gtt_get();
627 if (!dev_priv->mm.gtt) {
628 DRM_ERROR("Failed to initialize GTT\n");
629 intel_gmch_remove();
630 return -ENODEV;
631 }
632 return 0;
633 }
634
635 dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
636 if (!dev_priv->mm.gtt)
637 return -ENOMEM;
638
639 if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
640 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
641
642#ifdef CONFIG_INTEL_IOMMU
643 dev_priv->mm.gtt->needs_dmar = 1;
644#endif
645
646 /* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
647 gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
648 dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2);
649
650 /* i9xx_setup */
651 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
652 dev_priv->mm.gtt->gtt_total_entries =
653 gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
654 if (INTEL_INFO(dev)->gen < 7)
655 dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
656 else
657 dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl);
658
659 dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT;
660 /* 64/512MB is the current min/max we actually know of, but this is just a
661 * coarse sanity check.
662 */
663 if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 ||
664 dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) {
665 DRM_ERROR("Unknown GMADR entries (%d)\n",
666 dev_priv->mm.gtt->gtt_mappable_entries);
667 ret = -ENXIO;
668 goto err_out;
669 }
670
671 ret = setup_scratch_page(dev);
672 if (ret) {
673 DRM_ERROR("Scratch setup failed\n");
674 goto err_out;
675 }
676
677 dev_priv->mm.gtt->gtt = ioremap_wc(gtt_bus_addr,
678 dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
679 if (!dev_priv->mm.gtt->gtt) {
680 DRM_ERROR("Failed to map the gtt page table\n");
681 teardown_scratch_page(dev);
682 ret = -ENOMEM;
683 goto err_out;
684 }
685
686 /* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
687 DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8);
688 DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8);
689 DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);
690
691 return 0;
692
693err_out:
694 kfree(dev_priv->mm.gtt);
695 if (INTEL_INFO(dev)->gen < 6)
696 intel_gmch_remove();
697 return ret;
698}
699
700void i915_gem_gtt_fini(struct drm_device *dev)
701{
702 struct drm_i915_private *dev_priv = dev->dev_private;
703 iounmap(dev_priv->mm.gtt->gtt);
704 teardown_scratch_page(dev);
705 if (INTEL_INFO(dev)->gen < 6)
706 intel_gmch_remove();
707 kfree(dev_priv->mm.gtt);
394} 708}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 32e1bda865b8..a4dc97f8b9f0 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -122,7 +122,10 @@ static int
122i915_pipe_enabled(struct drm_device *dev, int pipe) 122i915_pipe_enabled(struct drm_device *dev, int pipe)
123{ 123{
124 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 124 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
125 return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE; 125 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
126 pipe);
127
128 return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
126} 129}
127 130
128/* Called from drm generic code, passed a 'crtc', which 131/* Called from drm generic code, passed a 'crtc', which
@@ -182,6 +185,8 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
182 int vbl_start, vbl_end, htotal, vtotal; 185 int vbl_start, vbl_end, htotal, vtotal;
183 bool in_vbl = true; 186 bool in_vbl = true;
184 int ret = 0; 187 int ret = 0;
188 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
189 pipe);
185 190
186 if (!i915_pipe_enabled(dev, pipe)) { 191 if (!i915_pipe_enabled(dev, pipe)) {
187 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 192 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
@@ -190,7 +195,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
190 } 195 }
191 196
192 /* Get vtotal. */ 197 /* Get vtotal. */
193 vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff); 198 vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
194 199
195 if (INTEL_INFO(dev)->gen >= 4) { 200 if (INTEL_INFO(dev)->gen >= 4) {
196 /* No obvious pixelcount register. Only query vertical 201 /* No obvious pixelcount register. Only query vertical
@@ -210,13 +215,13 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
210 */ 215 */
211 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 216 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
212 217
213 htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff); 218 htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
214 *vpos = position / htotal; 219 *vpos = position / htotal;
215 *hpos = position - (*vpos * htotal); 220 *hpos = position - (*vpos * htotal);
216 } 221 }
217 222
218 /* Query vblank area. */ 223 /* Query vblank area. */
219 vbl = I915_READ(VBLANK(pipe)); 224 vbl = I915_READ(VBLANK(cpu_transcoder));
220 225
221 /* Test position against vblank region. */ 226 /* Test position against vblank region. */
222 vbl_start = vbl & 0x1fff; 227 vbl_start = vbl & 0x1fff;
@@ -352,8 +357,7 @@ static void notify_ring(struct drm_device *dev,
352 if (i915_enable_hangcheck) { 357 if (i915_enable_hangcheck) {
353 dev_priv->hangcheck_count = 0; 358 dev_priv->hangcheck_count = 0;
354 mod_timer(&dev_priv->hangcheck_timer, 359 mod_timer(&dev_priv->hangcheck_timer,
355 jiffies + 360 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
356 msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
357 } 361 }
358} 362}
359 363
@@ -374,7 +378,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
374 if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) 378 if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
375 return; 379 return;
376 380
377 mutex_lock(&dev_priv->dev->struct_mutex); 381 mutex_lock(&dev_priv->rps.hw_lock);
378 382
379 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) 383 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
380 new_delay = dev_priv->rps.cur_delay + 1; 384 new_delay = dev_priv->rps.cur_delay + 1;
@@ -389,7 +393,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
389 gen6_set_rps(dev_priv->dev, new_delay); 393 gen6_set_rps(dev_priv->dev, new_delay);
390 } 394 }
391 395
392 mutex_unlock(&dev_priv->dev->struct_mutex); 396 mutex_unlock(&dev_priv->rps.hw_lock);
393} 397}
394 398
395 399
@@ -405,7 +409,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
405static void ivybridge_parity_work(struct work_struct *work) 409static void ivybridge_parity_work(struct work_struct *work)
406{ 410{
407 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 411 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
408 parity_error_work); 412 l3_parity.error_work);
409 u32 error_status, row, bank, subbank; 413 u32 error_status, row, bank, subbank;
410 char *parity_event[5]; 414 char *parity_event[5];
411 uint32_t misccpctl; 415 uint32_t misccpctl;
@@ -469,7 +473,7 @@ static void ivybridge_handle_parity_error(struct drm_device *dev)
469 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 473 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
470 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 474 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
471 475
472 queue_work(dev_priv->wq, &dev_priv->parity_error_work); 476 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
473} 477}
474 478
475static void snb_gt_irq_handler(struct drm_device *dev, 479static void snb_gt_irq_handler(struct drm_device *dev,
@@ -520,7 +524,7 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
520 queue_work(dev_priv->wq, &dev_priv->rps.work); 524 queue_work(dev_priv->wq, &dev_priv->rps.work);
521} 525}
522 526
523static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS) 527static irqreturn_t valleyview_irq_handler(int irq, void *arg)
524{ 528{
525 struct drm_device *dev = (struct drm_device *) arg; 529 struct drm_device *dev = (struct drm_device *) arg;
526 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 530 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -606,6 +610,9 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
606 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 610 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
607 int pipe; 611 int pipe;
608 612
613 if (pch_iir & SDE_HOTPLUG_MASK)
614 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
615
609 if (pch_iir & SDE_AUDIO_POWER_MASK) 616 if (pch_iir & SDE_AUDIO_POWER_MASK)
610 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 617 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
611 (pch_iir & SDE_AUDIO_POWER_MASK) >> 618 (pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -646,6 +653,9 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
646 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 653 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
647 int pipe; 654 int pipe;
648 655
656 if (pch_iir & SDE_HOTPLUG_MASK_CPT)
657 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
658
649 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) 659 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
650 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 660 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
651 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 661 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -670,7 +680,7 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
670 I915_READ(FDI_RX_IIR(pipe))); 680 I915_READ(FDI_RX_IIR(pipe)));
671} 681}
672 682
673static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) 683static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
674{ 684{
675 struct drm_device *dev = (struct drm_device *) arg; 685 struct drm_device *dev = (struct drm_device *) arg;
676 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 686 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -709,8 +719,6 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
709 if (de_iir & DE_PCH_EVENT_IVB) { 719 if (de_iir & DE_PCH_EVENT_IVB) {
710 u32 pch_iir = I915_READ(SDEIIR); 720 u32 pch_iir = I915_READ(SDEIIR);
711 721
712 if (pch_iir & SDE_HOTPLUG_MASK_CPT)
713 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
714 cpt_irq_handler(dev, pch_iir); 722 cpt_irq_handler(dev, pch_iir);
715 723
716 /* clear PCH hotplug event before clear CPU irq */ 724 /* clear PCH hotplug event before clear CPU irq */
@@ -745,13 +753,12 @@ static void ilk_gt_irq_handler(struct drm_device *dev,
745 notify_ring(dev, &dev_priv->ring[VCS]); 753 notify_ring(dev, &dev_priv->ring[VCS]);
746} 754}
747 755
748static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) 756static irqreturn_t ironlake_irq_handler(int irq, void *arg)
749{ 757{
750 struct drm_device *dev = (struct drm_device *) arg; 758 struct drm_device *dev = (struct drm_device *) arg;
751 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 759 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
752 int ret = IRQ_NONE; 760 int ret = IRQ_NONE;
753 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; 761 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
754 u32 hotplug_mask;
755 762
756 atomic_inc(&dev_priv->irq_received); 763 atomic_inc(&dev_priv->irq_received);
757 764
@@ -769,11 +776,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
769 (!IS_GEN6(dev) || pm_iir == 0)) 776 (!IS_GEN6(dev) || pm_iir == 0))
770 goto done; 777 goto done;
771 778
772 if (HAS_PCH_CPT(dev))
773 hotplug_mask = SDE_HOTPLUG_MASK_CPT;
774 else
775 hotplug_mask = SDE_HOTPLUG_MASK;
776
777 ret = IRQ_HANDLED; 779 ret = IRQ_HANDLED;
778 780
779 if (IS_GEN5(dev)) 781 if (IS_GEN5(dev))
@@ -802,8 +804,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
802 804
803 /* check event from PCH */ 805 /* check event from PCH */
804 if (de_iir & DE_PCH_EVENT) { 806 if (de_iir & DE_PCH_EVENT) {
805 if (pch_iir & hotplug_mask)
806 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
807 if (HAS_PCH_CPT(dev)) 807 if (HAS_PCH_CPT(dev))
808 cpt_irq_handler(dev, pch_iir); 808 cpt_irq_handler(dev, pch_iir);
809 else 809 else
@@ -1120,6 +1120,8 @@ static void i915_record_ring_state(struct drm_device *dev,
1120 = I915_READ(RING_SYNC_0(ring->mmio_base)); 1120 = I915_READ(RING_SYNC_0(ring->mmio_base));
1121 error->semaphore_mboxes[ring->id][1] 1121 error->semaphore_mboxes[ring->id][1]
1122 = I915_READ(RING_SYNC_1(ring->mmio_base)); 1122 = I915_READ(RING_SYNC_1(ring->mmio_base));
1123 error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
1124 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
1123 } 1125 }
1124 1126
1125 if (INTEL_INFO(dev)->gen >= 4) { 1127 if (INTEL_INFO(dev)->gen >= 4) {
@@ -1464,7 +1466,9 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
1464 spin_lock_irqsave(&dev->event_lock, flags); 1466 spin_lock_irqsave(&dev->event_lock, flags);
1465 work = intel_crtc->unpin_work; 1467 work = intel_crtc->unpin_work;
1466 1468
1467 if (work == NULL || work->pending || !work->enable_stall_check) { 1469 if (work == NULL ||
1470 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
1471 !work->enable_stall_check) {
1468 /* Either the pending flip IRQ arrived, or we're too early. Don't check */ 1472 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
1469 spin_unlock_irqrestore(&dev->event_lock, flags); 1473 spin_unlock_irqrestore(&dev->event_lock, flags);
1470 return; 1474 return;
@@ -1751,7 +1755,7 @@ void i915_hangcheck_elapsed(unsigned long data)
1751repeat: 1755repeat:
1752 /* Reset timer in case chip hangs without another request being added */ 1756 /* Reset timer in case chip hangs without another request being added */
1753 mod_timer(&dev_priv->hangcheck_timer, 1757 mod_timer(&dev_priv->hangcheck_timer,
1754 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); 1758 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
1755} 1759}
1756 1760
1757/* drm_dma.h hooks 1761/* drm_dma.h hooks
@@ -1956,6 +1960,7 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
1956 u32 enable_mask; 1960 u32 enable_mask;
1957 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 1961 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1958 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; 1962 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
1963 u32 render_irqs;
1959 u16 msid; 1964 u16 msid;
1960 1965
1961 enable_mask = I915_DISPLAY_PORT_INTERRUPT; 1966 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
@@ -1995,21 +2000,12 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
1995 I915_WRITE(VLV_IIR, 0xffffffff); 2000 I915_WRITE(VLV_IIR, 0xffffffff);
1996 I915_WRITE(VLV_IIR, 0xffffffff); 2001 I915_WRITE(VLV_IIR, 0xffffffff);
1997 2002
1998 dev_priv->gt_irq_mask = ~0;
1999
2000 I915_WRITE(GTIIR, I915_READ(GTIIR));
2001 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2003 I915_WRITE(GTIIR, I915_READ(GTIIR));
2002 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2004 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2003 I915_WRITE(GTIER, GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT | 2005
2004 GT_GEN6_BLT_CS_ERROR_INTERRUPT | 2006 render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
2005 GT_GEN6_BLT_USER_INTERRUPT | 2007 GEN6_BLITTER_USER_INTERRUPT;
2006 GT_GEN6_BSD_USER_INTERRUPT | 2008 I915_WRITE(GTIER, render_irqs);
2007 GT_GEN6_BSD_CS_ERROR_INTERRUPT |
2008 GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
2009 GT_PIPE_NOTIFY |
2010 GT_RENDER_CS_ERROR_INTERRUPT |
2011 GT_SYNC_STATUS |
2012 GT_USER_INTERRUPT);
2013 POSTING_READ(GTIER); 2009 POSTING_READ(GTIER);
2014 2010
2015 /* ack & enable invalid PTE error interrupts */ 2011 /* ack & enable invalid PTE error interrupts */
@@ -2019,7 +2015,6 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
2019#endif 2015#endif
2020 2016
2021 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 2017 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
2022#if 0 /* FIXME: check register definitions; some have moved */
2023 /* Note HDMI and DP share bits */ 2018 /* Note HDMI and DP share bits */
2024 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) 2019 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2025 hotplug_en |= HDMIB_HOTPLUG_INT_EN; 2020 hotplug_en |= HDMIB_HOTPLUG_INT_EN;
@@ -2027,15 +2022,14 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
2027 hotplug_en |= HDMIC_HOTPLUG_INT_EN; 2022 hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2028 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) 2023 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2029 hotplug_en |= HDMID_HOTPLUG_INT_EN; 2024 hotplug_en |= HDMID_HOTPLUG_INT_EN;
2030 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS) 2025 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
2031 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 2026 hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2032 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS) 2027 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
2033 hotplug_en |= SDVOB_HOTPLUG_INT_EN; 2028 hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2034 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { 2029 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2035 hotplug_en |= CRT_HOTPLUG_INT_EN; 2030 hotplug_en |= CRT_HOTPLUG_INT_EN;
2036 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 2031 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2037 } 2032 }
2038#endif
2039 2033
2040 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 2034 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2041 2035
@@ -2129,7 +2123,7 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
2129 return 0; 2123 return 0;
2130} 2124}
2131 2125
2132static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS) 2126static irqreturn_t i8xx_irq_handler(int irq, void *arg)
2133{ 2127{
2134 struct drm_device *dev = (struct drm_device *) arg; 2128 struct drm_device *dev = (struct drm_device *) arg;
2135 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2129 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2307,7 +2301,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
2307 return 0; 2301 return 0;
2308} 2302}
2309 2303
2310static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS) 2304static irqreturn_t i915_irq_handler(int irq, void *arg)
2311{ 2305{
2312 struct drm_device *dev = (struct drm_device *) arg; 2306 struct drm_device *dev = (struct drm_device *) arg;
2313 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2307 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2545,7 +2539,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
2545 return 0; 2539 return 0;
2546} 2540}
2547 2541
2548static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS) 2542static irqreturn_t i965_irq_handler(int irq, void *arg)
2549{ 2543{
2550 struct drm_device *dev = (struct drm_device *) arg; 2544 struct drm_device *dev = (struct drm_device *) arg;
2551 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2545 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2691,7 +2685,7 @@ void intel_irq_init(struct drm_device *dev)
2691 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 2685 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
2692 INIT_WORK(&dev_priv->error_work, i915_error_work_func); 2686 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
2693 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 2687 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
2694 INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work); 2688 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
2695 2689
2696 dev->driver->get_vblank_counter = i915_get_vblank_counter; 2690 dev->driver->get_vblank_counter = i915_get_vblank_counter;
2697 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 2691 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index a4162ddff6c5..3f75cfaf1c3f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -26,6 +26,7 @@
26#define _I915_REG_H_ 26#define _I915_REG_H_
27 27
28#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) 28#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
29#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
29 30
30#define _PORT(port, a, b) ((a) + (port)*((b)-(a))) 31#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
31 32
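
_TRANSCODER() is the same stride trick as _PIPE(): given the instance-A and instance-B addresses it extrapolates linearly, which is what lets the timing registers below take a transcoder rather than a pipe. For instance, assuming the usual bases _HTOTAL_A = 0x60000 and _HTOTAL_B = 0x61000:

	u32 ht_a = HTOTAL(0);	/* 0x60000 + 0*0x1000 = 0x60000 (transcoder A) */
	u32 ht_b = HTOTAL(1);	/* 0x60000 + 1*0x1000 = 0x61000 (transcoder B) */
	u32 ht_c = HTOTAL(2);	/* 0x60000 + 2*0x1000 = 0x62000 (transcoder C) */
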
@@ -40,6 +41,14 @@
40 */ 41 */
41#define INTEL_GMCH_CTRL 0x52 42#define INTEL_GMCH_CTRL 0x52
42#define INTEL_GMCH_VGA_DISABLE (1 << 1) 43#define INTEL_GMCH_VGA_DISABLE (1 << 1)
44#define SNB_GMCH_CTRL 0x50
45#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
46#define SNB_GMCH_GGMS_MASK 0x3
47#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
48#define SNB_GMCH_GMS_MASK 0x1f
49#define IVB_GMCH_GMS_SHIFT 4
50#define IVB_GMCH_GMS_MASK 0xf
51
43 52
44/* PCI config space */ 53/* PCI config space */
45 54
@@ -105,23 +114,6 @@
105#define GEN6_GRDOM_MEDIA (1 << 2) 114#define GEN6_GRDOM_MEDIA (1 << 2)
106#define GEN6_GRDOM_BLT (1 << 3) 115#define GEN6_GRDOM_BLT (1 << 3)
107 116
108/* PPGTT stuff */
109#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
110
111#define GEN6_PDE_VALID (1 << 0)
112#define GEN6_PDE_LARGE_PAGE (2 << 0) /* use 32kb pages */
113/* gen6+ has bit 11-4 for physical addr bit 39-32 */
114#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
115
116#define GEN6_PTE_VALID (1 << 0)
117#define GEN6_PTE_UNCACHED (1 << 1)
118#define HSW_PTE_UNCACHED (0)
119#define GEN6_PTE_CACHE_LLC (2 << 1)
120#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
121#define GEN6_PTE_CACHE_BITS (3 << 1)
122#define GEN6_PTE_GFDT (1 << 3)
123#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
124
125#define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228) 117#define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228)
126#define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518) 118#define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518)
127#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220) 119#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220)
@@ -241,11 +233,18 @@
241 */ 233 */
242#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) 234#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
243#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ 235#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
244#define MI_INVALIDATE_TLB (1<<18) 236#define MI_FLUSH_DW_STORE_INDEX (1<<21)
245#define MI_INVALIDATE_BSD (1<<7) 237#define MI_INVALIDATE_TLB (1<<18)
238#define MI_FLUSH_DW_OP_STOREDW (1<<14)
239#define MI_INVALIDATE_BSD (1<<7)
240#define MI_FLUSH_DW_USE_GTT (1<<2)
241#define MI_FLUSH_DW_USE_PPGTT (0<<2)
246#define MI_BATCH_BUFFER MI_INSTR(0x30, 1) 242#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
247#define MI_BATCH_NON_SECURE (1) 243#define MI_BATCH_NON_SECURE (1)
248#define MI_BATCH_NON_SECURE_I965 (1<<8) 244/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
245#define MI_BATCH_NON_SECURE_I965 (1<<8)
246#define MI_BATCH_PPGTT_HSW (1<<8)
247#define MI_BATCH_NON_SECURE_HSW (1<<13)
249#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) 248#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
250#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */ 249#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
251#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */ 250#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
@@ -369,6 +368,7 @@
369#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */ 368#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */
370#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */ 369#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */
371#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */ 370#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */
371#define DPIO_PLL_REFCLK_SEL_MASK 3
372#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */ 372#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */
373#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */ 373#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */
374#define _DPIO_REFSFR_B 0x8034 374#define _DPIO_REFSFR_B 0x8034
@@ -384,6 +384,9 @@
384 384
385#define DPIO_FASTCLK_DISABLE 0x8100 385#define DPIO_FASTCLK_DISABLE 0x8100
386 386
387#define DPIO_DATA_CHANNEL1 0x8220
388#define DPIO_DATA_CHANNEL2 0x8420
389
387/* 390/*
388 * Fence registers 391 * Fence registers
389 */ 392 */
@@ -521,6 +524,7 @@
521 */ 524 */
522# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) 525# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
523#define _3D_CHICKEN3 0x02090 526#define _3D_CHICKEN3 0x02090
527#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
524#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5) 528#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
525 529
526#define MI_MODE 0x0209c 530#define MI_MODE 0x0209c
@@ -547,6 +551,8 @@
547#define IIR 0x020a4 551#define IIR 0x020a4
548#define IMR 0x020a8 552#define IMR 0x020a8
549#define ISR 0x020ac 553#define ISR 0x020ac
554#define VLV_GUNIT_CLOCK_GATE 0x182060
555#define GCFG_DIS (1<<8)
550#define VLV_IIR_RW 0x182084 556#define VLV_IIR_RW 0x182084
551#define VLV_IER 0x1820a0 557#define VLV_IER 0x1820a0
552#define VLV_IIR 0x1820a4 558#define VLV_IIR 0x1820a4
@@ -661,6 +667,7 @@
661#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */ 667#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
662 668
663#define CACHE_MODE_0 0x02120 /* 915+ only */ 669#define CACHE_MODE_0 0x02120 /* 915+ only */
670#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
664#define CM0_IZ_OPT_DISABLE (1<<6) 671#define CM0_IZ_OPT_DISABLE (1<<6)
665#define CM0_ZR_OPT_DISABLE (1<<5) 672#define CM0_ZR_OPT_DISABLE (1<<5)
666#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5) 673#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
@@ -670,6 +677,8 @@
670#define CM0_RC_OP_FLUSH_DISABLE (1<<0) 677#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
671#define BB_ADDR 0x02140 /* 8 bytes */ 678#define BB_ADDR 0x02140 /* 8 bytes */
672#define GFX_FLSH_CNTL 0x02170 /* 915+ only */ 679#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
680#define GFX_FLSH_CNTL_GEN6 0x101008
681#define GFX_FLSH_CNTL_EN (1<<0)
673#define ECOSKPD 0x021d0 682#define ECOSKPD 0x021d0
674#define ECO_GATING_CX_ONLY (1<<3) 683#define ECO_GATING_CX_ONLY (1<<3)
675#define ECO_FLIP_DONE (1<<0) 684#define ECO_FLIP_DONE (1<<0)
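[Sketch] GFX_FLSH_CNTL gets a gen6+ sibling at 0x101008. A one-liner sketch of the GTT write-buffer flush it gates, assuming the I915_WRITE()/POSTING_READ() macros from i915_drv.h:

	/* Sketch: kick the GTT write-buffer flush on gen6+ hardware */
	static void gen6_gtt_flush_sketch(struct drm_i915_private *dev_priv)
	{
		I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
		POSTING_READ(GFX_FLSH_CNTL_GEN6); /* make the write land now */
	}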
@@ -1559,14 +1568,14 @@
1559#define _VSYNCSHIFT_B 0x61028 1568#define _VSYNCSHIFT_B 0x61028
1560 1569
1561 1570
1562#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B) 1571#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
1563#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B) 1572#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B)
1564#define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B) 1573#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B)
1565#define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B) 1574#define VTOTAL(trans) _TRANSCODER(trans, _VTOTAL_A, _VTOTAL_B)
1566#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B) 1575#define VBLANK(trans) _TRANSCODER(trans, _VBLANK_A, _VBLANK_B)
1567#define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B) 1576#define VSYNC(trans) _TRANSCODER(trans, _VSYNC_A, _VSYNC_B)
1568#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B) 1577#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
1569#define VSYNCSHIFT(pipe) _PIPE(pipe, _VSYNCSHIFT_A, _VSYNCSHIFT_B) 1578#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
1570 1579
1571/* VGA port control */ 1580/* VGA port control */
1572#define ADPA 0x61100 1581#define ADPA 0x61100
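[Sketch] The timing macros above switch from _PIPE() to a new _TRANSCODER() indexer because on Haswell the eDP transcoder is no longer tied 1:1 to a pipe. The patch presumably defines it by analogy with _PIPE() earlier in this header; a sketch of that presumed shape:

	/* Presumed, mirroring _PIPE(): linear interpolation between the A and
	 * B register offsets, so transcoder 0 -> reg a, transcoder 1 -> reg b */
	#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
	/* e.g. HTOTAL(1) now expands to _HTOTAL_B via the transcoder index */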
@@ -2641,6 +2650,7 @@
2641#define PIPECONF_GAMMA (1<<24) 2650#define PIPECONF_GAMMA (1<<24)
2642#define PIPECONF_FORCE_BORDER (1<<25) 2651#define PIPECONF_FORCE_BORDER (1<<25)
2643#define PIPECONF_INTERLACE_MASK (7 << 21) 2652#define PIPECONF_INTERLACE_MASK (7 << 21)
2653#define PIPECONF_INTERLACE_MASK_HSW (3 << 21)
2644/* Note that pre-gen3 does not support interlaced display directly. Panel 2654/* Note that pre-gen3 does not support interlaced display directly. Panel
2645 * fitting must be disabled on pre-ilk for interlaced. */ 2655 * fitting must be disabled on pre-ilk for interlaced. */
2646#define PIPECONF_PROGRESSIVE (0 << 21) 2656#define PIPECONF_PROGRESSIVE (0 << 21)
@@ -2711,7 +2721,7 @@
2711#define PIPE_12BPC (3 << 5) 2721#define PIPE_12BPC (3 << 5)
2712 2722
2713#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC) 2723#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
2714#define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF) 2724#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF)
2715#define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL) 2725#define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL)
2716#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH) 2726#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH)
2717#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL) 2727#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
@@ -2998,12 +3008,19 @@
2998#define DISPPLANE_GAMMA_ENABLE (1<<30) 3008#define DISPPLANE_GAMMA_ENABLE (1<<30)
2999#define DISPPLANE_GAMMA_DISABLE 0 3009#define DISPPLANE_GAMMA_DISABLE 0
3000#define DISPPLANE_PIXFORMAT_MASK (0xf<<26) 3010#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
3011#define DISPPLANE_YUV422 (0x0<<26)
3001#define DISPPLANE_8BPP (0x2<<26) 3012#define DISPPLANE_8BPP (0x2<<26)
3002#define DISPPLANE_15_16BPP (0x4<<26) 3013#define DISPPLANE_BGRA555 (0x3<<26)
3003#define DISPPLANE_16BPP (0x5<<26) 3014#define DISPPLANE_BGRX555 (0x4<<26)
3004#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26) 3015#define DISPPLANE_BGRX565 (0x5<<26)
3005#define DISPPLANE_32BPP (0x7<<26) 3016#define DISPPLANE_BGRX888 (0x6<<26)
3006#define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26) 3017#define DISPPLANE_BGRA888 (0x7<<26)
3018#define DISPPLANE_RGBX101010 (0x8<<26)
3019#define DISPPLANE_RGBA101010 (0x9<<26)
3020#define DISPPLANE_BGRX101010 (0xa<<26)
3021#define DISPPLANE_RGBX161616 (0xc<<26)
3022#define DISPPLANE_RGBX888 (0xe<<26)
3023#define DISPPLANE_RGBA888 (0xf<<26)
3007#define DISPPLANE_STEREO_ENABLE (1<<25) 3024#define DISPPLANE_STEREO_ENABLE (1<<25)
3008#define DISPPLANE_STEREO_DISABLE 0 3025#define DISPPLANE_STEREO_DISABLE 0
3009#define DISPPLANE_SEL_PIPE_SHIFT 24 3026#define DISPPLANE_SEL_PIPE_SHIFT 24
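[Sketch] The DISPPLANE format names are reworked from bpp-oriented to explicit channel layouts. A sketch of the fourcc-to-field mapping this enables, assuming the DRM_FORMAT_* codes from include/drm/drm_fourcc.h and the usual I915_READ()/I915_WRITE() helpers; the set of cases is illustrative, not exhaustive:

	/* Sketch: pick a primary-plane format field from a framebuffer fourcc */
	static int sketch_set_plane_format(struct drm_i915_private *dev_priv,
					   int plane, u32 pixel_format)
	{
		u32 dspcntr = I915_READ(DSPCNTR(plane)) & ~DISPPLANE_PIXFORMAT_MASK;

		switch (pixel_format) {
		case DRM_FORMAT_C8:
			dspcntr |= DISPPLANE_8BPP;		/* palettized */
			break;
		case DRM_FORMAT_RGB565:
			dspcntr |= DISPPLANE_BGRX565;
			break;
		case DRM_FORMAT_XRGB8888:
			dspcntr |= DISPPLANE_BGRX888;
			break;
		case DRM_FORMAT_XBGR2101010:
			dspcntr |= DISPPLANE_RGBX101010;	/* 30bpp, no alpha */
			break;
		default:
			return -EINVAL;				/* unhandled fourcc */
		}
		I915_WRITE(DSPCNTR(plane), dspcntr);
		return 0;
	}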
@@ -3024,6 +3041,8 @@
3024#define _DSPASIZE 0x70190 3041#define _DSPASIZE 0x70190
3025#define _DSPASURF 0x7019C /* 965+ only */ 3042#define _DSPASURF 0x7019C /* 965+ only */
3026#define _DSPATILEOFF 0x701A4 /* 965+ only */ 3043#define _DSPATILEOFF 0x701A4 /* 965+ only */
3044#define _DSPAOFFSET 0x701A4 /* HSW */
3045#define _DSPASURFLIVE 0x701AC
3027 3046
3028#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR) 3047#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
3029#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR) 3048#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
@@ -3033,6 +3052,8 @@
3033#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF) 3052#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
3034#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF) 3053#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
3035#define DSPLINOFF(plane) DSPADDR(plane) 3054#define DSPLINOFF(plane) DSPADDR(plane)
3055#define DSPOFFSET(plane) _PIPE(plane, _DSPAOFFSET, _DSPBOFFSET)
3056#define DSPSURFLIVE(plane) _PIPE(plane, _DSPASURFLIVE, _DSPBSURFLIVE)
3036 3057
3037/* Display/Sprite base address macros */ 3058/* Display/Sprite base address macros */
3038#define DISP_BASEADDR_MASK (0xfffff000) 3059#define DISP_BASEADDR_MASK (0xfffff000)
@@ -3078,6 +3099,8 @@
3078#define _DSPBSIZE 0x71190 3099#define _DSPBSIZE 0x71190
3079#define _DSPBSURF 0x7119C 3100#define _DSPBSURF 0x7119C
3080#define _DSPBTILEOFF 0x711A4 3101#define _DSPBTILEOFF 0x711A4
3102#define _DSPBOFFSET 0x711A4
3103#define _DSPBSURFLIVE 0x711AC
3081 3104
3082/* Sprite A control */ 3105/* Sprite A control */
3083#define _DVSACNTR 0x72180 3106#define _DVSACNTR 0x72180
@@ -3143,6 +3166,7 @@
3143#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF) 3166#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
3144#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL) 3167#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
3145#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK) 3168#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
3169#define DVSSURFLIVE(pipe) _PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
3146 3170
3147#define _SPRA_CTL 0x70280 3171#define _SPRA_CTL 0x70280
3148#define SPRITE_ENABLE (1<<31) 3172#define SPRITE_ENABLE (1<<31)
@@ -3177,6 +3201,8 @@
3177#define _SPRA_SURF 0x7029c 3201#define _SPRA_SURF 0x7029c
3178#define _SPRA_KEYMAX 0x702a0 3202#define _SPRA_KEYMAX 0x702a0
3179#define _SPRA_TILEOFF 0x702a4 3203#define _SPRA_TILEOFF 0x702a4
3204#define _SPRA_OFFSET 0x702a4
3205#define _SPRA_SURFLIVE 0x702ac
3180#define _SPRA_SCALE 0x70304 3206#define _SPRA_SCALE 0x70304
3181#define SPRITE_SCALE_ENABLE (1<<31) 3207#define SPRITE_SCALE_ENABLE (1<<31)
3182#define SPRITE_FILTER_MASK (3<<29) 3208#define SPRITE_FILTER_MASK (3<<29)
@@ -3197,6 +3223,8 @@
3197#define _SPRB_SURF 0x7129c 3223#define _SPRB_SURF 0x7129c
3198#define _SPRB_KEYMAX 0x712a0 3224#define _SPRB_KEYMAX 0x712a0
3199#define _SPRB_TILEOFF 0x712a4 3225#define _SPRB_TILEOFF 0x712a4
3226#define _SPRB_OFFSET 0x712a4
3227#define _SPRB_SURFLIVE 0x712ac
3200#define _SPRB_SCALE 0x71304 3228#define _SPRB_SCALE 0x71304
3201#define _SPRB_GAMC 0x71400 3229#define _SPRB_GAMC 0x71400
3202 3230
@@ -3210,8 +3238,10 @@
3210#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF) 3238#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
3211#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX) 3239#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
3212#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF) 3240#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
3241#define SPROFFSET(pipe) _PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
3213#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE) 3242#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
3214#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC) 3243#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
3244#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
3215 3245
3216/* VBIOS regs */ 3246/* VBIOS regs */
3217#define VGACNTRL 0x71400 3247#define VGACNTRL 0x71400
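[Sketch] The new *_SURFLIVE registers (plane, sprite and DVS variants above) expose the surface address the hardware is actively scanning out, as opposed to the armed value in the *SURF registers. A sketch of the obvious use, checking whether a page flip has actually latched:

	/* Sketch: a flip has landed once the read-only live surface matches
	 * the address we armed via DSPSURF */
	static bool sketch_flip_done(struct drm_i915_private *dev_priv,
				     int plane, u32 armed_surf)
	{
		return I915_READ(DSPSURFLIVE(plane)) == armed_surf;
	}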
@@ -3246,12 +3276,6 @@
3246#define DISPLAY_PORT_PLL_BIOS_1 0x46010 3276#define DISPLAY_PORT_PLL_BIOS_1 0x46010
3247#define DISPLAY_PORT_PLL_BIOS_2 0x46014 3277#define DISPLAY_PORT_PLL_BIOS_2 0x46014
3248 3278
3249#define PCH_DSPCLK_GATE_D 0x42020
3250# define DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
3251# define DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
3252# define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7)
3253# define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5)
3254
3255#define PCH_3DCGDIS0 0x46020 3279#define PCH_3DCGDIS0 0x46020
3256# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18) 3280# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18)
3257# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1) 3281# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1)
@@ -3301,20 +3325,22 @@
3301#define _PIPEB_LINK_M2 0x61048 3325#define _PIPEB_LINK_M2 0x61048
3302#define _PIPEB_LINK_N2 0x6104c 3326#define _PIPEB_LINK_N2 0x6104c
3303 3327
3304#define PIPE_DATA_M1(pipe) _PIPE(pipe, _PIPEA_DATA_M1, _PIPEB_DATA_M1) 3328#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
3305#define PIPE_DATA_N1(pipe) _PIPE(pipe, _PIPEA_DATA_N1, _PIPEB_DATA_N1) 3329#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
3306#define PIPE_DATA_M2(pipe) _PIPE(pipe, _PIPEA_DATA_M2, _PIPEB_DATA_M2) 3330#define PIPE_DATA_M2(tran) _TRANSCODER(tran, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
3307#define PIPE_DATA_N2(pipe) _PIPE(pipe, _PIPEA_DATA_N2, _PIPEB_DATA_N2) 3331#define PIPE_DATA_N2(tran) _TRANSCODER(tran, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
3308#define PIPE_LINK_M1(pipe) _PIPE(pipe, _PIPEA_LINK_M1, _PIPEB_LINK_M1) 3332#define PIPE_LINK_M1(tran) _TRANSCODER(tran, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
3309#define PIPE_LINK_N1(pipe) _PIPE(pipe, _PIPEA_LINK_N1, _PIPEB_LINK_N1) 3333#define PIPE_LINK_N1(tran) _TRANSCODER(tran, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
3310#define PIPE_LINK_M2(pipe) _PIPE(pipe, _PIPEA_LINK_M2, _PIPEB_LINK_M2) 3334#define PIPE_LINK_M2(tran) _TRANSCODER(tran, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
3311#define PIPE_LINK_N2(pipe) _PIPE(pipe, _PIPEA_LINK_N2, _PIPEB_LINK_N2) 3335#define PIPE_LINK_N2(tran) _TRANSCODER(tran, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
3312 3336
3313/* CPU panel fitter */ 3337/* CPU panel fitter */
3314/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */ 3338/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
3315#define _PFA_CTL_1 0x68080 3339#define _PFA_CTL_1 0x68080
3316#define _PFB_CTL_1 0x68880 3340#define _PFB_CTL_1 0x68880
3317#define PF_ENABLE (1<<31) 3341#define PF_ENABLE (1<<31)
3342#define PF_PIPE_SEL_MASK_IVB (3<<29)
3343#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29)
3318#define PF_FILTER_MASK (3<<23) 3344#define PF_FILTER_MASK (3<<23)
3319#define PF_FILTER_PROGRAMMED (0<<23) 3345#define PF_FILTER_PROGRAMMED (0<<23)
3320#define PF_FILTER_MED_3x3 (1<<23) 3346#define PF_FILTER_MED_3x3 (1<<23)
@@ -3423,15 +3449,13 @@
3423#define ILK_HDCP_DISABLE (1<<25) 3449#define ILK_HDCP_DISABLE (1<<25)
3424#define ILK_eDP_A_DISABLE (1<<24) 3450#define ILK_eDP_A_DISABLE (1<<24)
3425#define ILK_DESKTOP (1<<23) 3451#define ILK_DESKTOP (1<<23)
3426#define ILK_DSPCLK_GATE 0x42020
3427#define IVB_VRHUNIT_CLK_GATE (1<<28)
3428#define ILK_DPARB_CLK_GATE (1<<5)
3429#define ILK_DPFD_CLK_GATE (1<<7)
3430 3452
3431/* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */ 3453#define ILK_DSPCLK_GATE_D 0x42020
3432#define ILK_CLK_FBC (1<<7) 3454#define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28)
3433#define ILK_DPFC_DIS1 (1<<8) 3455#define ILK_DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
3434#define ILK_DPFC_DIS2 (1<<9) 3456#define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
3457#define ILK_DPFDUNIT_CLOCK_GATE_ENABLE (1 << 7)
3458#define ILK_DPARBUNIT_CLOCK_GATE_ENABLE (1 << 5)
3435 3459
3436#define IVB_CHICKEN3 0x4200c 3460#define IVB_CHICKEN3 0x4200c
3437# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5) 3461# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5)
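[Sketch] This hunk folds the duplicate 0x42020 block (PCH_DSPCLK_GATE_D, deleted further up) and the ILK_DSPCLK_GATE aliases into one ILK_DSPCLK_GATE_D set, and renames bits 5 and 7 from *_DISABLE to *_ENABLE to reflect their actual polarity. A sketch of the resulting display clock-gating setup, in the spirit of ironlake_init_clock_gating(); the exact bit selection is illustrative:

	/* Sketch: ILK display clock gating with the renamed bits; the DPFC
	 * units stay ungated (for FBC), while the DPFD unit's gating is
	 * switched on -- hence the _ENABLE rename */
	u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE |
			  ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
			  ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
			  ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);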
@@ -3447,14 +3471,21 @@
3447 3471
3448#define GEN7_L3CNTLREG1 0xB01C 3472#define GEN7_L3CNTLREG1 0xB01C
3449#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C 3473#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C
3474#define GEN7_L3AGDIS (1<<19)
3450 3475
3451#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030 3476#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
3452#define GEN7_WA_L3_CHICKEN_MODE 0x20000000 3477#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
3453 3478
3479#define GEN7_L3SQCREG4 0xb034
3480#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
3481
3454/* WaCatErrorRejectionIssue */ 3482/* WaCatErrorRejectionIssue */
3455#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030 3483#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
3456#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11) 3484#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
3457 3485
3486#define HSW_FUSE_STRAP 0x42014
3487#define HSW_CDCLK_LIMIT (1 << 24)
3488
3458/* PCH */ 3489/* PCH */
3459 3490
3460/* south display engine interrupt: IBX */ 3491/* south display engine interrupt: IBX */
@@ -3686,7 +3717,7 @@
3686#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) 3717#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
3687#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) 3718#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
3688 3719
3689#define VLV_VIDEO_DIP_CTL_A 0x60220 3720#define VLV_VIDEO_DIP_CTL_A 0x60200
3690#define VLV_VIDEO_DIP_DATA_A 0x60208 3721#define VLV_VIDEO_DIP_DATA_A 0x60208
3691#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210 3722#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
3692 3723
@@ -3795,18 +3826,26 @@
3795#define TRANS_6BPC (2<<5) 3826#define TRANS_6BPC (2<<5)
3796#define TRANS_12BPC (3<<5) 3827#define TRANS_12BPC (3<<5)
3797 3828
3829#define _TRANSA_CHICKEN1 0xf0060
3830#define _TRANSB_CHICKEN1 0xf1060
3831#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
3832#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4)
3798#define _TRANSA_CHICKEN2 0xf0064 3833#define _TRANSA_CHICKEN2 0xf0064
3799#define _TRANSB_CHICKEN2 0xf1064 3834#define _TRANSB_CHICKEN2 0xf1064
3800#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2) 3835#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
3801#define TRANS_AUTOTRAIN_GEN_STALL_DIS (1<<31) 3836#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31)
3837
3802 3838
3803#define SOUTH_CHICKEN1 0xc2000 3839#define SOUTH_CHICKEN1 0xc2000
3804#define FDIA_PHASE_SYNC_SHIFT_OVR 19 3840#define FDIA_PHASE_SYNC_SHIFT_OVR 19
3805#define FDIA_PHASE_SYNC_SHIFT_EN 18 3841#define FDIA_PHASE_SYNC_SHIFT_EN 18
3806#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2))) 3842#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
3807#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2))) 3843#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
3844#define FDI_BC_BIFURCATION_SELECT (1 << 12)
3808#define SOUTH_CHICKEN2 0xc2004 3845#define SOUTH_CHICKEN2 0xc2004
3809#define DPLS_EDP_PPS_FIX_DIS (1<<0) 3846#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13)
3847#define FDI_MPHY_IOSFSB_RESET_CTL (1<<12)
3848#define DPLS_EDP_PPS_FIX_DIS (1<<0)
3810 3849
3811#define _FDI_RXA_CHICKEN 0xc200c 3850#define _FDI_RXA_CHICKEN 0xc200c
3812#define _FDI_RXB_CHICKEN 0xc2010 3851#define _FDI_RXB_CHICKEN 0xc2010
@@ -3816,6 +3855,7 @@
3816 3855
3817#define SOUTH_DSPCLK_GATE_D 0xc2020 3856#define SOUTH_DSPCLK_GATE_D 0xc2020
3818#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29) 3857#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
3858#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)
3819 3859
3820/* CPU: FDI_TX */ 3860/* CPU: FDI_TX */
3821#define _FDI_TXA_CTL 0x60100 3861#define _FDI_TXA_CTL 0x60100
@@ -3877,6 +3917,7 @@
3877#define FDI_FS_ERRC_ENABLE (1<<27) 3917#define FDI_FS_ERRC_ENABLE (1<<27)
3878#define FDI_FE_ERRC_ENABLE (1<<26) 3918#define FDI_FE_ERRC_ENABLE (1<<26)
3879#define FDI_DP_PORT_WIDTH_X8 (7<<19) 3919#define FDI_DP_PORT_WIDTH_X8 (7<<19)
3920#define FDI_RX_POLARITY_REVERSED_LPT (1<<16)
3880#define FDI_8BPC (0<<16) 3921#define FDI_8BPC (0<<16)
3881#define FDI_10BPC (1<<16) 3922#define FDI_10BPC (1<<16)
3882#define FDI_6BPC (2<<16) 3923#define FDI_6BPC (2<<16)
@@ -3901,16 +3942,21 @@
3901#define FDI_PORT_WIDTH_2X_LPT (1<<19) 3942#define FDI_PORT_WIDTH_2X_LPT (1<<19)
3902#define FDI_PORT_WIDTH_1X_LPT (0<<19) 3943#define FDI_PORT_WIDTH_1X_LPT (0<<19)
3903 3944
3904#define _FDI_RXA_MISC 0xf0010 3945#define _FDI_RXA_MISC 0xf0010
3905#define _FDI_RXB_MISC 0xf1010 3946#define _FDI_RXB_MISC 0xf1010
3947#define FDI_RX_PWRDN_LANE1_MASK (3<<26)
3948#define FDI_RX_PWRDN_LANE1_VAL(x) ((x)<<26)
3949#define FDI_RX_PWRDN_LANE0_MASK (3<<24)
3950#define FDI_RX_PWRDN_LANE0_VAL(x) ((x)<<24)
3951#define FDI_RX_TP1_TO_TP2_48 (2<<20)
3952#define FDI_RX_TP1_TO_TP2_64 (3<<20)
3953#define FDI_RX_FDI_DELAY_90 (0x90<<0)
3954#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
3955
3906#define _FDI_RXA_TUSIZE1 0xf0030 3956#define _FDI_RXA_TUSIZE1 0xf0030
3907#define _FDI_RXA_TUSIZE2 0xf0038 3957#define _FDI_RXA_TUSIZE2 0xf0038
3908#define _FDI_RXB_TUSIZE1 0xf1030 3958#define _FDI_RXB_TUSIZE1 0xf1030
3909#define _FDI_RXB_TUSIZE2 0xf1038 3959#define _FDI_RXB_TUSIZE2 0xf1038
3910#define FDI_RX_TP1_TO_TP2_48 (2<<20)
3911#define FDI_RX_TP1_TO_TP2_64 (3<<20)
3912#define FDI_RX_FDI_DELAY_90 (0x90<<0)
3913#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
3914#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1) 3960#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
3915#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2) 3961#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
3916 3962
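[Sketch] FDI_RX_MISC grows lane power-down fields and absorbs the TP1-to-TP2 and delay fields that previously sat with the TUSIZE defines. A sketch of programming it during Lynx Point FDI training; the particular values (power-down state 2, 48-symbol TP1-to-TP2, 0x90 FDI delay) are illustrative training parameters, not mandated by this hunk:

	/* Sketch: illustrative LPT FDI receiver misc setup before training */
	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) |
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);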
@@ -4003,6 +4049,11 @@
4003#define PANEL_LIGHT_ON_DELAY_SHIFT 0 4049#define PANEL_LIGHT_ON_DELAY_SHIFT 0
4004 4050
4005#define PCH_PP_OFF_DELAYS 0xc720c 4051#define PCH_PP_OFF_DELAYS 0xc720c
4052#define PANEL_POWER_PORT_SELECT_MASK (0x3 << 30)
4053#define PANEL_POWER_PORT_LVDS (0 << 30)
4054#define PANEL_POWER_PORT_DP_A (1 << 30)
4055#define PANEL_POWER_PORT_DP_C (2 << 30)
4056#define PANEL_POWER_PORT_DP_D (3 << 30)
4006#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000) 4057#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
4007#define PANEL_POWER_DOWN_DELAY_SHIFT 16 4058#define PANEL_POWER_DOWN_DELAY_SHIFT 16
4008#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff) 4059#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
@@ -4050,7 +4101,7 @@
4050#define TRANS_DP_CTL_A 0xe0300 4101#define TRANS_DP_CTL_A 0xe0300
4051#define TRANS_DP_CTL_B 0xe1300 4102#define TRANS_DP_CTL_B 0xe1300
4052#define TRANS_DP_CTL_C 0xe2300 4103#define TRANS_DP_CTL_C 0xe2300
4053#define TRANS_DP_CTL(pipe) (TRANS_DP_CTL_A + (pipe) * 0x01000) 4104#define TRANS_DP_CTL(pipe) _PIPE(pipe, TRANS_DP_CTL_A, TRANS_DP_CTL_B)
4054#define TRANS_DP_OUTPUT_ENABLE (1<<31) 4105#define TRANS_DP_OUTPUT_ENABLE (1<<31)
4055#define TRANS_DP_PORT_SEL_B (0<<29) 4106#define TRANS_DP_PORT_SEL_B (0<<29)
4056#define TRANS_DP_PORT_SEL_C (1<<29) 4107#define TRANS_DP_PORT_SEL_C (1<<29)
@@ -4108,6 +4159,8 @@
4108#define FORCEWAKE_ACK_HSW 0x130044 4159#define FORCEWAKE_ACK_HSW 0x130044
4109#define FORCEWAKE_ACK 0x130090 4160#define FORCEWAKE_ACK 0x130090
4110#define FORCEWAKE_MT 0xa188 /* multi-threaded */ 4161#define FORCEWAKE_MT 0xa188 /* multi-threaded */
4162#define FORCEWAKE_KERNEL 0x1
4163#define FORCEWAKE_USER 0x2
4111#define FORCEWAKE_MT_ACK 0x130040 4164#define FORCEWAKE_MT_ACK 0x130040
4112#define ECOBUS 0xa180 4165#define ECOBUS 0xa180
4113#define FORCEWAKE_MT_ENABLE (1<<5) 4166#define FORCEWAKE_MT_ENABLE (1<<5)
@@ -4220,6 +4273,10 @@
4220#define GEN6_READ_OC_PARAMS 0xc 4273#define GEN6_READ_OC_PARAMS 0xc
4221#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8 4274#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
4222#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9 4275#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
4276#define GEN6_PCODE_WRITE_RC6VIDS 0x4
4277#define GEN6_PCODE_READ_RC6VIDS 0x5
4278#define GEN6_ENCODE_RC6_VID(mv) (((mv) / 5) - 245) < 0 ?: 0
4279#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) > 0 ? ((vids) * 5) + 245 : 0)
4223#define GEN6_PCODE_DATA 0x138128 4280#define GEN6_PCODE_DATA 0x138128
4224#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 4281#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
4225 4282
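[Note] GEN6_ENCODE_RC6_VID as committed does not invert GEN6_DECODE_RC6_VID: `(((mv) / 5) - 245) < 0 ?: 0` parses as a `<` comparison feeding GNU C's two-operand `?:`, so it can only yield 0 or 1, never a VID. Inverting the decode formula gives what was presumably intended (mainline later reworked these macros):

	/* Derived by inverting GEN6_DECODE_RC6_VID above: vids = (mv - 245) / 5
	 * for mv >= 245 mV. Not the text of this patch; shown for clarity. */
	#define GEN6_ENCODE_RC6_VID_FIXED(mv)	(((mv) - 245) / 5)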
@@ -4251,6 +4308,15 @@
4251#define GEN7_L3LOG_BASE 0xB070 4308#define GEN7_L3LOG_BASE 0xB070
4252#define GEN7_L3LOG_SIZE 0x80 4309#define GEN7_L3LOG_SIZE 0x80
4253 4310
4311#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */
4312#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100
4313#define GEN7_MAX_PS_THREAD_DEP (8<<12)
4314#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
4315
4316#define GEN7_ROW_CHICKEN2 0xe4f4
4317#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
4318#define DOP_CLOCK_GATING_DISABLE (1<<0)
4319
4254#define G4X_AUD_VID_DID 0x62020 4320#define G4X_AUD_VID_DID 0x62020
4255#define INTEL_AUDIO_DEVCL 0x808629FB 4321#define INTEL_AUDIO_DEVCL 0x808629FB
4256#define INTEL_AUDIO_DEVBLC 0x80862801 4322#define INTEL_AUDIO_DEVBLC 0x80862801
@@ -4380,33 +4446,39 @@
4380#define HSW_PWR_WELL_CTL6 0x45414 4446#define HSW_PWR_WELL_CTL6 0x45414
4381 4447
4382/* Per-pipe DDI Function Control */ 4448/* Per-pipe DDI Function Control */
4383#define PIPE_DDI_FUNC_CTL_A 0x60400 4449#define TRANS_DDI_FUNC_CTL_A 0x60400
4384#define PIPE_DDI_FUNC_CTL_B 0x61400 4450#define TRANS_DDI_FUNC_CTL_B 0x61400
4385#define PIPE_DDI_FUNC_CTL_C 0x62400 4451#define TRANS_DDI_FUNC_CTL_C 0x62400
4386#define PIPE_DDI_FUNC_CTL_EDP 0x6F400 4452#define TRANS_DDI_FUNC_CTL_EDP 0x6F400
4387#define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \ 4453#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER(tran, TRANS_DDI_FUNC_CTL_A, \
4388 PIPE_DDI_FUNC_CTL_B) 4454 TRANS_DDI_FUNC_CTL_B)
4389#define PIPE_DDI_FUNC_ENABLE (1<<31) 4455#define TRANS_DDI_FUNC_ENABLE (1<<31)
4390/* Those bits are ignored by pipe EDP since it can only connect to DDI A */ 4456/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
4391#define PIPE_DDI_PORT_MASK (7<<28) 4457#define TRANS_DDI_PORT_MASK (7<<28)
4392#define PIPE_DDI_SELECT_PORT(x) ((x)<<28) 4458#define TRANS_DDI_SELECT_PORT(x) ((x)<<28)
4393#define PIPE_DDI_MODE_SELECT_MASK (7<<24) 4459#define TRANS_DDI_PORT_NONE (0<<28)
4394#define PIPE_DDI_MODE_SELECT_HDMI (0<<24) 4460#define TRANS_DDI_MODE_SELECT_MASK (7<<24)
4395#define PIPE_DDI_MODE_SELECT_DVI (1<<24) 4461#define TRANS_DDI_MODE_SELECT_HDMI (0<<24)
4396#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24) 4462#define TRANS_DDI_MODE_SELECT_DVI (1<<24)
4397#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24) 4463#define TRANS_DDI_MODE_SELECT_DP_SST (2<<24)
4398#define PIPE_DDI_MODE_SELECT_FDI (4<<24) 4464#define TRANS_DDI_MODE_SELECT_DP_MST (3<<24)
4399#define PIPE_DDI_BPC_MASK (7<<20) 4465#define TRANS_DDI_MODE_SELECT_FDI (4<<24)
4400#define PIPE_DDI_BPC_8 (0<<20) 4466#define TRANS_DDI_BPC_MASK (7<<20)
4401#define PIPE_DDI_BPC_10 (1<<20) 4467#define TRANS_DDI_BPC_8 (0<<20)
4402#define PIPE_DDI_BPC_6 (2<<20) 4468#define TRANS_DDI_BPC_10 (1<<20)
4403#define PIPE_DDI_BPC_12 (3<<20) 4469#define TRANS_DDI_BPC_6 (2<<20)
4404#define PIPE_DDI_PVSYNC (1<<17) 4470#define TRANS_DDI_BPC_12 (3<<20)
4405#define PIPE_DDI_PHSYNC (1<<16) 4471#define TRANS_DDI_PVSYNC (1<<17)
4406#define PIPE_DDI_BFI_ENABLE (1<<4) 4472#define TRANS_DDI_PHSYNC (1<<16)
4407#define PIPE_DDI_PORT_WIDTH_X1 (0<<1) 4473#define TRANS_DDI_EDP_INPUT_MASK (7<<12)
4408#define PIPE_DDI_PORT_WIDTH_X2 (1<<1) 4474#define TRANS_DDI_EDP_INPUT_A_ON (0<<12)
4409#define PIPE_DDI_PORT_WIDTH_X4 (3<<1) 4475#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12)
4476#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
4477#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
4478#define TRANS_DDI_BFI_ENABLE (1<<4)
4479#define TRANS_DDI_PORT_WIDTH_X1 (0<<1)
4480#define TRANS_DDI_PORT_WIDTH_X2 (1<<1)
4481#define TRANS_DDI_PORT_WIDTH_X4 (3<<1)
4410 4482
4411/* DisplayPort Transport Control */ 4483/* DisplayPort Transport Control */
4412#define DP_TP_CTL_A 0x64040 4484#define DP_TP_CTL_A 0x64040
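[Sketch] The pipe-oriented DDI names become transcoder-oriented, matching the _TRANSCODER() indexing introduced earlier, and gain eDP input-select and port-none encodings. A sketch of enabling a transcoder's DDI function for 8 bpc DP SST, in the spirit of intel_ddi.c; tran and port are the caller's transcoder/port indices and the x2 width is illustrative:

	/* Sketch: route transcoder 'tran' to DDI 'port' as x2 DP SST, 8 bpc */
	u32 temp = TRANS_DDI_FUNC_ENABLE | TRANS_DDI_SELECT_PORT(port) |
		   TRANS_DDI_MODE_SELECT_DP_SST | TRANS_DDI_BPC_8 |
		   TRANS_DDI_PORT_WIDTH_X2;
	I915_WRITE(TRANS_DDI_FUNC_CTL(tran), temp);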
@@ -4420,12 +4492,16 @@
4420#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8) 4492#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
4421#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8) 4493#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
4422#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8) 4494#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
4495#define DP_TP_CTL_LINK_TRAIN_PAT3 (4<<8)
4496#define DP_TP_CTL_LINK_TRAIN_IDLE (2<<8)
4423#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8) 4497#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
4498#define DP_TP_CTL_SCRAMBLE_DISABLE (1<<7)
4424 4499
4425/* DisplayPort Transport Status */ 4500/* DisplayPort Transport Status */
4426#define DP_TP_STATUS_A 0x64044 4501#define DP_TP_STATUS_A 0x64044
4427#define DP_TP_STATUS_B 0x64144 4502#define DP_TP_STATUS_B 0x64144
4428#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B) 4503#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
4504#define DP_TP_STATUS_IDLE_DONE (1<<25)
4429#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12) 4505#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
4430 4506
4431/* DDI Buffer Control */ 4507/* DDI Buffer Control */
@@ -4444,6 +4520,7 @@
4444#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */ 4520#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
4445#define DDI_BUF_EMP_MASK (0xf<<24) 4521#define DDI_BUF_EMP_MASK (0xf<<24)
4446#define DDI_BUF_IS_IDLE (1<<7) 4522#define DDI_BUF_IS_IDLE (1<<7)
4523#define DDI_A_4_LANES (1<<4)
4447#define DDI_PORT_WIDTH_X1 (0<<1) 4524#define DDI_PORT_WIDTH_X1 (0<<1)
4448#define DDI_PORT_WIDTH_X2 (1<<1) 4525#define DDI_PORT_WIDTH_X2 (1<<1)
4449#define DDI_PORT_WIDTH_X4 (3<<1) 4526#define DDI_PORT_WIDTH_X4 (3<<1)
@@ -4460,6 +4537,10 @@
4460#define SBI_ADDR 0xC6000 4537#define SBI_ADDR 0xC6000
4461#define SBI_DATA 0xC6004 4538#define SBI_DATA 0xC6004
4462#define SBI_CTL_STAT 0xC6008 4539#define SBI_CTL_STAT 0xC6008
4540#define SBI_CTL_DEST_ICLK (0x0<<16)
4541#define SBI_CTL_DEST_MPHY (0x1<<16)
4542#define SBI_CTL_OP_IORD (0x2<<8)
4543#define SBI_CTL_OP_IOWR (0x3<<8)
4463#define SBI_CTL_OP_CRRD (0x6<<8) 4544#define SBI_CTL_OP_CRRD (0x6<<8)
4464#define SBI_CTL_OP_CRWR (0x7<<8) 4545#define SBI_CTL_OP_CRWR (0x7<<8)
4465#define SBI_RESPONSE_FAIL (0x1<<1) 4546#define SBI_RESPONSE_FAIL (0x1<<1)
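[Sketch] SBI grows destination selects (iCLK vs mPHY) and IORD/IOWR opcodes alongside the existing CRRD/CRWR. A sketch of an mPHY sideband read; the busy/kick bit in SBI_CTL_STAT is assumed (it is not part of this hunk), and wait_for() is the polling helper from intel_drv.h:

	#define SBI_BUSY_ASSUMED (0x1<<0)	/* assumption: bit 0 kicks/reports busy */

	/* Sketch: read one mPHY register over the LPT sideband interface */
	static u32 sketch_sbi_read_mphy(struct drm_i915_private *dev_priv, u16 reg)
	{
		I915_WRITE(SBI_ADDR, (u32)reg << 16);
		I915_WRITE(SBI_CTL_STAT,
			   SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD | SBI_BUSY_ASSUMED);
		if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY_ASSUMED) == 0, 100))
			return 0;	/* timed out; real code would WARN */
		return I915_READ(SBI_DATA);
	}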
@@ -4477,10 +4558,12 @@
4477#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0) 4558#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
4478#define SBI_SSCCTL 0x020c 4559#define SBI_SSCCTL 0x020c
4479#define SBI_SSCCTL6 0x060C 4560#define SBI_SSCCTL6 0x060C
4561#define SBI_SSCCTL_PATHALT (1<<3)
4480#define SBI_SSCCTL_DISABLE (1<<0) 4562#define SBI_SSCCTL_DISABLE (1<<0)
4481#define SBI_SSCAUXDIV6 0x0610 4563#define SBI_SSCAUXDIV6 0x0610
4482#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4) 4564#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
4483#define SBI_DBUFF0 0x2a00 4565#define SBI_DBUFF0 0x2a00
4566#define SBI_DBUFF0_ENABLE (1<<0)
4484 4567
4485/* LPT PIXCLK_GATE */ 4568/* LPT PIXCLK_GATE */
4486#define PIXCLK_GATE 0xC6020 4569#define PIXCLK_GATE 0xC6020
@@ -4490,8 +4573,8 @@
4490/* SPLL */ 4573/* SPLL */
4491#define SPLL_CTL 0x46020 4574#define SPLL_CTL 0x46020
4492#define SPLL_PLL_ENABLE (1<<31) 4575#define SPLL_PLL_ENABLE (1<<31)
4493#define SPLL_PLL_SCC (1<<28) 4576#define SPLL_PLL_SSC (1<<28)
4494#define SPLL_PLL_NON_SCC (2<<28) 4577#define SPLL_PLL_NON_SSC (2<<28)
4495#define SPLL_PLL_FREQ_810MHz (0<<26) 4578#define SPLL_PLL_FREQ_810MHz (0<<26)
4496#define SPLL_PLL_FREQ_1350MHz (1<<26) 4579#define SPLL_PLL_FREQ_1350MHz (1<<26)
4497 4580
@@ -4500,7 +4583,7 @@
4500#define WRPLL_CTL2 0x46060 4583#define WRPLL_CTL2 0x46060
4501#define WRPLL_PLL_ENABLE (1<<31) 4584#define WRPLL_PLL_ENABLE (1<<31)
4502#define WRPLL_PLL_SELECT_SSC (0x01<<28) 4585#define WRPLL_PLL_SELECT_SSC (0x01<<28)
4503#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28) 4586#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28)
4504#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28) 4587#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
4505/* WRPLL divider programming */ 4588/* WRPLL divider programming */
4506#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0) 4589#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
@@ -4517,21 +4600,36 @@
4517#define PORT_CLK_SEL_SPLL (3<<29) 4600#define PORT_CLK_SEL_SPLL (3<<29)
4518#define PORT_CLK_SEL_WRPLL1 (4<<29) 4601#define PORT_CLK_SEL_WRPLL1 (4<<29)
4519#define PORT_CLK_SEL_WRPLL2 (5<<29) 4602#define PORT_CLK_SEL_WRPLL2 (5<<29)
4520 4603#define PORT_CLK_SEL_NONE (7<<29)
4521/* Pipe clock selection */ 4604
4522#define PIPE_CLK_SEL_A 0x46140 4605/* Transcoder clock selection */
4523#define PIPE_CLK_SEL_B 0x46144 4606#define TRANS_CLK_SEL_A 0x46140
4524#define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B) 4607#define TRANS_CLK_SEL_B 0x46144
4525/* For each pipe, we need to select the corresponding port clock */ 4608#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B)
4526#define PIPE_CLK_SEL_DISABLED (0x0<<29) 4609/* For each transcoder, we need to select the corresponding port clock */
4527#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29) 4610#define TRANS_CLK_SEL_DISABLED (0x0<<29)
4611#define TRANS_CLK_SEL_PORT(x) ((x+1)<<29)
4612
4613#define _TRANSA_MSA_MISC 0x60410
4614#define _TRANSB_MSA_MISC 0x61410
4615#define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \
4616 _TRANSB_MSA_MISC)
4617#define TRANS_MSA_SYNC_CLK (1<<0)
4618#define TRANS_MSA_6_BPC (0<<5)
4619#define TRANS_MSA_8_BPC (1<<5)
4620#define TRANS_MSA_10_BPC (2<<5)
4621#define TRANS_MSA_12_BPC (3<<5)
4622#define TRANS_MSA_16_BPC (4<<5)
4528 4623
4529/* LCPLL Control */ 4624/* LCPLL Control */
4530#define LCPLL_CTL 0x130040 4625#define LCPLL_CTL 0x130040
4531#define LCPLL_PLL_DISABLE (1<<31) 4626#define LCPLL_PLL_DISABLE (1<<31)
4532#define LCPLL_PLL_LOCK (1<<30) 4627#define LCPLL_PLL_LOCK (1<<30)
4628#define LCPLL_CLK_FREQ_MASK (3<<26)
4629#define LCPLL_CLK_FREQ_450 (0<<26)
4533#define LCPLL_CD_CLOCK_DISABLE (1<<25) 4630#define LCPLL_CD_CLOCK_DISABLE (1<<25)
4534#define LCPLL_CD2X_CLOCK_DISABLE (1<<23) 4631#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
4632#define LCPLL_CD_SOURCE_FCLK (1<<21)
4535 4633
4536/* Pipe WM_LINETIME - watermark line time */ 4634/* Pipe WM_LINETIME - watermark line time */
4537#define PIPE_WM_LINETIME_A 0x45270 4635#define PIPE_WM_LINETIME_A 0x45270
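[Sketch] LCPLL_CTL gains frequency-readout and FCLK-bypass bits, pairing with the HSW_FUSE_STRAP/HSW_CDCLK_LIMIT strap added earlier. A rough sketch of the CD clock readout these enable; only the 450 MHz decode is defined in this hunk, so the fallback value is a placeholder:

	/* Sketch: guess the HSW CD clock from LCPLL_CTL and the fuse strap */
	static int sketch_hsw_cdclk_mhz(struct drm_i915_private *dev_priv)
	{
		u32 lcpll = I915_READ(LCPLL_CTL);

		if ((lcpll & LCPLL_CLK_FREQ_MASK) == LCPLL_CLK_FREQ_450)
			return 450;
		if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
			return 450;	/* fused down */
		return 540;	/* placeholder: other decodes not in this hunk */
	}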
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 5854bddb1e9f..63d4d30c39de 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -60,9 +60,9 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
60 reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B; 60 reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
61 61
62 if (pipe == PIPE_A) 62 if (pipe == PIPE_A)
63 array = dev_priv->save_palette_a; 63 array = dev_priv->regfile.save_palette_a;
64 else 64 else
65 array = dev_priv->save_palette_b; 65 array = dev_priv->regfile.save_palette_b;
66 66
67 for (i = 0; i < 256; i++) 67 for (i = 0; i < 256; i++)
68 array[i] = I915_READ(reg + (i << 2)); 68 array[i] = I915_READ(reg + (i << 2));
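[Sketch] The i915_suspend.c changes are a mechanical rename: the flat save* members of drm_i915_private move into an embedded regfile struct. A sketch of the presumed i915_drv.h counterpart, with member names and array sizes taken from the accessors and loops in these hunks (0x14, 0x24 loop bounds give 21 and 37 entries):

	/* Presumed i915_drv.h counterpart of this rename (abridged) */
	struct i915_suspend_saved_registers {
		u32 save_palette_a[256];
		u32 save_palette_b[256];
		u8 saveMSR;
		u8 saveCR[37];
		u8 saveAR_INDEX;
		u8 saveAR[21];
		u8 saveDACMASK;
		/* ...remaining save* fields move here unchanged... */
	};
	/* embedded in drm_i915_private as:
	 *	struct i915_suspend_saved_registers regfile; */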
@@ -82,9 +82,9 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
82 reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B; 82 reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
83 83
84 if (pipe == PIPE_A) 84 if (pipe == PIPE_A)
85 array = dev_priv->save_palette_a; 85 array = dev_priv->regfile.save_palette_a;
86 else 86 else
87 array = dev_priv->save_palette_b; 87 array = dev_priv->regfile.save_palette_b;
88 88
89 for (i = 0; i < 256; i++) 89 for (i = 0; i < 256; i++)
90 I915_WRITE(reg + (i << 2), array[i]); 90 I915_WRITE(reg + (i << 2), array[i]);
@@ -131,11 +131,11 @@ static void i915_save_vga(struct drm_device *dev)
131 u16 cr_index, cr_data, st01; 131 u16 cr_index, cr_data, st01;
132 132
133 /* VGA color palette registers */ 133 /* VGA color palette registers */
134 dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK); 134 dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK);
135 135
136 /* MSR bits */ 136 /* MSR bits */
137 dev_priv->saveMSR = I915_READ8(VGA_MSR_READ); 137 dev_priv->regfile.saveMSR = I915_READ8(VGA_MSR_READ);
138 if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { 138 if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
139 cr_index = VGA_CR_INDEX_CGA; 139 cr_index = VGA_CR_INDEX_CGA;
140 cr_data = VGA_CR_DATA_CGA; 140 cr_data = VGA_CR_DATA_CGA;
141 st01 = VGA_ST01_CGA; 141 st01 = VGA_ST01_CGA;
@@ -150,35 +150,35 @@ static void i915_save_vga(struct drm_device *dev)
150 i915_read_indexed(dev, cr_index, cr_data, 0x11) & 150 i915_read_indexed(dev, cr_index, cr_data, 0x11) &
151 (~0x80)); 151 (~0x80));
152 for (i = 0; i <= 0x24; i++) 152 for (i = 0; i <= 0x24; i++)
153 dev_priv->saveCR[i] = 153 dev_priv->regfile.saveCR[i] =
154 i915_read_indexed(dev, cr_index, cr_data, i); 154 i915_read_indexed(dev, cr_index, cr_data, i);
155 /* Make sure we don't turn off CR group 0 writes */ 155 /* Make sure we don't turn off CR group 0 writes */
156 dev_priv->saveCR[0x11] &= ~0x80; 156 dev_priv->regfile.saveCR[0x11] &= ~0x80;
157 157
158 /* Attribute controller registers */ 158 /* Attribute controller registers */
159 I915_READ8(st01); 159 I915_READ8(st01);
160 dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX); 160 dev_priv->regfile.saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
161 for (i = 0; i <= 0x14; i++) 161 for (i = 0; i <= 0x14; i++)
162 dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0); 162 dev_priv->regfile.saveAR[i] = i915_read_ar(dev, st01, i, 0);
163 I915_READ8(st01); 163 I915_READ8(st01);
164 I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX); 164 I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX);
165 I915_READ8(st01); 165 I915_READ8(st01);
166 166
167 /* Graphics controller registers */ 167 /* Graphics controller registers */
168 for (i = 0; i < 9; i++) 168 for (i = 0; i < 9; i++)
169 dev_priv->saveGR[i] = 169 dev_priv->regfile.saveGR[i] =
170 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i); 170 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
171 171
172 dev_priv->saveGR[0x10] = 172 dev_priv->regfile.saveGR[0x10] =
173 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10); 173 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
174 dev_priv->saveGR[0x11] = 174 dev_priv->regfile.saveGR[0x11] =
175 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11); 175 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
176 dev_priv->saveGR[0x18] = 176 dev_priv->regfile.saveGR[0x18] =
177 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18); 177 i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
178 178
179 /* Sequencer registers */ 179 /* Sequencer registers */
180 for (i = 0; i < 8; i++) 180 for (i = 0; i < 8; i++)
181 dev_priv->saveSR[i] = 181 dev_priv->regfile.saveSR[i] =
182 i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i); 182 i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
183} 183}
184 184
@@ -189,8 +189,8 @@ static void i915_restore_vga(struct drm_device *dev)
189 u16 cr_index, cr_data, st01; 189 u16 cr_index, cr_data, st01;
190 190
191 /* MSR bits */ 191 /* MSR bits */
192 I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR); 192 I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR);
193 if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { 193 if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
194 cr_index = VGA_CR_INDEX_CGA; 194 cr_index = VGA_CR_INDEX_CGA;
195 cr_data = VGA_CR_DATA_CGA; 195 cr_data = VGA_CR_DATA_CGA;
196 st01 = VGA_ST01_CGA; 196 st01 = VGA_ST01_CGA;
@@ -203,36 +203,36 @@ static void i915_restore_vga(struct drm_device *dev)
203 /* Sequencer registers, don't write SR07 */ 203 /* Sequencer registers, don't write SR07 */
204 for (i = 0; i < 7; i++) 204 for (i = 0; i < 7; i++)
205 i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i, 205 i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
206 dev_priv->saveSR[i]); 206 dev_priv->regfile.saveSR[i]);
207 207
208 /* CRT controller regs */ 208 /* CRT controller regs */
209 /* Enable CR group 0 writes */ 209 /* Enable CR group 0 writes */
210 i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]); 210 i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->regfile.saveCR[0x11]);
211 for (i = 0; i <= 0x24; i++) 211 for (i = 0; i <= 0x24; i++)
212 i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]); 212 i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->regfile.saveCR[i]);
213 213
214 /* Graphics controller regs */ 214 /* Graphics controller regs */
215 for (i = 0; i < 9; i++) 215 for (i = 0; i < 9; i++)
216 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i, 216 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
217 dev_priv->saveGR[i]); 217 dev_priv->regfile.saveGR[i]);
218 218
219 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10, 219 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
220 dev_priv->saveGR[0x10]); 220 dev_priv->regfile.saveGR[0x10]);
221 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11, 221 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
222 dev_priv->saveGR[0x11]); 222 dev_priv->regfile.saveGR[0x11]);
223 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18, 223 i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
224 dev_priv->saveGR[0x18]); 224 dev_priv->regfile.saveGR[0x18]);
225 225
226 /* Attribute controller registers */ 226 /* Attribute controller registers */
227 I915_READ8(st01); /* switch back to index mode */ 227 I915_READ8(st01); /* switch back to index mode */
228 for (i = 0; i <= 0x14; i++) 228 for (i = 0; i <= 0x14; i++)
229 i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0); 229 i915_write_ar(dev, st01, i, dev_priv->regfile.saveAR[i], 0);
230 I915_READ8(st01); /* switch back to index mode */ 230 I915_READ8(st01); /* switch back to index mode */
231 I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20); 231 I915_WRITE8(VGA_AR_INDEX, dev_priv->regfile.saveAR_INDEX | 0x20);
232 I915_READ8(st01); 232 I915_READ8(st01);
233 233
234 /* VGA color palette registers */ 234 /* VGA color palette registers */
235 I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK); 235 I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK);
236} 236}
237 237
238static void i915_save_modeset_reg(struct drm_device *dev) 238static void i915_save_modeset_reg(struct drm_device *dev)
@@ -244,156 +244,162 @@ static void i915_save_modeset_reg(struct drm_device *dev)
244 return; 244 return;
245 245
246 /* Cursor state */ 246 /* Cursor state */
247 dev_priv->saveCURACNTR = I915_READ(_CURACNTR); 247 dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
248 dev_priv->saveCURAPOS = I915_READ(_CURAPOS); 248 dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
249 dev_priv->saveCURABASE = I915_READ(_CURABASE); 249 dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
250 dev_priv->saveCURBCNTR = I915_READ(_CURBCNTR); 250 dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
251 dev_priv->saveCURBPOS = I915_READ(_CURBPOS); 251 dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
252 dev_priv->saveCURBBASE = I915_READ(_CURBBASE); 252 dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
253 if (IS_GEN2(dev)) 253 if (IS_GEN2(dev))
254 dev_priv->saveCURSIZE = I915_READ(CURSIZE); 254 dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
255 255
256 if (HAS_PCH_SPLIT(dev)) { 256 if (HAS_PCH_SPLIT(dev)) {
257 dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); 257 dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
258 dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); 258 dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
259 } 259 }
260 260
261 /* Pipe & plane A info */ 261 /* Pipe & plane A info */
262 dev_priv->savePIPEACONF = I915_READ(_PIPEACONF); 262 dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
263 dev_priv->savePIPEASRC = I915_READ(_PIPEASRC); 263 dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
264 if (HAS_PCH_SPLIT(dev)) { 264 if (HAS_PCH_SPLIT(dev)) {
265 dev_priv->saveFPA0 = I915_READ(_PCH_FPA0); 265 dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
266 dev_priv->saveFPA1 = I915_READ(_PCH_FPA1); 266 dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
267 dev_priv->saveDPLL_A = I915_READ(_PCH_DPLL_A); 267 dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
268 } else { 268 } else {
269 dev_priv->saveFPA0 = I915_READ(_FPA0); 269 dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
270 dev_priv->saveFPA1 = I915_READ(_FPA1); 270 dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
271 dev_priv->saveDPLL_A = I915_READ(_DPLL_A); 271 dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
272 } 272 }
273 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) 273 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
274 dev_priv->saveDPLL_A_MD = I915_READ(_DPLL_A_MD); 274 dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
275 dev_priv->saveHTOTAL_A = I915_READ(_HTOTAL_A); 275 dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
276 dev_priv->saveHBLANK_A = I915_READ(_HBLANK_A); 276 dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
277 dev_priv->saveHSYNC_A = I915_READ(_HSYNC_A); 277 dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
278 dev_priv->saveVTOTAL_A = I915_READ(_VTOTAL_A); 278 dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
279 dev_priv->saveVBLANK_A = I915_READ(_VBLANK_A); 279 dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
280 dev_priv->saveVSYNC_A = I915_READ(_VSYNC_A); 280 dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
281 if (!HAS_PCH_SPLIT(dev)) 281 if (!HAS_PCH_SPLIT(dev))
282 dev_priv->saveBCLRPAT_A = I915_READ(_BCLRPAT_A); 282 dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
283 283
284 if (HAS_PCH_SPLIT(dev)) { 284 if (HAS_PCH_SPLIT(dev)) {
285 dev_priv->savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1); 285 dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
286 dev_priv->savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1); 286 dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
287 dev_priv->savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1); 287 dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
288 dev_priv->savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1); 288 dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
289 289
290 dev_priv->saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL); 290 dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
291 dev_priv->saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL); 291 dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
292 292
293 dev_priv->savePFA_CTL_1 = I915_READ(_PFA_CTL_1); 293 dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
294 dev_priv->savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ); 294 dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
295 dev_priv->savePFA_WIN_POS = I915_READ(_PFA_WIN_POS); 295 dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
296 296
297 dev_priv->saveTRANSACONF = I915_READ(_TRANSACONF); 297 dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF);
298 dev_priv->saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A); 298 dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
299 dev_priv->saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A); 299 dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
300 dev_priv->saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A); 300 dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
301 dev_priv->saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A); 301 dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
302 dev_priv->saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A); 302 dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
303 dev_priv->saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A); 303 dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
304 } 304 }
305 305
306 dev_priv->saveDSPACNTR = I915_READ(_DSPACNTR); 306 dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
307 dev_priv->saveDSPASTRIDE = I915_READ(_DSPASTRIDE); 307 dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
308 dev_priv->saveDSPASIZE = I915_READ(_DSPASIZE); 308 dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
309 dev_priv->saveDSPAPOS = I915_READ(_DSPAPOS); 309 dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
310 dev_priv->saveDSPAADDR = I915_READ(_DSPAADDR); 310 dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
311 if (INTEL_INFO(dev)->gen >= 4) { 311 if (INTEL_INFO(dev)->gen >= 4) {
312 dev_priv->saveDSPASURF = I915_READ(_DSPASURF); 312 dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
313 dev_priv->saveDSPATILEOFF = I915_READ(_DSPATILEOFF); 313 dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
314 } 314 }
315 i915_save_palette(dev, PIPE_A); 315 i915_save_palette(dev, PIPE_A);
316 dev_priv->savePIPEASTAT = I915_READ(_PIPEASTAT); 316 dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
317 317
318 /* Pipe & plane B info */ 318 /* Pipe & plane B info */
319 dev_priv->savePIPEBCONF = I915_READ(_PIPEBCONF); 319 dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
320 dev_priv->savePIPEBSRC = I915_READ(_PIPEBSRC); 320 dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
321 if (HAS_PCH_SPLIT(dev)) { 321 if (HAS_PCH_SPLIT(dev)) {
322 dev_priv->saveFPB0 = I915_READ(_PCH_FPB0); 322 dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
323 dev_priv->saveFPB1 = I915_READ(_PCH_FPB1); 323 dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
324 dev_priv->saveDPLL_B = I915_READ(_PCH_DPLL_B); 324 dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
325 } else { 325 } else {
326 dev_priv->saveFPB0 = I915_READ(_FPB0); 326 dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
327 dev_priv->saveFPB1 = I915_READ(_FPB1); 327 dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
328 dev_priv->saveDPLL_B = I915_READ(_DPLL_B); 328 dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
329 } 329 }
330 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) 330 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
331 dev_priv->saveDPLL_B_MD = I915_READ(_DPLL_B_MD); 331 dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
332 dev_priv->saveHTOTAL_B = I915_READ(_HTOTAL_B); 332 dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
333 dev_priv->saveHBLANK_B = I915_READ(_HBLANK_B); 333 dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
334 dev_priv->saveHSYNC_B = I915_READ(_HSYNC_B); 334 dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
335 dev_priv->saveVTOTAL_B = I915_READ(_VTOTAL_B); 335 dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
336 dev_priv->saveVBLANK_B = I915_READ(_VBLANK_B); 336 dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
337 dev_priv->saveVSYNC_B = I915_READ(_VSYNC_B); 337 dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
338 if (!HAS_PCH_SPLIT(dev)) 338 if (!HAS_PCH_SPLIT(dev))
339 dev_priv->saveBCLRPAT_B = I915_READ(_BCLRPAT_B); 339 dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
340 340
341 if (HAS_PCH_SPLIT(dev)) { 341 if (HAS_PCH_SPLIT(dev)) {
342 dev_priv->savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1); 342 dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
343 dev_priv->savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1); 343 dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
344 dev_priv->savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1); 344 dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
345 dev_priv->savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1); 345 dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
346 346
347 dev_priv->saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL); 347 dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
348 dev_priv->saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL); 348 dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
349 349
350 dev_priv->savePFB_CTL_1 = I915_READ(_PFB_CTL_1); 350 dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
351 dev_priv->savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ); 351 dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
352 dev_priv->savePFB_WIN_POS = I915_READ(_PFB_WIN_POS); 352 dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
353 353
354 dev_priv->saveTRANSBCONF = I915_READ(_TRANSBCONF); 354 dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF);
355 dev_priv->saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B); 355 dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
356 dev_priv->saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B); 356 dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
357 dev_priv->saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B); 357 dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
358 dev_priv->saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B); 358 dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
359 dev_priv->saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B); 359 dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
360 dev_priv->saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B); 360 dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
361 } 361 }
362 362
363 dev_priv->saveDSPBCNTR = I915_READ(_DSPBCNTR); 363 dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
364 dev_priv->saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE); 364 dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
365 dev_priv->saveDSPBSIZE = I915_READ(_DSPBSIZE); 365 dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
366 dev_priv->saveDSPBPOS = I915_READ(_DSPBPOS); 366 dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
367 dev_priv->saveDSPBADDR = I915_READ(_DSPBADDR); 367 dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
368 if (INTEL_INFO(dev)->gen >= 4) { 368 if (INTEL_INFO(dev)->gen >= 4) {
369 dev_priv->saveDSPBSURF = I915_READ(_DSPBSURF); 369 dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
370 dev_priv->saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF); 370 dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
371 } 371 }
372 i915_save_palette(dev, PIPE_B); 372 i915_save_palette(dev, PIPE_B);
373 dev_priv->savePIPEBSTAT = I915_READ(_PIPEBSTAT); 373 dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
374 374
375 /* Fences */ 375 /* Fences */
376 switch (INTEL_INFO(dev)->gen) { 376 switch (INTEL_INFO(dev)->gen) {
377 case 7: 377 case 7:
378 case 6: 378 case 6:
379 for (i = 0; i < 16; i++) 379 for (i = 0; i < 16; i++)
380 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); 380 dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
381 break; 381 break;
382 case 5: 382 case 5:
383 case 4: 383 case 4:
384 for (i = 0; i < 16; i++) 384 for (i = 0; i < 16; i++)
385 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); 385 dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
386 break; 386 break;
387 case 3: 387 case 3:
388 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 388 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
389 for (i = 0; i < 8; i++) 389 for (i = 0; i < 8; i++)
390 dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); 390 dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
391 case 2: 391 case 2:
392 for (i = 0; i < 8; i++) 392 for (i = 0; i < 8; i++)
393 dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); 393 dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
394 break; 394 break;
395 } 395 }
396 396
397 /* CRT state */
398 if (HAS_PCH_SPLIT(dev))
399 dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
400 else
401 dev_priv->regfile.saveADPA = I915_READ(ADPA);
402
397 return; 403 return;
398} 404}
399 405
@@ -412,20 +418,20 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
412 case 7: 418 case 7:
413 case 6: 419 case 6:
414 for (i = 0; i < 16; i++) 420 for (i = 0; i < 16; i++)
415 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]); 421 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
416 break; 422 break;
417 case 5: 423 case 5:
418 case 4: 424 case 4:
419 for (i = 0; i < 16; i++) 425 for (i = 0; i < 16; i++)
420 I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]); 426 I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
421 break; 427 break;
422 case 3: 428 case 3:
423 case 2: 429 case 2:
424 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 430 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
425 for (i = 0; i < 8; i++) 431 for (i = 0; i < 8; i++)
426 I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]); 432 I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
427 for (i = 0; i < 8; i++) 433 for (i = 0; i < 8; i++)
428 I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]); 434 I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
429 break; 435 break;
430 } 436 }
431 437
@@ -447,158 +453,164 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
447 } 453 }
448 454
449 if (HAS_PCH_SPLIT(dev)) { 455 if (HAS_PCH_SPLIT(dev)) {
450 I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL); 456 I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
451 I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL); 457 I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
452 } 458 }
453 459
454 /* Pipe & plane A info */ 460 /* Pipe & plane A info */
455 /* Prime the clock */ 461 /* Prime the clock */
456 if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { 462 if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
457 I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A & 463 I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
458 ~DPLL_VCO_ENABLE); 464 ~DPLL_VCO_ENABLE);
459 POSTING_READ(dpll_a_reg); 465 POSTING_READ(dpll_a_reg);
460 udelay(150); 466 udelay(150);
461 } 467 }
462 I915_WRITE(fpa0_reg, dev_priv->saveFPA0); 468 I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
463 I915_WRITE(fpa1_reg, dev_priv->saveFPA1); 469 I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
464 /* Actually enable it */ 470 /* Actually enable it */
465 I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A); 471 I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
466 POSTING_READ(dpll_a_reg); 472 POSTING_READ(dpll_a_reg);
467 udelay(150); 473 udelay(150);
468 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { 474 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
469 I915_WRITE(_DPLL_A_MD, dev_priv->saveDPLL_A_MD); 475 I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
470 POSTING_READ(_DPLL_A_MD); 476 POSTING_READ(_DPLL_A_MD);
471 } 477 }
472 udelay(150); 478 udelay(150);
473 479
474 /* Restore mode */ 480 /* Restore mode */
475 I915_WRITE(_HTOTAL_A, dev_priv->saveHTOTAL_A); 481 I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
476 I915_WRITE(_HBLANK_A, dev_priv->saveHBLANK_A); 482 I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
477 I915_WRITE(_HSYNC_A, dev_priv->saveHSYNC_A); 483 I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
478 I915_WRITE(_VTOTAL_A, dev_priv->saveVTOTAL_A); 484 I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
479 I915_WRITE(_VBLANK_A, dev_priv->saveVBLANK_A); 485 I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
480 I915_WRITE(_VSYNC_A, dev_priv->saveVSYNC_A); 486 I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
481 if (!HAS_PCH_SPLIT(dev)) 487 if (!HAS_PCH_SPLIT(dev))
482 I915_WRITE(_BCLRPAT_A, dev_priv->saveBCLRPAT_A); 488 I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
483 489
484 if (HAS_PCH_SPLIT(dev)) { 490 if (HAS_PCH_SPLIT(dev)) {
485 I915_WRITE(_PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1); 491 I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
486 I915_WRITE(_PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1); 492 I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
487 I915_WRITE(_PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1); 493 I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
488 I915_WRITE(_PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1); 494 I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
489 495
490 I915_WRITE(_FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL); 496 I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
491 I915_WRITE(_FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL); 497 I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
492 498
493 I915_WRITE(_PFA_CTL_1, dev_priv->savePFA_CTL_1); 499 I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
494 I915_WRITE(_PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ); 500 I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
495 I915_WRITE(_PFA_WIN_POS, dev_priv->savePFA_WIN_POS); 501 I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
496 502
497 I915_WRITE(_TRANSACONF, dev_priv->saveTRANSACONF); 503 I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
498 I915_WRITE(_TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A); 504 I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
499 I915_WRITE(_TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A); 505 I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
500 I915_WRITE(_TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A); 506 I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
501 I915_WRITE(_TRANS_VTOTAL_A, dev_priv->saveTRANS_VTOTAL_A); 507 I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
502 I915_WRITE(_TRANS_VBLANK_A, dev_priv->saveTRANS_VBLANK_A); 508 I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
503 I915_WRITE(_TRANS_VSYNC_A, dev_priv->saveTRANS_VSYNC_A); 509 I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
504 } 510 }
505 511
506 /* Restore plane info */ 512 /* Restore plane info */
507 I915_WRITE(_DSPASIZE, dev_priv->saveDSPASIZE); 513 I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
508 I915_WRITE(_DSPAPOS, dev_priv->saveDSPAPOS); 514 I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
509 I915_WRITE(_PIPEASRC, dev_priv->savePIPEASRC); 515 I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
510 I915_WRITE(_DSPAADDR, dev_priv->saveDSPAADDR); 516 I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
511 I915_WRITE(_DSPASTRIDE, dev_priv->saveDSPASTRIDE); 517 I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
512 if (INTEL_INFO(dev)->gen >= 4) { 518 if (INTEL_INFO(dev)->gen >= 4) {
513 I915_WRITE(_DSPASURF, dev_priv->saveDSPASURF); 519 I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
514 I915_WRITE(_DSPATILEOFF, dev_priv->saveDSPATILEOFF); 520 I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
515 } 521 }
516 522
517 I915_WRITE(_PIPEACONF, dev_priv->savePIPEACONF); 523 I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
518 524
519 i915_restore_palette(dev, PIPE_A); 525 i915_restore_palette(dev, PIPE_A);
520 /* Enable the plane */ 526 /* Enable the plane */
521 I915_WRITE(_DSPACNTR, dev_priv->saveDSPACNTR); 527 I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
522 I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR)); 528 I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
523 529
524 /* Pipe & plane B info */ 530 /* Pipe & plane B info */
525 if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { 531 if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
526 I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B & 532 I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
527 ~DPLL_VCO_ENABLE); 533 ~DPLL_VCO_ENABLE);
528 POSTING_READ(dpll_b_reg); 534 POSTING_READ(dpll_b_reg);
529 udelay(150); 535 udelay(150);
530 } 536 }
531 I915_WRITE(fpb0_reg, dev_priv->saveFPB0); 537 I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
532 I915_WRITE(fpb1_reg, dev_priv->saveFPB1); 538 I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
533 /* Actually enable it */ 539 /* Actually enable it */
534 I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B); 540 I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
535 POSTING_READ(dpll_b_reg); 541 POSTING_READ(dpll_b_reg);
536 udelay(150); 542 udelay(150);
537 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) { 543 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
538 I915_WRITE(_DPLL_B_MD, dev_priv->saveDPLL_B_MD); 544 I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
539 POSTING_READ(_DPLL_B_MD); 545 POSTING_READ(_DPLL_B_MD);
540 } 546 }
541 udelay(150); 547 udelay(150);
542 548
543 /* Restore mode */ 549 /* Restore mode */
544 I915_WRITE(_HTOTAL_B, dev_priv->saveHTOTAL_B); 550 I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
545 I915_WRITE(_HBLANK_B, dev_priv->saveHBLANK_B); 551 I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
546 I915_WRITE(_HSYNC_B, dev_priv->saveHSYNC_B); 552 I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
547 I915_WRITE(_VTOTAL_B, dev_priv->saveVTOTAL_B); 553 I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
548 I915_WRITE(_VBLANK_B, dev_priv->saveVBLANK_B); 554 I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
549 I915_WRITE(_VSYNC_B, dev_priv->saveVSYNC_B); 555 I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
550 if (!HAS_PCH_SPLIT(dev)) 556 if (!HAS_PCH_SPLIT(dev))
551 I915_WRITE(_BCLRPAT_B, dev_priv->saveBCLRPAT_B); 557 I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
552 558
553 if (HAS_PCH_SPLIT(dev)) { 559 if (HAS_PCH_SPLIT(dev)) {
554 I915_WRITE(_PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1); 560 I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
555 I915_WRITE(_PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1); 561 I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
556 I915_WRITE(_PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1); 562 I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
557 I915_WRITE(_PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1); 563 I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
558 564
559 I915_WRITE(_FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL); 565 I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
560 I915_WRITE(_FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL); 566 I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
561 567
562 I915_WRITE(_PFB_CTL_1, dev_priv->savePFB_CTL_1); 568 I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
563 I915_WRITE(_PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ); 569 I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
564 I915_WRITE(_PFB_WIN_POS, dev_priv->savePFB_WIN_POS); 570 I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
565 571
566 I915_WRITE(_TRANSBCONF, dev_priv->saveTRANSBCONF); 572 I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
567 I915_WRITE(_TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B); 573 I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
568 I915_WRITE(_TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B); 574 I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
569 I915_WRITE(_TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B); 575 I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
570 I915_WRITE(_TRANS_VTOTAL_B, dev_priv->saveTRANS_VTOTAL_B); 576 I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
571 I915_WRITE(_TRANS_VBLANK_B, dev_priv->saveTRANS_VBLANK_B); 577 I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
572 I915_WRITE(_TRANS_VSYNC_B, dev_priv->saveTRANS_VSYNC_B); 578 I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
573 } 579 }
574 580
575 /* Restore plane info */ 581 /* Restore plane info */
576 I915_WRITE(_DSPBSIZE, dev_priv->saveDSPBSIZE); 582 I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
577 I915_WRITE(_DSPBPOS, dev_priv->saveDSPBPOS); 583 I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
578 I915_WRITE(_PIPEBSRC, dev_priv->savePIPEBSRC); 584 I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
579 I915_WRITE(_DSPBADDR, dev_priv->saveDSPBADDR); 585 I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
580 I915_WRITE(_DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); 586 I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
581 if (INTEL_INFO(dev)->gen >= 4) { 587 if (INTEL_INFO(dev)->gen >= 4) {
582 I915_WRITE(_DSPBSURF, dev_priv->saveDSPBSURF); 588 I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
583 I915_WRITE(_DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); 589 I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
584 } 590 }
585 591
586 I915_WRITE(_PIPEBCONF, dev_priv->savePIPEBCONF); 592 I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
587 593
588 i915_restore_palette(dev, PIPE_B); 594 i915_restore_palette(dev, PIPE_B);
589 /* Enable the plane */ 595 /* Enable the plane */
590 I915_WRITE(_DSPBCNTR, dev_priv->saveDSPBCNTR); 596 I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
591 I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR)); 597 I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
592 598
593 /* Cursor state */ 599 /* Cursor state */
594 I915_WRITE(_CURAPOS, dev_priv->saveCURAPOS); 600 I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
595 I915_WRITE(_CURACNTR, dev_priv->saveCURACNTR); 601 I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
596 I915_WRITE(_CURABASE, dev_priv->saveCURABASE); 602 I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
597 I915_WRITE(_CURBPOS, dev_priv->saveCURBPOS); 603 I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
598 I915_WRITE(_CURBCNTR, dev_priv->saveCURBCNTR); 604 I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
599 I915_WRITE(_CURBBASE, dev_priv->saveCURBBASE); 605 I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
600 if (IS_GEN2(dev)) 606 if (IS_GEN2(dev))
601 I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); 607 I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
608
609 /* CRT state */
610 if (HAS_PCH_SPLIT(dev))
611 I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
612 else
613 I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
602 614
603 return; 615 return;
604} 616}
@@ -608,89 +620,84 @@ static void i915_save_display(struct drm_device *dev)
608 struct drm_i915_private *dev_priv = dev->dev_private; 620 struct drm_i915_private *dev_priv = dev->dev_private;
609 621
610 /* Display arbitration control */ 622 /* Display arbitration control */
611 dev_priv->saveDSPARB = I915_READ(DSPARB); 623 dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
612 624
613 /* This is only meaningful in non-KMS mode */ 625 /* This is only meaningful in non-KMS mode */
614 /* Don't save them in KMS mode */ 626 /* Don't save them in KMS mode */
615 i915_save_modeset_reg(dev); 627 i915_save_modeset_reg(dev);
616 628
617 /* CRT state */
618 if (HAS_PCH_SPLIT(dev)) {
619 dev_priv->saveADPA = I915_READ(PCH_ADPA);
620 } else {
621 dev_priv->saveADPA = I915_READ(ADPA);
622 }
623
624 /* LVDS state */ 629 /* LVDS state */
625 if (HAS_PCH_SPLIT(dev)) { 630 if (HAS_PCH_SPLIT(dev)) {
626 dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL); 631 dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
627 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1); 632 dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
628 dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2); 633 dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
629 dev_priv->saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL); 634 dev_priv->regfile.saveBLC_CPU_PWM_CTL = I915_READ(BLC_PWM_CPU_CTL);
630 dev_priv->saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2); 635 dev_priv->regfile.saveBLC_CPU_PWM_CTL2 = I915_READ(BLC_PWM_CPU_CTL2);
631 dev_priv->saveLVDS = I915_READ(PCH_LVDS); 636 dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
632 } else { 637 } else {
633 dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL); 638 dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
634 dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); 639 dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
635 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); 640 dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
636 dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL); 641 dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
637 if (INTEL_INFO(dev)->gen >= 4) 642 if (INTEL_INFO(dev)->gen >= 4)
638 dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); 643 dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
639 if (IS_MOBILE(dev) && !IS_I830(dev)) 644 if (IS_MOBILE(dev) && !IS_I830(dev))
640 dev_priv->saveLVDS = I915_READ(LVDS); 645 dev_priv->regfile.saveLVDS = I915_READ(LVDS);
641 } 646 }
642 647
643 if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) 648 if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
644 dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL); 649 dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
645 650
646 if (HAS_PCH_SPLIT(dev)) { 651 if (HAS_PCH_SPLIT(dev)) {
647 dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS); 652 dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
648 dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS); 653 dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
649 dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR); 654 dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
650 } else { 655 } else {
651 dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); 656 dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
652 dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); 657 dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
653 dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); 658 dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
654 } 659 }
655 660
656 /* Display Port state */ 661 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
657 if (SUPPORTS_INTEGRATED_DP(dev)) { 662 /* Display Port state */
658 dev_priv->saveDP_B = I915_READ(DP_B); 663 if (SUPPORTS_INTEGRATED_DP(dev)) {
659 dev_priv->saveDP_C = I915_READ(DP_C); 664 dev_priv->regfile.saveDP_B = I915_READ(DP_B);
660 dev_priv->saveDP_D = I915_READ(DP_D); 665 dev_priv->regfile.saveDP_C = I915_READ(DP_C);
661 dev_priv->savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M); 666 dev_priv->regfile.saveDP_D = I915_READ(DP_D);
662 dev_priv->savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M); 667 dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
663 dev_priv->savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N); 668 dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
664 dev_priv->savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N); 669 dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
665 dev_priv->savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M); 670 dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
666 dev_priv->savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M); 671 dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
667 dev_priv->savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N); 672 dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
668 dev_priv->savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N); 673 dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
669 } 674 dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
670 /* FIXME: save TV & SDVO state */ 675 }
671 676 /* FIXME: regfile.save TV & SDVO state */
672 /* Only save FBC state on the platform that supports FBC */ 677 }
678
679 /* Only regfile.save FBC state on the platform that supports FBC */
673 if (I915_HAS_FBC(dev)) { 680 if (I915_HAS_FBC(dev)) {
674 if (HAS_PCH_SPLIT(dev)) { 681 if (HAS_PCH_SPLIT(dev)) {
675 dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE); 682 dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
676 } else if (IS_GM45(dev)) { 683 } else if (IS_GM45(dev)) {
677 dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); 684 dev_priv->regfile.saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
678 } else { 685 } else {
679 dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); 686 dev_priv->regfile.saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
680 dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); 687 dev_priv->regfile.saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
681 dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); 688 dev_priv->regfile.saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
682 dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); 689 dev_priv->regfile.saveFBC_CONTROL = I915_READ(FBC_CONTROL);
683 } 690 }
684 } 691 }
685 692
686 /* VGA state */ 693 /* VGA state */
687 dev_priv->saveVGA0 = I915_READ(VGA0); 694 dev_priv->regfile.saveVGA0 = I915_READ(VGA0);
688 dev_priv->saveVGA1 = I915_READ(VGA1); 695 dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
689 dev_priv->saveVGA_PD = I915_READ(VGA_PD); 696 dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
690 if (HAS_PCH_SPLIT(dev)) 697 if (HAS_PCH_SPLIT(dev))
691 dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL); 698 dev_priv->regfile.saveVGACNTRL = I915_READ(CPU_VGACNTRL);
692 else 699 else
693 dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); 700 dev_priv->regfile.saveVGACNTRL = I915_READ(VGACNTRL);
694 701
695 i915_save_vga(dev); 702 i915_save_vga(dev);
696} 703}
@@ -700,97 +707,95 @@ static void i915_restore_display(struct drm_device *dev)
700 struct drm_i915_private *dev_priv = dev->dev_private; 707 struct drm_i915_private *dev_priv = dev->dev_private;
701 708
702 /* Display arbitration */ 709 /* Display arbitration */
703 I915_WRITE(DSPARB, dev_priv->saveDSPARB); 710 I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
704 711
705 /* Display port ratios (must be done before clock is set) */ 712 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
706 if (SUPPORTS_INTEGRATED_DP(dev)) { 713 /* Display port ratios (must be done before clock is set) */
707 I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->savePIPEA_GMCH_DATA_M); 714 if (SUPPORTS_INTEGRATED_DP(dev)) {
708 I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->savePIPEB_GMCH_DATA_M); 715 I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
709 I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->savePIPEA_GMCH_DATA_N); 716 I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
710 I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->savePIPEB_GMCH_DATA_N); 717 I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
711 I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->savePIPEA_DP_LINK_M); 718 I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
712 I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->savePIPEB_DP_LINK_M); 719 I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M);
713 I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->savePIPEA_DP_LINK_N); 720 I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M);
714 I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->savePIPEB_DP_LINK_N); 721 I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N);
722 I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N);
723 }
715 } 724 }
716 725
717 /* This is only meaningful in non-KMS mode */ 726 /* This is only meaningful in non-KMS mode */
718 /* Don't restore them in KMS mode */ 727 /* Don't restore them in KMS mode */
719 i915_restore_modeset_reg(dev); 728 i915_restore_modeset_reg(dev);
720 729
721 /* CRT state */
722 if (HAS_PCH_SPLIT(dev))
723 I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
724 else
725 I915_WRITE(ADPA, dev_priv->saveADPA);
726
727 /* LVDS state */ 730 /* LVDS state */
728 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) 731 if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
729 I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); 732 I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
730 733
731 if (HAS_PCH_SPLIT(dev)) { 734 if (HAS_PCH_SPLIT(dev)) {
732 I915_WRITE(PCH_LVDS, dev_priv->saveLVDS); 735 I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS);
733 } else if (IS_MOBILE(dev) && !IS_I830(dev)) 736 } else if (IS_MOBILE(dev) && !IS_I830(dev))
734 I915_WRITE(LVDS, dev_priv->saveLVDS); 737 I915_WRITE(LVDS, dev_priv->regfile.saveLVDS);
735 738
736 if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) 739 if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
737 I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL); 740 I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
738 741
739 if (HAS_PCH_SPLIT(dev)) { 742 if (HAS_PCH_SPLIT(dev)) {
740 I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL); 743 I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->regfile.saveBLC_PWM_CTL);
741 I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2); 744 I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
742 /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2; 745 /* NOTE: BLC_PWM_CPU_CTL must be written after BLC_PWM_CPU_CTL2;
743 * otherwise we get blank eDP screen after S3 on some machines 746 * otherwise we get blank eDP screen after S3 on some machines
744 */ 747 */
745 I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->saveBLC_CPU_PWM_CTL2); 748 I915_WRITE(BLC_PWM_CPU_CTL2, dev_priv->regfile.saveBLC_CPU_PWM_CTL2);
746 I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL); 749 I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->regfile.saveBLC_CPU_PWM_CTL);
747 I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS); 750 I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
748 I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); 751 I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
749 I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR); 752 I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
750 I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL); 753 I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
751 I915_WRITE(RSTDBYCTL, 754 I915_WRITE(RSTDBYCTL,
752 dev_priv->saveMCHBAR_RENDER_STANDBY); 755 dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
753 } else { 756 } else {
754 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); 757 I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
755 I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); 758 I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
756 I915_WRITE(BLC_HIST_CTL, dev_priv->saveBLC_HIST_CTL); 759 I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
757 I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS); 760 I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
758 I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); 761 I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
759 I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); 762 I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
760 I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); 763 I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
761 } 764 }
762 765
763 /* Display Port state */ 766 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
764 if (SUPPORTS_INTEGRATED_DP(dev)) { 767 /* Display Port state */
765 I915_WRITE(DP_B, dev_priv->saveDP_B); 768 if (SUPPORTS_INTEGRATED_DP(dev)) {
766 I915_WRITE(DP_C, dev_priv->saveDP_C); 769 I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
767 I915_WRITE(DP_D, dev_priv->saveDP_D); 770 I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
771 I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
772 }
773 /* FIXME: restore TV & SDVO state */
768 } 774 }
769 /* FIXME: restore TV & SDVO state */
770 775
771 /* only restore FBC info on the platform that supports FBC */ 776 /* only restore FBC info on the platform that supports FBC */
772 intel_disable_fbc(dev); 777 intel_disable_fbc(dev);
773 if (I915_HAS_FBC(dev)) { 778 if (I915_HAS_FBC(dev)) {
774 if (HAS_PCH_SPLIT(dev)) { 779 if (HAS_PCH_SPLIT(dev)) {
775 I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); 780 I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
776 } else if (IS_GM45(dev)) { 781 } else if (IS_GM45(dev)) {
777 I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); 782 I915_WRITE(DPFC_CB_BASE, dev_priv->regfile.saveDPFC_CB_BASE);
778 } else { 783 } else {
779 I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); 784 I915_WRITE(FBC_CFB_BASE, dev_priv->regfile.saveFBC_CFB_BASE);
780 I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); 785 I915_WRITE(FBC_LL_BASE, dev_priv->regfile.saveFBC_LL_BASE);
781 I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); 786 I915_WRITE(FBC_CONTROL2, dev_priv->regfile.saveFBC_CONTROL2);
782 I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); 787 I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
783 } 788 }
784 } 789 }
785 /* VGA state */ 790 /* VGA state */
786 if (HAS_PCH_SPLIT(dev)) 791 if (HAS_PCH_SPLIT(dev))
787 I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL); 792 I915_WRITE(CPU_VGACNTRL, dev_priv->regfile.saveVGACNTRL);
788 else 793 else
789 I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); 794 I915_WRITE(VGACNTRL, dev_priv->regfile.saveVGACNTRL);
790 795
791 I915_WRITE(VGA0, dev_priv->saveVGA0); 796 I915_WRITE(VGA0, dev_priv->regfile.saveVGA0);
792 I915_WRITE(VGA1, dev_priv->saveVGA1); 797 I915_WRITE(VGA1, dev_priv->regfile.saveVGA1);
793 I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); 798 I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
794 POSTING_READ(VGA_PD); 799 POSTING_READ(VGA_PD);
795 udelay(150); 800 udelay(150);
796 801
@@ -802,46 +807,45 @@ int i915_save_state(struct drm_device *dev)
802 struct drm_i915_private *dev_priv = dev->dev_private; 807 struct drm_i915_private *dev_priv = dev->dev_private;
803 int i; 808 int i;
804 809
805 pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); 810 pci_read_config_byte(dev->pdev, LBB, &dev_priv->regfile.saveLBB);
806 811
807 mutex_lock(&dev->struct_mutex); 812 mutex_lock(&dev->struct_mutex);
808 813
809 /* Hardware status page */
810 dev_priv->saveHWS = I915_READ(HWS_PGA);
811
812 i915_save_display(dev); 814 i915_save_display(dev);
813 815
814 /* Interrupt state */ 816 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
815 if (HAS_PCH_SPLIT(dev)) { 817 /* Interrupt state */
816 dev_priv->saveDEIER = I915_READ(DEIER); 818 if (HAS_PCH_SPLIT(dev)) {
817 dev_priv->saveDEIMR = I915_READ(DEIMR); 819 dev_priv->regfile.saveDEIER = I915_READ(DEIER);
818 dev_priv->saveGTIER = I915_READ(GTIER); 820 dev_priv->regfile.saveDEIMR = I915_READ(DEIMR);
819 dev_priv->saveGTIMR = I915_READ(GTIMR); 821 dev_priv->regfile.saveGTIER = I915_READ(GTIER);
820 dev_priv->saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR); 822 dev_priv->regfile.saveGTIMR = I915_READ(GTIMR);
821 dev_priv->saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR); 823 dev_priv->regfile.saveFDI_RXA_IMR = I915_READ(_FDI_RXA_IMR);
822 dev_priv->saveMCHBAR_RENDER_STANDBY = 824 dev_priv->regfile.saveFDI_RXB_IMR = I915_READ(_FDI_RXB_IMR);
823 I915_READ(RSTDBYCTL); 825 dev_priv->regfile.saveMCHBAR_RENDER_STANDBY =
824 dev_priv->savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG); 826 I915_READ(RSTDBYCTL);
825 } else { 827 dev_priv->regfile.savePCH_PORT_HOTPLUG = I915_READ(PCH_PORT_HOTPLUG);
826 dev_priv->saveIER = I915_READ(IER); 828 } else {
827 dev_priv->saveIMR = I915_READ(IMR); 829 dev_priv->regfile.saveIER = I915_READ(IER);
830 dev_priv->regfile.saveIMR = I915_READ(IMR);
831 }
828 } 832 }
829 833
830 intel_disable_gt_powersave(dev); 834 intel_disable_gt_powersave(dev);
831 835
832 /* Cache mode state */ 836 /* Cache mode state */
833 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); 837 dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
834 838
835 /* Memory Arbitration state */ 839 /* Memory Arbitration state */
836 dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); 840 dev_priv->regfile.saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
837 841
838 /* Scratch space */ 842 /* Scratch space */
839 for (i = 0; i < 16; i++) { 843 for (i = 0; i < 16; i++) {
840 dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2)); 844 dev_priv->regfile.saveSWF0[i] = I915_READ(SWF00 + (i << 2));
841 dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2)); 845 dev_priv->regfile.saveSWF1[i] = I915_READ(SWF10 + (i << 2));
842 } 846 }
843 for (i = 0; i < 3; i++) 847 for (i = 0; i < 3; i++)
844 dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); 848 dev_priv->regfile.saveSWF2[i] = I915_READ(SWF30 + (i << 2));
845 849
846 mutex_unlock(&dev->struct_mutex); 850 mutex_unlock(&dev->struct_mutex);
847 851
@@ -853,41 +857,40 @@ int i915_restore_state(struct drm_device *dev)
853 struct drm_i915_private *dev_priv = dev->dev_private; 857 struct drm_i915_private *dev_priv = dev->dev_private;
854 int i; 858 int i;
855 859
856 pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); 860 pci_write_config_byte(dev->pdev, LBB, dev_priv->regfile.saveLBB);
857 861
858 mutex_lock(&dev->struct_mutex); 862 mutex_lock(&dev->struct_mutex);
859 863
860 /* Hardware status page */
861 I915_WRITE(HWS_PGA, dev_priv->saveHWS);
862
863 i915_restore_display(dev); 864 i915_restore_display(dev);
864 865
865 /* Interrupt state */ 866 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
866 if (HAS_PCH_SPLIT(dev)) { 867 /* Interrupt state */
867 I915_WRITE(DEIER, dev_priv->saveDEIER); 868 if (HAS_PCH_SPLIT(dev)) {
868 I915_WRITE(DEIMR, dev_priv->saveDEIMR); 869 I915_WRITE(DEIER, dev_priv->regfile.saveDEIER);
869 I915_WRITE(GTIER, dev_priv->saveGTIER); 870 I915_WRITE(DEIMR, dev_priv->regfile.saveDEIMR);
870 I915_WRITE(GTIMR, dev_priv->saveGTIMR); 871 I915_WRITE(GTIER, dev_priv->regfile.saveGTIER);
871 I915_WRITE(_FDI_RXA_IMR, dev_priv->saveFDI_RXA_IMR); 872 I915_WRITE(GTIMR, dev_priv->regfile.saveGTIMR);
872 I915_WRITE(_FDI_RXB_IMR, dev_priv->saveFDI_RXB_IMR); 873 I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR);
873 I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->savePCH_PORT_HOTPLUG); 874 I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR);
874 } else { 875 I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG);
875 I915_WRITE(IER, dev_priv->saveIER); 876 } else {
876 I915_WRITE(IMR, dev_priv->saveIMR); 877 I915_WRITE(IER, dev_priv->regfile.saveIER);
878 I915_WRITE(IMR, dev_priv->regfile.saveIMR);
879 }
877 } 880 }
878 881
879 /* Cache mode state */ 882 /* Cache mode state */
880 I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); 883 I915_WRITE(CACHE_MODE_0, dev_priv->regfile.saveCACHE_MODE_0 | 0xffff0000);
881 884
882 /* Memory arbitration state */ 885 /* Memory arbitration state */
883 I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000); 886 I915_WRITE(MI_ARB_STATE, dev_priv->regfile.saveMI_ARB_STATE | 0xffff0000);
884 887
885 for (i = 0; i < 16; i++) { 888 for (i = 0; i < 16; i++) {
886 I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]); 889 I915_WRITE(SWF00 + (i << 2), dev_priv->regfile.saveSWF0[i]);
887 I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]); 890 I915_WRITE(SWF10 + (i << 2), dev_priv->regfile.saveSWF1[i]);
888 } 891 }
889 for (i = 0; i < 3; i++) 892 for (i = 0; i < 3; i++)
890 I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); 893 I915_WRITE(SWF30 + (i << 2), dev_priv->regfile.saveSWF2[i]);
891 894
892 mutex_unlock(&dev->struct_mutex); 895 mutex_unlock(&dev->struct_mutex);
893 896
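The i915_suspend.c hunks above are one mechanical rename: the flat dev_priv->saveFOO fields move into a single embedded regfile struct. A minimal compilable sketch of the resulting layout, with the field list heavily abbreviated (the real struct in i915_drv.h carries far more slots; names here are stand-ins, not the full kernel definition):

#include <stdint.h>

typedef uint32_t u32;

/* Abbreviated stand-in for the saved-register bundle; the point is the
 * single named member, so every access site reads as
 * dev_priv->regfile.saveFOO instead of a loose dev_priv->saveFOO. */
struct i915_suspend_saved_registers {
	u32 saveDSPARB;
	u32 saveADPA;
	u32 saveLVDS;
	u32 saveSWF0[16];
	/* ...many more register slots in the real driver... */
};

struct drm_i915_private {
	/* ...all other driver state... */
	struct i915_suspend_saved_registers regfile;
};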
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 903eebd2117a..9462081b1e60 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -97,7 +97,7 @@ static struct attribute_group rc6_attr_group = {
97 97
98static int l3_access_valid(struct drm_device *dev, loff_t offset) 98static int l3_access_valid(struct drm_device *dev, loff_t offset)
99{ 99{
100 if (!IS_IVYBRIDGE(dev)) 100 if (!HAS_L3_GPU_CACHE(dev))
101 return -EPERM; 101 return -EPERM;
102 102
103 if (offset % 4 != 0) 103 if (offset % 4 != 0)
@@ -162,7 +162,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
162 if (ret) 162 if (ret)
163 return ret; 163 return ret;
164 164
165 if (!dev_priv->mm.l3_remap_info) { 165 if (!dev_priv->l3_parity.remap_info) {
166 temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL); 166 temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
167 if (!temp) { 167 if (!temp) {
168 mutex_unlock(&drm_dev->struct_mutex); 168 mutex_unlock(&drm_dev->struct_mutex);
@@ -182,9 +182,9 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
182 * at this point it is left as a TODO. 182 * at this point it is left as a TODO.
183 */ 183 */
184 if (temp) 184 if (temp)
185 dev_priv->mm.l3_remap_info = temp; 185 dev_priv->l3_parity.remap_info = temp;
186 186
187 memcpy(dev_priv->mm.l3_remap_info + (offset/4), 187 memcpy(dev_priv->l3_parity.remap_info + (offset/4),
188 buf + (offset/4), 188 buf + (offset/4),
189 count); 189 count);
190 190
@@ -211,12 +211,9 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
211 struct drm_i915_private *dev_priv = dev->dev_private; 211 struct drm_i915_private *dev_priv = dev->dev_private;
212 int ret; 212 int ret;
213 213
214 ret = i915_mutex_lock_interruptible(dev); 214 mutex_lock(&dev_priv->rps.hw_lock);
215 if (ret)
216 return ret;
217
218 ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER; 215 ret = dev_priv->rps.cur_delay * GT_FREQUENCY_MULTIPLIER;
219 mutex_unlock(&dev->struct_mutex); 216 mutex_unlock(&dev_priv->rps.hw_lock);
220 217
221 return snprintf(buf, PAGE_SIZE, "%d", ret); 218 return snprintf(buf, PAGE_SIZE, "%d", ret);
222} 219}
@@ -228,12 +225,9 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
228 struct drm_i915_private *dev_priv = dev->dev_private; 225 struct drm_i915_private *dev_priv = dev->dev_private;
229 int ret; 226 int ret;
230 227
231 ret = i915_mutex_lock_interruptible(dev); 228 mutex_lock(&dev_priv->rps.hw_lock);
232 if (ret)
233 return ret;
234
235 ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER; 229 ret = dev_priv->rps.max_delay * GT_FREQUENCY_MULTIPLIER;
236 mutex_unlock(&dev->struct_mutex); 230 mutex_unlock(&dev_priv->rps.hw_lock);
237 231
238 return snprintf(buf, PAGE_SIZE, "%d", ret); 232 return snprintf(buf, PAGE_SIZE, "%d", ret);
239} 233}
@@ -254,16 +248,14 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
254 248
255 val /= GT_FREQUENCY_MULTIPLIER; 249 val /= GT_FREQUENCY_MULTIPLIER;
256 250
257 ret = mutex_lock_interruptible(&dev->struct_mutex); 251 mutex_lock(&dev_priv->rps.hw_lock);
258 if (ret)
259 return ret;
260 252
261 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 253 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
262 hw_max = (rp_state_cap & 0xff); 254 hw_max = (rp_state_cap & 0xff);
263 hw_min = ((rp_state_cap & 0xff0000) >> 16); 255 hw_min = ((rp_state_cap & 0xff0000) >> 16);
264 256
265 if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) { 257 if (val < hw_min || val > hw_max || val < dev_priv->rps.min_delay) {
266 mutex_unlock(&dev->struct_mutex); 258 mutex_unlock(&dev_priv->rps.hw_lock);
267 return -EINVAL; 259 return -EINVAL;
268 } 260 }
269 261
@@ -272,7 +264,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
272 264
273 dev_priv->rps.max_delay = val; 265 dev_priv->rps.max_delay = val;
274 266
275 mutex_unlock(&dev->struct_mutex); 267 mutex_unlock(&dev_priv->rps.hw_lock);
276 268
277 return count; 269 return count;
278} 270}
@@ -284,12 +276,9 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
284 struct drm_i915_private *dev_priv = dev->dev_private; 276 struct drm_i915_private *dev_priv = dev->dev_private;
285 int ret; 277 int ret;
286 278
287 ret = i915_mutex_lock_interruptible(dev); 279 mutex_lock(&dev_priv->rps.hw_lock);
288 if (ret)
289 return ret;
290
291 ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER; 280 ret = dev_priv->rps.min_delay * GT_FREQUENCY_MULTIPLIER;
292 mutex_unlock(&dev->struct_mutex); 281 mutex_unlock(&dev_priv->rps.hw_lock);
293 282
294 return snprintf(buf, PAGE_SIZE, "%d", ret); 283 return snprintf(buf, PAGE_SIZE, "%d", ret);
295} 284}
@@ -310,16 +299,14 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
310 299
311 val /= GT_FREQUENCY_MULTIPLIER; 300 val /= GT_FREQUENCY_MULTIPLIER;
312 301
313 ret = mutex_lock_interruptible(&dev->struct_mutex); 302 mutex_lock(&dev_priv->rps.hw_lock);
314 if (ret)
315 return ret;
316 303
317 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 304 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
318 hw_max = (rp_state_cap & 0xff); 305 hw_max = (rp_state_cap & 0xff);
319 hw_min = ((rp_state_cap & 0xff0000) >> 16); 306 hw_min = ((rp_state_cap & 0xff0000) >> 16);
320 307
321 if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) { 308 if (val < hw_min || val > hw_max || val > dev_priv->rps.max_delay) {
322 mutex_unlock(&dev->struct_mutex); 309 mutex_unlock(&dev_priv->rps.hw_lock);
323 return -EINVAL; 310 return -EINVAL;
324 } 311 }
325 312
@@ -328,7 +315,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
328 315
329 dev_priv->rps.min_delay = val; 316 dev_priv->rps.min_delay = val;
330 317
331 mutex_unlock(&dev->struct_mutex); 318 mutex_unlock(&dev_priv->rps.hw_lock);
332 319
333 return count; 320 return count;
334 321
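The i915_sysfs.c hunks swap the interruptible, device-wide struct_mutex for a dedicated rps.hw_lock around the frequency fields, so a sysfs read can no longer fail partway with an -EINTR-style error and no longer contends with the GEM paths. A rough userspace sketch of the pattern (a pthread mutex stands in for the kernel mutex, and the multiplier value is assumed for illustration):

#include <pthread.h>

#define GT_FREQUENCY_MULTIPLIER 50	/* illustrative value only */

struct rps_state {
	pthread_mutex_t hw_lock;	/* guards only the rps fields below */
	int cur_delay;			/* current frequency, hardware units */
};

/* Readers take the narrow, dedicated lock unconditionally; there is no
 * interruptible acquire whose failure must be propagated to userspace. */
int gt_cur_freq_mhz(struct rps_state *rps)
{
	int mhz;

	pthread_mutex_lock(&rps->hw_lock);
	mhz = rps->cur_delay * GT_FREQUENCY_MULTIPLIER;
	pthread_mutex_unlock(&rps->hw_lock);
	return mhz;
}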
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 8134421b89a6..3db4a6817713 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -229,24 +229,26 @@ TRACE_EVENT(i915_gem_evict_everything,
229); 229);
230 230
231TRACE_EVENT(i915_gem_ring_dispatch, 231TRACE_EVENT(i915_gem_ring_dispatch,
232 TP_PROTO(struct intel_ring_buffer *ring, u32 seqno), 232 TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
233 TP_ARGS(ring, seqno), 233 TP_ARGS(ring, seqno, flags),
234 234
235 TP_STRUCT__entry( 235 TP_STRUCT__entry(
236 __field(u32, dev) 236 __field(u32, dev)
237 __field(u32, ring) 237 __field(u32, ring)
238 __field(u32, seqno) 238 __field(u32, seqno)
239 __field(u32, flags)
239 ), 240 ),
240 241
241 TP_fast_assign( 242 TP_fast_assign(
242 __entry->dev = ring->dev->primary->index; 243 __entry->dev = ring->dev->primary->index;
243 __entry->ring = ring->id; 244 __entry->ring = ring->id;
244 __entry->seqno = seqno; 245 __entry->seqno = seqno;
246 __entry->flags = flags;
245 i915_trace_irq_get(ring, seqno); 247 i915_trace_irq_get(ring, seqno);
246 ), 248 ),
247 249
248 TP_printk("dev=%u, ring=%u, seqno=%u", 250 TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
249 __entry->dev, __entry->ring, __entry->seqno) 251 __entry->dev, __entry->ring, __entry->seqno, __entry->flags)
250); 252);
251 253
252TRACE_EVENT(i915_gem_ring_flush, 254TRACE_EVENT(i915_gem_ring_flush,
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 56846ed5ee55..55ffba1f5818 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -755,7 +755,8 @@ void intel_setup_bios(struct drm_device *dev)
755 struct drm_i915_private *dev_priv = dev->dev_private; 755 struct drm_i915_private *dev_priv = dev->dev_private;
756 756
757 /* Set the Panel Power On/Off timings if uninitialized. */ 757 /* Set the Panel Power On/Off timings if uninitialized. */
758 if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) { 758 if (!HAS_PCH_SPLIT(dev) &&
759 I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
759 /* Set T2 to 40ms and T5 to 200ms */ 760 /* Set T2 to 40ms and T5 to 200ms */
760 I915_WRITE(PP_ON_DELAYS, 0x019007d0); 761 I915_WRITE(PP_ON_DELAYS, 0x019007d0);
761 762
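The intel_setup_bios() hunk gates the panel-power fallback on the platform check: on PCH-split parts the sequencer sits behind the PCH_PP_* registers, so the CPU-side PP_* reads come back zero and the old "BIOS left it uninitialized" heuristic would fire spuriously. A self-contained sketch of the guard, with a tiny register-file stand-in so it compiles outside the kernel (the causal reading of the zero reads is an inference from the diff):

#include <stdint.h>

enum { PP_ON_DELAYS, PP_OFF_DELAYS, NREGS };
static uint32_t regs[NREGS];		/* stand-in for MMIO space */
static int has_pch_split;		/* platform flag, set at probe */

void setup_panel_power_defaults(void)
{
	/* On PCH-split parts these addresses are not the sequencer, so
	 * zero is expected here and must not trip the fallback. */
	if (!has_pch_split &&
	    regs[PP_ON_DELAYS] == 0 && regs[PP_OFF_DELAYS] == 0)
		regs[PP_ON_DELAYS] = 0x019007d0; /* T2 = 40 ms, T5 = 200 ms */
}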
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 6345878ae1e7..9293878ec7eb 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -198,6 +198,11 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
198 if (mode->clock > max_clock) 198 if (mode->clock > max_clock)
199 return MODE_CLOCK_HIGH; 199 return MODE_CLOCK_HIGH;
200 200
201 /* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
202 if (HAS_PCH_LPT(dev) &&
203 (ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
204 return MODE_CLOCK_HIGH;
205
201 return MODE_OK; 206 return MODE_OK;
202} 207}
203 208
@@ -221,14 +226,20 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
221 struct drm_i915_private *dev_priv = dev->dev_private; 226 struct drm_i915_private *dev_priv = dev->dev_private;
222 u32 adpa; 227 u32 adpa;
223 228
224 adpa = ADPA_HOTPLUG_BITS; 229 if (HAS_PCH_SPLIT(dev))
230 adpa = ADPA_HOTPLUG_BITS;
231 else
232 adpa = 0;
233
225 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 234 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
226 adpa |= ADPA_HSYNC_ACTIVE_HIGH; 235 adpa |= ADPA_HSYNC_ACTIVE_HIGH;
227 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 236 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
228 adpa |= ADPA_VSYNC_ACTIVE_HIGH; 237 adpa |= ADPA_VSYNC_ACTIVE_HIGH;
229 238
230 /* For CPT allow 3 pipe config, for others just use A or B */ 239 /* For CPT allow 3 pipe config, for others just use A or B */
231 if (HAS_PCH_CPT(dev)) 240 if (HAS_PCH_LPT(dev))
241 ; /* Those bits don't exist here */
242 else if (HAS_PCH_CPT(dev))
232 adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe); 243 adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
233 else if (intel_crtc->pipe == 0) 244 else if (intel_crtc->pipe == 0)
234 adpa |= ADPA_PIPE_A_SELECT; 245 adpa |= ADPA_PIPE_A_SELECT;
@@ -401,12 +412,16 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector,
401 struct i2c_adapter *adapter) 412 struct i2c_adapter *adapter)
402{ 413{
403 struct edid *edid; 414 struct edid *edid;
415 int ret;
404 416
405 edid = intel_crt_get_edid(connector, adapter); 417 edid = intel_crt_get_edid(connector, adapter);
406 if (!edid) 418 if (!edid)
407 return 0; 419 return 0;
408 420
409 return intel_connector_update_modes(connector, edid); 421 ret = intel_connector_update_modes(connector, edid);
422 kfree(edid);
423
424 return ret;
410} 425}
411 426
412static bool intel_crt_detect_ddc(struct drm_connector *connector) 427static bool intel_crt_detect_ddc(struct drm_connector *connector)
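The intel_crt_ddc_get_modes() hunk above plugs a leak: intel_crt_get_edid() hands back a heap allocation that the old code returned without ever freeing. The shape of the fix in a self-contained form (malloc/free and stub helpers stand in for the kernel allocator and the real DDC-over-i2c calls):

#include <stdlib.h>

struct edid { unsigned char raw[128]; };

/* Stand-ins for the driver helpers. */
static struct edid *get_edid(void) { return calloc(1, sizeof(struct edid)); }
static int update_modes(struct edid *e) { (void)e; return 1; }

int ddc_get_modes(void)
{
	struct edid *edid = get_edid();
	int ret;

	if (!edid)
		return 0;

	ret = update_modes(edid);
	free(edid);	/* kfree() in the kernel: the caller owns the blob */
	return ret;
}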
@@ -644,10 +659,22 @@ static int intel_crt_set_property(struct drm_connector *connector,
644static void intel_crt_reset(struct drm_connector *connector) 659static void intel_crt_reset(struct drm_connector *connector)
645{ 660{
646 struct drm_device *dev = connector->dev; 661 struct drm_device *dev = connector->dev;
662 struct drm_i915_private *dev_priv = dev->dev_private;
647 struct intel_crt *crt = intel_attached_crt(connector); 663 struct intel_crt *crt = intel_attached_crt(connector);
648 664
649 if (HAS_PCH_SPLIT(dev)) 665 if (HAS_PCH_SPLIT(dev)) {
666 u32 adpa;
667
668 adpa = I915_READ(PCH_ADPA);
669 adpa &= ~ADPA_CRT_HOTPLUG_MASK;
670 adpa |= ADPA_HOTPLUG_BITS;
671 I915_WRITE(PCH_ADPA, adpa);
672 POSTING_READ(PCH_ADPA);
673
674 DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
650 crt->force_hotplug_required = 1; 675 crt->force_hotplug_required = 1;
676 }
677
651} 678}
652 679
653/* 680/*
@@ -729,7 +756,7 @@ void intel_crt_init(struct drm_device *dev)
729 756
730 crt->base.type = INTEL_OUTPUT_ANALOG; 757 crt->base.type = INTEL_OUTPUT_ANALOG;
731 crt->base.cloneable = true; 758 crt->base.cloneable = true;
732 if (IS_HASWELL(dev) || IS_I830(dev)) 759 if (IS_I830(dev))
733 crt->base.crtc_mask = (1 << 0); 760 crt->base.crtc_mask = (1 << 0);
734 else 761 else
735 crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 762 crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
@@ -749,7 +776,10 @@ void intel_crt_init(struct drm_device *dev)
749 776
750 crt->base.disable = intel_disable_crt; 777 crt->base.disable = intel_disable_crt;
751 crt->base.enable = intel_enable_crt; 778 crt->base.enable = intel_enable_crt;
752 crt->base.get_hw_state = intel_crt_get_hw_state; 779 if (IS_HASWELL(dev))
780 crt->base.get_hw_state = intel_ddi_get_hw_state;
781 else
782 crt->base.get_hw_state = intel_crt_get_hw_state;
753 intel_connector->get_hw_state = intel_connector_get_hw_state; 783 intel_connector->get_hw_state = intel_connector_get_hw_state;
754 784
755 drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs); 785 drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
@@ -766,18 +796,14 @@ void intel_crt_init(struct drm_device *dev)
766 * Configure the automatic hotplug detection stuff 796 * Configure the automatic hotplug detection stuff
767 */ 797 */
768 crt->force_hotplug_required = 0; 798 crt->force_hotplug_required = 0;
769 if (HAS_PCH_SPLIT(dev)) {
770 u32 adpa;
771
772 adpa = I915_READ(PCH_ADPA);
773 adpa &= ~ADPA_CRT_HOTPLUG_MASK;
774 adpa |= ADPA_HOTPLUG_BITS;
775 I915_WRITE(PCH_ADPA, adpa);
776 POSTING_READ(PCH_ADPA);
777
778 DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
779 crt->force_hotplug_required = 1;
780 }
781 799
782 dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; 800 dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
801
802 /*
803 * TODO: find a proper way to discover whether we need to set the
804 * polarity reversal bit or not, instead of relying on the BIOS.
805 */
806 if (HAS_PCH_LPT(dev))
807 dev_priv->fdi_rx_polarity_reversed =
808 !!(I915_READ(_FDI_RXA_CTL) & FDI_RX_POLARITY_REVERSED_LPT);
783} 809}
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index bfe375466a0e..4bad0f724019 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -58,6 +58,26 @@ static const u32 hsw_ddi_translations_fdi[] = {
58 0x00FFFFFF, 0x00040006 /* HDMI parameters */ 58 0x00FFFFFF, 0x00040006 /* HDMI parameters */
59}; 59};
60 60
61static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
62{
63 struct drm_encoder *encoder = &intel_encoder->base;
64 int type = intel_encoder->type;
65
66 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
67 type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) {
68 struct intel_digital_port *intel_dig_port =
69 enc_to_dig_port(encoder);
70 return intel_dig_port->port;
71
72 } else if (type == INTEL_OUTPUT_ANALOG) {
73 return PORT_E;
74
75 } else {
76 DRM_ERROR("Invalid DDI encoder type %d\n", type);
77 BUG();
78 }
79}
80
61/* On Haswell, DDI port buffers must be programmed with correct values 81/* On Haswell, DDI port buffers must be programmed with correct values
62 * in advance. The buffer values are different for FDI and DP modes, 82 * in advance. The buffer values are different for FDI and DP modes,
63 * but the HDMI/DVI fields are shared among those. So we program the DDI 83 * but the HDMI/DVI fields are shared among those. So we program the DDI
@@ -118,6 +138,19 @@ static const long hsw_ddi_buf_ctl_values[] = {
118 DDI_BUF_EMP_800MV_3_5DB_HSW 138 DDI_BUF_EMP_800MV_3_5DB_HSW
119}; 139};
120 140
141static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
142 enum port port)
143{
144 uint32_t reg = DDI_BUF_CTL(port);
145 int i;
146
147 for (i = 0; i < 8; i++) {
148 udelay(1);
149 if (I915_READ(reg) & DDI_BUF_IS_IDLE)
150 return;
151 }
152 DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
153}
121 154
122/* Starting with Haswell, different DDI ports can work in FDI mode for 155/* Starting with Haswell, different DDI ports can work in FDI mode for
123 * connection to the PCH-located connectors. For this, it is necessary to train 156 * connection to the PCH-located connectors. For this, it is necessary to train
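The intel_wait_ddi_buf_idle() helper added above is the driver's usual bounded-poll idiom: spin a fixed number of short delays on a status bit and complain once if it never shows up, rather than looping forever on hardware that may be wedged. The generic shape, with usleep() standing in for udelay() (tries and delay are the sketch's parameters, not the exact hardware timings):

#include <stdio.h>
#include <unistd.h>

/* Poll predicate() up to 'tries' times, sleeping delay_us between
 * attempts; returns 0 on success, -1 (after one complaint) on timeout. */
int wait_for(int (*predicate)(void), int tries, int delay_us)
{
	int i;

	for (i = 0; i < tries; i++) {
		usleep(delay_us);
		if (predicate())
			return 0;
	}
	fprintf(stderr, "timeout waiting for condition\n");
	return -1;
}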
@@ -133,25 +166,36 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
133 struct drm_device *dev = crtc->dev; 166 struct drm_device *dev = crtc->dev;
134 struct drm_i915_private *dev_priv = dev->dev_private; 167 struct drm_i915_private *dev_priv = dev->dev_private;
135 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 168 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
136 int pipe = intel_crtc->pipe; 169 u32 temp, i, rx_ctl_val;
137 u32 reg, temp, i;
138
139 /* Configure CPU PLL, wait for warmup */
140 I915_WRITE(SPLL_CTL,
141 SPLL_PLL_ENABLE |
142 SPLL_PLL_FREQ_1350MHz |
143 SPLL_PLL_SCC);
144 170
145 /* Use SPLL to drive the output when in FDI mode */ 171 /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
146 I915_WRITE(PORT_CLK_SEL(PORT_E), 172 * mode set "sequence for CRT port" document:
147 PORT_CLK_SEL_SPLL); 173 * - TP1 to TP2 time with the default value
148 I915_WRITE(PIPE_CLK_SEL(pipe), 174 * - FDI delay to 90h
149 PIPE_CLK_SEL_PORT(PORT_E)); 175 */
150 176 I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) |
151 udelay(20); 177 FDI_RX_PWRDN_LANE0_VAL(2) |
152 178 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
153 /* Start the training iterating through available voltages and emphasis */ 179
154 for (i=0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values); i++) { 180 /* Enable the PCH Receiver FDI PLL */
181 rx_ctl_val = FDI_RX_PLL_ENABLE | FDI_RX_ENHANCE_FRAME_ENABLE |
182 ((intel_crtc->fdi_lanes - 1) << 19);
183 if (dev_priv->fdi_rx_polarity_reversed)
184 rx_ctl_val |= FDI_RX_POLARITY_REVERSED_LPT;
185 I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
186 POSTING_READ(_FDI_RXA_CTL);
187 udelay(220);
188
189 /* Switch from Rawclk to PCDclk */
190 rx_ctl_val |= FDI_PCDCLK;
191 I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
192
193 /* Configure Port Clock Select */
194 I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel);
195
196 /* Start the training iterating through available voltages and emphasis,
197 * testing each value twice. */
198 for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values) * 2; i++) {
155 /* Configure DP_TP_CTL with auto-training */ 199 /* Configure DP_TP_CTL with auto-training */
156 I915_WRITE(DP_TP_CTL(PORT_E), 200 I915_WRITE(DP_TP_CTL(PORT_E),
157 DP_TP_CTL_FDI_AUTOTRAIN | 201 DP_TP_CTL_FDI_AUTOTRAIN |
@@ -160,103 +204,75 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
160 DP_TP_CTL_ENABLE); 204 DP_TP_CTL_ENABLE);
161 205
162 /* Configure and enable DDI_BUF_CTL for DDI E with next voltage */ 206 /* Configure and enable DDI_BUF_CTL for DDI E with next voltage */
163 temp = I915_READ(DDI_BUF_CTL(PORT_E));
164 temp = (temp & ~DDI_BUF_EMP_MASK);
165 I915_WRITE(DDI_BUF_CTL(PORT_E), 207 I915_WRITE(DDI_BUF_CTL(PORT_E),
166 temp | 208 DDI_BUF_CTL_ENABLE |
167 DDI_BUF_CTL_ENABLE | 209 ((intel_crtc->fdi_lanes - 1) << 1) |
168 DDI_PORT_WIDTH_X2 | 210 hsw_ddi_buf_ctl_values[i / 2]);
169 hsw_ddi_buf_ctl_values[i]); 211 POSTING_READ(DDI_BUF_CTL(PORT_E));
170 212
171 udelay(600); 213 udelay(600);
172 214
173 /* We need to program FDI_RX_MISC with the default TP1 to TP2 215 /* Program PCH FDI Receiver TU */
174 * values before enabling the receiver, and configure the delay 216 I915_WRITE(_FDI_RXA_TUSIZE1, TU_SIZE(64));
175 * for the FDI timing generator to 90h. Luckily, all the other 217
176 * bits are supposed to be zeroed, so we can write those values 218 /* Enable PCH FDI Receiver with auto-training */
177 * directly. 219 rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
178 */ 220 I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
179 I915_WRITE(FDI_RX_MISC(pipe), FDI_RX_TP1_TO_TP2_48 | 221 POSTING_READ(_FDI_RXA_CTL);
180 FDI_RX_FDI_DELAY_90); 222
181 223 /* Wait for FDI receiver lane calibration */
182 /* Enable CPU FDI Receiver with auto-training */ 224 udelay(30);
183 reg = FDI_RX_CTL(pipe); 225
184 I915_WRITE(reg, 226 /* Unset FDI_RX_MISC pwrdn lanes */
185 I915_READ(reg) | 227 temp = I915_READ(_FDI_RXA_MISC);
186 FDI_LINK_TRAIN_AUTO | 228 temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
187 FDI_RX_ENABLE | 229 I915_WRITE(_FDI_RXA_MISC, temp);
188 FDI_LINK_TRAIN_PATTERN_1_CPT | 230 POSTING_READ(_FDI_RXA_MISC);
189 FDI_RX_ENHANCE_FRAME_ENABLE | 231
190 FDI_PORT_WIDTH_2X_LPT | 232 /* Wait for FDI auto training time */
191 FDI_RX_PLL_ENABLE); 233 udelay(5);
192 POSTING_READ(reg);
193 udelay(100);
194 234
195 temp = I915_READ(DP_TP_STATUS(PORT_E)); 235 temp = I915_READ(DP_TP_STATUS(PORT_E));
196 if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) { 236 if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
197 DRM_DEBUG_DRIVER("BUF_CTL training done on %d step\n", i); 237 DRM_DEBUG_KMS("FDI link training done on step %d\n", i);
198 238
199 /* Enable normal pixel sending for FDI */ 239 /* Enable normal pixel sending for FDI */
200 I915_WRITE(DP_TP_CTL(PORT_E), 240 I915_WRITE(DP_TP_CTL(PORT_E),
201 DP_TP_CTL_FDI_AUTOTRAIN | 241 DP_TP_CTL_FDI_AUTOTRAIN |
202 DP_TP_CTL_LINK_TRAIN_NORMAL | 242 DP_TP_CTL_LINK_TRAIN_NORMAL |
203 DP_TP_CTL_ENHANCED_FRAME_ENABLE | 243 DP_TP_CTL_ENHANCED_FRAME_ENABLE |
204 DP_TP_CTL_ENABLE); 244 DP_TP_CTL_ENABLE);
205
206 /* Enable PIPE_DDI_FUNC_CTL for the pipe to work in FDI mode */
207 temp = I915_READ(DDI_FUNC_CTL(pipe));
208 temp &= ~PIPE_DDI_PORT_MASK;
209 temp |= PIPE_DDI_SELECT_PORT(PORT_E) |
210 PIPE_DDI_MODE_SELECT_FDI |
211 PIPE_DDI_FUNC_ENABLE |
212 PIPE_DDI_PORT_WIDTH_X2;
213 I915_WRITE(DDI_FUNC_CTL(pipe),
214 temp);
215 break;
216 } else {
217 DRM_ERROR("Error training BUF_CTL %d\n", i);
218 245
219 /* Disable DP_TP_CTL and FDI_RX_CTL) and retry */ 246 return;
220 I915_WRITE(DP_TP_CTL(PORT_E),
221 I915_READ(DP_TP_CTL(PORT_E)) &
222 ~DP_TP_CTL_ENABLE);
223 I915_WRITE(FDI_RX_CTL(pipe),
224 I915_READ(FDI_RX_CTL(pipe)) &
225 ~FDI_RX_PLL_ENABLE);
226 continue;
227 } 247 }
228 }
229 248
230 DRM_DEBUG_KMS("FDI train done.\n"); 249 temp = I915_READ(DDI_BUF_CTL(PORT_E));
231} 250 temp &= ~DDI_BUF_CTL_ENABLE;
232 251 I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
233/* For DDI connections, it is possible to support different outputs over the 252 POSTING_READ(DDI_BUF_CTL(PORT_E));
234 * same DDI port, such as HDMI or DP or even VGA via FDI. So we don't know by 253
235 * the time the output is detected what exactly is on the other end of it. This 254 /* Disable DP_TP_CTL and FDI_RX_CTL and retry */
236 * function aims at providing support for this detection and proper output 255 temp = I915_READ(DP_TP_CTL(PORT_E));
237 * configuration. 256 temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
238 */ 257 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
239void intel_ddi_init(struct drm_device *dev, enum port port) 258 I915_WRITE(DP_TP_CTL(PORT_E), temp);
240{ 259 POSTING_READ(DP_TP_CTL(PORT_E));
241 /* For now, we don't do any proper output detection and assume that we 260
242 * handle HDMI only */ 261 intel_wait_ddi_buf_idle(dev_priv, PORT_E);
243 262
244 switch(port){ 263 rx_ctl_val &= ~FDI_RX_ENABLE;
245 case PORT_A: 264 I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
246 /* We don't handle eDP and DP yet */ 265 POSTING_READ(_FDI_RXA_CTL);
247 DRM_DEBUG_DRIVER("Found digital output on DDI port A\n"); 266
248 break; 267 /* Reset FDI_RX_MISC pwrdn lanes */
249 /* Assume that the ports B, C and D are working in HDMI mode for now */ 268 temp = I915_READ(_FDI_RXA_MISC);
250 case PORT_B: 269 temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
251 case PORT_C: 270 temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
252 case PORT_D: 271 I915_WRITE(_FDI_RXA_MISC, temp);
253 intel_hdmi_init(dev, DDI_BUF_CTL(port), port); 272 POSTING_READ(_FDI_RXA_MISC);
254 break;
255 default:
256 DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
257 port);
258 break;
259 } 273 }
274
275 DRM_ERROR("FDI link training failed!\n");
260} 276}
261 277
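The rewritten hsw_fdi_link_train() is hard to follow interleaved above; the control flow it implements is a retry ladder: walk the voltage/emphasis table with each entry tried twice (hence the i / 2 index), check the auto-training done bit, and on failure tear the DDI buffer and FDI receiver back down before the next attempt. A compilable skeleton of just that flow, with all register work reduced to stubs and the table length assumed:

#include <stdio.h>

#define N_VSWING 6	/* stand-in for ARRAY_SIZE(hsw_ddi_buf_ctl_values) */

static void program_vswing(int entry) { (void)entry; /* DDI_BUF_CTL write */ }
static int autotrain_done(void) { return 0; /* DP_TP_STATUS poll */ }
static void teardown_for_retry(void) { /* disable DDI buf and FDI RX */ }

void fdi_link_train(void)
{
	int i;

	/* Each voltage/emphasis value is tested twice, so the loop runs
	 * 2x the table length and indexes it with i / 2. */
	for (i = 0; i < N_VSWING * 2; i++) {
		program_vswing(i / 2);
		if (autotrain_done()) {
			printf("FDI link training done on step %d\n", i);
			return;
		}
		teardown_for_retry();
	}
	fprintf(stderr, "FDI link training failed!\n");
}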
262/* WRPLL clock dividers */ 278/* WRPLL clock dividers */
@@ -645,116 +661,435 @@ static const struct wrpll_tmds_clock wrpll_tmds_clock_table[] = {
645 {298000, 2, 21, 19}, 661 {298000, 2, 21, 19},
646}; 662};
647 663
648void intel_ddi_mode_set(struct drm_encoder *encoder, 664static void intel_ddi_mode_set(struct drm_encoder *encoder,
649 struct drm_display_mode *mode, 665 struct drm_display_mode *mode,
650 struct drm_display_mode *adjusted_mode) 666 struct drm_display_mode *adjusted_mode)
651{ 667{
652 struct drm_device *dev = encoder->dev;
653 struct drm_i915_private *dev_priv = dev->dev_private;
654 struct drm_crtc *crtc = encoder->crtc; 668 struct drm_crtc *crtc = encoder->crtc;
655 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 669 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
656 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 670 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
657 int port = intel_hdmi->ddi_port; 671 int port = intel_ddi_get_encoder_port(intel_encoder);
658 int pipe = intel_crtc->pipe; 672 int pipe = intel_crtc->pipe;
659 int p, n2, r2; 673 int type = intel_encoder->type;
660 u32 temp, i;
661 674
662 /* On Haswell, we need to enable the clocks and prepare DDI function to 675 DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n",
663 * work in HDMI mode for this pipe. 676 port_name(port), pipe_name(pipe));
664 */ 677
665 DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe)); 678 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
679 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
680
681 intel_dp->DP = DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
682 switch (intel_dp->lane_count) {
683 case 1:
684 intel_dp->DP |= DDI_PORT_WIDTH_X1;
685 break;
686 case 2:
687 intel_dp->DP |= DDI_PORT_WIDTH_X2;
688 break;
689 case 4:
690 intel_dp->DP |= DDI_PORT_WIDTH_X4;
691 break;
692 default:
693 intel_dp->DP |= DDI_PORT_WIDTH_X4;
694 WARN(1, "Unexpected DP lane count %d\n",
695 intel_dp->lane_count);
696 break;
697 }
698
699 if (intel_dp->has_audio) {
700 DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
701 pipe_name(intel_crtc->pipe));
702
703 /* write eld */
704 DRM_DEBUG_DRIVER("DP audio: write eld information\n");
705 intel_write_eld(encoder, adjusted_mode);
706 }
707
708 intel_dp_init_link_config(intel_dp);
709
710 } else if (type == INTEL_OUTPUT_HDMI) {
711 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
712
713 if (intel_hdmi->has_audio) {
714 /* Proper support for digital audio needs a new logic
715 * and a new set of registers, so we leave it for future
716 * patch bombing.
717 */
718 DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
719 pipe_name(intel_crtc->pipe));
720
721 /* write eld */
722 DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
723 intel_write_eld(encoder, adjusted_mode);
724 }
725
726 intel_hdmi->set_infoframes(encoder, adjusted_mode);
727 }
728}
729
730static struct intel_encoder *
731intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
732{
733 struct drm_device *dev = crtc->dev;
734 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
735 struct intel_encoder *intel_encoder, *ret = NULL;
736 int num_encoders = 0;
737
738 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
739 ret = intel_encoder;
740 num_encoders++;
741 }
742
743 if (num_encoders != 1)
744 WARN(1, "%d encoders on crtc for pipe %d\n", num_encoders,
745 intel_crtc->pipe);
746
747 BUG_ON(ret == NULL);
748 return ret;
749}
750
751void intel_ddi_put_crtc_pll(struct drm_crtc *crtc)
752{
753 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
754 struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
755 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
756 uint32_t val;
757
758 switch (intel_crtc->ddi_pll_sel) {
759 case PORT_CLK_SEL_SPLL:
760 plls->spll_refcount--;
761 if (plls->spll_refcount == 0) {
762 DRM_DEBUG_KMS("Disabling SPLL\n");
763 val = I915_READ(SPLL_CTL);
764 WARN_ON(!(val & SPLL_PLL_ENABLE));
765 I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
766 POSTING_READ(SPLL_CTL);
767 }
768 break;
769 case PORT_CLK_SEL_WRPLL1:
770 plls->wrpll1_refcount--;
771 if (plls->wrpll1_refcount == 0) {
772 DRM_DEBUG_KMS("Disabling WRPLL 1\n");
773 val = I915_READ(WRPLL_CTL1);
774 WARN_ON(!(val & WRPLL_PLL_ENABLE));
775 I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE);
776 POSTING_READ(WRPLL_CTL1);
777 }
778 break;
779 case PORT_CLK_SEL_WRPLL2:
780 plls->wrpll2_refcount--;
781 if (plls->wrpll2_refcount == 0) {
782 DRM_DEBUG_KMS("Disabling WRPLL 2\n");
783 val = I915_READ(WRPLL_CTL2);
784 WARN_ON(!(val & WRPLL_PLL_ENABLE));
785 I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE);
786 POSTING_READ(WRPLL_CTL2);
787 }
788 break;
789 }
790
791 WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n");
792 WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n");
793 WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n");
794
795 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
796}
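intel_ddi_put_crtc_pll() above is the release half of a simple refcounting scheme: each CRTC holds a reference on the shared SPLL/WRPLL it selected, and the PLL is only switched off when the last user drops it (the trailing WARNs catch underflow). A compilable sketch of the idea, with a hypothetical shared_pll struct in place of the real register accesses:

#include <assert.h>
#include <stdbool.h>

/* Hypothetical shared-PLL bookkeeping mirroring the spll/wrpll refcounts. */
struct shared_pll {
	int refcount;
	bool enabled;
};

static void pll_get(struct shared_pll *pll)
{
	if (pll->refcount++ == 0)
		pll->enabled = true;	/* first user: set the PLL enable bit */
}

static void pll_put(struct shared_pll *pll)
{
	assert(pll->refcount > 0);	/* the driver WARNs on negative refcounts */
	if (--pll->refcount == 0)
		pll->enabled = false;	/* last user: clear the PLL enable bit */
}

int main(void)
{
	struct shared_pll wrpll1 = { 0, false };

	pll_get(&wrpll1);	/* pipe A starts using WRPLL 1 */
	pll_get(&wrpll1);	/* pipe B shares it */
	pll_put(&wrpll1);	/* pipe A done: PLL stays enabled */
	pll_put(&wrpll1);	/* pipe B done: PLL is now disabled */
	return wrpll1.enabled ? 1 : 0;
}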
797
798static void intel_ddi_calculate_wrpll(int clock, int *p, int *n2, int *r2)
799{
800 u32 i;
666 801
667	for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++)
668		if (crtc->mode.clock <= wrpll_tmds_clock_table[i].clock)
669			break;
670
671	if (i == ARRAY_SIZE(wrpll_tmds_clock_table))
672		i--;
673
674	p = wrpll_tmds_clock_table[i].p;
675	n2 = wrpll_tmds_clock_table[i].n2;
676	r2 = wrpll_tmds_clock_table[i].r2;
677
678	if (wrpll_tmds_clock_table[i].clock != crtc->mode.clock)
679		DRM_INFO("WR PLL: using settings for %dKHz on %dKHz mode\n",
680			 wrpll_tmds_clock_table[i].clock, crtc->mode.clock);
681
682	DRM_DEBUG_KMS("WR PLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
683		      crtc->mode.clock, p, n2, r2);
 802	for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++)
 803		if (clock <= wrpll_tmds_clock_table[i].clock)
 804			break;
 805
 806	if (i == ARRAY_SIZE(wrpll_tmds_clock_table))
 807		i--;
 808
 809	*p = wrpll_tmds_clock_table[i].p;
 810	*n2 = wrpll_tmds_clock_table[i].n2;
 811	*r2 = wrpll_tmds_clock_table[i].r2;
 812
 813	if (wrpll_tmds_clock_table[i].clock != clock)
 814		DRM_INFO("WRPLL: using settings for %dKHz on %dKHz mode\n",
 815			 wrpll_tmds_clock_table[i].clock, clock);
 816
 817	DRM_DEBUG_KMS("WRPLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
 818		      clock, *p, *n2, *r2);
 819}
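The lookup above takes the first table entry whose clock is at least the requested one, clamping to the last entry when the mode clock exceeds the whole table. A standalone sketch of that selection; only the 298000 row comes from the table in this diff, the other divider values are invented for illustration:

#include <stdio.h>

struct wrpll_tmds_clock { int clock, p, n2, r2; };

/* Hypothetical three-entry excerpt; the real table covers many TMDS clocks. */
static const struct wrpll_tmds_clock table[] = {
	{  25175, 4, 21, 18 },	/* illustrative values */
	{ 148500, 2, 22, 16 },	/* illustrative values */
	{ 298000, 2, 21, 19 },	/* matches the table row shown above */
};

static void calculate_wrpll(int clock, int *p, int *n2, int *r2)
{
	unsigned int i;

	/* First entry whose clock is >= the requested one... */
	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (clock <= table[i].clock)
			break;
	/* ...clamped to the last entry for faster modes. */
	if (i == sizeof(table) / sizeof(table[0]))
		i--;

	*p = table[i].p;
	*n2 = table[i].n2;
	*r2 = table[i].r2;
}

int main(void)
{
	int p, n2, r2;

	calculate_wrpll(148500, &p, &n2, &r2);
	printf("p=%d n2=%d r2=%d\n", p, n2, r2);
	return 0;
}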
684 820
685	/* Enable LCPLL if disabled */
686	temp = I915_READ(LCPLL_CTL);
687	if (temp & LCPLL_PLL_DISABLE)
688		I915_WRITE(LCPLL_CTL,
689			   temp & ~LCPLL_PLL_DISABLE);
690
691	/* Configure WR PLL 1, program the correct divider values for
692	 * the desired frequency and wait for warmup */
693	I915_WRITE(WRPLL_CTL1,
694		   WRPLL_PLL_ENABLE |
695		   WRPLL_PLL_SELECT_LCPLL_2700 |
696		   WRPLL_DIVIDER_REFERENCE(r2) |
697		   WRPLL_DIVIDER_FEEDBACK(n2) |
698		   WRPLL_DIVIDER_POST(p));
699
700	udelay(20);
701
702	/* Use WRPLL1 clock to drive the output to the port, and tell the pipe to use
703	 * this port for connection.
704	 */
705	I915_WRITE(PORT_CLK_SEL(port),
706		   PORT_CLK_SEL_WRPLL1);
707	I915_WRITE(PIPE_CLK_SEL(pipe),
708		   PIPE_CLK_SEL_PORT(port));
 821bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock)
 822{
 823	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 824	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
 825	struct drm_encoder *encoder = &intel_encoder->base;
 826	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
 827	struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
 828	int type = intel_encoder->type;
 829	enum pipe pipe = intel_crtc->pipe;
 830	uint32_t reg, val;
 831
 832	/* TODO: reuse PLLs when possible (compare values) */
 833
 834	intel_ddi_put_crtc_pll(crtc);
 835
 836	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
 837		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 838
 839		switch (intel_dp->link_bw) {
 840		case DP_LINK_BW_1_62:
 841			intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
 842			break;
843 case DP_LINK_BW_2_7:
844 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
845 break;
846 case DP_LINK_BW_5_4:
847 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
848 break;
849 default:
850 DRM_ERROR("Link bandwidth %d unsupported\n",
851 intel_dp->link_bw);
852 return false;
853 }
854
855 /* We don't need to turn any PLL on because we'll use LCPLL. */
856 return true;
857
858 } else if (type == INTEL_OUTPUT_HDMI) {
859 int p, n2, r2;
860
861 if (plls->wrpll1_refcount == 0) {
862 DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
863 pipe_name(pipe));
864 plls->wrpll1_refcount++;
865 reg = WRPLL_CTL1;
866 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
867 } else if (plls->wrpll2_refcount == 0) {
868 DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
869 pipe_name(pipe));
870 plls->wrpll2_refcount++;
871 reg = WRPLL_CTL2;
872 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
873 } else {
874 DRM_ERROR("No WRPLLs available!\n");
875 return false;
876 }
709 877
878 WARN(I915_READ(reg) & WRPLL_PLL_ENABLE,
879 "WRPLL already enabled\n");
880
881 intel_ddi_calculate_wrpll(clock, &p, &n2, &r2);
882
883 val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
884 WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
885 WRPLL_DIVIDER_POST(p);
886
887 } else if (type == INTEL_OUTPUT_ANALOG) {
888 if (plls->spll_refcount == 0) {
889 DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
890 pipe_name(pipe));
891 plls->spll_refcount++;
892 reg = SPLL_CTL;
893 intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
894 }
895
896 WARN(I915_READ(reg) & SPLL_PLL_ENABLE,
897 "SPLL already enabled\n");
898
899 val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
900
901 } else {
902 WARN(1, "Invalid DDI encoder type %d\n", type);
903 return false;
904 }
905
906 I915_WRITE(reg, val);
710 udelay(20); 907 udelay(20);
711 908
712	if (intel_hdmi->has_audio) {
713		/* Proper support for digital audio needs a new logic and a new set
714		 * of registers, so we leave it for future patch bombing.
715		 */
716		DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
717				 pipe_name(intel_crtc->pipe));
718
719		/* write eld */
720		DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
721		intel_write_eld(encoder, adjusted_mode);
 909	return true;
 910}
 911
 912void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
 913{
 914	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
915 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
916 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
917 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
918 int type = intel_encoder->type;
919 uint32_t temp;
920
921 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
922
923 temp = TRANS_MSA_SYNC_CLK;
924 switch (intel_crtc->bpp) {
925 case 18:
926 temp |= TRANS_MSA_6_BPC;
927 break;
928 case 24:
929 temp |= TRANS_MSA_8_BPC;
930 break;
931 case 30:
932 temp |= TRANS_MSA_10_BPC;
933 break;
934 case 36:
935 temp |= TRANS_MSA_12_BPC;
936 break;
937 default:
938 temp |= TRANS_MSA_8_BPC;
939 WARN(1, "%d bpp unsupported by DDI function\n",
940 intel_crtc->bpp);
941 }
942 I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
722 } 943 }
944}
723 945
724	/* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */
725	temp = PIPE_DDI_FUNC_ENABLE | PIPE_DDI_SELECT_PORT(port);
 946void intel_ddi_enable_pipe_func(struct drm_crtc *crtc)
 947{
948 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
949 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
950 struct drm_encoder *encoder = &intel_encoder->base;
951 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
952 enum pipe pipe = intel_crtc->pipe;
953 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
954 enum port port = intel_ddi_get_encoder_port(intel_encoder);
955 int type = intel_encoder->type;
956 uint32_t temp;
957
958 /* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
959 temp = TRANS_DDI_FUNC_ENABLE;
960 temp |= TRANS_DDI_SELECT_PORT(port);
726 961
727	switch (intel_crtc->bpp) {
728	case 18:
729		temp |= PIPE_DDI_BPC_6;
730		break;
731	case 24:
732		temp |= PIPE_DDI_BPC_8;
733		break;
734	case 30:
735		temp |= PIPE_DDI_BPC_10;
736		break;
737	case 36:
738		temp |= PIPE_DDI_BPC_12;
739		break;
740	default:
741		WARN(1, "%d bpp unsupported by pipe DDI function\n",
742		     intel_crtc->bpp);
743	}
744
745	if (intel_hdmi->has_hdmi_sink)
746		temp |= PIPE_DDI_MODE_SELECT_HDMI;
 962	switch (intel_crtc->bpp) {
 963	case 18:
 964		temp |= TRANS_DDI_BPC_6;
 965		break;
 966	case 24:
 967		temp |= TRANS_DDI_BPC_8;
 968		break;
 969	case 30:
 970		temp |= TRANS_DDI_BPC_10;
 971		break;
 972	case 36:
 973		temp |= TRANS_DDI_BPC_12;
 974		break;
 975	default:
 976		WARN(1, "%d bpp unsupported by transcoder DDI function\n",
 977		     intel_crtc->bpp);
 978	}
 979
 980	if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
 981		temp |= TRANS_DDI_PVSYNC;
982 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
983 temp |= TRANS_DDI_PHSYNC;
984
985 if (cpu_transcoder == TRANSCODER_EDP) {
986 switch (pipe) {
987 case PIPE_A:
988 temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
989 break;
990 case PIPE_B:
991 temp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
992 break;
993 case PIPE_C:
994 temp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
995 break;
996 default:
997 BUG();
998 break;
999 }
1000 }
1001
1002 if (type == INTEL_OUTPUT_HDMI) {
1003 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
1004
1005 if (intel_hdmi->has_hdmi_sink)
1006 temp |= TRANS_DDI_MODE_SELECT_HDMI;
1007 else
1008 temp |= TRANS_DDI_MODE_SELECT_DVI;
1009
1010 } else if (type == INTEL_OUTPUT_ANALOG) {
1011 temp |= TRANS_DDI_MODE_SELECT_FDI;
1012 temp |= (intel_crtc->fdi_lanes - 1) << 1;
1013
1014 } else if (type == INTEL_OUTPUT_DISPLAYPORT ||
1015 type == INTEL_OUTPUT_EDP) {
1016 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1017
1018 temp |= TRANS_DDI_MODE_SELECT_DP_SST;
1019
1020 switch (intel_dp->lane_count) {
1021 case 1:
1022 temp |= TRANS_DDI_PORT_WIDTH_X1;
1023 break;
1024 case 2:
1025 temp |= TRANS_DDI_PORT_WIDTH_X2;
1026 break;
1027 case 4:
1028 temp |= TRANS_DDI_PORT_WIDTH_X4;
1029 break;
1030 default:
1031 temp |= TRANS_DDI_PORT_WIDTH_X4;
1032 WARN(1, "Unsupported lane count %d\n",
1033 intel_dp->lane_count);
1034 }
1035
1036 } else {
1037 WARN(1, "Invalid encoder type %d for pipe %d\n",
1038 intel_encoder->type, pipe);
1039 }
1040
1041 I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
1042}
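intel_ddi_enable_pipe_func() composes a single register word: an enable bit, a port-select field, a bits-per-colour field derived from the pipe bpp, sync polarity bits, and a mode-select field, all OR'ed together before one I915_WRITE. A compilable sketch of that field-packing pattern; the bit positions and field encodings below are hypothetical, not the real TRANS_DDI_FUNC_CTL layout:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit layout loosely modelled on the code above. */
#define FUNC_ENABLE	(1u << 31)
#define SELECT_PORT(p)	((uint32_t)(p) << 28)
#define BPC_8		(0u << 20)
#define BPC_10		(1u << 20)
#define BPC_6		(2u << 20)
#define BPC_12		(3u << 20)

static uint32_t build_func_ctl(int port, int pipe_bpp)
{
	uint32_t temp = FUNC_ENABLE | SELECT_PORT(port);

	switch (pipe_bpp) {	/* total bpp across three colour components */
	case 18: temp |= BPC_6;  break;
	case 24: temp |= BPC_8;  break;
	case 30: temp |= BPC_10; break;
	case 36: temp |= BPC_12; break;
	default: temp |= BPC_8;  break;	/* the driver WARNs here instead */
	}
	return temp;
}

int main(void)
{
	printf("FUNC_CTL = 0x%08x\n", build_func_ctl(1, 24));
	return 0;
}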
1043
1044void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
1045 enum transcoder cpu_transcoder)
1046{
1047 uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
1048 uint32_t val = I915_READ(reg);
1049
1050 val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK);
1051 val |= TRANS_DDI_PORT_NONE;
1052 I915_WRITE(reg, val);
1053}
1054
1055bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
1056{
1057 struct drm_device *dev = intel_connector->base.dev;
1058 struct drm_i915_private *dev_priv = dev->dev_private;
1059 struct intel_encoder *intel_encoder = intel_connector->encoder;
1060 int type = intel_connector->base.connector_type;
1061 enum port port = intel_ddi_get_encoder_port(intel_encoder);
1062 enum pipe pipe = 0;
1063 enum transcoder cpu_transcoder;
1064 uint32_t tmp;
1065
1066 if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
1067 return false;
1068
1069 if (port == PORT_A)
1070 cpu_transcoder = TRANSCODER_EDP;
747	else
748		temp |= PIPE_DDI_MODE_SELECT_DVI;
749
750	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
751		temp |= PIPE_DDI_PVSYNC;
752	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
753		temp |= PIPE_DDI_PHSYNC;
754
755	I915_WRITE(DDI_FUNC_CTL(pipe), temp);
756
757	intel_hdmi->set_infoframes(encoder, adjusted_mode);
758}
1071	else
1072		cpu_transcoder = pipe;
1073
1074	tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
1075
1076	switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
1077	case TRANS_DDI_MODE_SELECT_HDMI:
1078	case TRANS_DDI_MODE_SELECT_DVI:
1079		return (type == DRM_MODE_CONNECTOR_HDMIA);
1080
1081	case TRANS_DDI_MODE_SELECT_DP_SST:
1082		if (type == DRM_MODE_CONNECTOR_eDP)
1083			return true;
1084	case TRANS_DDI_MODE_SELECT_DP_MST:
1085		return (type == DRM_MODE_CONNECTOR_DisplayPort);
1086
1087	case TRANS_DDI_MODE_SELECT_FDI:
1088		return (type == DRM_MODE_CONNECTOR_VGA);
1089
1090	default:
1091		return false;
1092	}
1093}
759 1094
760bool intel_ddi_get_hw_state(struct intel_encoder *encoder, 1095bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
@@ -762,58 +1097,418 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
762{ 1097{
763 struct drm_device *dev = encoder->base.dev; 1098 struct drm_device *dev = encoder->base.dev;
764 struct drm_i915_private *dev_priv = dev->dev_private; 1099 struct drm_i915_private *dev_priv = dev->dev_private;
765 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 1100 enum port port = intel_ddi_get_encoder_port(encoder);
766 u32 tmp; 1101 u32 tmp;
767 int i; 1102 int i;
768 1103
769 tmp = I915_READ(DDI_BUF_CTL(intel_hdmi->ddi_port)); 1104 tmp = I915_READ(DDI_BUF_CTL(port));
770 1105
771 if (!(tmp & DDI_BUF_CTL_ENABLE)) 1106 if (!(tmp & DDI_BUF_CTL_ENABLE))
772 return false; 1107 return false;
773 1108
774 for_each_pipe(i) { 1109 if (port == PORT_A) {
775 tmp = I915_READ(DDI_FUNC_CTL(i)); 1110 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
776 1111
777 if ((tmp & PIPE_DDI_PORT_MASK) 1112 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
778 == PIPE_DDI_SELECT_PORT(intel_hdmi->ddi_port)) { 1113 case TRANS_DDI_EDP_INPUT_A_ON:
779 *pipe = i; 1114 case TRANS_DDI_EDP_INPUT_A_ONOFF:
780 return true; 1115 *pipe = PIPE_A;
1116 break;
1117 case TRANS_DDI_EDP_INPUT_B_ONOFF:
1118 *pipe = PIPE_B;
1119 break;
1120 case TRANS_DDI_EDP_INPUT_C_ONOFF:
1121 *pipe = PIPE_C;
1122 break;
1123 }
1124
1125 return true;
1126 } else {
1127 for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
1128 tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
1129
1130 if ((tmp & TRANS_DDI_PORT_MASK)
1131 == TRANS_DDI_SELECT_PORT(port)) {
1132 *pipe = i;
1133 return true;
1134 }
781 } 1135 }
782 } 1136 }
783 1137
784 DRM_DEBUG_KMS("No pipe for ddi port %i found\n", intel_hdmi->ddi_port); 1138 DRM_DEBUG_KMS("No pipe for ddi port %i found\n", port);
785 1139
786 return true; 1140 return true;
787} 1141}
788 1142
789void intel_enable_ddi(struct intel_encoder *encoder)
1143static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
1144 enum pipe pipe)
1145{
1146 uint32_t temp, ret;
1147 enum port port;
1148 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1149 pipe);
1150 int i;
1151
1152 if (cpu_transcoder == TRANSCODER_EDP) {
1153 port = PORT_A;
1154 } else {
1155 temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
1156 temp &= TRANS_DDI_PORT_MASK;
1157
1158 for (i = PORT_B; i <= PORT_E; i++)
1159 if (temp == TRANS_DDI_SELECT_PORT(i))
1160 port = i;
1161 }
1162
1163 ret = I915_READ(PORT_CLK_SEL(port));
1164
1165 DRM_DEBUG_KMS("Pipe %c connected to port %c using clock 0x%08x\n",
1166 pipe_name(pipe), port_name(port), ret);
1167
1168 return ret;
1169}
1170
1171void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
790{
791	struct drm_device *dev = encoder->base.dev;
792	struct drm_i915_private *dev_priv = dev->dev_private;
793	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
794	int port = intel_hdmi->ddi_port;
795	u32 temp;
796
797	temp = I915_READ(DDI_BUF_CTL(port));
798	temp |= DDI_BUF_CTL_ENABLE;
799
800	/* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width,
801	 * and swing/emphasis values are ignored so nothing special needs
802	 * to be done besides enabling the port.
803	 */
804	I915_WRITE(DDI_BUF_CTL(port), temp);
805}
1172{
1173	struct drm_i915_private *dev_priv = dev->dev_private;
1174	enum pipe pipe;
1175	struct intel_crtc *intel_crtc;
1176
1177	for_each_pipe(pipe) {
1178		intel_crtc =
1179			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
1180
1181		if (!intel_crtc->active)
1182			continue;
1183
1184		intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
1185								 pipe);
1186
1187		switch (intel_crtc->ddi_pll_sel) {
1188		case PORT_CLK_SEL_SPLL:
1189			dev_priv->ddi_plls.spll_refcount++;
1190			break;
1191		case PORT_CLK_SEL_WRPLL1:
1192			dev_priv->ddi_plls.wrpll1_refcount++;
1193			break;
1194		case PORT_CLK_SEL_WRPLL2:
1195			dev_priv->ddi_plls.wrpll2_refcount++;
1196			break;
1197		}
1198	}
1199}
806 1200
807void intel_disable_ddi(struct intel_encoder *encoder)
808{
809	struct drm_device *dev = encoder->base.dev;
1201void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
1202{
1203	struct drm_crtc *crtc = &intel_crtc->base;
1204 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
1205 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
1206 enum port port = intel_ddi_get_encoder_port(intel_encoder);
1207 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
1208
1209 if (cpu_transcoder != TRANSCODER_EDP)
1210 I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
1211 TRANS_CLK_SEL_PORT(port));
1212}
1213
1214void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
1215{
1216 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
1217 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
1218
1219 if (cpu_transcoder != TRANSCODER_EDP)
1220 I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
1221 TRANS_CLK_SEL_DISABLED);
1222}
1223
1224static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
1225{
1226 struct drm_encoder *encoder = &intel_encoder->base;
1227 struct drm_crtc *crtc = encoder->crtc;
1228 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
1229 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1230 enum port port = intel_ddi_get_encoder_port(intel_encoder);
1231 int type = intel_encoder->type;
1232
1233 if (type == INTEL_OUTPUT_EDP) {
1234 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1235 ironlake_edp_panel_vdd_on(intel_dp);
1236 ironlake_edp_panel_on(intel_dp);
1237 ironlake_edp_panel_vdd_off(intel_dp, true);
1238 }
1239
1240 WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
1241 I915_WRITE(PORT_CLK_SEL(port), intel_crtc->ddi_pll_sel);
1242
1243 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
1244 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1245
1246 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
1247 intel_dp_start_link_train(intel_dp);
1248 intel_dp_complete_link_train(intel_dp);
1249 }
1250}
1251
1252static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
1253{
1254 struct drm_encoder *encoder = &intel_encoder->base;
1255 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
1256 enum port port = intel_ddi_get_encoder_port(intel_encoder);
1257 int type = intel_encoder->type;
1258 uint32_t val;
1259 bool wait = false;
1260
1261 val = I915_READ(DDI_BUF_CTL(port));
1262 if (val & DDI_BUF_CTL_ENABLE) {
1263 val &= ~DDI_BUF_CTL_ENABLE;
1264 I915_WRITE(DDI_BUF_CTL(port), val);
1265 wait = true;
1266 }
1267
1268 val = I915_READ(DP_TP_CTL(port));
1269 val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
1270 val |= DP_TP_CTL_LINK_TRAIN_PAT1;
1271 I915_WRITE(DP_TP_CTL(port), val);
1272
1273 if (wait)
1274 intel_wait_ddi_buf_idle(dev_priv, port);
1275
1276 if (type == INTEL_OUTPUT_EDP) {
1277 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1278 ironlake_edp_panel_vdd_on(intel_dp);
1279 ironlake_edp_panel_off(intel_dp);
1280 }
1281
1282 I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
1283}
1284
1285static void intel_enable_ddi(struct intel_encoder *intel_encoder)
1286{
1287 struct drm_encoder *encoder = &intel_encoder->base;
1288 struct drm_device *dev = encoder->dev;
810 struct drm_i915_private *dev_priv = dev->dev_private; 1289 struct drm_i915_private *dev_priv = dev->dev_private;
811	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
812	int port = intel_hdmi->ddi_port;
813	u32 temp;
1290	enum port port = intel_ddi_get_encoder_port(intel_encoder);
1291	int type = intel_encoder->type;
1292
1293 if (type == INTEL_OUTPUT_HDMI) {
1294 /* In HDMI/DVI mode, the port width, and swing/emphasis values
1295 * are ignored so nothing special needs to be done besides
1296 * enabling the port.
1297 */
1298 I915_WRITE(DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE);
1299 } else if (type == INTEL_OUTPUT_EDP) {
1300 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1301
1302 ironlake_edp_backlight_on(intel_dp);
1303 }
1304}
1305
1306static void intel_disable_ddi(struct intel_encoder *intel_encoder)
1307{
1308 struct drm_encoder *encoder = &intel_encoder->base;
1309 int type = intel_encoder->type;
1310
1311 if (type == INTEL_OUTPUT_EDP) {
1312 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1313
1314 ironlake_edp_backlight_off(intel_dp);
1315 }
1316}
1317
1318int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
1319{
1320 if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
1321 return 450;
1322 else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) ==
1323 LCPLL_CLK_FREQ_450)
1324 return 450;
1325 else if (IS_ULT(dev_priv->dev))
1326 return 338;
1327 else
1328 return 540;
1329}
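intel_ddi_get_cdclk_freq() above is a priority chain: a fuse strap forces 450, an LCPLL frequency field of 450 also yields 450, ULT parts run lower, and everything else gets the top bin. A compilable sketch of the same decision order with the register reads replaced by booleans (the MHz values come from the function above; the inputs are simplifications):

#include <stdbool.h>
#include <stdio.h>

static int cdclk_freq(bool fuse_limit, bool lcpll_450, bool is_ult)
{
	if (fuse_limit)
		return 450;	/* fuse strap caps CDCLK */
	else if (lcpll_450)
		return 450;	/* LCPLL already configured for 450 */
	else if (is_ult)
		return 338;	/* low-power part */
	else
		return 540;	/* default top bin */
}

int main(void)
{
	printf("%d MHz\n", cdclk_freq(false, false, true));	/* prints 338 */
	return 0;
}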
1330
1331void intel_ddi_pll_init(struct drm_device *dev)
1332{
1333 struct drm_i915_private *dev_priv = dev->dev_private;
1334 uint32_t val = I915_READ(LCPLL_CTL);
1335
1336 /* The LCPLL register should be turned on by the BIOS. For now let's
1337 * just check its state and print errors in case something is wrong.
1338 * Don't even try to turn it on.
1339 */
1340
1341 DRM_DEBUG_KMS("CDCLK running at %dMHz\n",
1342 intel_ddi_get_cdclk_freq(dev_priv));
1343
1344 if (val & LCPLL_CD_SOURCE_FCLK)
1345 DRM_ERROR("CDCLK source is not LCPLL\n");
1346
1347 if (val & LCPLL_PLL_DISABLE)
1348 DRM_ERROR("LCPLL is disabled\n");
1349}
1350
1351void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
1352{
1353 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
1354 struct intel_dp *intel_dp = &intel_dig_port->dp;
1355 struct drm_i915_private *dev_priv = encoder->dev->dev_private;
1356 enum port port = intel_dig_port->port;
1357 bool wait;
1358 uint32_t val;
1359
1360 if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
1361 val = I915_READ(DDI_BUF_CTL(port));
1362 if (val & DDI_BUF_CTL_ENABLE) {
1363 val &= ~DDI_BUF_CTL_ENABLE;
1364 I915_WRITE(DDI_BUF_CTL(port), val);
1365 wait = true;
1366 }
1367
1368 val = I915_READ(DP_TP_CTL(port));
1369 val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
1370 val |= DP_TP_CTL_LINK_TRAIN_PAT1;
1371 I915_WRITE(DP_TP_CTL(port), val);
1372 POSTING_READ(DP_TP_CTL(port));
1373
1374 if (wait)
1375 intel_wait_ddi_buf_idle(dev_priv, port);
1376 }
1377
1378 val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
1379 DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
1380 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
1381 val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
1382 I915_WRITE(DP_TP_CTL(port), val);
1383 POSTING_READ(DP_TP_CTL(port));
1384
1385 intel_dp->DP |= DDI_BUF_CTL_ENABLE;
1386 I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP);
1387 POSTING_READ(DDI_BUF_CTL(port));
1388
1389 udelay(600);
1390}
1391
1392void intel_ddi_fdi_disable(struct drm_crtc *crtc)
1393{
1394 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
1395 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
1396 uint32_t val;
1397
1398 intel_ddi_post_disable(intel_encoder);
1399
1400 val = I915_READ(_FDI_RXA_CTL);
1401 val &= ~FDI_RX_ENABLE;
1402 I915_WRITE(_FDI_RXA_CTL, val);
1403
1404 val = I915_READ(_FDI_RXA_MISC);
1405 val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
1406 val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
1407 I915_WRITE(_FDI_RXA_MISC, val);
1408
1409 val = I915_READ(_FDI_RXA_CTL);
1410 val &= ~FDI_PCDCLK;
1411 I915_WRITE(_FDI_RXA_CTL, val);
1412
1413 val = I915_READ(_FDI_RXA_CTL);
1414 val &= ~FDI_RX_PLL_ENABLE;
1415 I915_WRITE(_FDI_RXA_CTL, val);
1416}
1417
1418static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
1419{
1420 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
1421 int type = intel_encoder->type;
1422
1423 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP)
1424 intel_dp_check_link_status(intel_dp);
1425}
1426
1427static void intel_ddi_destroy(struct drm_encoder *encoder)
1428{
1429 /* HDMI has nothing special to destroy, so we can go with this. */
1430 intel_dp_encoder_destroy(encoder);
1431}
1432
1433static bool intel_ddi_mode_fixup(struct drm_encoder *encoder,
1434 const struct drm_display_mode *mode,
1435 struct drm_display_mode *adjusted_mode)
1436{
1437 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
1438 int type = intel_encoder->type;
1439
1440 WARN(type == INTEL_OUTPUT_UNKNOWN, "mode_fixup() on unknown output!\n");
1441
1442 if (type == INTEL_OUTPUT_HDMI)
1443 return intel_hdmi_mode_fixup(encoder, mode, adjusted_mode);
1444 else
1445 return intel_dp_mode_fixup(encoder, mode, adjusted_mode);
1446}
1447
1448static const struct drm_encoder_funcs intel_ddi_funcs = {
1449 .destroy = intel_ddi_destroy,
1450};
1451
1452static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
1453 .mode_fixup = intel_ddi_mode_fixup,
1454 .mode_set = intel_ddi_mode_set,
1455 .disable = intel_encoder_noop,
1456};
1457
1458void intel_ddi_init(struct drm_device *dev, enum port port)
1459{
1460 struct intel_digital_port *intel_dig_port;
1461 struct intel_encoder *intel_encoder;
1462 struct drm_encoder *encoder;
1463 struct intel_connector *hdmi_connector = NULL;
1464 struct intel_connector *dp_connector = NULL;
1465
1466 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
1467 if (!intel_dig_port)
1468 return;
1469
1470 dp_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
1471 if (!dp_connector) {
1472 kfree(intel_dig_port);
1473 return;
1474 }
1475
1476 if (port != PORT_A) {
1477 hdmi_connector = kzalloc(sizeof(struct intel_connector),
1478 GFP_KERNEL);
1479 if (!hdmi_connector) {
1480 kfree(dp_connector);
1481 kfree(intel_dig_port);
1482 return;
1483 }
1484 }
1485
1486 intel_encoder = &intel_dig_port->base;
1487 encoder = &intel_encoder->base;
1488
1489 drm_encoder_init(dev, encoder, &intel_ddi_funcs,
1490 DRM_MODE_ENCODER_TMDS);
1491 drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs);
1492
1493 intel_encoder->enable = intel_enable_ddi;
1494 intel_encoder->pre_enable = intel_ddi_pre_enable;
1495 intel_encoder->disable = intel_disable_ddi;
1496 intel_encoder->post_disable = intel_ddi_post_disable;
1497 intel_encoder->get_hw_state = intel_ddi_get_hw_state;
1498
1499 intel_dig_port->port = port;
1500 if (hdmi_connector)
1501 intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port);
1502 else
1503 intel_dig_port->hdmi.sdvox_reg = 0;
1504 intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
814 1505
815	temp = I915_READ(DDI_BUF_CTL(port));
816	temp &= ~DDI_BUF_CTL_ENABLE;
817
818	I915_WRITE(DDI_BUF_CTL(port), temp);
1506	intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
1507	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
1508	intel_encoder->cloneable = false;
1509	intel_encoder->hot_plug = intel_ddi_hot_plug;
1510
1511	if (hdmi_connector)
1512		intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
1513	intel_dp_init_connector(intel_dig_port, dp_connector);
819} 1514}
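The new intel_ddi_init() uses the staged-allocation pattern: three kzalloc calls in sequence, where each failure frees everything allocated before it, in reverse order. A minimal sketch of that unwinding discipline with plain calloc/free and hypothetical placeholder types:

#include <stdlib.h>

/* Hypothetical placeholder for the dig_port/connector trio above. */
struct port_state { void *dig_port, *dp_conn, *hdmi_conn; };

static int init_port(struct port_state *s, int needs_hdmi)
{
	s->dig_port = calloc(1, 64);
	if (!s->dig_port)
		return -1;

	s->dp_conn = calloc(1, 64);
	if (!s->dp_conn) {
		free(s->dig_port);		/* unwind stage 1 */
		return -1;
	}

	s->hdmi_conn = NULL;
	if (needs_hdmi) {			/* port A gets no HDMI connector */
		s->hdmi_conn = calloc(1, 64);
		if (!s->hdmi_conn) {
			free(s->dp_conn);	/* unwind stage 2... */
			free(s->dig_port);	/* ...then stage 1 */
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct port_state s;

	if (init_port(&s, 1))
		return 1;
	free(s.hdmi_conn);
	free(s.dp_conn);
	free(s.dig_port);
	return 0;
}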
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b426d44a2b05..5d127e068950 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -41,8 +41,6 @@
41#include <drm/drm_crtc_helper.h> 41#include <drm/drm_crtc_helper.h>
42#include <linux/dma_remapping.h> 42#include <linux/dma_remapping.h>
43 43
44#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
45
46bool intel_pipe_has_type(struct drm_crtc *crtc, int type); 44bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
47static void intel_increase_pllclock(struct drm_crtc *crtc); 45static void intel_increase_pllclock(struct drm_crtc *crtc);
48static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); 46static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
@@ -80,6 +78,16 @@ struct intel_limit {
80/* FDI */ 78/* FDI */
81#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */ 79#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */
82 80
81int
82intel_pch_rawclk(struct drm_device *dev)
83{
84 struct drm_i915_private *dev_priv = dev->dev_private;
85
86 WARN_ON(!HAS_PCH_SPLIT(dev));
87
88 return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
89}
90
83static bool 91static bool
84intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 92intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
85 int target, int refclk, intel_clock_t *match_clock, 93 int target, int refclk, intel_clock_t *match_clock,
@@ -380,7 +388,7 @@ static const intel_limit_t intel_limits_vlv_dac = {
380 388
381static const intel_limit_t intel_limits_vlv_hdmi = { 389static const intel_limit_t intel_limits_vlv_hdmi = {
382 .dot = { .min = 20000, .max = 165000 }, 390 .dot = { .min = 20000, .max = 165000 },
383	.vco = { .min = 5994000, .max = 4000000 },
391	.vco = { .min = 4000000, .max = 5994000},
384 .n = { .min = 1, .max = 7 }, 392 .n = { .min = 1, .max = 7 },
385 .m = { .min = 60, .max = 300 }, /* guess */ 393 .m = { .min = 60, .max = 300 }, /* guess */
386 .m1 = { .min = 2, .max = 3 }, 394 .m1 = { .min = 2, .max = 3 },
@@ -393,10 +401,10 @@ static const intel_limit_t intel_limits_vlv_hdmi = {
393}; 401};
394 402
395static const intel_limit_t intel_limits_vlv_dp = { 403static const intel_limit_t intel_limits_vlv_dp = {
396	.dot = { .min = 162000, .max = 270000 },
397	.vco = { .min = 5994000, .max = 4000000 },
398	.n = { .min = 1, .max = 7 },
399	.m = { .min = 60, .max = 300 }, /* guess */
404	.dot = { .min = 25000, .max = 270000 },
405	.vco = { .min = 4000000, .max = 6000000 },
406	.n = { .min = 1, .max = 7 },
407	.m = { .min = 22, .max = 450 },
400 .m1 = { .min = 2, .max = 3 }, 408 .m1 = { .min = 2, .max = 3 },
401 .m2 = { .min = 11, .max = 156 }, 409 .m2 = { .min = 11, .max = 156 },
402 .p = { .min = 10, .max = 30 }, 410 .p = { .min = 10, .max = 30 },
@@ -531,7 +539,7 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
531 limit = &intel_limits_ironlake_single_lvds; 539 limit = &intel_limits_ironlake_single_lvds;
532 } 540 }
533 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || 541 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
534		   HAS_eDP
542		   intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)
535 limit = &intel_limits_ironlake_display_port; 543 limit = &intel_limits_ironlake_display_port;
536 else 544 else
537 limit = &intel_limits_ironlake_dac; 545 limit = &intel_limits_ironlake_dac;
@@ -927,6 +935,15 @@ intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
927 return true; 935 return true;
928} 936}
929 937
938enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
939 enum pipe pipe)
940{
941 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
942 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
943
944 return intel_crtc->cpu_transcoder;
945}
946
930static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe) 947static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
931{ 948{
932 struct drm_i915_private *dev_priv = dev->dev_private; 949 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -999,9 +1016,11 @@ void intel_wait_for_vblank(struct drm_device *dev, int pipe)
999void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) 1016void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
1000{ 1017{
1001 struct drm_i915_private *dev_priv = dev->dev_private; 1018 struct drm_i915_private *dev_priv = dev->dev_private;
1019 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1020 pipe);
1002 1021
1003 if (INTEL_INFO(dev)->gen >= 4) { 1022 if (INTEL_INFO(dev)->gen >= 4) {
1004		int reg = PIPECONF(pipe);
1023		int reg = PIPECONF(cpu_transcoder);
1005 1024
1006 /* Wait for the Pipe State to go off */ 1025 /* Wait for the Pipe State to go off */
1007 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 1026 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
@@ -1103,12 +1122,14 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1103 int reg; 1122 int reg;
1104 u32 val; 1123 u32 val;
1105 bool cur_state; 1124 bool cur_state;
1125 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1126 pipe);
1106 1127
1107 if (IS_HASWELL(dev_priv->dev)) { 1128 if (IS_HASWELL(dev_priv->dev)) {
1108 /* On Haswell, DDI is used instead of FDI_TX_CTL */ 1129 /* On Haswell, DDI is used instead of FDI_TX_CTL */
1109 reg = DDI_FUNC_CTL(pipe); 1130 reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
1110 val = I915_READ(reg); 1131 val = I915_READ(reg);
1111 cur_state = !!(val & PIPE_DDI_FUNC_ENABLE); 1132 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1112 } else { 1133 } else {
1113 reg = FDI_TX_CTL(pipe); 1134 reg = FDI_TX_CTL(pipe);
1114 val = I915_READ(reg); 1135 val = I915_READ(reg);
@@ -1128,14 +1149,9 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1128 u32 val; 1149 u32 val;
1129 bool cur_state; 1150 bool cur_state;
1130 1151
1131	if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
1132		DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n");
1133		return;
1134	} else {
1135		reg = FDI_RX_CTL(pipe);
1136		val = I915_READ(reg);
1137		cur_state = !!(val & FDI_RX_ENABLE);
1138	}
1152	reg = FDI_RX_CTL(pipe);
1153	val = I915_READ(reg);
1154	cur_state = !!(val & FDI_RX_ENABLE);
1139 WARN(cur_state != state, 1155 WARN(cur_state != state,
1140 "FDI RX state assertion failure (expected %s, current %s)\n", 1156 "FDI RX state assertion failure (expected %s, current %s)\n",
1141 state_string(state), state_string(cur_state)); 1157 state_string(state), state_string(cur_state));
@@ -1168,10 +1184,6 @@ static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
1168 int reg; 1184 int reg;
1169 u32 val; 1185 u32 val;
1170 1186
1171 if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
1172 DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n");
1173 return;
1174 }
1175 reg = FDI_RX_CTL(pipe); 1187 reg = FDI_RX_CTL(pipe);
1176 val = I915_READ(reg); 1188 val = I915_READ(reg);
1177 WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n"); 1189 WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
@@ -1212,12 +1224,14 @@ void assert_pipe(struct drm_i915_private *dev_priv,
1212 int reg; 1224 int reg;
1213 u32 val; 1225 u32 val;
1214 bool cur_state; 1226 bool cur_state;
1227 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1228 pipe);
1215 1229
1216 /* if we need the pipe A quirk it must be always on */ 1230 /* if we need the pipe A quirk it must be always on */
1217 if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) 1231 if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
1218 state = true; 1232 state = true;
1219 1233
1220	reg = PIPECONF(pipe);
1234	reg = PIPECONF(cpu_transcoder);
1221 val = I915_READ(reg); 1235 val = I915_READ(reg);
1222 cur_state = !!(val & PIPECONF_ENABLE); 1236 cur_state = !!(val & PIPECONF_ENABLE);
1223 WARN(cur_state != state, 1237 WARN(cur_state != state,
@@ -1492,24 +1506,26 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1492 1506
1493/* SBI access */ 1507/* SBI access */
1494static void 1508static void
1495intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
1509intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
1510 enum intel_sbi_destination destination)
1496{ 1511{
1497 unsigned long flags; 1512 unsigned long flags;
1513 u32 tmp;
1498 1514
1499 spin_lock_irqsave(&dev_priv->dpio_lock, flags); 1515 spin_lock_irqsave(&dev_priv->dpio_lock, flags);
1500	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
1501				100)) {
1516	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
1502 DRM_ERROR("timeout waiting for SBI to become ready\n"); 1517 DRM_ERROR("timeout waiting for SBI to become ready\n");
1503 goto out_unlock; 1518 goto out_unlock;
1504 } 1519 }
1505 1520
1506	I915_WRITE(SBI_ADDR,
1507		   (reg << 16));
1508	I915_WRITE(SBI_DATA,
1509		   value);
1510	I915_WRITE(SBI_CTL_STAT,
1511		   SBI_BUSY |
1512		   SBI_CTL_OP_CRWR);
1521	I915_WRITE(SBI_ADDR, (reg << 16));
1522	I915_WRITE(SBI_DATA, value);
1523
1524	if (destination == SBI_ICLK)
1525		tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
1526	else
1527		tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
1528	I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
1513 1529
1514 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, 1530 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
1515 100)) { 1531 100)) {
@@ -1522,23 +1538,25 @@ out_unlock:
1522} 1538}
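The SBI write above is a classic polled register handshake: wait for BUSY to clear, program the address and data registers, kick the transaction by setting BUSY together with a destination/opcode field, then poll again for completion. A compilable sketch of that sequence, with plain variables standing in for SBI_CTL_STAT/SBI_ADDR/SBI_DATA and an always-succeeding poll in place of the real 100ms timeout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SBI_BUSY	(1u << 0)

/* Fake "registers" standing in for the SBI MMIO block. */
static uint32_t sbi_stat, sbi_addr, sbi_data;

static bool wait_not_busy(void)
{
	sbi_stat &= ~SBI_BUSY;	/* a real poll would time out after 100ms */
	return true;
}

static bool sbi_write(uint16_t reg, uint32_t value, uint32_t dest_op)
{
	if (!wait_not_busy())
		return false;	/* "timeout waiting for SBI to become ready" */

	sbi_addr = (uint32_t)reg << 16;	/* address goes in the high half */
	sbi_data = value;
	sbi_stat = SBI_BUSY | dest_op;	/* kick the transaction */

	return wait_not_busy();		/* wait for completion */
}

int main(void)
{
	printf("%s\n", sbi_write(0x0600, 0x1234, 0x2) ? "ok" : "timeout");
	return 0;
}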
1523 1539
1524static u32 1540static u32
1525intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
1541intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
1542 enum intel_sbi_destination destination)
1526{ 1543{
1527 unsigned long flags; 1544 unsigned long flags;
1528 u32 value = 0; 1545 u32 value = 0;
1529 1546
1530 spin_lock_irqsave(&dev_priv->dpio_lock, flags); 1547 spin_lock_irqsave(&dev_priv->dpio_lock, flags);
1531	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
1532				100)) {
1548	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
1533 DRM_ERROR("timeout waiting for SBI to become ready\n"); 1549 DRM_ERROR("timeout waiting for SBI to become ready\n");
1534 goto out_unlock; 1550 goto out_unlock;
1535 } 1551 }
1536 1552
1537	I915_WRITE(SBI_ADDR,
1538		   (reg << 16));
1539	I915_WRITE(SBI_CTL_STAT,
1540		   SBI_BUSY |
1541		   SBI_CTL_OP_CRRD);
1553	I915_WRITE(SBI_ADDR, (reg << 16));
1554
1555	if (destination == SBI_ICLK)
1556		value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
1557	else
1558 value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
1559 I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
1542 1560
1543 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, 1561 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
1544 100)) { 1562 100)) {
@@ -1554,14 +1572,14 @@ out_unlock:
1554} 1572}
1555 1573
1556/** 1574/**
1557 * intel_enable_pch_pll - enable PCH PLL
1575 * ironlake_enable_pch_pll - enable PCH PLL
1558 * @dev_priv: i915 private structure 1576 * @dev_priv: i915 private structure
1559 * @pipe: pipe PLL to enable 1577 * @pipe: pipe PLL to enable
1560 * 1578 *
1561 * The PCH PLL needs to be enabled before the PCH transcoder, since it 1579 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1562 * drives the transcoder clock. 1580 * drives the transcoder clock.
1563 */ 1581 */
1564static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
1582static void ironlake_enable_pch_pll(struct intel_crtc *intel_crtc)
1565{ 1583{
1566 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; 1584 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
1567 struct intel_pch_pll *pll; 1585 struct intel_pch_pll *pll;
@@ -1645,12 +1663,12 @@ static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
1645 pll->on = false; 1663 pll->on = false;
1646} 1664}
1647 1665
1648static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1666static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1649 enum pipe pipe) 1667 enum pipe pipe)
1650{ 1668{
1651	int reg;
1652	u32 val, pipeconf_val;
1653	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1669	struct drm_device *dev = dev_priv->dev;
1670	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1671	uint32_t reg, val, pipeconf_val;
1654 1672
1655 /* PCH only available on ILK+ */ 1673 /* PCH only available on ILK+ */
1656 BUG_ON(dev_priv->info->gen < 5); 1674 BUG_ON(dev_priv->info->gen < 5);
@@ -1664,10 +1682,15 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1664 assert_fdi_tx_enabled(dev_priv, pipe); 1682 assert_fdi_tx_enabled(dev_priv, pipe);
1665 assert_fdi_rx_enabled(dev_priv, pipe); 1683 assert_fdi_rx_enabled(dev_priv, pipe);
1666 1684
1667	if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
1668		DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n");
1669		return;
1685	if (HAS_PCH_CPT(dev)) {
1686		/* Workaround: Set the timing override bit before enabling the
1687		 * pch transcoder. */
1688		reg = TRANS_CHICKEN2(pipe);
1689		val = I915_READ(reg);
1690		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1691		I915_WRITE(reg, val);
1670 } 1692 }
1693
1671 reg = TRANSCONF(pipe); 1694 reg = TRANSCONF(pipe);
1672 val = I915_READ(reg); 1695 val = I915_READ(reg);
1673 pipeconf_val = I915_READ(PIPECONF(pipe)); 1696 pipeconf_val = I915_READ(PIPECONF(pipe));
@@ -1696,11 +1719,42 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1696 DRM_ERROR("failed to enable transcoder %d\n", pipe); 1719 DRM_ERROR("failed to enable transcoder %d\n", pipe);
1697} 1720}
1698 1721
1699static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
1700				    enum pipe pipe)
1701{
1702	int reg;
1703	u32 val;
1722static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1723				      enum transcoder cpu_transcoder)
1724{
1725	u32 val, pipeconf_val;
1726
1727 /* PCH only available on ILK+ */
1728 BUG_ON(dev_priv->info->gen < 5);
1729
1730 /* FDI must be feeding us bits for PCH ports */
1731 assert_fdi_tx_enabled(dev_priv, cpu_transcoder);
1732 assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
1733
1734 /* Workaround: set timing override bit. */
1735 val = I915_READ(_TRANSA_CHICKEN2);
1736 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1737 I915_WRITE(_TRANSA_CHICKEN2, val);
1738
1739 val = TRANS_ENABLE;
1740 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1741
1742 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1743 PIPECONF_INTERLACED_ILK)
1744 val |= TRANS_INTERLACED;
1745 else
1746 val |= TRANS_PROGRESSIVE;
1747
1748 I915_WRITE(TRANSCONF(TRANSCODER_A), val);
1749 if (wait_for(I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE, 100))
1750 DRM_ERROR("Failed to enable PCH transcoder\n");
1751}
1752
1753static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1754 enum pipe pipe)
1755{
1756 struct drm_device *dev = dev_priv->dev;
1757 uint32_t reg, val;
1704 1758
1705 /* FDI relies on the transcoder */ 1759 /* FDI relies on the transcoder */
1706 assert_fdi_tx_disabled(dev_priv, pipe); 1760 assert_fdi_tx_disabled(dev_priv, pipe);
@@ -1716,6 +1770,31 @@ static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
1716 /* wait for PCH transcoder off, transcoder state */ 1770 /* wait for PCH transcoder off, transcoder state */
1717 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) 1771 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1718 DRM_ERROR("failed to disable transcoder %d\n", pipe); 1772 DRM_ERROR("failed to disable transcoder %d\n", pipe);
1773
1774 if (!HAS_PCH_IBX(dev)) {
1775 /* Workaround: Clear the timing override chicken bit again. */
1776 reg = TRANS_CHICKEN2(pipe);
1777 val = I915_READ(reg);
1778 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1779 I915_WRITE(reg, val);
1780 }
1781}
1782
1783static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1784{
1785 u32 val;
1786
1787 val = I915_READ(_TRANSACONF);
1788 val &= ~TRANS_ENABLE;
1789 I915_WRITE(_TRANSACONF, val);
1790 /* wait for PCH transcoder off, transcoder state */
1791 if (wait_for((I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE) == 0, 50))
1792 DRM_ERROR("Failed to disable PCH transcoder\n");
1793
1794 /* Workaround: clear timing override bit. */
1795 val = I915_READ(_TRANSA_CHICKEN2);
1796 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1797 I915_WRITE(_TRANSA_CHICKEN2, val);
1719} 1798}
1720 1799
1721/** 1800/**
@@ -1735,9 +1814,17 @@ static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
1735static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, 1814static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1736 bool pch_port) 1815 bool pch_port)
1737{ 1816{
1817 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1818 pipe);
1819 enum transcoder pch_transcoder;
1738 int reg; 1820 int reg;
1739 u32 val; 1821 u32 val;
1740 1822
1823 if (IS_HASWELL(dev_priv->dev))
1824 pch_transcoder = TRANSCODER_A;
1825 else
1826 pch_transcoder = pipe;
1827
1741 /* 1828 /*
1742 * A pipe without a PLL won't actually be able to drive bits from 1829 * A pipe without a PLL won't actually be able to drive bits from
1743 * a plane. On ILK+ the pipe PLLs are integrated, so we don't 1830 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
@@ -1748,13 +1835,13 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1748 else { 1835 else {
1749 if (pch_port) { 1836 if (pch_port) {
1750 /* if driving the PCH, we need FDI enabled */ 1837 /* if driving the PCH, we need FDI enabled */
1751			assert_fdi_rx_pll_enabled(dev_priv, pipe);
1752			assert_fdi_tx_pll_enabled(dev_priv, pipe);
1838			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
1839			assert_fdi_tx_pll_enabled(dev_priv, cpu_transcoder);
1753 } 1840 }
1754 /* FIXME: assert CPU port conditions for SNB+ */ 1841 /* FIXME: assert CPU port conditions for SNB+ */
1755 } 1842 }
1756 1843
1757	reg = PIPECONF(pipe);
1844	reg = PIPECONF(cpu_transcoder);
1758 val = I915_READ(reg); 1845 val = I915_READ(reg);
1759 if (val & PIPECONF_ENABLE) 1846 if (val & PIPECONF_ENABLE)
1760 return; 1847 return;
@@ -1778,6 +1865,8 @@ static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1778static void intel_disable_pipe(struct drm_i915_private *dev_priv, 1865static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1779 enum pipe pipe) 1866 enum pipe pipe)
1780{ 1867{
1868 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1869 pipe);
1781 int reg; 1870 int reg;
1782 u32 val; 1871 u32 val;
1783 1872
@@ -1791,7 +1880,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1791 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) 1880 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1792 return; 1881 return;
1793 1882
1794	reg = PIPECONF(pipe);
1883	reg = PIPECONF(cpu_transcoder);
1795 val = I915_READ(reg); 1884 val = I915_READ(reg);
1796 if ((val & PIPECONF_ENABLE) == 0) 1885 if ((val & PIPECONF_ENABLE) == 0)
1797 return; 1886 return;
@@ -1807,8 +1896,10 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1807void intel_flush_display_plane(struct drm_i915_private *dev_priv, 1896void intel_flush_display_plane(struct drm_i915_private *dev_priv,
1808 enum plane plane) 1897 enum plane plane)
1809{ 1898{
1810	I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1811	I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1899	if (dev_priv->info->gen >= 4)
1900		I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1901 else
1902 I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1812} 1903}
1813 1904
1814/** 1905/**
@@ -1926,9 +2017,9 @@ void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
1926 2017
1927/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel 2018/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
1928 * is assumed to be a power-of-two. */ 2019 * is assumed to be a power-of-two. */
1929static unsigned long gen4_compute_dspaddr_offset_xtiled(int *x, int *y,
2020unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
1930 unsigned int bpp, 2021 unsigned int bpp,
1931 unsigned int pitch) 2022 unsigned int pitch)
1932{ 2023{
1933 int tile_rows, tiles; 2024 int tile_rows, tiles;
1934 2025
@@ -1969,24 +2060,38 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1969 dspcntr = I915_READ(reg); 2060 dspcntr = I915_READ(reg);
1970 /* Mask out pixel format bits in case we change it */ 2061 /* Mask out pixel format bits in case we change it */
1971 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; 2062 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
1972	switch (fb->bits_per_pixel) {
1973	case 8:
1974		dspcntr |= DISPPLANE_8BPP;
1975		break;
1976	case 16:
1977		if (fb->depth == 15)
1978			dspcntr |= DISPPLANE_15_16BPP;
1979		else
1980			dspcntr |= DISPPLANE_16BPP;
1981		break;
1982	case 24:
1983	case 32:
1984		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
1985		break;
1986	default:
1987		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
1988		return -EINVAL;
2063	switch (fb->pixel_format) {
2064	case DRM_FORMAT_C8:
2065		dspcntr |= DISPPLANE_8BPP;
2066		break;
2067	case DRM_FORMAT_XRGB1555:
2068	case DRM_FORMAT_ARGB1555:
2069		dspcntr |= DISPPLANE_BGRX555;
2070		break;
2071	case DRM_FORMAT_RGB565:
2072		dspcntr |= DISPPLANE_BGRX565;
2073		break;
2074	case DRM_FORMAT_XRGB8888:
2075	case DRM_FORMAT_ARGB8888:
2076		dspcntr |= DISPPLANE_BGRX888;
2077		break;
2078	case DRM_FORMAT_XBGR8888:
2079	case DRM_FORMAT_ABGR8888:
2080		dspcntr |= DISPPLANE_RGBX888;
2081		break;
2082	case DRM_FORMAT_XRGB2101010:
2083	case DRM_FORMAT_ARGB2101010:
2084		dspcntr |= DISPPLANE_BGRX101010;
2085		break;
2086	case DRM_FORMAT_XBGR2101010:
2087	case DRM_FORMAT_ABGR2101010:
2088		dspcntr |= DISPPLANE_RGBX101010;
2089		break;
2090	default:
2091		DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
2092		return -EINVAL;
1989 } 2093 }
2094
1990 if (INTEL_INFO(dev)->gen >= 4) { 2095 if (INTEL_INFO(dev)->gen >= 4) {
1991 if (obj->tiling_mode != I915_TILING_NONE) 2096 if (obj->tiling_mode != I915_TILING_NONE)
1992 dspcntr |= DISPPLANE_TILED; 2097 dspcntr |= DISPPLANE_TILED;
@@ -2000,9 +2105,9 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2000 2105
2001 if (INTEL_INFO(dev)->gen >= 4) { 2106 if (INTEL_INFO(dev)->gen >= 4) {
2002 intel_crtc->dspaddr_offset = 2107 intel_crtc->dspaddr_offset =
2003			gen4_compute_dspaddr_offset_xtiled(&x, &y,
2004							   fb->bits_per_pixel / 8,
2005							   fb->pitches[0]);
2108			intel_gen4_compute_offset_xtiled(&x, &y,
2109							 fb->bits_per_pixel / 8,
2110							 fb->pitches[0]);
2006 linear_offset -= intel_crtc->dspaddr_offset; 2111 linear_offset -= intel_crtc->dspaddr_offset;
2007 } else { 2112 } else {
2008 intel_crtc->dspaddr_offset = linear_offset; 2113 intel_crtc->dspaddr_offset = linear_offset;
@@ -2053,27 +2158,31 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
2053 dspcntr = I915_READ(reg); 2158 dspcntr = I915_READ(reg);
2054 /* Mask out pixel format bits in case we change it */ 2159 /* Mask out pixel format bits in case we change it */
2055 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; 2160 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2056	switch (fb->bits_per_pixel) {
2057	case 8:
2058		dspcntr |= DISPPLANE_8BPP;
2059		break;
2060	case 16:
2061		if (fb->depth != 16)
2062			return -EINVAL;
2063
2064		dspcntr |= DISPPLANE_16BPP;
2065		break;
2066	case 24:
2067	case 32:
2068		if (fb->depth == 24)
2069			dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
2070		else if (fb->depth == 30)
2071			dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
2072		else
2073			return -EINVAL;
2074		break;
2075	default:
2076		DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
2161	switch (fb->pixel_format) {
2162	case DRM_FORMAT_C8:
2163		dspcntr |= DISPPLANE_8BPP;
2164		break;
2165	case DRM_FORMAT_RGB565:
2166		dspcntr |= DISPPLANE_BGRX565;
2167		break;
2168	case DRM_FORMAT_XRGB8888:
2169	case DRM_FORMAT_ARGB8888:
2170		dspcntr |= DISPPLANE_BGRX888;
2171		break;
2172	case DRM_FORMAT_XBGR8888:
2173	case DRM_FORMAT_ABGR8888:
2174		dspcntr |= DISPPLANE_RGBX888;
2175		break;
2176	case DRM_FORMAT_XRGB2101010:
2177	case DRM_FORMAT_ARGB2101010:
2178		dspcntr |= DISPPLANE_BGRX101010;
2179		break;
2180	case DRM_FORMAT_XBGR2101010:
2181	case DRM_FORMAT_ABGR2101010:
2182		dspcntr |= DISPPLANE_RGBX101010;
2183		break;
2184	default:
2185		DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
2077 return -EINVAL; 2186 return -EINVAL;
2078 } 2187 }
2079 2188
@@ -2089,9 +2198,9 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
2089 2198
2090 linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); 2199 linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
2091 intel_crtc->dspaddr_offset = 2200 intel_crtc->dspaddr_offset =
2092		gen4_compute_dspaddr_offset_xtiled(&x, &y,
2093						   fb->bits_per_pixel / 8,
2094						   fb->pitches[0]);
2201		intel_gen4_compute_offset_xtiled(&x, &y,
2202						 fb->bits_per_pixel / 8,
2203						 fb->pitches[0]);
2095 linear_offset -= intel_crtc->dspaddr_offset; 2204 linear_offset -= intel_crtc->dspaddr_offset;
2096 2205
2097 DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n", 2206 DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
@@ -2099,8 +2208,12 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
2099 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); 2208 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2100 I915_MODIFY_DISPBASE(DSPSURF(plane), 2209 I915_MODIFY_DISPBASE(DSPSURF(plane),
2101 obj->gtt_offset + intel_crtc->dspaddr_offset); 2210 obj->gtt_offset + intel_crtc->dspaddr_offset);
2102	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2103	I915_WRITE(DSPLINOFF(plane), linear_offset);
2211	if (IS_HASWELL(dev)) {
2212		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2213 } else {
2214 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2215 I915_WRITE(DSPLINOFF(plane), linear_offset);
2216 }
2104 POSTING_READ(reg); 2217 POSTING_READ(reg);
2105 2218
2106 return 0; 2219 return 0;
@@ -2148,13 +2261,39 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
2148 return ret; 2261 return ret;
2149} 2262}
2150 2263
2264static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
2265{
2266 struct drm_device *dev = crtc->dev;
2267 struct drm_i915_master_private *master_priv;
2268 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2269
2270 if (!dev->primary->master)
2271 return;
2272
2273 master_priv = dev->primary->master->driver_priv;
2274 if (!master_priv->sarea_priv)
2275 return;
2276
2277 switch (intel_crtc->pipe) {
2278 case 0:
2279 master_priv->sarea_priv->pipeA_x = x;
2280 master_priv->sarea_priv->pipeA_y = y;
2281 break;
2282 case 1:
2283 master_priv->sarea_priv->pipeB_x = x;
2284 master_priv->sarea_priv->pipeB_y = y;
2285 break;
2286 default:
2287 break;
2288 }
2289}
2290
2151static int 2291static int
2152intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, 2292intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2153 struct drm_framebuffer *fb) 2293 struct drm_framebuffer *fb)
2154{ 2294{
2155 struct drm_device *dev = crtc->dev; 2295 struct drm_device *dev = crtc->dev;
2156 struct drm_i915_private *dev_priv = dev->dev_private; 2296 struct drm_i915_private *dev_priv = dev->dev_private;
2157 struct drm_i915_master_private *master_priv;
2158 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2297 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2159 struct drm_framebuffer *old_fb; 2298 struct drm_framebuffer *old_fb;
2160 int ret; 2299 int ret;
@@ -2206,20 +2345,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 	intel_update_fbc(dev);
 	mutex_unlock(&dev->struct_mutex);
 
-	if (!dev->primary->master)
-		return 0;
-
-	master_priv = dev->primary->master->driver_priv;
-	if (!master_priv->sarea_priv)
-		return 0;
-
-	if (intel_crtc->pipe) {
-		master_priv->sarea_priv->pipeB_x = x;
-		master_priv->sarea_priv->pipeB_y = y;
-	} else {
-		master_priv->sarea_priv->pipeA_x = x;
-		master_priv->sarea_priv->pipeA_y = y;
-	}
+	intel_crtc_update_sarea_pos(crtc, x, y);
 
 	return 0;
 }
@@ -2302,16 +2428,27 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
 		   FDI_FE_ERRC_ENABLE);
 }
 
-static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
+static void ivb_modeset_global_resources(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 flags = I915_READ(SOUTH_CHICKEN1);
+	struct intel_crtc *pipe_B_crtc =
+		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
+	struct intel_crtc *pipe_C_crtc =
+		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
+	uint32_t temp;
 
-	flags |= FDI_PHASE_SYNC_OVR(pipe);
-	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
-	flags |= FDI_PHASE_SYNC_EN(pipe);
-	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
-	POSTING_READ(SOUTH_CHICKEN1);
+	/* When everything is off disable fdi C so that we could enable fdi B
+	 * with all lanes. XXX: This misses the case where a pipe is not using
+	 * any pch resources and so doesn't need any fdi lanes. */
+	if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) {
+		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
+		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+
+		temp = I915_READ(SOUTH_CHICKEN1);
+		temp &= ~FDI_BC_BIFURCATION_SELECT;
+		DRM_DEBUG_KMS("disabling fdi C rx\n");
+		I915_WRITE(SOUTH_CHICKEN1, temp);
+	}
 }
 
 /* The FDI link training functions for ILK/Ibexpeak. */
@@ -2357,11 +2494,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 	udelay(150);
 
 	/* Ironlake workaround, enable clock pointer after FDI enable */
-	if (HAS_PCH_IBX(dev)) {
-		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
-		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
-			   FDI_RX_PHASE_SYNC_POINTER_EN);
-	}
+	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
+		   FDI_RX_PHASE_SYNC_POINTER_EN);
 
 	reg = FDI_RX_IIR(pipe);
 	for (tries = 0; tries < 5; tries++) {
@@ -2450,6 +2585,9 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
 		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
 	I915_WRITE(reg, temp | FDI_TX_ENABLE);
 
+	I915_WRITE(FDI_RX_MISC(pipe),
+		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
 	reg = FDI_RX_CTL(pipe);
 	temp = I915_READ(reg);
 	if (HAS_PCH_CPT(dev)) {
@@ -2464,9 +2602,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
 	POSTING_READ(reg);
 	udelay(150);
 
-	if (HAS_PCH_CPT(dev))
-		cpt_phase_pointer_enable(dev, pipe);
-
 	for (i = 0; i < 4; i++) {
 		reg = FDI_TX_CTL(pipe);
 		temp = I915_READ(reg);
@@ -2570,6 +2705,9 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
 	POSTING_READ(reg);
 	udelay(150);
 
+	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
+		      I915_READ(FDI_RX_IIR(pipe)));
+
 	/* enable CPU FDI TX and PCH FDI RX */
 	reg = FDI_TX_CTL(pipe);
 	temp = I915_READ(reg);
@@ -2582,6 +2720,9 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
 	temp |= FDI_COMPOSITE_SYNC;
 	I915_WRITE(reg, temp | FDI_TX_ENABLE);
 
+	I915_WRITE(FDI_RX_MISC(pipe),
+		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+
 	reg = FDI_RX_CTL(pipe);
 	temp = I915_READ(reg);
 	temp &= ~FDI_LINK_TRAIN_AUTO;
@@ -2593,9 +2734,6 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
 	POSTING_READ(reg);
 	udelay(150);
 
-	if (HAS_PCH_CPT(dev))
-		cpt_phase_pointer_enable(dev, pipe);
-
 	for (i = 0; i < 4; i++) {
 		reg = FDI_TX_CTL(pipe);
 		temp = I915_READ(reg);
@@ -2613,7 +2751,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
 		if (temp & FDI_RX_BIT_LOCK ||
 		    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
 			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
-			DRM_DEBUG_KMS("FDI train 1 done.\n");
+			DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i);
 			break;
 		}
 	}
@@ -2654,7 +2792,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
 
 		if (temp & FDI_RX_SYMBOL_LOCK) {
 			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
-			DRM_DEBUG_KMS("FDI train 2 done.\n");
+			DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i);
 			break;
 		}
 	}
@@ -2671,9 +2809,6 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
 	int pipe = intel_crtc->pipe;
 	u32 reg, temp;
 
-	/* Write the TU size bits so error detection works */
-	I915_WRITE(FDI_RX_TUSIZE1(pipe),
-		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
 
 	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
 	reg = FDI_RX_CTL(pipe);
@@ -2737,17 +2872,6 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
 	udelay(100);
 }
 
-static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 flags = I915_READ(SOUTH_CHICKEN1);
-
-	flags &= ~(FDI_PHASE_SYNC_EN(pipe));
-	I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
-	flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
-	I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
-	POSTING_READ(SOUTH_CHICKEN1);
-}
 static void ironlake_fdi_disable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -2774,11 +2898,6 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
 	/* Ironlake workaround, disable clock pointer after downing FDI */
 	if (HAS_PCH_IBX(dev)) {
 		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
-		I915_WRITE(FDI_RX_CHICKEN(pipe),
-			   I915_READ(FDI_RX_CHICKEN(pipe) &
-				     ~FDI_RX_PHASE_SYNC_POINTER_EN));
-	} else if (HAS_PCH_CPT(dev)) {
-		cpt_phase_pointer_disable(dev, pipe);
 	}
 
 	/* still set train pattern 1 */
@@ -2839,7 +2958,7 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 	mutex_unlock(&dev->struct_mutex);
 }
 
-static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
+static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	struct intel_encoder *intel_encoder;
@@ -2849,23 +2968,6 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
 	 * must be driven by its own crtc; no sharing is possible.
 	 */
 	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
-
-		/* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
-		 * CPU handles all others */
-		if (IS_HASWELL(dev)) {
-			/* It is still unclear how this will work on PPT, so throw up a warning */
-			WARN_ON(!HAS_PCH_LPT(dev));
-
-			if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
-				DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
-				return true;
-			} else {
-				DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
-					      intel_encoder->type);
-				return false;
-			}
-		}
-
 		switch (intel_encoder->type) {
 		case INTEL_OUTPUT_EDP:
 			if (!intel_encoder_is_pch_edp(&intel_encoder->base))
@@ -2877,6 +2979,11 @@ static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
 	return true;
 }
 
+static bool haswell_crtc_driving_pch(struct drm_crtc *crtc)
+{
+	return intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG);
+}
+
 /* Program iCLKIP clock to the desired frequency */
 static void lpt_program_iclkip(struct drm_crtc *crtc)
 {
@@ -2892,8 +2999,9 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
 
 	/* Disable SSCCTL */
 	intel_sbi_write(dev_priv, SBI_SSCCTL6,
-			intel_sbi_read(dev_priv, SBI_SSCCTL6) |
-			SBI_SSCCTL_DISABLE);
+			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
+			SBI_SSCCTL_DISABLE,
+			SBI_ICLK);
 
 	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
 	if (crtc->mode.clock == 20000) {
@@ -2934,33 +3042,25 @@ static void lpt_program_iclkip(struct drm_crtc *crtc)
 			phaseinc);
 
 	/* Program SSCDIVINTPHASE6 */
-	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
+	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
 	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
 	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
 	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
 	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
 	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
 	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
-
-	intel_sbi_write(dev_priv,
-			SBI_SSCDIVINTPHASE6,
-			temp);
+	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
 
 	/* Program SSCAUXDIV */
-	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6);
+	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
 	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
 	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
-	intel_sbi_write(dev_priv,
-			SBI_SSCAUXDIV6,
-			temp);
-
+	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
 
 	/* Enable modulator and associated divider */
-	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
+	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
 	temp &= ~SBI_SSCCTL_DISABLE;
-	intel_sbi_write(dev_priv,
-			SBI_SSCCTL6,
-			temp);
+	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
 
 	/* Wait for initialization time */
 	udelay(24);
@@ -2986,15 +3086,24 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
 
 	assert_transcoder_disabled(dev_priv, pipe);
 
+	/* Write the TU size bits before fdi link training, so that error
+	 * detection works. */
+	I915_WRITE(FDI_RX_TUSIZE1(pipe),
+		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+
 	/* For PCH output, training FDI link */
 	dev_priv->display.fdi_link_train(crtc);
 
-	intel_enable_pch_pll(intel_crtc);
+	/* XXX: pch pll's can be enabled any time before we enable the PCH
+	 * transcoder, and we actually should do this to not upset any PCH
+	 * transcoder that already uses the clock when we share it.
+	 *
+	 * Note that enable_pch_pll tries to do the right thing, but get_pch_pll
+	 * unconditionally resets the pll - we need that to have the right LVDS
+	 * enable sequence. */
+	ironlake_enable_pch_pll(intel_crtc);
 
-	if (HAS_PCH_LPT(dev)) {
-		DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n");
-		lpt_program_iclkip(crtc);
-	} else if (HAS_PCH_CPT(dev)) {
+	if (HAS_PCH_CPT(dev)) {
 		u32 sel;
 
 		temp = I915_READ(PCH_DPLL_SEL);
@@ -3031,8 +3140,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
 	I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
 	I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
 
-	if (!IS_HASWELL(dev))
-		intel_fdi_normal_train(crtc);
+	intel_fdi_normal_train(crtc);
 
 	/* For PCH DP, enable TRANS_DP_CTL */
 	if (HAS_PCH_CPT(dev) &&
@@ -3064,15 +3172,37 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
 			temp |= TRANS_DP_PORT_SEL_D;
 			break;
 		default:
-			DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
-			temp |= TRANS_DP_PORT_SEL_B;
-			break;
+			BUG();
 		}
 
 		I915_WRITE(reg, temp);
 	}
 
-	intel_enable_transcoder(dev_priv, pipe);
+	ironlake_enable_pch_transcoder(dev_priv, pipe);
+}
+
+static void lpt_pch_enable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+
+	assert_transcoder_disabled(dev_priv, TRANSCODER_A);
+
+	lpt_program_iclkip(crtc);
+
+	/* Set transcoder timing. */
+	I915_WRITE(_TRANS_HTOTAL_A, I915_READ(HTOTAL(cpu_transcoder)));
+	I915_WRITE(_TRANS_HBLANK_A, I915_READ(HBLANK(cpu_transcoder)));
+	I915_WRITE(_TRANS_HSYNC_A, I915_READ(HSYNC(cpu_transcoder)));
+
+	I915_WRITE(_TRANS_VTOTAL_A, I915_READ(VTOTAL(cpu_transcoder)));
+	I915_WRITE(_TRANS_VBLANK_A, I915_READ(VBLANK(cpu_transcoder)));
+	I915_WRITE(_TRANS_VSYNC_A, I915_READ(VSYNC(cpu_transcoder)));
+	I915_WRITE(_TRANS_VSYNCSHIFT_A, I915_READ(VSYNCSHIFT(cpu_transcoder)));
+
+	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
 }
 
 static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
@@ -3165,16 +3295,12 @@ prepare: /* separate function? */
 void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
+	int dslreg = PIPEDSL(pipe);
 	u32 temp;
 
 	temp = I915_READ(dslreg);
 	udelay(500);
 	if (wait_for(I915_READ(dslreg) != temp, 5)) {
-		/* Without this, mode sets may fail silently on FDI */
-		I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
-		udelay(250);
-		I915_WRITE(tc2reg, 0);
 		if (wait_for(I915_READ(dslreg) != temp, 5))
 			DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
 	}
@@ -3205,9 +3331,12 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 		I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
 	}
 
-	is_pch_port = intel_crtc_driving_pch(crtc);
+	is_pch_port = ironlake_crtc_driving_pch(crtc);
 
 	if (is_pch_port) {
+		/* Note: FDI PLL enabling _must_ be done before we enable the
+		 * cpu pipes, hence this is separate from all the other fdi/pch
+		 * enabling. */
 		ironlake_fdi_pll_enable(intel_crtc);
 	} else {
 		assert_fdi_tx_disabled(dev_priv, pipe);
@@ -3220,12 +3349,17 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 
 	/* Enable panel fitting for LVDS */
 	if (dev_priv->pch_pf_size &&
-	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
+	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
+	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
 		/* Force use of hard-coded filter coefficients
 		 * as some pre-programmed values are broken,
 		 * e.g. x201.
 		 */
-		I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
+		if (IS_IVYBRIDGE(dev))
+			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
+						 PF_PIPE_SEL_IVB(pipe));
+		else
+			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
 		I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
 		I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
 	}
@@ -3265,6 +3399,83 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
 	intel_wait_for_vblank(dev, intel_crtc->pipe);
 }
 
+static void haswell_crtc_enable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
+	int pipe = intel_crtc->pipe;
+	int plane = intel_crtc->plane;
+	bool is_pch_port;
+
+	WARN_ON(!crtc->enabled);
+
+	if (intel_crtc->active)
+		return;
+
+	intel_crtc->active = true;
+	intel_update_watermarks(dev);
+
+	is_pch_port = haswell_crtc_driving_pch(crtc);
+
+	if (is_pch_port)
+		dev_priv->display.fdi_link_train(crtc);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->pre_enable)
+			encoder->pre_enable(encoder);
+
+	intel_ddi_enable_pipe_clock(intel_crtc);
+
+	/* Enable panel fitting for eDP */
+	if (dev_priv->pch_pf_size &&
+	    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+		/* Force use of hard-coded filter coefficients
+		 * as some pre-programmed values are broken,
+		 * e.g. x201.
+		 */
+		I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
+					 PF_PIPE_SEL_IVB(pipe));
+		I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
+		I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
+	}
+
+	/*
+	 * On ILK+ LUT must be loaded before the pipe is running but with
+	 * clocks enabled
+	 */
+	intel_crtc_load_lut(crtc);
+
+	intel_ddi_set_pipe_settings(crtc);
+	intel_ddi_enable_pipe_func(crtc);
+
+	intel_enable_pipe(dev_priv, pipe, is_pch_port);
+	intel_enable_plane(dev_priv, plane, pipe);
+
+	if (is_pch_port)
+		lpt_pch_enable(crtc);
+
+	mutex_lock(&dev->struct_mutex);
+	intel_update_fbc(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	intel_crtc_update_cursor(crtc, true);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		encoder->enable(encoder);
+
+	/*
+	 * There seems to be a race in PCH platform hw (at least on some
+	 * outputs) where an enabled pipe still completes any pageflip right
+	 * away (as if the pipe is off) instead of waiting for vblank. As soon
+	 * as the first vblank happened, everything works as expected. Hence just
+	 * wait for one vblank before returning to avoid strange things
+	 * happening.
+	 */
+	intel_wait_for_vblank(dev, intel_crtc->pipe);
+}
+
 static void ironlake_crtc_disable(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -3303,7 +3514,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 
 	ironlake_fdi_disable(crtc);
 
-	intel_disable_transcoder(dev_priv, pipe);
+	ironlake_disable_pch_transcoder(dev_priv, pipe);
 
 	if (HAS_PCH_CPT(dev)) {
 		/* disable TRANS_DP_CTL */
@@ -3345,12 +3556,78 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
 	mutex_unlock(&dev->struct_mutex);
 }
 
+static void haswell_crtc_disable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_encoder *encoder;
+	int pipe = intel_crtc->pipe;
+	int plane = intel_crtc->plane;
+	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+	bool is_pch_port;
+
+	if (!intel_crtc->active)
+		return;
+
+	is_pch_port = haswell_crtc_driving_pch(crtc);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		encoder->disable(encoder);
+
+	intel_crtc_wait_for_pending_flips(crtc);
+	drm_vblank_off(dev, pipe);
+	intel_crtc_update_cursor(crtc, false);
+
+	intel_disable_plane(dev_priv, plane, pipe);
+
+	if (dev_priv->cfb_plane == plane)
+		intel_disable_fbc(dev);
+
+	intel_disable_pipe(dev_priv, pipe);
+
+	intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
+
+	/* Disable PF */
+	I915_WRITE(PF_CTL(pipe), 0);
+	I915_WRITE(PF_WIN_SZ(pipe), 0);
+
+	intel_ddi_disable_pipe_clock(intel_crtc);
+
+	for_each_encoder_on_crtc(dev, crtc, encoder)
+		if (encoder->post_disable)
+			encoder->post_disable(encoder);
+
+	if (is_pch_port) {
+		lpt_disable_pch_transcoder(dev_priv);
+		intel_ddi_fdi_disable(crtc);
+	}
+
+	intel_crtc->active = false;
+	intel_update_watermarks(dev);
+
+	mutex_lock(&dev->struct_mutex);
+	intel_update_fbc(dev);
+	mutex_unlock(&dev->struct_mutex);
+}
+
 static void ironlake_crtc_off(struct drm_crtc *crtc)
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	intel_put_pch_pll(intel_crtc);
 }
 
+static void haswell_crtc_off(struct drm_crtc *crtc)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	/* Stop saying we're using TRANSCODER_EDP because some other CRTC might
+	 * start using it. */
+	intel_crtc->cpu_transcoder = intel_crtc->pipe;
+
+	intel_ddi_put_crtc_pll(crtc);
+}
+
 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
 {
 	if (!enable && intel_crtc->overlay) {
@@ -4061,7 +4338,7 @@ static void vlv_update_pll(struct drm_crtc *crtc,
 			   struct drm_display_mode *mode,
 			   struct drm_display_mode *adjusted_mode,
 			   intel_clock_t *clock, intel_clock_t *reduced_clock,
-			   int refclk, int num_connectors)
+			   int num_connectors)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4069,9 +4346,19 @@ static void vlv_update_pll(struct drm_crtc *crtc,
 	int pipe = intel_crtc->pipe;
 	u32 dpll, mdiv, pdiv;
 	u32 bestn, bestm1, bestm2, bestp1, bestp2;
-	bool is_hdmi;
+	bool is_sdvo;
+	u32 temp;
+
+	is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
+		intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
+
+	dpll = DPLL_VGA_MODE_DIS;
+	dpll |= DPLL_EXT_BUFFER_ENABLE_VLV;
+	dpll |= DPLL_REFA_CLK_ENABLE_VLV;
+	dpll |= DPLL_INTEGRATED_CLOCK_VLV;
 
-	is_hdmi = intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
+	I915_WRITE(DPLL(pipe), dpll);
+	POSTING_READ(DPLL(pipe));
 
 	bestn = clock->n;
 	bestm1 = clock->m1;
@@ -4079,12 +4366,10 @@ static void vlv_update_pll(struct drm_crtc *crtc,
 	bestp1 = clock->p1;
 	bestp2 = clock->p2;
 
-	/* Enable DPIO clock input */
-	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
-		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
-	I915_WRITE(DPLL(pipe), dpll);
-	POSTING_READ(DPLL(pipe));
-
+	/*
+	 * In Valleyview, the PLL and program lane counter registers are
+	 * exposed through the DPIO interface.
+	 */
 	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
 	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
 	mdiv |= ((bestn << DPIO_N_SHIFT));
@@ -4095,12 +4380,13 @@ static void vlv_update_pll(struct drm_crtc *crtc,
 
 	intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000);
 
-	pdiv = DPIO_REFSEL_OVERRIDE | (5 << DPIO_PLL_MODESEL_SHIFT) |
+	pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) |
 		(3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) |
-		(8 << DPIO_DRIVER_CTL_SHIFT) | (5 << DPIO_CLK_BIAS_CTL_SHIFT);
+		(7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) |
+		(5 << DPIO_CLK_BIAS_CTL_SHIFT);
 	intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv);
 
-	intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x009f0051);
+	intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b);
 
 	dpll |= DPLL_VCO_ENABLE;
 	I915_WRITE(DPLL(pipe), dpll);
@@ -4108,19 +4394,44 @@ static void vlv_update_pll(struct drm_crtc *crtc,
 	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
 		DRM_ERROR("DPLL %d failed to lock\n", pipe);
 
-	if (is_hdmi) {
-		u32 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
+	intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620);
 
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+		intel_dp_set_m_n(crtc, mode, adjusted_mode);
+
+	I915_WRITE(DPLL(pipe), dpll);
+
+	/* Wait for the clocks to stabilize. */
+	POSTING_READ(DPLL(pipe));
+	udelay(150);
+
+	temp = 0;
+	if (is_sdvo) {
+		temp = intel_mode_get_pixel_multiplier(adjusted_mode);
 		if (temp > 1)
 			temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
 		else
 			temp = 0;
-
-		I915_WRITE(DPLL_MD(pipe), temp);
-		POSTING_READ(DPLL_MD(pipe));
 	}
+	I915_WRITE(DPLL_MD(pipe), temp);
+	POSTING_READ(DPLL_MD(pipe));
 
-	intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x641); /* ??? */
+	/* Now program lane control registers */
+	if(intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)
+	   || intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
+	{
+		temp = 0x1000C4;
+		if(pipe == 1)
+			temp |= (1 << 21);
+		intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp);
+	}
+	if(intel_pipe_has_type(crtc,INTEL_OUTPUT_EDP))
+	{
+		temp = 0x1000C4;
+		if(pipe == 1)
+			temp |= (1 << 21);
+		intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
+	}
 }
 
 static void i9xx_update_pll(struct drm_crtc *crtc,
@@ -4136,6 +4447,8 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
 	u32 dpll;
 	bool is_sdvo;
 
+	i9xx_update_pll_dividers(crtc, clock, reduced_clock);
+
 	is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
 		intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
 
@@ -4236,7 +4549,7 @@ static void i9xx_update_pll(struct drm_crtc *crtc,
 
 static void i8xx_update_pll(struct drm_crtc *crtc,
 			    struct drm_display_mode *adjusted_mode,
-			    intel_clock_t *clock,
+			    intel_clock_t *clock, intel_clock_t *reduced_clock,
 			    int num_connectors)
 {
 	struct drm_device *dev = crtc->dev;
@@ -4245,6 +4558,8 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
 	int pipe = intel_crtc->pipe;
 	u32 dpll;
 
+	i9xx_update_pll_dividers(crtc, clock, reduced_clock);
+
 	dpll = DPLL_VGA_MODE_DIS;
 
 	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
@@ -4294,6 +4609,64 @@ static void i8xx_update_pll(struct drm_crtc *crtc,
 	I915_WRITE(DPLL(pipe), dpll);
 }
 
+static void intel_set_pipe_timings(struct intel_crtc *intel_crtc,
+				   struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum pipe pipe = intel_crtc->pipe;
+	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+	uint32_t vsyncshift;
+
+	if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		/* the chip adds 2 halflines automatically */
+		adjusted_mode->crtc_vtotal -= 1;
+		adjusted_mode->crtc_vblank_end -= 1;
+		vsyncshift = adjusted_mode->crtc_hsync_start
+			- adjusted_mode->crtc_htotal / 2;
+	} else {
+		vsyncshift = 0;
+	}
+
+	if (INTEL_INFO(dev)->gen > 3)
+		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
+
+	I915_WRITE(HTOTAL(cpu_transcoder),
+		   (adjusted_mode->crtc_hdisplay - 1) |
+		   ((adjusted_mode->crtc_htotal - 1) << 16));
+	I915_WRITE(HBLANK(cpu_transcoder),
+		   (adjusted_mode->crtc_hblank_start - 1) |
+		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
+	I915_WRITE(HSYNC(cpu_transcoder),
+		   (adjusted_mode->crtc_hsync_start - 1) |
+		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
+
+	I915_WRITE(VTOTAL(cpu_transcoder),
+		   (adjusted_mode->crtc_vdisplay - 1) |
+		   ((adjusted_mode->crtc_vtotal - 1) << 16));
+	I915_WRITE(VBLANK(cpu_transcoder),
+		   (adjusted_mode->crtc_vblank_start - 1) |
+		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
+	I915_WRITE(VSYNC(cpu_transcoder),
+		   (adjusted_mode->crtc_vsync_start - 1) |
+		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
+
+	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
+	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
+	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
+	 * bits. */
+	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
+	    (pipe == PIPE_B || pipe == PIPE_C))
+		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
+
+	/* pipesrc controls the size that is scaled from, which should
+	 * always be the user's requested size.
+	 */
+	I915_WRITE(PIPESRC(pipe),
+		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+}
+
 static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 			      struct drm_display_mode *mode,
 			      struct drm_display_mode *adjusted_mode,
@@ -4307,7 +4680,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 	int plane = intel_crtc->plane;
 	int refclk, num_connectors = 0;
 	intel_clock_t clock, reduced_clock;
-	u32 dspcntr, pipeconf, vsyncshift;
+	u32 dspcntr, pipeconf;
 	bool ok, has_reduced_clock = false, is_sdvo = false;
 	bool is_lvds = false, is_tv = false, is_dp = false;
 	struct intel_encoder *encoder;
@@ -4371,14 +4744,14 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 	if (is_sdvo && is_tv)
 		i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
 
-	i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
-				 &reduced_clock : NULL);
-
 	if (IS_GEN2(dev))
-		i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors);
+		i8xx_update_pll(crtc, adjusted_mode, &clock,
+				has_reduced_clock ? &reduced_clock : NULL,
+				num_connectors);
 	else if (IS_VALLEYVIEW(dev))
-		vlv_update_pll(crtc, mode,adjusted_mode, &clock, NULL,
-			       refclk, num_connectors);
+		vlv_update_pll(crtc, mode, adjusted_mode, &clock,
+			       has_reduced_clock ? &reduced_clock : NULL,
+			       num_connectors);
 	else
 		i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
 				has_reduced_clock ? &reduced_clock : NULL,
@@ -4419,6 +4792,14 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 		}
 	}
 
+	if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+		if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+			pipeconf |= PIPECONF_BPP_6 |
+				    PIPECONF_ENABLE |
+				    I965_PIPECONF_ACTIVE;
+		}
+	}
+
 	DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
 	drm_mode_debug_printmodeline(mode);
 
@@ -4434,40 +4815,12 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 
 	pipeconf &= ~PIPECONF_INTERLACE_MASK;
 	if (!IS_GEN2(dev) &&
-	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
 		pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
-		/* the chip adds 2 halflines automatically */
-		adjusted_mode->crtc_vtotal -= 1;
-		adjusted_mode->crtc_vblank_end -= 1;
-		vsyncshift = adjusted_mode->crtc_hsync_start
-			     - adjusted_mode->crtc_htotal/2;
-	} else {
+	else
 		pipeconf |= PIPECONF_PROGRESSIVE;
-		vsyncshift = 0;
-	}
 
-	if (!IS_GEN3(dev))
-		I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
-
-	I915_WRITE(HTOTAL(pipe),
-		   (adjusted_mode->crtc_hdisplay - 1) |
-		   ((adjusted_mode->crtc_htotal - 1) << 16));
-	I915_WRITE(HBLANK(pipe),
-		   (adjusted_mode->crtc_hblank_start - 1) |
-		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
-	I915_WRITE(HSYNC(pipe),
-		   (adjusted_mode->crtc_hsync_start - 1) |
-		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
-
-	I915_WRITE(VTOTAL(pipe),
-		   (adjusted_mode->crtc_vdisplay - 1) |
-		   ((adjusted_mode->crtc_vtotal - 1) << 16));
-	I915_WRITE(VBLANK(pipe),
-		   (adjusted_mode->crtc_vblank_start - 1) |
-		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
-	I915_WRITE(VSYNC(pipe),
-		   (adjusted_mode->crtc_vsync_start - 1) |
-		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
+	intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
 
 	/* pipesrc and dspsize control the size that is scaled from,
 	 * which should always be the user's requested size.
@@ -4476,8 +4829,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 		   ((mode->vdisplay - 1) << 16) |
 		   (mode->hdisplay - 1));
 	I915_WRITE(DSPPOS(plane), 0);
-	I915_WRITE(PIPESRC(pipe),
-		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
 
 	I915_WRITE(PIPECONF(pipe), pipeconf);
 	POSTING_READ(PIPECONF(pipe));
@@ -4495,10 +4846,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
 	return ret;
 }
 
-/*
- * Initialize reference clocks when the driver loads
- */
-void ironlake_init_pch_refclk(struct drm_device *dev)
+static void ironlake_init_pch_refclk(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_mode_config *mode_config = &dev->mode_config;
@@ -4612,6 +4960,182 @@ void ironlake_init_pch_refclk(struct drm_device *dev)
 	}
 }
 
+/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
+static void lpt_init_pch_refclk(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct intel_encoder *encoder;
+	bool has_vga = false;
+	bool is_sdv = false;
+	u32 tmp;
+
+	list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+		switch (encoder->type) {
+		case INTEL_OUTPUT_ANALOG:
+			has_vga = true;
+			break;
+		}
+	}
+
+	if (!has_vga)
+		return;
+
+	/* XXX: Rip out SDV support once Haswell ships for real. */
+	if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
+		is_sdv = true;
+
+	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+	tmp &= ~SBI_SSCCTL_DISABLE;
+	tmp |= SBI_SSCCTL_PATHALT;
+	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+	udelay(24);
+
+	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+	tmp &= ~SBI_SSCCTL_PATHALT;
+	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+	if (!is_sdv) {
+		tmp = I915_READ(SOUTH_CHICKEN2);
+		tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
+		I915_WRITE(SOUTH_CHICKEN2, tmp);
+
+		if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
+				       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+			DRM_ERROR("FDI mPHY reset assert timeout\n");
+
+		tmp = I915_READ(SOUTH_CHICKEN2);
+		tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
+		I915_WRITE(SOUTH_CHICKEN2, tmp);
+
+		if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
+					FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
+				       100))
+			DRM_ERROR("FDI mPHY reset de-assert timeout\n");
+	}
+
+	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
+	tmp &= ~(0xFF << 24);
+	tmp |= (0x12 << 24);
+	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
+
+	if (!is_sdv) {
+		tmp = intel_sbi_read(dev_priv, 0x808C, SBI_MPHY);
+		tmp &= ~(0x3 << 6);
+		tmp |= (1 << 6) | (1 << 0);
+		intel_sbi_write(dev_priv, 0x808C, tmp, SBI_MPHY);
+	}
+
+	if (is_sdv) {
+		tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
+		tmp |= 0x7FFF;
+		intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
+	}
+
+	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
+	tmp |= (1 << 11);
+	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
+	tmp |= (1 << 11);
+	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
+
+	if (is_sdv) {
+		tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
+		tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
+		intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);
+
+		tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
+		tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
+		intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);
+
+		tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
+		tmp |= (0x3F << 8);
+		intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);
+
+		tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
+		tmp |= (0x3F << 8);
+		intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
+	}
+
+	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
+	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
+	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
+
+	if (!is_sdv) {
+		tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+		tmp &= ~(7 << 13);
+		tmp |= (5 << 13);
+		intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+
+		tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+		tmp &= ~(7 << 13);
+		tmp |= (5 << 13);
+		intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
+	}
+
+	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
+	tmp &= ~0xFF;
+	tmp |= 0x1C;
+	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
+	tmp &= ~0xFF;
+	tmp |= 0x1C;
+	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
+	tmp &= ~(0xFF << 16);
+	tmp |= (0x1C << 16);
+	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
+
+	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
+	tmp &= ~(0xFF << 16);
+	tmp |= (0x1C << 16);
+	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
+
+	if (!is_sdv) {
+		tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+		tmp |= (1 << 27);
+		intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+
+		tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+		tmp |= (1 << 27);
+		intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+
+		tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+		tmp &= ~(0xF << 28);
+		tmp |= (4 << 28);
+		intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+
+		tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+		tmp &= ~(0xF << 28);
+		tmp |= (4 << 28);
+		intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+	}
+
+	/* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
+	tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
+	tmp |= SBI_DBUFF0_ENABLE;
+	intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
+}
+
+/*
+ * Initialize reference clocks when the driver loads
+ */
+void intel_init_pch_refclk(struct drm_device *dev)
+{
+	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+		ironlake_init_pch_refclk(dev);
+	else if (HAS_PCH_LPT(dev))
+		lpt_init_pch_refclk(dev);
+}
+
 static int ironlake_get_refclk(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -4668,8 +5192,8 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
 		val |= PIPE_12BPC;
 		break;
 	default:
-		val |= PIPE_8BPC;
-		break;
+		/* Case prevented by intel_choose_pipe_bpp_dither. */
+		BUG();
 	}
 
 	val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
@@ -4686,6 +5210,31 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
 	POSTING_READ(PIPECONF(pipe));
 }
 
+static void haswell_set_pipeconf(struct drm_crtc *crtc,
+				 struct drm_display_mode *adjusted_mode,
+				 bool dither)
+{
+	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+	uint32_t val;
+
+	val = I915_READ(PIPECONF(cpu_transcoder));
+
+	val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
+	if (dither)
+		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+
+	val &= ~PIPECONF_INTERLACE_MASK_HSW;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+		val |= PIPECONF_INTERLACED_ILK;
+	else
+		val |= PIPECONF_PROGRESSIVE;
+
+	I915_WRITE(PIPECONF(cpu_transcoder), val);
+	POSTING_READ(PIPECONF(cpu_transcoder));
+}
+
 static bool ironlake_compute_clocks(struct drm_crtc *crtc,
 				    struct drm_display_mode *adjusted_mode,
 				    intel_clock_t *clock,
@@ -4749,74 +5298,126 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
 	return true;
 }
 
-static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
-				  struct drm_display_mode *mode,
-				  struct drm_display_mode *adjusted_mode,
-				  int x, int y,
-				  struct drm_framebuffer *fb)
+static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t temp;
+
+	temp = I915_READ(SOUTH_CHICKEN1);
+	if (temp & FDI_BC_BIFURCATION_SELECT)
+		return;
+
+	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
+	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+
+	temp |= FDI_BC_BIFURCATION_SELECT;
+	DRM_DEBUG_KMS("enabling fdi C rx\n");
+	I915_WRITE(SOUTH_CHICKEN1, temp);
+	POSTING_READ(SOUTH_CHICKEN1);
+}
+
+static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
+{
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *pipe_B_crtc =
+		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
+
+	DRM_DEBUG_KMS("checking fdi config on pipe %i, lanes %i\n",
+		      intel_crtc->pipe, intel_crtc->fdi_lanes);
+	if (intel_crtc->fdi_lanes > 4) {
+		DRM_DEBUG_KMS("invalid fdi lane config on pipe %i: %i lanes\n",
+			      intel_crtc->pipe, intel_crtc->fdi_lanes);
+		/* Clamp lanes to avoid programming the hw with bogus values. */
+		intel_crtc->fdi_lanes = 4;
+
+		return false;
+	}
+
+	if (dev_priv->num_pipe == 2)
+		return true;
+
+	switch (intel_crtc->pipe) {
+	case PIPE_A:
+		return true;
+	case PIPE_B:
+		if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
+		    intel_crtc->fdi_lanes > 2) {
+			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
+				      intel_crtc->pipe, intel_crtc->fdi_lanes);
+			/* Clamp lanes to avoid programming the hw with bogus values. */
+			intel_crtc->fdi_lanes = 2;
+
+			return false;
+		}
+
+		if (intel_crtc->fdi_lanes > 2)
+			WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
+		else
+			cpt_enable_fdi_bc_bifurcation(dev);
+
+		return true;
+	case PIPE_C:
+		if (!pipe_B_crtc->base.enabled || pipe_B_crtc->fdi_lanes <= 2) {
+			if (intel_crtc->fdi_lanes > 2) {
+				DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
+					      intel_crtc->pipe, intel_crtc->fdi_lanes);
+				/* Clamp lanes to avoid programming the hw with bogus values. */
+				intel_crtc->fdi_lanes = 2;
+
+				return false;
+			}
+		} else {
+			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
+			return false;
+		}
+
+		cpt_enable_fdi_bc_bifurcation(dev);
+
+		return true;
+	default:
+		BUG();
+	}
+}
+
+int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
+{
+	/*
+	 * Account for spread spectrum to avoid
+	 * oversubscribing the link. Max center spread
+	 * is 2.5%; use 5% for safety's sake.
+	 */
+	u32 bps = target_clock * bpp * 21 / 20;
+	return bps / (link_bw * 8) + 1;
+}
+
+static void ironlake_set_m_n(struct drm_crtc *crtc,
+			     struct drm_display_mode *mode,
+			     struct drm_display_mode *adjusted_mode)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
-	int plane = intel_crtc->plane;
-	int num_connectors = 0;
-	intel_clock_t clock, reduced_clock;
-	u32 dpll, fp = 0, fp2 = 0;
-	bool ok, has_reduced_clock = false, is_sdvo = false;
-	bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
-	struct intel_encoder *encoder, *edp_encoder = NULL;
-	int ret;
+	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
+	struct intel_encoder *intel_encoder, *edp_encoder = NULL;
 	struct fdi_m_n m_n = {0};
-	u32 temp;
-	int target_clock, pixel_multiplier, lane, link_bw, factor;
-	unsigned int pipe_bpp;
-	bool dither;
-	bool is_cpu_edp = false, is_pch_edp = false;
+	int target_clock, pixel_multiplier, lane, link_bw;
+	bool is_dp = false, is_cpu_edp = false;
 
-	for_each_encoder_on_crtc(dev, crtc, encoder) {
-		switch (encoder->type) {
-		case INTEL_OUTPUT_LVDS:
-			is_lvds = true;
-			break;
-		case INTEL_OUTPUT_SDVO:
-		case INTEL_OUTPUT_HDMI:
-			is_sdvo = true;
-			if (encoder->needs_tv_clock)
-				is_tv = true;
-			break;
-		case INTEL_OUTPUT_TVOUT:
-			is_tv = true;
-			break;
-		case INTEL_OUTPUT_ANALOG:
-			is_crt = true;
-			break;
+	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+		switch (intel_encoder->type) {
 		case INTEL_OUTPUT_DISPLAYPORT:
 			is_dp = true;
 			break;
 		case INTEL_OUTPUT_EDP:
 			is_dp = true;
-			if (intel_encoder_is_pch_edp(&encoder->base))
-				is_pch_edp = true;
-			else
+			if (!intel_encoder_is_pch_edp(&intel_encoder->base))
 				is_cpu_edp = true;
-			edp_encoder = encoder;
+			edp_encoder = intel_encoder;
 			break;
 		}
-
-		num_connectors++;
 	}
 
-	ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
-				     &has_reduced_clock, &reduced_clock);
-	if (!ok) {
-		DRM_ERROR("Couldn't find PLL settings for mode!\n");
-		return -EINVAL;
-	}
-
-	/* Ensure that the cursor is valid for the new mode before changing... */
-	intel_crtc_update_cursor(crtc, true);
-
 	/* FDI link */
 	pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
 	lane = 0;
@@ -4843,29 +5444,9 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 	else
 		target_clock = adjusted_mode->clock;
 
-	/* determine panel color depth */
-	dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp,
-					      adjusted_mode);
-	if (is_lvds && dev_priv->lvds_dither)
-		dither = true;
-
-	if (pipe_bpp != 18 && pipe_bpp != 24 && pipe_bpp != 30 &&
-	    pipe_bpp != 36) {
-		WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
-		     pipe_bpp);
-		pipe_bpp = 24;
-	}
-	intel_crtc->bpp = pipe_bpp;
-
-	if (!lane) {
-		/*
-		 * Account for spread spectrum to avoid
-		 * oversubscribing the link. Max center spread
-		 * is 2.5%; use 5% for safety's sake.
-		 */
-		u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
-		lane = bps / (link_bw * 8) + 1;
-	}
+	if (!lane)
+		lane = ironlake_get_lanes_required(target_clock, link_bw,
+						   intel_crtc->bpp);
 
 	intel_crtc->fdi_lanes = lane;
 
@@ -4874,10 +5455,51 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 	ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
 			     &m_n);
 
-	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
-	if (has_reduced_clock)
-		fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
-			reduced_clock.m2;
+	I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m);
+	I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
+	I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
+	I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
+}
+
+static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
+				      struct drm_display_mode *adjusted_mode,
+				      intel_clock_t *clock, u32 fp)
+{
+	struct drm_crtc *crtc = &intel_crtc->base;
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_encoder *intel_encoder;
+	uint32_t dpll;
+	int factor, pixel_multiplier, num_connectors = 0;
+	bool is_lvds = false, is_sdvo = false, is_tv = false;
+	bool is_dp = false, is_cpu_edp = false;
+
+	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+		switch (intel_encoder->type) {
+		case INTEL_OUTPUT_LVDS:
+			is_lvds = true;
+			break;
+		case INTEL_OUTPUT_SDVO:
+		case INTEL_OUTPUT_HDMI:
+			is_sdvo = true;
+			if (intel_encoder->needs_tv_clock)
+				is_tv = true;
+			break;
+		case INTEL_OUTPUT_TVOUT:
+			is_tv = true;
+			break;
+		case INTEL_OUTPUT_DISPLAYPORT:
+			is_dp = true;
+			break;
+		case INTEL_OUTPUT_EDP:
+			is_dp = true;
+			if (!intel_encoder_is_pch_edp(&intel_encoder->base))
+				is_cpu_edp = true;
+			break;
+		}
+
+		num_connectors++;
+	}
 
 	/* Enable autotuning of the PLL clock (if permissible) */
 	factor = 21;
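For context, the M/N pair that the new write-out above programs encodes the
ratio of pixel bandwidth to link bandwidth. A hedged sketch of how such a pair
could be derived (this mirrors the shape of the driver's ironlake_compute_m_n(),
but the names and the 24-bit reduction limit are assumptions):

	#include <stdint.h>

	struct dp_m_n { uint32_t m, n; };

	static void reduce_ratio(uint32_t *num, uint32_t *den)
	{
		/* shrink both terms until they fit the register field */
		while (*num > 0xffffff || *den > 0xffffff) {
			*num >>= 1;
			*den >>= 1;
		}
	}

	static struct dp_m_n compute_data_m_n(int bits_per_pixel, int nlanes,
					      int pixel_clock_khz,
					      int link_clock_khz)
	{
		struct dp_m_n m_n = {
			.m = bits_per_pixel * pixel_clock_khz, /* payload */
			.n = link_clock_khz * nlanes * 8,      /* capacity */
		};

		reduce_ratio(&m_n.m, &m_n.n);
		return m_n;
	}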
@@ -4889,7 +5511,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 	} else if (is_sdvo && is_tv)
 		factor = 20;
 
-	if (clock.m < factor * clock.n)
+	if (clock->m < factor * clock->n)
 		fp |= FP_CB_TUNE;
 
 	dpll = 0;
@@ -4899,7 +5521,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 	else
 		dpll |= DPLLB_MODE_DAC_SERIAL;
 	if (is_sdvo) {
-		int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
+		pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
 		if (pixel_multiplier > 1) {
 			dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
 		}
@@ -4909,11 +5531,11 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 		dpll |= DPLL_DVO_HIGH_SPEED;
 
 	/* compute bitmask from p1 value */
-	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+	dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
 	/* also FPA1 */
-	dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+	dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
 
-	switch (clock.p2) {
+	switch (clock->p2) {
 	case 5:
 		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
 		break;
@@ -4939,15 +5561,79 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 	else
 		dpll |= PLL_REF_INPUT_DREFCLK;
 
+	return dpll;
+}
+
+static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
+				  struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode,
+				  int x, int y,
+				  struct drm_framebuffer *fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	int plane = intel_crtc->plane;
+	int num_connectors = 0;
+	intel_clock_t clock, reduced_clock;
+	u32 dpll, fp = 0, fp2 = 0;
+	bool ok, has_reduced_clock = false;
+	bool is_lvds = false, is_dp = false, is_cpu_edp = false;
+	struct intel_encoder *encoder;
+	u32 temp;
+	int ret;
+	bool dither, fdi_config_ok;
+
+	for_each_encoder_on_crtc(dev, crtc, encoder) {
+		switch (encoder->type) {
+		case INTEL_OUTPUT_LVDS:
+			is_lvds = true;
+			break;
+		case INTEL_OUTPUT_DISPLAYPORT:
+			is_dp = true;
+			break;
+		case INTEL_OUTPUT_EDP:
+			is_dp = true;
+			if (!intel_encoder_is_pch_edp(&encoder->base))
+				is_cpu_edp = true;
+			break;
+		}
+
+		num_connectors++;
+	}
+
+	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
+	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
+
+	ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
+				     &has_reduced_clock, &reduced_clock);
+	if (!ok) {
+		DRM_ERROR("Couldn't find PLL settings for mode!\n");
+		return -EINVAL;
+	}
+
+	/* Ensure that the cursor is valid for the new mode before changing... */
+	intel_crtc_update_cursor(crtc, true);
+
+	/* determine panel color depth */
+	dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
+					      adjusted_mode);
+	if (is_lvds && dev_priv->lvds_dither)
+		dither = true;
+
+	fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+	if (has_reduced_clock)
+		fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
+			reduced_clock.m2;
+
+	dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, fp);
+
 	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
 	drm_mode_debug_printmodeline(mode);
 
-	/* CPU eDP is the only output that doesn't need a PCH PLL of its own on
-	 * pre-Haswell/LPT generation */
-	if (HAS_PCH_LPT(dev)) {
-		DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n",
-			      pipe);
-	} else if (!is_cpu_edp) {
+	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
+	if (!is_cpu_edp) {
 		struct intel_pch_pll *pll;
 
 		pll = intel_get_pch_pll(intel_crtc, dpll, fp);
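The fp value handed to ironlake_compute_dpll() above is just the PLL divisors
packed into one register image, N in bits 23:16, M1 in 15:8 and M2 in 7:0, as
the fp = clock.n << 16 | ... line computes. A trivial restatement:

	#include <stdint.h>

	static uint32_t pack_fp(uint32_t n, uint32_t m1, uint32_t m2)
	{
		/* N[23:16] | M1[15:8] | M2[7:0], matching the diff above */
		return n << 16 | m1 << 8 | m2;
	}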
@@ -5033,47 +5719,13 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 		}
 	}
 
-	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
-		/* the chip adds 2 halflines automatically */
-		adjusted_mode->crtc_vtotal -= 1;
-		adjusted_mode->crtc_vblank_end -= 1;
-		I915_WRITE(VSYNCSHIFT(pipe),
-			   adjusted_mode->crtc_hsync_start
-			   - adjusted_mode->crtc_htotal/2);
-	} else {
-		I915_WRITE(VSYNCSHIFT(pipe), 0);
-	}
-
-	I915_WRITE(HTOTAL(pipe),
-		   (adjusted_mode->crtc_hdisplay - 1) |
-		   ((adjusted_mode->crtc_htotal - 1) << 16));
-	I915_WRITE(HBLANK(pipe),
-		   (adjusted_mode->crtc_hblank_start - 1) |
-		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
-	I915_WRITE(HSYNC(pipe),
-		   (adjusted_mode->crtc_hsync_start - 1) |
-		   ((adjusted_mode->crtc_hsync_end - 1) << 16));
-
-	I915_WRITE(VTOTAL(pipe),
-		   (adjusted_mode->crtc_vdisplay - 1) |
-		   ((adjusted_mode->crtc_vtotal - 1) << 16));
-	I915_WRITE(VBLANK(pipe),
-		   (adjusted_mode->crtc_vblank_start - 1) |
-		   ((adjusted_mode->crtc_vblank_end - 1) << 16));
-	I915_WRITE(VSYNC(pipe),
-		   (adjusted_mode->crtc_vsync_start - 1) |
-		   ((adjusted_mode->crtc_vsync_end - 1) << 16));
+	intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
 
-	/* pipesrc controls the size that is scaled from, which should
-	 * always be the user's requested size.
-	 */
-	I915_WRITE(PIPESRC(pipe),
-		   ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
+	/* Note, this also computes intel_crtc->fdi_lanes which is used below in
+	 * ironlake_check_fdi_lanes. */
+	ironlake_set_m_n(crtc, mode, adjusted_mode);
 
-	I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
-	I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
-	I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
-	I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
+	fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
 
 	if (is_cpu_edp)
 		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
@@ -5092,6 +5744,217 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 
 	intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
 
+	return fdi_config_ok ? ret : -EINVAL;
+}
+
+static int haswell_crtc_mode_set(struct drm_crtc *crtc,
+				 struct drm_display_mode *mode,
+				 struct drm_display_mode *adjusted_mode,
+				 int x, int y,
+				 struct drm_framebuffer *fb)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	int plane = intel_crtc->plane;
+	int num_connectors = 0;
+	intel_clock_t clock, reduced_clock;
+	u32 dpll = 0, fp = 0, fp2 = 0;
+	bool ok, has_reduced_clock = false;
+	bool is_lvds = false, is_dp = false, is_cpu_edp = false;
+	struct intel_encoder *encoder;
+	u32 temp;
+	int ret;
+	bool dither;
+
+	for_each_encoder_on_crtc(dev, crtc, encoder) {
+		switch (encoder->type) {
+		case INTEL_OUTPUT_LVDS:
+			is_lvds = true;
+			break;
+		case INTEL_OUTPUT_DISPLAYPORT:
+			is_dp = true;
+			break;
+		case INTEL_OUTPUT_EDP:
+			is_dp = true;
+			if (!intel_encoder_is_pch_edp(&encoder->base))
+				is_cpu_edp = true;
+			break;
+		}
+
+		num_connectors++;
+	}
+
+	if (is_cpu_edp)
+		intel_crtc->cpu_transcoder = TRANSCODER_EDP;
+	else
+		intel_crtc->cpu_transcoder = pipe;
+
+	/* We are not sure yet this won't happen. */
+	WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
+	     INTEL_PCH_TYPE(dev));
+
+	WARN(num_connectors != 1, "%d connectors attached to pipe %c\n",
+	     num_connectors, pipe_name(pipe));
+
+	WARN_ON(I915_READ(PIPECONF(intel_crtc->cpu_transcoder)) &
+		(PIPECONF_ENABLE | I965_PIPECONF_ACTIVE));
+
+	WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE);
+
+	if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
+		return -EINVAL;
+
+	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+		ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
+					     &has_reduced_clock,
+					     &reduced_clock);
+		if (!ok) {
+			DRM_ERROR("Couldn't find PLL settings for mode!\n");
+			return -EINVAL;
+		}
+	}
+
+	/* Ensure that the cursor is valid for the new mode before changing... */
+	intel_crtc_update_cursor(crtc, true);
+
+	/* determine panel color depth */
+	dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
+					      adjusted_mode);
+	if (is_lvds && dev_priv->lvds_dither)
+		dither = true;
+
+	DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
+	drm_mode_debug_printmodeline(mode);
+
+	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+		fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
+		if (has_reduced_clock)
+			fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
+			      reduced_clock.m2;
+
+		dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock,
+					     fp);
+
+		/* CPU eDP is the only output that doesn't need a PCH PLL of its
+		 * own on pre-Haswell/LPT generation */
+		if (!is_cpu_edp) {
+			struct intel_pch_pll *pll;
+
+			pll = intel_get_pch_pll(intel_crtc, dpll, fp);
+			if (pll == NULL) {
+				DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
+						 pipe);
+				return -EINVAL;
+			}
+		} else
+			intel_put_pch_pll(intel_crtc);
+
+		/* The LVDS pin pair needs to be on before the DPLLs are
+		 * enabled. This is an exception to the general rule that
+		 * mode_set doesn't turn things on.
+		 */
+		if (is_lvds) {
+			temp = I915_READ(PCH_LVDS);
+			temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
+			if (HAS_PCH_CPT(dev)) {
+				temp &= ~PORT_TRANS_SEL_MASK;
+				temp |= PORT_TRANS_SEL_CPT(pipe);
+			} else {
+				if (pipe == 1)
+					temp |= LVDS_PIPEB_SELECT;
+				else
+					temp &= ~LVDS_PIPEB_SELECT;
+			}
+
+			/* set the corresponsding LVDS_BORDER bit */
+			temp |= dev_priv->lvds_border_bits;
+			/* Set the B0-B3 data pairs corresponding to whether
+			 * we're going to set the DPLLs for dual-channel mode or
+			 * not.
+			 */
+			if (clock.p2 == 7)
+				temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
+			else
+				temp &= ~(LVDS_B0B3_POWER_UP |
+					  LVDS_CLKB_POWER_UP);
+
+			/* It would be nice to set 24 vs 18-bit mode
+			 * (LVDS_A3_POWER_UP) appropriately here, but we need to
+			 * look more thoroughly into how panels behave in the
+			 * two modes.
+			 */
+			temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
+			if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+				temp |= LVDS_HSYNC_POLARITY;
+			if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+				temp |= LVDS_VSYNC_POLARITY;
+			I915_WRITE(PCH_LVDS, temp);
+		}
+	}
+
+	if (is_dp && !is_cpu_edp) {
+		intel_dp_set_m_n(crtc, mode, adjusted_mode);
+	} else {
+		if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+			/* For non-DP output, clear any trans DP clock recovery
+			 * setting.*/
+			I915_WRITE(TRANSDATA_M1(pipe), 0);
+			I915_WRITE(TRANSDATA_N1(pipe), 0);
+			I915_WRITE(TRANSDPLINK_M1(pipe), 0);
+			I915_WRITE(TRANSDPLINK_N1(pipe), 0);
+		}
+	}
+
+	intel_crtc->lowfreq_avail = false;
+	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+		if (intel_crtc->pch_pll) {
+			I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
+
+			/* Wait for the clocks to stabilize. */
+			POSTING_READ(intel_crtc->pch_pll->pll_reg);
+			udelay(150);
+
+			/* The pixel multiplier can only be updated once the
+			 * DPLL is enabled and the clocks are stable.
+			 *
+			 * So write it again.
+			 */
+			I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
+		}
+
+		if (intel_crtc->pch_pll) {
+			if (is_lvds && has_reduced_clock && i915_powersave) {
+				I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
+				intel_crtc->lowfreq_avail = true;
+			} else {
+				I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
+			}
+		}
+	}
+
+	intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
+
+	if (!is_dp || is_cpu_edp)
+		ironlake_set_m_n(crtc, mode, adjusted_mode);
+
+	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+		if (is_cpu_edp)
+			ironlake_set_pll_edp(crtc, adjusted_mode->clock);
+
+	haswell_set_pipeconf(crtc, adjusted_mode, dither);
+
+	/* Set up the display plane register */
+	I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
+	POSTING_READ(DSPCNTR(plane));
+
+	ret = intel_pipe_set_base(crtc, x, y, fb);
+
+	intel_update_watermarks(dev);
+
+	intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
+
 	return ret;
 }
 
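One detail worth calling out in haswell_crtc_mode_set() above: the CPU
transcoder is no longer always the pipe; CPU eDP gets the dedicated EDP
transcoder. A sketch of that selection in isolation (enum values here are
placeholders for the driver's own):

	enum transcoder { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C,
			  TRANSCODER_EDP };

	static enum transcoder pick_cpu_transcoder(int pipe, int is_cpu_edp)
	{
		/* eDP has its own transcoder; everything else maps 1:1 */
		return is_cpu_edp ? TRANSCODER_EDP : (enum transcoder)pipe;
	}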
@@ -5103,6 +5966,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_encoder_helper_funcs *encoder_funcs;
+	struct intel_encoder *encoder;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
 	int ret;
@@ -5113,7 +5978,19 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
 					      x, y, fb);
 	drm_vblank_post_modeset(dev, pipe);
 
-	return ret;
+	if (ret != 0)
+		return ret;
+
+	for_each_encoder_on_crtc(dev, crtc, encoder) {
+		DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
+			      encoder->base.base.id,
+			      drm_get_encoder_name(&encoder->base),
+			      mode->base.id, mode->name);
+		encoder_funcs = encoder->base.helper_private;
+		encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
+	}
+
+	return 0;
 }
 
 static bool intel_eld_uptodate(struct drm_connector *connector,
@@ -5749,7 +6626,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
 			      int depth, int bpp)
 {
 	struct drm_i915_gem_object *obj;
-	struct drm_mode_fb_cmd2 mode_cmd;
+	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
 
 	obj = i915_gem_alloc_object(dev,
 				    intel_framebuffer_size_for_mode(mode, bpp));
@@ -5879,24 +6756,19 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
 	DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
 	if (IS_ERR(fb)) {
 		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
-		goto fail;
+		return false;
 	}
 
 	if (!intel_set_mode(crtc, mode, 0, 0, fb)) {
 		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
 		if (old->release_fb)
 			old->release_fb->funcs->destroy(old->release_fb);
-		goto fail;
+		return false;
 	}
 
 	/* let the connector get through one full cycle before testing */
 	intel_wait_for_vblank(dev, intel_crtc->pipe);
-
 	return true;
-fail:
-	connector->encoder = NULL;
-	encoder->crtc = NULL;
-	return false;
 }
 
 void intel_release_load_detect_pipe(struct drm_connector *connector,
@@ -6021,12 +6893,12 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
+	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
 	struct drm_display_mode *mode;
-	int htot = I915_READ(HTOTAL(pipe));
-	int hsync = I915_READ(HSYNC(pipe));
-	int vtot = I915_READ(VTOTAL(pipe));
-	int vsync = I915_READ(VSYNC(pipe));
+	int htot = I915_READ(HTOTAL(cpu_transcoder));
+	int hsync = I915_READ(HSYNC(cpu_transcoder));
+	int vtot = I915_READ(VTOTAL(cpu_transcoder));
+	int vsync = I915_READ(VSYNC(cpu_transcoder));
 
 	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
 	if (!mode)
@@ -6183,14 +7055,19 @@ static void intel_unpin_work_fn(struct work_struct *__work)
 {
 	struct intel_unpin_work *work =
 		container_of(__work, struct intel_unpin_work, work);
+	struct drm_device *dev = work->crtc->dev;
 
-	mutex_lock(&work->dev->struct_mutex);
+	mutex_lock(&dev->struct_mutex);
 	intel_unpin_fb_obj(work->old_fb_obj);
 	drm_gem_object_unreference(&work->pending_flip_obj->base);
 	drm_gem_object_unreference(&work->old_fb_obj->base);
 
-	intel_update_fbc(work->dev);
-	mutex_unlock(&work->dev->struct_mutex);
+	intel_update_fbc(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
+	atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
+
 	kfree(work);
 }
 
@@ -6201,8 +7078,6 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_unpin_work *work;
 	struct drm_i915_gem_object *obj;
-	struct drm_pending_vblank_event *e;
-	struct timeval tvbl;
 	unsigned long flags;
 
 	/* Ignore early vblank irqs */
@@ -6211,24 +7086,22 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
 
 	spin_lock_irqsave(&dev->event_lock, flags);
 	work = intel_crtc->unpin_work;
-	if (work == NULL || !work->pending) {
+
+	/* Ensure we don't miss a work->pending update ... */
+	smp_rmb();
+
+	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 		return;
 	}
 
-	intel_crtc->unpin_work = NULL;
-
-	if (work->event) {
-		e = work->event;
-		e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
+	/* and that the unpin work is consistent wrt ->pending. */
+	smp_rmb();
 
-		e->event.tv_sec = tvbl.tv_sec;
-		e->event.tv_usec = tvbl.tv_usec;
+	intel_crtc->unpin_work = NULL;
 
-		list_add_tail(&e->base.link,
-			      &e->base.file_priv->event_list);
-		wake_up_interruptible(&e->base.file_priv->event_wait);
-	}
+	if (work->event)
+		drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
 
 	drm_vblank_put(dev, intel_crtc->pipe);
@@ -6238,9 +7111,9 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
 
 	atomic_clear_mask(1 << intel_crtc->plane,
 			  &obj->pending_flip.counter);
-
 	wake_up(&dev_priv->pending_flip_queue);
-	schedule_work(&work->work);
+
+	queue_work(dev_priv->wq, &work->work);
 
 	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
 }
@@ -6268,16 +7141,25 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
 		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
 	unsigned long flags;
 
+	/* NB: An MMIO update of the plane base pointer will also
+	 * generate a page-flip completion irq, i.e. every modeset
+	 * is also accompanied by a spurious intel_prepare_page_flip().
+	 */
 	spin_lock_irqsave(&dev->event_lock, flags);
-	if (intel_crtc->unpin_work) {
-		if ((++intel_crtc->unpin_work->pending) > 1)
-			DRM_ERROR("Prepared flip multiple times\n");
-	} else {
-		DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
-	}
+	if (intel_crtc->unpin_work)
+		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
+inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
+{
+	/* Ensure that the work item is consistent when activating it ... */
+	smp_wmb();
+	atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
+	/* and that it is marked active as soon as the irq could fire. */
+	smp_wmb();
+}
+
 static int intel_gen2_queue_flip(struct drm_device *dev,
 				 struct drm_crtc *crtc,
 				 struct drm_framebuffer *fb,
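The smp_wmb()/smp_rmb() pairing introduced above is the classic
publish/consume pattern: the writer makes the work item consistent before
flipping ->pending, and the irq-side reader re-checks after a read barrier so
it never acts on a half-initialised item. A user-space analogue in C11
atomics (names hypothetical, not the kernel's primitives):

	#include <stdatomic.h>

	enum { FLIP_INACTIVE, FLIP_PENDING, FLIP_COMPLETE };

	struct flip_work { int payload; atomic_int pending; };

	static void mark_flip_active(struct flip_work *work)
	{
		/* publish payload writes before the flag becomes visible */
		atomic_thread_fence(memory_order_release);
		atomic_store_explicit(&work->pending, FLIP_PENDING,
				      memory_order_relaxed);
	}

	static int flip_completed(const struct flip_work *work)
	{
		if (atomic_load_explicit(&work->pending,
					 memory_order_relaxed) < FLIP_COMPLETE)
			return 0;
		/* pairs with the release above before touching payload */
		atomic_thread_fence(memory_order_acquire);
		return 1;
	}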
@@ -6311,6 +7193,8 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 	intel_ring_emit(ring, fb->pitches[0]);
 	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
 	intel_ring_emit(ring, 0); /* aux display base address, unused */
+
+	intel_mark_page_flip_active(intel_crtc);
 	intel_ring_advance(ring);
 	return 0;
 
@@ -6351,6 +7235,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
 	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
 	intel_ring_emit(ring, MI_NOOP);
 
+	intel_mark_page_flip_active(intel_crtc);
 	intel_ring_advance(ring);
 	return 0;
 
@@ -6397,6 +7282,8 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
 	intel_ring_emit(ring, pf | pipesrc);
+
+	intel_mark_page_flip_active(intel_crtc);
 	intel_ring_advance(ring);
 	return 0;
 
@@ -6439,6 +7326,8 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
 	intel_ring_emit(ring, pf | pipesrc);
+
+	intel_mark_page_flip_active(intel_crtc);
 	intel_ring_advance(ring);
 	return 0;
 
@@ -6493,6 +7382,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
 	intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
 	intel_ring_emit(ring, (MI_NOOP));
+
+	intel_mark_page_flip_active(intel_crtc);
 	intel_ring_advance(ring);
 	return 0;
 
@@ -6541,7 +7432,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		return -ENOMEM;
 
 	work->event = event;
-	work->dev = crtc->dev;
+	work->crtc = crtc;
 	intel_fb = to_intel_framebuffer(crtc->fb);
 	work->old_fb_obj = intel_fb->obj;
 	INIT_WORK(&work->work, intel_unpin_work_fn);
@@ -6566,6 +7457,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	intel_fb = to_intel_framebuffer(fb);
 	obj = intel_fb->obj;
 
+	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
+		flush_workqueue(dev_priv->wq);
+
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		goto cleanup;
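The flush above throttles userspace to at most two outstanding flips per
crtc. Reduced to its essentials (C11 atomics instead of the driver's
workqueue machinery; flush_pending is a stand-in):

	#include <stdatomic.h>

	static atomic_int unpin_work_count;

	/* call before queuing a new flip */
	static void throttle_flips(void (*flush_pending)(void))
	{
		/* two flips may be in flight; drain before adding a third */
		if (atomic_load(&unpin_work_count) >= 2)
			flush_pending();
	}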
@@ -6584,6 +7478,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	 * the flip occurs and the object is no longer visible.
 	 */
 	atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+	atomic_inc(&intel_crtc->unpin_work_count);
 
 	ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
 	if (ret)
@@ -6598,6 +7493,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	return 0;
 
 cleanup_pending:
+	atomic_dec(&intel_crtc->unpin_work_count);
 	atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
 	drm_gem_object_unreference(&work->old_fb_obj->base);
 	drm_gem_object_unreference(&obj->base);
@@ -6893,7 +7789,7 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
 			dev->mode_config.dpms_property;
 
 		connector->dpms = DRM_MODE_DPMS_ON;
-		drm_connector_property_set_value(connector,
+		drm_object_property_set_value(&connector->base,
 						 dpms_property,
 						 DRM_MODE_DPMS_ON);
 
@@ -7015,8 +7911,6 @@ bool intel_set_mode(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
-	struct drm_encoder_helper_funcs *encoder_funcs;
-	struct drm_encoder *encoder;
 	struct intel_crtc *intel_crtc;
 	unsigned disable_pipes, prepare_pipes, modeset_pipes;
 	bool ret = true;
@@ -7061,6 +7955,9 @@ bool intel_set_mode(struct drm_crtc *crtc,
 	 * update the the output configuration. */
 	intel_modeset_update_state(dev, prepare_pipes);
 
+	if (dev_priv->display.modeset_global_resources)
+		dev_priv->display.modeset_global_resources(dev);
+
 	/* Set up the DPLL and any encoders state that needs to adjust or depend
 	 * on the DPLL.
 	 */
@@ -7070,18 +7967,6 @@ bool intel_set_mode(struct drm_crtc *crtc,
 					   x, y, fb);
 		if (!ret)
 			goto done;
-
-		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-
-			if (encoder->crtc != &intel_crtc->base)
-				continue;
-
-			DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
-				encoder->base.id, drm_get_encoder_name(encoder),
-				mode->base.id, mode->name);
-			encoder_funcs = encoder->helper_private;
-			encoder_funcs->mode_set(encoder, mode, adjusted_mode);
-		}
 	}
 
 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -7420,6 +8305,12 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
 	.page_flip = intel_crtc_page_flip,
 };
 
+static void intel_cpu_pll_init(struct drm_device *dev)
+{
+	if (IS_HASWELL(dev))
+		intel_ddi_pll_init(dev);
+}
+
 static void intel_pch_pll_init(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -7459,6 +8350,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 	/* Swap pipes & planes for FBC on pre-965 */
 	intel_crtc->pipe = pipe;
 	intel_crtc->plane = pipe;
+	intel_crtc->cpu_transcoder = pipe;
 	if (IS_MOBILE(dev) && IS_GEN3(dev)) {
 		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
 		intel_crtc->plane = !pipe;
@@ -7551,17 +8443,9 @@ static void intel_setup_outputs(struct drm_device *dev)
 		I915_WRITE(PFIT_CONTROL, 0);
 	}
 
-	if (HAS_PCH_SPLIT(dev)) {
-		dpd_is_edp = intel_dpd_is_edp(dev);
-
-		if (has_edp_a(dev))
-			intel_dp_init(dev, DP_A, PORT_A);
-
-		if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
-			intel_dp_init(dev, PCH_DP_D, PORT_D);
-	}
-
-	intel_crt_init(dev);
+	if (!(IS_HASWELL(dev) &&
+	      (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
+		intel_crt_init(dev);
 
 	if (IS_HASWELL(dev)) {
 		int found;
@@ -7584,6 +8468,10 @@ static void intel_setup_outputs(struct drm_device *dev)
 		intel_ddi_init(dev, PORT_D);
 	} else if (HAS_PCH_SPLIT(dev)) {
 		int found;
+		dpd_is_edp = intel_dpd_is_edp(dev);
+
+		if (has_edp_a(dev))
+			intel_dp_init(dev, DP_A, PORT_A);
 
 		if (I915_READ(HDMIB) & PORT_DETECTED) {
 			/* PCH SDVOB multiplex with HDMIB */
@@ -7603,11 +8491,15 @@ static void intel_setup_outputs(struct drm_device *dev)
 		if (I915_READ(PCH_DP_C) & DP_DETECTED)
 			intel_dp_init(dev, PCH_DP_C, PORT_C);
 
-		if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
+		if (I915_READ(PCH_DP_D) & DP_DETECTED)
 			intel_dp_init(dev, PCH_DP_D, PORT_D);
 	} else if (IS_VALLEYVIEW(dev)) {
 		int found;
 
+		/* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
+		if (I915_READ(DP_C) & DP_DETECTED)
+			intel_dp_init(dev, DP_C, PORT_C);
+
 		if (I915_READ(SDVOB) & PORT_DETECTED) {
 			/* SDVOB multiplex with HDMIB */
 			found = intel_sdvo_init(dev, SDVOB, true);
@@ -7620,9 +8512,6 @@ static void intel_setup_outputs(struct drm_device *dev)
 		if (I915_READ(SDVOC) & PORT_DETECTED)
 			intel_hdmi_init(dev, SDVOC, PORT_C);
 
-		/* Shares lanes with HDMI on SDVOC */
-		if (I915_READ(DP_C) & DP_DETECTED)
-			intel_dp_init(dev, DP_C, PORT_C);
 	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
 		bool found = false;
 
@@ -7676,8 +8565,9 @@ static void intel_setup_outputs(struct drm_device *dev)
 		intel_encoder_clones(encoder);
 	}
 
-	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
-		ironlake_init_pch_refclk(dev);
+	intel_init_pch_refclk(dev);
+
+	drm_helper_move_panel_connectors_to_head(dev);
 }
 
 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
@@ -7718,27 +8608,51 @@ int intel_framebuffer_init(struct drm_device *dev,
 	if (mode_cmd->pitches[0] & 63)
 		return -EINVAL;
 
+	/* FIXME <= Gen4 stride limits are bit unclear */
+	if (mode_cmd->pitches[0] > 32768)
+		return -EINVAL;
+
+	if (obj->tiling_mode != I915_TILING_NONE &&
+	    mode_cmd->pitches[0] != obj->stride)
+		return -EINVAL;
+
+	/* Reject formats not supported by any plane early. */
 	switch (mode_cmd->pixel_format) {
-	case DRM_FORMAT_RGB332:
+	case DRM_FORMAT_C8:
 	case DRM_FORMAT_RGB565:
 	case DRM_FORMAT_XRGB8888:
-	case DRM_FORMAT_XBGR8888:
 	case DRM_FORMAT_ARGB8888:
+		break;
+	case DRM_FORMAT_XRGB1555:
+	case DRM_FORMAT_ARGB1555:
+		if (INTEL_INFO(dev)->gen > 3)
+			return -EINVAL;
+		break;
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_ABGR8888:
 	case DRM_FORMAT_XRGB2101010:
 	case DRM_FORMAT_ARGB2101010:
-		/* RGB formats are common across chipsets */
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_ABGR2101010:
+		if (INTEL_INFO(dev)->gen < 4)
+			return -EINVAL;
 		break;
 	case DRM_FORMAT_YUYV:
 	case DRM_FORMAT_UYVY:
 	case DRM_FORMAT_YVYU:
 	case DRM_FORMAT_VYUY:
+		if (INTEL_INFO(dev)->gen < 6)
+			return -EINVAL;
 		break;
 	default:
-		DRM_DEBUG_KMS("unsupported pixel format %u\n",
-			      mode_cmd->pixel_format);
+		DRM_DEBUG_KMS("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
 		return -EINVAL;
 	}
 
+	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
+	if (mode_cmd->offsets[0] != 0)
+		return -EINVAL;
+
 	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
 	if (ret) {
 		DRM_ERROR("framebuffer init failed %d\n", ret);
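The switch above gates each fourcc on the hardware generation that can scan
it out. The same policy can be stated as a table, which is how one might
extend it; the entries below are transcribed from the hunk, with the fourcc
constants spelled out purely for illustration:

	#include <stdbool.h>

	struct format_rule { unsigned int fourcc; int min_gen, max_gen; };

	static const struct format_rule rules[] = {
		{ 0x35315241 /* ARGB1555 */, 0, 3 },  /* pre-gen4 only */
		{ 0x34324258 /* XBGR8888 */, 4, 99 }, /* gen4+ */
		{ 0x56595559 /* YUYV */,     6, 99 }, /* gen6+ */
	};

	static bool format_allowed(unsigned int fourcc, int gen)
	{
		unsigned int i;

		for (i = 0; i < sizeof(rules) / sizeof(rules[0]); i++)
			if (rules[i].fourcc == fourcc)
				return gen >= rules[i].min_gen &&
				       gen <= rules[i].max_gen;
		return false; /* unknown formats are rejected early */
	}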
@@ -7776,7 +8690,13 @@ static void intel_init_display(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	/* We always want a DPMS function */
-	if (HAS_PCH_SPLIT(dev)) {
+	if (IS_HASWELL(dev)) {
+		dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
+		dev_priv->display.crtc_enable = haswell_crtc_enable;
+		dev_priv->display.crtc_disable = haswell_crtc_disable;
+		dev_priv->display.off = haswell_crtc_off;
+		dev_priv->display.update_plane = ironlake_update_plane;
+	} else if (HAS_PCH_SPLIT(dev)) {
 		dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
 		dev_priv->display.crtc_enable = ironlake_crtc_enable;
 		dev_priv->display.crtc_disable = ironlake_crtc_disable;
@@ -7827,6 +8747,8 @@ static void intel_init_display(struct drm_device *dev)
 		/* FIXME: detect B0+ stepping and use auto training */
 		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
 		dev_priv->display.write_eld = ironlake_write_eld;
+		dev_priv->display.modeset_global_resources =
+			ivb_modeset_global_resources;
 	} else if (IS_HASWELL(dev)) {
 		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
 		dev_priv->display.write_eld = haswell_write_eld;
@@ -8058,6 +8980,7 @@ void intel_modeset_init(struct drm_device *dev)
 			DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
 	}
 
+	intel_cpu_pll_init(dev);
 	intel_pch_pll_init(dev);
 
 	/* Just disable it once at startup */
@@ -8127,7 +9050,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
 	u32 reg;
 
 	/* Clear any frame start delays used for debugging left by the BIOS */
-	reg = PIPECONF(crtc->pipe);
+	reg = PIPECONF(crtc->cpu_transcoder);
 	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
 
 	/* We need to sanitize the plane -> pipe mapping first because this will
@@ -8246,7 +9169,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
 
 /* Scan out the current hw modeset state, sanitizes it and maps it into the drm
  * and i915 state tracking structures. */
-void intel_modeset_setup_hw_state(struct drm_device *dev)
+void intel_modeset_setup_hw_state(struct drm_device *dev,
+				  bool force_restore)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum pipe pipe;
@@ -8255,10 +9179,35 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
 	struct intel_encoder *encoder;
 	struct intel_connector *connector;
 
+	if (IS_HASWELL(dev)) {
+		tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
+
+		if (tmp & TRANS_DDI_FUNC_ENABLE) {
+			switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
+			case TRANS_DDI_EDP_INPUT_A_ON:
+			case TRANS_DDI_EDP_INPUT_A_ONOFF:
+				pipe = PIPE_A;
+				break;
+			case TRANS_DDI_EDP_INPUT_B_ONOFF:
+				pipe = PIPE_B;
+				break;
+			case TRANS_DDI_EDP_INPUT_C_ONOFF:
+				pipe = PIPE_C;
+				break;
+			}
+
+			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+			crtc->cpu_transcoder = TRANSCODER_EDP;
+
+			DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n",
+				      pipe_name(pipe));
+		}
+	}
+
 	for_each_pipe(pipe) {
 		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 
-		tmp = I915_READ(PIPECONF(pipe));
+		tmp = I915_READ(PIPECONF(crtc->cpu_transcoder));
 		if (tmp & PIPECONF_ENABLE)
 			crtc->active = true;
 		else
@@ -8271,6 +9220,9 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
 			      crtc->active ? "enabled" : "disabled");
 	}
 
+	if (IS_HASWELL(dev))
+		intel_ddi_setup_hw_pll_state(dev);
+
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
 			    base.head) {
 		pipe = 0;
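The TRANS_DDI_EDP_INPUT decode added earlier in this function answers "which
pipe is currently driving the eDP transcoder" straight from the hardware. The
shape of that decode in isolation (the input-select codes here are
placeholders; only the mapping structure follows the hunk):

	enum pipe { PIPE_A, PIPE_B, PIPE_C };

	/* hypothetical encodings of the EDP input-select field */
	enum { EDP_INPUT_A_ON, EDP_INPUT_A_ONOFF, EDP_INPUT_B_ONOFF,
	       EDP_INPUT_C_ONOFF };

	static enum pipe edp_input_to_pipe(int input_sel)
	{
		switch (input_sel) {
		case EDP_INPUT_A_ON:
		case EDP_INPUT_A_ONOFF:
			return PIPE_A;
		case EDP_INPUT_B_ONOFF:
			return PIPE_B;
		default:
			return PIPE_C;
		}
	}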
@@ -8317,9 +9269,19 @@ void intel_modeset_setup_hw_state(struct drm_device *dev)
 		intel_sanitize_crtc(crtc);
 	}
 
-	intel_modeset_update_staged_output_state(dev);
+	if (force_restore) {
+		for_each_pipe(pipe) {
+			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
+			intel_set_mode(&crtc->base, &crtc->base.mode,
+				       crtc->base.x, crtc->base.y, crtc->base.fb);
+		}
+	} else {
+		intel_modeset_update_staged_output_state(dev);
+	}
 
 	intel_modeset_check_state(dev);
+
+	drm_mode_config_reset(dev);
 }
 
 void intel_modeset_gem_init(struct drm_device *dev)
@@ -8328,7 +9290,7 @@ void intel_modeset_gem_init(struct drm_device *dev)
 
 	intel_setup_overlay(dev);
 
-	intel_modeset_setup_hw_state(dev);
+	intel_modeset_setup_hw_state(dev, false);
 }
 
 void intel_modeset_cleanup(struct drm_device *dev)
@@ -8447,6 +9409,7 @@ intel_display_capture_error_state(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_display_error_state *error;
+	enum transcoder cpu_transcoder;
 	int i;
 
 	error = kmalloc(sizeof(*error), GFP_ATOMIC);
@@ -8454,6 +9417,8 @@ intel_display_capture_error_state(struct drm_device *dev)
 		return NULL;
 
 	for_each_pipe(i) {
+		cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
+
 		error->cursor[i].control = I915_READ(CURCNTR(i));
 		error->cursor[i].position = I915_READ(CURPOS(i));
 		error->cursor[i].base = I915_READ(CURBASE(i));
@@ -8468,14 +9433,14 @@ intel_display_capture_error_state(struct drm_device *dev)
 			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
 		}
 
-		error->pipe[i].conf = I915_READ(PIPECONF(i));
+		error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
 		error->pipe[i].source = I915_READ(PIPESRC(i));
-		error->pipe[i].htotal = I915_READ(HTOTAL(i));
-		error->pipe[i].hblank = I915_READ(HBLANK(i));
-		error->pipe[i].hsync = I915_READ(HSYNC(i));
-		error->pipe[i].vtotal = I915_READ(VTOTAL(i));
-		error->pipe[i].vblank = I915_READ(VBLANK(i));
-		error->pipe[i].vsync = I915_READ(VSYNC(i));
+		error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
+		error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder));
+		error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder));
+		error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
+		error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder));
+		error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
 	}
 
 	return error;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 368ed8ef1600..1b63d55318a0 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -36,8 +36,6 @@
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
-#define DP_RECEIVER_CAP_SIZE	0xf
-#define DP_LINK_STATUS_SIZE	6
 #define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
 
 /**
@@ -49,7 +47,9 @@
  */
 static bool is_edp(struct intel_dp *intel_dp)
 {
-	return intel_dp->base.type == INTEL_OUTPUT_EDP;
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+
+	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
 }
 
 /**
@@ -76,15 +76,16 @@ static bool is_cpu_edp(struct intel_dp *intel_dp)
 	return is_edp(intel_dp) && !is_pch_edp(intel_dp);
 }
 
-static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
+static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
 {
-	return container_of(encoder, struct intel_dp, base.base);
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+
+	return intel_dig_port->base.base.dev;
 }
 
 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
 {
-	return container_of(intel_attached_encoder(connector),
-			    struct intel_dp, base);
+	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
 }
 
 /**
@@ -106,49 +107,32 @@ bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
 	return is_pch_edp(intel_dp);
 }
 
-static void intel_dp_start_link_train(struct intel_dp *intel_dp);
-static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
 static void intel_dp_link_down(struct intel_dp *intel_dp);
 
 void
 intel_edp_link_config(struct intel_encoder *intel_encoder,
 		      int *lane_num, int *link_bw)
 {
-	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
 
 	*lane_num = intel_dp->lane_count;
-	if (intel_dp->link_bw == DP_LINK_BW_1_62)
-		*link_bw = 162000;
-	else if (intel_dp->link_bw == DP_LINK_BW_2_7)
-		*link_bw = 270000;
+	*link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
 }
 
 int
 intel_edp_target_clock(struct intel_encoder *intel_encoder,
 		       struct drm_display_mode *mode)
 {
-	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
+	struct intel_connector *intel_connector = intel_dp->attached_connector;
 
-	if (intel_dp->panel_fixed_mode)
-		return intel_dp->panel_fixed_mode->clock;
+	if (intel_connector->panel.fixed_mode)
+		return intel_connector->panel.fixed_mode->clock;
 	else
 		return mode->clock;
 }
 
 static int
-intel_dp_max_lane_count(struct intel_dp *intel_dp)
-{
-	int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
-	switch (max_lane_count) {
-	case 1: case 2: case 4:
-		break;
-	default:
-		max_lane_count = 4;
-	}
-	return max_lane_count;
-}
-
-static int
 intel_dp_max_link_bw(struct intel_dp *intel_dp)
 {
 	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
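The drm_dp_bw_code_to_link_rate() helper that replaces the open-coded
if/else ladder presumably generalises the same mapping: the DPCD bandwidth
code counts in units of 0.27 Gbps, i.e. 27000 kHz steps, so 0x06 yields
162000 and 0x0a yields 270000. As a one-liner:

	static int bw_code_to_link_rate_khz(int bw_code)
	{
		/* 0x06 * 27000 == 162000, 0x0a * 27000 == 270000 */
		return bw_code * 27000;
	}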
@@ -208,7 +192,7 @@ intel_dp_adjust_dithering(struct intel_dp *intel_dp,
 			   bool adjust_mode)
 {
 	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
-	int max_lanes = intel_dp_max_lane_count(intel_dp);
+	int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
 	int max_rate, mode_rate;
 
 	mode_rate = intel_dp_link_required(mode->clock, 24);
@@ -234,12 +218,14 @@ intel_dp_mode_valid(struct drm_connector *connector,
 		    struct drm_display_mode *mode)
 {
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
 
-	if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
-		if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
+	if (is_edp(intel_dp) && fixed_mode) {
+		if (mode->hdisplay > fixed_mode->hdisplay)
 			return MODE_PANEL;
 
-		if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay)
+		if (mode->vdisplay > fixed_mode->vdisplay)
 			return MODE_PANEL;
 	}
 
@@ -285,6 +271,10 @@ intel_hrawclk(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t clkcfg;
 
+	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
+	if (IS_VALLEYVIEW(dev))
+		return 200;
+
 	clkcfg = I915_READ(CLKCFG);
 	switch (clkcfg & CLKCFG_FSB_MASK) {
 	case CLKCFG_FSB_400:
@@ -310,7 +300,7 @@ intel_hrawclk(struct drm_device *dev)
 
 static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
@@ -318,7 +308,7 @@ static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
 
 static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
@@ -327,7 +317,7 @@ static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
 static void
 intel_dp_check_edp(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (!is_edp(intel_dp))
@@ -346,7 +336,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 		uint8_t *recv, int recv_size)
 {
 	uint32_t output_reg = intel_dp->output_reg;
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t ch_ctl = output_reg + 0x10;
 	uint32_t ch_data = ch_ctl + 4;
@@ -356,6 +347,29 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 	uint32_t aux_clock_divider;
 	int try, precharge;
 
+	if (IS_HASWELL(dev)) {
+		switch (intel_dig_port->port) {
+		case PORT_A:
+			ch_ctl = DPA_AUX_CH_CTL;
+			ch_data = DPA_AUX_CH_DATA1;
+			break;
+		case PORT_B:
+			ch_ctl = PCH_DPB_AUX_CH_CTL;
+			ch_data = PCH_DPB_AUX_CH_DATA1;
+			break;
+		case PORT_C:
+			ch_ctl = PCH_DPC_AUX_CH_CTL;
+			ch_data = PCH_DPC_AUX_CH_DATA1;
+			break;
+		case PORT_D:
+			ch_ctl = PCH_DPD_AUX_CH_CTL;
+			ch_data = PCH_DPD_AUX_CH_DATA1;
+			break;
+		default:
+			BUG();
+		}
+	}
+
 	intel_dp_check_edp(intel_dp);
 	/* The clock divider is based off the hrawclk,
 	 * and would like to run at 2MHz. So, take the
@@ -365,12 +379,16 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
 	 * clock divider.
 	 */
 	if (is_cpu_edp(intel_dp)) {
-		if (IS_GEN6(dev) || IS_GEN7(dev))
+		if (IS_HASWELL(dev))
+			aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
+		else if (IS_VALLEYVIEW(dev))
+			aux_clock_divider = 100;
+		else if (IS_GEN6(dev) || IS_GEN7(dev))
 			aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
 		else
 			aux_clock_divider = 225; /* eDP input clock at 450Mhz */
 	} else if (HAS_PCH_SPLIT(dev))
-		aux_clock_divider = 63; /* IRL input clock fixed at 125Mhz */
+		aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
 	else
 		aux_clock_divider = intel_hrawclk(dev) / 2;
 
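All branches above aim the AUX channel at roughly a 2 MHz bit clock, so the
divider is always the source clock over 2 MHz, rounded up. With a kHz input
that is simply:

	static unsigned int aux_clock_divider_for(unsigned int src_clock_khz)
	{
		/* DIV_ROUND_UP(src, 2 MHz) */
		return (src_clock_khz + 1999) / 2000;
	}

For example a 125 MHz PCH rawclk gives 63, matching the constant the old
code hard-coded.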
@@ -642,9 +660,6 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
 	return -EREMOTEIO;
 }
 
-static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
-static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
-
 static int
 intel_dp_i2c_init(struct intel_dp *intel_dp,
 		  struct intel_connector *intel_connector, const char *name)
@@ -670,22 +685,25 @@ intel_dp_i2c_init(struct intel_dp *intel_dp,
670 return ret; 685 return ret;
671} 686}
672 687
673static bool 688bool
674intel_dp_mode_fixup(struct drm_encoder *encoder, 689intel_dp_mode_fixup(struct drm_encoder *encoder,
675 const struct drm_display_mode *mode, 690 const struct drm_display_mode *mode,
676 struct drm_display_mode *adjusted_mode) 691 struct drm_display_mode *adjusted_mode)
677{ 692{
678 struct drm_device *dev = encoder->dev; 693 struct drm_device *dev = encoder->dev;
679 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 694 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
695 struct intel_connector *intel_connector = intel_dp->attached_connector;
680 int lane_count, clock; 696 int lane_count, clock;
681 int max_lane_count = intel_dp_max_lane_count(intel_dp); 697 int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
682 int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; 698 int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
683 int bpp, mode_rate; 699 int bpp, mode_rate;
684 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; 700 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
685 701
686 if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) { 702 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
687 intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode); 703 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
688 intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, 704 adjusted_mode);
705 intel_pch_panel_fitting(dev,
706 intel_connector->panel.fitting_mode,
689 mode, adjusted_mode); 707 mode, adjusted_mode);
690 } 708 }
691 709
@@ -762,21 +780,23 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
762 struct drm_display_mode *adjusted_mode) 780 struct drm_display_mode *adjusted_mode)
763{ 781{
764 struct drm_device *dev = crtc->dev; 782 struct drm_device *dev = crtc->dev;
765 struct intel_encoder *encoder; 783 struct intel_encoder *intel_encoder;
784 struct intel_dp *intel_dp;
766 struct drm_i915_private *dev_priv = dev->dev_private; 785 struct drm_i915_private *dev_priv = dev->dev_private;
767 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 786 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
768 int lane_count = 4; 787 int lane_count = 4;
769 struct intel_dp_m_n m_n; 788 struct intel_dp_m_n m_n;
770 int pipe = intel_crtc->pipe; 789 int pipe = intel_crtc->pipe;
790 enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
771 791
772 /* 792 /*
773 * Find the lane count in the intel_encoder private 793 * Find the lane count in the intel_encoder private
774 */ 794 */
775 for_each_encoder_on_crtc(dev, crtc, encoder) { 795 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
776 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 796 intel_dp = enc_to_intel_dp(&intel_encoder->base);
777 797
778 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || 798 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
779 intel_dp->base.type == INTEL_OUTPUT_EDP) 799 intel_encoder->type == INTEL_OUTPUT_EDP)
780 { 800 {
781 lane_count = intel_dp->lane_count; 801 lane_count = intel_dp->lane_count;
782 break; 802 break;
@@ -791,23 +811,46 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
 	intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
 			     mode->clock, adjusted_mode->clock, &m_n);
 
-	if (HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(TRANSDATA_M1(pipe),
-			   ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
-			   m_n.gmch_m);
+	if (IS_HASWELL(dev)) {
+		I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
+			   TU_SIZE(m_n.tu) | m_n.gmch_m);
+		I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
+		I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
+		I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
+	} else if (HAS_PCH_SPLIT(dev)) {
+		I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
 		I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
 		I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
 		I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
+	} else if (IS_VALLEYVIEW(dev)) {
+		I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
+		I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
+		I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
+		I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
 	} else {
 		I915_WRITE(PIPE_GMCH_DATA_M(pipe),
-			   ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
-			   m_n.gmch_m);
+			   TU_SIZE(m_n.tu) | m_n.gmch_m);
 		I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
 		I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
 		I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
 	}
 }
 
+void intel_dp_init_link_config(struct intel_dp *intel_dp)
+{
+	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+	intel_dp->link_configuration[0] = intel_dp->link_bw;
+	intel_dp->link_configuration[1] = intel_dp->lane_count;
+	intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
+	/*
+	 * Check for DPCD version > 1.1 and enhanced framing support
+	 */
+	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
+		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+	}
+}
+
 static void
 intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		  struct drm_display_mode *adjusted_mode)
@@ -815,7 +858,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-	struct drm_crtc *crtc = intel_dp->base.base.crtc;
+	struct drm_crtc *crtc = encoder->crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
 	/*
@@ -860,21 +903,12 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
 		intel_write_eld(encoder, adjusted_mode);
 	}
-	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
-	intel_dp->link_configuration[0] = intel_dp->link_bw;
-	intel_dp->link_configuration[1] = intel_dp->lane_count;
-	intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
-	/*
-	 * Check for DPCD version > 1.1 and enhanced framing support
-	 */
-	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
-	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
-		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
-	}
+
+	intel_dp_init_link_config(intel_dp);
 
 	/* Split out the IBX/CPU vs CPT settings */
 
-	if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+	if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
 		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
 			intel_dp->DP |= DP_SYNC_HS_HIGH;
 		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -931,7 +965,7 @@ static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
 				       u32 mask,
 				       u32 value)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
@@ -978,9 +1012,9 @@ static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
 	return control;
 }
 
-static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
+void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 pp;
 
@@ -1019,7 +1053,7 @@ static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
 
 static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 pp;
 
@@ -1041,14 +1075,14 @@ static void ironlake_panel_vdd_work(struct work_struct *__work)
 {
 	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
 						 struct intel_dp, panel_vdd_work);
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
 	mutex_lock(&dev->mode_config.mutex);
 	ironlake_panel_vdd_off_sync(intel_dp);
 	mutex_unlock(&dev->mode_config.mutex);
 }
 
-static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
+void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
 {
 	if (!is_edp(intel_dp))
 		return;
@@ -1071,9 +1105,9 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
 	}
 }
 
-static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
+void ironlake_edp_panel_on(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 pp;
 
@@ -1113,9 +1147,9 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
 	}
 }
 
-static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
+void ironlake_edp_panel_off(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 pp;
 
@@ -1138,10 +1172,12 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
 	ironlake_wait_panel_off(intel_dp);
 }
 
-static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
+void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
 	u32 pp;
 
 	if (!is_edp(intel_dp))
@@ -1159,17 +1195,21 @@ static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
 	pp |= EDP_BLC_ENABLE;
 	I915_WRITE(PCH_PP_CONTROL, pp);
 	POSTING_READ(PCH_PP_CONTROL);
+
+	intel_panel_enable_backlight(dev, pipe);
 }
 
-static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
+void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 pp;
 
 	if (!is_edp(intel_dp))
 		return;
 
+	intel_panel_disable_backlight(dev);
+
 	DRM_DEBUG_KMS("\n");
 	pp = ironlake_get_pp_control(dev_priv);
 	pp &= ~EDP_BLC_ENABLE;
@@ -1180,8 +1220,9 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
 
 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
-	struct drm_crtc *crtc = intel_dp->base.base.crtc;
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 dpa_ctl;
 
@@ -1205,8 +1246,9 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
 
 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
-	struct drm_crtc *crtc = intel_dp->base.base.crtc;
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
+	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 dpa_ctl;
 
@@ -1228,7 +1270,7 @@ static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
 }
 
 /* If the sink supports it, try to set the power state appropriately */
-static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
+void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
 {
 	int ret, i;
 
@@ -1298,9 +1340,10 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
 			return true;
 		}
 	}
-	}
 
-	DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg);
+		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
+			      intel_dp->output_reg);
+	}
 
 	return true;
 }
@@ -1396,38 +1439,6 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
 					      DP_LINK_STATUS_SIZE);
 }
 
-static uint8_t
-intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
-		     int r)
-{
-	return link_status[r - DP_LANE0_1_STATUS];
-}
-
-static uint8_t
-intel_get_adjust_request_voltage(uint8_t adjust_request[2],
-				 int lane)
-{
-	int s = ((lane & 1) ?
-		 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
-		 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
-	uint8_t l = adjust_request[lane>>1];
-
-	return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
-}
-
-static uint8_t
-intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2],
-				      int lane)
-{
-	int s = ((lane & 1) ?
-		 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
-		 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
-	uint8_t l = adjust_request[lane>>1];
-
-	return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
-}
-
-
 #if 0
 static char *voltage_names[] = {
 	"0.4V", "0.6V", "0.8V", "1.2V"
@@ -1448,7 +1459,7 @@ static char *link_train_names[] = {
 static uint8_t
 intel_dp_voltage_max(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
 	if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
 		return DP_TRAIN_VOLTAGE_SWING_800;
@@ -1461,9 +1472,21 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
 static uint8_t
 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
-	if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+	if (IS_HASWELL(dev)) {
+		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+		case DP_TRAIN_VOLTAGE_SWING_400:
+			return DP_TRAIN_PRE_EMPHASIS_9_5;
+		case DP_TRAIN_VOLTAGE_SWING_600:
+			return DP_TRAIN_PRE_EMPHASIS_6;
+		case DP_TRAIN_VOLTAGE_SWING_800:
+			return DP_TRAIN_PRE_EMPHASIS_3_5;
+		case DP_TRAIN_VOLTAGE_SWING_1200:
+		default:
+			return DP_TRAIN_PRE_EMPHASIS_0;
+		}
+	} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
 		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
 		case DP_TRAIN_VOLTAGE_SWING_400:
 			return DP_TRAIN_PRE_EMPHASIS_6;
@@ -1494,13 +1517,12 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
 	uint8_t v = 0;
 	uint8_t p = 0;
 	int lane;
-	uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
 	uint8_t voltage_max;
 	uint8_t preemph_max;
 
 	for (lane = 0; lane < intel_dp->lane_count; lane++) {
-		uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
-		uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane);
+		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
+		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
 
 		if (this_v > v)
 			v = this_v;
@@ -1617,52 +1639,38 @@ intel_gen7_edp_signal_levels(uint8_t train_set)
 	}
 }
 
-static uint8_t
-intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
-		      int lane)
-{
-	int s = (lane & 1) * 4;
-	uint8_t l = link_status[lane>>1];
-
-	return (l >> s) & 0xf;
-}
-
-/* Check for clock recovery is done on all channels */
-static bool
-intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
-{
-	int lane;
-	uint8_t lane_status;
-
-	for (lane = 0; lane < lane_count; lane++) {
-		lane_status = intel_get_lane_status(link_status, lane);
-		if ((lane_status & DP_LANE_CR_DONE) == 0)
-			return false;
-	}
-	return true;
-}
+/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_dp_signal_levels_hsw(uint8_t train_set)
+{
+	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+					 DP_TRAIN_PRE_EMPHASIS_MASK);
+	switch (signal_levels) {
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+		return DDI_BUF_EMP_400MV_0DB_HSW;
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return DDI_BUF_EMP_400MV_3_5DB_HSW;
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+		return DDI_BUF_EMP_400MV_6DB_HSW;
+	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
+		return DDI_BUF_EMP_400MV_9_5DB_HSW;
 
-/* Check to see if channel eq is done on all channels */
-#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
-			 DP_LANE_CHANNEL_EQ_DONE|\
-			 DP_LANE_SYMBOL_LOCKED)
-static bool
-intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
-{
-	uint8_t lane_align;
-	uint8_t lane_status;
-	int lane;
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+		return DDI_BUF_EMP_600MV_0DB_HSW;
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return DDI_BUF_EMP_600MV_3_5DB_HSW;
+	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
+		return DDI_BUF_EMP_600MV_6DB_HSW;
 
-	lane_align = intel_dp_link_status(link_status,
-					  DP_LANE_ALIGN_STATUS_UPDATED);
-	if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
-		return false;
-	for (lane = 0; lane < intel_dp->lane_count; lane++) {
-		lane_status = intel_get_lane_status(link_status, lane);
-		if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
-			return false;
+	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+		return DDI_BUF_EMP_800MV_0DB_HSW;
+	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+		return DDI_BUF_EMP_800MV_3_5DB_HSW;
+	default:
+		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+			      "0x%x\n", signal_levels);
+		return DDI_BUF_EMP_400MV_0DB_HSW;
 	}
-	return true;
 }
 
 static bool
@@ -1670,11 +1678,49 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
 			uint32_t dp_reg_value,
 			uint8_t dp_train_pat)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	enum port port = intel_dig_port->port;
 	int ret;
+	uint32_t temp;
 
-	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
+	if (IS_HASWELL(dev)) {
+		temp = I915_READ(DP_TP_CTL(port));
+
+		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
+			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
+		else
+			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
+
+		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
+		case DP_TRAINING_PATTERN_DISABLE:
+			temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
+			I915_WRITE(DP_TP_CTL(port), temp);
+
+			if (wait_for((I915_READ(DP_TP_STATUS(port)) &
+				      DP_TP_STATUS_IDLE_DONE), 1))
+				DRM_ERROR("Timed out waiting for DP idle patterns\n");
+
+			temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
+			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
+
+			break;
+		case DP_TRAINING_PATTERN_1:
+			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
+			break;
+		case DP_TRAINING_PATTERN_2:
+			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
+			break;
+		case DP_TRAINING_PATTERN_3:
+			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
+			break;
+		}
+		I915_WRITE(DP_TP_CTL(port), temp);
+
+	} else if (HAS_PCH_CPT(dev) &&
+		   (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
 		dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
 
 		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
@@ -1734,16 +1780,20 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
 }
 
 /* Enable corresponding port and start training pattern 1 */
-static void
+void
 intel_dp_start_link_train(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
+	struct drm_device *dev = encoder->dev;
 	int i;
 	uint8_t voltage;
 	bool clock_recovery = false;
 	int voltage_tries, loop_tries;
 	uint32_t DP = intel_dp->DP;
 
+	if (IS_HASWELL(dev))
+		intel_ddi_prepare_link_retrain(encoder);
+
 	/* Write the link configuration data */
 	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
 				  intel_dp->link_configuration,
@@ -1761,8 +1811,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 		uint8_t link_status[DP_LINK_STATUS_SIZE];
 		uint32_t signal_levels;
 
-
-		if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+		if (IS_HASWELL(dev)) {
+			signal_levels = intel_dp_signal_levels_hsw(
+							intel_dp->train_set[0]);
+			DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
+		} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
 			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
 			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
 		} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
@@ -1770,23 +1823,24 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
 		} else {
 			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
-			DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels);
 			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
 		}
+		DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n",
+			      signal_levels);
 
+		/* Set training pattern 1 */
 		if (!intel_dp_set_link_train(intel_dp, DP,
 					     DP_TRAINING_PATTERN_1 |
 					     DP_LINK_SCRAMBLING_DISABLE))
 			break;
-		/* Set training pattern 1 */
 
-		udelay(100);
+		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
 		if (!intel_dp_get_link_status(intel_dp, link_status)) {
 			DRM_ERROR("failed to get link status\n");
 			break;
 		}
 
-		if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
 			DRM_DEBUG_KMS("clock recovery OK\n");
 			clock_recovery = true;
 			break;
@@ -1825,10 +1879,10 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 	intel_dp->DP = DP;
 }
 
-static void
+void
 intel_dp_complete_link_train(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	bool channel_eq = false;
 	int tries, cr_tries;
 	uint32_t DP = intel_dp->DP;
@@ -1848,7 +1902,10 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 			break;
 		}
 
-		if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+		if (IS_HASWELL(dev)) {
+			signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]);
+			DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
+		} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
 			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
 			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
 		} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
@@ -1865,18 +1922,18 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 					     DP_LINK_SCRAMBLING_DISABLE))
 			break;
 
-		udelay(400);
+		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
 		if (!intel_dp_get_link_status(intel_dp, link_status))
 			break;
 
 		/* Make sure clock is still ok */
-		if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+		if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
 			intel_dp_start_link_train(intel_dp);
 			cr_tries++;
 			continue;
 		}
 
-		if (intel_channel_eq_ok(intel_dp, link_status)) {
+		if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
 			channel_eq = true;
 			break;
 		}
@@ -1895,16 +1952,38 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 		++tries;
 	}
 
+	if (channel_eq)
+		DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n");
+
 	intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
 }
 
 static void
 intel_dp_link_down(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct drm_device *dev = intel_dig_port->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t DP = intel_dp->DP;
 
+	/*
+	 * DDI code has a strict mode set sequence and we should try to respect
+	 * it, otherwise we might hang the machine in many different ways. So we
+	 * really should be disabling the port only on a complete crtc_disable
+	 * sequence. This function is just called under two conditions on DDI
+	 * code:
+	 * - Link train failed while doing crtc_enable, and on this case we
+	 *   really should respect the mode set sequence and wait for a
+	 *   crtc_disable.
+	 * - Someone turned the monitor off and intel_dp_check_link_status
+	 *   called us. We don't need to disable the whole port on this case, so
+	 *   when someone turns the monitor on again,
+	 *   intel_ddi_prepare_link_retrain will take care of redoing the link
+	 *   train.
+	 */
+	if (IS_HASWELL(dev))
+		return;
+
 	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
 		return;
 
@@ -1923,7 +2002,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 
 	if (HAS_PCH_IBX(dev) &&
 	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
-		struct drm_crtc *crtc = intel_dp->base.base.crtc;
+		struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
 
 		/* Hardware workaround: leaving our transcoder select
 		 * set to transcoder B while it's off will prevent the
@@ -2024,7 +2103,7 @@ static void
 intel_dp_handle_test_request(struct intel_dp *intel_dp)
 {
 	/* NAK by default */
-	intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_ACK);
+	intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
 }
 
 /*
@@ -2036,16 +2115,17 @@ intel_dp_handle_test_request(struct intel_dp *intel_dp)
  * 4. Check link status on receipt of hot-plug interrupt
  */
 
-static void
+void
 intel_dp_check_link_status(struct intel_dp *intel_dp)
 {
+	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
 	u8 sink_irq_vector;
 	u8 link_status[DP_LINK_STATUS_SIZE];
 
-	if (!intel_dp->base.connectors_active)
+	if (!intel_encoder->connectors_active)
 		return;
 
-	if (WARN_ON(!intel_dp->base.base.crtc))
+	if (WARN_ON(!intel_encoder->base.crtc))
 		return;
 
 	/* Try to read receiver status if the link appears to be up */
@@ -2074,9 +2154,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
 			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
 	}
 
-	if (!intel_channel_eq_ok(intel_dp, link_status)) {
+	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
 		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
-			      drm_get_encoder_name(&intel_dp->base.base));
+			      drm_get_encoder_name(&intel_encoder->base));
 		intel_dp_start_link_train(intel_dp);
 		intel_dp_complete_link_train(intel_dp);
 	}
@@ -2125,11 +2205,12 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
 static enum drm_connector_status
 ironlake_dp_detect(struct intel_dp *intel_dp)
 {
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	enum drm_connector_status status;
 
 	/* Can't disconnect eDP, but you can close the lid... */
 	if (is_edp(intel_dp)) {
-		status = intel_panel_detect(intel_dp->base.base.dev);
+		status = intel_panel_detect(dev);
 		if (status == connector_status_unknown)
 			status = connector_status_connected;
 		return status;
@@ -2141,7 +2222,7 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
 static enum drm_connector_status
 g4x_dp_detect(struct intel_dp *intel_dp)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_device *dev = intel_dp_to_dev(intel_dp);
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t bit;
 
@@ -2168,44 +2249,45 @@ g4x_dp_detect(struct intel_dp *intel_dp)
 static struct edid *
 intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 {
-	struct intel_dp *intel_dp = intel_attached_dp(connector);
-	struct edid *edid;
-	int size;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
 
-	if (is_edp(intel_dp)) {
-		if (!intel_dp->edid)
+	/* use cached edid if we have one */
+	if (intel_connector->edid) {
+		struct edid *edid;
+		int size;
+
+		/* invalid edid */
+		if (IS_ERR(intel_connector->edid))
 			return NULL;
 
-		size = (intel_dp->edid->extensions + 1) * EDID_LENGTH;
+		size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
 		edid = kmalloc(size, GFP_KERNEL);
 		if (!edid)
 			return NULL;
 
-		memcpy(edid, intel_dp->edid, size);
+		memcpy(edid, intel_connector->edid, size);
 		return edid;
 	}
 
-	edid = drm_get_edid(connector, adapter);
-	return edid;
+	return drm_get_edid(connector, adapter);
 }
 
 static int
 intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
 {
-	struct intel_dp *intel_dp = intel_attached_dp(connector);
-	int ret;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
 
-	if (is_edp(intel_dp)) {
-		drm_mode_connector_update_edid_property(connector,
-							intel_dp->edid);
-		ret = drm_add_edid_modes(connector, intel_dp->edid);
-		drm_edid_to_eld(connector,
-				intel_dp->edid);
-		return intel_dp->edid_mode_count;
+	/* use cached edid if we have one */
+	if (intel_connector->edid) {
+		/* invalid edid */
+		if (IS_ERR(intel_connector->edid))
+			return 0;
+
+		return intel_connector_update_modes(connector,
+						    intel_connector->edid);
 	}
 
-	ret = intel_ddc_get_modes(connector, adapter);
-	return ret;
+	return intel_ddc_get_modes(connector, adapter);
 }
 
 
@@ -2219,9 +2301,12 @@ static enum drm_connector_status
 intel_dp_detect(struct drm_connector *connector, bool force)
 {
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
-	struct drm_device *dev = intel_dp->base.base.dev;
+	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+	struct intel_encoder *intel_encoder = &intel_dig_port->base;
+	struct drm_device *dev = connector->dev;
 	enum drm_connector_status status;
 	struct edid *edid = NULL;
+	char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
 
 	intel_dp->has_audio = false;
 
@@ -2230,10 +2315,9 @@ intel_dp_detect(struct drm_connector *connector, bool force)
 	else
 		status = g4x_dp_detect(intel_dp);
 
-	DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n",
-		      intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2],
-		      intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5],
-		      intel_dp->dpcd[6], intel_dp->dpcd[7]);
+	hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
+			   32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
+	DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
 
 	if (status != connector_status_connected)
 		return status;
@@ -2250,49 +2334,31 @@ intel_dp_detect(struct drm_connector *connector, bool force)
 		}
 	}
 
+	if (intel_encoder->type != INTEL_OUTPUT_EDP)
+		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
 	return connector_status_connected;
 }
 
 static int intel_dp_get_modes(struct drm_connector *connector)
 {
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
-	struct drm_device *dev = intel_dp->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct drm_device *dev = connector->dev;
 	int ret;
 
 	/* We should parse the EDID data and find out if it has an audio sink
 	 */
 
 	ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
-	if (ret) {
-		if (is_edp(intel_dp) && !intel_dp->panel_fixed_mode) {
-			struct drm_display_mode *newmode;
-			list_for_each_entry(newmode, &connector->probed_modes,
-					    head) {
-				if ((newmode->type & DRM_MODE_TYPE_PREFERRED)) {
-					intel_dp->panel_fixed_mode =
-						drm_mode_duplicate(dev, newmode);
-					break;
-				}
-			}
-		}
+	if (ret)
 		return ret;
-	}
 
-	/* if eDP has no EDID, try to use fixed panel mode from VBT */
-	if (is_edp(intel_dp)) {
-		/* initialize panel mode from VBT if available for eDP */
-		if (intel_dp->panel_fixed_mode == NULL && dev_priv->lfp_lvds_vbt_mode != NULL) {
-			intel_dp->panel_fixed_mode =
-				drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
-			if (intel_dp->panel_fixed_mode) {
-				intel_dp->panel_fixed_mode->type |=
-					DRM_MODE_TYPE_PREFERRED;
-			}
-		}
-		if (intel_dp->panel_fixed_mode) {
-			struct drm_display_mode *mode;
-			mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
+	/* if eDP has no EDID, fall back to fixed mode */
+	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
+		struct drm_display_mode *mode;
+		mode = drm_mode_duplicate(dev,
+					  intel_connector->panel.fixed_mode);
+		if (mode) {
 			drm_mode_probed_add(connector, mode);
 			return 1;
 		}
@@ -2322,10 +2388,12 @@ intel_dp_set_property(struct drm_connector *connector,
 		      uint64_t val)
 {
 	struct drm_i915_private *dev_priv = connector->dev->dev_private;
-	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
+	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
 	int ret;
 
-	ret = drm_connector_property_set_value(connector, property, val);
+	ret = drm_object_property_set_value(&connector->base, property, val);
 	if (ret)
 		return ret;
 
@@ -2358,11 +2426,27 @@ intel_dp_set_property(struct drm_connector *connector,
 		goto done;
 	}
 
+	if (is_edp(intel_dp) &&
+	    property == connector->dev->mode_config.scaling_mode_property) {
+		if (val == DRM_MODE_SCALE_NONE) {
+			DRM_DEBUG_KMS("no scaling not supported\n");
+			return -EINVAL;
+		}
+
+		if (intel_connector->panel.fitting_mode == val) {
+			/* the eDP scaling property is not changed */
+			return 0;
+		}
+		intel_connector->panel.fitting_mode = val;
+
+		goto done;
+	}
+
 	return -EINVAL;
 
 done:
-	if (intel_dp->base.base.crtc) {
-		struct drm_crtc *crtc = intel_dp->base.base.crtc;
+	if (intel_encoder->base.crtc) {
+		struct drm_crtc *crtc = intel_encoder->base.crtc;
 		intel_set_mode(crtc, &crtc->mode,
 			       crtc->x, crtc->y, crtc->fb);
 	}
@@ -2375,27 +2459,33 @@ intel_dp_destroy(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
+	struct intel_connector *intel_connector = to_intel_connector(connector);
 
-	if (is_edp(intel_dp))
+	if (!IS_ERR_OR_NULL(intel_connector->edid))
+		kfree(intel_connector->edid);
+
+	if (is_edp(intel_dp)) {
 		intel_panel_destroy_backlight(dev);
+		intel_panel_fini(&intel_connector->panel);
+	}
 
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
 }
 
-static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
+void intel_dp_encoder_destroy(struct drm_encoder *encoder)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
 
 	i2c_del_adapter(&intel_dp->adapter);
 	drm_encoder_cleanup(encoder);
 	if (is_edp(intel_dp)) {
-		kfree(intel_dp->edid);
 		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
 		ironlake_panel_vdd_off_sync(intel_dp);
 	}
-	kfree(intel_dp);
+	kfree(intel_dig_port);
 }
 
 static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
@@ -2425,7 +2515,7 @@ static const struct drm_encoder_funcs intel_dp_enc_funcs = {
 static void
 intel_dp_hot_plug(struct intel_encoder *intel_encoder)
 {
-	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
+	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
 
 	intel_dp_check_link_status(intel_dp);
 }
@@ -2435,13 +2525,14 @@ int
 intel_trans_dp_port_sel(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct intel_encoder *encoder;
+	struct intel_encoder *intel_encoder;
+	struct intel_dp *intel_dp;
 
-	for_each_encoder_on_crtc(dev, crtc, encoder) {
-		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+		intel_dp = enc_to_intel_dp(&intel_encoder->base);
 
-		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
-		    intel_dp->base.type == INTEL_OUTPUT_EDP)
+		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+		    intel_encoder->type == INTEL_OUTPUT_EDP)
 			return intel_dp->output_reg;
 	}
 
@@ -2471,78 +2562,191 @@ bool intel_dpd_is_edp(struct drm_device *dev)
 static void
 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
 {
+	struct intel_connector *intel_connector = to_intel_connector(connector);
+
 	intel_attach_force_audio_property(connector);
 	intel_attach_broadcast_rgb_property(connector);
+
+	if (is_edp(intel_dp)) {
+		drm_mode_create_scaling_mode_property(connector->dev);
+		drm_object_attach_property(
+			&connector->base,
+			connector->dev->mode_config.scaling_mode_property,
+			DRM_MODE_SCALE_ASPECT);
+		intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
+	}
+}
+
+static void
+intel_dp_init_panel_power_sequencer(struct drm_device *dev,
+				    struct intel_dp *intel_dp)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct edp_power_seq cur, vbt, spec, final;
+	u32 pp_on, pp_off, pp_div, pp;
+
+	/* Workaround: Need to write PP_CONTROL with the unlock key as
+	 * the very first thing. */
+	pp = ironlake_get_pp_control(dev_priv);
+	I915_WRITE(PCH_PP_CONTROL, pp);
+
+	pp_on = I915_READ(PCH_PP_ON_DELAYS);
+	pp_off = I915_READ(PCH_PP_OFF_DELAYS);
+	pp_div = I915_READ(PCH_PP_DIVISOR);
+
+	/* Pull timing values out of registers */
+	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
+		PANEL_POWER_UP_DELAY_SHIFT;
+
+	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
+		PANEL_LIGHT_ON_DELAY_SHIFT;
+
+	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
+		PANEL_LIGHT_OFF_DELAY_SHIFT;
+
+	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
+		PANEL_POWER_DOWN_DELAY_SHIFT;
+
+	cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
+		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
+
+	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
+
+	vbt = dev_priv->edp.pps;
+
+	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
+	 * our hw here, which are all in 100usec. */
+	spec.t1_t3 = 210 * 10;
+	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
+	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
+	spec.t10 = 500 * 10;
+	/* This one is special and actually in units of 100ms, but zero
+	 * based in the hw (so we need to add 100 ms). But the sw vbt
+	 * table multiplies it with 1000 to make it in units of 100usec,
+	 * too. */
+	spec.t11_t12 = (510 + 100) * 10;
+
+	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
+
+	/* Use the max of the register settings and vbt. If both are
+	 * unset, fall back to the spec limits. */
+#define assign_final(field)	final.field = (max(cur.field, vbt.field) == 0 ? \
+				       spec.field : \
+				       max(cur.field, vbt.field))
+	assign_final(t1_t3);
+	assign_final(t8);
+	assign_final(t9);
+	assign_final(t10);
+	assign_final(t11_t12);
+#undef assign_final
+
+#define get_delay(field)	(DIV_ROUND_UP(final.field, 10))
+	intel_dp->panel_power_up_delay = get_delay(t1_t3);
+	intel_dp->backlight_on_delay = get_delay(t8);
+	intel_dp->backlight_off_delay = get_delay(t9);
+	intel_dp->panel_power_down_delay = get_delay(t10);
+	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
+#undef get_delay
+
+	/* And finally store the new values in the power sequencer. */
+	pp_on = (final.t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
+		(final.t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
+	pp_off = (final.t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
+		 (final.t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
+	/* Compute the divisor for the pp clock, simply match the Bspec
+	 * formula. */
+	pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1)
+			<< PP_REFERENCE_DIVIDER_SHIFT;
+	pp_div |= (DIV_ROUND_UP(final.t11_t12, 1000)
+			<< PANEL_POWER_CYCLE_DELAY_SHIFT);
+
+	/* Haswell doesn't have any port selection bits for the panel
+	 * power sequencer any more. */
+	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
+		if (is_cpu_edp(intel_dp))
+			pp_on |= PANEL_POWER_PORT_DP_A;
+		else
+			pp_on |= PANEL_POWER_PORT_DP_D;
+	}
+
+	I915_WRITE(PCH_PP_ON_DELAYS, pp_on);
+	I915_WRITE(PCH_PP_OFF_DELAYS, pp_off);
+	I915_WRITE(PCH_PP_DIVISOR, pp_div);
+
+
+	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
+		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
+		      intel_dp->panel_power_cycle_delay);
+
+	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
+		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+
+	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
+		      I915_READ(PCH_PP_ON_DELAYS),
+		      I915_READ(PCH_PP_OFF_DELAYS),
+		      I915_READ(PCH_PP_DIVISOR));
 }
 
 void
-intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
+intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+			struct intel_connector *intel_connector)
 {
+	struct drm_connector *connector = &intel_connector->base;
+	struct intel_dp *intel_dp = &intel_dig_port->dp;
+	struct intel_encoder *intel_encoder = &intel_dig_port->base;
+	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_connector *connector;
-	struct intel_dp *intel_dp;
-	struct intel_encoder *intel_encoder;
-	struct intel_connector *intel_connector;
+	struct drm_display_mode *fixed_mode = NULL;
+	enum port port = intel_dig_port->port;
 	const char *name = NULL;
 	int type;
 
-	intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL);
-	if (!intel_dp)
-		return;
-
-	intel_dp->output_reg = output_reg;
-	intel_dp->port = port;
 	/* Preserve the current hw state. */
 	intel_dp->DP = I915_READ(intel_dp->output_reg);
+	intel_dp->attached_connector = intel_connector;
 
-	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
-	if (!intel_connector) {
-		kfree(intel_dp);
-		return;
-	}
-	intel_encoder = &intel_dp->base;
-
-	if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D)
+	if (HAS_PCH_SPLIT(dev) && port == PORT_D)
 		if (intel_dpd_is_edp(dev))
 			intel_dp->is_pch_edp = true;
 
-	if (output_reg == DP_A || is_pch_edp(intel_dp)) {
+	/*
+	 * FIXME : We need to initialize built-in panels before external panels.
+	 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
+	 */
+	if (IS_VALLEYVIEW(dev) && port == PORT_C) {
+		type = DRM_MODE_CONNECTOR_eDP;
+		intel_encoder->type = INTEL_OUTPUT_EDP;
+	} else if (port == PORT_A || is_pch_edp(intel_dp)) {
 		type = DRM_MODE_CONNECTOR_eDP;
 		intel_encoder->type = INTEL_OUTPUT_EDP;
 	} else {
+		/* The intel_encoder->type value may be INTEL_OUTPUT_UNKNOWN for
+		 * DDI or INTEL_OUTPUT_DISPLAYPORT for the older gens, so don't
+		 * rewrite it.
+		 */
 		type = DRM_MODE_CONNECTOR_DisplayPort;
-		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
 	}
 
-	connector = &intel_connector->base;
 	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
 	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
 
 	connector->polled = DRM_CONNECTOR_POLL_HPD;
-
-	intel_encoder->cloneable = false;
-
-	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
-			  ironlake_panel_vdd_work);
-
-	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
-
 	connector->interlace_allowed = true;
 	connector->doublescan_allowed = 0;
 
-	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
-			 DRM_MODE_ENCODER_TMDS);
-	drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
+	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
+			  ironlake_panel_vdd_work);
 
 	intel_connector_attach_encoder(intel_connector, intel_encoder);
 	drm_sysfs_connector_add(connector);
 
-	intel_encoder->enable = intel_enable_dp;
-	intel_encoder->pre_enable = intel_pre_enable_dp;
-	intel_encoder->disable = intel_disable_dp;
-	intel_encoder->post_disable = intel_post_disable_dp;
-	intel_encoder->get_hw_state = intel_dp_get_hw_state;
-	intel_connector->get_hw_state = intel_connector_get_hw_state;
+	if (IS_HASWELL(dev))
+		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
+	else
+		intel_connector->get_hw_state = intel_connector_get_hw_state;
+
 
 	/* Set up the DDC bus. */
 	switch (port) {
@@ -2566,66 +2770,15 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
2566 break; 2770 break;
2567 } 2771 }
2568 2772
2569 /* Cache some DPCD data in the eDP case */ 2773 if (is_edp(intel_dp))
2570 if (is_edp(intel_dp)) { 2774 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2571 struct edp_power_seq cur, vbt;
2572 u32 pp_on, pp_off, pp_div;
2573
2574 pp_on = I915_READ(PCH_PP_ON_DELAYS);
2575 pp_off = I915_READ(PCH_PP_OFF_DELAYS);
2576 pp_div = I915_READ(PCH_PP_DIVISOR);
2577
2578 if (!pp_on || !pp_off || !pp_div) {
2579 DRM_INFO("bad panel power sequencing delays, disabling panel\n");
2580 intel_dp_encoder_destroy(&intel_dp->base.base);
2581 intel_dp_destroy(&intel_connector->base);
2582 return;
2583 }
2584
2585 /* Pull timing values out of registers */
2586 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
2587 PANEL_POWER_UP_DELAY_SHIFT;
2588
2589 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
2590 PANEL_LIGHT_ON_DELAY_SHIFT;
2591
2592 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
2593 PANEL_LIGHT_OFF_DELAY_SHIFT;
2594
2595 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
2596 PANEL_POWER_DOWN_DELAY_SHIFT;
2597
2598 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
2599 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
2600
2601 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
2602 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
2603
2604 vbt = dev_priv->edp.pps;
2605
2606 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
2607 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
2608
2609#define get_delay(field) ((max(cur.field, vbt.field) + 9) / 10)
2610
2611 intel_dp->panel_power_up_delay = get_delay(t1_t3);
2612 intel_dp->backlight_on_delay = get_delay(t8);
2613 intel_dp->backlight_off_delay = get_delay(t9);
2614 intel_dp->panel_power_down_delay = get_delay(t10);
2615 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
2616
2617 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
2618 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
2619 intel_dp->panel_power_cycle_delay);
2620
2621 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
2622 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
2623 }
2624 2775
2625 intel_dp_i2c_init(intel_dp, intel_connector, name); 2776 intel_dp_i2c_init(intel_dp, intel_connector, name);
2626 2777
2778 /* Cache DPCD and EDID for eDP. */
2627 if (is_edp(intel_dp)) { 2779 if (is_edp(intel_dp)) {
2628 bool ret; 2780 bool ret;
2781 struct drm_display_mode *scan;
2629 struct edid *edid; 2782 struct edid *edid;
2630 2783
2631 ironlake_edp_panel_vdd_on(intel_dp); 2784 ironlake_edp_panel_vdd_on(intel_dp);
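The get_delay() macro removed in the hunk above deserves a note: the PPS registers appear to report delays in 100 us units while intel_dp stores milliseconds, so the conversion takes the larger of the register and VBT values and divides by 10 rounding up, ensuring the driver never waits less than the panel requires. A minimal sketch of the arithmetic, with a hypothetical helper name:

	/* Sketch of the get_delay() conversion, assuming the PPS fields
	 * are in 100 us units as the register layout suggests. */
	static int example_pps_delay_ms(int cur, int vbt)
	{
		int units = cur > vbt ? cur : vbt;	/* max(cur, vbt) */

		return (units + 9) / 10;		/* ceil(units / 10) */
	}
	/* example_pps_delay_ms(35, 20) == 4: 3.5 ms rounds up to 4 ms */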
@@ -2640,29 +2793,47 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
2640 } else { 2793 } else {
2641 /* if this fails, presume the device is a ghost */ 2794 /* if this fails, presume the device is a ghost */
2642 DRM_INFO("failed to retrieve link info, disabling eDP\n"); 2795 DRM_INFO("failed to retrieve link info, disabling eDP\n");
2643 intel_dp_encoder_destroy(&intel_dp->base.base); 2796 intel_dp_encoder_destroy(&intel_encoder->base);
2644 intel_dp_destroy(&intel_connector->base); 2797 intel_dp_destroy(connector);
2645 return; 2798 return;
2646 } 2799 }
2647 2800
2648 ironlake_edp_panel_vdd_on(intel_dp); 2801 ironlake_edp_panel_vdd_on(intel_dp);
2649 edid = drm_get_edid(connector, &intel_dp->adapter); 2802 edid = drm_get_edid(connector, &intel_dp->adapter);
2650 if (edid) { 2803 if (edid) {
2651 drm_mode_connector_update_edid_property(connector, 2804 if (drm_add_edid_modes(connector, edid)) {
2652 edid); 2805 drm_mode_connector_update_edid_property(connector, edid);
2653 intel_dp->edid_mode_count = 2806 drm_edid_to_eld(connector, edid);
2654 drm_add_edid_modes(connector, edid); 2807 } else {
2655 drm_edid_to_eld(connector, edid); 2808 kfree(edid);
2656 intel_dp->edid = edid; 2809 edid = ERR_PTR(-EINVAL);
2810 }
2811 } else {
2812 edid = ERR_PTR(-ENOENT);
2813 }
2814 intel_connector->edid = edid;
2815
2816 /* prefer fixed mode from EDID if available */
2817 list_for_each_entry(scan, &connector->probed_modes, head) {
2818 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
2819 fixed_mode = drm_mode_duplicate(dev, scan);
2820 break;
2821 }
2657 } 2822 }
2823
2824 /* fall back to VBT if available for eDP */
2825 if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
2826 fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
2827 if (fixed_mode)
2828 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
2829 }
2830
2658 ironlake_edp_panel_vdd_off(intel_dp, false); 2831 ironlake_edp_panel_vdd_off(intel_dp, false);
2659 } 2832 }
2660 2833
2661 intel_encoder->hot_plug = intel_dp_hot_plug;
2662
2663 if (is_edp(intel_dp)) { 2834 if (is_edp(intel_dp)) {
2664 dev_priv->int_edp_connector = connector; 2835 intel_panel_init(&intel_connector->panel, fixed_mode);
2665 intel_panel_setup_backlight(dev); 2836 intel_panel_setup_backlight(connector);
2666 } 2837 }
2667 2838
2668 intel_dp_add_properties(intel_dp, connector); 2839 intel_dp_add_properties(intel_dp, connector);
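Note the caching convention introduced above: intel_connector->edid now holds either a real struct edid, ERR_PTR(-ENOENT) when probing returned nothing, or ERR_PTR(-EINVAL) when the blob failed to parse, leaving NULL to mean "never probed". A sketch of how a consumer can distinguish the cases (the helper name is illustrative, not from this patch):

	#include <linux/err.h>

	/* Sketch: return the cached EDID only when it is valid; both the
	 * "absent" and "invalid" ERR_PTR markers fall through to NULL. */
	static struct edid *example_cached_edid(struct intel_connector *connector)
	{
		if (IS_ERR_OR_NULL(connector->edid))
			return NULL;
		return connector->edid;
	}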
@@ -2676,3 +2847,45 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
2676 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); 2847 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
2677 } 2848 }
2678} 2849}
2850
2851void
2852intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
2853{
2854 struct intel_digital_port *intel_dig_port;
2855 struct intel_encoder *intel_encoder;
2856 struct drm_encoder *encoder;
2857 struct intel_connector *intel_connector;
2858
2859 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
2860 if (!intel_dig_port)
2861 return;
2862
2863 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
2864 if (!intel_connector) {
2865 kfree(intel_dig_port);
2866 return;
2867 }
2868
2869 intel_encoder = &intel_dig_port->base;
2870 encoder = &intel_encoder->base;
2871
2872 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
2873 DRM_MODE_ENCODER_TMDS);
2874 drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
2875
2876 intel_encoder->enable = intel_enable_dp;
2877 intel_encoder->pre_enable = intel_pre_enable_dp;
2878 intel_encoder->disable = intel_disable_dp;
2879 intel_encoder->post_disable = intel_post_disable_dp;
2880 intel_encoder->get_hw_state = intel_dp_get_hw_state;
2881
2882 intel_dig_port->port = port;
2883 intel_dig_port->dp.output_reg = output_reg;
2884
2885 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
2886 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
2887 intel_encoder->cloneable = false;
2888 intel_encoder->hot_plug = intel_dp_hot_plug;
2889
2890 intel_dp_init_connector(intel_dig_port, intel_connector);
2891}
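With this split, intel_dp_init() keeps only the allocation and encoder wiring, while everything connector-facing lives in intel_dp_init_connector(); a port driver that manages its own encoder (the Haswell DDI code is the intended user) can reuse just the connector half. A hedged sketch of such a caller, with a hypothetical function name:

	/* Sketch: a caller that has already wired its own encoder hooks
	 * and reuses only the connector half of the DP init. */
	static void example_reuse_dp_connector(struct intel_digital_port *dig_port,
					       struct intel_connector *connector,
					       int output_reg, enum port port)
	{
		dig_port->port = port;
		dig_port->dp.output_reg = output_reg;
		/* enable/disable/get_hw_state set by the owning encoder code */
		intel_dp_init_connector(dig_port, connector);
	}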
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index fe7142502f43..8a1bd4a3ad0d 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -94,6 +94,7 @@
94#define INTEL_OUTPUT_HDMI 6 94#define INTEL_OUTPUT_HDMI 6
95#define INTEL_OUTPUT_DISPLAYPORT 7 95#define INTEL_OUTPUT_DISPLAYPORT 7
96#define INTEL_OUTPUT_EDP 8 96#define INTEL_OUTPUT_EDP 8
97#define INTEL_OUTPUT_UNKNOWN 9
97 98
98#define INTEL_DVO_CHIP_NONE 0 99#define INTEL_DVO_CHIP_NONE 0
99#define INTEL_DVO_CHIP_LVDS 1 100#define INTEL_DVO_CHIP_LVDS 1
@@ -163,6 +164,11 @@ struct intel_encoder {
163 int crtc_mask; 164 int crtc_mask;
164}; 165};
165 166
167struct intel_panel {
168 struct drm_display_mode *fixed_mode;
169 int fitting_mode;
170};
171
166struct intel_connector { 172struct intel_connector {
167 struct drm_connector base; 173 struct drm_connector base;
168 /* 174 /*
@@ -179,12 +185,19 @@ struct intel_connector {
179 /* Reads out the current hw, returning true if the connector is enabled 185 /* Reads out the current hw, returning true if the connector is enabled
180 * and active (i.e. dpms ON state). */ 186 * and active (i.e. dpms ON state). */
181 bool (*get_hw_state)(struct intel_connector *); 187 bool (*get_hw_state)(struct intel_connector *);
188
189 /* Panel info for eDP and LVDS */
190 struct intel_panel panel;
191
192 /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
193 struct edid *edid;
182}; 194};
183 195
184struct intel_crtc { 196struct intel_crtc {
185 struct drm_crtc base; 197 struct drm_crtc base;
186 enum pipe pipe; 198 enum pipe pipe;
187 enum plane plane; 199 enum plane plane;
200 enum transcoder cpu_transcoder;
188 u8 lut_r[256], lut_g[256], lut_b[256]; 201 u8 lut_r[256], lut_g[256], lut_b[256];
189 /* 202 /*
190 * Whether the crtc and the connected output pipeline is active. Implies 203 * Whether the crtc and the connected output pipeline is active. Implies
@@ -198,6 +211,8 @@ struct intel_crtc {
198 struct intel_unpin_work *unpin_work; 211 struct intel_unpin_work *unpin_work;
199 int fdi_lanes; 212 int fdi_lanes;
200 213
214 atomic_t unpin_work_count;
215
201 /* Display surface base address adjustment for pageflips. Note that on 216 /* Display surface base address adjustment for pageflips. Note that on
202 * gen4+ this only adjusts up to a tile, offsets within a tile are 217 * gen4+ this only adjusts up to a tile, offsets within a tile are
203 * handled in the hw itself (with the TILEOFF register). */ 218 * handled in the hw itself (with the TILEOFF register). */
@@ -212,12 +227,14 @@ struct intel_crtc {
212 227
213 /* We can share PLLs across outputs if the timings match */ 228 /* We can share PLLs across outputs if the timings match */
214 struct intel_pch_pll *pch_pll; 229 struct intel_pch_pll *pch_pll;
230 uint32_t ddi_pll_sel;
215}; 231};
216 232
217struct intel_plane { 233struct intel_plane {
218 struct drm_plane base; 234 struct drm_plane base;
219 enum pipe pipe; 235 enum pipe pipe;
220 struct drm_i915_gem_object *obj; 236 struct drm_i915_gem_object *obj;
237 bool can_scale;
221 int max_downscale; 238 int max_downscale;
222 u32 lut_r[1024], lut_g[1024], lut_b[1024]; 239 u32 lut_r[1024], lut_g[1024], lut_b[1024];
223 void (*update_plane)(struct drm_plane *plane, 240 void (*update_plane)(struct drm_plane *plane,
@@ -317,10 +334,8 @@ struct dip_infoframe {
317} __attribute__((packed)); 334} __attribute__((packed));
318 335
319struct intel_hdmi { 336struct intel_hdmi {
320 struct intel_encoder base;
321 u32 sdvox_reg; 337 u32 sdvox_reg;
322 int ddc_bus; 338 int ddc_bus;
323 int ddi_port;
324 uint32_t color_range; 339 uint32_t color_range;
325 bool has_hdmi_sink; 340 bool has_hdmi_sink;
326 bool has_audio; 341 bool has_audio;
@@ -331,18 +346,15 @@ struct intel_hdmi {
331 struct drm_display_mode *adjusted_mode); 346 struct drm_display_mode *adjusted_mode);
332}; 347};
333 348
334#define DP_RECEIVER_CAP_SIZE 0xf
335#define DP_MAX_DOWNSTREAM_PORTS 0x10 349#define DP_MAX_DOWNSTREAM_PORTS 0x10
336#define DP_LINK_CONFIGURATION_SIZE 9 350#define DP_LINK_CONFIGURATION_SIZE 9
337 351
338struct intel_dp { 352struct intel_dp {
339 struct intel_encoder base;
340 uint32_t output_reg; 353 uint32_t output_reg;
341 uint32_t DP; 354 uint32_t DP;
342 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE]; 355 uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
343 bool has_audio; 356 bool has_audio;
344 enum hdmi_force_audio force_audio; 357 enum hdmi_force_audio force_audio;
345 enum port port;
346 uint32_t color_range; 358 uint32_t color_range;
347 uint8_t link_bw; 359 uint8_t link_bw;
348 uint8_t lane_count; 360 uint8_t lane_count;
@@ -357,11 +369,16 @@ struct intel_dp {
357 int panel_power_cycle_delay; 369 int panel_power_cycle_delay;
358 int backlight_on_delay; 370 int backlight_on_delay;
359 int backlight_off_delay; 371 int backlight_off_delay;
360 struct drm_display_mode *panel_fixed_mode; /* for eDP */
361 struct delayed_work panel_vdd_work; 372 struct delayed_work panel_vdd_work;
362 bool want_panel_vdd; 373 bool want_panel_vdd;
363 struct edid *edid; /* cached EDID for eDP */ 374 struct intel_connector *attached_connector;
364 int edid_mode_count; 375};
376
377struct intel_digital_port {
378 struct intel_encoder base;
379 enum port port;
380 struct intel_dp dp;
381 struct intel_hdmi hdmi;
365}; 382};
366 383
367static inline struct drm_crtc * 384static inline struct drm_crtc *
@@ -380,11 +397,14 @@ intel_get_crtc_for_plane(struct drm_device *dev, int plane)
380 397
381struct intel_unpin_work { 398struct intel_unpin_work {
382 struct work_struct work; 399 struct work_struct work;
383 struct drm_device *dev; 400 struct drm_crtc *crtc;
384 struct drm_i915_gem_object *old_fb_obj; 401 struct drm_i915_gem_object *old_fb_obj;
385 struct drm_i915_gem_object *pending_flip_obj; 402 struct drm_i915_gem_object *pending_flip_obj;
386 struct drm_pending_vblank_event *event; 403 struct drm_pending_vblank_event *event;
387 int pending; 404 atomic_t pending;
405#define INTEL_FLIP_INACTIVE 0
406#define INTEL_FLIP_PENDING 1
407#define INTEL_FLIP_COMPLETE 2
388 bool enable_stall_check; 408 bool enable_stall_check;
389}; 409};
390 410
@@ -395,6 +415,8 @@ struct intel_fbc_work {
395 int interval; 415 int interval;
396}; 416};
397 417
418int intel_pch_rawclk(struct drm_device *dev);
419
398int intel_connector_update_modes(struct drm_connector *connector, 420int intel_connector_update_modes(struct drm_connector *connector,
399 struct edid *edid); 421 struct edid *edid);
400int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); 422int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
@@ -405,7 +427,12 @@ extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector)
405extern void intel_crt_init(struct drm_device *dev); 427extern void intel_crt_init(struct drm_device *dev);
406extern void intel_hdmi_init(struct drm_device *dev, 428extern void intel_hdmi_init(struct drm_device *dev,
407 int sdvox_reg, enum port port); 429 int sdvox_reg, enum port port);
430extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
431 struct intel_connector *intel_connector);
408extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); 432extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
433extern bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
434 const struct drm_display_mode *mode,
435 struct drm_display_mode *adjusted_mode);
409extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); 436extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
410extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, 437extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
411 bool is_sdvob); 438 bool is_sdvob);
@@ -418,10 +445,27 @@ extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
418extern bool intel_lvds_init(struct drm_device *dev); 445extern bool intel_lvds_init(struct drm_device *dev);
419extern void intel_dp_init(struct drm_device *dev, int output_reg, 446extern void intel_dp_init(struct drm_device *dev, int output_reg,
420 enum port port); 447 enum port port);
448extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
449 struct intel_connector *intel_connector);
421void 450void
422intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, 451intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
423 struct drm_display_mode *adjusted_mode); 452 struct drm_display_mode *adjusted_mode);
453extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
454extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
455extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
456extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
457extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
458extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
459extern bool intel_dp_mode_fixup(struct drm_encoder *encoder,
460 const struct drm_display_mode *mode,
461 struct drm_display_mode *adjusted_mode);
424extern bool intel_dpd_is_edp(struct drm_device *dev); 462extern bool intel_dpd_is_edp(struct drm_device *dev);
463extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
464extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
465extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
466extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
467extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
468extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
425extern void intel_edp_link_config(struct intel_encoder *, int *, int *); 469extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
426extern int intel_edp_target_clock(struct intel_encoder *, 470extern int intel_edp_target_clock(struct intel_encoder *,
427 struct drm_display_mode *mode); 471 struct drm_display_mode *mode);
@@ -431,6 +475,10 @@ extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
431 enum plane plane); 475 enum plane plane);
432 476
433/* intel_panel.c */ 477/* intel_panel.c */
478extern int intel_panel_init(struct intel_panel *panel,
479 struct drm_display_mode *fixed_mode);
480extern void intel_panel_fini(struct intel_panel *panel);
481
434extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, 482extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
435 struct drm_display_mode *adjusted_mode); 483 struct drm_display_mode *adjusted_mode);
436extern void intel_pch_panel_fitting(struct drm_device *dev, 484extern void intel_pch_panel_fitting(struct drm_device *dev,
@@ -439,7 +487,7 @@ extern void intel_pch_panel_fitting(struct drm_device *dev,
439 struct drm_display_mode *adjusted_mode); 487 struct drm_display_mode *adjusted_mode);
440extern u32 intel_panel_get_max_backlight(struct drm_device *dev); 488extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
441extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); 489extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
442extern int intel_panel_setup_backlight(struct drm_device *dev); 490extern int intel_panel_setup_backlight(struct drm_connector *connector);
443extern void intel_panel_enable_backlight(struct drm_device *dev, 491extern void intel_panel_enable_backlight(struct drm_device *dev,
444 enum pipe pipe); 492 enum pipe pipe);
445extern void intel_panel_disable_backlight(struct drm_device *dev); 493extern void intel_panel_disable_backlight(struct drm_device *dev);
@@ -473,6 +521,31 @@ static inline struct intel_encoder *intel_attached_encoder(struct drm_connector
473 return to_intel_connector(connector)->encoder; 521 return to_intel_connector(connector)->encoder;
474} 522}
475 523
524static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
525{
526 struct intel_digital_port *intel_dig_port =
527 container_of(encoder, struct intel_digital_port, base.base);
528 return &intel_dig_port->dp;
529}
530
531static inline struct intel_digital_port *
532enc_to_dig_port(struct drm_encoder *encoder)
533{
534 return container_of(encoder, struct intel_digital_port, base.base);
535}
536
537static inline struct intel_digital_port *
538dp_to_dig_port(struct intel_dp *intel_dp)
539{
540 return container_of(intel_dp, struct intel_digital_port, dp);
541}
542
543static inline struct intel_digital_port *
544hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
545{
546 return container_of(intel_hdmi, struct intel_digital_port, hdmi);
547}
548
476extern void intel_connector_attach_encoder(struct intel_connector *connector, 549extern void intel_connector_attach_encoder(struct intel_connector *connector,
477 struct intel_encoder *encoder); 550 struct intel_encoder *encoder);
478extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); 551extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
@@ -481,8 +554,12 @@ extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
481 struct drm_crtc *crtc); 554 struct drm_crtc *crtc);
482int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 555int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
483 struct drm_file *file_priv); 556 struct drm_file *file_priv);
557extern enum transcoder
558intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
559 enum pipe pipe);
484extern void intel_wait_for_vblank(struct drm_device *dev, int pipe); 560extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
485extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe); 561extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
562extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
486 563
487struct intel_load_detect_pipe { 564struct intel_load_detect_pipe {
488 struct drm_framebuffer *release_fb; 565 struct drm_framebuffer *release_fb;
@@ -550,6 +627,10 @@ extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
550extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe, 627extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
551 struct drm_display_mode *mode); 628 struct drm_display_mode *mode);
552 629
630extern unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
631 unsigned int bpp,
632 unsigned int pitch);
633
553extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data, 634extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
554 struct drm_file *file_priv); 635 struct drm_file *file_priv);
555extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data, 636extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
@@ -573,12 +654,22 @@ extern void intel_disable_gt_powersave(struct drm_device *dev);
573extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv); 654extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
574extern void ironlake_teardown_rc6(struct drm_device *dev); 655extern void ironlake_teardown_rc6(struct drm_device *dev);
575 656
576extern void intel_enable_ddi(struct intel_encoder *encoder);
577extern void intel_disable_ddi(struct intel_encoder *encoder);
578extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder, 657extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
579 enum pipe *pipe); 658 enum pipe *pipe);
580extern void intel_ddi_mode_set(struct drm_encoder *encoder, 659extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
581 struct drm_display_mode *mode, 660extern void intel_ddi_pll_init(struct drm_device *dev);
582 struct drm_display_mode *adjusted_mode); 661extern void intel_ddi_enable_pipe_func(struct drm_crtc *crtc);
662extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
663 enum transcoder cpu_transcoder);
664extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
665extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
666extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
667extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock);
668extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
669extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
670extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
671extern bool
672intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
673extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
583 674
584#endif /* __INTEL_DRV_H__ */ 675#endif /* __INTEL_DRV_H__ */
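Since intel_dp and intel_hdmi no longer embed an intel_encoder, the new inline helpers above recover the surrounding intel_digital_port via container_of(), and everything else follows from there. A small usage sketch (the function name is illustrative):

	/* Sketch: hopping between the objects with the new accessors. */
	static enum port example_port_of(struct drm_encoder *encoder)
	{
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
		struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

		/* dig_port == enc_to_dig_port(encoder); both resolve the
		 * same containing structure. */
		return dig_port->port;
	}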
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 9ba0aaed7ee8..2ee9821b9d93 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -36,10 +36,15 @@
36#include <drm/i915_drm.h> 36#include <drm/i915_drm.h>
37#include "i915_drv.h" 37#include "i915_drv.h"
38 38
39static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
40{
41 return hdmi_to_dig_port(intel_hdmi)->base.base.dev;
42}
43
39static void 44static void
40assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) 45assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
41{ 46{
42 struct drm_device *dev = intel_hdmi->base.base.dev; 47 struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
43 struct drm_i915_private *dev_priv = dev->dev_private; 48 struct drm_i915_private *dev_priv = dev->dev_private;
44 uint32_t enabled_bits; 49 uint32_t enabled_bits;
45 50
@@ -51,13 +56,14 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
51 56
52struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) 57struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
53{ 58{
54 return container_of(encoder, struct intel_hdmi, base.base); 59 struct intel_digital_port *intel_dig_port =
60 container_of(encoder, struct intel_digital_port, base.base);
61 return &intel_dig_port->hdmi;
55} 62}
56 63
57static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector) 64static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
58{ 65{
59 return container_of(intel_attached_encoder(connector), 66 return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
60 struct intel_hdmi, base);
61} 67}
62 68
63void intel_dip_infoframe_csum(struct dip_infoframe *frame) 69void intel_dip_infoframe_csum(struct dip_infoframe *frame)
@@ -334,6 +340,8 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
334 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) 340 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
335 avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2; 341 avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
336 342
343 avi_if.body.avi.VIC = drm_mode_cea_vic(adjusted_mode);
344
337 intel_set_infoframe(encoder, &avi_if); 345 intel_set_infoframe(encoder, &avi_if);
338} 346}
339 347
@@ -754,16 +762,16 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector,
754 return MODE_OK; 762 return MODE_OK;
755} 763}
756 764
757static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, 765bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
758 const struct drm_display_mode *mode, 766 const struct drm_display_mode *mode,
759 struct drm_display_mode *adjusted_mode) 767 struct drm_display_mode *adjusted_mode)
760{ 768{
761 return true; 769 return true;
762} 770}
763 771
764static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi) 772static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi)
765{ 773{
766 struct drm_device *dev = intel_hdmi->base.base.dev; 774 struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
767 struct drm_i915_private *dev_priv = dev->dev_private; 775 struct drm_i915_private *dev_priv = dev->dev_private;
768 uint32_t bit; 776 uint32_t bit;
769 777
@@ -786,6 +794,9 @@ static enum drm_connector_status
786intel_hdmi_detect(struct drm_connector *connector, bool force) 794intel_hdmi_detect(struct drm_connector *connector, bool force)
787{ 795{
788 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); 796 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
797 struct intel_digital_port *intel_dig_port =
798 hdmi_to_dig_port(intel_hdmi);
799 struct intel_encoder *intel_encoder = &intel_dig_port->base;
789 struct drm_i915_private *dev_priv = connector->dev->dev_private; 800 struct drm_i915_private *dev_priv = connector->dev->dev_private;
790 struct edid *edid; 801 struct edid *edid;
791 enum drm_connector_status status = connector_status_disconnected; 802 enum drm_connector_status status = connector_status_disconnected;
@@ -814,6 +825,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
814 if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO) 825 if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
815 intel_hdmi->has_audio = 826 intel_hdmi->has_audio =
816 (intel_hdmi->force_audio == HDMI_AUDIO_ON); 827 (intel_hdmi->force_audio == HDMI_AUDIO_ON);
828 intel_encoder->type = INTEL_OUTPUT_HDMI;
817 } 829 }
818 830
819 return status; 831 return status;
@@ -859,10 +871,12 @@ intel_hdmi_set_property(struct drm_connector *connector,
859 uint64_t val) 871 uint64_t val)
860{ 872{
861 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); 873 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
874 struct intel_digital_port *intel_dig_port =
875 hdmi_to_dig_port(intel_hdmi);
862 struct drm_i915_private *dev_priv = connector->dev->dev_private; 876 struct drm_i915_private *dev_priv = connector->dev->dev_private;
863 int ret; 877 int ret;
864 878
865 ret = drm_connector_property_set_value(connector, property, val); 879 ret = drm_object_property_set_value(&connector->base, property, val);
866 if (ret) 880 if (ret)
867 return ret; 881 return ret;
868 882
@@ -898,8 +912,8 @@ intel_hdmi_set_property(struct drm_connector *connector,
898 return -EINVAL; 912 return -EINVAL;
899 913
900done: 914done:
901 if (intel_hdmi->base.base.crtc) { 915 if (intel_dig_port->base.base.crtc) {
902 struct drm_crtc *crtc = intel_hdmi->base.base.crtc; 916 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
903 intel_set_mode(crtc, &crtc->mode, 917 intel_set_mode(crtc, &crtc->mode,
904 crtc->x, crtc->y, crtc->fb); 918 crtc->x, crtc->y, crtc->fb);
905 } 919 }
@@ -914,12 +928,6 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
914 kfree(connector); 928 kfree(connector);
915} 929}
916 930
917static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
918 .mode_fixup = intel_hdmi_mode_fixup,
919 .mode_set = intel_ddi_mode_set,
920 .disable = intel_encoder_noop,
921};
922
923static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { 931static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
924 .mode_fixup = intel_hdmi_mode_fixup, 932 .mode_fixup = intel_hdmi_mode_fixup,
925 .mode_set = intel_hdmi_mode_set, 933 .mode_set = intel_hdmi_mode_set,
@@ -951,43 +959,24 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
951 intel_attach_broadcast_rgb_property(connector); 959 intel_attach_broadcast_rgb_property(connector);
952} 960}
953 961
954void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port) 962void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
963 struct intel_connector *intel_connector)
955{ 964{
965 struct drm_connector *connector = &intel_connector->base;
966 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
967 struct intel_encoder *intel_encoder = &intel_dig_port->base;
968 struct drm_device *dev = intel_encoder->base.dev;
956 struct drm_i915_private *dev_priv = dev->dev_private; 969 struct drm_i915_private *dev_priv = dev->dev_private;
957 struct drm_connector *connector; 970 enum port port = intel_dig_port->port;
958 struct intel_encoder *intel_encoder;
959 struct intel_connector *intel_connector;
960 struct intel_hdmi *intel_hdmi;
961
962 intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
963 if (!intel_hdmi)
964 return;
965
966 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
967 if (!intel_connector) {
968 kfree(intel_hdmi);
969 return;
970 }
971
972 intel_encoder = &intel_hdmi->base;
973 drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
974 DRM_MODE_ENCODER_TMDS);
975 971
976 connector = &intel_connector->base;
977 drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, 972 drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
978 DRM_MODE_CONNECTOR_HDMIA); 973 DRM_MODE_CONNECTOR_HDMIA);
979 drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs); 974 drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
980 975
981 intel_encoder->type = INTEL_OUTPUT_HDMI;
982
983 connector->polled = DRM_CONNECTOR_POLL_HPD; 976 connector->polled = DRM_CONNECTOR_POLL_HPD;
984 connector->interlace_allowed = 1; 977 connector->interlace_allowed = 1;
985 connector->doublescan_allowed = 0; 978 connector->doublescan_allowed = 0;
986 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
987 979
988 intel_encoder->cloneable = false;
989
990 intel_hdmi->ddi_port = port;
991 switch (port) { 980 switch (port) {
992 case PORT_B: 981 case PORT_B:
993 intel_hdmi->ddc_bus = GMBUS_PORT_DPB; 982 intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
@@ -1007,8 +996,6 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
1007 BUG(); 996 BUG();
1008 } 997 }
1009 998
1010 intel_hdmi->sdvox_reg = sdvox_reg;
1011
1012 if (!HAS_PCH_SPLIT(dev)) { 999 if (!HAS_PCH_SPLIT(dev)) {
1013 intel_hdmi->write_infoframe = g4x_write_infoframe; 1000 intel_hdmi->write_infoframe = g4x_write_infoframe;
1014 intel_hdmi->set_infoframes = g4x_set_infoframes; 1001 intel_hdmi->set_infoframes = g4x_set_infoframes;
@@ -1026,21 +1013,10 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
1026 intel_hdmi->set_infoframes = cpt_set_infoframes; 1013 intel_hdmi->set_infoframes = cpt_set_infoframes;
1027 } 1014 }
1028 1015
1029 if (IS_HASWELL(dev)) { 1016 if (IS_HASWELL(dev))
1030 intel_encoder->enable = intel_enable_ddi; 1017 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
1031 intel_encoder->disable = intel_disable_ddi; 1018 else
1032 intel_encoder->get_hw_state = intel_ddi_get_hw_state; 1019 intel_connector->get_hw_state = intel_connector_get_hw_state;
1033 drm_encoder_helper_add(&intel_encoder->base,
1034 &intel_hdmi_helper_funcs_hsw);
1035 } else {
1036 intel_encoder->enable = intel_enable_hdmi;
1037 intel_encoder->disable = intel_disable_hdmi;
1038 intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
1039 drm_encoder_helper_add(&intel_encoder->base,
1040 &intel_hdmi_helper_funcs);
1041 }
1042 intel_connector->get_hw_state = intel_connector_get_hw_state;
1043
1044 1020
1045 intel_hdmi_add_properties(intel_hdmi, connector); 1021 intel_hdmi_add_properties(intel_hdmi, connector);
1046 1022
@@ -1056,3 +1032,42 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
1056 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); 1032 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
1057 } 1033 }
1058} 1034}
1035
1036void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
1037{
1038 struct intel_digital_port *intel_dig_port;
1039 struct intel_encoder *intel_encoder;
1040 struct drm_encoder *encoder;
1041 struct intel_connector *intel_connector;
1042
1043 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
1044 if (!intel_dig_port)
1045 return;
1046
1047 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
1048 if (!intel_connector) {
1049 kfree(intel_dig_port);
1050 return;
1051 }
1052
1053 intel_encoder = &intel_dig_port->base;
1054 encoder = &intel_encoder->base;
1055
1056 drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
1057 DRM_MODE_ENCODER_TMDS);
1058 drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
1059
1060 intel_encoder->enable = intel_enable_hdmi;
1061 intel_encoder->disable = intel_disable_hdmi;
1062 intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
1063
1064 intel_encoder->type = INTEL_OUTPUT_HDMI;
1065 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
1066 intel_encoder->cloneable = false;
1067
1068 intel_dig_port->port = port;
1069 intel_dig_port->hdmi.sdvox_reg = sdvox_reg;
1070 intel_dig_port->dp.output_reg = 0;
1071
1072 intel_hdmi_init_connector(intel_dig_port, intel_connector);
1073}
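Note that intel_hdmi_init() above explicitly zeroes dp.output_reg: with DP and HDMI state embedded side by side in one intel_digital_port, a zero register plausibly doubles as an "HDMI-only port" marker for code shared between the two. A sketch of that check (the helper is hypothetical, not part of this patch):

	/* Sketch: assuming output_reg == 0 means "no DP function on this
	 * digital port", as the zeroing in intel_hdmi_init() hints. */
	static bool example_port_has_dp(struct intel_digital_port *dig_port)
	{
		return dig_port->dp.output_reg != 0;
	}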
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index c2c6dbc0971c..3ef5af15b812 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -432,7 +432,7 @@ timeout:
432 I915_WRITE(GMBUS0 + reg_offset, 0); 432 I915_WRITE(GMBUS0 + reg_offset, 0);
433 433
434 /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */ 434 /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
435 bus->force_bit = true; 435 bus->force_bit = 1;
436 ret = i2c_bit_algo.master_xfer(adapter, msgs, num); 436 ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
437 437
438out: 438out:
@@ -491,7 +491,7 @@ int intel_setup_gmbus(struct drm_device *dev)
491 491
492 /* gmbus seems to be broken on i830 */ 492 /* gmbus seems to be broken on i830 */
493 if (IS_I830(dev)) 493 if (IS_I830(dev))
494 bus->force_bit = true; 494 bus->force_bit = 1;
495 495
496 intel_gpio_setup(bus, port); 496 intel_gpio_setup(bus, port);
497 497
@@ -532,7 +532,10 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
532{ 532{
533 struct intel_gmbus *bus = to_intel_gmbus(adapter); 533 struct intel_gmbus *bus = to_intel_gmbus(adapter);
534 534
535 bus->force_bit = force_bit; 535 bus->force_bit += force_bit ? 1 : -1;
536 DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
537 force_bit ? "en" : "dis", adapter->name,
538 bus->force_bit);
536} 539}
537 540
538void intel_teardown_gmbus(struct drm_device *dev) 541void intel_teardown_gmbus(struct drm_device *dev)
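The intel_i2c.c hunks above turn force_bit from a bool into a counter: each intel_gmbus_force_bit(adapter, true) nests with a matching false call, and bit-banging stays selected while the count is non-zero, so independent users no longer clobber each other's setting. A sketch of the pairing:

	/* Sketch: nested force-bit users; the adapter would come from
	 * intel_gmbus_get_adapter() in real code. */
	static void example_nested_force_bit(struct i2c_adapter *adapter)
	{
		intel_gmbus_force_bit(adapter, true);	/* 0 -> 1: bit-banging on */
		intel_gmbus_force_bit(adapter, true);	/* 1 -> 2: still on */
		intel_gmbus_force_bit(adapter, false);	/* 2 -> 1: still on */
		intel_gmbus_force_bit(adapter, false);	/* 1 -> 0: GMBUS again */
	}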
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index edba93b3474b..b9a660a53677 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -40,28 +40,30 @@
40#include <linux/acpi.h> 40#include <linux/acpi.h>
41 41
42/* Private structure for the integrated LVDS support */ 42/* Private structure for the integrated LVDS support */
43struct intel_lvds { 43struct intel_lvds_connector {
44 struct intel_encoder base; 44 struct intel_connector base;
45 45
46 struct edid *edid; 46 struct notifier_block lid_notifier;
47};
48
49struct intel_lvds_encoder {
50 struct intel_encoder base;
47 51
48 int fitting_mode;
49 u32 pfit_control; 52 u32 pfit_control;
50 u32 pfit_pgm_ratios; 53 u32 pfit_pgm_ratios;
51 bool pfit_dirty; 54 bool pfit_dirty;
52 55
53 struct drm_display_mode *fixed_mode; 56 struct intel_lvds_connector *attached_connector;
54}; 57};
55 58
56static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder) 59static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
57{ 60{
58 return container_of(encoder, struct intel_lvds, base.base); 61 return container_of(encoder, struct intel_lvds_encoder, base.base);
59} 62}
60 63
61static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector) 64static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector)
62{ 65{
63 return container_of(intel_attached_encoder(connector), 66 return container_of(connector, struct intel_lvds_connector, base.base);
64 struct intel_lvds, base);
65} 67}
66 68
67static bool intel_lvds_get_hw_state(struct intel_encoder *encoder, 69static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
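This hunk mirrors the DP/HDMI split for LVDS: connector-side state (lid notifier, cached EDID, panel info) moves into intel_lvds_connector on top of intel_connector, while intel_lvds_encoder keeps only the panel-fitter registers plus a back-pointer to its connector. A sketch of crossing the split, matching the fixed-mode usage in the hunks that follow (helper name illustrative):

	/* Sketch: the encoder reaches panel data through its attached
	 * connector rather than owning a fixed_mode of its own. */
	static struct drm_display_mode *
	example_lvds_fixed_mode(struct intel_lvds_encoder *lvds_encoder)
	{
		struct intel_connector *connector =
			&lvds_encoder->attached_connector->base;

		return connector->panel.fixed_mode;
	}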
@@ -96,7 +98,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
96static void intel_enable_lvds(struct intel_encoder *encoder) 98static void intel_enable_lvds(struct intel_encoder *encoder)
97{ 99{
98 struct drm_device *dev = encoder->base.dev; 100 struct drm_device *dev = encoder->base.dev;
99 struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base); 101 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
100 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 102 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
101 struct drm_i915_private *dev_priv = dev->dev_private; 103 struct drm_i915_private *dev_priv = dev->dev_private;
102 u32 ctl_reg, lvds_reg, stat_reg; 104 u32 ctl_reg, lvds_reg, stat_reg;
@@ -113,7 +115,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
113 115
114 I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); 116 I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
115 117
116 if (intel_lvds->pfit_dirty) { 118 if (lvds_encoder->pfit_dirty) {
117 /* 119 /*
118 * Enable automatic panel scaling so that non-native modes 120 * Enable automatic panel scaling so that non-native modes
119 * fill the screen. The panel fitter should only be 121 * fill the screen. The panel fitter should only be
@@ -121,12 +123,12 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
121 * register description and PRM. 123 * register description and PRM.
122 */ 124 */
123 DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", 125 DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
124 intel_lvds->pfit_control, 126 lvds_encoder->pfit_control,
125 intel_lvds->pfit_pgm_ratios); 127 lvds_encoder->pfit_pgm_ratios);
126 128
127 I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); 129 I915_WRITE(PFIT_PGM_RATIOS, lvds_encoder->pfit_pgm_ratios);
128 I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); 130 I915_WRITE(PFIT_CONTROL, lvds_encoder->pfit_control);
129 intel_lvds->pfit_dirty = false; 131 lvds_encoder->pfit_dirty = false;
130 } 132 }
131 133
132 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); 134 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
@@ -140,7 +142,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
140static void intel_disable_lvds(struct intel_encoder *encoder) 142static void intel_disable_lvds(struct intel_encoder *encoder)
141{ 143{
142 struct drm_device *dev = encoder->base.dev; 144 struct drm_device *dev = encoder->base.dev;
143 struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base); 145 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
144 struct drm_i915_private *dev_priv = dev->dev_private; 146 struct drm_i915_private *dev_priv = dev->dev_private;
145 u32 ctl_reg, lvds_reg, stat_reg; 147 u32 ctl_reg, lvds_reg, stat_reg;
146 148
@@ -160,9 +162,9 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
160 if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000)) 162 if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
161 DRM_ERROR("timed out waiting for panel to power off\n"); 163 DRM_ERROR("timed out waiting for panel to power off\n");
162 164
163 if (intel_lvds->pfit_control) { 165 if (lvds_encoder->pfit_control) {
164 I915_WRITE(PFIT_CONTROL, 0); 166 I915_WRITE(PFIT_CONTROL, 0);
165 intel_lvds->pfit_dirty = true; 167 lvds_encoder->pfit_dirty = true;
166 } 168 }
167 169
168 I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); 170 I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
@@ -172,8 +174,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
172static int intel_lvds_mode_valid(struct drm_connector *connector, 174static int intel_lvds_mode_valid(struct drm_connector *connector,
173 struct drm_display_mode *mode) 175 struct drm_display_mode *mode)
174{ 176{
175 struct intel_lvds *intel_lvds = intel_attached_lvds(connector); 177 struct intel_connector *intel_connector = to_intel_connector(connector);
176 struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode; 178 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
177 179
178 if (mode->hdisplay > fixed_mode->hdisplay) 180 if (mode->hdisplay > fixed_mode->hdisplay)
179 return MODE_PANEL; 181 return MODE_PANEL;
@@ -249,8 +251,10 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
249{ 251{
250 struct drm_device *dev = encoder->dev; 252 struct drm_device *dev = encoder->dev;
251 struct drm_i915_private *dev_priv = dev->dev_private; 253 struct drm_i915_private *dev_priv = dev->dev_private;
252 struct intel_lvds *intel_lvds = to_intel_lvds(encoder); 254 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
253 struct intel_crtc *intel_crtc = intel_lvds->base.new_crtc; 255 struct intel_connector *intel_connector =
256 &lvds_encoder->attached_connector->base;
257 struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
254 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; 258 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
255 int pipe; 259 int pipe;
256 260
@@ -260,7 +264,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
260 return false; 264 return false;
261 } 265 }
262 266
263 if (intel_encoder_check_is_cloned(&intel_lvds->base)) 267 if (intel_encoder_check_is_cloned(&lvds_encoder->base))
264 return false; 268 return false;
265 269
266 /* 270 /*
@@ -269,10 +273,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
269 * with the panel scaling set up to source from the H/VDisplay 273 * with the panel scaling set up to source from the H/VDisplay
270 * of the original mode. 274 * of the original mode.
271 */ 275 */
272 intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode); 276 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
277 adjusted_mode);
273 278
274 if (HAS_PCH_SPLIT(dev)) { 279 if (HAS_PCH_SPLIT(dev)) {
275 intel_pch_panel_fitting(dev, intel_lvds->fitting_mode, 280 intel_pch_panel_fitting(dev,
281 intel_connector->panel.fitting_mode,
276 mode, adjusted_mode); 282 mode, adjusted_mode);
277 return true; 283 return true;
278 } 284 }
@@ -298,7 +304,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
298 304
299 drm_mode_set_crtcinfo(adjusted_mode, 0); 305 drm_mode_set_crtcinfo(adjusted_mode, 0);
300 306
301 switch (intel_lvds->fitting_mode) { 307 switch (intel_connector->panel.fitting_mode) {
302 case DRM_MODE_SCALE_CENTER: 308 case DRM_MODE_SCALE_CENTER:
303 /* 309 /*
304 * For centered modes, we have to calculate border widths & 310 * For centered modes, we have to calculate border widths &
@@ -396,11 +402,11 @@ out:
396 if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither) 402 if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
397 pfit_control |= PANEL_8TO6_DITHER_ENABLE; 403 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
398 404
399 if (pfit_control != intel_lvds->pfit_control || 405 if (pfit_control != lvds_encoder->pfit_control ||
400 pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) { 406 pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) {
401 intel_lvds->pfit_control = pfit_control; 407 lvds_encoder->pfit_control = pfit_control;
402 intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios; 408 lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios;
403 intel_lvds->pfit_dirty = true; 409 lvds_encoder->pfit_dirty = true;
404 } 410 }
405 dev_priv->lvds_border_bits = border; 411 dev_priv->lvds_border_bits = border;
406 412
@@ -449,14 +455,15 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
449 */ 455 */
450static int intel_lvds_get_modes(struct drm_connector *connector) 456static int intel_lvds_get_modes(struct drm_connector *connector)
451{ 457{
452 struct intel_lvds *intel_lvds = intel_attached_lvds(connector); 458 struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector);
453 struct drm_device *dev = connector->dev; 459 struct drm_device *dev = connector->dev;
454 struct drm_display_mode *mode; 460 struct drm_display_mode *mode;
455 461
456 if (intel_lvds->edid) 462 /* use cached edid if we have one */
457 return drm_add_edid_modes(connector, intel_lvds->edid); 463 if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
464 return drm_add_edid_modes(connector, lvds_connector->base.edid);
458 465
459 mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode); 466 mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode);
460 if (mode == NULL) 467 if (mode == NULL)
461 return 0; 468 return 0;
462 469
@@ -496,10 +503,11 @@ static const struct dmi_system_id intel_no_modeset_on_lid[] = {
496static int intel_lid_notify(struct notifier_block *nb, unsigned long val, 503static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
497 void *unused) 504 void *unused)
498{ 505{
499 struct drm_i915_private *dev_priv = 506 struct intel_lvds_connector *lvds_connector =
500 container_of(nb, struct drm_i915_private, lid_notifier); 507 container_of(nb, struct intel_lvds_connector, lid_notifier);
501 struct drm_device *dev = dev_priv->dev; 508 struct drm_connector *connector = &lvds_connector->base.base;
502 struct drm_connector *connector = dev_priv->int_lvds_connector; 509 struct drm_device *dev = connector->dev;
510 struct drm_i915_private *dev_priv = dev->dev_private;
503 511
504 if (dev->switch_power_state != DRM_SWITCH_POWER_ON) 512 if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
505 return NOTIFY_OK; 513 return NOTIFY_OK;
@@ -508,9 +516,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
508 * check and update the status of the LVDS connector after receiving 516 * check and update the status of the LVDS connector after receiving
509 * the LID notification event. 517 * the LID notification event.
510 */ 518 */
511 if (connector) 519 connector->status = connector->funcs->detect(connector, false);
512 connector->status = connector->funcs->detect(connector,
513 false);
514 520
515 /* Don't force modeset on machines where it causes a GPU lockup */ 521 /* Don't force modeset on machines where it causes a GPU lockup */
516 if (dmi_check_system(intel_no_modeset_on_lid)) 522 if (dmi_check_system(intel_no_modeset_on_lid))
@@ -526,7 +532,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
526 dev_priv->modeset_on_lid = 0; 532 dev_priv->modeset_on_lid = 0;
527 533
528 mutex_lock(&dev->mode_config.mutex); 534 mutex_lock(&dev->mode_config.mutex);
529 intel_modeset_check_state(dev); 535 intel_modeset_setup_hw_state(dev, true);
530 mutex_unlock(&dev->mode_config.mutex); 536 mutex_unlock(&dev->mode_config.mutex);
531 537
532 return NOTIFY_OK; 538 return NOTIFY_OK;
@@ -541,13 +547,18 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
541 */ 547 */
542static void intel_lvds_destroy(struct drm_connector *connector) 548static void intel_lvds_destroy(struct drm_connector *connector)
543{ 549{
544 struct drm_device *dev = connector->dev; 550 struct intel_lvds_connector *lvds_connector =
545 struct drm_i915_private *dev_priv = dev->dev_private; 551 to_lvds_connector(connector);
546 552
547 intel_panel_destroy_backlight(dev); 553 if (lvds_connector->lid_notifier.notifier_call)
554 acpi_lid_notifier_unregister(&lvds_connector->lid_notifier);
555
556 if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
557 kfree(lvds_connector->base.edid);
558
559 intel_panel_destroy_backlight(connector->dev);
560 intel_panel_fini(&lvds_connector->base.panel);
548 561
549 if (dev_priv->lid_notifier.notifier_call)
550 acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
551 drm_sysfs_connector_remove(connector); 562 drm_sysfs_connector_remove(connector);
552 drm_connector_cleanup(connector); 563 drm_connector_cleanup(connector);
553 kfree(connector); 564 kfree(connector);
@@ -557,22 +568,24 @@ static int intel_lvds_set_property(struct drm_connector *connector,
557 struct drm_property *property, 568 struct drm_property *property,
558 uint64_t value) 569 uint64_t value)
559{ 570{
560 struct intel_lvds *intel_lvds = intel_attached_lvds(connector); 571 struct intel_connector *intel_connector = to_intel_connector(connector);
561 struct drm_device *dev = connector->dev; 572 struct drm_device *dev = connector->dev;
562 573
563 if (property == dev->mode_config.scaling_mode_property) { 574 if (property == dev->mode_config.scaling_mode_property) {
564 struct drm_crtc *crtc = intel_lvds->base.base.crtc; 575 struct drm_crtc *crtc;
565 576
566 if (value == DRM_MODE_SCALE_NONE) { 577 if (value == DRM_MODE_SCALE_NONE) {
567 DRM_DEBUG_KMS("no scaling not supported\n"); 578 DRM_DEBUG_KMS("no scaling not supported\n");
568 return -EINVAL; 579 return -EINVAL;
569 } 580 }
570 581
571 if (intel_lvds->fitting_mode == value) { 582 if (intel_connector->panel.fitting_mode == value) {
572 /* the LVDS scaling property is not changed */ 583 /* the LVDS scaling property is not changed */
573 return 0; 584 return 0;
574 } 585 }
575 intel_lvds->fitting_mode = value; 586 intel_connector->panel.fitting_mode = value;
587
588 crtc = intel_attached_encoder(connector)->base.crtc;
576 if (crtc && crtc->enabled) { 589 if (crtc && crtc->enabled) {
577 /* 590 /*
578 * If the CRTC is enabled, the display will be changed 591 * If the CRTC is enabled, the display will be changed
@@ -912,12 +925,15 @@ static bool intel_lvds_supported(struct drm_device *dev)
912bool intel_lvds_init(struct drm_device *dev) 925bool intel_lvds_init(struct drm_device *dev)
913{ 926{
914 struct drm_i915_private *dev_priv = dev->dev_private; 927 struct drm_i915_private *dev_priv = dev->dev_private;
915 struct intel_lvds *intel_lvds; 928 struct intel_lvds_encoder *lvds_encoder;
916 struct intel_encoder *intel_encoder; 929 struct intel_encoder *intel_encoder;
930 struct intel_lvds_connector *lvds_connector;
917 struct intel_connector *intel_connector; 931 struct intel_connector *intel_connector;
918 struct drm_connector *connector; 932 struct drm_connector *connector;
919 struct drm_encoder *encoder; 933 struct drm_encoder *encoder;
920 struct drm_display_mode *scan; /* *modes, *bios_mode; */ 934 struct drm_display_mode *scan; /* *modes, *bios_mode; */
935 struct drm_display_mode *fixed_mode = NULL;
936 struct edid *edid;
921 struct drm_crtc *crtc; 937 struct drm_crtc *crtc;
922 u32 lvds; 938 u32 lvds;
923 int pipe; 939 int pipe;
@@ -945,23 +961,25 @@ bool intel_lvds_init(struct drm_device *dev)
945 } 961 }
946 } 962 }
947 963
948 intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL); 964 lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL);
949 if (!intel_lvds) { 965 if (!lvds_encoder)
950 return false; 966 return false;
951 }
952 967
953 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 968 lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL);
954 if (!intel_connector) { 969 if (!lvds_connector) {
955 kfree(intel_lvds); 970 kfree(lvds_encoder);
956 return false; 971 return false;
957 } 972 }
958 973
974 lvds_encoder->attached_connector = lvds_connector;
975
959 if (!HAS_PCH_SPLIT(dev)) { 976 if (!HAS_PCH_SPLIT(dev)) {
960 intel_lvds->pfit_control = I915_READ(PFIT_CONTROL); 977 lvds_encoder->pfit_control = I915_READ(PFIT_CONTROL);
961 } 978 }
962 979
963 intel_encoder = &intel_lvds->base; 980 intel_encoder = &lvds_encoder->base;
964 encoder = &intel_encoder->base; 981 encoder = &intel_encoder->base;
982 intel_connector = &lvds_connector->base;
965 connector = &intel_connector->base; 983 connector = &intel_connector->base;
966 drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs, 984 drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
967 DRM_MODE_CONNECTOR_LVDS); 985 DRM_MODE_CONNECTOR_LVDS);
@@ -993,14 +1011,10 @@ bool intel_lvds_init(struct drm_device *dev)
993 1011
994 /* create the scaling mode property */ 1012 /* create the scaling mode property */
995 drm_mode_create_scaling_mode_property(dev); 1013 drm_mode_create_scaling_mode_property(dev);
996 /* 1014 drm_object_attach_property(&connector->base,
997 * the initial panel fitting mode will be FULL_SCREEN.
998 */
999
1000 drm_connector_attach_property(&intel_connector->base,
1001 dev->mode_config.scaling_mode_property, 1015 dev->mode_config.scaling_mode_property,
1002 DRM_MODE_SCALE_ASPECT); 1016 DRM_MODE_SCALE_ASPECT);
1003 intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT; 1017 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
1004 /* 1018 /*
1005 * LVDS discovery: 1019 * LVDS discovery:
1006 * 1) check for EDID on DDC 1020 * 1) check for EDID on DDC
@@ -1015,20 +1029,21 @@ bool intel_lvds_init(struct drm_device *dev)
1015 * Attempt to get the fixed panel mode from DDC. Assume that the 1029 * Attempt to get the fixed panel mode from DDC. Assume that the
1016 * preferred mode is the right one. 1030 * preferred mode is the right one.
1017 */ 1031 */
1018 intel_lvds->edid = drm_get_edid(connector, 1032 edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin));
1019 intel_gmbus_get_adapter(dev_priv, 1033 if (edid) {
1020 pin)); 1034 if (drm_add_edid_modes(connector, edid)) {
1021 if (intel_lvds->edid) {
1022 if (drm_add_edid_modes(connector,
1023 intel_lvds->edid)) {
1024 drm_mode_connector_update_edid_property(connector, 1035 drm_mode_connector_update_edid_property(connector,
1025 intel_lvds->edid); 1036 edid);
1026 } else { 1037 } else {
1027 kfree(intel_lvds->edid); 1038 kfree(edid);
1028 intel_lvds->edid = NULL; 1039 edid = ERR_PTR(-EINVAL);
1029 } 1040 }
1041 } else {
1042 edid = ERR_PTR(-ENOENT);
1030 } 1043 }
1031 if (!intel_lvds->edid) { 1044 lvds_connector->base.edid = edid;
1045
1046 if (IS_ERR_OR_NULL(edid)) {
1032 /* Didn't get an EDID, so 1047 /* Didn't get an EDID, so
1033 * Set wide sync ranges so we get all modes 1048 * Set wide sync ranges so we get all modes
1034 * handed to valid_mode for checking 1049 * handed to valid_mode for checking
@@ -1041,22 +1056,26 @@ bool intel_lvds_init(struct drm_device *dev)
1041 1056
1042 list_for_each_entry(scan, &connector->probed_modes, head) { 1057 list_for_each_entry(scan, &connector->probed_modes, head) {
1043 if (scan->type & DRM_MODE_TYPE_PREFERRED) { 1058 if (scan->type & DRM_MODE_TYPE_PREFERRED) {
1044 intel_lvds->fixed_mode = 1059 DRM_DEBUG_KMS("using preferred mode from EDID: ");
1045 drm_mode_duplicate(dev, scan); 1060 drm_mode_debug_printmodeline(scan);
1046 intel_find_lvds_downclock(dev, 1061
1047 intel_lvds->fixed_mode, 1062 fixed_mode = drm_mode_duplicate(dev, scan);
1048 connector); 1063 if (fixed_mode) {
1049 goto out; 1064 intel_find_lvds_downclock(dev, fixed_mode,
1065 connector);
1066 goto out;
1067 }
1050 } 1068 }
1051 } 1069 }
1052 1070
1053 /* Failed to get EDID, what about VBT? */ 1071 /* Failed to get EDID, what about VBT? */
1054 if (dev_priv->lfp_lvds_vbt_mode) { 1072 if (dev_priv->lfp_lvds_vbt_mode) {
1055 intel_lvds->fixed_mode = 1073 DRM_DEBUG_KMS("using mode from VBT: ");
1056 drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); 1074 drm_mode_debug_printmodeline(dev_priv->lfp_lvds_vbt_mode);
1057 if (intel_lvds->fixed_mode) { 1075
1058 intel_lvds->fixed_mode->type |= 1076 fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
1059 DRM_MODE_TYPE_PREFERRED; 1077 if (fixed_mode) {
1078 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
1060 goto out; 1079 goto out;
1061 } 1080 }
1062 } 1081 }
@@ -1076,16 +1095,17 @@ bool intel_lvds_init(struct drm_device *dev)
1076 crtc = intel_get_crtc_for_pipe(dev, pipe); 1095 crtc = intel_get_crtc_for_pipe(dev, pipe);
1077 1096
1078 if (crtc && (lvds & LVDS_PORT_EN)) { 1097 if (crtc && (lvds & LVDS_PORT_EN)) {
1079 intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc); 1098 fixed_mode = intel_crtc_mode_get(dev, crtc);
1080 if (intel_lvds->fixed_mode) { 1099 if (fixed_mode) {
1081 intel_lvds->fixed_mode->type |= 1100 DRM_DEBUG_KMS("using current (BIOS) mode: ");
1082 DRM_MODE_TYPE_PREFERRED; 1101 drm_mode_debug_printmodeline(fixed_mode);
1102 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
1083 goto out; 1103 goto out;
1084 } 1104 }
1085 } 1105 }
1086 1106
1087 /* If we still don't have a mode after all that, give up. */ 1107 /* If we still don't have a mode after all that, give up. */
1088 if (!intel_lvds->fixed_mode) 1108 if (!fixed_mode)
1089 goto failed; 1109 goto failed;
1090 1110
1091out: 1111out:
@@ -1100,16 +1120,15 @@ out:
1100 I915_WRITE(PP_CONTROL, 1120 I915_WRITE(PP_CONTROL,
1101 I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); 1121 I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
1102 } 1122 }
1103 dev_priv->lid_notifier.notifier_call = intel_lid_notify; 1123 lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
1104 if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) { 1124 if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
1105 DRM_DEBUG_KMS("lid notifier registration failed\n"); 1125 DRM_DEBUG_KMS("lid notifier registration failed\n");
1106 dev_priv->lid_notifier.notifier_call = NULL; 1126 lvds_connector->lid_notifier.notifier_call = NULL;
1107 } 1127 }
1108 /* keep the LVDS connector */
1109 dev_priv->int_lvds_connector = connector;
1110 drm_sysfs_connector_add(connector); 1128 drm_sysfs_connector_add(connector);
1111 1129
1112 intel_panel_setup_backlight(dev); 1130 intel_panel_init(&intel_connector->panel, fixed_mode);
1131 intel_panel_setup_backlight(connector);
1113 1132
1114 return true; 1133 return true;
1115 1134
@@ -1117,7 +1136,9 @@ failed:
1117 DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); 1136 DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
1118 drm_connector_cleanup(connector); 1137 drm_connector_cleanup(connector);
1119 drm_encoder_cleanup(encoder); 1138 drm_encoder_cleanup(encoder);
1120 kfree(intel_lvds); 1139 if (fixed_mode)
1121 kfree(intel_connector); 1140 drm_mode_destroy(dev, fixed_mode);
1141 kfree(lvds_encoder);
1142 kfree(lvds_connector);
1122 return false; 1143 return false;
1123} 1144}
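
Also worth noting above: the probed EDID is now cached on the connector for its whole lifetime, with ERR_PTR() values standing in for the two failure modes so that later consumers need a single IS_ERR_OR_NULL() test. A minimal sketch of the convention (use_fallback_ranges() is a hypothetical stand-in for the wide-sync-range fallback in the hunk above):

	edid = drm_get_edid(connector, adapter);
	if (!edid)
		edid = ERR_PTR(-ENOENT);	/* nothing answered on DDC */
	else if (!drm_add_edid_modes(connector, edid)) {
		kfree(edid);
		edid = ERR_PTR(-EINVAL);	/* present, but unusable */
	}
	lvds_connector->base.edid = edid;	/* kept until the connector is destroyed */

	if (IS_ERR_OR_NULL(lvds_connector->base.edid))
		use_fallback_ranges(connector);	/* hypothetical helper */
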
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index cabd84bf66eb..b00f1c83adce 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -45,7 +45,6 @@ int intel_connector_update_modes(struct drm_connector *connector,
45 drm_mode_connector_update_edid_property(connector, edid); 45 drm_mode_connector_update_edid_property(connector, edid);
46 ret = drm_add_edid_modes(connector, edid); 46 ret = drm_add_edid_modes(connector, edid);
47 drm_edid_to_eld(connector, edid); 47 drm_edid_to_eld(connector, edid);
48 kfree(edid);
49 48
50 return ret; 49 return ret;
51} 50}
@@ -61,12 +60,16 @@ int intel_ddc_get_modes(struct drm_connector *connector,
61 struct i2c_adapter *adapter) 60 struct i2c_adapter *adapter)
62{ 61{
63 struct edid *edid; 62 struct edid *edid;
63 int ret;
64 64
65 edid = drm_get_edid(connector, adapter); 65 edid = drm_get_edid(connector, adapter);
66 if (!edid) 66 if (!edid)
67 return 0; 67 return 0;
68 68
69 return intel_connector_update_modes(connector, edid); 69 ret = intel_connector_update_modes(connector, edid);
70 kfree(edid);
71
72 return ret;
70} 73}
71 74
72static const struct drm_prop_enum_list force_audio_names[] = { 75static const struct drm_prop_enum_list force_audio_names[] = {
@@ -94,7 +97,7 @@ intel_attach_force_audio_property(struct drm_connector *connector)
94 97
95 dev_priv->force_audio_property = prop; 98 dev_priv->force_audio_property = prop;
96 } 99 }
97 drm_connector_attach_property(connector, prop, 0); 100 drm_object_attach_property(&connector->base, prop, 0);
98} 101}
99 102
100static const struct drm_prop_enum_list broadcast_rgb_names[] = { 103static const struct drm_prop_enum_list broadcast_rgb_names[] = {
@@ -121,5 +124,5 @@ intel_attach_broadcast_rgb_property(struct drm_connector *connector)
121 dev_priv->broadcast_rgb_property = prop; 124 dev_priv->broadcast_rgb_property = prop;
122 } 125 }
123 126
124 drm_connector_attach_property(connector, prop, 0); 127 drm_object_attach_property(&connector->base, prop, 0);
125} 128}
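
The intel_modes.c change shifts EDID ownership to the caller: intel_connector_update_modes() now only borrows the buffer, which lets the LVDS code above pass in its cached EDID without risking a double free. The resulting contract, as a sketch:

	struct edid *edid = drm_get_edid(connector, adapter);	/* caller allocates */
	if (!edid)
		return 0;
	ret = intel_connector_update_modes(connector, edid);	/* borrows, never frees */
	kfree(edid);						/* caller releases */
	return ret;
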
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 5530413213d8..7741c22c934c 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -154,6 +154,8 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
154 struct opregion_asle __iomem *asle = dev_priv->opregion.asle; 154 struct opregion_asle __iomem *asle = dev_priv->opregion.asle;
155 u32 max; 155 u32 max;
156 156
157 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
158
157 if (!(bclp & ASLE_BCLP_VALID)) 159 if (!(bclp & ASLE_BCLP_VALID))
158 return ASLE_BACKLIGHT_FAILED; 160 return ASLE_BACKLIGHT_FAILED;
159 161
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index e2aacd329545..bee8cb6108a7 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -130,32 +130,34 @@ static int is_backlight_combination_mode(struct drm_device *dev)
130 return 0; 130 return 0;
131} 131}
132 132
133static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) 133static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
134{ 134{
135 struct drm_i915_private *dev_priv = dev->dev_private;
135 u32 val; 136 u32 val;
136 137
137 /* Restore the CTL value if it lost, e.g. GPU reset */ 138 /* Restore the CTL value if it lost, e.g. GPU reset */
138 139
139 if (HAS_PCH_SPLIT(dev_priv->dev)) { 140 if (HAS_PCH_SPLIT(dev_priv->dev)) {
140 val = I915_READ(BLC_PWM_PCH_CTL2); 141 val = I915_READ(BLC_PWM_PCH_CTL2);
141 if (dev_priv->saveBLC_PWM_CTL2 == 0) { 142 if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) {
142 dev_priv->saveBLC_PWM_CTL2 = val; 143 dev_priv->regfile.saveBLC_PWM_CTL2 = val;
143 } else if (val == 0) { 144 } else if (val == 0) {
144 I915_WRITE(BLC_PWM_PCH_CTL2, 145 val = dev_priv->regfile.saveBLC_PWM_CTL2;
145 dev_priv->saveBLC_PWM_CTL2); 146 I915_WRITE(BLC_PWM_PCH_CTL2, val);
146 val = dev_priv->saveBLC_PWM_CTL2;
147 } 147 }
148 } else { 148 } else {
149 val = I915_READ(BLC_PWM_CTL); 149 val = I915_READ(BLC_PWM_CTL);
150 if (dev_priv->saveBLC_PWM_CTL == 0) { 150 if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
151 dev_priv->saveBLC_PWM_CTL = val; 151 dev_priv->regfile.saveBLC_PWM_CTL = val;
152 dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); 152 if (INTEL_INFO(dev)->gen >= 4)
153 dev_priv->regfile.saveBLC_PWM_CTL2 =
154 I915_READ(BLC_PWM_CTL2);
153 } else if (val == 0) { 155 } else if (val == 0) {
154 I915_WRITE(BLC_PWM_CTL, 156 val = dev_priv->regfile.saveBLC_PWM_CTL;
155 dev_priv->saveBLC_PWM_CTL); 157 I915_WRITE(BLC_PWM_CTL, val);
156 I915_WRITE(BLC_PWM_CTL2, 158 if (INTEL_INFO(dev)->gen >= 4)
157 dev_priv->saveBLC_PWM_CTL2); 159 I915_WRITE(BLC_PWM_CTL2,
158 val = dev_priv->saveBLC_PWM_CTL; 160 dev_priv->regfile.saveBLC_PWM_CTL2);
159 } 161 }
160 } 162 }
161 163
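
The reworked helper keeps the same "stash on first read, restore on zero" idea, now stored in the regfile substruct and with the BLC_PWM_CTL2 accesses guarded for gen4+, where that register exists. Reduced to a single register, the pattern is:

	val = I915_READ(BLC_PWM_CTL);
	if (dev_priv->regfile.saveBLC_PWM_CTL == 0)
		dev_priv->regfile.saveBLC_PWM_CTL = val;	/* remember the BIOS value */
	else if (val == 0) {
		val = dev_priv->regfile.saveBLC_PWM_CTL;	/* wiped, e.g. by GPU reset */
		I915_WRITE(BLC_PWM_CTL, val);			/* put the stash back */
	}
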
@@ -164,10 +166,9 @@ static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
164 166
165static u32 _intel_panel_get_max_backlight(struct drm_device *dev) 167static u32 _intel_panel_get_max_backlight(struct drm_device *dev)
166{ 168{
167 struct drm_i915_private *dev_priv = dev->dev_private;
168 u32 max; 169 u32 max;
169 170
170 max = i915_read_blc_pwm_ctl(dev_priv); 171 max = i915_read_blc_pwm_ctl(dev);
171 172
172 if (HAS_PCH_SPLIT(dev)) { 173 if (HAS_PCH_SPLIT(dev)) {
173 max >>= 16; 174 max >>= 16;
@@ -275,7 +276,7 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
275 } 276 }
276 277
277 tmp = I915_READ(BLC_PWM_CTL); 278 tmp = I915_READ(BLC_PWM_CTL);
278 if (INTEL_INFO(dev)->gen < 4) 279 if (INTEL_INFO(dev)->gen < 4)
279 level <<= 1; 280 level <<= 1;
280 tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK; 281 tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
281 I915_WRITE(BLC_PWM_CTL, tmp | level); 282 I915_WRITE(BLC_PWM_CTL, tmp | level);
@@ -374,26 +375,23 @@ static void intel_panel_init_backlight(struct drm_device *dev)
374enum drm_connector_status 375enum drm_connector_status
375intel_panel_detect(struct drm_device *dev) 376intel_panel_detect(struct drm_device *dev)
376{ 377{
377#if 0
378 struct drm_i915_private *dev_priv = dev->dev_private; 378 struct drm_i915_private *dev_priv = dev->dev_private;
379#endif
380
381 if (i915_panel_ignore_lid)
382 return i915_panel_ignore_lid > 0 ?
383 connector_status_connected :
384 connector_status_disconnected;
385 379
386 /* opregion lid state on HP 2540p is wrong at boot up,
387 * appears to be either the BIOS or Linux ACPI fault */
388#if 0
389 /* Assume that the BIOS does not lie through the OpRegion... */ 380 /* Assume that the BIOS does not lie through the OpRegion... */
390 if (dev_priv->opregion.lid_state) 381 if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) {
391 return ioread32(dev_priv->opregion.lid_state) & 0x1 ? 382 return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
392 connector_status_connected : 383 connector_status_connected :
393 connector_status_disconnected; 384 connector_status_disconnected;
394#endif 385 }
395 386
396 return connector_status_unknown; 387 switch (i915_panel_ignore_lid) {
388 case -2:
389 return connector_status_connected;
390 case -1:
391 return connector_status_disconnected;
392 default:
393 return connector_status_unknown;
394 }
397} 395}
398 396
399#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE 397#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
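
The rewritten intel_panel_detect() folds the old #if 0 paths into one decision table for the i915_panel_ignore_lid module parameter: 0 trusts the ACPI OpRegion lid state, -2 forces connected, -1 forces disconnected, and any other value reports unknown. As a standalone sketch of that table (the OpRegion-availability check is elided):

	static enum drm_connector_status panel_status(int ignore_lid, bool lid_open)
	{
		if (!ignore_lid)	/* 0: believe the OpRegion lid bit */
			return lid_open ? connector_status_connected
					: connector_status_disconnected;

		switch (ignore_lid) {
		case -2: return connector_status_connected;	/* force present */
		case -1: return connector_status_disconnected;	/* force absent */
		default: return connector_status_unknown;
		}
	}
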
@@ -416,21 +414,14 @@ static const struct backlight_ops intel_panel_bl_ops = {
416 .get_brightness = intel_panel_get_brightness, 414 .get_brightness = intel_panel_get_brightness,
417}; 415};
418 416
419int intel_panel_setup_backlight(struct drm_device *dev) 417int intel_panel_setup_backlight(struct drm_connector *connector)
420{ 418{
419 struct drm_device *dev = connector->dev;
421 struct drm_i915_private *dev_priv = dev->dev_private; 420 struct drm_i915_private *dev_priv = dev->dev_private;
422 struct backlight_properties props; 421 struct backlight_properties props;
423 struct drm_connector *connector;
424 422
425 intel_panel_init_backlight(dev); 423 intel_panel_init_backlight(dev);
426 424
427 if (dev_priv->int_lvds_connector)
428 connector = dev_priv->int_lvds_connector;
429 else if (dev_priv->int_edp_connector)
430 connector = dev_priv->int_edp_connector;
431 else
432 return -ENODEV;
433
434 memset(&props, 0, sizeof(props)); 425 memset(&props, 0, sizeof(props));
435 props.type = BACKLIGHT_RAW; 426 props.type = BACKLIGHT_RAW;
436 props.max_brightness = _intel_panel_get_max_backlight(dev); 427 props.max_brightness = _intel_panel_get_max_backlight(dev);
@@ -460,9 +451,9 @@ void intel_panel_destroy_backlight(struct drm_device *dev)
460 backlight_device_unregister(dev_priv->backlight); 451 backlight_device_unregister(dev_priv->backlight);
461} 452}
462#else 453#else
463int intel_panel_setup_backlight(struct drm_device *dev) 454int intel_panel_setup_backlight(struct drm_connector *connector)
464{ 455{
465 intel_panel_init_backlight(dev); 456 intel_panel_init_backlight(connector->dev);
466 return 0; 457 return 0;
467} 458}
468 459
@@ -471,3 +462,20 @@ void intel_panel_destroy_backlight(struct drm_device *dev)
471 return; 462 return;
472} 463}
473#endif 464#endif
465
466int intel_panel_init(struct intel_panel *panel,
467 struct drm_display_mode *fixed_mode)
468{
469 panel->fixed_mode = fixed_mode;
470
471 return 0;
472}
473
474void intel_panel_fini(struct intel_panel *panel)
475{
476 struct intel_connector *intel_connector =
477 container_of(panel, struct intel_connector, panel);
478
479 if (panel->fixed_mode)
480 drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
481}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 442968f8b201..496caa73eb70 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1325,10 +1325,11 @@ static void valleyview_update_wm(struct drm_device *dev)
1325 (planeb_wm << DSPFW_PLANEB_SHIFT) | 1325 (planeb_wm << DSPFW_PLANEB_SHIFT) |
1326 planea_wm); 1326 planea_wm);
1327 I915_WRITE(DSPFW2, 1327 I915_WRITE(DSPFW2,
1328 (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) | 1328 (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
1329 (cursora_wm << DSPFW_CURSORA_SHIFT)); 1329 (cursora_wm << DSPFW_CURSORA_SHIFT));
1330 I915_WRITE(DSPFW3, 1330 I915_WRITE(DSPFW3,
1331 (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT))); 1331 (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
1332 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1332} 1333}
1333 1334
1334static void g4x_update_wm(struct drm_device *dev) 1335static void g4x_update_wm(struct drm_device *dev)
@@ -1374,11 +1375,11 @@ static void g4x_update_wm(struct drm_device *dev)
1374 (planeb_wm << DSPFW_PLANEB_SHIFT) | 1375 (planeb_wm << DSPFW_PLANEB_SHIFT) |
1375 planea_wm); 1376 planea_wm);
1376 I915_WRITE(DSPFW2, 1377 I915_WRITE(DSPFW2,
1377 (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) | 1378 (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
1378 (cursora_wm << DSPFW_CURSORA_SHIFT)); 1379 (cursora_wm << DSPFW_CURSORA_SHIFT));
1379 /* HPLL off in SR has some issues on G4x... disable it */ 1380 /* HPLL off in SR has some issues on G4x... disable it */
1380 I915_WRITE(DSPFW3, 1381 I915_WRITE(DSPFW3,
1381 (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) | 1382 (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
1382 (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); 1383 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1383} 1384}
1384 1385
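
The two DSPFW hunks fix the same slip: ANDing with the field mask itself keeps *only* the field about to be overwritten and zeroes every other watermark in the register. The intended read-modify-write is:

	u32 tmp = I915_READ(DSPFW2);
	tmp &= ~DSPFW_CURSORA_MASK;			/* clear just the cursor-A field */
	tmp |= cursora_wm << DSPFW_CURSORA_SHIFT;	/* install the new value */
	I915_WRITE(DSPFW2, tmp);
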
@@ -1468,9 +1469,12 @@ static void i9xx_update_wm(struct drm_device *dev)
1468 fifo_size = dev_priv->display.get_fifo_size(dev, 0); 1469 fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1469 crtc = intel_get_crtc_for_plane(dev, 0); 1470 crtc = intel_get_crtc_for_plane(dev, 0);
1470 if (crtc->enabled && crtc->fb) { 1471 if (crtc->enabled && crtc->fb) {
1472 int cpp = crtc->fb->bits_per_pixel / 8;
1473 if (IS_GEN2(dev))
1474 cpp = 4;
1475
1471 planea_wm = intel_calculate_wm(crtc->mode.clock, 1476 planea_wm = intel_calculate_wm(crtc->mode.clock,
1472 wm_info, fifo_size, 1477 wm_info, fifo_size, cpp,
1473 crtc->fb->bits_per_pixel / 8,
1474 latency_ns); 1478 latency_ns);
1475 enabled = crtc; 1479 enabled = crtc;
1476 } else 1480 } else
@@ -1479,9 +1483,12 @@ static void i9xx_update_wm(struct drm_device *dev)
1479 fifo_size = dev_priv->display.get_fifo_size(dev, 1); 1483 fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1480 crtc = intel_get_crtc_for_plane(dev, 1); 1484 crtc = intel_get_crtc_for_plane(dev, 1);
1481 if (crtc->enabled && crtc->fb) { 1485 if (crtc->enabled && crtc->fb) {
1486 int cpp = crtc->fb->bits_per_pixel / 8;
1487 if (IS_GEN2(dev))
1488 cpp = 4;
1489
1482 planeb_wm = intel_calculate_wm(crtc->mode.clock, 1490 planeb_wm = intel_calculate_wm(crtc->mode.clock,
1483 wm_info, fifo_size, 1491 wm_info, fifo_size, cpp,
1484 crtc->fb->bits_per_pixel / 8,
1485 latency_ns); 1492 latency_ns);
1486 if (enabled == NULL) 1493 if (enabled == NULL)
1487 enabled = crtc; 1494 enabled = crtc;
@@ -1571,8 +1578,7 @@ static void i830_update_wm(struct drm_device *dev)
1571 1578
1572 planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info, 1579 planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
1573 dev_priv->display.get_fifo_size(dev, 0), 1580 dev_priv->display.get_fifo_size(dev, 0),
1574 crtc->fb->bits_per_pixel / 8, 1581 4, latency_ns);
1575 latency_ns);
1576 fwater_lo = I915_READ(FW_BLC) & ~0xfff; 1582 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
1577 fwater_lo |= (3<<8) | planea_wm; 1583 fwater_lo |= (3<<8) | planea_wm;
1578 1584
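
The cpp changes above hard-code 4 bytes per pixel for gen2 and i830 when sizing watermarks. For context, intel_calculate_wm() roughly balances FIFO depth against the bytes fetched during the memory latency window; a simplified sketch of that arithmetic (the method minimum and overflow clamping are elided):

	/* bytes the display fetches while waiting latency_ns for memory */
	entries = (clock_in_khz / 1000) * cpp * latency_ns / 1000;
	entries = DIV_ROUND_UP(entries, cacheline_size);
	wm_size = fifo_size - (entries + guard_size);	/* FIFO left as headroom */
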
@@ -2323,7 +2329,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
2323 struct drm_i915_private *dev_priv = dev->dev_private; 2329 struct drm_i915_private *dev_priv = dev->dev_private;
2324 u32 limits = gen6_rps_limits(dev_priv, &val); 2330 u32 limits = gen6_rps_limits(dev_priv, &val);
2325 2331
2326 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 2332 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
2327 WARN_ON(val > dev_priv->rps.max_delay); 2333 WARN_ON(val > dev_priv->rps.max_delay);
2328 WARN_ON(val < dev_priv->rps.min_delay); 2334 WARN_ON(val < dev_priv->rps.min_delay);
2329 2335
@@ -2398,12 +2404,12 @@ static void gen6_enable_rps(struct drm_device *dev)
2398 struct intel_ring_buffer *ring; 2404 struct intel_ring_buffer *ring;
2399 u32 rp_state_cap; 2405 u32 rp_state_cap;
2400 u32 gt_perf_status; 2406 u32 gt_perf_status;
2401 u32 pcu_mbox, rc6_mask = 0; 2407 u32 rc6vids, pcu_mbox, rc6_mask = 0;
2402 u32 gtfifodbg; 2408 u32 gtfifodbg;
2403 int rc6_mode; 2409 int rc6_mode;
2404 int i; 2410 int i, ret;
2405 2411
2406 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 2412 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
2407 2413
2408 /* Here begins a magic sequence of register writes to enable 2414 /* Here begins a magic sequence of register writes to enable
2409 * auto-downclocking. 2415 * auto-downclocking.
@@ -2497,30 +2503,16 @@ static void gen6_enable_rps(struct drm_device *dev)
2497 GEN6_RP_UP_BUSY_AVG | 2503 GEN6_RP_UP_BUSY_AVG |
2498 (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT)); 2504 (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
2499 2505
2500 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 2506 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
2501 500)) 2507 if (!ret) {
2502 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); 2508 pcu_mbox = 0;
2503 2509 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
2504 I915_WRITE(GEN6_PCODE_DATA, 0); 2510 if (ret && pcu_mbox & (1<<31)) { /* OC supported */
2505 I915_WRITE(GEN6_PCODE_MAILBOX, 2511 dev_priv->rps.max_delay = pcu_mbox & 0xff;
2506 GEN6_PCODE_READY | 2512 DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
2507 GEN6_PCODE_WRITE_MIN_FREQ_TABLE); 2513 }
2508 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 2514 } else {
2509 500)) 2515 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
2510 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
2511
2512 /* Check for overclock support */
2513 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
2514 500))
2515 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
2516 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
2517 pcu_mbox = I915_READ(GEN6_PCODE_DATA);
2518 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
2519 500))
2520 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
2521 if (pcu_mbox & (1<<31)) { /* OC supported */
2522 dev_priv->rps.max_delay = pcu_mbox & 0xff;
2523 DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
2524 } 2516 }
2525 2517
2526 gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8); 2518 gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
@@ -2534,6 +2526,20 @@ static void gen6_enable_rps(struct drm_device *dev)
2534 /* enable all PM interrupts */ 2526 /* enable all PM interrupts */
2535 I915_WRITE(GEN6_PMINTRMSK, 0); 2527 I915_WRITE(GEN6_PMINTRMSK, 0);
2536 2528
2529 rc6vids = 0;
2530 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
2531 if (IS_GEN6(dev) && ret) {
2532 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
2533 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
2534 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
2535 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
2536 rc6vids &= 0xffff00;
2537 rc6vids |= GEN6_ENCODE_RC6_VID(450);
2538 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
2539 if (ret)
2540 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
2541 }
2542
2537 gen6_gt_force_wake_put(dev_priv); 2543 gen6_gt_force_wake_put(dev_priv);
2538} 2544}
2539 2545
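
The RC6 VID check decodes the low byte of the GEN6_PCODE_READ_RC6VIDS reply in 5 mV steps above a 245 mV base, assuming the usual i915_reg.h definitions GEN6_DECODE_RC6_VID(v) == (v) * 5 + 245 and GEN6_ENCODE_RC6_VID(mv) == ((mv) - 245) / 5. The 450 mV floor then works out as:

	GEN6_ENCODE_RC6_VID(450);	/* (450 - 245) / 5 == 41     */
	GEN6_DECODE_RC6_VID(41);	/* 41 * 5 + 245   == 450 mV  */
	/* any decoded VID below 450 mV trips the BIOS fix-up above */
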
@@ -2541,10 +2547,11 @@ static void gen6_update_ring_freq(struct drm_device *dev)
2541{ 2547{
2542 struct drm_i915_private *dev_priv = dev->dev_private; 2548 struct drm_i915_private *dev_priv = dev->dev_private;
2543 int min_freq = 15; 2549 int min_freq = 15;
2544 int gpu_freq, ia_freq, max_ia_freq; 2550 int gpu_freq;
2551 unsigned int ia_freq, max_ia_freq;
2545 int scaling_factor = 180; 2552 int scaling_factor = 180;
2546 2553
2547 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 2554 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
2548 2555
2549 max_ia_freq = cpufreq_quick_get_max(0); 2556 max_ia_freq = cpufreq_quick_get_max(0);
2550 /* 2557 /*
@@ -2575,17 +2582,11 @@ static void gen6_update_ring_freq(struct drm_device *dev)
2575 else 2582 else
2576 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); 2583 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
2577 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100); 2584 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
2585 ia_freq <<= GEN6_PCODE_FREQ_IA_RATIO_SHIFT;
2578 2586
2579 I915_WRITE(GEN6_PCODE_DATA, 2587 sandybridge_pcode_write(dev_priv,
2580 (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) | 2588 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
2581 gpu_freq); 2589 ia_freq | gpu_freq);
2582 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
2583 GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
2584 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
2585 GEN6_PCODE_READY) == 0, 10)) {
2586 DRM_ERROR("pcode write of freq table timed out\n");
2587 continue;
2588 }
2589 } 2590 }
2590} 2591}
2591 2592
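
Both this hunk and the gen6_enable_rps() changes above route all GEN6_PCODE_MAILBOX traffic through the sandybridge_pcode_read()/write() helpers added near the end of this patch. The handshake they wrap, in sketch form:

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
		return -EAGAIN;			/* a request is already in flight */

	I915_WRITE(GEN6_PCODE_DATA, *val);	/* payload first */
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500))
		return -ETIMEDOUT;		/* firmware never completed */

	*val = I915_READ(GEN6_PCODE_DATA);	/* on reads, the reply replaces the payload */
	return 0;
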
@@ -2593,16 +2594,16 @@ void ironlake_teardown_rc6(struct drm_device *dev)
2593{ 2594{
2594 struct drm_i915_private *dev_priv = dev->dev_private; 2595 struct drm_i915_private *dev_priv = dev->dev_private;
2595 2596
2596 if (dev_priv->renderctx) { 2597 if (dev_priv->ips.renderctx) {
2597 i915_gem_object_unpin(dev_priv->renderctx); 2598 i915_gem_object_unpin(dev_priv->ips.renderctx);
2598 drm_gem_object_unreference(&dev_priv->renderctx->base); 2599 drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
2599 dev_priv->renderctx = NULL; 2600 dev_priv->ips.renderctx = NULL;
2600 } 2601 }
2601 2602
2602 if (dev_priv->pwrctx) { 2603 if (dev_priv->ips.pwrctx) {
2603 i915_gem_object_unpin(dev_priv->pwrctx); 2604 i915_gem_object_unpin(dev_priv->ips.pwrctx);
2604 drm_gem_object_unreference(&dev_priv->pwrctx->base); 2605 drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
2605 dev_priv->pwrctx = NULL; 2606 dev_priv->ips.pwrctx = NULL;
2606 } 2607 }
2607} 2608}
2608 2609
@@ -2628,14 +2629,14 @@ static int ironlake_setup_rc6(struct drm_device *dev)
2628{ 2629{
2629 struct drm_i915_private *dev_priv = dev->dev_private; 2630 struct drm_i915_private *dev_priv = dev->dev_private;
2630 2631
2631 if (dev_priv->renderctx == NULL) 2632 if (dev_priv->ips.renderctx == NULL)
2632 dev_priv->renderctx = intel_alloc_context_page(dev); 2633 dev_priv->ips.renderctx = intel_alloc_context_page(dev);
2633 if (!dev_priv->renderctx) 2634 if (!dev_priv->ips.renderctx)
2634 return -ENOMEM; 2635 return -ENOMEM;
2635 2636
2636 if (dev_priv->pwrctx == NULL) 2637 if (dev_priv->ips.pwrctx == NULL)
2637 dev_priv->pwrctx = intel_alloc_context_page(dev); 2638 dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
2638 if (!dev_priv->pwrctx) { 2639 if (!dev_priv->ips.pwrctx) {
2639 ironlake_teardown_rc6(dev); 2640 ironlake_teardown_rc6(dev);
2640 return -ENOMEM; 2641 return -ENOMEM;
2641 } 2642 }
@@ -2647,6 +2648,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
2647{ 2648{
2648 struct drm_i915_private *dev_priv = dev->dev_private; 2649 struct drm_i915_private *dev_priv = dev->dev_private;
2649 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 2650 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
2651 bool was_interruptible;
2650 int ret; 2652 int ret;
2651 2653
2652 /* rc6 disabled by default due to repeated reports of hanging during 2654 /* rc6 disabled by default due to repeated reports of hanging during
@@ -2661,6 +2663,9 @@ static void ironlake_enable_rc6(struct drm_device *dev)
2661 if (ret) 2663 if (ret)
2662 return; 2664 return;
2663 2665
2666 was_interruptible = dev_priv->mm.interruptible;
2667 dev_priv->mm.interruptible = false;
2668
2664 /* 2669 /*
2665 * GPU can automatically power down the render unit if given a page 2670 * GPU can automatically power down the render unit if given a page
2666 * to save state. 2671 * to save state.
@@ -2668,12 +2673,13 @@ static void ironlake_enable_rc6(struct drm_device *dev)
2668 ret = intel_ring_begin(ring, 6); 2673 ret = intel_ring_begin(ring, 6);
2669 if (ret) { 2674 if (ret) {
2670 ironlake_teardown_rc6(dev); 2675 ironlake_teardown_rc6(dev);
2676 dev_priv->mm.interruptible = was_interruptible;
2671 return; 2677 return;
2672 } 2678 }
2673 2679
2674 intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); 2680 intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
2675 intel_ring_emit(ring, MI_SET_CONTEXT); 2681 intel_ring_emit(ring, MI_SET_CONTEXT);
2676 intel_ring_emit(ring, dev_priv->renderctx->gtt_offset | 2682 intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
2677 MI_MM_SPACE_GTT | 2683 MI_MM_SPACE_GTT |
2678 MI_SAVE_EXT_STATE_EN | 2684 MI_SAVE_EXT_STATE_EN |
2679 MI_RESTORE_EXT_STATE_EN | 2685 MI_RESTORE_EXT_STATE_EN |
@@ -2688,14 +2694,15 @@ static void ironlake_enable_rc6(struct drm_device *dev)
2688 * does an implicit flush, combined with MI_FLUSH above, it should be 2694 * does an implicit flush, combined with MI_FLUSH above, it should be
2689 * safe to assume that renderctx is valid 2695 * safe to assume that renderctx is valid
2690 */ 2696 */
2691 ret = intel_wait_ring_idle(ring); 2697 ret = intel_ring_idle(ring);
2698 dev_priv->mm.interruptible = was_interruptible;
2692 if (ret) { 2699 if (ret) {
2693 DRM_ERROR("failed to enable ironlake power savings\n"); 2700 DRM_ERROR("failed to enable ironlake power savings\n");
2694 ironlake_teardown_rc6(dev); 2701 ironlake_teardown_rc6(dev);
2695 return; 2702 return;
2696 } 2703 }
2697 2704
2698 I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN); 2705 I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
2699 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); 2706 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
2700} 2707}
2701 2708
@@ -3304,37 +3311,72 @@ static void intel_init_emon(struct drm_device *dev)
3304 3311
3305void intel_disable_gt_powersave(struct drm_device *dev) 3312void intel_disable_gt_powersave(struct drm_device *dev)
3306{ 3313{
3314 struct drm_i915_private *dev_priv = dev->dev_private;
3315
3307 if (IS_IRONLAKE_M(dev)) { 3316 if (IS_IRONLAKE_M(dev)) {
3308 ironlake_disable_drps(dev); 3317 ironlake_disable_drps(dev);
3309 ironlake_disable_rc6(dev); 3318 ironlake_disable_rc6(dev);
3310 } else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) { 3319 } else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
3320 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
3321 mutex_lock(&dev_priv->rps.hw_lock);
3311 gen6_disable_rps(dev); 3322 gen6_disable_rps(dev);
3323 mutex_unlock(&dev_priv->rps.hw_lock);
3312 } 3324 }
3313} 3325}
3314 3326
3327static void intel_gen6_powersave_work(struct work_struct *work)
3328{
3329 struct drm_i915_private *dev_priv =
3330 container_of(work, struct drm_i915_private,
3331 rps.delayed_resume_work.work);
3332 struct drm_device *dev = dev_priv->dev;
3333
3334 mutex_lock(&dev_priv->rps.hw_lock);
3335 gen6_enable_rps(dev);
3336 gen6_update_ring_freq(dev);
3337 mutex_unlock(&dev_priv->rps.hw_lock);
3338}
3339
3315void intel_enable_gt_powersave(struct drm_device *dev) 3340void intel_enable_gt_powersave(struct drm_device *dev)
3316{ 3341{
3342 struct drm_i915_private *dev_priv = dev->dev_private;
3343
3317 if (IS_IRONLAKE_M(dev)) { 3344 if (IS_IRONLAKE_M(dev)) {
3318 ironlake_enable_drps(dev); 3345 ironlake_enable_drps(dev);
3319 ironlake_enable_rc6(dev); 3346 ironlake_enable_rc6(dev);
3320 intel_init_emon(dev); 3347 intel_init_emon(dev);
3321 } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) { 3348 } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
3322 gen6_enable_rps(dev); 3349 /*
3323 gen6_update_ring_freq(dev); 3350 * PCU communication is slow and this doesn't need to be
3351 * done at any specific time, so do this out of our fast path
3352 * to make resume and init faster.
3353 */
3354 schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
3355 round_jiffies_up_relative(HZ));
3324 } 3356 }
3325} 3357}
3326 3358
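
The enable path no longer talks to the PCU synchronously: the work item takes rps.hw_lock itself, and the disable path must cancel it before touching the hardware. The lifecycle, condensed from the hunks above:

	/* init (see intel_gt_init() below) */
	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
			  intel_gen6_powersave_work);

	/* enable: defer ~1 s, batched with other timers */
	schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
			      round_jiffies_up_relative(HZ));

	/* disable: make sure the worker is gone, then take the RPS lock */
	cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
	mutex_lock(&dev_priv->rps.hw_lock);
	gen6_disable_rps(dev);
	mutex_unlock(&dev_priv->rps.hw_lock);
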
3359static void ibx_init_clock_gating(struct drm_device *dev)
3360{
3361 struct drm_i915_private *dev_priv = dev->dev_private;
3362
3363 /*
3364 * On Ibex Peak and Cougar Point, we need to disable clock
3365 * gating for the panel power sequencer or it will fail to
3366 * start up when no ports are active.
3367 */
3368 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
3369}
3370
3327static void ironlake_init_clock_gating(struct drm_device *dev) 3371static void ironlake_init_clock_gating(struct drm_device *dev)
3328{ 3372{
3329 struct drm_i915_private *dev_priv = dev->dev_private; 3373 struct drm_i915_private *dev_priv = dev->dev_private;
3330 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; 3374 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
3331 3375
3332 /* Required for FBC */ 3376 /* Required for FBC */
3333 dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE | 3377 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
3334 DPFCRUNIT_CLOCK_GATE_DISABLE | 3378 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
3335 DPFDUNIT_CLOCK_GATE_DISABLE; 3379 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
3336 /* Required for CxSR */
3337 dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
3338 3380
3339 I915_WRITE(PCH_3DCGDIS0, 3381 I915_WRITE(PCH_3DCGDIS0,
3340 MARIUNIT_CLOCK_GATE_DISABLE | 3382 MARIUNIT_CLOCK_GATE_DISABLE |
@@ -3342,8 +3384,6 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
3342 I915_WRITE(PCH_3DCGDIS1, 3384 I915_WRITE(PCH_3DCGDIS1,
3343 VFMUNIT_CLOCK_GATE_DISABLE); 3385 VFMUNIT_CLOCK_GATE_DISABLE);
3344 3386
3345 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
3346
3347 /* 3387 /*
3348 * According to the spec the following bits should be set in 3388 * According to the spec the following bits should be set in
3349 * order to enable memory self-refresh 3389 * order to enable memory self-refresh
@@ -3354,9 +3394,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
3354 I915_WRITE(ILK_DISPLAY_CHICKEN2, 3394 I915_WRITE(ILK_DISPLAY_CHICKEN2,
3355 (I915_READ(ILK_DISPLAY_CHICKEN2) | 3395 (I915_READ(ILK_DISPLAY_CHICKEN2) |
3356 ILK_DPARB_GATE | ILK_VSDPFD_FULL)); 3396 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
3357 I915_WRITE(ILK_DSPCLK_GATE, 3397 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
3358 (I915_READ(ILK_DSPCLK_GATE) |
3359 ILK_DPARB_CLK_GATE));
3360 I915_WRITE(DISP_ARB_CTL, 3398 I915_WRITE(DISP_ARB_CTL,
3361 (I915_READ(DISP_ARB_CTL) | 3399 (I915_READ(DISP_ARB_CTL) |
3362 DISP_FBC_WM_DIS)); 3400 DISP_FBC_WM_DIS));
@@ -3378,28 +3416,56 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
3378 I915_WRITE(ILK_DISPLAY_CHICKEN2, 3416 I915_WRITE(ILK_DISPLAY_CHICKEN2,
3379 I915_READ(ILK_DISPLAY_CHICKEN2) | 3417 I915_READ(ILK_DISPLAY_CHICKEN2) |
3380 ILK_DPARB_GATE); 3418 ILK_DPARB_GATE);
3381 I915_WRITE(ILK_DSPCLK_GATE,
3382 I915_READ(ILK_DSPCLK_GATE) |
3383 ILK_DPFC_DIS1 |
3384 ILK_DPFC_DIS2 |
3385 ILK_CLK_FBC);
3386 } 3419 }
3387 3420
3421 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
3422
3388 I915_WRITE(ILK_DISPLAY_CHICKEN2, 3423 I915_WRITE(ILK_DISPLAY_CHICKEN2,
3389 I915_READ(ILK_DISPLAY_CHICKEN2) | 3424 I915_READ(ILK_DISPLAY_CHICKEN2) |
3390 ILK_ELPIN_409_SELECT); 3425 ILK_ELPIN_409_SELECT);
3391 I915_WRITE(_3D_CHICKEN2, 3426 I915_WRITE(_3D_CHICKEN2,
3392 _3D_CHICKEN2_WM_READ_PIPELINED << 16 | 3427 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
3393 _3D_CHICKEN2_WM_READ_PIPELINED); 3428 _3D_CHICKEN2_WM_READ_PIPELINED);
3429
3430 /* WaDisableRenderCachePipelinedFlush */
3431 I915_WRITE(CACHE_MODE_0,
3432 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
3433
3434 ibx_init_clock_gating(dev);
3435}
3436
3437static void cpt_init_clock_gating(struct drm_device *dev)
3438{
3439 struct drm_i915_private *dev_priv = dev->dev_private;
3440 int pipe;
3441
3442 /*
3443 * On Ibex Peak and Cougar Point, we need to disable clock
3444 * gating for the panel power sequencer or it will fail to
3445 * start up when no ports are active.
3446 */
3447 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
3448 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
3449 DPLS_EDP_PPS_FIX_DIS);
3450 /* This fixes a display corruption (a few pixels shifted downward)
3451 * seen only on the LVDS panels of some Ivy Bridge HP laptops.
3452 */
3453 for_each_pipe(pipe)
3454 I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_CHICKEN2_TIMING_OVERRIDE);
3455 /* WADP0ClockGatingDisable */
3456 for_each_pipe(pipe) {
3457 I915_WRITE(TRANS_CHICKEN1(pipe),
3458 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
3459 }
3394} 3460}
3395 3461
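
With ibx/cpt (and, for Haswell, lpt) called directly from each display init_clock_gating routine, the separate init_pch_clock_gating hook can go away; intel_init_clock_gating() later in this diff reduces to a single indirect call:

	void intel_init_clock_gating(struct drm_device *dev)
	{
		struct drm_i915_private *dev_priv = dev->dev_private;

		dev_priv->display.init_clock_gating(dev);	/* tail-calls the ibx/cpt/lpt variant */
	}
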
3396static void gen6_init_clock_gating(struct drm_device *dev) 3462static void gen6_init_clock_gating(struct drm_device *dev)
3397{ 3463{
3398 struct drm_i915_private *dev_priv = dev->dev_private; 3464 struct drm_i915_private *dev_priv = dev->dev_private;
3399 int pipe; 3465 int pipe;
3400 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; 3466 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
3401 3467
3402 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); 3468 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
3403 3469
3404 I915_WRITE(ILK_DISPLAY_CHICKEN2, 3470 I915_WRITE(ILK_DISPLAY_CHICKEN2,
3405 I915_READ(ILK_DISPLAY_CHICKEN2) | 3471 I915_READ(ILK_DISPLAY_CHICKEN2) |
@@ -3454,11 +3520,12 @@ static void gen6_init_clock_gating(struct drm_device *dev)
3454 I915_WRITE(ILK_DISPLAY_CHICKEN2, 3520 I915_WRITE(ILK_DISPLAY_CHICKEN2,
3455 I915_READ(ILK_DISPLAY_CHICKEN2) | 3521 I915_READ(ILK_DISPLAY_CHICKEN2) |
3456 ILK_DPARB_GATE | ILK_VSDPFD_FULL); 3522 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
3457 I915_WRITE(ILK_DSPCLK_GATE, 3523 I915_WRITE(ILK_DSPCLK_GATE_D,
3458 I915_READ(ILK_DSPCLK_GATE) | 3524 I915_READ(ILK_DSPCLK_GATE_D) |
3459 ILK_DPARB_CLK_GATE | 3525 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
3460 ILK_DPFD_CLK_GATE); 3526 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
3461 3527
3528 /* WaMbcDriverBootEnable */
3462 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | 3529 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
3463 GEN6_MBCTL_ENABLE_BOOT_FETCH); 3530 GEN6_MBCTL_ENABLE_BOOT_FETCH);
3464 3531
@@ -3473,6 +3540,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
3473 * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */ 3540 * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
3474 I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff)); 3541 I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
3475 I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI)); 3542 I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
3543
3544 cpt_init_clock_gating(dev);
3476} 3545}
3477 3546
3478static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) 3547static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
@@ -3487,13 +3556,24 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
3487 I915_WRITE(GEN7_FF_THREAD_MODE, reg); 3556 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
3488} 3557}
3489 3558
3559static void lpt_init_clock_gating(struct drm_device *dev)
3560{
3561 struct drm_i915_private *dev_priv = dev->dev_private;
3562
3563 /*
3564 * TODO: this bit should only be enabled when really needed, then
3565 * disabled when not needed anymore in order to save power.
3566 */
3567 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
3568 I915_WRITE(SOUTH_DSPCLK_GATE_D,
3569 I915_READ(SOUTH_DSPCLK_GATE_D) |
3570 PCH_LP_PARTITION_LEVEL_DISABLE);
3571}
3572
3490static void haswell_init_clock_gating(struct drm_device *dev) 3573static void haswell_init_clock_gating(struct drm_device *dev)
3491{ 3574{
3492 struct drm_i915_private *dev_priv = dev->dev_private; 3575 struct drm_i915_private *dev_priv = dev->dev_private;
3493 int pipe; 3576 int pipe;
3494 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
3495
3496 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
3497 3577
3498 I915_WRITE(WM3_LP_ILK, 0); 3578 I915_WRITE(WM3_LP_ILK, 0);
3499 I915_WRITE(WM2_LP_ILK, 0); 3579 I915_WRITE(WM2_LP_ILK, 0);
@@ -3504,12 +3584,6 @@ static void haswell_init_clock_gating(struct drm_device *dev)
3504 */ 3584 */
3505 I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); 3585 I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
3506 3586
3507 I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
3508
3509 I915_WRITE(IVB_CHICKEN3,
3510 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
3511 CHICKEN3_DGMG_DONE_FIX_DISABLE);
3512
3513 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ 3587 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
3514 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, 3588 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
3515 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); 3589 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
@@ -3538,6 +3612,10 @@ static void haswell_init_clock_gating(struct drm_device *dev)
3538 I915_WRITE(CACHE_MODE_1, 3612 I915_WRITE(CACHE_MODE_1,
3539 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); 3613 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
3540 3614
3615 /* WaMbcDriverBootEnable */
3616 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
3617 GEN6_MBCTL_ENABLE_BOOT_FETCH);
3618
3541 /* XXX: This is a workaround for early silicon revisions and should be 3619 /* XXX: This is a workaround for early silicon revisions and should be
3542 * removed later. 3620 * removed later.
3543 */ 3621 */
@@ -3547,27 +3625,38 @@ static void haswell_init_clock_gating(struct drm_device *dev)
3547 WM_DBG_DISALLOW_SPRITE | 3625 WM_DBG_DISALLOW_SPRITE |
3548 WM_DBG_DISALLOW_MAXFIFO); 3626 WM_DBG_DISALLOW_MAXFIFO);
3549 3627
3628 lpt_init_clock_gating(dev);
3550} 3629}
3551 3630
3552static void ivybridge_init_clock_gating(struct drm_device *dev) 3631static void ivybridge_init_clock_gating(struct drm_device *dev)
3553{ 3632{
3554 struct drm_i915_private *dev_priv = dev->dev_private; 3633 struct drm_i915_private *dev_priv = dev->dev_private;
3555 int pipe; 3634 int pipe;
3556 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
3557 uint32_t snpcr; 3635 uint32_t snpcr;
3558 3636
3559 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
3560
3561 I915_WRITE(WM3_LP_ILK, 0); 3637 I915_WRITE(WM3_LP_ILK, 0);
3562 I915_WRITE(WM2_LP_ILK, 0); 3638 I915_WRITE(WM2_LP_ILK, 0);
3563 I915_WRITE(WM1_LP_ILK, 0); 3639 I915_WRITE(WM1_LP_ILK, 0);
3564 3640
3565 I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); 3641 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
3642
3643 /* WaDisableEarlyCull */
3644 I915_WRITE(_3D_CHICKEN3,
3645 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
3566 3646
3647 /* WaDisableBackToBackFlipFix */
3567 I915_WRITE(IVB_CHICKEN3, 3648 I915_WRITE(IVB_CHICKEN3,
3568 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | 3649 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
3569 CHICKEN3_DGMG_DONE_FIX_DISABLE); 3650 CHICKEN3_DGMG_DONE_FIX_DISABLE);
3570 3651
3652 /* WaDisablePSDDualDispatchEnable */
3653 if (IS_IVB_GT1(dev))
3654 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
3655 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
3656 else
3657 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
3658 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
3659
3571 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ 3660 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
3572 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, 3661 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
3573 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); 3662 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
@@ -3576,7 +3665,18 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
3576 I915_WRITE(GEN7_L3CNTLREG1, 3665 I915_WRITE(GEN7_L3CNTLREG1,
3577 GEN7_WA_FOR_GEN7_L3_CONTROL); 3666 GEN7_WA_FOR_GEN7_L3_CONTROL);
3578 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, 3667 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
3579 GEN7_WA_L3_CHICKEN_MODE); 3668 GEN7_WA_L3_CHICKEN_MODE);
3669 if (IS_IVB_GT1(dev))
3670 I915_WRITE(GEN7_ROW_CHICKEN2,
3671 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
3672 else
3673 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
3674 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
3675
3676
3677 /* WaForceL3Serialization */
3678 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
3679 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
3580 3680
3581 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock 3681 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
3582 * gating disable must be set. Failure to set it results in 3682 * gating disable must be set. Failure to set it results in
@@ -3607,6 +3707,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
3607 intel_flush_display_plane(dev_priv, pipe); 3707 intel_flush_display_plane(dev_priv, pipe);
3608 } 3708 }
3609 3709
3710 /* WaMbcDriverBootEnable */
3610 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | 3711 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
3611 GEN6_MBCTL_ENABLE_BOOT_FETCH); 3712 GEN6_MBCTL_ENABLE_BOOT_FETCH);
3612 3713
@@ -3620,39 +3721,59 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
3620 snpcr &= ~GEN6_MBC_SNPCR_MASK; 3721 snpcr &= ~GEN6_MBC_SNPCR_MASK;
3621 snpcr |= GEN6_MBC_SNPCR_MED; 3722 snpcr |= GEN6_MBC_SNPCR_MED;
3622 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); 3723 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
3724
3725 cpt_init_clock_gating(dev);
3623} 3726}
3624 3727
3625static void valleyview_init_clock_gating(struct drm_device *dev) 3728static void valleyview_init_clock_gating(struct drm_device *dev)
3626{ 3729{
3627 struct drm_i915_private *dev_priv = dev->dev_private; 3730 struct drm_i915_private *dev_priv = dev->dev_private;
3628 int pipe; 3731 int pipe;
3629 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
3630
3631 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
3632 3732
3633 I915_WRITE(WM3_LP_ILK, 0); 3733 I915_WRITE(WM3_LP_ILK, 0);
3634 I915_WRITE(WM2_LP_ILK, 0); 3734 I915_WRITE(WM2_LP_ILK, 0);
3635 I915_WRITE(WM1_LP_ILK, 0); 3735 I915_WRITE(WM1_LP_ILK, 0);
3636 3736
3637 I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); 3737 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
3738
3739 /* WaDisableEarlyCull */
3740 I915_WRITE(_3D_CHICKEN3,
3741 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
3638 3742
3743 /* WaDisableBackToBackFlipFix */
3639 I915_WRITE(IVB_CHICKEN3, 3744 I915_WRITE(IVB_CHICKEN3,
3640 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | 3745 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
3641 CHICKEN3_DGMG_DONE_FIX_DISABLE); 3746 CHICKEN3_DGMG_DONE_FIX_DISABLE);
3642 3747
3748 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
3749 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
3750
3643 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */ 3751 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
3644 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1, 3752 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
3645 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC); 3753 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
3646 3754
3647 /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */ 3755 /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
3648 I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL); 3756 I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
3649 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE); 3757 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
3650 3758
3759 /* WaForceL3Serialization */
3760 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
3761 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
3762
3763 /* WaDisableDopClockGating */
3764 I915_WRITE(GEN7_ROW_CHICKEN2,
3765 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
3766
3651 /* This is required by WaCatErrorRejectionIssue */ 3771 /* This is required by WaCatErrorRejectionIssue */
3652 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 3772 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
3653 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 3773 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
3654 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 3774 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
3655 3775
3776 /* WaMbcDriverBootEnable */
3656 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | 3777 I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
3657 GEN6_MBCTL_ENABLE_BOOT_FETCH); 3778 GEN6_MBCTL_ENABLE_BOOT_FETCH);
3658 3779
@@ -3704,6 +3825,13 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
3704 PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN | 3825 PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN |
3705 SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN | 3826 SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN |
3706 PLANEA_FLIPDONE_INT_EN); 3827 PLANEA_FLIPDONE_INT_EN);
3828
3829 /*
3830 * WaDisableVLVClockGating_VBIIssue
3831 * Disable clock gating on the GCFG unit to prevent a delay
3832 * in the reporting of vblank events.
3833 */
3834 I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
3707} 3835}
3708 3836
3709static void g4x_init_clock_gating(struct drm_device *dev) 3837static void g4x_init_clock_gating(struct drm_device *dev)
@@ -3722,6 +3850,10 @@ static void g4x_init_clock_gating(struct drm_device *dev)
3722 if (IS_GM45(dev)) 3850 if (IS_GM45(dev))
3723 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; 3851 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
3724 I915_WRITE(DSPCLK_GATE_D, dspclk_gate); 3852 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
3853
3854 /* WaDisableRenderCachePipelinedFlush */
3855 I915_WRITE(CACHE_MODE_0,
3856 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
3725} 3857}
3726 3858
3727static void crestline_init_clock_gating(struct drm_device *dev) 3859static void crestline_init_clock_gating(struct drm_device *dev)
@@ -3777,44 +3909,11 @@ static void i830_init_clock_gating(struct drm_device *dev)
3777 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); 3909 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
3778} 3910}
3779 3911
3780static void ibx_init_clock_gating(struct drm_device *dev)
3781{
3782 struct drm_i915_private *dev_priv = dev->dev_private;
3783
3784 /*
3785 * On Ibex Peak and Cougar Point, we need to disable clock
3786 * gating for the panel power sequencer or it will fail to
3787 * start up when no ports are active.
3788 */
3789 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
3790}
3791
3792static void cpt_init_clock_gating(struct drm_device *dev)
3793{
3794 struct drm_i915_private *dev_priv = dev->dev_private;
3795 int pipe;
3796
3797 /*
3798 * On Ibex Peak and Cougar Point, we need to disable clock
3799 * gating for the panel power sequencer or it will fail to
3800 * start up when no ports are active.
3801 */
3802 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
3803 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
3804 DPLS_EDP_PPS_FIX_DIS);
3805 /* Without this, mode sets may fail silently on FDI */
3806 for_each_pipe(pipe)
3807 I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
3808}
3809
3810void intel_init_clock_gating(struct drm_device *dev) 3912void intel_init_clock_gating(struct drm_device *dev)
3811{ 3913{
3812 struct drm_i915_private *dev_priv = dev->dev_private; 3914 struct drm_i915_private *dev_priv = dev->dev_private;
3813 3915
3814 dev_priv->display.init_clock_gating(dev); 3916 dev_priv->display.init_clock_gating(dev);
3815
3816 if (dev_priv->display.init_pch_clock_gating)
3817 dev_priv->display.init_pch_clock_gating(dev);
3818} 3917}
3819 3918
3820/* Starting with Haswell, we have different power wells for 3919/* Starting with Haswell, we have different power wells for
@@ -3840,7 +3939,7 @@ void intel_init_power_wells(struct drm_device *dev)
3840 3939
3841 if ((well & HSW_PWR_WELL_STATE) == 0) { 3940 if ((well & HSW_PWR_WELL_STATE) == 0) {
3842 I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE); 3941 I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE);
3843 if (wait_for(I915_READ(power_wells[i] & HSW_PWR_WELL_STATE), 20)) 3942 if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20))
3844 DRM_ERROR("Error enabling power well %lx\n", power_wells[i]); 3943 DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
3845 } 3944 }
3846 } 3945 }
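
The power-well hunk fixes a misplaced parenthesis: the status mask was being applied to the register *offset* before the MMIO read rather than to the value read back. Side by side:

	I915_READ(power_wells[i] & HSW_PWR_WELL_STATE)	/* before: reads a bogus offset */
	I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE	/* after: read, then test the bit */
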
@@ -3878,11 +3977,6 @@ void intel_init_pm(struct drm_device *dev)
3878 3977
3879 /* For FIFO watermark updates */ 3978 /* For FIFO watermark updates */
3880 if (HAS_PCH_SPLIT(dev)) { 3979 if (HAS_PCH_SPLIT(dev)) {
3881 if (HAS_PCH_IBX(dev))
3882 dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
3883 else if (HAS_PCH_CPT(dev))
3884 dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
3885
3886 if (IS_GEN5(dev)) { 3980 if (IS_GEN5(dev)) {
3887 if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) 3981 if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
3888 dev_priv->display.update_wm = ironlake_update_wm; 3982 dev_priv->display.update_wm = ironlake_update_wm;
@@ -3993,6 +4087,12 @@ static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
3993 DRM_ERROR("GT thread status wait timed out\n"); 4087 DRM_ERROR("GT thread status wait timed out\n");
3994} 4088}
3995 4089
4090static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
4091{
4092 I915_WRITE_NOTRACE(FORCEWAKE, 0);
4093 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
4094}
4095
3996static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) 4096static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
3997{ 4097{
3998 u32 forcewake_ack; 4098 u32 forcewake_ack;
@@ -4006,7 +4106,7 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
4006 FORCEWAKE_ACK_TIMEOUT_MS)) 4106 FORCEWAKE_ACK_TIMEOUT_MS))
4007 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); 4107 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
4008 4108
4009 I915_WRITE_NOTRACE(FORCEWAKE, 1); 4109 I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL);
4010 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ 4110 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
4011 4111
4012 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), 4112 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
@@ -4016,6 +4116,12 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
4016 __gen6_gt_wait_for_thread_c0(dev_priv); 4116 __gen6_gt_wait_for_thread_c0(dev_priv);
4017} 4117}
4018 4118
4119static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
4120{
4121 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
4122 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
4123}
4124
4019static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) 4125static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
4020{ 4126{
4021 u32 forcewake_ack; 4127 u32 forcewake_ack;
@@ -4029,7 +4135,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
4029 FORCEWAKE_ACK_TIMEOUT_MS)) 4135 FORCEWAKE_ACK_TIMEOUT_MS))
4030 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); 4136 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
4031 4137
4032 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1)); 4138 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
4033 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */ 4139 POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
4034 4140
4035 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1), 4141 if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
@@ -4073,7 +4179,7 @@ static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
4073 4179
4074static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) 4180static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
4075{ 4181{
4076 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1)); 4182 I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
4077 /* gen6_gt_check_fifodbg doubles as the POSTING_READ */ 4183 /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
4078 gen6_gt_check_fifodbg(dev_priv); 4184 gen6_gt_check_fifodbg(dev_priv);
4079} 4185}
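
The FORCEWAKE_KERNEL conversions lean on the "masked" register convention, where the high 16 bits of a write select which low bits it may touch (macro definitions as in i915_reg.h):

	#define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))	/* unmask the bit and set it   */
	#define _MASKED_BIT_DISABLE(a)	((a) << 16)		/* unmask the bit and clear it */

	/* hence the new reset helpers can zero every bit in a single write: */
	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
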
@@ -4111,13 +4217,18 @@ int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
4111 return ret; 4217 return ret;
4112} 4218}
4113 4219
4220static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
4221{
4222 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
4223}
4224
4114static void vlv_force_wake_get(struct drm_i915_private *dev_priv) 4225static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
4115{ 4226{
4116 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0, 4227 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0,
4117 FORCEWAKE_ACK_TIMEOUT_MS)) 4228 FORCEWAKE_ACK_TIMEOUT_MS))
4118 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n"); 4229 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
4119 4230
4120 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(1)); 4231 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
4121 4232
4122 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1), 4233 if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1),
4123 FORCEWAKE_ACK_TIMEOUT_MS)) 4234 FORCEWAKE_ACK_TIMEOUT_MS))
@@ -4128,49 +4239,89 @@ static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
4128 4239
4129static void vlv_force_wake_put(struct drm_i915_private *dev_priv) 4240static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
4130{ 4241{
4131 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(1)); 4242 I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
4132 /* The below doubles as a POSTING_READ */ 4243 /* The below doubles as a POSTING_READ */
4133 gen6_gt_check_fifodbg(dev_priv); 4244 gen6_gt_check_fifodbg(dev_priv);
4134} 4245}
4135 4246
4247void intel_gt_reset(struct drm_device *dev)
4248{
4249 struct drm_i915_private *dev_priv = dev->dev_private;
4250
4251 if (IS_VALLEYVIEW(dev)) {
4252 vlv_force_wake_reset(dev_priv);
4253 } else if (INTEL_INFO(dev)->gen >= 6) {
4254 __gen6_gt_force_wake_reset(dev_priv);
4255 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
4256 __gen6_gt_force_wake_mt_reset(dev_priv);
4257 }
4258}
4259
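[Editor's note] The new intel_gt_reset() clears any forcewake request left armed by the BIOS or by a GPU reset, so the get/put refcounting that follows starts from a known state. Below is a minimal userspace model of that reset-then-refcount pattern; fw_write()/fw_ack() and the single ack bit are illustrative stand-ins, not the driver's MMIO accessors, and the real paths run under dev_priv->gt_lock with bounded waits.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical MMIO stand-ins; the driver uses I915_WRITE_NOTRACE etc. */
static uint32_t fw_reg;
static void fw_write(uint32_t v) { fw_reg = v; }
static uint32_t fw_ack(void)     { return fw_reg & 1; }

static unsigned forcewake_count;

/* Drop any stale request so the refcount below starts from known state. */
static void fw_reset(void)
{
	fw_write(0);
	forcewake_count = 0;
}

static void fw_get(void)
{
	if (forcewake_count++ == 0)
		fw_write(1);		/* first user wakes the GT */
	while (!fw_ack())
		;			/* the real code bounds this wait */
}

static void fw_put(void)
{
	assert(forcewake_count > 0);
	if (--forcewake_count == 0)
		fw_write(0);		/* last user lets the GT sleep again */
}

int main(void)
{
	fw_reset();
	fw_get(); fw_get(); fw_put(); fw_put();
	printf("forcewake register: %u\n", (unsigned)fw_reg); /* 0 */
	return 0;
}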
4136void intel_gt_init(struct drm_device *dev) 4260void intel_gt_init(struct drm_device *dev)
4137{ 4261{
4138 struct drm_i915_private *dev_priv = dev->dev_private; 4262 struct drm_i915_private *dev_priv = dev->dev_private;
4139 4263
4140 spin_lock_init(&dev_priv->gt_lock); 4264 spin_lock_init(&dev_priv->gt_lock);
4141 4265
4266 intel_gt_reset(dev);
4267
4142 if (IS_VALLEYVIEW(dev)) { 4268 if (IS_VALLEYVIEW(dev)) {
4143 dev_priv->gt.force_wake_get = vlv_force_wake_get; 4269 dev_priv->gt.force_wake_get = vlv_force_wake_get;
4144 dev_priv->gt.force_wake_put = vlv_force_wake_put; 4270 dev_priv->gt.force_wake_put = vlv_force_wake_put;
4145 } else if (INTEL_INFO(dev)->gen >= 6) { 4271 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
4272 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
4273 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
4274 } else if (IS_GEN6(dev)) {
4146 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get; 4275 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
4147 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put; 4276 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
4277 }
4278 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
4279 intel_gen6_powersave_work);
4280}
4148 4281
4149 /* IVB configs may use multi-threaded forcewake */ 4282int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
4150 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { 4283{
4151 u32 ecobus; 4284 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4152 4285
4153 /* A small trick here - if the bios hasn't configured 4286 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
4154 * MT forcewake, and if the device is in RC6, then 4287 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
4155 * force_wake_mt_get will not wake the device and the 4288 return -EAGAIN;
4156 * ECOBUS read will return zero. Which will be 4289 }
4157 * (correctly) interpreted by the test below as MT 4290
4158 * forcewake being disabled. 4291 I915_WRITE(GEN6_PCODE_DATA, *val);
4159 */ 4292 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
4160 mutex_lock(&dev->struct_mutex); 4293
4161 __gen6_gt_force_wake_mt_get(dev_priv); 4294 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
4162 ecobus = I915_READ_NOTRACE(ECOBUS); 4295 500)) {
4163 __gen6_gt_force_wake_mt_put(dev_priv); 4296 DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
4164 mutex_unlock(&dev->struct_mutex); 4297 return -ETIMEDOUT;
4165
4166 if (ecobus & FORCEWAKE_MT_ENABLE) {
4167 DRM_DEBUG_KMS("Using MT version of forcewake\n");
4168 dev_priv->gt.force_wake_get =
4169 __gen6_gt_force_wake_mt_get;
4170 dev_priv->gt.force_wake_put =
4171 __gen6_gt_force_wake_mt_put;
4172 }
4173 }
4174 } 4298 }
4299
4300 *val = I915_READ(GEN6_PCODE_DATA);
4301 I915_WRITE(GEN6_PCODE_DATA, 0);
4302
4303 return 0;
4175} 4304}
4176 4305
4306int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
4307{
4308 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4309
4310 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
4311 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
4312 return -EAGAIN;
4313 }
4314
4315 I915_WRITE(GEN6_PCODE_DATA, val);
4316 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
4317
4318 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
4319 500)) {
4320 DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
4321 return -ETIMEDOUT;
4322 }
4323
4324 I915_WRITE(GEN6_PCODE_DATA, 0);
4325
4326 return 0;
4327}
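[Editor's note] sandybridge_pcode_read() and _write() share one mailbox handshake: bail with -EAGAIN if GEN6_PCODE_READY is still set from a previous transaction, write the data register, kick the mailbox with READY | mbox, then poll until hardware clears READY. A compilable sketch of that handshake follows; the register indices, READY bit position, and the instant-reply "firmware" model are assumptions for illustration only.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative register file; offsets and the READY bit are stand-ins. */
enum { PCODE_MAILBOX, PCODE_DATA, NREGS };
#define PCODE_READY (1u << 31)

static uint32_t regs[NREGS];

static uint32_t mmio_read(int r) { return regs[r]; }
static void mmio_write(int r, uint32_t v)
{
	regs[r] = v;
	/* Model the firmware: consume a READY request immediately. */
	if (r == PCODE_MAILBOX && (v & PCODE_READY)) {
		regs[PCODE_DATA] ^= 0xff;	/* pretend pcode replied */
		regs[PCODE_MAILBOX] &= ~PCODE_READY;
	}
}

static int pcode_rw(uint8_t mbox, uint32_t *val)
{
	int tries = 500;

	if (mmio_read(PCODE_MAILBOX) & PCODE_READY)
		return -EAGAIN;		/* someone else mid-transaction */

	mmio_write(PCODE_DATA, *val);
	mmio_write(PCODE_MAILBOX, PCODE_READY | mbox);

	while (mmio_read(PCODE_MAILBOX) & PCODE_READY)
		if (--tries == 0)
			return -ETIMEDOUT; /* the driver waits up to 500ms */

	*val = mmio_read(PCODE_DATA);
	mmio_write(PCODE_DATA, 0);
	return 0;
}

int main(void)
{
	uint32_t v = 0x12;
	int ret = pcode_rw(0x5, &v);
	printf("pcode_rw: %d, val=0x%x\n", ret, (unsigned)v);
	return 0;
}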
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ecbc5c5dbbbc..2346b920bd86 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -45,7 +45,7 @@ struct pipe_control {
45 45
46static inline int ring_space(struct intel_ring_buffer *ring) 46static inline int ring_space(struct intel_ring_buffer *ring)
47{ 47{
48 int space = (ring->head & HEAD_ADDR) - (ring->tail + 8); 48 int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
49 if (space < 0) 49 if (space < 0)
50 space += ring->size; 50 space += ring->size;
51 return space; 51 return space;
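[Editor's note] ring_space() is the wraparound arithmetic behind all the wait-for-space paths: free bytes run from tail forward to head, minus a fixed reserve (the new I915_RING_FREE_SPACE, 64 bytes) that keeps head and tail out of the same cacheline, per the BSpec citation added to intel_ringbuffer.h later in this diff. The same arithmetic in a standalone, testable form:

#include <assert.h>
#include <stdio.h>

#define RING_FREE_SPACE 64  /* keep head and tail out of one cacheline */

/* Free bytes the CPU may fill: from tail forward to head, modulo size. */
static int ring_space(int head, int tail, int size)
{
	int space = head - (tail + RING_FREE_SPACE);
	if (space < 0)
		space += size;
	return space;
}

int main(void)
{
	int size = 32 * 4096;

	assert(ring_space(0, 0, size) == size - RING_FREE_SPACE);
	assert(ring_space(256, 128, size) == 64); /* 128B gap minus reserve */
	printf("empty ring usable bytes: %d\n", ring_space(0, 0, size));
	return 0;
}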
@@ -245,7 +245,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,
245 /* 245 /*
246 * TLB invalidate requires a post-sync write. 246 * TLB invalidate requires a post-sync write.
247 */ 247 */
248 flags |= PIPE_CONTROL_QW_WRITE; 248 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
249 } 249 }
250 250
251 ret = intel_ring_begin(ring, 4); 251 ret = intel_ring_begin(ring, 4);
@@ -555,15 +555,11 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
555 555
556static void 556static void
557update_mboxes(struct intel_ring_buffer *ring, 557update_mboxes(struct intel_ring_buffer *ring,
558 u32 seqno, 558 u32 mmio_offset)
559 u32 mmio_offset)
560{ 559{
561 intel_ring_emit(ring, MI_SEMAPHORE_MBOX | 560 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
562 MI_SEMAPHORE_GLOBAL_GTT |
563 MI_SEMAPHORE_REGISTER |
564 MI_SEMAPHORE_UPDATE);
565 intel_ring_emit(ring, seqno);
566 intel_ring_emit(ring, mmio_offset); 561 intel_ring_emit(ring, mmio_offset);
562 intel_ring_emit(ring, ring->outstanding_lazy_request);
567} 563}
568 564
569/** 565/**
@@ -576,8 +572,7 @@ update_mboxes(struct intel_ring_buffer *ring,
576 * This acts like a signal in the canonical semaphore. 572 * This acts like a signal in the canonical semaphore.
577 */ 573 */
578static int 574static int
579gen6_add_request(struct intel_ring_buffer *ring, 575gen6_add_request(struct intel_ring_buffer *ring)
580 u32 *seqno)
581{ 576{
582 u32 mbox1_reg; 577 u32 mbox1_reg;
583 u32 mbox2_reg; 578 u32 mbox2_reg;
@@ -590,13 +585,11 @@ gen6_add_request(struct intel_ring_buffer *ring,
590 mbox1_reg = ring->signal_mbox[0]; 585 mbox1_reg = ring->signal_mbox[0];
591 mbox2_reg = ring->signal_mbox[1]; 586 mbox2_reg = ring->signal_mbox[1];
592 587
593 *seqno = i915_gem_next_request_seqno(ring); 588 update_mboxes(ring, mbox1_reg);
594 589 update_mboxes(ring, mbox2_reg);
595 update_mboxes(ring, *seqno, mbox1_reg);
596 update_mboxes(ring, *seqno, mbox2_reg);
597 intel_ring_emit(ring, MI_STORE_DWORD_INDEX); 590 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
598 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 591 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
599 intel_ring_emit(ring, *seqno); 592 intel_ring_emit(ring, ring->outstanding_lazy_request);
600 intel_ring_emit(ring, MI_USER_INTERRUPT); 593 intel_ring_emit(ring, MI_USER_INTERRUPT);
601 intel_ring_advance(ring); 594 intel_ring_advance(ring);
602 595
@@ -653,10 +646,8 @@ do { \
653} while (0) 646} while (0)
654 647
655static int 648static int
656pc_render_add_request(struct intel_ring_buffer *ring, 649pc_render_add_request(struct intel_ring_buffer *ring)
657 u32 *result)
658{ 650{
659 u32 seqno = i915_gem_next_request_seqno(ring);
660 struct pipe_control *pc = ring->private; 651 struct pipe_control *pc = ring->private;
661 u32 scratch_addr = pc->gtt_offset + 128; 652 u32 scratch_addr = pc->gtt_offset + 128;
662 int ret; 653 int ret;
@@ -677,7 +668,7 @@ pc_render_add_request(struct intel_ring_buffer *ring,
677 PIPE_CONTROL_WRITE_FLUSH | 668 PIPE_CONTROL_WRITE_FLUSH |
678 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); 669 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
679 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 670 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
680 intel_ring_emit(ring, seqno); 671 intel_ring_emit(ring, ring->outstanding_lazy_request);
681 intel_ring_emit(ring, 0); 672 intel_ring_emit(ring, 0);
682 PIPE_CONTROL_FLUSH(ring, scratch_addr); 673 PIPE_CONTROL_FLUSH(ring, scratch_addr);
683 scratch_addr += 128; /* write to separate cachelines */ 674 scratch_addr += 128; /* write to separate cachelines */
@@ -696,11 +687,10 @@ pc_render_add_request(struct intel_ring_buffer *ring,
696 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | 687 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
697 PIPE_CONTROL_NOTIFY); 688 PIPE_CONTROL_NOTIFY);
698 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 689 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
699 intel_ring_emit(ring, seqno); 690 intel_ring_emit(ring, ring->outstanding_lazy_request);
700 intel_ring_emit(ring, 0); 691 intel_ring_emit(ring, 0);
701 intel_ring_advance(ring); 692 intel_ring_advance(ring);
702 693
703 *result = seqno;
704 return 0; 694 return 0;
705} 695}
706 696
@@ -888,25 +878,20 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
888} 878}
889 879
890static int 880static int
891i9xx_add_request(struct intel_ring_buffer *ring, 881i9xx_add_request(struct intel_ring_buffer *ring)
892 u32 *result)
893{ 882{
894 u32 seqno;
895 int ret; 883 int ret;
896 884
897 ret = intel_ring_begin(ring, 4); 885 ret = intel_ring_begin(ring, 4);
898 if (ret) 886 if (ret)
899 return ret; 887 return ret;
900 888
901 seqno = i915_gem_next_request_seqno(ring);
902
903 intel_ring_emit(ring, MI_STORE_DWORD_INDEX); 889 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
904 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 890 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
905 intel_ring_emit(ring, seqno); 891 intel_ring_emit(ring, ring->outstanding_lazy_request);
906 intel_ring_emit(ring, MI_USER_INTERRUPT); 892 intel_ring_emit(ring, MI_USER_INTERRUPT);
907 intel_ring_advance(ring); 893 intel_ring_advance(ring);
908 894
909 *result = seqno;
910 return 0; 895 return 0;
911} 896}
912 897
@@ -964,7 +949,9 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
964} 949}
965 950
966static int 951static int
967i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length) 952i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
953 u32 offset, u32 length,
954 unsigned flags)
968{ 955{
969 int ret; 956 int ret;
970 957
@@ -975,7 +962,7 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
975 intel_ring_emit(ring, 962 intel_ring_emit(ring,
976 MI_BATCH_BUFFER_START | 963 MI_BATCH_BUFFER_START |
977 MI_BATCH_GTT | 964 MI_BATCH_GTT |
978 MI_BATCH_NON_SECURE_I965); 965 (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
979 intel_ring_emit(ring, offset); 966 intel_ring_emit(ring, offset);
980 intel_ring_advance(ring); 967 intel_ring_advance(ring);
981 968
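[Editor's note] Each dispatch_execbuffer variant now receives a flags word, and the absence of I915_DISPATCH_SECURE turns into the generation-specific non-secure bit of the batch-start command. A small sketch of that translation; the command encodings below are placeholders for illustration, not the real MI_BATCH_* values from i915_reg.h.

#include <stdint.h>
#include <stdio.h>

#define DISPATCH_SECURE		0x1	/* caller wants a privileged batch */

/* Illustrative command bits; the real encodings differ per generation. */
#define MI_BATCH_START		(0x31u << 23)
#define MI_BATCH_NON_SECURE	(1u << 8)

static uint32_t batch_start_dword(unsigned flags)
{
	/* Secure batches run privileged: leave the non-secure bit clear.
	 * Everything else must set it so the hardware applies the usual
	 * address-space checks to the batch. */
	return MI_BATCH_START |
	       ((flags & DISPATCH_SECURE) ? 0 : MI_BATCH_NON_SECURE);
}

int main(void)
{
	printf("user batch:   0x%08x\n", (unsigned)batch_start_dword(0));
	printf("secure batch: 0x%08x\n",
	       (unsigned)batch_start_dword(DISPATCH_SECURE));
	return 0;
}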
@@ -984,7 +971,8 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
984 971
985static int 972static int
986i830_dispatch_execbuffer(struct intel_ring_buffer *ring, 973i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
987 u32 offset, u32 len) 974 u32 offset, u32 len,
975 unsigned flags)
988{ 976{
989 int ret; 977 int ret;
990 978
@@ -993,7 +981,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
993 return ret; 981 return ret;
994 982
995 intel_ring_emit(ring, MI_BATCH_BUFFER); 983 intel_ring_emit(ring, MI_BATCH_BUFFER);
996 intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); 984 intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
997 intel_ring_emit(ring, offset + len - 8); 985 intel_ring_emit(ring, offset + len - 8);
998 intel_ring_emit(ring, 0); 986 intel_ring_emit(ring, 0);
999 intel_ring_advance(ring); 987 intel_ring_advance(ring);
@@ -1003,7 +991,8 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
1003 991
1004static int 992static int
1005i915_dispatch_execbuffer(struct intel_ring_buffer *ring, 993i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
1006 u32 offset, u32 len) 994 u32 offset, u32 len,
995 unsigned flags)
1007{ 996{
1008 int ret; 997 int ret;
1009 998
@@ -1012,7 +1001,7 @@ i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
1012 return ret; 1001 return ret;
1013 1002
1014 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); 1003 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
1015 intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE); 1004 intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
1016 intel_ring_advance(ring); 1005 intel_ring_advance(ring);
1017 1006
1018 return 0; 1007 return 0;
@@ -1075,6 +1064,29 @@ err:
1075 return ret; 1064 return ret;
1076} 1065}
1077 1066
1067static int init_phys_hws_pga(struct intel_ring_buffer *ring)
1068{
1069 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1070 u32 addr;
1071
1072 if (!dev_priv->status_page_dmah) {
1073 dev_priv->status_page_dmah =
1074 drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
1075 if (!dev_priv->status_page_dmah)
1076 return -ENOMEM;
1077 }
1078
1079 addr = dev_priv->status_page_dmah->busaddr;
1080 if (INTEL_INFO(ring->dev)->gen >= 4)
1081 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
1082 I915_WRITE(HWS_PGA, addr);
1083
1084 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1085 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1086
1087 return 0;
1088}
1089
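[Editor's note] init_phys_hws_pga() points the hardware at a DMA-coherent status page by writing its bus address into HWS_PGA. The register is 32 bits wide but gen4+ parts can address beyond 4GB, which is what the (busaddr >> 28) & 0xf0 dance is for: address bits 32-35 ride in register bits 4-7, which a page-aligned address leaves free. The packing in isolation (the example address is made up):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* HWS_PGA holds a page-aligned 32-bit address; on gen4+ bits 32..35 of
 * the bus address travel in register bits 4..7, which page alignment
 * leaves unused. */
static uint32_t hws_pga_value(uint64_t busaddr, int gen)
{
	uint32_t addr = (uint32_t)busaddr;	/* low 32 bits */

	if (gen >= 4)
		addr |= (uint32_t)((busaddr >> 28) & 0xf0);
	return addr;
}

int main(void)
{
	uint64_t busaddr = 0x3c0001000ull;	/* >4GB, page-aligned */

	/* prints 0xc0001030: bits 32-35 (0x3) landed in bits 4-7 */
	printf("HWS_PGA = 0x%08" PRIx32 "\n", hws_pga_value(busaddr, 4));
	return 0;
}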
1078static int intel_init_ring_buffer(struct drm_device *dev, 1090static int intel_init_ring_buffer(struct drm_device *dev,
1079 struct intel_ring_buffer *ring) 1091 struct intel_ring_buffer *ring)
1080{ 1092{
@@ -1086,6 +1098,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1086 INIT_LIST_HEAD(&ring->active_list); 1098 INIT_LIST_HEAD(&ring->active_list);
1087 INIT_LIST_HEAD(&ring->request_list); 1099 INIT_LIST_HEAD(&ring->request_list);
1088 ring->size = 32 * PAGE_SIZE; 1100 ring->size = 32 * PAGE_SIZE;
1101 memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
1089 1102
1090 init_waitqueue_head(&ring->irq_queue); 1103 init_waitqueue_head(&ring->irq_queue);
1091 1104
@@ -1093,6 +1106,11 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1093 ret = init_status_page(ring); 1106 ret = init_status_page(ring);
1094 if (ret) 1107 if (ret)
1095 return ret; 1108 return ret;
1109 } else {
1110 BUG_ON(ring->id != RCS);
1111 ret = init_phys_hws_pga(ring);
1112 if (ret)
1113 return ret;
1096 } 1114 }
1097 1115
1098 obj = i915_gem_alloc_object(dev, ring->size); 1116 obj = i915_gem_alloc_object(dev, ring->size);
@@ -1157,7 +1175,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1157 1175
1158 /* Disable the ring buffer. The ring must be idle at this point */ 1176 /* Disable the ring buffer. The ring must be idle at this point */
1159 dev_priv = ring->dev->dev_private; 1177 dev_priv = ring->dev->dev_private;
1160 ret = intel_wait_ring_idle(ring); 1178 ret = intel_ring_idle(ring);
1161 if (ret) 1179 if (ret)
1162 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", 1180 DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
1163 ring->name, ret); 1181 ring->name, ret);
@@ -1176,28 +1194,6 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1176 cleanup_status_page(ring); 1194 cleanup_status_page(ring);
1177} 1195}
1178 1196
1179static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
1180{
1181 uint32_t __iomem *virt;
1182 int rem = ring->size - ring->tail;
1183
1184 if (ring->space < rem) {
1185 int ret = intel_wait_ring_buffer(ring, rem);
1186 if (ret)
1187 return ret;
1188 }
1189
1190 virt = ring->virtual_start + ring->tail;
1191 rem /= 4;
1192 while (rem--)
1193 iowrite32(MI_NOOP, virt++);
1194
1195 ring->tail = 0;
1196 ring->space = ring_space(ring);
1197
1198 return 0;
1199}
1200
1201static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno) 1197static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
1202{ 1198{
1203 int ret; 1199 int ret;
@@ -1231,7 +1227,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
1231 if (request->tail == -1) 1227 if (request->tail == -1)
1232 continue; 1228 continue;
1233 1229
1234 space = request->tail - (ring->tail + 8); 1230 space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
1235 if (space < 0) 1231 if (space < 0)
1236 space += ring->size; 1232 space += ring->size;
1237 if (space >= n) { 1233 if (space >= n) {
@@ -1266,7 +1262,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
1266 return 0; 1262 return 0;
1267} 1263}
1268 1264
1269int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n) 1265static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
1270{ 1266{
1271 struct drm_device *dev = ring->dev; 1267 struct drm_device *dev = ring->dev;
1272 struct drm_i915_private *dev_priv = dev->dev_private; 1268 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1309,6 +1305,60 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
1309 return -EBUSY; 1305 return -EBUSY;
1310} 1306}
1311 1307
1308static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
1309{
1310 uint32_t __iomem *virt;
1311 int rem = ring->size - ring->tail;
1312
1313 if (ring->space < rem) {
1314 int ret = ring_wait_for_space(ring, rem);
1315 if (ret)
1316 return ret;
1317 }
1318
1319 virt = ring->virtual_start + ring->tail;
1320 rem /= 4;
1321 while (rem--)
1322 iowrite32(MI_NOOP, virt++);
1323
1324 ring->tail = 0;
1325 ring->space = ring_space(ring);
1326
1327 return 0;
1328}
1329
1330int intel_ring_idle(struct intel_ring_buffer *ring)
1331{
1332 u32 seqno;
1333 int ret;
1334
1335 /* We need to add any requests required to flush the objects and ring */
1336 if (ring->outstanding_lazy_request) {
1337 ret = i915_add_request(ring, NULL, NULL);
1338 if (ret)
1339 return ret;
1340 }
1341
1342 /* Wait upon the last request to be completed */
1343 if (list_empty(&ring->request_list))
1344 return 0;
1345
1346 seqno = list_entry(ring->request_list.prev,
1347 struct drm_i915_gem_request,
1348 list)->seqno;
1349
1350 return i915_wait_seqno(ring, seqno);
1351}
1352
1353static int
1354intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
1355{
1356 if (ring->outstanding_lazy_request)
1357 return 0;
1358
1359 return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
1360}
1361
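[Editor's note] intel_ring_begin() now reserves the outstanding lazy request ("olr") seqno before any dword touches the ring, so nothing mid-emission is forced to allocate one later. All the add_request paths above then emit ring->outstanding_lazy_request instead of fetching a fresh seqno. A minimal model of the allocate-once, consume-on-request lifecycle; the names mirror the driver's but the bookkeeping here is deliberately simplified.

#include <stdint.h>
#include <stdio.h>

static uint32_t next_seqno = 1;
static uint32_t outstanding_lazy_request;	/* 0 means none allocated */

/* Called from ring_begin() before the ring is touched. */
static void ring_alloc_seqno(void)
{
	if (outstanding_lazy_request)
		return;			/* reuse until a request is emitted */
	outstanding_lazy_request = next_seqno++;
}

/* Called when the request is actually emitted to the ring. */
static uint32_t ring_emit_request(void)
{
	uint32_t seqno = outstanding_lazy_request;

	outstanding_lazy_request = 0;	/* next begin() allocates afresh */
	return seqno;
}

int main(void)
{
	ring_alloc_seqno();
	ring_alloc_seqno();		/* no-op: same pending request */
	printf("request %u emitted\n", (unsigned)ring_emit_request());
	ring_alloc_seqno();
	printf("request %u emitted\n", (unsigned)ring_emit_request());
	return 0;
}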
1312int intel_ring_begin(struct intel_ring_buffer *ring, 1362int intel_ring_begin(struct intel_ring_buffer *ring,
1313 int num_dwords) 1363 int num_dwords)
1314{ 1364{
@@ -1320,6 +1370,11 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
1320 if (ret) 1370 if (ret)
1321 return ret; 1371 return ret;
1322 1372
1373 /* Preallocate the olr before touching the ring */
1374 ret = intel_ring_alloc_seqno(ring);
1375 if (ret)
1376 return ret;
1377
1323 if (unlikely(ring->tail + n > ring->effective_size)) { 1378 if (unlikely(ring->tail + n > ring->effective_size)) {
1324 ret = intel_wrap_ring_buffer(ring); 1379 ret = intel_wrap_ring_buffer(ring);
1325 if (unlikely(ret)) 1380 if (unlikely(ret))
@@ -1327,7 +1382,7 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
1327 } 1382 }
1328 1383
1329 if (unlikely(ring->space < n)) { 1384 if (unlikely(ring->space < n)) {
1330 ret = intel_wait_ring_buffer(ring, n); 1385 ret = ring_wait_for_space(ring, n);
1331 if (unlikely(ret)) 1386 if (unlikely(ret))
1332 return ret; 1387 return ret;
1333 } 1388 }
@@ -1391,10 +1446,17 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
1391 return ret; 1446 return ret;
1392 1447
1393 cmd = MI_FLUSH_DW; 1448 cmd = MI_FLUSH_DW;
1449 /*
1450 * Bspec vol 1c.5 - video engine command streamer:
1451 * "If ENABLED, all TLBs will be invalidated once the flush
1452 * operation is complete. This bit is only valid when the
1453 * Post-Sync Operation field is a value of 1h or 3h."
1454 */
1394 if (invalidate & I915_GEM_GPU_DOMAINS) 1455 if (invalidate & I915_GEM_GPU_DOMAINS)
1395 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD; 1456 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
1457 MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1396 intel_ring_emit(ring, cmd); 1458 intel_ring_emit(ring, cmd);
1397 intel_ring_emit(ring, 0); 1459 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
1398 intel_ring_emit(ring, 0); 1460 intel_ring_emit(ring, 0);
1399 intel_ring_emit(ring, MI_NOOP); 1461 intel_ring_emit(ring, MI_NOOP);
1400 intel_ring_advance(ring); 1462 intel_ring_advance(ring);
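[Editor's note] As the new BSpec comments spell out, MI_FLUSH_DW only honors TLB invalidation when the Post-Sync Operation field requests a store, so both flush paths now write a dummy dword into a scratch slot of the hardware status page. A sketch of assembling that 4-dword packet; the bit positions below are illustrative placeholders rather than the encodings from i915_reg.h, and the scratch address assumes the 0x30-dword slot defined later in this diff.

#include <stdint.h>
#include <stdio.h>

/* Illustrative encodings; see i915_reg.h for the real MI_FLUSH_DW bits. */
#define MI_FLUSH_DW		(0x26u << 23)
#define MI_INVALIDATE_TLB	(1u << 18)
#define MI_FLUSH_DW_STOREDW	(1u << 14)	/* Post-Sync Op = store DW */
#define MI_FLUSH_DW_USE_GTT	(1u << 2)
#define MI_NOOP			0
#define HWS_SCRATCH_ADDR	0xc0	/* 0x30 dwords into the HWS page */

/* Emit a 4-dword flush; TLB invalidation requires the post-sync store. */
static int emit_flush(uint32_t *cs, int invalidate)
{
	uint32_t cmd = MI_FLUSH_DW;

	if (invalidate)
		cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STOREDW;
	cs[0] = cmd;
	cs[1] = HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;	/* store target */
	cs[2] = 0;					/* value to store */
	cs[3] = MI_NOOP;
	return 4;
}

int main(void)
{
	uint32_t cs[4];
	int i, n = emit_flush(cs, 1);

	for (i = 0; i < n; i++)
		printf("dw%d: 0x%08x\n", i, (unsigned)cs[i]);
	return 0;
}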
@@ -1402,8 +1464,30 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
1402} 1464}
1403 1465
1404static int 1466static int
1467hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1468 u32 offset, u32 len,
1469 unsigned flags)
1470{
1471 int ret;
1472
1473 ret = intel_ring_begin(ring, 2);
1474 if (ret)
1475 return ret;
1476
1477 intel_ring_emit(ring,
1478 MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
1479 (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
 1480	/* bits 0-7 are the length on GEN6+ */
1481 intel_ring_emit(ring, offset);
1482 intel_ring_advance(ring);
1483
1484 return 0;
1485}
1486
1487static int
1405gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, 1488gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1406 u32 offset, u32 len) 1489 u32 offset, u32 len,
1490 unsigned flags)
1407{ 1491{
1408 int ret; 1492 int ret;
1409 1493
@@ -1411,7 +1495,9 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1411 if (ret) 1495 if (ret)
1412 return ret; 1496 return ret;
1413 1497
1414 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965); 1498 intel_ring_emit(ring,
1499 MI_BATCH_BUFFER_START |
1500 (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
 1415	/* bits 0-7 are the length on GEN6+ */ 1501	/* bits 0-7 are the length on GEN6+ */
1416 intel_ring_emit(ring, offset); 1502 intel_ring_emit(ring, offset);
1417 intel_ring_advance(ring); 1503 intel_ring_advance(ring);
@@ -1432,10 +1518,17 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
1432 return ret; 1518 return ret;
1433 1519
1434 cmd = MI_FLUSH_DW; 1520 cmd = MI_FLUSH_DW;
1521 /*
1522 * Bspec vol 1c.3 - blitter engine command streamer:
1523 * "If ENABLED, all TLBs will be invalidated once the flush
1524 * operation is complete. This bit is only valid when the
1525 * Post-Sync Operation field is a value of 1h or 3h."
1526 */
1435 if (invalidate & I915_GEM_DOMAIN_RENDER) 1527 if (invalidate & I915_GEM_DOMAIN_RENDER)
1436 cmd |= MI_INVALIDATE_TLB; 1528 cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
1529 MI_FLUSH_DW_OP_STOREDW;
1437 intel_ring_emit(ring, cmd); 1530 intel_ring_emit(ring, cmd);
1438 intel_ring_emit(ring, 0); 1531 intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
1439 intel_ring_emit(ring, 0); 1532 intel_ring_emit(ring, 0);
1440 intel_ring_emit(ring, MI_NOOP); 1533 intel_ring_emit(ring, MI_NOOP);
1441 intel_ring_advance(ring); 1534 intel_ring_advance(ring);
@@ -1490,7 +1583,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1490 ring->irq_enable_mask = I915_USER_INTERRUPT; 1583 ring->irq_enable_mask = I915_USER_INTERRUPT;
1491 } 1584 }
1492 ring->write_tail = ring_write_tail; 1585 ring->write_tail = ring_write_tail;
1493 if (INTEL_INFO(dev)->gen >= 6) 1586 if (IS_HASWELL(dev))
1587 ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
1588 else if (INTEL_INFO(dev)->gen >= 6)
1494 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; 1589 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1495 else if (INTEL_INFO(dev)->gen >= 4) 1590 else if (INTEL_INFO(dev)->gen >= 4)
1496 ring->dispatch_execbuffer = i965_dispatch_execbuffer; 1591 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
@@ -1501,12 +1596,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
1501 ring->init = init_render_ring; 1596 ring->init = init_render_ring;
1502 ring->cleanup = render_ring_cleanup; 1597 ring->cleanup = render_ring_cleanup;
1503 1598
1504
1505 if (!I915_NEED_GFX_HWS(dev)) {
1506 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1507 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1508 }
1509
1510 return intel_init_ring_buffer(dev, ring); 1599 return intel_init_ring_buffer(dev, ring);
1511} 1600}
1512 1601
@@ -1514,6 +1603,7 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1514{ 1603{
1515 drm_i915_private_t *dev_priv = dev->dev_private; 1604 drm_i915_private_t *dev_priv = dev->dev_private;
1516 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 1605 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1606 int ret;
1517 1607
1518 ring->name = "render ring"; 1608 ring->name = "render ring";
1519 ring->id = RCS; 1609 ring->id = RCS;
@@ -1551,16 +1641,13 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1551 ring->init = init_render_ring; 1641 ring->init = init_render_ring;
1552 ring->cleanup = render_ring_cleanup; 1642 ring->cleanup = render_ring_cleanup;
1553 1643
1554 if (!I915_NEED_GFX_HWS(dev))
1555 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1556
1557 ring->dev = dev; 1644 ring->dev = dev;
1558 INIT_LIST_HEAD(&ring->active_list); 1645 INIT_LIST_HEAD(&ring->active_list);
1559 INIT_LIST_HEAD(&ring->request_list); 1646 INIT_LIST_HEAD(&ring->request_list);
1560 1647
1561 ring->size = size; 1648 ring->size = size;
1562 ring->effective_size = ring->size; 1649 ring->effective_size = ring->size;
1563 if (IS_I830(ring->dev)) 1650 if (IS_I830(ring->dev) || IS_845G(ring->dev))
1564 ring->effective_size -= 128; 1651 ring->effective_size -= 128;
1565 1652
1566 ring->virtual_start = ioremap_wc(start, size); 1653 ring->virtual_start = ioremap_wc(start, size);
@@ -1570,6 +1657,12 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1570 return -ENOMEM; 1657 return -ENOMEM;
1571 } 1658 }
1572 1659
1660 if (!I915_NEED_GFX_HWS(dev)) {
1661 ret = init_phys_hws_pga(ring);
1662 if (ret)
1663 return ret;
1664 }
1665
1573 return 0; 1666 return 0;
1574} 1667}
1575 1668
@@ -1618,7 +1711,6 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
1618 } 1711 }
1619 ring->init = init_ring_common; 1712 ring->init = init_ring_common;
1620 1713
1621
1622 return intel_init_ring_buffer(dev, ring); 1714 return intel_init_ring_buffer(dev, ring);
1623} 1715}
1624 1716
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2ea7a311a1f0..526182ed0c6d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,6 +1,17 @@
1#ifndef _INTEL_RINGBUFFER_H_ 1#ifndef _INTEL_RINGBUFFER_H_
2#define _INTEL_RINGBUFFER_H_ 2#define _INTEL_RINGBUFFER_H_
3 3
4/*
5 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
6 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
7 * Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
8 *
9 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
10 * cacheline, the Head Pointer must not be greater than the Tail
11 * Pointer."
12 */
13#define I915_RING_FREE_SPACE 64
14
4struct intel_hw_status_page { 15struct intel_hw_status_page {
5 u32 *page_addr; 16 u32 *page_addr;
6 unsigned int gfx_addr; 17 unsigned int gfx_addr;
@@ -70,8 +81,7 @@ struct intel_ring_buffer {
70 int __must_check (*flush)(struct intel_ring_buffer *ring, 81 int __must_check (*flush)(struct intel_ring_buffer *ring,
71 u32 invalidate_domains, 82 u32 invalidate_domains,
72 u32 flush_domains); 83 u32 flush_domains);
73 int (*add_request)(struct intel_ring_buffer *ring, 84 int (*add_request)(struct intel_ring_buffer *ring);
74 u32 *seqno);
75 /* Some chipsets are not quite as coherent as advertised and need 85 /* Some chipsets are not quite as coherent as advertised and need
76 * an expensive kick to force a true read of the up-to-date seqno. 86 * an expensive kick to force a true read of the up-to-date seqno.
77 * However, the up-to-date seqno is not always required and the last 87 * However, the up-to-date seqno is not always required and the last
@@ -81,7 +91,9 @@ struct intel_ring_buffer {
81 u32 (*get_seqno)(struct intel_ring_buffer *ring, 91 u32 (*get_seqno)(struct intel_ring_buffer *ring,
82 bool lazy_coherency); 92 bool lazy_coherency);
83 int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, 93 int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
84 u32 offset, u32 length); 94 u32 offset, u32 length,
95 unsigned flags);
96#define I915_DISPATCH_SECURE 0x1
85 void (*cleanup)(struct intel_ring_buffer *ring); 97 void (*cleanup)(struct intel_ring_buffer *ring);
86 int (*sync_to)(struct intel_ring_buffer *ring, 98 int (*sync_to)(struct intel_ring_buffer *ring,
87 struct intel_ring_buffer *to, 99 struct intel_ring_buffer *to,
@@ -181,27 +193,21 @@ intel_read_status_page(struct intel_ring_buffer *ring,
181 * The area from dword 0x20 to 0x3ff is available for driver usage. 193 * The area from dword 0x20 to 0x3ff is available for driver usage.
182 */ 194 */
183#define I915_GEM_HWS_INDEX 0x20 195#define I915_GEM_HWS_INDEX 0x20
196#define I915_GEM_HWS_SCRATCH_INDEX 0x30
197#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
184 198
185void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); 199void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
186 200
187int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
188static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
189{
190 return intel_wait_ring_buffer(ring, ring->size - 8);
191}
192
193int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); 201int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
194
195static inline void intel_ring_emit(struct intel_ring_buffer *ring, 202static inline void intel_ring_emit(struct intel_ring_buffer *ring,
196 u32 data) 203 u32 data)
197{ 204{
198 iowrite32(data, ring->virtual_start + ring->tail); 205 iowrite32(data, ring->virtual_start + ring->tail);
199 ring->tail += 4; 206 ring->tail += 4;
200} 207}
201
202void intel_ring_advance(struct intel_ring_buffer *ring); 208void intel_ring_advance(struct intel_ring_buffer *ring);
209int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
203 210
204u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
205int intel_ring_flush_all_caches(struct intel_ring_buffer *ring); 211int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
206int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring); 212int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
207 213
@@ -217,6 +223,12 @@ static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
217 return ring->tail; 223 return ring->tail;
218} 224}
219 225
226static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
227{
228 BUG_ON(ring->outstanding_lazy_request == 0);
229 return ring->outstanding_lazy_request;
230}
231
220static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno) 232static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
221{ 233{
222 if (ring->trace_irq_seqno == 0 && ring->irq_get(ring)) 234 if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index a6ac0b416964..c275bf0fa36d 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -509,7 +509,7 @@ out:
509static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, 509static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
510 void *response, int response_len) 510 void *response, int response_len)
511{ 511{
512 u8 retry = 5; 512 u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
513 u8 status; 513 u8 status;
514 int i; 514 int i;
515 515
@@ -522,14 +522,27 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
522 * command to be complete. 522 * command to be complete.
523 * 523 *
524 * Check 5 times in case the hardware failed to read the docs. 524 * Check 5 times in case the hardware failed to read the docs.
525 *
526 * Also beware that the first response by many devices is to
527 * reply PENDING and stall for time. TVs are notorious for
528 * requiring longer than specified to complete their replies.
 529	 * Originally (in the DDX long ago) the delay was only ever 15ms,
 530	 * and an additional 30ms delay for TVs was added later after many
 531	 * experiments. To accommodate both sets of delays, we do a
532 * sequence of slow checks if the device is falling behind and fails
533 * to reply within 5*15µs.
525 */ 534 */
526 if (!intel_sdvo_read_byte(intel_sdvo, 535 if (!intel_sdvo_read_byte(intel_sdvo,
527 SDVO_I2C_CMD_STATUS, 536 SDVO_I2C_CMD_STATUS,
528 &status)) 537 &status))
529 goto log_fail; 538 goto log_fail;
530 539
531 while (status == SDVO_CMD_STATUS_PENDING && retry--) { 540 while (status == SDVO_CMD_STATUS_PENDING && --retry) {
532 udelay(15); 541 if (retry < 10)
542 msleep(15);
543 else
544 udelay(15);
545
533 if (!intel_sdvo_read_byte(intel_sdvo, 546 if (!intel_sdvo_read_byte(intel_sdvo,
534 SDVO_I2C_CMD_STATUS, 547 SDVO_I2C_CMD_STATUS,
535 &status)) 548 &status))
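[Editor's note] The reworked response loop escalates from five 15µs busy-waits to 15ms sleeps, so well-behaved encoders still answer with minimal latency while slow TVs get the long grace period they need. The same two-phase backoff isolated below, with poll_status() as a hypothetical stand-in for the SDVO_I2C_CMD_STATUS read:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define STATUS_PENDING 1
#define STATUS_DONE    2

/* Hypothetical device: answers on the Nth poll. */
static int polls_needed = 8;
static int poll_status(void)
{
	return --polls_needed > 0 ? STATUS_PENDING : STATUS_DONE;
}

static void sleep_us(long us)
{
	struct timespec ts = { us / 1000000, (us % 1000000) * 1000 };
	nanosleep(&ts, NULL);
}

/* 5 quick checks, then long sleeps, mirroring the SDVO loop. */
static bool wait_for_response(void)
{
	int retry = 15;
	int status = poll_status();

	while (status == STATUS_PENDING && --retry) {
		sleep_us(retry < 10 ? 15000 : 15);	/* 15ms vs 15us */
		status = poll_status();
	}
	return status == STATUS_DONE;
}

int main(void)
{
	printf("response %s\n", wait_for_response() ? "ok" : "timed out");
	return 0;
}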
@@ -1228,6 +1241,30 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
1228 1241
1229 temp = I915_READ(intel_sdvo->sdvo_reg); 1242 temp = I915_READ(intel_sdvo->sdvo_reg);
1230 if ((temp & SDVO_ENABLE) != 0) { 1243 if ((temp & SDVO_ENABLE) != 0) {
 1244		/* HW workaround for IBX: we need to move the port to
1245 * transcoder A before disabling it. */
1246 if (HAS_PCH_IBX(encoder->base.dev)) {
1247 struct drm_crtc *crtc = encoder->base.crtc;
1248 int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
1249
1250 if (temp & SDVO_PIPE_B_SELECT) {
1251 temp &= ~SDVO_PIPE_B_SELECT;
1252 I915_WRITE(intel_sdvo->sdvo_reg, temp);
1253 POSTING_READ(intel_sdvo->sdvo_reg);
1254
1255 /* Again we need to write this twice. */
1256 I915_WRITE(intel_sdvo->sdvo_reg, temp);
1257 POSTING_READ(intel_sdvo->sdvo_reg);
1258
1259 /* Transcoder selection bits only update
1260 * effectively on vblank. */
1261 if (crtc)
1262 intel_wait_for_vblank(encoder->base.dev, pipe);
1263 else
1264 msleep(50);
1265 }
1266 }
1267
1231 intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE); 1268 intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
1232 } 1269 }
1233} 1270}
@@ -1244,8 +1281,20 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
1244 u8 status; 1281 u8 status;
1245 1282
1246 temp = I915_READ(intel_sdvo->sdvo_reg); 1283 temp = I915_READ(intel_sdvo->sdvo_reg);
1247 if ((temp & SDVO_ENABLE) == 0) 1284 if ((temp & SDVO_ENABLE) == 0) {
 1285		/* HW workaround for IBX: the disable path moved the port
 1286		 * to transcoder A, so restore its pipe select before enabling. */
1287 if (HAS_PCH_IBX(dev)) {
1288 struct drm_crtc *crtc = encoder->base.crtc;
1289 int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
1290
1291 /* Restore the transcoder select bit. */
1292 if (pipe == PIPE_B)
1293 temp |= SDVO_PIPE_B_SELECT;
1294 }
1295
1248 intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE); 1296 intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
1297 }
1249 for (i = 0; i < 2; i++) 1298 for (i = 0; i < 2; i++)
1250 intel_wait_for_vblank(dev, intel_crtc->pipe); 1299 intel_wait_for_vblank(dev, intel_crtc->pipe);
1251 1300
@@ -1499,15 +1548,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
1499 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); 1548 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
1500 enum drm_connector_status ret; 1549 enum drm_connector_status ret;
1501 1550
1502 if (!intel_sdvo_write_cmd(intel_sdvo, 1551 if (!intel_sdvo_get_value(intel_sdvo,
1503 SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0)) 1552 SDVO_CMD_GET_ATTACHED_DISPLAYS,
1504 return connector_status_unknown; 1553 &response, 2))
1505
1506 /* add 30ms delay when the output type might be TV */
1507 if (intel_sdvo->caps.output_flags & SDVO_TV_MASK)
1508 msleep(30);
1509
1510 if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
1511 return connector_status_unknown; 1554 return connector_status_unknown;
1512 1555
1513 DRM_DEBUG_KMS("SDVO response %d %d [%x]\n", 1556 DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
@@ -1796,7 +1839,7 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
1796 intel_sdvo_destroy_enhance_property(connector); 1839 intel_sdvo_destroy_enhance_property(connector);
1797 drm_sysfs_connector_remove(connector); 1840 drm_sysfs_connector_remove(connector);
1798 drm_connector_cleanup(connector); 1841 drm_connector_cleanup(connector);
1799 kfree(connector); 1842 kfree(intel_sdvo_connector);
1800} 1843}
1801 1844
1802static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector) 1845static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
@@ -1828,7 +1871,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
1828 uint8_t cmd; 1871 uint8_t cmd;
1829 int ret; 1872 int ret;
1830 1873
1831 ret = drm_connector_property_set_value(connector, property, val); 1874 ret = drm_object_property_set_value(&connector->base, property, val);
1832 if (ret) 1875 if (ret)
1833 return ret; 1876 return ret;
1834 1877
@@ -1883,7 +1926,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
1883 } else if (IS_TV_OR_LVDS(intel_sdvo_connector)) { 1926 } else if (IS_TV_OR_LVDS(intel_sdvo_connector)) {
1884 temp_value = val; 1927 temp_value = val;
1885 if (intel_sdvo_connector->left == property) { 1928 if (intel_sdvo_connector->left == property) {
1886 drm_connector_property_set_value(connector, 1929 drm_object_property_set_value(&connector->base,
1887 intel_sdvo_connector->right, val); 1930 intel_sdvo_connector->right, val);
1888 if (intel_sdvo_connector->left_margin == temp_value) 1931 if (intel_sdvo_connector->left_margin == temp_value)
1889 return 0; 1932 return 0;
@@ -1895,7 +1938,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
1895 cmd = SDVO_CMD_SET_OVERSCAN_H; 1938 cmd = SDVO_CMD_SET_OVERSCAN_H;
1896 goto set_value; 1939 goto set_value;
1897 } else if (intel_sdvo_connector->right == property) { 1940 } else if (intel_sdvo_connector->right == property) {
1898 drm_connector_property_set_value(connector, 1941 drm_object_property_set_value(&connector->base,
1899 intel_sdvo_connector->left, val); 1942 intel_sdvo_connector->left, val);
1900 if (intel_sdvo_connector->right_margin == temp_value) 1943 if (intel_sdvo_connector->right_margin == temp_value)
1901 return 0; 1944 return 0;
@@ -1907,7 +1950,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
1907 cmd = SDVO_CMD_SET_OVERSCAN_H; 1950 cmd = SDVO_CMD_SET_OVERSCAN_H;
1908 goto set_value; 1951 goto set_value;
1909 } else if (intel_sdvo_connector->top == property) { 1952 } else if (intel_sdvo_connector->top == property) {
1910 drm_connector_property_set_value(connector, 1953 drm_object_property_set_value(&connector->base,
1911 intel_sdvo_connector->bottom, val); 1954 intel_sdvo_connector->bottom, val);
1912 if (intel_sdvo_connector->top_margin == temp_value) 1955 if (intel_sdvo_connector->top_margin == temp_value)
1913 return 0; 1956 return 0;
@@ -1919,7 +1962,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
1919 cmd = SDVO_CMD_SET_OVERSCAN_V; 1962 cmd = SDVO_CMD_SET_OVERSCAN_V;
1920 goto set_value; 1963 goto set_value;
1921 } else if (intel_sdvo_connector->bottom == property) { 1964 } else if (intel_sdvo_connector->bottom == property) {
1922 drm_connector_property_set_value(connector, 1965 drm_object_property_set_value(&connector->base,
1923 intel_sdvo_connector->top, val); 1966 intel_sdvo_connector->top, val);
1924 if (intel_sdvo_connector->bottom_margin == temp_value) 1967 if (intel_sdvo_connector->bottom_margin == temp_value)
1925 return 0; 1968 return 0;
@@ -2072,17 +2115,24 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
2072 else 2115 else
2073 mapping = &dev_priv->sdvo_mappings[1]; 2116 mapping = &dev_priv->sdvo_mappings[1];
2074 2117
2075 pin = GMBUS_PORT_DPB; 2118 if (mapping->initialized && intel_gmbus_is_port_valid(mapping->i2c_pin))
2076 if (mapping->initialized)
2077 pin = mapping->i2c_pin; 2119 pin = mapping->i2c_pin;
2120 else
2121 pin = GMBUS_PORT_DPB;
2078 2122
2079 if (intel_gmbus_is_port_valid(pin)) { 2123 sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
2080 sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin); 2124
2081 intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ); 2125 /* With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow
2082 intel_gmbus_force_bit(sdvo->i2c, true); 2126 * our code totally fails once we start using gmbus. Hence fall back to
2083 } else { 2127 * bit banging for now. */
2084 sdvo->i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB); 2128 intel_gmbus_force_bit(sdvo->i2c, true);
2085 } 2129}
2130
2131/* undo any changes intel_sdvo_select_i2c_bus() did to sdvo->i2c */
2132static void
2133intel_sdvo_unselect_i2c_bus(struct intel_sdvo *sdvo)
2134{
2135 intel_gmbus_force_bit(sdvo->i2c, false);
2086} 2136}
2087 2137
2088static bool 2138static bool
@@ -2427,7 +2477,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
2427 i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]); 2477 i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
2428 2478
2429 intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0]; 2479 intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
2430 drm_connector_attach_property(&intel_sdvo_connector->base.base, 2480 drm_object_attach_property(&intel_sdvo_connector->base.base.base,
2431 intel_sdvo_connector->tv_format, 0); 2481 intel_sdvo_connector->tv_format, 0);
2432 return true; 2482 return true;
2433 2483
@@ -2443,7 +2493,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
2443 intel_sdvo_connector->name = \ 2493 intel_sdvo_connector->name = \
2444 drm_property_create_range(dev, 0, #name, 0, data_value[0]); \ 2494 drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
2445 if (!intel_sdvo_connector->name) return false; \ 2495 if (!intel_sdvo_connector->name) return false; \
2446 drm_connector_attach_property(connector, \ 2496 drm_object_attach_property(&connector->base, \
2447 intel_sdvo_connector->name, \ 2497 intel_sdvo_connector->name, \
2448 intel_sdvo_connector->cur_##name); \ 2498 intel_sdvo_connector->cur_##name); \
2449 DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \ 2499 DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
@@ -2480,7 +2530,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
2480 if (!intel_sdvo_connector->left) 2530 if (!intel_sdvo_connector->left)
2481 return false; 2531 return false;
2482 2532
2483 drm_connector_attach_property(connector, 2533 drm_object_attach_property(&connector->base,
2484 intel_sdvo_connector->left, 2534 intel_sdvo_connector->left,
2485 intel_sdvo_connector->left_margin); 2535 intel_sdvo_connector->left_margin);
2486 2536
@@ -2489,7 +2539,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
2489 if (!intel_sdvo_connector->right) 2539 if (!intel_sdvo_connector->right)
2490 return false; 2540 return false;
2491 2541
2492 drm_connector_attach_property(connector, 2542 drm_object_attach_property(&connector->base,
2493 intel_sdvo_connector->right, 2543 intel_sdvo_connector->right,
2494 intel_sdvo_connector->right_margin); 2544 intel_sdvo_connector->right_margin);
2495 DRM_DEBUG_KMS("h_overscan: max %d, " 2545 DRM_DEBUG_KMS("h_overscan: max %d, "
@@ -2517,7 +2567,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
2517 if (!intel_sdvo_connector->top) 2567 if (!intel_sdvo_connector->top)
2518 return false; 2568 return false;
2519 2569
2520 drm_connector_attach_property(connector, 2570 drm_object_attach_property(&connector->base,
2521 intel_sdvo_connector->top, 2571 intel_sdvo_connector->top,
2522 intel_sdvo_connector->top_margin); 2572 intel_sdvo_connector->top_margin);
2523 2573
@@ -2527,7 +2577,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
2527 if (!intel_sdvo_connector->bottom) 2577 if (!intel_sdvo_connector->bottom)
2528 return false; 2578 return false;
2529 2579
2530 drm_connector_attach_property(connector, 2580 drm_object_attach_property(&connector->base,
2531 intel_sdvo_connector->bottom, 2581 intel_sdvo_connector->bottom,
2532 intel_sdvo_connector->bottom_margin); 2582 intel_sdvo_connector->bottom_margin);
2533 DRM_DEBUG_KMS("v_overscan: max %d, " 2583 DRM_DEBUG_KMS("v_overscan: max %d, "
@@ -2559,7 +2609,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
2559 if (!intel_sdvo_connector->dot_crawl) 2609 if (!intel_sdvo_connector->dot_crawl)
2560 return false; 2610 return false;
2561 2611
2562 drm_connector_attach_property(connector, 2612 drm_object_attach_property(&connector->base,
2563 intel_sdvo_connector->dot_crawl, 2613 intel_sdvo_connector->dot_crawl,
2564 intel_sdvo_connector->cur_dot_crawl); 2614 intel_sdvo_connector->cur_dot_crawl);
2565 DRM_DEBUG_KMS("dot crawl: current %d\n", response); 2615 DRM_DEBUG_KMS("dot crawl: current %d\n", response);
@@ -2663,10 +2713,8 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2663 intel_sdvo->is_sdvob = is_sdvob; 2713 intel_sdvo->is_sdvob = is_sdvob;
2664 intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1; 2714 intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
2665 intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg); 2715 intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
2666 if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) { 2716 if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev))
2667 kfree(intel_sdvo); 2717 goto err_i2c_bus;
2668 return false;
2669 }
2670 2718
2671 /* encoder type will be decided later */ 2719 /* encoder type will be decided later */
2672 intel_encoder = &intel_sdvo->base; 2720 intel_encoder = &intel_sdvo->base;
@@ -2765,6 +2813,8 @@ err_output:
2765err: 2813err:
2766 drm_encoder_cleanup(&intel_encoder->base); 2814 drm_encoder_cleanup(&intel_encoder->base);
2767 i2c_del_adapter(&intel_sdvo->ddc); 2815 i2c_del_adapter(&intel_sdvo->ddc);
2816err_i2c_bus:
2817 intel_sdvo_unselect_i2c_bus(intel_sdvo);
2768 kfree(intel_sdvo); 2818 kfree(intel_sdvo);
2769 2819
2770 return false; 2820 return false;
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 82f5e5c7009d..827dcd4edf1c 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -48,7 +48,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
48 struct intel_plane *intel_plane = to_intel_plane(plane); 48 struct intel_plane *intel_plane = to_intel_plane(plane);
49 int pipe = intel_plane->pipe; 49 int pipe = intel_plane->pipe;
50 u32 sprctl, sprscale = 0; 50 u32 sprctl, sprscale = 0;
51 int pixel_size; 51 unsigned long sprsurf_offset, linear_offset;
52 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
52 53
53 sprctl = I915_READ(SPRCTL(pipe)); 54 sprctl = I915_READ(SPRCTL(pipe));
54 55
@@ -61,33 +62,24 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
61 switch (fb->pixel_format) { 62 switch (fb->pixel_format) {
62 case DRM_FORMAT_XBGR8888: 63 case DRM_FORMAT_XBGR8888:
63 sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX; 64 sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
64 pixel_size = 4;
65 break; 65 break;
66 case DRM_FORMAT_XRGB8888: 66 case DRM_FORMAT_XRGB8888:
67 sprctl |= SPRITE_FORMAT_RGBX888; 67 sprctl |= SPRITE_FORMAT_RGBX888;
68 pixel_size = 4;
69 break; 68 break;
70 case DRM_FORMAT_YUYV: 69 case DRM_FORMAT_YUYV:
71 sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV; 70 sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
72 pixel_size = 2;
73 break; 71 break;
74 case DRM_FORMAT_YVYU: 72 case DRM_FORMAT_YVYU:
75 sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU; 73 sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
76 pixel_size = 2;
77 break; 74 break;
78 case DRM_FORMAT_UYVY: 75 case DRM_FORMAT_UYVY:
79 sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY; 76 sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
80 pixel_size = 2;
81 break; 77 break;
82 case DRM_FORMAT_VYUY: 78 case DRM_FORMAT_VYUY:
83 sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY; 79 sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
84 pixel_size = 2;
85 break; 80 break;
86 default: 81 default:
87 DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n"); 82 BUG();
88 sprctl |= SPRITE_FORMAT_RGBX888;
89 pixel_size = 4;
90 break;
91 } 83 }
92 84
93 if (obj->tiling_mode != I915_TILING_NONE) 85 if (obj->tiling_mode != I915_TILING_NONE)
@@ -127,18 +119,28 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
127 119
128 I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); 120 I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
129 I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x); 121 I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
130 if (obj->tiling_mode != I915_TILING_NONE) { 122
123 linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
124 sprsurf_offset =
125 intel_gen4_compute_offset_xtiled(&x, &y,
126 fb->bits_per_pixel / 8,
127 fb->pitches[0]);
128 linear_offset -= sprsurf_offset;
129
130 /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
131 * register */
132 if (IS_HASWELL(dev))
133 I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
134 else if (obj->tiling_mode != I915_TILING_NONE)
131 I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x); 135 I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
132 } else { 136 else
133 unsigned long offset; 137 I915_WRITE(SPRLINOFF(pipe), linear_offset);
134 138
135 offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
136 I915_WRITE(SPRLINOFF(pipe), offset);
137 }
138 I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w); 139 I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
139 I915_WRITE(SPRSCALE(pipe), sprscale); 140 if (intel_plane->can_scale)
141 I915_WRITE(SPRSCALE(pipe), sprscale);
140 I915_WRITE(SPRCTL(pipe), sprctl); 142 I915_WRITE(SPRCTL(pipe), sprctl);
141 I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset); 143 I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
142 POSTING_READ(SPRSURF(pipe)); 144 POSTING_READ(SPRSURF(pipe));
143} 145}
144 146
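[Editor's note] Both sprite paths now split the (x, y) framebuffer origin into a tile-aligned byte offset, added to the object's GTT offset for SPRSURF/DVSSURF, plus a small residual that goes into the TILEOFF register (or, linearly, into LINOFF). A standalone sketch of that split, assuming the classic X-tile geometry of 512-byte-wide, 8-row (4KB) tiles; the surface parameters in main() are made up for the demonstration.

#include <stdio.h>

/* Split a pixel (x, y) into a 4KB-tile-aligned byte offset plus a small
 * residual (x, y), assuming X tiles: 512 bytes wide, 8 rows tall. */
static unsigned long compute_offset_xtiled(int *x, int *y,
					   unsigned int cpp,
					   unsigned int pitch)
{
	int tile_rows = *y / 8;			/* whole tile rows above */
	int tiles = *x / (int)(512 / cpp);	/* whole tiles to the left */

	*y %= 8;
	*x %= 512 / cpp;

	return (unsigned long)tile_rows * pitch * 8 +
	       (unsigned long)tiles * 4096;
}

int main(void)
{
	int x = 300, y = 21;
	unsigned int cpp = 4, pitch = 8192;	/* hypothetical 32bpp surface */
	unsigned long linear = (unsigned long)y * pitch + x * cpp;
	unsigned long base = compute_offset_xtiled(&x, &y, cpp, pitch);

	/* The residual now fits one tile: x < 128, y < 8 at 32bpp; for a
	 * linear surface the leftover byte offset is linear - base. */
	printf("base=%lu tileoff=(%d,%d) linoff=%lu\n",
	       base, x, y, linear - base);
	return 0;
}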
@@ -152,7 +154,8 @@ ivb_disable_plane(struct drm_plane *plane)
152 154
153 I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE); 155 I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
154 /* Can't leave the scaler enabled... */ 156 /* Can't leave the scaler enabled... */
155 I915_WRITE(SPRSCALE(pipe), 0); 157 if (intel_plane->can_scale)
158 I915_WRITE(SPRSCALE(pipe), 0);
156 /* Activate double buffered register update */ 159 /* Activate double buffered register update */
157 I915_MODIFY_DISPBASE(SPRSURF(pipe), 0); 160 I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
158 POSTING_READ(SPRSURF(pipe)); 161 POSTING_READ(SPRSURF(pipe));
@@ -225,8 +228,10 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
225 struct drm_device *dev = plane->dev; 228 struct drm_device *dev = plane->dev;
226 struct drm_i915_private *dev_priv = dev->dev_private; 229 struct drm_i915_private *dev_priv = dev->dev_private;
227 struct intel_plane *intel_plane = to_intel_plane(plane); 230 struct intel_plane *intel_plane = to_intel_plane(plane);
228 int pipe = intel_plane->pipe, pixel_size; 231 int pipe = intel_plane->pipe;
232 unsigned long dvssurf_offset, linear_offset;
229 u32 dvscntr, dvsscale; 233 u32 dvscntr, dvsscale;
234 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
230 235
231 dvscntr = I915_READ(DVSCNTR(pipe)); 236 dvscntr = I915_READ(DVSCNTR(pipe));
232 237
@@ -239,33 +244,24 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
239 switch (fb->pixel_format) { 244 switch (fb->pixel_format) {
240 case DRM_FORMAT_XBGR8888: 245 case DRM_FORMAT_XBGR8888:
241 dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR; 246 dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR;
242 pixel_size = 4;
243 break; 247 break;
244 case DRM_FORMAT_XRGB8888: 248 case DRM_FORMAT_XRGB8888:
245 dvscntr |= DVS_FORMAT_RGBX888; 249 dvscntr |= DVS_FORMAT_RGBX888;
246 pixel_size = 4;
247 break; 250 break;
248 case DRM_FORMAT_YUYV: 251 case DRM_FORMAT_YUYV:
249 dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV; 252 dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
250 pixel_size = 2;
251 break; 253 break;
252 case DRM_FORMAT_YVYU: 254 case DRM_FORMAT_YVYU:
253 dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU; 255 dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
254 pixel_size = 2;
255 break; 256 break;
256 case DRM_FORMAT_UYVY: 257 case DRM_FORMAT_UYVY:
257 dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY; 258 dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
258 pixel_size = 2;
259 break; 259 break;
260 case DRM_FORMAT_VYUY: 260 case DRM_FORMAT_VYUY:
261 dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY; 261 dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
262 pixel_size = 2;
263 break; 262 break;
264 default: 263 default:
265 DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n"); 264 BUG();
266 dvscntr |= DVS_FORMAT_RGBX888;
267 pixel_size = 4;
268 break;
269 } 265 }
270 266
271 if (obj->tiling_mode != I915_TILING_NONE) 267 if (obj->tiling_mode != I915_TILING_NONE)
@@ -289,18 +285,23 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
289 285
290 I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); 286 I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
291 I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x); 287 I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
292 if (obj->tiling_mode != I915_TILING_NONE) { 288
289 linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
290 dvssurf_offset =
291 intel_gen4_compute_offset_xtiled(&x, &y,
292 fb->bits_per_pixel / 8,
293 fb->pitches[0]);
294 linear_offset -= dvssurf_offset;
295
296 if (obj->tiling_mode != I915_TILING_NONE)
293 I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x); 297 I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
294 } else { 298 else
295 unsigned long offset; 299 I915_WRITE(DVSLINOFF(pipe), linear_offset);
296 300
297 offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
298 I915_WRITE(DVSLINOFF(pipe), offset);
299 }
300 I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w); 301 I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
301 I915_WRITE(DVSSCALE(pipe), dvsscale); 302 I915_WRITE(DVSSCALE(pipe), dvsscale);
302 I915_WRITE(DVSCNTR(pipe), dvscntr); 303 I915_WRITE(DVSCNTR(pipe), dvscntr);
303 I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset); 304 I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
304 POSTING_READ(DVSSURF(pipe)); 305 POSTING_READ(DVSSURF(pipe));
305} 306}
306 307
@@ -422,6 +423,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
422 struct intel_framebuffer *intel_fb; 423 struct intel_framebuffer *intel_fb;
423 struct drm_i915_gem_object *obj, *old_obj; 424 struct drm_i915_gem_object *obj, *old_obj;
424 int pipe = intel_plane->pipe; 425 int pipe = intel_plane->pipe;
426 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
427 pipe);
425 int ret = 0; 428 int ret = 0;
426 int x = src_x >> 16, y = src_y >> 16; 429 int x = src_x >> 16, y = src_y >> 16;
427 int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay; 430 int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
@@ -436,7 +439,7 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
436 src_h = src_h >> 16; 439 src_h = src_h >> 16;
437 440
438 /* Pipe must be running... */ 441 /* Pipe must be running... */
439 if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE)) 442 if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE))
440 return -EINVAL; 443 return -EINVAL;
441 444
442 if (crtc_x >= primary_w || crtc_y >= primary_h) 445 if (crtc_x >= primary_w || crtc_y >= primary_h)
@@ -446,6 +449,15 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
446 if (intel_plane->pipe != intel_crtc->pipe) 449 if (intel_plane->pipe != intel_crtc->pipe)
447 return -EINVAL; 450 return -EINVAL;
448 451
452 /* Sprite planes can be linear or x-tiled surfaces */
453 switch (obj->tiling_mode) {
454 case I915_TILING_NONE:
455 case I915_TILING_X:
456 break;
457 default:
458 return -EINVAL;
459 }
460
449 /* 461 /*
450 * Clamp the width & height into the visible area. Note we don't 462 * Clamp the width & height into the visible area. Note we don't
451 * try to scale the source if part of the visible region is offscreen. 463 * try to scale the source if part of the visible region is offscreen.
@@ -473,6 +485,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
473 goto out; 485 goto out;
474 486
475 /* 487 /*
488 * We may not have a scaler, e.g. HSW no longer has one
489 */
490 if (!intel_plane->can_scale && (crtc_w != src_w || crtc_h != src_h))
491 return -EINVAL;
492
493 /*
476 * We can take a larger source and scale it down, but 494 * We can take a larger source and scale it down, but
477 * only so much... 16x is the max on SNB. 495 * only so much... 16x is the max on SNB.
478 */ 496 */
@@ -665,6 +683,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
665 switch (INTEL_INFO(dev)->gen) { 683 switch (INTEL_INFO(dev)->gen) {
666 case 5: 684 case 5:
667 case 6: 685 case 6:
686 intel_plane->can_scale = true;
668 intel_plane->max_downscale = 16; 687 intel_plane->max_downscale = 16;
669 intel_plane->update_plane = ilk_update_plane; 688 intel_plane->update_plane = ilk_update_plane;
670 intel_plane->disable_plane = ilk_disable_plane; 689 intel_plane->disable_plane = ilk_disable_plane;
@@ -681,6 +700,10 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
681 break; 700 break;
682 701
683 case 7: 702 case 7:
703 if (IS_HASWELL(dev) || IS_VALLEYVIEW(dev))
704 intel_plane->can_scale = false;
705 else
706 intel_plane->can_scale = true;
684 intel_plane->max_downscale = 2; 707 intel_plane->max_downscale = 2;
685 intel_plane->update_plane = ivb_update_plane; 708 intel_plane->update_plane = ivb_update_plane;
686 intel_plane->disable_plane = ivb_disable_plane; 709 intel_plane->disable_plane = ivb_disable_plane;
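
The sprite hunks above do three things: reject tiling modes other than linear and X-tiled, compute a tile-aligned surface offset so DVSSURF can point at the 4KiB tile containing (x, y) while the small remainder is programmed via DVSTILEOFF (tiled) or DVSLINOFF (linear), and add a can_scale flag so parts without a sprite scaler (Haswell and ValleyView in the gen 7 branch) refuse updates where crtc and source sizes differ. The body of intel_gen4_compute_offset_xtiled() is outside this hunk; a minimal sketch of the math it implies, assuming gen4 X-tiles of 512 bytes by 8 rows with cpp meaning bytes per pixel:

        /* Sketch only: split (x, y) into a 4KiB-tile-aligned offset plus
         * a small in-tile remainder, assuming 512-byte x 8-row X-tiles. */
        static unsigned long
        sketch_offset_xtiled(int *x, int *y, unsigned int cpp,
                             unsigned int pitch)
        {
                int tile_rows = *y / 8;         /* whole tile rows above us */
                int tiles = *x / (512 / cpp);   /* whole tiles to our left  */

                *y %= 8;
                *x %= 512 / cpp;

                /* a tile row spans pitch * 8 bytes; each tile is 4096 bytes */
                return tile_rows * pitch * 8 + tiles * 4096;
        }

The caller subtracts the returned offset from the linear offset, so the linear and tiled register paths describe the same pixel.
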
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 62bb048c135e..ea93520c1278 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1088,13 +1088,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1088 int dspcntr_reg = DSPCNTR(intel_crtc->plane); 1088 int dspcntr_reg = DSPCNTR(intel_crtc->plane);
1089 int pipeconf = I915_READ(pipeconf_reg); 1089 int pipeconf = I915_READ(pipeconf_reg);
1090 int dspcntr = I915_READ(dspcntr_reg); 1090 int dspcntr = I915_READ(dspcntr_reg);
1091 int dspbase_reg = DSPADDR(intel_crtc->plane);
1092 int xpos = 0x0, ypos = 0x0; 1091 int xpos = 0x0, ypos = 0x0;
1093 unsigned int xsize, ysize; 1092 unsigned int xsize, ysize;
1094 /* Pipe must be off here */ 1093 /* Pipe must be off here */
1095 I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE); 1094 I915_WRITE(dspcntr_reg, dspcntr & ~DISPLAY_PLANE_ENABLE);
1096 /* Flush the plane changes */ 1095 intel_flush_display_plane(dev_priv, intel_crtc->plane);
1097 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
1098 1096
1099 /* Wait for vblank for the disable to take effect */ 1097 /* Wait for vblank for the disable to take effect */
1100 if (IS_GEN2(dev)) 1098 if (IS_GEN2(dev))
@@ -1123,8 +1121,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1123 1121
1124 I915_WRITE(pipeconf_reg, pipeconf); 1122 I915_WRITE(pipeconf_reg, pipeconf);
1125 I915_WRITE(dspcntr_reg, dspcntr); 1123 I915_WRITE(dspcntr_reg, dspcntr);
1126 /* Flush the plane changes */ 1124 intel_flush_display_plane(dev_priv, intel_crtc->plane);
1127 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
1128 } 1125 }
1129 1126
1130 j = 0; 1127 j = 0;
@@ -1292,7 +1289,7 @@ static void intel_tv_find_better_format(struct drm_connector *connector)
1292 } 1289 }
1293 1290
1294 intel_tv->tv_format = tv_mode->name; 1291 intel_tv->tv_format = tv_mode->name;
1295 drm_connector_property_set_value(connector, 1292 drm_object_property_set_value(&connector->base,
1296 connector->dev->mode_config.tv_mode_property, i); 1293 connector->dev->mode_config.tv_mode_property, i);
1297} 1294}
1298 1295
@@ -1446,7 +1443,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
1446 int ret = 0; 1443 int ret = 0;
1447 bool changed = false; 1444 bool changed = false;
1448 1445
1449 ret = drm_connector_property_set_value(connector, property, val); 1446 ret = drm_object_property_set_value(&connector->base, property, val);
1450 if (ret < 0) 1447 if (ret < 0)
1451 goto out; 1448 goto out;
1452 1449
@@ -1658,18 +1655,18 @@ intel_tv_init(struct drm_device *dev)
1658 ARRAY_SIZE(tv_modes), 1655 ARRAY_SIZE(tv_modes),
1659 tv_format_names); 1656 tv_format_names);
1660 1657
1661 drm_connector_attach_property(connector, dev->mode_config.tv_mode_property, 1658 drm_object_attach_property(&connector->base, dev->mode_config.tv_mode_property,
1662 initial_mode); 1659 initial_mode);
1663 drm_connector_attach_property(connector, 1660 drm_object_attach_property(&connector->base,
1664 dev->mode_config.tv_left_margin_property, 1661 dev->mode_config.tv_left_margin_property,
1665 intel_tv->margin[TV_MARGIN_LEFT]); 1662 intel_tv->margin[TV_MARGIN_LEFT]);
1666 drm_connector_attach_property(connector, 1663 drm_object_attach_property(&connector->base,
1667 dev->mode_config.tv_top_margin_property, 1664 dev->mode_config.tv_top_margin_property,
1668 intel_tv->margin[TV_MARGIN_TOP]); 1665 intel_tv->margin[TV_MARGIN_TOP]);
1669 drm_connector_attach_property(connector, 1666 drm_object_attach_property(&connector->base,
1670 dev->mode_config.tv_right_margin_property, 1667 dev->mode_config.tv_right_margin_property,
1671 intel_tv->margin[TV_MARGIN_RIGHT]); 1668 intel_tv->margin[TV_MARGIN_RIGHT]);
1672 drm_connector_attach_property(connector, 1669 drm_object_attach_property(&connector->base,
1673 dev->mode_config.tv_bottom_margin_property, 1670 dev->mode_config.tv_bottom_margin_property,
1674 intel_tv->margin[TV_MARGIN_BOTTOM]); 1671 intel_tv->margin[TV_MARGIN_BOTTOM]);
1675 drm_sysfs_connector_add(connector); 1672 drm_sysfs_connector_add(connector);
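
The intel_tv.c property hunks are a mechanical rename from the connector-specific helpers to the generic drm_mode_object ones; only the first argument changes, from the connector to its embedded base object. With prop and val as placeholders:

        /* old, connector-only helper */
        drm_connector_attach_property(connector, prop, val);

        /* new, generic helper: any drm_mode_object can carry properties */
        drm_object_attach_property(&connector->base, prop, val);

The same substitution applies to the _set_value variant. Separately, the open-coded plane flush (writing DSPADDR back to itself) is replaced by the existing intel_flush_display_plane() helper, which is why the dspbase_reg local disappears.
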
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index d6a1aae33701..70dd3c5529d4 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -133,6 +133,8 @@ static int mga_vram_init(struct mga_device *mdev)
133{ 133{
134 void __iomem *mem; 134 void __iomem *mem;
135 struct apertures_struct *aper = alloc_apertures(1); 135 struct apertures_struct *aper = alloc_apertures(1);
136 if (!aper)
137 return -ENOMEM;
136 138
137 /* BAR 0 is VRAM */ 139 /* BAR 0 is VRAM */
138 mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0); 140 mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0);
@@ -140,9 +142,9 @@ static int mga_vram_init(struct mga_device *mdev)
140 142
141 aper->ranges[0].base = mdev->mc.vram_base; 143 aper->ranges[0].base = mdev->mc.vram_base;
142 aper->ranges[0].size = mdev->mc.vram_window; 144 aper->ranges[0].size = mdev->mc.vram_window;
143 aper->count = 1;
144 145
145 remove_conflicting_framebuffers(aper, "mgafb", true); 146 remove_conflicting_framebuffers(aper, "mgafb", true);
147 kfree(aper);
146 148
147 if (!request_mem_region(mdev->mc.vram_base, mdev->mc.vram_window, 149 if (!request_mem_region(mdev->mc.vram_base, mdev->mc.vram_window,
148 "mgadrmfb_vram")) { 150 "mgadrmfb_vram")) {
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 1504699666c4..8fc9d9201945 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -186,11 +186,11 @@ static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_r
186 186
187static int mgag200_bo_move(struct ttm_buffer_object *bo, 187static int mgag200_bo_move(struct ttm_buffer_object *bo,
188 bool evict, bool interruptible, 188 bool evict, bool interruptible,
189 bool no_wait_reserve, bool no_wait_gpu, 189 bool no_wait_gpu,
190 struct ttm_mem_reg *new_mem) 190 struct ttm_mem_reg *new_mem)
191{ 191{
192 int r; 192 int r;
193 r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 193 r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
194 return r; 194 return r;
195} 195}
196 196
@@ -355,7 +355,7 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,
355 355
356 ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size, 356 ret = ttm_bo_init(&mdev->ttm.bdev, &mgabo->bo, size,
357 ttm_bo_type_device, &mgabo->placement, 357 ttm_bo_type_device, &mgabo->placement,
358 align >> PAGE_SHIFT, 0, false, NULL, acc_size, 358 align >> PAGE_SHIFT, false, NULL, acc_size,
359 NULL, mgag200_bo_ttm_destroy); 359 NULL, mgag200_bo_ttm_destroy);
360 if (ret) 360 if (ret)
361 return ret; 361 return ret;
@@ -382,7 +382,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
382 mgag200_ttm_placement(bo, pl_flag); 382 mgag200_ttm_placement(bo, pl_flag);
383 for (i = 0; i < bo->placement.num_placement; i++) 383 for (i = 0; i < bo->placement.num_placement; i++)
384 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 384 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
385 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 385 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
386 if (ret) 386 if (ret)
387 return ret; 387 return ret;
388 388
@@ -405,7 +405,7 @@ int mgag200_bo_unpin(struct mgag200_bo *bo)
405 405
406 for (i = 0; i < bo->placement.num_placement ; i++) 406 for (i = 0; i < bo->placement.num_placement ; i++)
407 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; 407 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
408 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 408 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
409 if (ret) 409 if (ret)
410 return ret; 410 return ret;
411 411
@@ -430,7 +430,7 @@ int mgag200_bo_push_sysram(struct mgag200_bo *bo)
430 for (i = 0; i < bo->placement.num_placement ; i++) 430 for (i = 0; i < bo->placement.num_placement ; i++)
431 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 431 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
432 432
433 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false, false); 433 ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
434 if (ret) { 434 if (ret) {
435 DRM_ERROR("pushing to VRAM failed\n"); 435 DRM_ERROR("pushing to VRAM failed\n");
436 return ret; 436 return ret;
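
The mgag200_ttm.c hunks track a TTM interface change that drops the no_wait_reserve flag from ttm_bo_validate() and ttm_bo_move_memcpy(); driver logic is unchanged. Assuming the reduced prototypes implied by the call sites above, a validate call now reads:

        /* before: ttm_bo_validate(bo, placement, false, false, false); */
        ret = ttm_bo_validate(&bo->bo, &bo->placement,
                              false,    /* interruptible */
                              false);   /* no_wait_gpu */

The 0 dropped from the ttm_bo_init() call is presumably its unused buffer_start argument, removed in the same API cleanup.
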
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index a990df4d6c04..ab25752a0b1e 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -11,6 +11,7 @@ nouveau-y := core/core/client.o
11nouveau-y += core/core/engctx.o 11nouveau-y += core/core/engctx.o
12nouveau-y += core/core/engine.o 12nouveau-y += core/core/engine.o
13nouveau-y += core/core/enum.o 13nouveau-y += core/core/enum.o
14nouveau-y += core/core/falcon.o
14nouveau-y += core/core/gpuobj.o 15nouveau-y += core/core/gpuobj.o
15nouveau-y += core/core/handle.o 16nouveau-y += core/core/handle.o
16nouveau-y += core/core/mm.o 17nouveau-y += core/core/mm.o
@@ -29,6 +30,7 @@ nouveau-y += core/subdev/bios/base.o
29nouveau-y += core/subdev/bios/bit.o 30nouveau-y += core/subdev/bios/bit.o
30nouveau-y += core/subdev/bios/conn.o 31nouveau-y += core/subdev/bios/conn.o
31nouveau-y += core/subdev/bios/dcb.o 32nouveau-y += core/subdev/bios/dcb.o
33nouveau-y += core/subdev/bios/disp.o
32nouveau-y += core/subdev/bios/dp.o 34nouveau-y += core/subdev/bios/dp.o
33nouveau-y += core/subdev/bios/extdev.o 35nouveau-y += core/subdev/bios/extdev.o
34nouveau-y += core/subdev/bios/gpio.o 36nouveau-y += core/subdev/bios/gpio.o
@@ -64,9 +66,19 @@ nouveau-y += core/subdev/devinit/nv50.o
64nouveau-y += core/subdev/fb/base.o 66nouveau-y += core/subdev/fb/base.o
65nouveau-y += core/subdev/fb/nv04.o 67nouveau-y += core/subdev/fb/nv04.o
66nouveau-y += core/subdev/fb/nv10.o 68nouveau-y += core/subdev/fb/nv10.o
69nouveau-y += core/subdev/fb/nv1a.o
67nouveau-y += core/subdev/fb/nv20.o 70nouveau-y += core/subdev/fb/nv20.o
71nouveau-y += core/subdev/fb/nv25.o
68nouveau-y += core/subdev/fb/nv30.o 72nouveau-y += core/subdev/fb/nv30.o
73nouveau-y += core/subdev/fb/nv35.o
74nouveau-y += core/subdev/fb/nv36.o
69nouveau-y += core/subdev/fb/nv40.o 75nouveau-y += core/subdev/fb/nv40.o
76nouveau-y += core/subdev/fb/nv41.o
77nouveau-y += core/subdev/fb/nv44.o
78nouveau-y += core/subdev/fb/nv46.o
79nouveau-y += core/subdev/fb/nv47.o
80nouveau-y += core/subdev/fb/nv49.o
81nouveau-y += core/subdev/fb/nv4e.o
70nouveau-y += core/subdev/fb/nv50.o 82nouveau-y += core/subdev/fb/nv50.o
71nouveau-y += core/subdev/fb/nvc0.o 83nouveau-y += core/subdev/fb/nvc0.o
72nouveau-y += core/subdev/gpio/base.o 84nouveau-y += core/subdev/gpio/base.o
@@ -111,7 +123,10 @@ nouveau-y += core/engine/dmaobj/base.o
111nouveau-y += core/engine/dmaobj/nv04.o 123nouveau-y += core/engine/dmaobj/nv04.o
112nouveau-y += core/engine/dmaobj/nv50.o 124nouveau-y += core/engine/dmaobj/nv50.o
113nouveau-y += core/engine/dmaobj/nvc0.o 125nouveau-y += core/engine/dmaobj/nvc0.o
126nouveau-y += core/engine/dmaobj/nvd0.o
114nouveau-y += core/engine/bsp/nv84.o 127nouveau-y += core/engine/bsp/nv84.o
128nouveau-y += core/engine/bsp/nvc0.o
129nouveau-y += core/engine/bsp/nve0.o
115nouveau-y += core/engine/copy/nva3.o 130nouveau-y += core/engine/copy/nva3.o
116nouveau-y += core/engine/copy/nvc0.o 131nouveau-y += core/engine/copy/nvc0.o
117nouveau-y += core/engine/copy/nve0.o 132nouveau-y += core/engine/copy/nve0.o
@@ -119,7 +134,21 @@ nouveau-y += core/engine/crypt/nv84.o
119nouveau-y += core/engine/crypt/nv98.o 134nouveau-y += core/engine/crypt/nv98.o
120nouveau-y += core/engine/disp/nv04.o 135nouveau-y += core/engine/disp/nv04.o
121nouveau-y += core/engine/disp/nv50.o 136nouveau-y += core/engine/disp/nv50.o
137nouveau-y += core/engine/disp/nv84.o
138nouveau-y += core/engine/disp/nv94.o
139nouveau-y += core/engine/disp/nva0.o
140nouveau-y += core/engine/disp/nva3.o
122nouveau-y += core/engine/disp/nvd0.o 141nouveau-y += core/engine/disp/nvd0.o
142nouveau-y += core/engine/disp/nve0.o
143nouveau-y += core/engine/disp/dacnv50.o
144nouveau-y += core/engine/disp/hdanva3.o
145nouveau-y += core/engine/disp/hdanvd0.o
146nouveau-y += core/engine/disp/hdminv84.o
147nouveau-y += core/engine/disp/hdminva3.o
148nouveau-y += core/engine/disp/hdminvd0.o
149nouveau-y += core/engine/disp/sornv50.o
150nouveau-y += core/engine/disp/sornv94.o
151nouveau-y += core/engine/disp/sornvd0.o
123nouveau-y += core/engine/disp/vga.o 152nouveau-y += core/engine/disp/vga.o
124nouveau-y += core/engine/fifo/base.o 153nouveau-y += core/engine/fifo/base.o
125nouveau-y += core/engine/fifo/nv04.o 154nouveau-y += core/engine/fifo/nv04.o
@@ -151,11 +180,14 @@ nouveau-y += core/engine/mpeg/nv40.o
151nouveau-y += core/engine/mpeg/nv50.o 180nouveau-y += core/engine/mpeg/nv50.o
152nouveau-y += core/engine/mpeg/nv84.o 181nouveau-y += core/engine/mpeg/nv84.o
153nouveau-y += core/engine/ppp/nv98.o 182nouveau-y += core/engine/ppp/nv98.o
183nouveau-y += core/engine/ppp/nvc0.o
154nouveau-y += core/engine/software/nv04.o 184nouveau-y += core/engine/software/nv04.o
155nouveau-y += core/engine/software/nv10.o 185nouveau-y += core/engine/software/nv10.o
156nouveau-y += core/engine/software/nv50.o 186nouveau-y += core/engine/software/nv50.o
157nouveau-y += core/engine/software/nvc0.o 187nouveau-y += core/engine/software/nvc0.o
158nouveau-y += core/engine/vp/nv84.o 188nouveau-y += core/engine/vp/nv84.o
189nouveau-y += core/engine/vp/nvc0.o
190nouveau-y += core/engine/vp/nve0.o
159 191
160# drm/core 192# drm/core
161nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o 193nouveau-y += nouveau_drm.o nouveau_chan.o nouveau_dma.o nouveau_fence.o
@@ -166,7 +198,7 @@ nouveau-y += nv04_fence.o nv10_fence.o nv50_fence.o nv84_fence.o nvc0_fence.o
166 198
167# drm/kms 199# drm/kms
168nouveau-y += nouveau_bios.o nouveau_fbcon.o nouveau_display.o 200nouveau-y += nouveau_bios.o nouveau_fbcon.o nouveau_display.o
169nouveau-y += nouveau_connector.o nouveau_hdmi.o nouveau_dp.o 201nouveau-y += nouveau_connector.o nouveau_dp.o
170nouveau-y += nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o 202nouveau-y += nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o
171 203
172# drm/kms/nv04:nv50 204# drm/kms/nv04:nv50
@@ -175,9 +207,7 @@ nouveau-y += nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o
175nouveau-y += nv04_crtc.o nv04_display.o nv04_cursor.o 207nouveau-y += nv04_crtc.o nv04_display.o nv04_cursor.o
176 208
177# drm/kms/nv50- 209# drm/kms/nv50-
178nouveau-y += nv50_display.o nvd0_display.o 210nouveau-y += nv50_display.o
179nouveau-y += nv50_crtc.o nv50_dac.o nv50_sor.o nv50_cursor.o
180nouveau-y += nv50_evo.o
181 211
182# drm/pm 212# drm/pm
183nouveau-y += nouveau_pm.o nouveau_volt.o nouveau_perf.o 213nouveau-y += nouveau_pm.o nouveau_volt.o nouveau_perf.o
diff --git a/drivers/gpu/drm/nouveau/core/core/engctx.c b/drivers/gpu/drm/nouveau/core/core/engctx.c
index e41b10d5eb59..84c71fad2b6c 100644
--- a/drivers/gpu/drm/nouveau/core/core/engctx.c
+++ b/drivers/gpu/drm/nouveau/core/core/engctx.c
@@ -189,6 +189,21 @@ nouveau_engctx_fini(struct nouveau_engctx *engctx, bool suspend)
189 return nouveau_gpuobj_fini(&engctx->base, suspend); 189 return nouveau_gpuobj_fini(&engctx->base, suspend);
190} 190}
191 191
192int
193_nouveau_engctx_ctor(struct nouveau_object *parent,
194 struct nouveau_object *engine,
195 struct nouveau_oclass *oclass, void *data, u32 size,
196 struct nouveau_object **pobject)
197{
198 struct nouveau_engctx *engctx;
199 int ret;
200
201 ret = nouveau_engctx_create(parent, engine, oclass, NULL, 256, 256,
202 NVOBJ_FLAG_ZERO_ALLOC, &engctx);
203 *pobject = nv_object(engctx);
204 return ret;
205}
206
192void 207void
193_nouveau_engctx_dtor(struct nouveau_object *object) 208_nouveau_engctx_dtor(struct nouveau_object *object)
194{ 209{
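
The new _nouveau_engctx_ctor() gives engines with no per-context state a stock constructor (a 256-byte, zero-initialised context), so a context class can be wired entirely from generic helpers. The nv84 BSP conversion further down is the first user; its context class reduces to:

        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = _nouveau_engctx_ctor,
                .dtor = _nouveau_engctx_dtor,
                .init = _nouveau_engctx_init,
                .fini = _nouveau_engctx_fini,
                .rd32 = _nouveau_engctx_rd32,
                .wr32 = _nouveau_engctx_wr32,
        },
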
diff --git a/drivers/gpu/drm/nouveau/core/core/falcon.c b/drivers/gpu/drm/nouveau/core/core/falcon.c
new file mode 100644
index 000000000000..6b0843c33877
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/core/falcon.c
@@ -0,0 +1,247 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include <core/falcon.h>
24
25#include <subdev/timer.h>
26
27u32
28_nouveau_falcon_rd32(struct nouveau_object *object, u64 addr)
29{
30 struct nouveau_falcon *falcon = (void *)object;
31 return nv_rd32(falcon, falcon->addr + addr);
32}
33
34void
35_nouveau_falcon_wr32(struct nouveau_object *object, u64 addr, u32 data)
36{
37 struct nouveau_falcon *falcon = (void *)object;
38 nv_wr32(falcon, falcon->addr + addr, data);
39}
40
41int
42_nouveau_falcon_init(struct nouveau_object *object)
43{
44 struct nouveau_device *device = nv_device(object);
45 struct nouveau_falcon *falcon = (void *)object;
46 const struct firmware *fw;
47 char name[32] = "internal";
48 int ret, i;
49 u32 caps;
50
51 /* enable engine, and determine its capabilities */
52 ret = nouveau_engine_init(&falcon->base);
53 if (ret)
54 return ret;
55
56 if (device->chipset < 0xa3 ||
57 device->chipset == 0xaa || device->chipset == 0xac) {
58 falcon->version = 0;
59 falcon->secret = (falcon->addr == 0x087000) ? 1 : 0;
60 } else {
61 caps = nv_ro32(falcon, 0x12c);
62 falcon->version = (caps & 0x0000000f);
63 falcon->secret = (caps & 0x00000030) >> 4;
64 }
65
66 caps = nv_ro32(falcon, 0x108);
67 falcon->code.limit = (caps & 0x000001ff) << 8;
68 falcon->data.limit = (caps & 0x0003fe00) >> 1;
69
70 nv_debug(falcon, "falcon version: %d\n", falcon->version);
71 nv_debug(falcon, "secret level: %d\n", falcon->secret);
72 nv_debug(falcon, "code limit: %d\n", falcon->code.limit);
73 nv_debug(falcon, "data limit: %d\n", falcon->data.limit);
74
75 /* wait for 'uc halted' to be signalled before continuing */
76 if (falcon->secret) {
77 nv_wait(falcon, 0x008, 0x00000010, 0x00000010);
78 nv_wo32(falcon, 0x004, 0x00000010);
79 }
80
81 /* disable all interrupts */
82 nv_wo32(falcon, 0x014, 0xffffffff);
83
84 /* no default ucode provided by the engine implementation; try to

85 * locate a "self-bootstrapping" firmware image for the engine
86 */
87 if (!falcon->code.data) {
88 snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03x",
89 device->chipset, falcon->addr >> 12);
90
91 ret = request_firmware(&fw, name, &device->pdev->dev);
92 if (ret == 0) {
93 falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
94 falcon->code.size = fw->size;
95 falcon->data.data = NULL;
96 falcon->data.size = 0;
97 release_firmware(fw);
98 }
99
100 falcon->external = true;
101 }
102
103 /* next step is to try and load "static code/data segment" firmware
104 * images for the engine
105 */
106 if (!falcon->code.data) {
107 snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xd",
108 device->chipset, falcon->addr >> 12);
109
110 ret = request_firmware(&fw, name, &device->pdev->dev);
111 if (ret) {
112 nv_error(falcon, "unable to load firmware data\n");
113 return ret;
114 }
115
116 falcon->data.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
117 falcon->data.size = fw->size;
118 release_firmware(fw);
119 if (!falcon->data.data)
120 return -ENOMEM;
121
122 snprintf(name, sizeof(name), "nouveau/nv%02x_fuc%03xc",
123 device->chipset, falcon->addr >> 12);
124
125 ret = request_firmware(&fw, name, &device->pdev->dev);
126 if (ret) {
127 nv_error(falcon, "unable to load firmware code\n");
128 return ret;
129 }
130
131 falcon->code.data = kmemdup(fw->data, fw->size, GFP_KERNEL);
132 falcon->code.size = fw->size;
133 release_firmware(fw);
134 if (!falcon->code.data)
135 return -ENOMEM;
136 }
137
138 nv_debug(falcon, "firmware: %s (%s)\n", name, falcon->data.data ?
139 "static code/data segments" : "self-bootstrapping");
140
141 /* ensure any "self-bootstrapping" firmware image is in vram */
142 if (!falcon->data.data && !falcon->core) {
143 ret = nouveau_gpuobj_new(object->parent, NULL,
144 falcon->code.size, 256, 0,
145 &falcon->core);
146 if (ret) {
147 nv_error(falcon, "core allocation failed, %d\n", ret);
148 return ret;
149 }
150
151 for (i = 0; i < falcon->code.size; i += 4)
152 nv_wo32(falcon->core, i, falcon->code.data[i / 4]);
153 }
154
155 /* upload firmware bootloader (or the full code segments) */
156 if (falcon->core) {
157 if (device->card_type < NV_C0)
158 nv_wo32(falcon, 0x618, 0x04000000);
159 else
160 nv_wo32(falcon, 0x618, 0x00000114);
161 nv_wo32(falcon, 0x11c, 0);
162 nv_wo32(falcon, 0x110, falcon->core->addr >> 8);
163 nv_wo32(falcon, 0x114, 0);
164 nv_wo32(falcon, 0x118, 0x00006610);
165 } else {
166 if (falcon->code.size > falcon->code.limit ||
167 falcon->data.size > falcon->data.limit) {
168 nv_error(falcon, "ucode exceeds falcon limit(s)\n");
169 return -EINVAL;
170 }
171
172 if (falcon->version < 3) {
173 nv_wo32(falcon, 0xff8, 0x00100000);
174 for (i = 0; i < falcon->code.size / 4; i++)
175 nv_wo32(falcon, 0xff4, falcon->code.data[i]);
176 } else {
177 nv_wo32(falcon, 0x180, 0x01000000);
178 for (i = 0; i < falcon->code.size / 4; i++) {
179 if ((i & 0x3f) == 0)
180 nv_wo32(falcon, 0x188, i >> 6);
181 nv_wo32(falcon, 0x184, falcon->code.data[i]);
182 }
183 }
184 }
185
186 /* upload data segment (if necessary), zeroing the remainder */
187 if (falcon->version < 3) {
188 nv_wo32(falcon, 0xff8, 0x00000000);
189 for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
190 nv_wo32(falcon, 0xff4, falcon->data.data[i]);
191 for (; i < falcon->data.limit; i += 4)
192 nv_wo32(falcon, 0xff4, 0x00000000);
193 } else {
194 nv_wo32(falcon, 0x1c0, 0x01000000);
195 for (i = 0; !falcon->core && i < falcon->data.size / 4; i++)
196 nv_wo32(falcon, 0x1c4, falcon->data.data[i]);
197 for (; i < falcon->data.limit / 4; i++)
198 nv_wo32(falcon, 0x1c4, 0x00000000);
199 }
200
201 /* start it running */
202 nv_wo32(falcon, 0x10c, 0x00000001); /* BLOCK_ON_FIFO */
203 nv_wo32(falcon, 0x104, 0x00000000); /* ENTRY */
204 nv_wo32(falcon, 0x100, 0x00000002); /* TRIGGER */
205 nv_wo32(falcon, 0x048, 0x00000003); /* FIFO | CHSW */
206 return 0;
207}
208
209int
210_nouveau_falcon_fini(struct nouveau_object *object, bool suspend)
211{
212 struct nouveau_falcon *falcon = (void *)object;
213
214 if (!suspend) {
215 nouveau_gpuobj_ref(NULL, &falcon->core);
216 if (falcon->external) {
217 kfree(falcon->data.data);
218 kfree(falcon->code.data);
219 falcon->code.data = NULL;
220 }
221 }
222
223 nv_mo32(falcon, 0x048, 0x00000003, 0x00000000);
224 nv_wo32(falcon, 0x014, 0xffffffff);
225
226 return nouveau_engine_fini(&falcon->base, suspend);
227}
228
229int
230nouveau_falcon_create_(struct nouveau_object *parent,
231 struct nouveau_object *engine,
232 struct nouveau_oclass *oclass, u32 addr, bool enable,
233 const char *iname, const char *fname,
234 int length, void **pobject)
235{
236 struct nouveau_falcon *falcon;
237 int ret;
238
239 ret = nouveau_engine_create_(parent, engine, oclass, enable, iname,
240 fname, length, pobject);
241 falcon = *pobject;
242 if (ret)
243 return ret;
244
245 falcon->addr = addr;
246 return 0;
247}
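
_nouveau_falcon_init() derives external firmware names from the chipset and the falcon's MMIO window: "nouveau/nv%02x_fuc%03x" for a self-bootstrapping image, with "d" and "c" suffixes for split data/code segments. Worked example for a PBSP falcon (base 0x084000, so falcon->addr >> 12 = 0x084) on an nvc0 chipset:

        /* probe order in _nouveau_falcon_init() */
        "nouveau/nvc0_fuc084"   /* self-bootstrapping image */
        "nouveau/nvc0_fuc084d"  /* static data segment */
        "nouveau/nvc0_fuc084c"  /* static code segment */

If no image is found and none was built in, failure to load the split segments is fatal; a self-bootstrapping image is instead copied into a VRAM gpuobj and handed to the falcon's bootloader registers.
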
diff --git a/drivers/gpu/drm/nouveau/core/core/gpuobj.c b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
index 70586fde69cf..560b2214cf1c 100644
--- a/drivers/gpu/drm/nouveau/core/core/gpuobj.c
+++ b/drivers/gpu/drm/nouveau/core/core/gpuobj.c
@@ -183,7 +183,7 @@ _nouveau_gpuobj_fini(struct nouveau_object *object, bool suspend)
183} 183}
184 184
185u32 185u32
186_nouveau_gpuobj_rd32(struct nouveau_object *object, u32 addr) 186_nouveau_gpuobj_rd32(struct nouveau_object *object, u64 addr)
187{ 187{
188 struct nouveau_gpuobj *gpuobj = nv_gpuobj(object); 188 struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
189 struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent); 189 struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
@@ -193,7 +193,7 @@ _nouveau_gpuobj_rd32(struct nouveau_object *object, u32 addr)
193} 193}
194 194
195void 195void
196_nouveau_gpuobj_wr32(struct nouveau_object *object, u32 addr, u32 data) 196_nouveau_gpuobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
197{ 197{
198 struct nouveau_gpuobj *gpuobj = nv_gpuobj(object); 198 struct nouveau_gpuobj *gpuobj = nv_gpuobj(object);
199 struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent); 199 struct nouveau_ofuncs *pfuncs = nv_ofuncs(gpuobj->parent);
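
Widening the address parameter from u32 to u64 keeps the generic gpuobj accessors signature-compatible with the object ofuncs now shared with the falcon accessors above, which already take a 64-bit offset. A sketch of the common shape, assuming the nouveau_ofuncs slots use the same types:

        u32  (*rd32)(struct nouveau_object *, u64 addr);
        void (*wr32)(struct nouveau_object *, u64 addr, u32 data);
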
diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c
index a6d3cd6490f7..0261a11b2ae0 100644
--- a/drivers/gpu/drm/nouveau/core/core/mm.c
+++ b/drivers/gpu/drm/nouveau/core/core/mm.c
@@ -234,15 +234,18 @@ nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
234int 234int
235nouveau_mm_fini(struct nouveau_mm *mm) 235nouveau_mm_fini(struct nouveau_mm *mm)
236{ 236{
237 struct nouveau_mm_node *node, *heap = 237 if (nouveau_mm_initialised(mm)) {
238 list_first_entry(&mm->nodes, struct nouveau_mm_node, nl_entry); 238 struct nouveau_mm_node *node, *heap =
239 int nodes = 0; 239 list_first_entry(&mm->nodes, typeof(*heap), nl_entry);
240 int nodes = 0;
241
242 list_for_each_entry(node, &mm->nodes, nl_entry) {
243 if (WARN_ON(nodes++ == mm->heap_nodes))
244 return -EBUSY;
245 }
240 246
241 list_for_each_entry(node, &mm->nodes, nl_entry) { 247 kfree(heap);
242 if (WARN_ON(nodes++ == mm->heap_nodes))
243 return -EBUSY;
244 } 248 }
245 249
246 kfree(heap);
247 return 0; 250 return 0;
248} 251}
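
nouveau_mm_fini() is now a no-op on an allocator that was never initialised, and the heap-node leak check moves inside that guard. This lets teardown paths run unconditionally; a hypothetical caller, for illustration only:

        static void
        example_dtor(struct example_priv *priv)
        {
                nouveau_mm_fini(&priv->mm);  /* safe even if init never ran */
                kfree(priv);
        }
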
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
index 66f7dfd907ee..1d9f614cb97d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nv84.c
@@ -22,18 +22,13 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h> 25#include <core/engctx.h>
26#include <core/class.h>
28 27
29#include <engine/bsp.h> 28#include <engine/bsp.h>
30 29
31struct nv84_bsp_priv { 30struct nv84_bsp_priv {
32 struct nouveau_bsp base; 31 struct nouveau_engine base;
33};
34
35struct nv84_bsp_chan {
36 struct nouveau_bsp_chan base;
37}; 32};
38 33
39/******************************************************************************* 34/*******************************************************************************
@@ -49,61 +44,16 @@ nv84_bsp_sclass[] = {
49 * BSP context 44 * BSP context
50 ******************************************************************************/ 45 ******************************************************************************/
51 46
52static int
53nv84_bsp_context_ctor(struct nouveau_object *parent,
54 struct nouveau_object *engine,
55 struct nouveau_oclass *oclass, void *data, u32 size,
56 struct nouveau_object **pobject)
57{
58 struct nv84_bsp_chan *priv;
59 int ret;
60
61 ret = nouveau_bsp_context_create(parent, engine, oclass, NULL,
62 0, 0, 0, &priv);
63 *pobject = nv_object(priv);
64 if (ret)
65 return ret;
66
67 return 0;
68}
69
70static void
71nv84_bsp_context_dtor(struct nouveau_object *object)
72{
73 struct nv84_bsp_chan *priv = (void *)object;
74 nouveau_bsp_context_destroy(&priv->base);
75}
76
77static int
78nv84_bsp_context_init(struct nouveau_object *object)
79{
80 struct nv84_bsp_chan *priv = (void *)object;
81 int ret;
82
83 ret = nouveau_bsp_context_init(&priv->base);
84 if (ret)
85 return ret;
86
87 return 0;
88}
89
90static int
91nv84_bsp_context_fini(struct nouveau_object *object, bool suspend)
92{
93 struct nv84_bsp_chan *priv = (void *)object;
94 return nouveau_bsp_context_fini(&priv->base, suspend);
95}
96
97static struct nouveau_oclass 47static struct nouveau_oclass
98nv84_bsp_cclass = { 48nv84_bsp_cclass = {
99 .handle = NV_ENGCTX(BSP, 0x84), 49 .handle = NV_ENGCTX(BSP, 0x84),
100 .ofuncs = &(struct nouveau_ofuncs) { 50 .ofuncs = &(struct nouveau_ofuncs) {
101 .ctor = nv84_bsp_context_ctor, 51 .ctor = _nouveau_engctx_ctor,
102 .dtor = nv84_bsp_context_dtor, 52 .dtor = _nouveau_engctx_dtor,
103 .init = nv84_bsp_context_init, 53 .init = _nouveau_engctx_init,
104 .fini = nv84_bsp_context_fini, 54 .fini = _nouveau_engctx_fini,
105 .rd32 = _nouveau_bsp_context_rd32, 55 .rd32 = _nouveau_engctx_rd32,
106 .wr32 = _nouveau_bsp_context_wr32, 56 .wr32 = _nouveau_engctx_wr32,
107 }, 57 },
108}; 58};
109 59
@@ -111,11 +61,6 @@ nv84_bsp_cclass = {
111 * BSP engine/subdev functions 61 * BSP engine/subdev functions
112 ******************************************************************************/ 62 ******************************************************************************/
113 63
114static void
115nv84_bsp_intr(struct nouveau_subdev *subdev)
116{
117}
118
119static int 64static int
120nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 65nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
121 struct nouveau_oclass *oclass, void *data, u32 size, 66 struct nouveau_oclass *oclass, void *data, u32 size,
@@ -124,52 +69,25 @@ nv84_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
124 struct nv84_bsp_priv *priv; 69 struct nv84_bsp_priv *priv;
125 int ret; 70 int ret;
126 71
127 ret = nouveau_bsp_create(parent, engine, oclass, &priv); 72 ret = nouveau_engine_create(parent, engine, oclass, true,
73 "PBSP", "bsp", &priv);
128 *pobject = nv_object(priv); 74 *pobject = nv_object(priv);
129 if (ret) 75 if (ret)
130 return ret; 76 return ret;
131 77
132 nv_subdev(priv)->unit = 0x04008000; 78 nv_subdev(priv)->unit = 0x04008000;
133 nv_subdev(priv)->intr = nv84_bsp_intr;
134 nv_engine(priv)->cclass = &nv84_bsp_cclass; 79 nv_engine(priv)->cclass = &nv84_bsp_cclass;
135 nv_engine(priv)->sclass = nv84_bsp_sclass; 80 nv_engine(priv)->sclass = nv84_bsp_sclass;
136 return 0; 81 return 0;
137} 82}
138 83
139static void
140nv84_bsp_dtor(struct nouveau_object *object)
141{
142 struct nv84_bsp_priv *priv = (void *)object;
143 nouveau_bsp_destroy(&priv->base);
144}
145
146static int
147nv84_bsp_init(struct nouveau_object *object)
148{
149 struct nv84_bsp_priv *priv = (void *)object;
150 int ret;
151
152 ret = nouveau_bsp_init(&priv->base);
153 if (ret)
154 return ret;
155
156 return 0;
157}
158
159static int
160nv84_bsp_fini(struct nouveau_object *object, bool suspend)
161{
162 struct nv84_bsp_priv *priv = (void *)object;
163 return nouveau_bsp_fini(&priv->base, suspend);
164}
165
166struct nouveau_oclass 84struct nouveau_oclass
167nv84_bsp_oclass = { 85nv84_bsp_oclass = {
168 .handle = NV_ENGINE(BSP, 0x84), 86 .handle = NV_ENGINE(BSP, 0x84),
169 .ofuncs = &(struct nouveau_ofuncs) { 87 .ofuncs = &(struct nouveau_ofuncs) {
170 .ctor = nv84_bsp_ctor, 88 .ctor = nv84_bsp_ctor,
171 .dtor = nv84_bsp_dtor, 89 .dtor = _nouveau_engine_dtor,
172 .init = nv84_bsp_init, 90 .init = _nouveau_engine_init,
173 .fini = nv84_bsp_fini, 91 .fini = _nouveau_engine_fini,
174 }, 92 },
175}; 93};
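
With the generic helpers in place, nv84 BSP loses its per-engine ctor/dtor/init/fini wrappers and the empty interrupt stub; only the constructor filling in unit/cclass/sclass remains. The private struct shrinks accordingly:

        struct nv84_bsp_priv {
                struct nouveau_engine base;  /* was: struct nouveau_bsp */
        };
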
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
new file mode 100644
index 000000000000..0a5aa6bb0870
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
@@ -0,0 +1,110 @@
1/*
2 * Copyright 2012 Maarten Lankhorst
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Maarten Lankhorst
23 */
24
25#include <core/falcon.h>
26
27#include <engine/bsp.h>
28
29struct nvc0_bsp_priv {
30 struct nouveau_falcon base;
31};
32
33/*******************************************************************************
34 * BSP object classes
35 ******************************************************************************/
36
37static struct nouveau_oclass
38nvc0_bsp_sclass[] = {
39 { 0x90b1, &nouveau_object_ofuncs },
40 {},
41};
42
43/*******************************************************************************
44 * PBSP context
45 ******************************************************************************/
46
47static struct nouveau_oclass
48nvc0_bsp_cclass = {
49 .handle = NV_ENGCTX(BSP, 0xc0),
50 .ofuncs = &(struct nouveau_ofuncs) {
51 .ctor = _nouveau_falcon_context_ctor,
52 .dtor = _nouveau_falcon_context_dtor,
53 .init = _nouveau_falcon_context_init,
54 .fini = _nouveau_falcon_context_fini,
55 .rd32 = _nouveau_falcon_context_rd32,
56 .wr32 = _nouveau_falcon_context_wr32,
57 },
58};
59
60/*******************************************************************************
61 * PBSP engine/subdev functions
62 ******************************************************************************/
63
64static int
65nvc0_bsp_init(struct nouveau_object *object)
66{
67 struct nvc0_bsp_priv *priv = (void *)object;
68 int ret;
69
70 ret = nouveau_falcon_init(&priv->base);
71 if (ret)
72 return ret;
73
74 nv_wr32(priv, 0x084010, 0x0000fff2);
75 nv_wr32(priv, 0x08401c, 0x0000fff2);
76 return 0;
77}
78
79static int
80nvc0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
81 struct nouveau_oclass *oclass, void *data, u32 size,
82 struct nouveau_object **pobject)
83{
84 struct nvc0_bsp_priv *priv;
85 int ret;
86
87 ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
88 "PBSP", "bsp", &priv);
89 *pobject = nv_object(priv);
90 if (ret)
91 return ret;
92
93 nv_subdev(priv)->unit = 0x00008000;
94 nv_engine(priv)->cclass = &nvc0_bsp_cclass;
95 nv_engine(priv)->sclass = nvc0_bsp_sclass;
96 return 0;
97}
98
99struct nouveau_oclass
100nvc0_bsp_oclass = {
101 .handle = NV_ENGINE(BSP, 0xc0),
102 .ofuncs = &(struct nouveau_ofuncs) {
103 .ctor = nvc0_bsp_ctor,
104 .dtor = _nouveau_falcon_dtor,
105 .init = nvc0_bsp_init,
106 .fini = _nouveau_falcon_fini,
107 .rd32 = _nouveau_falcon_rd32,
108 .wr32 = _nouveau_falcon_wr32,
109 },
110};
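
nvc0_bsp_init() still pokes two absolute registers after the common falcon init; both sit inside the PBSP falcon window whose base (0x084000) was passed to nouveau_falcon_create():

        /* absolute address = falcon base + window offset */
        nv_wr32(priv, 0x084010, 0x0000fff2);    /* 0x084000 + 0x010 */
        nv_wr32(priv, 0x08401c, 0x0000fff2);    /* 0x084000 + 0x01c */
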
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
new file mode 100644
index 000000000000..d4f23bbd75b4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
@@ -0,0 +1,110 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/falcon.h>
26
27#include <engine/bsp.h>
28
29struct nve0_bsp_priv {
30 struct nouveau_falcon base;
31};
32
33/*******************************************************************************
34 * BSP object classes
35 ******************************************************************************/
36
37static struct nouveau_oclass
38nve0_bsp_sclass[] = {
39 { 0x95b1, &nouveau_object_ofuncs },
40 {},
41};
42
43/*******************************************************************************
44 * PBSP context
45 ******************************************************************************/
46
47static struct nouveau_oclass
48nve0_bsp_cclass = {
49 .handle = NV_ENGCTX(BSP, 0xe0),
50 .ofuncs = &(struct nouveau_ofuncs) {
51 .ctor = _nouveau_falcon_context_ctor,
52 .dtor = _nouveau_falcon_context_dtor,
53 .init = _nouveau_falcon_context_init,
54 .fini = _nouveau_falcon_context_fini,
55 .rd32 = _nouveau_falcon_context_rd32,
56 .wr32 = _nouveau_falcon_context_wr32,
57 },
58};
59
60/*******************************************************************************
61 * PBSP engine/subdev functions
62 ******************************************************************************/
63
64static int
65nve0_bsp_init(struct nouveau_object *object)
66{
67 struct nve0_bsp_priv *priv = (void *)object;
68 int ret;
69
70 ret = nouveau_falcon_init(&priv->base);
71 if (ret)
72 return ret;
73
74 nv_wr32(priv, 0x084010, 0x0000fff2);
75 nv_wr32(priv, 0x08401c, 0x0000fff2);
76 return 0;
77}
78
79static int
80nve0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
81 struct nouveau_oclass *oclass, void *data, u32 size,
82 struct nouveau_object **pobject)
83{
84 struct nve0_bsp_priv *priv;
85 int ret;
86
87 ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
88 "PBSP", "bsp", &priv);
89 *pobject = nv_object(priv);
90 if (ret)
91 return ret;
92
93 nv_subdev(priv)->unit = 0x00008000;
94 nv_engine(priv)->cclass = &nve0_bsp_cclass;
95 nv_engine(priv)->sclass = nve0_bsp_sclass;
96 return 0;
97}
98
99struct nouveau_oclass
100nve0_bsp_oclass = {
101 .handle = NV_ENGINE(BSP, 0xe0),
102 .ofuncs = &(struct nouveau_ofuncs) {
103 .ctor = nve0_bsp_ctor,
104 .dtor = _nouveau_falcon_dtor,
105 .init = nve0_bsp_init,
106 .fini = _nouveau_falcon_fini,
107 .rd32 = _nouveau_falcon_rd32,
108 .wr32 = _nouveau_falcon_wr32,
109 },
110};
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
index 4df6da0af740..283248c7b050 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nva3.c
@@ -22,10 +22,9 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/os.h> 25#include <core/falcon.h>
26#include <core/enum.h>
27#include <core/class.h> 26#include <core/class.h>
28#include <core/engctx.h> 27#include <core/enum.h>
29 28
30#include <subdev/fb.h> 29#include <subdev/fb.h>
31#include <subdev/vm.h> 30#include <subdev/vm.h>
@@ -36,11 +35,7 @@
36#include "fuc/nva3.fuc.h" 35#include "fuc/nva3.fuc.h"
37 36
38struct nva3_copy_priv { 37struct nva3_copy_priv {
39 struct nouveau_copy base; 38 struct nouveau_falcon base;
40};
41
42struct nva3_copy_chan {
43 struct nouveau_copy_chan base;
44}; 39};
45 40
46/******************************************************************************* 41/*******************************************************************************
@@ -57,34 +52,16 @@ nva3_copy_sclass[] = {
57 * PCOPY context 52 * PCOPY context
58 ******************************************************************************/ 53 ******************************************************************************/
59 54
60static int
61nva3_copy_context_ctor(struct nouveau_object *parent,
62 struct nouveau_object *engine,
63 struct nouveau_oclass *oclass, void *data, u32 size,
64 struct nouveau_object **pobject)
65{
66 struct nva3_copy_chan *priv;
67 int ret;
68
69 ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256, 0,
70 NVOBJ_FLAG_ZERO_ALLOC, &priv);
71 *pobject = nv_object(priv);
72 if (ret)
73 return ret;
74
75 return 0;
76}
77
78static struct nouveau_oclass 55static struct nouveau_oclass
79nva3_copy_cclass = { 56nva3_copy_cclass = {
80 .handle = NV_ENGCTX(COPY0, 0xa3), 57 .handle = NV_ENGCTX(COPY0, 0xa3),
81 .ofuncs = &(struct nouveau_ofuncs) { 58 .ofuncs = &(struct nouveau_ofuncs) {
82 .ctor = nva3_copy_context_ctor, 59 .ctor = _nouveau_falcon_context_ctor,
83 .dtor = _nouveau_copy_context_dtor, 60 .dtor = _nouveau_falcon_context_dtor,
84 .init = _nouveau_copy_context_init, 61 .init = _nouveau_falcon_context_init,
85 .fini = _nouveau_copy_context_fini, 62 .fini = _nouveau_falcon_context_fini,
86 .rd32 = _nouveau_copy_context_rd32, 63 .rd32 = _nouveau_falcon_context_rd32,
87 .wr32 = _nouveau_copy_context_wr32, 64 .wr32 = _nouveau_falcon_context_wr32,
88 65
89 }, 66 },
90}; 67};
@@ -100,41 +77,40 @@ static const struct nouveau_enum nva3_copy_isr_error_name[] = {
100 {} 77 {}
101}; 78};
102 79
103static void 80void
104nva3_copy_intr(struct nouveau_subdev *subdev) 81nva3_copy_intr(struct nouveau_subdev *subdev)
105{ 82{
106 struct nouveau_fifo *pfifo = nouveau_fifo(subdev); 83 struct nouveau_fifo *pfifo = nouveau_fifo(subdev);
107 struct nouveau_engine *engine = nv_engine(subdev); 84 struct nouveau_engine *engine = nv_engine(subdev);
85 struct nouveau_falcon *falcon = (void *)subdev;
108 struct nouveau_object *engctx; 86 struct nouveau_object *engctx;
109 struct nva3_copy_priv *priv = (void *)subdev; 87 u32 dispatch = nv_ro32(falcon, 0x01c);
110 u32 dispatch = nv_rd32(priv, 0x10401c); 88 u32 stat = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);
111 u32 stat = nv_rd32(priv, 0x104008) & dispatch & ~(dispatch >> 16); 89 u64 inst = nv_ro32(falcon, 0x050) & 0x3fffffff;
112 u64 inst = nv_rd32(priv, 0x104050) & 0x3fffffff; 90 u32 ssta = nv_ro32(falcon, 0x040) & 0x0000ffff;
113 u32 ssta = nv_rd32(priv, 0x104040) & 0x0000ffff; 91 u32 addr = nv_ro32(falcon, 0x040) >> 16;
114 u32 addr = nv_rd32(priv, 0x104040) >> 16;
115 u32 mthd = (addr & 0x07ff) << 2; 92 u32 mthd = (addr & 0x07ff) << 2;
116 u32 subc = (addr & 0x3800) >> 11; 93 u32 subc = (addr & 0x3800) >> 11;
117 u32 data = nv_rd32(priv, 0x104044); 94 u32 data = nv_ro32(falcon, 0x044);
118 int chid; 95 int chid;
119 96
120 engctx = nouveau_engctx_get(engine, inst); 97 engctx = nouveau_engctx_get(engine, inst);
121 chid = pfifo->chid(pfifo, engctx); 98 chid = pfifo->chid(pfifo, engctx);
122 99
123 if (stat & 0x00000040) { 100 if (stat & 0x00000040) {
124 nv_error(priv, "DISPATCH_ERROR ["); 101 nv_error(falcon, "DISPATCH_ERROR [");
125 nouveau_enum_print(nva3_copy_isr_error_name, ssta); 102 nouveau_enum_print(nva3_copy_isr_error_name, ssta);
126 printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n", 103 printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
127 chid, inst << 12, subc, mthd, data); 104 chid, inst << 12, subc, mthd, data);
128 nv_wr32(priv, 0x104004, 0x00000040); 105 nv_wo32(falcon, 0x004, 0x00000040);
129 stat &= ~0x00000040; 106 stat &= ~0x00000040;
130 } 107 }
131 108
132 if (stat) { 109 if (stat) {
133 nv_error(priv, "unhandled intr 0x%08x\n", stat); 110 nv_error(falcon, "unhandled intr 0x%08x\n", stat);
134 nv_wr32(priv, 0x104004, stat); 111 nv_wo32(falcon, 0x004, stat);
135 } 112 }
136 113
137 nv50_fb_trap(nouveau_fb(priv), 1);
138 nouveau_engctx_put(engctx); 114 nouveau_engctx_put(engctx);
139} 115}
140 116
@@ -154,7 +130,8 @@ nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
154 struct nva3_copy_priv *priv; 130 struct nva3_copy_priv *priv;
155 int ret; 131 int ret;
156 132
157 ret = nouveau_copy_create(parent, engine, oclass, enable, 0, &priv); 133 ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, enable,
134 "PCE0", "copy0", &priv);
158 *pobject = nv_object(priv); 135 *pobject = nv_object(priv);
159 if (ret) 136 if (ret)
160 return ret; 137 return ret;
@@ -164,59 +141,22 @@ nva3_copy_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
164 nv_engine(priv)->cclass = &nva3_copy_cclass; 141 nv_engine(priv)->cclass = &nva3_copy_cclass;
165 nv_engine(priv)->sclass = nva3_copy_sclass; 142 nv_engine(priv)->sclass = nva3_copy_sclass;
166 nv_engine(priv)->tlb_flush = nva3_copy_tlb_flush; 143 nv_engine(priv)->tlb_flush = nva3_copy_tlb_flush;
144 nv_falcon(priv)->code.data = nva3_pcopy_code;
145 nv_falcon(priv)->code.size = sizeof(nva3_pcopy_code);
146 nv_falcon(priv)->data.data = nva3_pcopy_data;
147 nv_falcon(priv)->data.size = sizeof(nva3_pcopy_data);
167 return 0; 148 return 0;
168} 149}
169 150
170static int
171nva3_copy_init(struct nouveau_object *object)
172{
173 struct nva3_copy_priv *priv = (void *)object;
174 int ret, i;
175
176 ret = nouveau_copy_init(&priv->base);
177 if (ret)
178 return ret;
179
180 /* disable all interrupts */
181 nv_wr32(priv, 0x104014, 0xffffffff);
182
183 /* upload ucode */
184 nv_wr32(priv, 0x1041c0, 0x01000000);
185 for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
186 nv_wr32(priv, 0x1041c4, nva3_pcopy_data[i]);
187
188 nv_wr32(priv, 0x104180, 0x01000000);
189 for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
190 if ((i & 0x3f) == 0)
191 nv_wr32(priv, 0x104188, i >> 6);
192 nv_wr32(priv, 0x104184, nva3_pcopy_code[i]);
193 }
194
195 /* start it running */
196 nv_wr32(priv, 0x10410c, 0x00000000);
197 nv_wr32(priv, 0x104104, 0x00000000); /* ENTRY */
198 nv_wr32(priv, 0x104100, 0x00000002); /* TRIGGER */
199 return 0;
200}
201
202static int
203nva3_copy_fini(struct nouveau_object *object, bool suspend)
204{
205 struct nva3_copy_priv *priv = (void *)object;
206
207 nv_mask(priv, 0x104048, 0x00000003, 0x00000000);
208 nv_wr32(priv, 0x104014, 0xffffffff);
209
210 return nouveau_copy_fini(&priv->base, suspend);
211}
212
213struct nouveau_oclass 151struct nouveau_oclass
214nva3_copy_oclass = { 152nva3_copy_oclass = {
215 .handle = NV_ENGINE(COPY0, 0xa3), 153 .handle = NV_ENGINE(COPY0, 0xa3),
216 .ofuncs = &(struct nouveau_ofuncs) { 154 .ofuncs = &(struct nouveau_ofuncs) {
217 .ctor = nva3_copy_ctor, 155 .ctor = nva3_copy_ctor,
218 .dtor = _nouveau_copy_dtor, 156 .dtor = _nouveau_falcon_dtor,
219 .init = nva3_copy_init, 157 .init = _nouveau_falcon_init,
220 .fini = nva3_copy_fini, 158 .fini = _nouveau_falcon_fini,
159 .rd32 = _nouveau_falcon_rd32,
160 .wr32 = _nouveau_falcon_wr32,
221 }, 161 },
222}; 162};
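
Converting PCE0 to the falcon base class lets the interrupt handler use window-relative accessors: with falcon->addr = 0x104000, nv_ro32(falcon, 0x01c) reaches the register the old code read as nv_rd32(priv, 0x10401c), and likewise for offsets 0x008, 0x040, 0x044 and 0x050. The built-in fuc images are handed to the base class:

        nv_falcon(priv)->code.data = nva3_pcopy_code;
        nv_falcon(priv)->code.size = sizeof(nva3_pcopy_code);
        nv_falcon(priv)->data.data = nva3_pcopy_data;
        nv_falcon(priv)->data.size = sizeof(nva3_pcopy_data);

so the hand-rolled upload loops in nva3_copy_init()/_fini() are replaced by the common _nouveau_falcon_init()/_fini().
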
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
index 06d4a8791055..b3ed2737e21f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nvc0.c
@@ -22,10 +22,9 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/os.h> 25#include <core/falcon.h>
26#include <core/enum.h>
27#include <core/class.h> 26#include <core/class.h>
28#include <core/engctx.h> 27#include <core/enum.h>
29 28
30#include <engine/fifo.h> 29#include <engine/fifo.h>
31#include <engine/copy.h> 30#include <engine/copy.h>
@@ -33,11 +32,7 @@
33#include "fuc/nvc0.fuc.h" 32#include "fuc/nvc0.fuc.h"
34 33
35struct nvc0_copy_priv { 34struct nvc0_copy_priv {
36 struct nouveau_copy base; 35 struct nouveau_falcon base;
37};
38
39struct nvc0_copy_chan {
40 struct nouveau_copy_chan base;
41}; 36};
42 37
43/******************************************************************************* 38/*******************************************************************************
@@ -60,32 +55,14 @@ nvc0_copy1_sclass[] = {
60 * PCOPY context 55 * PCOPY context
61 ******************************************************************************/ 56 ******************************************************************************/
62 57
63static int
64nvc0_copy_context_ctor(struct nouveau_object *parent,
65 struct nouveau_object *engine,
66 struct nouveau_oclass *oclass, void *data, u32 size,
67 struct nouveau_object **pobject)
68{
69 struct nvc0_copy_chan *priv;
70 int ret;
71
72 ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
73 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
74 *pobject = nv_object(priv);
75 if (ret)
76 return ret;
77
78 return 0;
79}
80
81static struct nouveau_ofuncs 58static struct nouveau_ofuncs
82nvc0_copy_context_ofuncs = { 59nvc0_copy_context_ofuncs = {
83 .ctor = nvc0_copy_context_ctor, 60 .ctor = _nouveau_falcon_context_ctor,
84 .dtor = _nouveau_copy_context_dtor, 61 .dtor = _nouveau_falcon_context_dtor,
85 .init = _nouveau_copy_context_init, 62 .init = _nouveau_falcon_context_init,
86 .fini = _nouveau_copy_context_fini, 63 .fini = _nouveau_falcon_context_fini,
87 .rd32 = _nouveau_copy_context_rd32, 64 .rd32 = _nouveau_falcon_context_rd32,
88 .wr32 = _nouveau_copy_context_wr32, 65 .wr32 = _nouveau_falcon_context_wr32,
89}; 66};
90 67
91static struct nouveau_oclass 68static struct nouveau_oclass
@@ -104,50 +81,18 @@ nvc0_copy1_cclass = {
104 * PCOPY engine/subdev functions 81 * PCOPY engine/subdev functions
105 ******************************************************************************/ 82 ******************************************************************************/
106 83
107static const struct nouveau_enum nvc0_copy_isr_error_name[] = { 84static int
108 { 0x0001, "ILLEGAL_MTHD" }, 85nvc0_copy_init(struct nouveau_object *object)
109 { 0x0002, "INVALID_ENUM" },
110 { 0x0003, "INVALID_BITFIELD" },
111 {}
112};
113
114static void
115nvc0_copy_intr(struct nouveau_subdev *subdev)
116{ 86{
117 struct nouveau_fifo *pfifo = nouveau_fifo(subdev); 87 struct nvc0_copy_priv *priv = (void *)object;
118 struct nouveau_engine *engine = nv_engine(subdev); 88 int ret;
119 struct nouveau_object *engctx;
120 int idx = nv_engidx(nv_object(subdev)) - NVDEV_ENGINE_COPY0;
121 struct nvc0_copy_priv *priv = (void *)subdev;
122 u32 disp = nv_rd32(priv, 0x10401c + (idx * 0x1000));
123 u32 intr = nv_rd32(priv, 0x104008 + (idx * 0x1000));
124 u32 stat = intr & disp & ~(disp >> 16);
125 u64 inst = nv_rd32(priv, 0x104050 + (idx * 0x1000)) & 0x0fffffff;
126 u32 ssta = nv_rd32(priv, 0x104040 + (idx * 0x1000)) & 0x0000ffff;
127 u32 addr = nv_rd32(priv, 0x104040 + (idx * 0x1000)) >> 16;
128 u32 mthd = (addr & 0x07ff) << 2;
129 u32 subc = (addr & 0x3800) >> 11;
130 u32 data = nv_rd32(priv, 0x104044 + (idx * 0x1000));
131 int chid;
132
133 engctx = nouveau_engctx_get(engine, inst);
134 chid = pfifo->chid(pfifo, engctx);
135
136 if (stat & 0x00000040) {
137 nv_error(priv, "DISPATCH_ERROR [");
138 nouveau_enum_print(nvc0_copy_isr_error_name, ssta);
139 printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
140 chid, (u64)inst << 12, subc, mthd, data);
141 nv_wr32(priv, 0x104004 + (idx * 0x1000), 0x00000040);
142 stat &= ~0x00000040;
143 }
144 89
145 if (stat) { 90 ret = nouveau_falcon_init(&priv->base);
146 nv_error(priv, "unhandled intr 0x%08x\n", stat); 91 if (ret)
147 nv_wr32(priv, 0x104004 + (idx * 0x1000), stat); 92 return ret;
148 }
149 93
150 nouveau_engctx_put(engctx); 94 nv_wo32(priv, 0x084, nv_engidx(object) - NVDEV_ENGINE_COPY0);
95 return 0;
151} 96}
152 97
153static int 98static int
@@ -161,15 +106,20 @@ nvc0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
161 if (nv_rd32(parent, 0x022500) & 0x00000100) 106 if (nv_rd32(parent, 0x022500) & 0x00000100)
162 return -ENODEV; 107 return -ENODEV;
163 108
164 ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv); 109 ret = nouveau_falcon_create(parent, engine, oclass, 0x104000, true,
110 "PCE0", "copy0", &priv);
165 *pobject = nv_object(priv); 111 *pobject = nv_object(priv);
166 if (ret) 112 if (ret)
167 return ret; 113 return ret;
168 114
169 nv_subdev(priv)->unit = 0x00000040; 115 nv_subdev(priv)->unit = 0x00000040;
170 nv_subdev(priv)->intr = nvc0_copy_intr; 116 nv_subdev(priv)->intr = nva3_copy_intr;
171 nv_engine(priv)->cclass = &nvc0_copy0_cclass; 117 nv_engine(priv)->cclass = &nvc0_copy0_cclass;
172 nv_engine(priv)->sclass = nvc0_copy0_sclass; 118 nv_engine(priv)->sclass = nvc0_copy0_sclass;
119 nv_falcon(priv)->code.data = nvc0_pcopy_code;
120 nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code);
121 nv_falcon(priv)->data.data = nvc0_pcopy_data;
122 nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data);
173 return 0; 123 return 0;
174} 124}
175 125
@@ -184,72 +134,33 @@ nvc0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
184 if (nv_rd32(parent, 0x022500) & 0x00000200) 134 if (nv_rd32(parent, 0x022500) & 0x00000200)
185 return -ENODEV; 135 return -ENODEV;
186 136
187 ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv); 137 ret = nouveau_falcon_create(parent, engine, oclass, 0x105000, true,
138 "PCE1", "copy1", &priv);
188 *pobject = nv_object(priv); 139 *pobject = nv_object(priv);
189 if (ret) 140 if (ret)
190 return ret; 141 return ret;
191 142
192 nv_subdev(priv)->unit = 0x00000080; 143 nv_subdev(priv)->unit = 0x00000080;
193 nv_subdev(priv)->intr = nvc0_copy_intr; 144 nv_subdev(priv)->intr = nva3_copy_intr;
194 nv_engine(priv)->cclass = &nvc0_copy1_cclass; 145 nv_engine(priv)->cclass = &nvc0_copy1_cclass;
195 nv_engine(priv)->sclass = nvc0_copy1_sclass; 146 nv_engine(priv)->sclass = nvc0_copy1_sclass;
147 nv_falcon(priv)->code.data = nvc0_pcopy_code;
148 nv_falcon(priv)->code.size = sizeof(nvc0_pcopy_code);
149 nv_falcon(priv)->data.data = nvc0_pcopy_data;
150 nv_falcon(priv)->data.size = sizeof(nvc0_pcopy_data);
196 return 0; 151 return 0;
197} 152}
198 153
199static int
200nvc0_copy_init(struct nouveau_object *object)
201{
202 int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
203 struct nvc0_copy_priv *priv = (void *)object;
204 int ret, i;
205
206 ret = nouveau_copy_init(&priv->base);
207 if (ret)
208 return ret;
209
210 /* disable all interrupts */
211 nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
212
213 /* upload ucode */
214 nv_wr32(priv, 0x1041c0 + (idx * 0x1000), 0x01000000);
215 for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++)
216 nv_wr32(priv, 0x1041c4 + (idx * 0x1000), nvc0_pcopy_data[i]);
217
218 nv_wr32(priv, 0x104180 + (idx * 0x1000), 0x01000000);
219 for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) {
220 if ((i & 0x3f) == 0)
221 nv_wr32(priv, 0x104188 + (idx * 0x1000), i >> 6);
222 nv_wr32(priv, 0x104184 + (idx * 0x1000), nvc0_pcopy_code[i]);
223 }
224
225 /* start it running */
226 nv_wr32(priv, 0x104084 + (idx * 0x1000), idx);
227 nv_wr32(priv, 0x10410c + (idx * 0x1000), 0x00000000);
228 nv_wr32(priv, 0x104104 + (idx * 0x1000), 0x00000000); /* ENTRY */
229 nv_wr32(priv, 0x104100 + (idx * 0x1000), 0x00000002); /* TRIGGER */
230 return 0;
231}
232
233static int
234nvc0_copy_fini(struct nouveau_object *object, bool suspend)
235{
236 int idx = nv_engidx(object) - NVDEV_ENGINE_COPY0;
237 struct nvc0_copy_priv *priv = (void *)object;
238
239 nv_mask(priv, 0x104048 + (idx * 0x1000), 0x00000003, 0x00000000);
240 nv_wr32(priv, 0x104014 + (idx * 0x1000), 0xffffffff);
241
242 return nouveau_copy_fini(&priv->base, suspend);
243}
244
245struct nouveau_oclass 154struct nouveau_oclass
246nvc0_copy0_oclass = { 155nvc0_copy0_oclass = {
247 .handle = NV_ENGINE(COPY0, 0xc0), 156 .handle = NV_ENGINE(COPY0, 0xc0),
248 .ofuncs = &(struct nouveau_ofuncs) { 157 .ofuncs = &(struct nouveau_ofuncs) {
249 .ctor = nvc0_copy0_ctor, 158 .ctor = nvc0_copy0_ctor,
250 .dtor = _nouveau_copy_dtor, 159 .dtor = _nouveau_falcon_dtor,
251 .init = nvc0_copy_init, 160 .init = nvc0_copy_init,
252 .fini = nvc0_copy_fini, 161 .fini = _nouveau_falcon_fini,
162 .rd32 = _nouveau_falcon_rd32,
163 .wr32 = _nouveau_falcon_wr32,
253 }, 164 },
254}; 165};
255 166
@@ -258,8 +169,10 @@ nvc0_copy1_oclass = {
258 .handle = NV_ENGINE(COPY1, 0xc0), 169 .handle = NV_ENGINE(COPY1, 0xc0),
259 .ofuncs = &(struct nouveau_ofuncs) { 170 .ofuncs = &(struct nouveau_ofuncs) {
260 .ctor = nvc0_copy1_ctor, 171 .ctor = nvc0_copy1_ctor,
261 .dtor = _nouveau_copy_dtor, 172 .dtor = _nouveau_falcon_dtor,
262 .init = nvc0_copy_init, 173 .init = nvc0_copy_init,
263 .fini = nvc0_copy_fini, 174 .fini = _nouveau_falcon_fini,
175 .rd32 = _nouveau_falcon_rd32,
176 .wr32 = _nouveau_falcon_wr32,
264 }, 177 },
265}; 178};
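
A note on the hunk above: the interrupt handler that survives this conversion still decodes the PCE dispatch-error word itself. A minimal user-space sketch of that decode, illustrative only (the function name and sample value are mine, not the driver's):

	#include <stdint.h>
	#include <stdio.h>

	static void decode_dispatch_error(uint32_t err)
	{
		uint32_t ssta = err & 0x0000ffff;      /* error status code */
		uint32_t addr = err >> 16;             /* packed method address */
		uint32_t mthd = (addr & 0x07ff) << 2;  /* method offset in bytes */
		uint32_t subc = (addr & 0x3800) >> 11; /* subchannel */

		printf("ssta 0x%04x subc %u mthd 0x%04x\n", ssta, subc, mthd);
	}

	int main(void)
	{
		decode_dispatch_error(0x08040001);     /* arbitrary example */
		return 0;
	}
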
diff --git a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
index 2017c1579ac5..dbbe9e8998fe 100644
--- a/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/copy/nve0.c
@@ -30,11 +30,7 @@
30#include <engine/copy.h> 30#include <engine/copy.h>
31 31
32struct nve0_copy_priv { 32struct nve0_copy_priv {
33 struct nouveau_copy base; 33 struct nouveau_engine base;
34};
35
36struct nve0_copy_chan {
37 struct nouveau_copy_chan base;
38}; 34};
39 35
40/******************************************************************************* 36/*******************************************************************************
@@ -51,32 +47,14 @@ nve0_copy_sclass[] = {
51 * PCOPY context 47 * PCOPY context
52 ******************************************************************************/ 48 ******************************************************************************/
53 49
54static int
55nve0_copy_context_ctor(struct nouveau_object *parent,
56 struct nouveau_object *engine,
57 struct nouveau_oclass *oclass, void *data, u32 size,
58 struct nouveau_object **pobject)
59{
60 struct nve0_copy_chan *priv;
61 int ret;
62
63 ret = nouveau_copy_context_create(parent, engine, oclass, NULL, 256,
64 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
65 *pobject = nv_object(priv);
66 if (ret)
67 return ret;
68
69 return 0;
70}
71
72static struct nouveau_ofuncs 50static struct nouveau_ofuncs
73nve0_copy_context_ofuncs = { 51nve0_copy_context_ofuncs = {
74 .ctor = nve0_copy_context_ctor, 52 .ctor = _nouveau_engctx_ctor,
75 .dtor = _nouveau_copy_context_dtor, 53 .dtor = _nouveau_engctx_dtor,
76 .init = _nouveau_copy_context_init, 54 .init = _nouveau_engctx_init,
77 .fini = _nouveau_copy_context_fini, 55 .fini = _nouveau_engctx_fini,
78 .rd32 = _nouveau_copy_context_rd32, 56 .rd32 = _nouveau_engctx_rd32,
79 .wr32 = _nouveau_copy_context_wr32, 57 .wr32 = _nouveau_engctx_wr32,
80}; 58};
81 59
82static struct nouveau_oclass 60static struct nouveau_oclass
@@ -100,7 +78,8 @@ nve0_copy0_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
100 if (nv_rd32(parent, 0x022500) & 0x00000100) 78 if (nv_rd32(parent, 0x022500) & 0x00000100)
101 return -ENODEV; 79 return -ENODEV;
102 80
103 ret = nouveau_copy_create(parent, engine, oclass, true, 0, &priv); 81 ret = nouveau_engine_create(parent, engine, oclass, true,
82 "PCE0", "copy0", &priv);
104 *pobject = nv_object(priv); 83 *pobject = nv_object(priv);
105 if (ret) 84 if (ret)
106 return ret; 85 return ret;
@@ -122,7 +101,8 @@ nve0_copy1_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
122 if (nv_rd32(parent, 0x022500) & 0x00000200) 101 if (nv_rd32(parent, 0x022500) & 0x00000200)
123 return -ENODEV; 102 return -ENODEV;
124 103
125 ret = nouveau_copy_create(parent, engine, oclass, true, 1, &priv); 104 ret = nouveau_engine_create(parent, engine, oclass, true,
105 "PCE1", "copy1", &priv);
126 *pobject = nv_object(priv); 106 *pobject = nv_object(priv);
127 if (ret) 107 if (ret)
128 return ret; 108 return ret;
@@ -138,9 +118,9 @@ nve0_copy0_oclass = {
138 .handle = NV_ENGINE(COPY0, 0xe0), 118 .handle = NV_ENGINE(COPY0, 0xe0),
139 .ofuncs = &(struct nouveau_ofuncs) { 119 .ofuncs = &(struct nouveau_ofuncs) {
140 .ctor = nve0_copy0_ctor, 120 .ctor = nve0_copy0_ctor,
141 .dtor = _nouveau_copy_dtor, 121 .dtor = _nouveau_engine_dtor,
142 .init = _nouveau_copy_init, 122 .init = _nouveau_engine_init,
143 .fini = _nouveau_copy_fini, 123 .fini = _nouveau_engine_fini,
144 }, 124 },
145}; 125};
146 126
@@ -149,8 +129,8 @@ nve0_copy1_oclass = {
149 .handle = NV_ENGINE(COPY1, 0xe0), 129 .handle = NV_ENGINE(COPY1, 0xe0),
150 .ofuncs = &(struct nouveau_ofuncs) { 130 .ofuncs = &(struct nouveau_ofuncs) {
151 .ctor = nve0_copy1_ctor, 131 .ctor = nve0_copy1_ctor,
152 .dtor = _nouveau_copy_dtor, 132 .dtor = _nouveau_engine_dtor,
153 .init = _nouveau_copy_init, 133 .init = _nouveau_engine_init,
154 .fini = _nouveau_copy_fini, 134 .fini = _nouveau_engine_fini,
155 }, 135 },
156}; 136};
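
The Kepler copy engines carry no ucode to upload, so they fall back to the plain engine base class here, and the context constructor that did nothing but forward fixed arguments to the base create disappears in favour of the generic _nouveau_engctx_ctor. The shape of that refactor, sketched outside the driver with hypothetical names:

	#include <stdio.h>

	struct ofuncs { int (*ctor)(void); };

	/* shared base behaviour, one copy for every trivial class */
	static int generic_engctx_ctor(void) { return 0; }

	/* a class needing no extra setup points straight at the generic */
	static const struct ofuncs copy_context_ofuncs = {
		.ctor = generic_engctx_ctor,
	};

	int main(void)
	{
		printf("ctor -> %d\n", copy_context_ofuncs.ctor());
		return 0;
	}
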
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
index 1d85e5b66ca0..b97490512723 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv84.c
@@ -34,11 +34,7 @@
34#include <engine/crypt.h> 34#include <engine/crypt.h>
35 35
36struct nv84_crypt_priv { 36struct nv84_crypt_priv {
37 struct nouveau_crypt base; 37 struct nouveau_engine base;
38};
39
40struct nv84_crypt_chan {
41 struct nouveau_crypt_chan base;
42}; 38};
43 39
44/******************************************************************************* 40/*******************************************************************************
@@ -87,34 +83,16 @@ nv84_crypt_sclass[] = {
87 * PCRYPT context 83 * PCRYPT context
88 ******************************************************************************/ 84 ******************************************************************************/
89 85
90static int
91nv84_crypt_context_ctor(struct nouveau_object *parent,
92 struct nouveau_object *engine,
93 struct nouveau_oclass *oclass, void *data, u32 size,
94 struct nouveau_object **pobject)
95{
96 struct nv84_crypt_chan *priv;
97 int ret;
98
99 ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
100 0, NVOBJ_FLAG_ZERO_ALLOC, &priv);
101 *pobject = nv_object(priv);
102 if (ret)
103 return ret;
104
105 return 0;
106}
107
108static struct nouveau_oclass 86static struct nouveau_oclass
109nv84_crypt_cclass = { 87nv84_crypt_cclass = {
110 .handle = NV_ENGCTX(CRYPT, 0x84), 88 .handle = NV_ENGCTX(CRYPT, 0x84),
111 .ofuncs = &(struct nouveau_ofuncs) { 89 .ofuncs = &(struct nouveau_ofuncs) {
112 .ctor = nv84_crypt_context_ctor, 90 .ctor = _nouveau_engctx_ctor,
113 .dtor = _nouveau_crypt_context_dtor, 91 .dtor = _nouveau_engctx_dtor,
114 .init = _nouveau_crypt_context_init, 92 .init = _nouveau_engctx_init,
115 .fini = _nouveau_crypt_context_fini, 93 .fini = _nouveau_engctx_fini,
116 .rd32 = _nouveau_crypt_context_rd32, 94 .rd32 = _nouveau_engctx_rd32,
117 .wr32 = _nouveau_crypt_context_wr32, 95 .wr32 = _nouveau_engctx_wr32,
118 }, 96 },
119}; 97};
120 98
@@ -157,7 +135,6 @@ nv84_crypt_intr(struct nouveau_subdev *subdev)
157 nv_wr32(priv, 0x102130, stat); 135 nv_wr32(priv, 0x102130, stat);
158 nv_wr32(priv, 0x10200c, 0x10); 136 nv_wr32(priv, 0x10200c, 0x10);
159 137
160 nv50_fb_trap(nouveau_fb(priv), 1);
161 nouveau_engctx_put(engctx); 138 nouveau_engctx_put(engctx);
162} 139}
163 140
@@ -176,7 +153,8 @@ nv84_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
176 struct nv84_crypt_priv *priv; 153 struct nv84_crypt_priv *priv;
177 int ret; 154 int ret;
178 155
179 ret = nouveau_crypt_create(parent, engine, oclass, &priv); 156 ret = nouveau_engine_create(parent, engine, oclass, true,
157 "PCRYPT", "crypt", &priv);
180 *pobject = nv_object(priv); 158 *pobject = nv_object(priv);
181 if (ret) 159 if (ret)
182 return ret; 160 return ret;
@@ -195,7 +173,7 @@ nv84_crypt_init(struct nouveau_object *object)
195 struct nv84_crypt_priv *priv = (void *)object; 173 struct nv84_crypt_priv *priv = (void *)object;
196 int ret; 174 int ret;
197 175
198 ret = nouveau_crypt_init(&priv->base); 176 ret = nouveau_engine_init(&priv->base);
199 if (ret) 177 if (ret)
200 return ret; 178 return ret;
201 179
@@ -210,8 +188,8 @@ nv84_crypt_oclass = {
210 .handle = NV_ENGINE(CRYPT, 0x84), 188 .handle = NV_ENGINE(CRYPT, 0x84),
211 .ofuncs = &(struct nouveau_ofuncs) { 189 .ofuncs = &(struct nouveau_ofuncs) {
212 .ctor = nv84_crypt_ctor, 190 .ctor = nv84_crypt_ctor,
213 .dtor = _nouveau_crypt_dtor, 191 .dtor = _nouveau_engine_dtor,
214 .init = nv84_crypt_init, 192 .init = nv84_crypt_init,
215 .fini = _nouveau_crypt_fini, 193 .fini = _nouveau_engine_fini,
216 }, 194 },
217}; 195};
diff --git a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
index 9e3876c89b96..21986f3bf0c8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/crypt/nv98.c
@@ -26,6 +26,7 @@
26#include <core/enum.h> 26#include <core/enum.h>
27#include <core/class.h> 27#include <core/class.h>
28#include <core/engctx.h> 28#include <core/engctx.h>
29#include <core/falcon.h>
29 30
30#include <subdev/timer.h> 31#include <subdev/timer.h>
31#include <subdev/fb.h> 32#include <subdev/fb.h>
@@ -36,11 +37,7 @@
36#include "fuc/nv98.fuc.h" 37#include "fuc/nv98.fuc.h"
37 38
38struct nv98_crypt_priv { 39struct nv98_crypt_priv {
39 struct nouveau_crypt base; 40 struct nouveau_falcon base;
40};
41
42struct nv98_crypt_chan {
43 struct nouveau_crypt_chan base;
44}; 41};
45 42
46/******************************************************************************* 43/*******************************************************************************
@@ -57,34 +54,16 @@ nv98_crypt_sclass[] = {
57 * PCRYPT context 54 * PCRYPT context
58 ******************************************************************************/ 55 ******************************************************************************/
59 56
60static int
61nv98_crypt_context_ctor(struct nouveau_object *parent,
62 struct nouveau_object *engine,
63 struct nouveau_oclass *oclass, void *data, u32 size,
64 struct nouveau_object **pobject)
65{
66 struct nv98_crypt_chan *priv;
67 int ret;
68
69 ret = nouveau_crypt_context_create(parent, engine, oclass, NULL, 256,
70 256, NVOBJ_FLAG_ZERO_ALLOC, &priv);
71 *pobject = nv_object(priv);
72 if (ret)
73 return ret;
74
75 return 0;
76}
77
78static struct nouveau_oclass 57static struct nouveau_oclass
79nv98_crypt_cclass = { 58nv98_crypt_cclass = {
80 .handle = NV_ENGCTX(CRYPT, 0x98), 59 .handle = NV_ENGCTX(CRYPT, 0x98),
81 .ofuncs = &(struct nouveau_ofuncs) { 60 .ofuncs = &(struct nouveau_ofuncs) {
82 .ctor = nv98_crypt_context_ctor, 61 .ctor = _nouveau_falcon_context_ctor,
83 .dtor = _nouveau_crypt_context_dtor, 62 .dtor = _nouveau_falcon_context_dtor,
84 .init = _nouveau_crypt_context_init, 63 .init = _nouveau_falcon_context_init,
85 .fini = _nouveau_crypt_context_fini, 64 .fini = _nouveau_falcon_context_fini,
86 .rd32 = _nouveau_crypt_context_rd32, 65 .rd32 = _nouveau_falcon_context_rd32,
87 .wr32 = _nouveau_crypt_context_wr32, 66 .wr32 = _nouveau_falcon_context_wr32,
88 }, 67 },
89}; 68};
90 69
@@ -134,7 +113,6 @@ nv98_crypt_intr(struct nouveau_subdev *subdev)
134 nv_wr32(priv, 0x087004, stat); 113 nv_wr32(priv, 0x087004, stat);
135 } 114 }
136 115
137 nv50_fb_trap(nouveau_fb(priv), 1);
138 nouveau_engctx_put(engctx); 116 nouveau_engctx_put(engctx);
139} 117}
140 118
@@ -153,7 +131,8 @@ nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
153 struct nv98_crypt_priv *priv; 131 struct nv98_crypt_priv *priv;
154 int ret; 132 int ret;
155 133
156 ret = nouveau_crypt_create(parent, engine, oclass, &priv); 134 ret = nouveau_falcon_create(parent, engine, oclass, 0x087000, true,
135 "PCRYPT", "crypt", &priv);
157 *pobject = nv_object(priv); 136 *pobject = nv_object(priv);
158 if (ret) 137 if (ret)
159 return ret; 138 return ret;
@@ -163,36 +142,10 @@ nv98_crypt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
163 nv_engine(priv)->cclass = &nv98_crypt_cclass; 142 nv_engine(priv)->cclass = &nv98_crypt_cclass;
164 nv_engine(priv)->sclass = nv98_crypt_sclass; 143 nv_engine(priv)->sclass = nv98_crypt_sclass;
165 nv_engine(priv)->tlb_flush = nv98_crypt_tlb_flush; 144 nv_engine(priv)->tlb_flush = nv98_crypt_tlb_flush;
166 return 0; 145 nv_falcon(priv)->code.data = nv98_pcrypt_code;
167} 146 nv_falcon(priv)->code.size = sizeof(nv98_pcrypt_code);
168 147 nv_falcon(priv)->data.data = nv98_pcrypt_data;
169static int 148 nv_falcon(priv)->data.size = sizeof(nv98_pcrypt_data);
170nv98_crypt_init(struct nouveau_object *object)
171{
172 struct nv98_crypt_priv *priv = (void *)object;
173 int ret, i;
174
175 ret = nouveau_crypt_init(&priv->base);
176 if (ret)
177 return ret;
178
179 /* wait for exit interrupt to signal */
180 nv_wait(priv, 0x087008, 0x00000010, 0x00000010);
181 nv_wr32(priv, 0x087004, 0x00000010);
182
183 /* upload microcode code and data segments */
184 nv_wr32(priv, 0x087ff8, 0x00100000);
185 for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
186 nv_wr32(priv, 0x087ff4, nv98_pcrypt_code[i]);
187
188 nv_wr32(priv, 0x087ff8, 0x00000000);
189 for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
190 nv_wr32(priv, 0x087ff4, nv98_pcrypt_data[i]);
191
192 /* start it running */
193 nv_wr32(priv, 0x08710c, 0x00000000);
194 nv_wr32(priv, 0x087104, 0x00000000); /* ENTRY */
195 nv_wr32(priv, 0x087100, 0x00000002); /* TRIGGER */
196 return 0; 149 return 0;
197} 150}
198 151
@@ -201,8 +154,10 @@ nv98_crypt_oclass = {
201 .handle = NV_ENGINE(CRYPT, 0x98), 154 .handle = NV_ENGINE(CRYPT, 0x98),
202 .ofuncs = &(struct nouveau_ofuncs) { 155 .ofuncs = &(struct nouveau_ofuncs) {
203 .ctor = nv98_crypt_ctor, 156 .ctor = nv98_crypt_ctor,
204 .dtor = _nouveau_crypt_dtor, 157 .dtor = _nouveau_falcon_dtor,
205 .init = nv98_crypt_init, 158 .init = _nouveau_falcon_init,
206 .fini = _nouveau_crypt_fini, 159 .fini = _nouveau_falcon_fini,
160 .rd32 = _nouveau_falcon_rd32,
161 .wr32 = _nouveau_falcon_wr32,
207 }, 162 },
208}; 163};
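
With nv98_crypt_init() gone, the constructor simply hands the falcon base class the ucode tables. The removed init routines all shared one upload shape: select a segment through a control register, then stream 32-bit words through a data port. A compilable sketch of that pattern (mmio_wr32() is a stand-in for nv_wr32(); the register values mirror the deleted nv98 code):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	static void mmio_wr32(uint32_t reg, uint32_t val)
	{
		printf("wr 0x%06x <- 0x%08x\n", reg, val);
	}

	static void upload_segment(uint32_t ctrl, uint32_t port,
				   const uint32_t *words, size_t bytes)
	{
		size_t i;

		mmio_wr32(ctrl, 0x00100000);        /* select the segment */
		for (i = 0; i < bytes / 4; i++)
			mmio_wr32(port, words[i]);  /* auto-incrementing port */
	}

	int main(void)
	{
		static const uint32_t ucode[] = { 0xdeadbeef, 0xcafef00d };

		upload_segment(0x087ff8, 0x087ff4, ucode, sizeof(ucode));
		return 0;
	}
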
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
new file mode 100644
index 000000000000..d0817d94454c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
@@ -0,0 +1,88 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27
28#include <subdev/bios.h>
29#include <subdev/bios/dcb.h>
30#include <subdev/timer.h>
31
32#include "nv50.h"
33
34int
35nv50_dac_power(struct nv50_disp_priv *priv, int or, u32 data)
36{
37 const u32 stat = (data & NV50_DISP_DAC_PWR_HSYNC) |
38 (data & NV50_DISP_DAC_PWR_VSYNC) |
39 (data & NV50_DISP_DAC_PWR_DATA) |
40 (data & NV50_DISP_DAC_PWR_STATE);
41 const u32 doff = (or * 0x800);
42 nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
43 nv_mask(priv, 0x61a004 + doff, 0xc000007f, 0x80000000 | stat);
44 nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
45 return 0;
46}
47
48int
49nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval)
50{
51 const u32 doff = (or * 0x800);
52 int load = -EINVAL;
53 nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval);
54 udelay(9500);
55 nv_wr32(priv, 0x61a00c + doff, 0x80000000);
56 load = (nv_rd32(priv, 0x61a00c + doff) & 0x38000000) >> 27;
57 nv_wr32(priv, 0x61a00c + doff, 0x00000000);
58 return load;
59}
60
61int
62nv50_dac_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
63{
64 struct nv50_disp_priv *priv = (void *)object->engine;
65 const u8 or = (mthd & NV50_DISP_DAC_MTHD_OR);
66 u32 *data = args;
67 int ret;
68
69 if (size < sizeof(u32))
70 return -EINVAL;
71
72 switch (mthd & ~0x3f) {
73 case NV50_DISP_DAC_PWR:
74 ret = priv->dac.power(priv, or, data[0]);
75 break;
76 case NV50_DISP_DAC_LOAD:
77 ret = priv->dac.sense(priv, or, data[0]);
78 if (ret >= 0) {
79 data[0] = ret;
80 ret = 0;
81 }
82 break;
83 default:
84 BUG_ON(1);
85 }
86
87 return ret;
88}
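
One dispatch detail worth spelling out: the method number carries the OR index in its low six bits, so nv50_dac_mthd() masks those off (mthd & ~0x3f) to pick the operation and keeps them to address the output, which is how both NV50_DISP_DAC_PWR and NV50_DISP_DAC_LOAD route through the same handler. Standalone sketch with a hypothetical method value:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t mthd = 0x0482;           /* hypothetical packed method */
		uint32_t outp = mthd & 0x3f;      /* OR index, low six bits */
		uint32_t op   = mthd & ~0x3fu;    /* operation selector */

		printf("method 0x%04x -> op 0x%04x on or %u\n", mthd, op, outp);
		return 0;
	}
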
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
new file mode 100644
index 000000000000..373dbcc523b2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
@@ -0,0 +1,48 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27
28#include "nv50.h"
29
30int
31nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
32{
33 const u32 soff = (or * 0x800);
34 int i;
35
36 if (data && data[0]) {
37 for (i = 0; i < size; i++)
38 nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]);
39 nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003);
40 } else
41 if (data) {
42 nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000001);
43 } else {
44 nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000000);
45 }
46
47 return 0;
48}
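
The ELD loop packs the byte index into bits 15:8 and the data byte into bits 7:0 of each 0x61c440 write, then (presumably) flips the valid/enable bits at 0x61c448 once the buffer is loaded. Sketch of the packing only:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const uint8_t eld[4] = { 0x10, 0x00, 0x08, 0x00 }; /* example */
		uint32_t i;

		for (i = 0; i < sizeof(eld); i++)
			printf("wr 0x%08x\n", (i << 8) | eld[i]); /* idx | byte */
		return 0;
	}
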
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
new file mode 100644
index 000000000000..dc57e24fc1df
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
@@ -0,0 +1,53 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27
28#include <subdev/bios.h>
29#include <subdev/bios/dcb.h>
30#include <subdev/bios/dp.h>
31#include <subdev/bios/init.h>
32
33#include "nv50.h"
34
35int
36nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
37{
38 const u32 soff = (or * 0x030);
39 int i;
40
41 if (data && data[0]) {
42 for (i = 0; i < size; i++)
43 nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]);
44 nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003);
45 } else
46 if (data) {
47 nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000001);
48 } else {
49 nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000000);
50 }
51
52 return 0;
53}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
new file mode 100644
index 000000000000..0d36bdc51417
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminv84.c
@@ -0,0 +1,66 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27
28#include "nv50.h"
29
30int
31nv84_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
32{
33 const u32 hoff = (head * 0x800);
34
35 if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
36 nv_mask(priv, 0x6165a4 + hoff, 0x40000000, 0x00000000);
37 nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
38 nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
39 return 0;
40 }
41
42 /* AVI InfoFrame */
43 nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000000);
44 nv_wr32(priv, 0x616528 + hoff, 0x000d0282);
45 nv_wr32(priv, 0x61652c + hoff, 0x0000006f);
46 nv_wr32(priv, 0x616530 + hoff, 0x00000000);
47 nv_wr32(priv, 0x616534 + hoff, 0x00000000);
48 nv_wr32(priv, 0x616538 + hoff, 0x00000000);
49 nv_mask(priv, 0x616520 + hoff, 0x00000001, 0x00000001);
50
51 /* Audio InfoFrame */
52 nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000000);
53 nv_wr32(priv, 0x616508 + hoff, 0x000a0184);
54 nv_wr32(priv, 0x61650c + hoff, 0x00000071);
55 nv_wr32(priv, 0x616510 + hoff, 0x00000000);
56 nv_mask(priv, 0x616500 + hoff, 0x00000001, 0x00000001);
57
58 /* ??? */
59 nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
60 nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
61 nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
62
63 /* HDMI_CTRL */
64 nv_mask(priv, 0x6165a4 + hoff, 0x5f1f007f, data | 0x1f000000 /* ??? */);
65 return 0;
66}
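
A decoding note on the magic numbers above: 0x000d0282 and 0x000a0184 read as little-endian-packed CEA-861 InfoFrame headers -- type 0x82 (AVI), version 2, length 13, and type 0x84 (Audio), version 1, length 10 -- and the 0x6f / 0x71 words that follow are then exactly the checksum bytes that make header plus payload sum to zero modulo 256 for the all-zero payloads written here. This is my reading, not the patch's, but it checks out:

	#include <stdint.h>
	#include <stdio.h>

	static uint8_t infoframe_csum(uint8_t type, uint8_t ver, uint8_t len,
				      const uint8_t *payload)
	{
		unsigned int sum = type + ver + len;
		int i;

		for (i = 0; i < len; i++)
			sum += payload[i];
		return (uint8_t)(0x100 - (sum & 0xff));
	}

	int main(void)
	{
		uint8_t zeros[13] = { 0 };

		/* AVI: 0x82/0x02/0x0d -> 0x6f, Audio: 0x84/0x01/0x0a -> 0x71 */
		printf("AVI 0x%02x Audio 0x%02x\n",
		       infoframe_csum(0x82, 0x02, 0x0d, zeros),
		       infoframe_csum(0x84, 0x01, 0x0a, zeros));
		return 0;
	}
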
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
new file mode 100644
index 000000000000..f065fc248adf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminva3.c
@@ -0,0 +1,66 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27
28#include "nv50.h"
29
30int
31nva3_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
32{
33 const u32 soff = (or * 0x800);
34
35 if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
36 nv_mask(priv, 0x61c5a4 + soff, 0x40000000, 0x00000000);
37 nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
38 nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
39 return 0;
40 }
41
42 /* AVI InfoFrame */
43 nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000000);
44 nv_wr32(priv, 0x61c528 + soff, 0x000d0282);
45 nv_wr32(priv, 0x61c52c + soff, 0x0000006f);
46 nv_wr32(priv, 0x61c530 + soff, 0x00000000);
47 nv_wr32(priv, 0x61c534 + soff, 0x00000000);
48 nv_wr32(priv, 0x61c538 + soff, 0x00000000);
49 nv_mask(priv, 0x61c520 + soff, 0x00000001, 0x00000001);
50
51 /* Audio InfoFrame */
52 nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000000);
53 nv_wr32(priv, 0x61c508 + soff, 0x000a0184);
54 nv_wr32(priv, 0x61c50c + soff, 0x00000071);
55 nv_wr32(priv, 0x61c510 + soff, 0x00000000);
56 nv_mask(priv, 0x61c500 + soff, 0x00000001, 0x00000001);
57
58 /* ??? */
59 nv_mask(priv, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
60 nv_mask(priv, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
61 nv_mask(priv, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
62
63 /* HDMI_CTRL */
64 nv_mask(priv, 0x61c5a4 + soff, 0x5f1f007f, data | 0x1f000000 /* ??? */);
65 return 0;
66}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
new file mode 100644
index 000000000000..5151bb261832
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdminvd0.c
@@ -0,0 +1,62 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27
28#include "nv50.h"
29
30int
31nvd0_hdmi_ctrl(struct nv50_disp_priv *priv, int head, int or, u32 data)
32{
33 const u32 hoff = (head * 0x800);
34
35 if (!(data & NV84_DISP_SOR_HDMI_PWR_STATE_ON)) {
36 nv_mask(priv, 0x616798 + hoff, 0x40000000, 0x00000000);
37 nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
38 nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
39 return 0;
40 }
41
42 /* AVI InfoFrame */
43 nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000000);
44 nv_wr32(priv, 0x61671c + hoff, 0x000d0282);
45 nv_wr32(priv, 0x616720 + hoff, 0x0000006f);
46 nv_wr32(priv, 0x616724 + hoff, 0x00000000);
47 nv_wr32(priv, 0x616728 + hoff, 0x00000000);
48 nv_wr32(priv, 0x61672c + hoff, 0x00000000);
49 nv_mask(priv, 0x616714 + hoff, 0x00000001, 0x00000001);
50
51 /* ??? InfoFrame? */
52 nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000000);
53 nv_wr32(priv, 0x6167ac + hoff, 0x00000010);
54 nv_mask(priv, 0x6167a4 + hoff, 0x00000001, 0x00000001);
55
56 /* HDMI_CTRL */
57 nv_mask(priv, 0x616798 + hoff, 0x401f007f, data);
58
59 /* NFI, audio doesn't work without it though.. */
60 nv_mask(priv, 0x616548 + hoff, 0x00000070, 0x00000000);
61 return 0;
62}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 15b182c84ce8..0f09af135415 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -22,20 +22,740 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <subdev/bar.h> 25#include <core/object.h>
26#include <core/parent.h>
27#include <core/handle.h>
28#include <core/class.h>
26 29
27#include <engine/software.h> 30#include <engine/software.h>
28#include <engine/disp.h> 31#include <engine/disp.h>
29 32
30struct nv50_disp_priv { 33#include <subdev/bios.h>
31 struct nouveau_disp base; 34#include <subdev/bios/dcb.h>
35#include <subdev/bios/disp.h>
36#include <subdev/bios/init.h>
37#include <subdev/bios/pll.h>
38#include <subdev/timer.h>
39#include <subdev/fb.h>
40#include <subdev/bar.h>
41#include <subdev/clock.h>
42
43#include "nv50.h"
44
45/*******************************************************************************
46 * EVO channel base class
47 ******************************************************************************/
48
49int
50nv50_disp_chan_create_(struct nouveau_object *parent,
51 struct nouveau_object *engine,
52 struct nouveau_oclass *oclass, int chid,
53 int length, void **pobject)
54{
55 struct nv50_disp_base *base = (void *)parent;
56 struct nv50_disp_chan *chan;
57 int ret;
58
59 if (base->chan & (1 << chid))
60 return -EBUSY;
61 base->chan |= (1 << chid);
62
63 ret = nouveau_namedb_create_(parent, engine, oclass, 0, NULL,
64 (1ULL << NVDEV_ENGINE_DMAOBJ),
65 length, pobject);
66 chan = *pobject;
67 if (ret)
68 return ret;
69
70 chan->chid = chid;
71 return 0;
72}
73
74void
75nv50_disp_chan_destroy(struct nv50_disp_chan *chan)
76{
77 struct nv50_disp_base *base = (void *)nv_object(chan)->parent;
78 base->chan &= ~(1 << chan->chid);
79 nouveau_namedb_destroy(&chan->base);
80}
81
82u32
83nv50_disp_chan_rd32(struct nouveau_object *object, u64 addr)
84{
85 struct nv50_disp_priv *priv = (void *)object->engine;
86 struct nv50_disp_chan *chan = (void *)object;
87 return nv_rd32(priv, 0x640000 + (chan->chid * 0x1000) + addr);
88}
89
90void
91nv50_disp_chan_wr32(struct nouveau_object *object, u64 addr, u32 data)
92{
93 struct nv50_disp_priv *priv = (void *)object->engine;
94 struct nv50_disp_chan *chan = (void *)object;
95 nv_wr32(priv, 0x640000 + (chan->chid * 0x1000) + addr, data);
96}
97
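
The rd32/wr32 hooks above give each channel a private 4KiB register window at 0x640000; user offsets are simply relative to that window (my inference from the arithmetic):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t chan_reg(int chid, uint32_t addr)
	{
		return 0x640000 + (chid * 0x1000) + addr; /* 4KiB per chid */
	}

	int main(void)
	{
		printf("chid 1, offset 0x80 -> 0x%06x\n", chan_reg(1, 0x80));
		return 0;
	}
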
98/*******************************************************************************
99 * EVO DMA channel base class
100 ******************************************************************************/
101
102static int
103nv50_disp_dmac_object_attach(struct nouveau_object *parent,
104 struct nouveau_object *object, u32 name)
105{
106 struct nv50_disp_base *base = (void *)parent->parent;
107 struct nv50_disp_chan *chan = (void *)parent;
108 u32 addr = nv_gpuobj(object)->node->offset;
109 u32 chid = chan->chid;
110 u32 data = (chid << 28) | (addr << 10) | chid;
111 return nouveau_ramht_insert(base->ramht, chid, name, data);
112}
113
114static void
115nv50_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
116{
117 struct nv50_disp_base *base = (void *)parent->parent;
118 nouveau_ramht_remove(base->ramht, cookie);
119}
120
121int
122nv50_disp_dmac_create_(struct nouveau_object *parent,
123 struct nouveau_object *engine,
124 struct nouveau_oclass *oclass, u32 pushbuf, int chid,
125 int length, void **pobject)
126{
127 struct nv50_disp_dmac *dmac;
128 int ret;
129
130 ret = nv50_disp_chan_create_(parent, engine, oclass, chid,
131 length, pobject);
132 dmac = *pobject;
133 if (ret)
134 return ret;
135
136 dmac->pushdma = (void *)nouveau_handle_ref(parent, pushbuf);
137 if (!dmac->pushdma)
138 return -ENOENT;
139
140 switch (nv_mclass(dmac->pushdma)) {
141 case 0x0002:
142 case 0x003d:
143 if (dmac->pushdma->limit - dmac->pushdma->start != 0xfff)
144 return -EINVAL;
145
146 switch (dmac->pushdma->target) {
147 case NV_MEM_TARGET_VRAM:
148 dmac->push = 0x00000000 | dmac->pushdma->start >> 8;
149 break;
150 case NV_MEM_TARGET_PCI_NOSNOOP:
151 dmac->push = 0x00000003 | dmac->pushdma->start >> 8;
152 break;
153 default:
154 return -EINVAL;
155 }
156 break;
157 default:
158 return -EINVAL;
159 }
160
161 return 0;
162}
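
So the constructor insists on an exactly 4KiB push buffer (limit - start == 0xfff) and folds the placement into one control word: a target code in the low bits, the 256-byte-aligned base above it. My reading of that packing, standalone:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t push_word(uint64_t start, int pci_nosnoop)
	{
		uint32_t target = pci_nosnoop ? 0x00000003 : 0x00000000;

		return target | (uint32_t)(start >> 8); /* 256-byte units */
	}

	int main(void)
	{
		printf("vram pushbuf at 0x10000 -> 0x%08x\n",
		       push_word(0x10000, 0));
		return 0;
	}
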
163
164void
165nv50_disp_dmac_dtor(struct nouveau_object *object)
166{
167 struct nv50_disp_dmac *dmac = (void *)object;
168 nouveau_object_ref(NULL, (struct nouveau_object **)&dmac->pushdma);
169 nv50_disp_chan_destroy(&dmac->base);
170}
171
172static int
173nv50_disp_dmac_init(struct nouveau_object *object)
174{
175 struct nv50_disp_priv *priv = (void *)object->engine;
176 struct nv50_disp_dmac *dmac = (void *)object;
177 int chid = dmac->base.chid;
178 int ret;
179
180 ret = nv50_disp_chan_init(&dmac->base);
181 if (ret)
182 return ret;
183
184 /* enable error reporting */
185 nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00010001 << chid);
186
187 /* initialise channel for dma command submission */
188 nv_wr32(priv, 0x610204 + (chid * 0x0010), dmac->push);
189 nv_wr32(priv, 0x610208 + (chid * 0x0010), 0x00010000);
190 nv_wr32(priv, 0x61020c + (chid * 0x0010), chid);
191 nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000010, 0x00000010);
192 nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
193 nv_wr32(priv, 0x610200 + (chid * 0x0010), 0x00000013);
194
195 /* wait for it to go inactive */
196 if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x80000000, 0x00000000)) {
197 nv_error(dmac, "init timeout, 0x%08x\n",
198 nv_rd32(priv, 0x610200 + (chid * 0x10)));
199 return -EBUSY;
200 }
201
202 return 0;
203}
204
205static int
206nv50_disp_dmac_fini(struct nouveau_object *object, bool suspend)
207{
208 struct nv50_disp_priv *priv = (void *)object->engine;
209 struct nv50_disp_dmac *dmac = (void *)object;
210 int chid = dmac->base.chid;
211
212 /* deactivate channel */
213 nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00001010, 0x00001000);
214 nv_mask(priv, 0x610200 + (chid * 0x0010), 0x00000003, 0x00000000);
215 if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x001e0000, 0x00000000)) {
216 nv_error(dmac, "fini timeout, 0x%08x\n",
217 nv_rd32(priv, 0x610200 + (chid * 0x10)));
218 if (suspend)
219 return -EBUSY;
220 }
221
222 /* disable error reporting */
223 nv_mask(priv, 0x610028, 0x00010001 << chid, 0x00000000 << chid);
224
225 return nv50_disp_chan_fini(&dmac->base, suspend);
226}
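
Both init and fini are write-then-poll handshakes; nv_wait() evidently spins until (reg & mask) == value or a timeout expires. A user-space analogue of that helper, with an assumed register reader:

	#include <stdbool.h>
	#include <stdint.h>

	static uint32_t fake_rd32(uint32_t reg) { (void)reg; return 0; }

	static bool wait_mask(uint32_t reg, uint32_t mask, uint32_t value,
			      int tries)
	{
		while (tries--) {
			if ((fake_rd32(reg) & mask) == value)
				return true;
			/* the real helper sleeps/delays between reads */
		}
		return false;
	}

	int main(void)
	{
		/* e.g. wait for the channel busy bit (bit 31) to clear */
		return wait_mask(0x610200, 0x80000000, 0x00000000, 1000) ? 0 : 1;
	}
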
227
228/*******************************************************************************
229 * EVO master channel object
230 ******************************************************************************/
231
232static int
233nv50_disp_mast_ctor(struct nouveau_object *parent,
234 struct nouveau_object *engine,
235 struct nouveau_oclass *oclass, void *data, u32 size,
236 struct nouveau_object **pobject)
237{
238 struct nv50_display_mast_class *args = data;
239 struct nv50_disp_dmac *mast;
240 int ret;
241
242 if (size < sizeof(*args))
243 return -EINVAL;
244
245 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
246 0, sizeof(*mast), (void **)&mast);
247 *pobject = nv_object(mast);
248 if (ret)
249 return ret;
250
251 nv_parent(mast)->object_attach = nv50_disp_dmac_object_attach;
252 nv_parent(mast)->object_detach = nv50_disp_dmac_object_detach;
253 return 0;
254}
255
256static int
257nv50_disp_mast_init(struct nouveau_object *object)
258{
259 struct nv50_disp_priv *priv = (void *)object->engine;
260 struct nv50_disp_dmac *mast = (void *)object;
261 int ret;
262
263 ret = nv50_disp_chan_init(&mast->base);
264 if (ret)
265 return ret;
266
267 /* enable error reporting */
268 nv_mask(priv, 0x610028, 0x00010001, 0x00010001);
269
270 /* attempt to unstick channel from some unknown state */
271 if ((nv_rd32(priv, 0x610200) & 0x009f0000) == 0x00020000)
272 nv_mask(priv, 0x610200, 0x00800000, 0x00800000);
273 if ((nv_rd32(priv, 0x610200) & 0x003f0000) == 0x00030000)
274 nv_mask(priv, 0x610200, 0x00600000, 0x00600000);
275
276 /* initialise channel for dma command submission */
277 nv_wr32(priv, 0x610204, mast->push);
278 nv_wr32(priv, 0x610208, 0x00010000);
279 nv_wr32(priv, 0x61020c, 0x00000000);
280 nv_mask(priv, 0x610200, 0x00000010, 0x00000010);
281 nv_wr32(priv, 0x640000, 0x00000000);
282 nv_wr32(priv, 0x610200, 0x01000013);
283
284 /* wait for it to go inactive */
285 if (!nv_wait(priv, 0x610200, 0x80000000, 0x00000000)) {
286 nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610200));
287 return -EBUSY;
288 }
289
290 return 0;
291}
292
293static int
294nv50_disp_mast_fini(struct nouveau_object *object, bool suspend)
295{
296 struct nv50_disp_priv *priv = (void *)object->engine;
297 struct nv50_disp_dmac *mast = (void *)object;
298
299 /* deactivate channel */
300 nv_mask(priv, 0x610200, 0x00000010, 0x00000000);
301 nv_mask(priv, 0x610200, 0x00000003, 0x00000000);
302 if (!nv_wait(priv, 0x610200, 0x001e0000, 0x00000000)) {
303 nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610200));
304 if (suspend)
305 return -EBUSY;
306 }
307
308 /* disable error reporting */
309 nv_mask(priv, 0x610028, 0x00010001, 0x00000000);
310
311 return nv50_disp_chan_fini(&mast->base, suspend);
312}
313
314struct nouveau_ofuncs
315nv50_disp_mast_ofuncs = {
316 .ctor = nv50_disp_mast_ctor,
317 .dtor = nv50_disp_dmac_dtor,
318 .init = nv50_disp_mast_init,
319 .fini = nv50_disp_mast_fini,
320 .rd32 = nv50_disp_chan_rd32,
321 .wr32 = nv50_disp_chan_wr32,
322};
323
324/*******************************************************************************
325 * EVO sync channel objects
326 ******************************************************************************/
327
328static int
329nv50_disp_sync_ctor(struct nouveau_object *parent,
330 struct nouveau_object *engine,
331 struct nouveau_oclass *oclass, void *data, u32 size,
332 struct nouveau_object **pobject)
333{
334 struct nv50_display_sync_class *args = data;
335 struct nv50_disp_dmac *dmac;
336 int ret;
337
 338	if (size < sizeof(*args) || args->head > 1)
339 return -EINVAL;
340
341 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
342 1 + args->head, sizeof(*dmac),
343 (void **)&dmac);
344 *pobject = nv_object(dmac);
345 if (ret)
346 return ret;
347
348 nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach;
349 nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach;
350 return 0;
351}
352
353struct nouveau_ofuncs
354nv50_disp_sync_ofuncs = {
355 .ctor = nv50_disp_sync_ctor,
356 .dtor = nv50_disp_dmac_dtor,
357 .init = nv50_disp_dmac_init,
358 .fini = nv50_disp_dmac_fini,
359 .rd32 = nv50_disp_chan_rd32,
360 .wr32 = nv50_disp_chan_wr32,
361};
362
363/*******************************************************************************
364 * EVO overlay channel objects
365 ******************************************************************************/
366
367static int
368nv50_disp_ovly_ctor(struct nouveau_object *parent,
369 struct nouveau_object *engine,
370 struct nouveau_oclass *oclass, void *data, u32 size,
371 struct nouveau_object **pobject)
372{
373 struct nv50_display_ovly_class *args = data;
374 struct nv50_disp_dmac *dmac;
375 int ret;
376
 377	if (size < sizeof(*args) || args->head > 1)
378 return -EINVAL;
379
380 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
381 3 + args->head, sizeof(*dmac),
382 (void **)&dmac);
383 *pobject = nv_object(dmac);
384 if (ret)
385 return ret;
386
387 nv_parent(dmac)->object_attach = nv50_disp_dmac_object_attach;
388 nv_parent(dmac)->object_detach = nv50_disp_dmac_object_detach;
389 return 0;
390}
391
392struct nouveau_ofuncs
393nv50_disp_ovly_ofuncs = {
394 .ctor = nv50_disp_ovly_ctor,
395 .dtor = nv50_disp_dmac_dtor,
396 .init = nv50_disp_dmac_init,
397 .fini = nv50_disp_dmac_fini,
398 .rd32 = nv50_disp_chan_rd32,
399 .wr32 = nv50_disp_chan_wr32,
400};
401
402/*******************************************************************************
403 * EVO PIO channel base class
404 ******************************************************************************/
405
406static int
407nv50_disp_pioc_create_(struct nouveau_object *parent,
408 struct nouveau_object *engine,
409 struct nouveau_oclass *oclass, int chid,
410 int length, void **pobject)
411{
412 return nv50_disp_chan_create_(parent, engine, oclass, chid,
413 length, pobject);
414}
415
416static void
417nv50_disp_pioc_dtor(struct nouveau_object *object)
418{
419 struct nv50_disp_pioc *pioc = (void *)object;
420 nv50_disp_chan_destroy(&pioc->base);
421}
422
423static int
424nv50_disp_pioc_init(struct nouveau_object *object)
425{
426 struct nv50_disp_priv *priv = (void *)object->engine;
427 struct nv50_disp_pioc *pioc = (void *)object;
428 int chid = pioc->base.chid;
429 int ret;
430
431 ret = nv50_disp_chan_init(&pioc->base);
432 if (ret)
433 return ret;
434
435 nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00002000);
436 if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00000000, 0x00000000)) {
437 nv_error(pioc, "timeout0: 0x%08x\n",
438 nv_rd32(priv, 0x610200 + (chid * 0x10)));
439 return -EBUSY;
440 }
441
442 nv_wr32(priv, 0x610200 + (chid * 0x10), 0x00000001);
443 if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00010000)) {
444 nv_error(pioc, "timeout1: 0x%08x\n",
445 nv_rd32(priv, 0x610200 + (chid * 0x10)));
446 return -EBUSY;
447 }
448
449 return 0;
450}
451
452static int
453nv50_disp_pioc_fini(struct nouveau_object *object, bool suspend)
454{
455 struct nv50_disp_priv *priv = (void *)object->engine;
456 struct nv50_disp_pioc *pioc = (void *)object;
457 int chid = pioc->base.chid;
458
459 nv_mask(priv, 0x610200 + (chid * 0x10), 0x00000001, 0x00000000);
460 if (!nv_wait(priv, 0x610200 + (chid * 0x10), 0x00030000, 0x00000000)) {
461 nv_error(pioc, "timeout: 0x%08x\n",
462 nv_rd32(priv, 0x610200 + (chid * 0x10)));
463 if (suspend)
464 return -EBUSY;
465 }
466
467 return nv50_disp_chan_fini(&pioc->base, suspend);
468}
469
470/*******************************************************************************
471 * EVO immediate overlay channel objects
472 ******************************************************************************/
473
474static int
475nv50_disp_oimm_ctor(struct nouveau_object *parent,
476 struct nouveau_object *engine,
477 struct nouveau_oclass *oclass, void *data, u32 size,
478 struct nouveau_object **pobject)
479{
480 struct nv50_display_oimm_class *args = data;
481 struct nv50_disp_pioc *pioc;
482 int ret;
483
484 if (size < sizeof(*args) || args->head > 1)
485 return -EINVAL;
486
487 ret = nv50_disp_pioc_create_(parent, engine, oclass, 5 + args->head,
488 sizeof(*pioc), (void **)&pioc);
489 *pobject = nv_object(pioc);
490 if (ret)
491 return ret;
492
493 return 0;
494}
495
496struct nouveau_ofuncs
497nv50_disp_oimm_ofuncs = {
498 .ctor = nv50_disp_oimm_ctor,
499 .dtor = nv50_disp_pioc_dtor,
500 .init = nv50_disp_pioc_init,
501 .fini = nv50_disp_pioc_fini,
502 .rd32 = nv50_disp_chan_rd32,
503 .wr32 = nv50_disp_chan_wr32,
504};
505
506/*******************************************************************************
507 * EVO cursor channel objects
508 ******************************************************************************/
509
510static int
511nv50_disp_curs_ctor(struct nouveau_object *parent,
512 struct nouveau_object *engine,
513 struct nouveau_oclass *oclass, void *data, u32 size,
514 struct nouveau_object **pobject)
515{
516 struct nv50_display_curs_class *args = data;
517 struct nv50_disp_pioc *pioc;
518 int ret;
519
520 if (size < sizeof(*args) || args->head > 1)
521 return -EINVAL;
522
523 ret = nv50_disp_pioc_create_(parent, engine, oclass, 7 + args->head,
524 sizeof(*pioc), (void **)&pioc);
525 *pobject = nv_object(pioc);
526 if (ret)
527 return ret;
528
529 return 0;
530}
531
532struct nouveau_ofuncs
533nv50_disp_curs_ofuncs = {
534 .ctor = nv50_disp_curs_ctor,
535 .dtor = nv50_disp_pioc_dtor,
536 .init = nv50_disp_pioc_init,
537 .fini = nv50_disp_pioc_fini,
538 .rd32 = nv50_disp_chan_rd32,
539 .wr32 = nv50_disp_chan_wr32,
540};
541
542/*******************************************************************************
543 * Base display object
544 ******************************************************************************/
545
546static int
547nv50_disp_base_ctor(struct nouveau_object *parent,
548 struct nouveau_object *engine,
549 struct nouveau_oclass *oclass, void *data, u32 size,
550 struct nouveau_object **pobject)
551{
552 struct nv50_disp_priv *priv = (void *)engine;
553 struct nv50_disp_base *base;
554 int ret;
555
556 ret = nouveau_parent_create(parent, engine, oclass, 0,
557 priv->sclass, 0, &base);
558 *pobject = nv_object(base);
559 if (ret)
560 return ret;
561
562 return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
563}
564
565static void
566nv50_disp_base_dtor(struct nouveau_object *object)
567{
568 struct nv50_disp_base *base = (void *)object;
569 nouveau_ramht_ref(NULL, &base->ramht);
570 nouveau_parent_destroy(&base->base);
571}
572
573static int
574nv50_disp_base_init(struct nouveau_object *object)
575{
576 struct nv50_disp_priv *priv = (void *)object->engine;
577 struct nv50_disp_base *base = (void *)object;
578 int ret, i;
579 u32 tmp;
580
581 ret = nouveau_parent_init(&base->base);
582 if (ret)
583 return ret;
584
585 /* The below segments of code copying values from one register to
586 * another appear to inform EVO of the display capabilities or
587 * something similar. NFI what the 0x614004 caps are for..
588 */
589 tmp = nv_rd32(priv, 0x614004);
590 nv_wr32(priv, 0x610184, tmp);
591
592 /* ... CRTC caps */
593 for (i = 0; i < priv->head.nr; i++) {
594 tmp = nv_rd32(priv, 0x616100 + (i * 0x800));
595 nv_wr32(priv, 0x610190 + (i * 0x10), tmp);
596 tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
597 nv_wr32(priv, 0x610194 + (i * 0x10), tmp);
598 tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
599 nv_wr32(priv, 0x610198 + (i * 0x10), tmp);
600 tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
601 nv_wr32(priv, 0x61019c + (i * 0x10), tmp);
602 }
603
604 /* ... DAC caps */
605 for (i = 0; i < priv->dac.nr; i++) {
606 tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
607 nv_wr32(priv, 0x6101d0 + (i * 0x04), tmp);
608 }
609
610 /* ... SOR caps */
611 for (i = 0; i < priv->sor.nr; i++) {
612 tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
613 nv_wr32(priv, 0x6101e0 + (i * 0x04), tmp);
614 }
615
616 /* ... EXT caps */
617 for (i = 0; i < 3; i++) {
618 tmp = nv_rd32(priv, 0x61e000 + (i * 0x800));
619 nv_wr32(priv, 0x6101f0 + (i * 0x04), tmp);
620 }
621
622 /* steal display away from vbios, or something like that */
623 if (nv_rd32(priv, 0x610024) & 0x00000100) {
624 nv_wr32(priv, 0x610024, 0x00000100);
625 nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
626 if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
627 nv_error(priv, "timeout acquiring display\n");
628 return -EBUSY;
629 }
630 }
631
632 /* point at display engine memory area (hash table, objects) */
633 nv_wr32(priv, 0x610010, (nv_gpuobj(base->ramht)->addr >> 8) | 9);
634
635 /* enable supervisor interrupts, disable everything else */
636 nv_wr32(priv, 0x61002c, 0x00000370);
637 nv_wr32(priv, 0x610028, 0x00000000);
638 return 0;
639}
640
641static int
642nv50_disp_base_fini(struct nouveau_object *object, bool suspend)
643{
644 struct nv50_disp_priv *priv = (void *)object->engine;
645 struct nv50_disp_base *base = (void *)object;
646
647 /* disable all interrupts */
648 nv_wr32(priv, 0x610024, 0x00000000);
649 nv_wr32(priv, 0x610020, 0x00000000);
650
651 return nouveau_parent_fini(&base->base, suspend);
652}
653
654struct nouveau_ofuncs
655nv50_disp_base_ofuncs = {
656 .ctor = nv50_disp_base_ctor,
657 .dtor = nv50_disp_base_dtor,
658 .init = nv50_disp_base_init,
659 .fini = nv50_disp_base_fini,
660};
661
662static struct nouveau_omthds
663nv50_disp_base_omthds[] = {
664 { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
665 { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
666 { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
667 { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
668 {},
669};
670
671static struct nouveau_oclass
672nv50_disp_base_oclass[] = {
673 { NV50_DISP_CLASS, &nv50_disp_base_ofuncs, nv50_disp_base_omthds },
674 {}
32}; 675};
33 676
34static struct nouveau_oclass 677static struct nouveau_oclass
35nv50_disp_sclass[] = { 678nv50_disp_sclass[] = {
36 {}, 679 { NV50_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
680 { NV50_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
681 { NV50_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
682 { NV50_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
683 { NV50_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
684 {}
685};
686
687/*******************************************************************************
688 * Display context, tracks instmem allocation and prevents more than one
689 * client using the display hardware at any time.
690 ******************************************************************************/
691
692static int
693nv50_disp_data_ctor(struct nouveau_object *parent,
694 struct nouveau_object *engine,
695 struct nouveau_oclass *oclass, void *data, u32 size,
696 struct nouveau_object **pobject)
697{
698 struct nv50_disp_priv *priv = (void *)engine;
699 struct nouveau_engctx *ectx;
700 int ret = -EBUSY;
701
702 /* no context needed for channel objects... */
703 if (nv_mclass(parent) != NV_DEVICE_CLASS) {
704 atomic_inc(&parent->refcount);
705 *pobject = parent;
706 return 0;
707 }
708
709 /* allocate display hardware to client */
710 mutex_lock(&nv_subdev(priv)->mutex);
711 if (list_empty(&nv_engine(priv)->contexts)) {
712 ret = nouveau_engctx_create(parent, engine, oclass, NULL,
713 0x10000, 0x10000,
714 NVOBJ_FLAG_HEAP, &ectx);
715 *pobject = nv_object(ectx);
716 }
717 mutex_unlock(&nv_subdev(priv)->mutex);
718 return ret;
719}
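
The trick above: the engine context doubles as an ownership token, so only the first opener finds the context list empty and gets the display; later clients keep the preset -EBUSY. The same gate in miniature:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static int owners; /* stands in for the engine's context list */

	static int disp_open(void)
	{
		int ret = -16; /* -EBUSY */

		pthread_mutex_lock(&lock);
		if (owners == 0) { /* list_empty(&engine->contexts) */
			owners++;
			ret = 0;   /* first client wins the hardware */
		}
		pthread_mutex_unlock(&lock);
		return ret;
	}

	int main(void)
	{
		printf("first %d, second %d\n", disp_open(), disp_open());
		return 0;
	}
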
720
721struct nouveau_oclass
722nv50_disp_cclass = {
723 .handle = NV_ENGCTX(DISP, 0x50),
724 .ofuncs = &(struct nouveau_ofuncs) {
725 .ctor = nv50_disp_data_ctor,
726 .dtor = _nouveau_engctx_dtor,
727 .init = _nouveau_engctx_init,
728 .fini = _nouveau_engctx_fini,
729 .rd32 = _nouveau_engctx_rd32,
730 .wr32 = _nouveau_engctx_wr32,
731 },
37}; 732};
38 733
734/*******************************************************************************
735 * Display engine implementation
736 ******************************************************************************/
737
738static void
739nv50_disp_intr_error(struct nv50_disp_priv *priv)
740{
741 u32 channels = (nv_rd32(priv, 0x610020) & 0x001f0000) >> 16;
742 u32 addr, data;
743 int chid;
744
745 for (chid = 0; chid < 5; chid++) {
746 if (!(channels & (1 << chid)))
747 continue;
748
749 nv_wr32(priv, 0x610020, 0x00010000 << chid);
750 addr = nv_rd32(priv, 0x610080 + (chid * 0x08));
751 data = nv_rd32(priv, 0x610084 + (chid * 0x08));
752 nv_wr32(priv, 0x610080 + (chid * 0x08), 0x90000000);
753
754 nv_error(priv, "chid %d mthd 0x%04x data 0x%08x 0x%08x\n",
755 chid, addr & 0xffc, data, addr);
756 }
757}
758
39static void 759static void
40nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc) 760nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
41{ 761{
@@ -80,30 +800,422 @@ nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
80 disp->vblank.notify(disp->vblank.data, crtc); 800 disp->vblank.notify(disp->vblank.data, crtc);
81} 801}
82 802
803static u16
804exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
805 struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
806 struct nvbios_outp *info)
807{
808 struct nouveau_bios *bios = nouveau_bios(priv);
809 u16 mask, type, data;
810
811 if (outp < 4) {
812 type = DCB_OUTPUT_ANALOG;
813 mask = 0;
814 } else {
815 outp -= 4;
816 switch (ctrl & 0x00000f00) {
817 case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
818 case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
819 case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
820 case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
821 case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
822 case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
823 default:
824 nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
825 return 0x0000;
826 }
827 }
828
829 mask = 0x00c0 & (mask << 6);
830 mask |= 0x0001 << outp;
831 mask |= 0x0100 << head;
832
833 data = dcb_outp_match(bios, type, mask, ver, hdr, dcb);
834 if (!data)
835 return 0x0000;
836
837 return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info);
838}
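
exec_lookup() assembles its DCB match word as: output index in the low bits, link selection in bits 7:6, head in bits 9:8 (my reading of the shifts above). Reproduced standalone:

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t dcb_match_mask(int outp, int link_mask, int head)
	{
		uint16_t mask = 0x00c0 & (link_mask << 6); /* sublink(s) */

		mask |= 0x0001 << outp;                    /* which output */
		mask |= 0x0100 << head;                    /* which head */
		return mask;
	}

	int main(void)
	{
		printf("outp 2, link 1, head 0 -> 0x%04x\n",
		       dcb_match_mask(2, 1, 0));
		return 0;
	}
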
839
840static bool
841exec_script(struct nv50_disp_priv *priv, int head, int id)
842{
843 struct nouveau_bios *bios = nouveau_bios(priv);
844 struct nvbios_outp info;
845 struct dcb_output dcb;
846 u8 ver, hdr, cnt, len;
847 u16 data;
848 u32 ctrl = 0x00000000;
849 int i;
850
851 for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
852 ctrl = nv_rd32(priv, 0x610b5c + (i * 8));
853
854 if (nv_device(priv)->chipset < 0x90 ||
855 nv_device(priv)->chipset == 0x92 ||
856 nv_device(priv)->chipset == 0xa0) {
857 for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
858 ctrl = nv_rd32(priv, 0x610b74 + (i * 8));
859 i += 3;
860 } else {
861 for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
862 ctrl = nv_rd32(priv, 0x610798 + (i * 8));
863 i += 3;
864 }
865
866 if (!(ctrl & (1 << head)))
867 return false;
868
869 data = exec_lookup(priv, head, i, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
870 if (data) {
871 struct nvbios_init init = {
872 .subdev = nv_subdev(priv),
873 .bios = bios,
874 .offset = info.script[id],
875 .outp = &dcb,
876 .crtc = head,
877 .execute = 1,
878 };
879
880 return nvbios_exec(&init) == 0;
881 }
882
883 return false;
884}
885
886static u32
887exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
888 struct dcb_output *outp)
889{
890 struct nouveau_bios *bios = nouveau_bios(priv);
891 struct nvbios_outp info1;
892 struct nvbios_ocfg info2;
893 u8 ver, hdr, cnt, len;
894 u16 data, conf;
895 u32 ctrl = 0x00000000;
896 int i;
897
898 for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
899 ctrl = nv_rd32(priv, 0x610b58 + (i * 8));
900
901 if (nv_device(priv)->chipset < 0x90 ||
902 nv_device(priv)->chipset == 0x92 ||
903 nv_device(priv)->chipset == 0xa0) {
904 for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
905 ctrl = nv_rd32(priv, 0x610b70 + (i * 8));
906 i += 3;
907 } else {
908 for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
909 ctrl = nv_rd32(priv, 0x610794 + (i * 8));
910 i += 3;
911 }
912
913 if (!(ctrl & (1 << head)))
914 return 0x0000;
915
916 data = exec_lookup(priv, head, i, ctrl, outp, &ver, &hdr, &cnt, &len, &info1);
917 if (!data)
918 return 0x0000;
919
920 switch (outp->type) {
921 case DCB_OUTPUT_TMDS:
922 conf = (ctrl & 0x00000f00) >> 8;
923 if (pclk >= 165000)
924 conf |= 0x0100;
925 break;
926 case DCB_OUTPUT_LVDS:
927 conf = priv->sor.lvdsconf;
928 break;
929 case DCB_OUTPUT_DP:
930 conf = (ctrl & 0x00000f00) >> 8;
931 break;
932 case DCB_OUTPUT_ANALOG:
933 default:
934 conf = 0x00ff;
935 break;
936 }
937
938 data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
939 if (data) {
940 data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
941 if (data) {
942 struct nvbios_init init = {
943 .subdev = nv_subdev(priv),
944 .bios = bios,
945 .offset = data,
946 .outp = outp,
947 .crtc = head,
948 .execute = 1,
949 };
950
951 if (nvbios_exec(&init))
952 return 0x0000;
953 return conf;
954 }
955 }
956
957 return 0x0000;
958}
959
960static void
961nv50_disp_intr_unk10(struct nv50_disp_priv *priv, u32 super)
962{
963 int head = ffs((super & 0x00000060) >> 5) - 1;
964 if (head >= 0) {
965 head = ffs((super & 0x00000180) >> 7) - 1;
966 if (head >= 0)
967 exec_script(priv, head, 1);
968 }
969
970 nv_wr32(priv, 0x610024, 0x00000010);
971 nv_wr32(priv, 0x610030, 0x80000000);
972}
973
974static void
975nv50_disp_intr_unk20_dp(struct nv50_disp_priv *priv,
976 struct dcb_output *outp, u32 pclk)
977{
978 const int link = !(outp->sorconf.link & 1);
979 const int or = ffs(outp->or) - 1;
980 const u32 soff = ( or * 0x800);
981 const u32 loff = (link * 0x080) + soff;
982 const u32 ctrl = nv_rd32(priv, 0x610794 + (or * 8));
983 const u32 symbol = 100000;
984 u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x000f0000;
985 u32 clksor = nv_rd32(priv, 0x614300 + soff);
986 int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
987 int TU, VTUi, VTUf, VTUa;
988 u64 link_data_rate, link_ratio, unk;
989 u32 best_diff = 64 * symbol;
990 u32 link_nr, link_bw, bits, r;
991
992 /* calculate packed data rate for each lane */
993 if (dpctrl > 0x00030000) link_nr = 4;
994 else if (dpctrl > 0x00010000) link_nr = 2;
995 else link_nr = 1;
996
997 if (clksor & 0x000c0000)
998 link_bw = 270000;
999 else
1000 link_bw = 162000;
1001
1002 if ((ctrl & 0xf0000) == 0x60000) bits = 30;
1003 else if ((ctrl & 0xf0000) == 0x50000) bits = 24;
1004 else bits = 18;
1005
1006 link_data_rate = (pclk * bits / 8) / link_nr;
1007
1008 /* calculate ratio of packed data rate to link symbol rate */
1009 link_ratio = link_data_rate * symbol;
1010 r = do_div(link_ratio, link_bw);
1011
1012 for (TU = 64; TU >= 32; TU--) {
1013 /* calculate average number of valid symbols in each TU */
1014 u32 tu_valid = link_ratio * TU;
1015 u32 calc, diff;
1016
1017 /* find a hw representation for the fraction.. */
1018 VTUi = tu_valid / symbol;
1019 calc = VTUi * symbol;
1020 diff = tu_valid - calc;
1021 if (diff) {
1022 if (diff >= (symbol / 2)) {
1023 VTUf = symbol / (symbol - diff);
1024 if (symbol - (VTUf * diff))
1025 VTUf++;
1026
1027 if (VTUf <= 15) {
1028 VTUa = 1;
1029 calc += symbol - (symbol / VTUf);
1030 } else {
1031 VTUa = 0;
1032 VTUf = 1;
1033 calc += symbol;
1034 }
1035 } else {
1036 VTUa = 0;
1037 VTUf = min((int)(symbol / diff), 15);
1038 calc += symbol / VTUf;
1039 }
1040
1041 diff = calc - tu_valid;
1042 } else {
1043 /* no remainder, but the hw doesn't like the fractional
1044 * part to be zero. decrement the integer part and
1045 * have the fraction add a whole symbol back
1046 */
1047 VTUa = 0;
1048 VTUf = 1;
1049 VTUi--;
1050 }
1051
1052 if (diff < best_diff) {
1053 best_diff = diff;
1054 bestTU = TU;
1055 bestVTUa = VTUa;
1056 bestVTUf = VTUf;
1057 bestVTUi = VTUi;
1058 if (diff == 0)
1059 break;
1060 }
1061 }
1062
1063 if (!bestTU) {
1064 nv_error(priv, "unable to find suitable dp config\n");
1065 return;
1066 }
1067
1068 /* XXX close to vbios numbers, but not right */
1069 unk = (symbol - link_ratio) * bestTU;
1070 unk *= link_ratio;
1071 r = do_div(unk, symbol);
1072 r = do_div(unk, symbol);
1073 unk += 6;
1074
1075 nv_mask(priv, 0x61c10c + loff, 0x000001fc, bestTU << 2);
1076 nv_mask(priv, 0x61c128 + loff, 0x010f7f3f, bestVTUa << 24 |
1077 bestVTUf << 16 |
1078 bestVTUi << 8 | unk);
1079}
1080
1081static void
1082nv50_disp_intr_unk20(struct nv50_disp_priv *priv, u32 super)
1083{
1084 struct dcb_output outp;
1085 u32 addr, mask, data;
1086 int head;
1087
1088 /* finish detaching encoder? */
1089 head = ffs((super & 0x00000180) >> 7) - 1;
1090 if (head >= 0)
1091 exec_script(priv, head, 2);
1092
1093 /* check whether a vpll change is required */
1094 head = ffs((super & 0x00000600) >> 9) - 1;
1095 if (head >= 0) {
1096 u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
1097 if (pclk) {
1098 struct nouveau_clock *clk = nouveau_clock(priv);
1099 clk->pll_set(clk, PLL_VPLL0 + head, pclk);
1100 }
1101
1102 nv_mask(priv, 0x614200 + head * 0x800, 0x0000000f, 0x00000000);
1103 }
1104
1105 /* (re)attach the relevant OR to the head */
1106 head = ffs((super & 0x00000180) >> 7) - 1;
1107 if (head >= 0) {
1108 u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
1109 u32 conf = exec_clkcmp(priv, head, 0, pclk, &outp);
1110 if (conf) {
1111 if (outp.type == DCB_OUTPUT_ANALOG) {
1112 addr = 0x614280 + (ffs(outp.or) - 1) * 0x800;
1113 mask = 0xffffffff;
1114 data = 0x00000000;
1115 } else {
1116 if (outp.type == DCB_OUTPUT_DP)
1117 nv50_disp_intr_unk20_dp(priv, &outp, pclk);
1118 addr = 0x614300 + (ffs(outp.or) - 1) * 0x800;
1119 mask = 0x00000707;
1120 data = (conf & 0x0100) ? 0x0101 : 0x0000;
1121 }
1122
1123 nv_mask(priv, addr, mask, data);
1124 }
1125 }
1126
1127 nv_wr32(priv, 0x610024, 0x00000020);
1128 nv_wr32(priv, 0x610030, 0x80000000);
1129}
1130
1131/* If programming a TMDS output on a SOR that can also be configured for
1132 * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
1133 *
1134 * It looks like the VBIOS TMDS scripts make an attempt at this, however,
1135 * the VBIOS scripts on at least one board I have only switch it off on
1136 * link 0, causing a blank display if the output has previously been
1137 * programmed for DisplayPort.
1138 */
1139static void
1140nv50_disp_intr_unk40_tmds(struct nv50_disp_priv *priv, struct dcb_output *outp)
1141{
1142 struct nouveau_bios *bios = nouveau_bios(priv);
1143 const int link = !(outp->sorconf.link & 1);
1144 const int or = ffs(outp->or) - 1;
1145 const u32 loff = (or * 0x800) + (link * 0x80);
1146 const u16 mask = (outp->sorconf.link << 6) | outp->or;
1147 u8 ver, hdr;
1148
1149 if (dcb_outp_match(bios, DCB_OUTPUT_DP, mask, &ver, &hdr, outp))
1150 nv_mask(priv, 0x61c10c + loff, 0x00000001, 0x00000000);
1151}
1152
1153static void
1154nv50_disp_intr_unk40(struct nv50_disp_priv *priv, u32 super)
1155{
1156 int head = ffs((super & 0x00000180) >> 7) - 1;
1157 if (head >= 0) {
1158 struct dcb_output outp;
1159 u32 pclk = nv_rd32(priv, 0x610ad0 + (head * 0x540)) & 0x3fffff;
1160 if (pclk && exec_clkcmp(priv, head, 1, pclk, &outp)) {
1161 if (outp.type == DCB_OUTPUT_TMDS)
1162 nv50_disp_intr_unk40_tmds(priv, &outp);
1163 }
1164 }
1165
1166 nv_wr32(priv, 0x610024, 0x00000040);
1167 nv_wr32(priv, 0x610030, 0x80000000);
1168}
1169
1170static void
1171nv50_disp_intr_super(struct nv50_disp_priv *priv, u32 intr1)
1172{
1173 u32 super = nv_rd32(priv, 0x610030);
1174
1175 nv_debug(priv, "supervisor 0x%08x 0x%08x\n", intr1, super);
1176
1177 if (intr1 & 0x00000010)
1178 nv50_disp_intr_unk10(priv, super);
1179 if (intr1 & 0x00000020)
1180 nv50_disp_intr_unk20(priv, super);
1181 if (intr1 & 0x00000040)
1182 nv50_disp_intr_unk40(priv, super);
1183}
1184
1185void
1186nv50_disp_intr(struct nouveau_subdev *subdev)
1187{
1188 struct nv50_disp_priv *priv = (void *)subdev;
87 u32 stat1 = nv_rd32(priv, 0x610024);
1189 u32 intr0 = nv_rd32(priv, 0x610020);
1190 u32 intr1 = nv_rd32(priv, 0x610024);
1191
89 if (stat1 & 0x00000004) {
1192 if (intr0 & 0x001f0000) {
1193 nv50_disp_intr_error(priv);
1194 intr0 &= ~0x001f0000;
1195 }
1196
1197 if (intr1 & 0x00000004) {
1198 nv50_disp_intr_vblank(priv, 0);
1199 nv_wr32(priv, 0x610024, 0x00000004);
92 stat1 &= ~0x00000004;
1200 intr1 &= ~0x00000004;
1201 }
1202
95 if (stat1 & 0x00000008) {
1203 if (intr1 & 0x00000008) {
1204 nv50_disp_intr_vblank(priv, 1);
1205 nv_wr32(priv, 0x610024, 0x00000008);
98 stat1 &= ~0x00000008;
1206 intr1 &= ~0x00000008;
1207 }
1208
1209 if (intr1 & 0x00000070) {
1210 nv50_disp_intr_super(priv, intr1);
1211 intr1 &= ~0x00000070;
1212 }
1213}
1214
1215static int
1216nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1217 struct nouveau_oclass *oclass, void *data, u32 size,
1218 struct nouveau_object **pobject)
1219{
1220 struct nv50_disp_priv *priv;
1221 int ret;
@@ -114,8 +1226,16 @@ nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
1226 if (ret)
1227 return ret;
1228
117 nv_engine(priv)->sclass = nv50_disp_sclass;
1229 nv_engine(priv)->sclass = nv50_disp_base_oclass;
1230 nv_engine(priv)->cclass = &nv50_disp_cclass;
1231 nv_subdev(priv)->intr = nv50_disp_intr;
1232 priv->sclass = nv50_disp_sclass;
1233 priv->head.nr = 2;
1234 priv->dac.nr = 3;
1235 priv->sor.nr = 2;
1236 priv->dac.power = nv50_dac_power;
1237 priv->dac.sense = nv50_dac_sense;
1238 priv->sor.power = nv50_sor_power;
1239
1240 INIT_LIST_HEAD(&priv->base.vblank.list);
1241 spin_lock_init(&priv->base.vblank.lock);
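Note: nv50_disp_intr_unk20_dp() above searches TU = 64..32 for the
(TU, VTUi, VTUf, VTUa) encoding whose fixed-point fraction best tracks
the computed link ratio. Below is a minimal userspace sketch of that
search, with do_div() replaced by plain 64-bit division; the sample
link parameters (148.5MHz pixel clock, 24bpp, 4 lanes at 2.7Gbps) are
illustrative assumptions, not values taken from the patch.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t symbol = 100000;             /* fixed-point scale */
	const uint32_t pclk = 148500;               /* pixel clock, kHz */
	const uint32_t bits = 24;                   /* bits per pixel */
	const uint32_t link_nr = 4, link_bw = 270000;
	uint64_t data_rate = (uint64_t)pclk * bits / 8 / link_nr;
	uint32_t ratio = data_rate * symbol / link_bw;
	uint32_t best_diff = 64 * symbol;
	int TU, bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;

	for (TU = 64; TU >= 32; TU--) {
		uint32_t tu_valid = ratio * TU;     /* valid symbols per TU */
		uint32_t VTUi = tu_valid / symbol;  /* integer part */
		uint32_t calc = VTUi * symbol;
		uint32_t diff = tu_valid - calc;
		uint32_t VTUf, VTUa;

		if (diff) {
			if (diff >= symbol / 2) {
				/* fraction ~= 1 - 1/VTUf, carried via VTUa */
				VTUf = symbol / (symbol - diff);
				if (symbol - VTUf * diff)
					VTUf++;
				if (VTUf <= 15) {
					VTUa = 1;
					calc += symbol - symbol / VTUf;
				} else {
					VTUa = 0;
					VTUf = 1;
					calc += symbol;
				}
			} else {
				/* fraction ~= 1/VTUf */
				VTUa = 0;
				VTUf = symbol / diff;
				if (VTUf > 15)
					VTUf = 15;
				calc += symbol / VTUf;
			}
			diff = calc - tu_valid; /* may wrap; such candidates lose */
		} else {
			/* exact fit: hw dislikes a zero fraction, so drop
			 * the integer part by one instead */
			VTUa = 0;
			VTUf = 1;
			VTUi--;
		}

		if (diff < best_diff) {
			best_diff = diff;
			bestTU = TU;
			bestVTUi = VTUi;
			bestVTUf = VTUf;
			bestVTUa = VTUa;
			if (diff == 0)
				break;
		}
	}

	printf("TU %d VTUi %d VTUf %d VTUa %d (err %u/%u)\n",
	       bestTU, bestVTUi, bestVTUf, bestVTUa, best_diff, symbol);
	return 0;
}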
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
new file mode 100644
index 000000000000..a6bb931450f1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
@@ -0,0 +1,142 @@
1#ifndef __NV50_DISP_H__
2#define __NV50_DISP_H__
3
4#include <core/parent.h>
5#include <core/namedb.h>
6#include <core/ramht.h>
7
8#include <engine/dmaobj.h>
9#include <engine/disp.h>
10
11struct dcb_output;
12
13struct nv50_disp_priv {
14 struct nouveau_disp base;
15 struct nouveau_oclass *sclass;
16 struct {
17 int nr;
18 } head;
19 struct {
20 int nr;
21 int (*power)(struct nv50_disp_priv *, int dac, u32 data);
22 int (*sense)(struct nv50_disp_priv *, int dac, u32 load);
23 } dac;
24 struct {
25 int nr;
26 int (*power)(struct nv50_disp_priv *, int sor, u32 data);
27 int (*hda_eld)(struct nv50_disp_priv *, int sor, u8 *, u32);
28 int (*hdmi)(struct nv50_disp_priv *, int head, int sor, u32);
29 int (*dp_train_init)(struct nv50_disp_priv *, int sor, int link,
30 int head, u16 type, u16 mask, u32 data,
31 struct dcb_output *);
32 int (*dp_train_fini)(struct nv50_disp_priv *, int sor, int link,
33 int head, u16 type, u16 mask, u32 data,
34 struct dcb_output *);
35 int (*dp_train)(struct nv50_disp_priv *, int sor, int link,
36 u16 type, u16 mask, u32 data,
37 struct dcb_output *);
38 int (*dp_lnkctl)(struct nv50_disp_priv *, int sor, int link,
39 int head, u16 type, u16 mask, u32 data,
40 struct dcb_output *);
41 int (*dp_drvctl)(struct nv50_disp_priv *, int sor, int link,
42 int lane, u16 type, u16 mask, u32 data,
43 struct dcb_output *);
44 u32 lvdsconf;
45 } sor;
46};
47
48#define DAC_MTHD(n) (n), (n) + 0x03
49
50int nv50_dac_mthd(struct nouveau_object *, u32, void *, u32);
51int nv50_dac_power(struct nv50_disp_priv *, int, u32);
52int nv50_dac_sense(struct nv50_disp_priv *, int, u32);
53
54#define SOR_MTHD(n) (n), (n) + 0x3f
55
56int nva3_hda_eld(struct nv50_disp_priv *, int, u8 *, u32);
57int nvd0_hda_eld(struct nv50_disp_priv *, int, u8 *, u32);
58
59int nv84_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
60int nva3_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
61int nvd0_hdmi_ctrl(struct nv50_disp_priv *, int, int, u32);
62
63int nv50_sor_mthd(struct nouveau_object *, u32, void *, u32);
64int nv50_sor_power(struct nv50_disp_priv *, int, u32);
65
66int nv94_sor_dp_train_init(struct nv50_disp_priv *, int, int, int, u16, u16,
67 u32, struct dcb_output *);
68int nv94_sor_dp_train_fini(struct nv50_disp_priv *, int, int, int, u16, u16,
69 u32, struct dcb_output *);
70int nv94_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32,
71 struct dcb_output *);
72int nv94_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
73 struct dcb_output *);
74int nv94_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
75 struct dcb_output *);
76
77int nvd0_sor_dp_train(struct nv50_disp_priv *, int, int, u16, u16, u32,
78 struct dcb_output *);
79int nvd0_sor_dp_lnkctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
80 struct dcb_output *);
81int nvd0_sor_dp_drvctl(struct nv50_disp_priv *, int, int, int, u16, u16, u32,
82 struct dcb_output *);
83
84struct nv50_disp_base {
85 struct nouveau_parent base;
86 struct nouveau_ramht *ramht;
87 u32 chan;
88};
89
90struct nv50_disp_chan {
91 struct nouveau_namedb base;
92 int chid;
93};
94
95int nv50_disp_chan_create_(struct nouveau_object *, struct nouveau_object *,
96 struct nouveau_oclass *, int, int, void **);
97void nv50_disp_chan_destroy(struct nv50_disp_chan *);
98u32 nv50_disp_chan_rd32(struct nouveau_object *, u64);
99void nv50_disp_chan_wr32(struct nouveau_object *, u64, u32);
100
101#define nv50_disp_chan_init(a) \
102 nouveau_namedb_init(&(a)->base)
103#define nv50_disp_chan_fini(a,b) \
104 nouveau_namedb_fini(&(a)->base, (b))
105
106int nv50_disp_dmac_create_(struct nouveau_object *, struct nouveau_object *,
107 struct nouveau_oclass *, u32, int, int, void **);
108void nv50_disp_dmac_dtor(struct nouveau_object *);
109
110struct nv50_disp_dmac {
111 struct nv50_disp_chan base;
112 struct nouveau_dmaobj *pushdma;
113 u32 push;
114};
115
116struct nv50_disp_pioc {
117 struct nv50_disp_chan base;
118};
119
120extern struct nouveau_ofuncs nv50_disp_mast_ofuncs;
121extern struct nouveau_ofuncs nv50_disp_sync_ofuncs;
122extern struct nouveau_ofuncs nv50_disp_ovly_ofuncs;
123extern struct nouveau_ofuncs nv50_disp_oimm_ofuncs;
124extern struct nouveau_ofuncs nv50_disp_curs_ofuncs;
125extern struct nouveau_ofuncs nv50_disp_base_ofuncs;
126extern struct nouveau_oclass nv50_disp_cclass;
127void nv50_disp_intr(struct nouveau_subdev *);
128
129extern struct nouveau_omthds nv84_disp_base_omthds[];
130
131extern struct nouveau_omthds nva3_disp_base_omthds[];
132
133extern struct nouveau_ofuncs nvd0_disp_mast_ofuncs;
134extern struct nouveau_ofuncs nvd0_disp_sync_ofuncs;
135extern struct nouveau_ofuncs nvd0_disp_ovly_ofuncs;
136extern struct nouveau_ofuncs nvd0_disp_oimm_ofuncs;
137extern struct nouveau_ofuncs nvd0_disp_curs_ofuncs;
138extern struct nouveau_ofuncs nvd0_disp_base_ofuncs;
139extern struct nouveau_oclass nvd0_disp_cclass;
140void nvd0_disp_intr(struct nouveau_subdev *);
141
142#endif
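Note: the function pointers in struct nv50_disp_priv are the only
chipset-specific surface here; method handlers are expected to test a
hook before calling it, so variants that never fill one in (e.g. no
.hdmi on plain nv50) can reject the request cleanly. A self-contained
sketch of that dispatch pattern follows; the pared-down structure and
the -ENODEV policy are assumptions for illustration, not the driver's
actual handler code.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct disp_priv {
	struct {
		int nr;                                   /* number of SORs */
		int (*hdmi)(struct disp_priv *, int head, int sor, uint32_t);
	} sor;
};

static int
example_hdmi_ctrl(struct disp_priv *priv, int head, int sor, uint32_t data)
{
	printf("sor %d head %d hdmi 0x%08x\n", sor, head, data);
	return 0;
}

static int
sor_hdmi_pwr(struct disp_priv *priv, int head, int sor, uint32_t data)
{
	if (sor >= priv->sor.nr)
		return -EINVAL;
	if (!priv->sor.hdmi)        /* chipset without an HDMI hook */
		return -ENODEV;
	return priv->sor.hdmi(priv, head, sor, data);
}

int main(void)
{
	struct disp_priv priv = { .sor = { .nr = 2, .hdmi = example_hdmi_ctrl } };
	return sor_hdmi_pwr(&priv, 0, 1, 0x40000000);
}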
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
new file mode 100644
index 000000000000..fc84eacdfbec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
@@ -0,0 +1,98 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <engine/software.h>
26#include <engine/disp.h>
27
28#include <core/class.h>
29
30#include "nv50.h"
31
32static struct nouveau_oclass
33nv84_disp_sclass[] = {
34 { NV84_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
35 { NV84_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
36 { NV84_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
37 { NV84_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
38 { NV84_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
39 {}
40};
41
42struct nouveau_omthds
43nv84_disp_base_omthds[] = {
44 { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
45 { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
46 { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
47 { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
48 { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
49 {},
50};
51
52static struct nouveau_oclass
53nv84_disp_base_oclass[] = {
54 { NV84_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds },
55 {}
56};
57
58static int
59nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
60 struct nouveau_oclass *oclass, void *data, u32 size,
61 struct nouveau_object **pobject)
62{
63 struct nv50_disp_priv *priv;
64 int ret;
65
66 ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
67 "display", &priv);
68 *pobject = nv_object(priv);
69 if (ret)
70 return ret;
71
72 nv_engine(priv)->sclass = nv84_disp_base_oclass;
73 nv_engine(priv)->cclass = &nv50_disp_cclass;
74 nv_subdev(priv)->intr = nv50_disp_intr;
75 priv->sclass = nv84_disp_sclass;
76 priv->head.nr = 2;
77 priv->dac.nr = 3;
78 priv->sor.nr = 2;
79 priv->dac.power = nv50_dac_power;
80 priv->dac.sense = nv50_dac_sense;
81 priv->sor.power = nv50_sor_power;
82 priv->sor.hdmi = nv84_hdmi_ctrl;
83
84 INIT_LIST_HEAD(&priv->base.vblank.list);
85 spin_lock_init(&priv->base.vblank.lock);
86 return 0;
87}
88
89struct nouveau_oclass
90nv84_disp_oclass = {
91 .handle = NV_ENGINE(DISP, 0x82),
92 .ofuncs = &(struct nouveau_ofuncs) {
93 .ctor = nv84_disp_ctor,
94 .dtor = _nouveau_disp_dtor,
95 .init = _nouveau_disp_init,
96 .fini = _nouveau_disp_fini,
97 },
98};
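Note: SOR_MTHD()/DAC_MTHD() from nv50.h expand to a (start, limit)
pair, so each omthds entry above claims a whole per-output method
window (0x40 methods per SOR, 0x04 per DAC) rather than a single
method. A minimal sketch of how such a range table can be searched;
the field names and the lookup loop are assumptions modelled on the
table layout, not the nouveau core's actual implementation.

#include <errno.h>
#include <stdio.h>

struct omthds {
	unsigned start, limit;
	int (*call)(unsigned mthd, void *args);
};

#define SOR_MTHD(n) (n), (n) + 0x3f     /* one 0x40-method window */
#define DAC_MTHD(n) (n), (n) + 0x03     /* one 0x04-method window */

static int sor_mthd(unsigned mthd, void *args)
{
	printf("SOR method 0x%04x\n", mthd);
	return 0;
}

static const struct omthds table[] = {
	{ SOR_MTHD(0x0600), sor_mthd },  /* hypothetical method base */
	{ 0 }
};

static int exec_mthd(unsigned mthd, void *args)
{
	const struct omthds *m;

	for (m = table; m->call; m++) {
		if (mthd >= m->start && mthd <= m->limit)
			return m->call(mthd, args);
	}
	return -EINVAL;
}

int main(void)
{
	return exec_mthd(0x0620, NULL);  /* lands inside the SOR window */
}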
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
new file mode 100644
index 000000000000..ba9dfd4669a2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
@@ -0,0 +1,109 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <engine/software.h>
26#include <engine/disp.h>
27
28#include <core/class.h>
29
30#include "nv50.h"
31
32static struct nouveau_oclass
33nv94_disp_sclass[] = {
34 { NV94_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
35 { NV94_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
36 { NV94_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
37 { NV94_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
38 { NV94_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
39 {}
40};
41
42static struct nouveau_omthds
43nv94_disp_base_omthds[] = {
44 { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
45 { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
46 { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
47 { SOR_MTHD(NV94_DISP_SOR_DP_TRAIN) , nv50_sor_mthd },
48 { SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL) , nv50_sor_mthd },
49 { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd },
50 { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd },
51 { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd },
52 { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd },
53 { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
54 { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
55 {},
56};
57
58static struct nouveau_oclass
59nv94_disp_base_oclass[] = {
60 { NV94_DISP_CLASS, &nv50_disp_base_ofuncs, nv94_disp_base_omthds },
61 {}
62};
63
64static int
65nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
66 struct nouveau_oclass *oclass, void *data, u32 size,
67 struct nouveau_object **pobject)
68{
69 struct nv50_disp_priv *priv;
70 int ret;
71
72 ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
73 "display", &priv);
74 *pobject = nv_object(priv);
75 if (ret)
76 return ret;
77
78 nv_engine(priv)->sclass = nv94_disp_base_oclass;
79 nv_engine(priv)->cclass = &nv50_disp_cclass;
80 nv_subdev(priv)->intr = nv50_disp_intr;
81 priv->sclass = nv94_disp_sclass;
82 priv->head.nr = 2;
83 priv->dac.nr = 3;
84 priv->sor.nr = 4;
85 priv->dac.power = nv50_dac_power;
86 priv->dac.sense = nv50_dac_sense;
87 priv->sor.power = nv50_sor_power;
88 priv->sor.hdmi = nv84_hdmi_ctrl;
89 priv->sor.dp_train = nv94_sor_dp_train;
90 priv->sor.dp_train_init = nv94_sor_dp_train_init;
91 priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
92 priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl;
93 priv->sor.dp_drvctl = nv94_sor_dp_drvctl;
94
95 INIT_LIST_HEAD(&priv->base.vblank.list);
96 spin_lock_init(&priv->base.vblank.lock);
97 return 0;
98}
99
100struct nouveau_oclass
101nv94_disp_oclass = {
102 .handle = NV_ENGINE(DISP, 0x88),
103 .ofuncs = &(struct nouveau_ofuncs) {
104 .ctor = nv94_disp_ctor,
105 .dtor = _nouveau_disp_dtor,
106 .init = _nouveau_disp_init,
107 .fini = _nouveau_disp_fini,
108 },
109};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
new file mode 100644
index 000000000000..5d63902cdeda
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
@@ -0,0 +1,88 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <engine/software.h>
26#include <engine/disp.h>
27
28#include <core/class.h>
29
30#include "nv50.h"
31
32static struct nouveau_oclass
33nva0_disp_sclass[] = {
34 { NVA0_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
35 { NVA0_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
36 { NVA0_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
37 { NVA0_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
38 { NVA0_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
39 {}
40};
41
42static struct nouveau_oclass
43nva0_disp_base_oclass[] = {
44 { NVA0_DISP_CLASS, &nv50_disp_base_ofuncs, nv84_disp_base_omthds },
45 {}
46};
47
48static int
49nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
50 struct nouveau_oclass *oclass, void *data, u32 size,
51 struct nouveau_object **pobject)
52{
53 struct nv50_disp_priv *priv;
54 int ret;
55
56 ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
57 "display", &priv);
58 *pobject = nv_object(priv);
59 if (ret)
60 return ret;
61
62 nv_engine(priv)->sclass = nva0_disp_base_oclass;
63 nv_engine(priv)->cclass = &nv50_disp_cclass;
64 nv_subdev(priv)->intr = nv50_disp_intr;
65 priv->sclass = nva0_disp_sclass;
66 priv->head.nr = 2;
67 priv->dac.nr = 3;
68 priv->sor.nr = 2;
69 priv->dac.power = nv50_dac_power;
70 priv->dac.sense = nv50_dac_sense;
71 priv->sor.power = nv50_sor_power;
72 priv->sor.hdmi = nv84_hdmi_ctrl;
73
74 INIT_LIST_HEAD(&priv->base.vblank.list);
75 spin_lock_init(&priv->base.vblank.lock);
76 return 0;
77}
78
79struct nouveau_oclass
80nva0_disp_oclass = {
81 .handle = NV_ENGINE(DISP, 0x83),
82 .ofuncs = &(struct nouveau_ofuncs) {
83 .ctor = nva0_disp_ctor,
84 .dtor = _nouveau_disp_dtor,
85 .init = _nouveau_disp_init,
86 .fini = _nouveau_disp_fini,
87 },
88};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
new file mode 100644
index 000000000000..e9192ca389fa
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
@@ -0,0 +1,111 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <engine/software.h>
26#include <engine/disp.h>
27
28#include <core/class.h>
29
30#include "nv50.h"
31
32static struct nouveau_oclass
33nva3_disp_sclass[] = {
34 { NVA3_DISP_MAST_CLASS, &nv50_disp_mast_ofuncs },
35 { NVA3_DISP_SYNC_CLASS, &nv50_disp_sync_ofuncs },
36 { NVA3_DISP_OVLY_CLASS, &nv50_disp_ovly_ofuncs },
37 { NVA3_DISP_OIMM_CLASS, &nv50_disp_oimm_ofuncs },
38 { NVA3_DISP_CURS_CLASS, &nv50_disp_curs_ofuncs },
39 {}
40};
41
42struct nouveau_omthds
43nva3_disp_base_omthds[] = {
44 { SOR_MTHD(NV50_DISP_SOR_PWR) , nv50_sor_mthd },
45 { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD) , nv50_sor_mthd },
46 { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR) , nv50_sor_mthd },
47 { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT) , nv50_sor_mthd },
48 { SOR_MTHD(NV94_DISP_SOR_DP_TRAIN) , nv50_sor_mthd },
49 { SOR_MTHD(NV94_DISP_SOR_DP_LNKCTL) , nv50_sor_mthd },
50 { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(0)), nv50_sor_mthd },
51 { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(1)), nv50_sor_mthd },
52 { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(2)), nv50_sor_mthd },
53 { SOR_MTHD(NV94_DISP_SOR_DP_DRVCTL(3)), nv50_sor_mthd },
54 { DAC_MTHD(NV50_DISP_DAC_PWR) , nv50_dac_mthd },
55 { DAC_MTHD(NV50_DISP_DAC_LOAD) , nv50_dac_mthd },
56 {},
57};
58
59static struct nouveau_oclass
60nva3_disp_base_oclass[] = {
61 { NVA3_DISP_CLASS, &nv50_disp_base_ofuncs, nva3_disp_base_omthds },
62 {}
63};
64
65static int
66nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
67 struct nouveau_oclass *oclass, void *data, u32 size,
68 struct nouveau_object **pobject)
69{
70 struct nv50_disp_priv *priv;
71 int ret;
72
73 ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
74 "display", &priv);
75 *pobject = nv_object(priv);
76 if (ret)
77 return ret;
78
79 nv_engine(priv)->sclass = nva3_disp_base_oclass;
80 nv_engine(priv)->cclass = &nv50_disp_cclass;
81 nv_subdev(priv)->intr = nv50_disp_intr;
82 priv->sclass = nva3_disp_sclass;
83 priv->head.nr = 2;
84 priv->dac.nr = 3;
85 priv->sor.nr = 4;
86 priv->dac.power = nv50_dac_power;
87 priv->dac.sense = nv50_dac_sense;
88 priv->sor.power = nv50_sor_power;
89 priv->sor.hda_eld = nva3_hda_eld;
90 priv->sor.hdmi = nva3_hdmi_ctrl;
91 priv->sor.dp_train = nv94_sor_dp_train;
92 priv->sor.dp_train_init = nv94_sor_dp_train_init;
93 priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
94 priv->sor.dp_lnkctl = nv94_sor_dp_lnkctl;
95 priv->sor.dp_drvctl = nv94_sor_dp_drvctl;
96
97 INIT_LIST_HEAD(&priv->base.vblank.list);
98 spin_lock_init(&priv->base.vblank.lock);
99 return 0;
100}
101
102struct nouveau_oclass
103nva3_disp_oclass = {
104 .handle = NV_ENGINE(DISP, 0x85),
105 .ofuncs = &(struct nouveau_ofuncs) {
106 .ctor = nva3_disp_ctor,
107 .dtor = _nouveau_disp_dtor,
108 .init = _nouveau_disp_init,
109 .fini = _nouveau_disp_fini,
110 },
111};
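Note: none of these ctors select themselves; each nouveau_oclass is
bound to a chipset in the per-family device tables. The lookup below
sketches that selection as a standalone mapping; the chipset groupings
are illustrative guesses inferred from the 0x82/0x83/0x85/0x88 handles
above, so treat the exact sets as assumptions rather than the device
table's real contents.

#include <stdio.h>

static const char *disp_impl(unsigned chipset)
{
	switch (chipset) {
	case 0x50:
		return "nv50_disp_oclass";
	case 0x84: case 0x86: case 0x92:
		return "nv84_disp_oclass";
	case 0x94: case 0x96: case 0x98:
		return "nv94_disp_oclass";
	case 0xa0:
		return "nva0_disp_oclass";
	case 0xa3: case 0xa5: case 0xa8:
		return "nva3_disp_oclass";
	default:
		return "unknown";
	}
}

int main(void)
{
	printf("0x94 -> %s\n", disp_impl(0x94));
	return 0;
}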
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index d93efbcf75b8..9e38ebff5fb3 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -22,22 +22,808 @@
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/bar.h>
25#include <core/object.h>
26#include <core/parent.h>
27#include <core/handle.h>
28#include <core/class.h>
29
30#include <engine/software.h>
31#include <engine/disp.h>
32
30struct nvd0_disp_priv {
31 struct nouveau_disp base;
33#include <subdev/timer.h>
34#include <subdev/fb.h>
35#include <subdev/bar.h>
36#include <subdev/clock.h>
37
38#include <subdev/bios.h>
39#include <subdev/bios/dcb.h>
40#include <subdev/bios/disp.h>
41#include <subdev/bios/init.h>
42#include <subdev/bios/pll.h>
43
44#include "nv50.h"
45
46/*******************************************************************************
47 * EVO DMA channel base class
48 ******************************************************************************/
49
50static int
51nvd0_disp_dmac_object_attach(struct nouveau_object *parent,
52 struct nouveau_object *object, u32 name)
53{
54 struct nv50_disp_base *base = (void *)parent->parent;
55 struct nv50_disp_chan *chan = (void *)parent;
56 u32 addr = nv_gpuobj(object)->node->offset;
57 u32 data = (chan->chid << 27) | (addr << 9) | 0x00000001;
58 return nouveau_ramht_insert(base->ramht, chan->chid, name, data);
59}
60
61static void
62nvd0_disp_dmac_object_detach(struct nouveau_object *parent, int cookie)
63{
64 struct nv50_disp_base *base = (void *)parent->parent;
65 nouveau_ramht_remove(base->ramht, cookie);
66}
67
68static int
69nvd0_disp_dmac_init(struct nouveau_object *object)
70{
71 struct nv50_disp_priv *priv = (void *)object->engine;
72 struct nv50_disp_dmac *dmac = (void *)object;
73 int chid = dmac->base.chid;
74 int ret;
75
76 ret = nv50_disp_chan_init(&dmac->base);
77 if (ret)
78 return ret;
79
80 /* enable error reporting */
81 nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid);
82 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
83
84 /* initialise channel for dma command submission */
85 nv_wr32(priv, 0x610494 + (chid * 0x0010), dmac->push);
86 nv_wr32(priv, 0x610498 + (chid * 0x0010), 0x00010000);
87 nv_wr32(priv, 0x61049c + (chid * 0x0010), 0x00000001);
88 nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
89 nv_wr32(priv, 0x640000 + (chid * 0x1000), 0x00000000);
90 nv_wr32(priv, 0x610490 + (chid * 0x0010), 0x00000013);
91
92 /* wait for it to go inactive */
93 if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x80000000, 0x00000000)) {
94 nv_error(dmac, "init: 0x%08x\n",
95 nv_rd32(priv, 0x610490 + (chid * 0x10)));
96 return -EBUSY;
97 }
98
99 return 0;
100}
101
102static int
103nvd0_disp_dmac_fini(struct nouveau_object *object, bool suspend)
104{
105 struct nv50_disp_priv *priv = (void *)object->engine;
106 struct nv50_disp_dmac *dmac = (void *)object;
107 int chid = dmac->base.chid;
108
109 /* deactivate channel */
110 nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00001010, 0x00001000);
111 nv_mask(priv, 0x610490 + (chid * 0x0010), 0x00000003, 0x00000000);
112 if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x001e0000, 0x00000000)) {
113 nv_error(dmac, "fini: 0x%08x\n",
114 nv_rd32(priv, 0x610490 + (chid * 0x10)));
115 if (suspend)
116 return -EBUSY;
117 }
118
119 /* disable error reporting */
120 nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
121 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
122
123 return nv50_disp_chan_fini(&dmac->base, suspend);
124}
125
126/*******************************************************************************
127 * EVO master channel object
128 ******************************************************************************/
129
130static int
131nvd0_disp_mast_ctor(struct nouveau_object *parent,
132 struct nouveau_object *engine,
133 struct nouveau_oclass *oclass, void *data, u32 size,
134 struct nouveau_object **pobject)
135{
136 struct nv50_display_mast_class *args = data;
137 struct nv50_disp_dmac *mast;
138 int ret;
139
140 if (size < sizeof(*args))
141 return -EINVAL;
142
143 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
144 0, sizeof(*mast), (void **)&mast);
145 *pobject = nv_object(mast);
146 if (ret)
147 return ret;
148
149 nv_parent(mast)->object_attach = nvd0_disp_dmac_object_attach;
150 nv_parent(mast)->object_detach = nvd0_disp_dmac_object_detach;
151 return 0;
152}
153
154static int
155nvd0_disp_mast_init(struct nouveau_object *object)
156{
157 struct nv50_disp_priv *priv = (void *)object->engine;
158 struct nv50_disp_dmac *mast = (void *)object;
159 int ret;
160
161 ret = nv50_disp_chan_init(&mast->base);
162 if (ret)
163 return ret;
164
165 /* enable error reporting */
166 nv_mask(priv, 0x610090, 0x00000001, 0x00000001);
167 nv_mask(priv, 0x6100a0, 0x00000001, 0x00000001);
168
169 /* initialise channel for dma command submission */
170 nv_wr32(priv, 0x610494, mast->push);
171 nv_wr32(priv, 0x610498, 0x00010000);
172 nv_wr32(priv, 0x61049c, 0x00000001);
173 nv_mask(priv, 0x610490, 0x00000010, 0x00000010);
174 nv_wr32(priv, 0x640000, 0x00000000);
175 nv_wr32(priv, 0x610490, 0x01000013);
176
177 /* wait for it to go inactive */
178 if (!nv_wait(priv, 0x610490, 0x80000000, 0x00000000)) {
179 nv_error(mast, "init: 0x%08x\n", nv_rd32(priv, 0x610490));
180 return -EBUSY;
181 }
182
183 return 0;
184}
185
186static int
187nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend)
188{
189 struct nv50_disp_priv *priv = (void *)object->engine;
190 struct nv50_disp_dmac *mast = (void *)object;
191
192 /* deactivate channel */
193 nv_mask(priv, 0x610490, 0x00000010, 0x00000000);
194 nv_mask(priv, 0x610490, 0x00000003, 0x00000000);
195 if (!nv_wait(priv, 0x610490, 0x001e0000, 0x00000000)) {
196 nv_error(mast, "fini: 0x%08x\n", nv_rd32(priv, 0x610490));
197 if (suspend)
198 return -EBUSY;
199 }
200
201 /* disable error reporting */
202 nv_mask(priv, 0x610090, 0x00000001, 0x00000000);
203 nv_mask(priv, 0x6100a0, 0x00000001, 0x00000000);
204
205 return nv50_disp_chan_fini(&mast->base, suspend);
206}
207
208struct nouveau_ofuncs
209nvd0_disp_mast_ofuncs = {
210 .ctor = nvd0_disp_mast_ctor,
211 .dtor = nv50_disp_dmac_dtor,
212 .init = nvd0_disp_mast_init,
213 .fini = nvd0_disp_mast_fini,
214 .rd32 = nv50_disp_chan_rd32,
215 .wr32 = nv50_disp_chan_wr32,
216};
217
218/*******************************************************************************
219 * EVO sync channel objects
220 ******************************************************************************/
221
222static int
223nvd0_disp_sync_ctor(struct nouveau_object *parent,
224 struct nouveau_object *engine,
225 struct nouveau_oclass *oclass, void *data, u32 size,
226 struct nouveau_object **pobject)
227{
228 struct nv50_display_sync_class *args = data;
229 struct nv50_disp_priv *priv = (void *)engine;
230 struct nv50_disp_dmac *dmac;
231 int ret;
232
233 if (size < sizeof(*args) || args->head >= priv->head.nr)
234 return -EINVAL;
235
236 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
237 1 + args->head, sizeof(*dmac),
238 (void **)&dmac);
239 *pobject = nv_object(dmac);
240 if (ret)
241 return ret;
242
243 nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach;
244 nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach;
245 return 0;
246}
247
248struct nouveau_ofuncs
249nvd0_disp_sync_ofuncs = {
250 .ctor = nvd0_disp_sync_ctor,
251 .dtor = nv50_disp_dmac_dtor,
252 .init = nvd0_disp_dmac_init,
253 .fini = nvd0_disp_dmac_fini,
254 .rd32 = nv50_disp_chan_rd32,
255 .wr32 = nv50_disp_chan_wr32,
256};
257
258/*******************************************************************************
259 * EVO overlay channel objects
260 ******************************************************************************/
261
262static int
263nvd0_disp_ovly_ctor(struct nouveau_object *parent,
264 struct nouveau_object *engine,
265 struct nouveau_oclass *oclass, void *data, u32 size,
266 struct nouveau_object **pobject)
267{
268 struct nv50_display_ovly_class *args = data;
269 struct nv50_disp_priv *priv = (void *)engine;
270 struct nv50_disp_dmac *dmac;
271 int ret;
272
273 if (size < sizeof(*args) || args->head >= priv->head.nr)
274 return -EINVAL;
275
276 ret = nv50_disp_dmac_create_(parent, engine, oclass, args->pushbuf,
277 5 + args->head, sizeof(*dmac),
278 (void **)&dmac);
279 *pobject = nv_object(dmac);
280 if (ret)
281 return ret;
282
283 nv_parent(dmac)->object_attach = nvd0_disp_dmac_object_attach;
284 nv_parent(dmac)->object_detach = nvd0_disp_dmac_object_detach;
285 return 0;
286}
287
288struct nouveau_ofuncs
289nvd0_disp_ovly_ofuncs = {
290 .ctor = nvd0_disp_ovly_ctor,
291 .dtor = nv50_disp_dmac_dtor,
292 .init = nvd0_disp_dmac_init,
293 .fini = nvd0_disp_dmac_fini,
294 .rd32 = nv50_disp_chan_rd32,
295 .wr32 = nv50_disp_chan_wr32,
296};
297
298/*******************************************************************************
299 * EVO PIO channel base class
300 ******************************************************************************/
301
302static int
303nvd0_disp_pioc_create_(struct nouveau_object *parent,
304 struct nouveau_object *engine,
305 struct nouveau_oclass *oclass, int chid,
306 int length, void **pobject)
307{
308 return nv50_disp_chan_create_(parent, engine, oclass, chid,
309 length, pobject);
310}
311
312static void
313nvd0_disp_pioc_dtor(struct nouveau_object *object)
314{
315 struct nv50_disp_pioc *pioc = (void *)object;
316 nv50_disp_chan_destroy(&pioc->base);
317}
318
319static int
320nvd0_disp_pioc_init(struct nouveau_object *object)
321{
322 struct nv50_disp_priv *priv = (void *)object->engine;
323 struct nv50_disp_pioc *pioc = (void *)object;
324 int chid = pioc->base.chid;
325 int ret;
326
327 ret = nv50_disp_chan_init(&pioc->base);
328 if (ret)
329 return ret;
330
331 /* enable error reporting */
332 nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000001 << chid);
333 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
334
335 /* activate channel */
336 nv_wr32(priv, 0x610490 + (chid * 0x10), 0x00000001);
337 if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00010000)) {
338 nv_error(pioc, "init: 0x%08x\n",
339 nv_rd32(priv, 0x610490 + (chid * 0x10)));
340 return -EBUSY;
341 }
342
343 return 0;
344}
345
346static int
347nvd0_disp_pioc_fini(struct nouveau_object *object, bool suspend)
348{
349 struct nv50_disp_priv *priv = (void *)object->engine;
350 struct nv50_disp_pioc *pioc = (void *)object;
351 int chid = pioc->base.chid;
352
353 nv_mask(priv, 0x610490 + (chid * 0x10), 0x00000001, 0x00000000);
354 if (!nv_wait(priv, 0x610490 + (chid * 0x10), 0x00030000, 0x00000000)) {
355 nv_error(pioc, "timeout: 0x%08x\n",
356 nv_rd32(priv, 0x610490 + (chid * 0x10)));
357 if (suspend)
358 return -EBUSY;
359 }
360
361 /* disable error reporting */
362 nv_mask(priv, 0x610090, 0x00000001 << chid, 0x00000000);
363 nv_mask(priv, 0x6100a0, 0x00000001 << chid, 0x00000000);
364
365 return nv50_disp_chan_fini(&pioc->base, suspend);
366}
367
368/*******************************************************************************
369 * EVO immediate overlay channel objects
370 ******************************************************************************/
371
372static int
373nvd0_disp_oimm_ctor(struct nouveau_object *parent,
374 struct nouveau_object *engine,
375 struct nouveau_oclass *oclass, void *data, u32 size,
376 struct nouveau_object **pobject)
377{
378 struct nv50_display_oimm_class *args = data;
379 struct nv50_disp_priv *priv = (void *)engine;
380 struct nv50_disp_pioc *pioc;
381 int ret;
382
383 if (size < sizeof(*args) || args->head >= priv->head.nr)
384 return -EINVAL;
385
386 ret = nvd0_disp_pioc_create_(parent, engine, oclass, 9 + args->head,
387 sizeof(*pioc), (void **)&pioc);
388 *pobject = nv_object(pioc);
389 if (ret)
390 return ret;
391
392 return 0;
393}
394
395struct nouveau_ofuncs
396nvd0_disp_oimm_ofuncs = {
397 .ctor = nvd0_disp_oimm_ctor,
398 .dtor = nvd0_disp_pioc_dtor,
399 .init = nvd0_disp_pioc_init,
400 .fini = nvd0_disp_pioc_fini,
401 .rd32 = nv50_disp_chan_rd32,
402 .wr32 = nv50_disp_chan_wr32,
403};
404
405/*******************************************************************************
406 * EVO cursor channel objects
407 ******************************************************************************/
408
409static int
410nvd0_disp_curs_ctor(struct nouveau_object *parent,
411 struct nouveau_object *engine,
412 struct nouveau_oclass *oclass, void *data, u32 size,
413 struct nouveau_object **pobject)
414{
415 struct nv50_display_curs_class *args = data;
416 struct nv50_disp_priv *priv = (void *)engine;
417 struct nv50_disp_pioc *pioc;
418 int ret;
419
420 if (size < sizeof(*args) || args->head >= priv->head.nr)
421 return -EINVAL;
422
423 ret = nvd0_disp_pioc_create_(parent, engine, oclass, 13 + args->head,
424 sizeof(*pioc), (void **)&pioc);
425 *pobject = nv_object(pioc);
426 if (ret)
427 return ret;
428
429 return 0;
430}
431
432struct nouveau_ofuncs
433nvd0_disp_curs_ofuncs = {
434 .ctor = nvd0_disp_curs_ctor,
435 .dtor = nvd0_disp_pioc_dtor,
436 .init = nvd0_disp_pioc_init,
437 .fini = nvd0_disp_pioc_fini,
438 .rd32 = nv50_disp_chan_rd32,
439 .wr32 = nv50_disp_chan_wr32,
440};
441
442/*******************************************************************************
443 * Base display object
444 ******************************************************************************/
445
446static int
447nvd0_disp_base_ctor(struct nouveau_object *parent,
448 struct nouveau_object *engine,
449 struct nouveau_oclass *oclass, void *data, u32 size,
450 struct nouveau_object **pobject)
451{
452 struct nv50_disp_priv *priv = (void *)engine;
453 struct nv50_disp_base *base;
454 int ret;
455
456 ret = nouveau_parent_create(parent, engine, oclass, 0,
457 priv->sclass, 0, &base);
458 *pobject = nv_object(base);
459 if (ret)
460 return ret;
461
462 return nouveau_ramht_new(parent, parent, 0x1000, 0, &base->ramht);
463}
464
465static void
466nvd0_disp_base_dtor(struct nouveau_object *object)
467{
468 struct nv50_disp_base *base = (void *)object;
469 nouveau_ramht_ref(NULL, &base->ramht);
470 nouveau_parent_destroy(&base->base);
471}
472
473static int
474nvd0_disp_base_init(struct nouveau_object *object)
475{
476 struct nv50_disp_priv *priv = (void *)object->engine;
477 struct nv50_disp_base *base = (void *)object;
478 int ret, i;
479 u32 tmp;
480
481 ret = nouveau_parent_init(&base->base);
482 if (ret)
483 return ret;
484
485 /* The below segments of code copying values from one register to
486 * another appear to inform EVO of the display capabilities or
487 * something similar.
488 */
489
490 /* ... CRTC caps */
491 for (i = 0; i < priv->head.nr; i++) {
492 tmp = nv_rd32(priv, 0x616104 + (i * 0x800));
493 nv_wr32(priv, 0x6101b4 + (i * 0x800), tmp);
494 tmp = nv_rd32(priv, 0x616108 + (i * 0x800));
495 nv_wr32(priv, 0x6101b8 + (i * 0x800), tmp);
496 tmp = nv_rd32(priv, 0x61610c + (i * 0x800));
497 nv_wr32(priv, 0x6101bc + (i * 0x800), tmp);
498 }
499
500 /* ... DAC caps */
501 for (i = 0; i < priv->dac.nr; i++) {
502 tmp = nv_rd32(priv, 0x61a000 + (i * 0x800));
503 nv_wr32(priv, 0x6101c0 + (i * 0x800), tmp);
504 }
505
506 /* ... SOR caps */
507 for (i = 0; i < priv->sor.nr; i++) {
508 tmp = nv_rd32(priv, 0x61c000 + (i * 0x800));
509 nv_wr32(priv, 0x6301c4 + (i * 0x800), tmp);
510 }
511
512 /* steal display away from vbios, or something like that */
513 if (nv_rd32(priv, 0x6100ac) & 0x00000100) {
514 nv_wr32(priv, 0x6100ac, 0x00000100);
515 nv_mask(priv, 0x6194e8, 0x00000001, 0x00000000);
516 if (!nv_wait(priv, 0x6194e8, 0x00000002, 0x00000000)) {
517 nv_error(priv, "timeout acquiring display\n");
518 return -EBUSY;
519 }
520 }
521
522 /* point at display engine memory area (hash table, objects) */
523 nv_wr32(priv, 0x610010, (nv_gpuobj(object->parent)->addr >> 8) | 9);
524
525 /* enable supervisor interrupts, disable everything else */
526 nv_wr32(priv, 0x610090, 0x00000000);
527 nv_wr32(priv, 0x6100a0, 0x00000000);
528 nv_wr32(priv, 0x6100b0, 0x00000307);
529
530 return 0;
531}
532
533static int
534nvd0_disp_base_fini(struct nouveau_object *object, bool suspend)
535{
536 struct nv50_disp_priv *priv = (void *)object->engine;
537 struct nv50_disp_base *base = (void *)object;
538
539 /* disable all interrupts */
540 nv_wr32(priv, 0x6100b0, 0x00000000);
541
542 return nouveau_parent_fini(&base->base, suspend);
543}
544
545struct nouveau_ofuncs
546nvd0_disp_base_ofuncs = {
547 .ctor = nvd0_disp_base_ctor,
548 .dtor = nvd0_disp_base_dtor,
549 .init = nvd0_disp_base_init,
550 .fini = nvd0_disp_base_fini,
551};
552
553static struct nouveau_oclass
554nvd0_disp_base_oclass[] = {
555 { NVD0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
556 {}
557};
558
559static struct nouveau_oclass
560nvd0_disp_sclass[] = {
36 {},
561 { NVD0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
562 { NVD0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
563 { NVD0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
564 { NVD0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
565 { NVD0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
566 {}
567};
568
569/*******************************************************************************
570 * Display engine implementation
571 ******************************************************************************/
572
573static u16
574exec_lookup(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl,
575 struct dcb_output *dcb, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
576 struct nvbios_outp *info)
577{
578 struct nouveau_bios *bios = nouveau_bios(priv);
579 u16 mask, type, data;
580
581 if (outp < 4) {
582 type = DCB_OUTPUT_ANALOG;
583 mask = 0;
584 } else {
585 outp -= 4;
586 switch (ctrl & 0x00000f00) {
587 case 0x00000000: type = DCB_OUTPUT_LVDS; mask = 1; break;
588 case 0x00000100: type = DCB_OUTPUT_TMDS; mask = 1; break;
589 case 0x00000200: type = DCB_OUTPUT_TMDS; mask = 2; break;
590 case 0x00000500: type = DCB_OUTPUT_TMDS; mask = 3; break;
591 case 0x00000800: type = DCB_OUTPUT_DP; mask = 1; break;
592 case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
593 default:
594 nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
595 return 0x0000;
596 }
597 dcb->sorconf.link = mask;
598 }
599
600 mask = 0x00c0 & (mask << 6);
601 mask |= 0x0001 << outp;
602 mask |= 0x0100 << head;
603
604 data = dcb_outp_match(bios, type, mask, ver, hdr, dcb);
605 if (!data)
606 return 0x0000;
607
608 return nvbios_outp_match(bios, type, mask, ver, hdr, cnt, len, info);
609}
610
611static bool
612exec_script(struct nv50_disp_priv *priv, int head, int outp, u32 ctrl, int id)
613{
614 struct nouveau_bios *bios = nouveau_bios(priv);
615 struct nvbios_outp info;
616 struct dcb_output dcb;
617 u8 ver, hdr, cnt, len;
618 u16 data;
619
620 data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info);
621 if (data) {
622 struct nvbios_init init = {
623 .subdev = nv_subdev(priv),
624 .bios = bios,
625 .offset = info.script[id],
626 .outp = &dcb,
627 .crtc = head,
628 .execute = 1,
629 };
630
631 return nvbios_exec(&init) == 0;
632 }
633
634 return false;
635}
636
637static u32
638exec_clkcmp(struct nv50_disp_priv *priv, int head, int outp,
639 u32 ctrl, int id, u32 pclk)
640{
641 struct nouveau_bios *bios = nouveau_bios(priv);
642 struct nvbios_outp info1;
643 struct nvbios_ocfg info2;
644 struct dcb_output dcb;
645 u8 ver, hdr, cnt, len;
646 u16 data, conf;
647
648 data = exec_lookup(priv, head, outp, ctrl, &dcb, &ver, &hdr, &cnt, &len, &info1);
649 if (data == 0x0000)
650 return 0x0000;
651
652 switch (dcb.type) {
653 case DCB_OUTPUT_TMDS:
654 conf = (ctrl & 0x00000f00) >> 8;
655 if (pclk >= 165000)
656 conf |= 0x0100;
657 break;
658 case DCB_OUTPUT_LVDS:
659 conf = priv->sor.lvdsconf;
660 break;
661 case DCB_OUTPUT_DP:
662 conf = (ctrl & 0x00000f00) >> 8;
663 break;
664 case DCB_OUTPUT_ANALOG:
665 default:
666 conf = 0x00ff;
667 break;
668 }
669
670 data = nvbios_ocfg_match(bios, data, conf, &ver, &hdr, &cnt, &len, &info2);
671 if (data) {
672 data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
673 if (data) {
674 struct nvbios_init init = {
675 .subdev = nv_subdev(priv),
676 .bios = bios,
677 .offset = data,
678 .outp = &dcb,
679 .crtc = head,
680 .execute = 1,
681 };
682
683 if (nvbios_exec(&init))
684 return 0x0000;
685 return conf;
686 }
687 }
688
689 return 0x0000;
690}
691
692static void
693nvd0_display_unk1_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
694{
695 int i;
696
697 for (i = 0; mask && i < 8; i++) {
698 u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20));
699 if (mcc & (1 << head))
700 exec_script(priv, head, i, mcc, 1);
701 }
702
703 nv_wr32(priv, 0x6101d4, 0x00000000);
704 nv_wr32(priv, 0x6109d4, 0x00000000);
705 nv_wr32(priv, 0x6101d0, 0x80000000);
706}
707
708static void
40nvd0_disp_intr_vblank(struct nvd0_disp_priv *priv, int crtc)
709nvd0_display_unk2_calc_tu(struct nv50_disp_priv *priv, int head, int or)
710{
711 const u32 ctrl = nv_rd32(priv, 0x660200 + (or * 0x020));
712 const u32 conf = nv_rd32(priv, 0x660404 + (head * 0x300));
713 const u32 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
714 const u32 link = ((ctrl & 0xf00) == 0x800) ? 0 : 1;
715 const u32 hoff = (head * 0x800);
716 const u32 soff = ( or * 0x800);
717 const u32 loff = (link * 0x080) + soff;
718 const u32 symbol = 100000;
719 const u32 TU = 64;
720 u32 dpctrl = nv_rd32(priv, 0x61c10c + loff) & 0x000f0000;
721 u32 clksor = nv_rd32(priv, 0x612300 + soff);
722 u32 datarate, link_nr, link_bw, bits;
723 u64 ratio, value;
724
725 if ((conf & 0x3c0) == 0x180) bits = 30;
726 else if ((conf & 0x3c0) == 0x140) bits = 24;
727 else bits = 18;
728 datarate = (pclk * bits) / 8;
729
730 if (dpctrl > 0x00030000) link_nr = 4;
731 else if (dpctrl > 0x00010000) link_nr = 2;
732 else link_nr = 1;
733
734 link_bw = (clksor & 0x007c0000) >> 18;
735 link_bw *= 27000;
736
737 ratio = datarate;
738 ratio *= symbol;
739 do_div(ratio, link_nr * link_bw);
740
741 value = (symbol - ratio) * TU;
742 value *= ratio;
743 do_div(value, symbol);
744 do_div(value, symbol);
745
746 value += 5;
747 value |= 0x08000000;
748
749 nv_wr32(priv, 0x616610 + hoff, value);
750}
751
752static void
753nvd0_display_unk2_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
754{
755 u32 pclk;
756 int i;
757
758 for (i = 0; mask && i < 8; i++) {
759 u32 mcc = nv_rd32(priv, 0x640180 + (i * 0x20));
760 if (mcc & (1 << head))
761 exec_script(priv, head, i, mcc, 2);
762 }
763
764 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
765 nv_debug(priv, "head %d pclk %d mask 0x%08x\n", head, pclk, mask);
766 if (pclk && (mask & 0x00010000)) {
767 struct nouveau_clock *clk = nouveau_clock(priv);
768 clk->pll_set(clk, PLL_VPLL0 + head, pclk);
769 }
770
771 nv_wr32(priv, 0x612200 + (head * 0x800), 0x00000000);
772
773 for (i = 0; mask && i < 8; i++) {
774 u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20)), cfg;
775 if (mcp & (1 << head)) {
776 if ((cfg = exec_clkcmp(priv, head, i, mcp, 0, pclk))) {
777 u32 addr, mask, data = 0x00000000;
778 if (i < 4) {
779 addr = 0x612280 + ((i - 0) * 0x800);
780 mask = 0xffffffff;
781 } else {
782 switch (mcp & 0x00000f00) {
783 case 0x00000800:
784 case 0x00000900:
785 nvd0_display_unk2_calc_tu(priv, head, i - 4);
786 break;
787 default:
788 break;
789 }
790
791 addr = 0x612300 + ((i - 4) * 0x800);
792 mask = 0x00000707;
793 if (cfg & 0x00000100)
794 data = 0x00000101;
795 }
796 nv_mask(priv, addr, mask, data);
797 }
798 break;
799 }
800 }
801
802 nv_wr32(priv, 0x6101d4, 0x00000000);
803 nv_wr32(priv, 0x6109d4, 0x00000000);
804 nv_wr32(priv, 0x6101d0, 0x80000000);
805}
806
807static void
808nvd0_display_unk4_handler(struct nv50_disp_priv *priv, u32 head, u32 mask)
809{
810 int pclk, i;
811
812 pclk = nv_rd32(priv, 0x660450 + (head * 0x300)) / 1000;
813
814 for (i = 0; mask && i < 8; i++) {
815 u32 mcp = nv_rd32(priv, 0x660180 + (i * 0x20));
816 if (mcp & (1 << head))
817 exec_clkcmp(priv, head, i, mcp, 1, pclk);
818 }
819
820 nv_wr32(priv, 0x6101d4, 0x00000000);
821 nv_wr32(priv, 0x6109d4, 0x00000000);
822 nv_wr32(priv, 0x6101d0, 0x80000000);
823}
824
825static void
826nvd0_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
827{
828 struct nouveau_bar *bar = nouveau_bar(priv);
829 struct nouveau_disp *disp = &priv->base;
@@ -65,14 +851,71 @@ nvd0_disp_intr_vblank(struct nvd0_disp_priv *priv, int crtc)
851 disp->vblank.notify(disp->vblank.data, crtc);
852}
853
68static void
854void
855nvd0_disp_intr(struct nouveau_subdev *subdev)
856{
71 struct nvd0_disp_priv *priv = (void *)subdev;
857 struct nv50_disp_priv *priv = (void *)subdev;
858 u32 intr = nv_rd32(priv, 0x610088);
859 int i;
860
75 for (i = 0; i < 4; i++) {
861 if (intr & 0x00000001) {
862 u32 stat = nv_rd32(priv, 0x61008c);
863 nv_wr32(priv, 0x61008c, stat);
864 intr &= ~0x00000001;
865 }
866
867 if (intr & 0x00000002) {
868 u32 stat = nv_rd32(priv, 0x61009c);
869 int chid = ffs(stat) - 1;
870 if (chid >= 0) {
871 u32 mthd = nv_rd32(priv, 0x6101f0 + (chid * 12));
872 u32 data = nv_rd32(priv, 0x6101f4 + (chid * 12));
873 u32 unkn = nv_rd32(priv, 0x6101f8 + (chid * 12));
874
875 nv_error(priv, "chid %d mthd 0x%04x data 0x%08x "
876 "0x%08x 0x%08x\n",
877 chid, (mthd & 0x0000ffc), data, mthd, unkn);
878 nv_wr32(priv, 0x61009c, (1 << chid));
879 nv_wr32(priv, 0x6101f0 + (chid * 12), 0x90000000);
880 }
881
882 intr &= ~0x00000002;
883 }
884
885 if (intr & 0x00100000) {
886 u32 stat = nv_rd32(priv, 0x6100ac);
887 u32 mask = 0, crtc = ~0;
888
889 while (!mask && ++crtc < priv->head.nr)
890 mask = nv_rd32(priv, 0x6101d4 + (crtc * 0x800));
891
892 if (stat & 0x00000001) {
893 nv_wr32(priv, 0x6100ac, 0x00000001);
894 nvd0_display_unk1_handler(priv, crtc, mask);
895 stat &= ~0x00000001;
896 }
897
898 if (stat & 0x00000002) {
899 nv_wr32(priv, 0x6100ac, 0x00000002);
900 nvd0_display_unk2_handler(priv, crtc, mask);
901 stat &= ~0x00000002;
902 }
903
904 if (stat & 0x00000004) {
905 nv_wr32(priv, 0x6100ac, 0x00000004);
906 nvd0_display_unk4_handler(priv, crtc, mask);
907 stat &= ~0x00000004;
908 }
909
910 if (stat) {
911 nv_info(priv, "unknown intr24 0x%08x\n", stat);
912 nv_wr32(priv, 0x6100ac, stat);
913 }
914
915 intr &= ~0x00100000;
916 }
917
918 for (i = 0; i < priv->head.nr; i++) {
76 u32 mask = 0x01000000 << i; 919 u32 mask = 0x01000000 << i;
77 if (mask & intr) { 920 if (mask & intr) {
78 u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800)); 921 u32 stat = nv_rd32(priv, 0x6100bc + (i * 0x800));
@@ -86,10 +929,10 @@ nvd0_disp_intr(struct nouveau_subdev *subdev)
86 929
87static int 930static int
88nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 931nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
89 struct nouveau_oclass *oclass, void *data, u32 size, 932 struct nouveau_oclass *oclass, void *data, u32 size,
90 struct nouveau_object **pobject) 933 struct nouveau_object **pobject)
91{ 934{
92 struct nvd0_disp_priv *priv; 935 struct nv50_disp_priv *priv;
93 int ret; 936 int ret;
94 937
95 ret = nouveau_disp_create(parent, engine, oclass, "PDISP", 938 ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
@@ -98,8 +941,23 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
98 if (ret) 941 if (ret)
99 return ret; 942 return ret;
100 943
101 nv_engine(priv)->sclass = nvd0_disp_sclass; 944 nv_engine(priv)->sclass = nvd0_disp_base_oclass;
945 nv_engine(priv)->cclass = &nv50_disp_cclass;
102 nv_subdev(priv)->intr = nvd0_disp_intr; 946 nv_subdev(priv)->intr = nvd0_disp_intr;
947 priv->sclass = nvd0_disp_sclass;
948 priv->head.nr = nv_rd32(priv, 0x022448);
949 priv->dac.nr = 3;
950 priv->sor.nr = 4;
951 priv->dac.power = nv50_dac_power;
952 priv->dac.sense = nv50_dac_sense;
953 priv->sor.power = nv50_sor_power;
954 priv->sor.hda_eld = nvd0_hda_eld;
955 priv->sor.hdmi = nvd0_hdmi_ctrl;
956 priv->sor.dp_train = nvd0_sor_dp_train;
957 priv->sor.dp_train_init = nv94_sor_dp_train_init;
958 priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
959 priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl;
960 priv->sor.dp_drvctl = nvd0_sor_dp_drvctl;
103 961
104 INIT_LIST_HEAD(&priv->base.vblank.list); 962 INIT_LIST_HEAD(&priv->base.vblank.list);
105 spin_lock_init(&priv->base.vblank.lock); 963 spin_lock_init(&priv->base.vblank.lock);
@@ -108,7 +966,7 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
108 966
109struct nouveau_oclass 967struct nouveau_oclass
110nvd0_disp_oclass = { 968nvd0_disp_oclass = {
111 .handle = NV_ENGINE(DISP, 0xd0), 969 .handle = NV_ENGINE(DISP, 0x90),
112 .ofuncs = &(struct nouveau_ofuncs) { 970 .ofuncs = &(struct nouveau_ofuncs) {
113 .ctor = nvd0_disp_ctor, 971 .ctor = nvd0_disp_ctor,
114 .dtor = _nouveau_disp_dtor, 972 .dtor = _nouveau_disp_dtor,
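Annotation (not part of the patch): the reworked nvd0_disp_intr() above replaces the old fixed four-head loop with a decode of 0x610088 — channel-exception bits first, then the bit-20 "supervisor" interrupt, which is processed as three individually acked stages that all receive the per-head mask latched in 0x6101d4. The standalone sketch below restates that staging; the stage-role comments are inferences from the handler bodies, and the register names are placeholders.

#include <stdint.h>
#include <stdio.h>

static uint32_t intr_stat = 0x7;	/* stands in for 0x6100ac: all stages pending */

static void stage(int bit, uint32_t mask)
{
	/* bit 0 -> unk1 (before the mode change), bit 1 -> unk2 (reclock and
	 * reconfigure), bit 2 -> unk4 (after the mode change), per the driver */
	printf("handle supervisor stage bit %d, head mask 0x%08x\n", bit, mask);
}

int main(void)
{
	uint32_t stat = intr_stat;
	uint32_t mask = 0x00010000;	/* example value latched from 0x6101d4 */
	int i;

	for (i = 0; i < 3; i++) {
		if (stat & (1u << i)) {
			intr_stat = 1u << i;	/* ack one stage at a time */
			stage(i, mask);
			stat &= ~(1u << i);
		}
	}
	return 0;
}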
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
new file mode 100644
index 000000000000..259537c4587e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
@@ -0,0 +1,94 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <engine/software.h>
26#include <engine/disp.h>
27
28#include <core/class.h>
29
30#include "nv50.h"
31
32static struct nouveau_oclass
33nve0_disp_sclass[] = {
34 { NVE0_DISP_MAST_CLASS, &nvd0_disp_mast_ofuncs },
35 { NVE0_DISP_SYNC_CLASS, &nvd0_disp_sync_ofuncs },
36 { NVE0_DISP_OVLY_CLASS, &nvd0_disp_ovly_ofuncs },
37 { NVE0_DISP_OIMM_CLASS, &nvd0_disp_oimm_ofuncs },
38 { NVE0_DISP_CURS_CLASS, &nvd0_disp_curs_ofuncs },
39 {}
40};
41
42static struct nouveau_oclass
43nve0_disp_base_oclass[] = {
44 { NVE0_DISP_CLASS, &nvd0_disp_base_ofuncs, nva3_disp_base_omthds },
45 {}
46};
47
48static int
49nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
50 struct nouveau_oclass *oclass, void *data, u32 size,
51 struct nouveau_object **pobject)
52{
53 struct nv50_disp_priv *priv;
54 int ret;
55
56 ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
57 "display", &priv);
58 *pobject = nv_object(priv);
59 if (ret)
60 return ret;
61
62 nv_engine(priv)->sclass = nve0_disp_base_oclass;
63 nv_engine(priv)->cclass = &nv50_disp_cclass;
64 nv_subdev(priv)->intr = nvd0_disp_intr;
65 priv->sclass = nve0_disp_sclass;
66 priv->head.nr = nv_rd32(priv, 0x022448);
67 priv->dac.nr = 3;
68 priv->sor.nr = 4;
69 priv->dac.power = nv50_dac_power;
70 priv->dac.sense = nv50_dac_sense;
71 priv->sor.power = nv50_sor_power;
72 priv->sor.hda_eld = nvd0_hda_eld;
73 priv->sor.hdmi = nvd0_hdmi_ctrl;
74 priv->sor.dp_train = nvd0_sor_dp_train;
75 priv->sor.dp_train_init = nv94_sor_dp_train_init;
76 priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
77 priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl;
78 priv->sor.dp_drvctl = nvd0_sor_dp_drvctl;
79
80 INIT_LIST_HEAD(&priv->base.vblank.list);
81 spin_lock_init(&priv->base.vblank.lock);
82 return 0;
83}
84
85struct nouveau_oclass
86nve0_disp_oclass = {
87 .handle = NV_ENGINE(DISP, 0x91),
88 .ofuncs = &(struct nouveau_ofuncs) {
89 .ctor = nve0_disp_ctor,
90 .dtor = _nouveau_disp_dtor,
91 .init = _nouveau_disp_init,
92 .fini = _nouveau_disp_fini,
93 },
94};
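Annotation: nve0_disp_ctor() above is a line-for-line copy of nvd0_disp_ctor() apart from the two class tables and the 0x91 handle. A hypothetical shared helper would make the real variation explicit; this is a sketch of that factoring, not something the driver provides — the copies are presumably kept separate so each chipset can diverge later without churn.

static int
dispnv_ctor_common(struct nouveau_object *parent, struct nouveau_object *engine,
		   struct nouveau_oclass *oclass,
		   struct nouveau_oclass *base_oclass, /* nvd0/nve0 base class */
		   struct nouveau_oclass *chan_sclass, /* nvd0/nve0 channel set */
		   struct nouveau_object **pobject)
{
	struct nv50_disp_priv *priv;
	int ret;

	ret = nouveau_disp_create(parent, engine, oclass, "PDISP",
				  "display", &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* only these two assignments differ between the two ctors */
	nv_engine(priv)->sclass = base_oclass;
	priv->sclass = chan_sclass;

	nv_engine(priv)->cclass = &nv50_disp_cclass;
	nv_subdev(priv)->intr = nvd0_disp_intr;
	priv->head.nr = nv_rd32(priv, 0x022448);
	priv->dac.nr = 3;
	priv->sor.nr = 4;
	priv->dac.power = nv50_dac_power;
	priv->dac.sense = nv50_dac_sense;
	priv->sor.power = nv50_sor_power;
	priv->sor.hda_eld = nvd0_hda_eld;
	priv->sor.hdmi = nvd0_hdmi_ctrl;
	priv->sor.dp_train = nvd0_sor_dp_train;
	priv->sor.dp_train_init = nv94_sor_dp_train_init;
	priv->sor.dp_train_fini = nv94_sor_dp_train_fini;
	priv->sor.dp_lnkctl = nvd0_sor_dp_lnkctl;
	priv->sor.dp_drvctl = nvd0_sor_dp_drvctl;

	INIT_LIST_HEAD(&priv->base.vblank.list);
	spin_lock_init(&priv->base.vblank.lock);
	return 0;
}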
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
new file mode 100644
index 000000000000..39b6b67732d0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
@@ -0,0 +1,112 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27
28#include <subdev/bios.h>
29#include <subdev/bios/dcb.h>
30#include <subdev/timer.h>
31
32#include "nv50.h"
33
34int
35nv50_sor_power(struct nv50_disp_priv *priv, int or, u32 data)
36{
37 const u32 stat = data & NV50_DISP_SOR_PWR_STATE;
38 const u32 soff = (or * 0x800);
39 nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
40 nv_mask(priv, 0x61c004 + soff, 0x80000001, 0x80000000 | stat);
41 nv_wait(priv, 0x61c004 + soff, 0x80000000, 0x00000000);
42 nv_wait(priv, 0x61c030 + soff, 0x10000000, 0x00000000);
43 return 0;
44}
45
46int
47nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
48{
49 struct nv50_disp_priv *priv = (void *)object->engine;
50 struct nouveau_bios *bios = nouveau_bios(priv);
51 const u16 type = (mthd & NV50_DISP_SOR_MTHD_TYPE) >> 12;
52 const u8 head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3;
53 const u8 link = (mthd & NV50_DISP_SOR_MTHD_LINK) >> 2;
54 const u8 or = (mthd & NV50_DISP_SOR_MTHD_OR);
55 const u16 mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or);
56 struct dcb_output outp;
57 u8 ver, hdr;
58 u32 data;
59 int ret = -EINVAL;
60
61 if (size < sizeof(u32))
62 return -EINVAL;
63 data = *(u32 *)args;
64
65 if (type && !dcb_outp_match(bios, type, mask, &ver, &hdr, &outp))
66 return -ENODEV;
67
68 switch (mthd & ~0x3f) {
69 case NV50_DISP_SOR_PWR:
70 ret = priv->sor.power(priv, or, data);
71 break;
72 case NVA3_DISP_SOR_HDA_ELD:
73 ret = priv->sor.hda_eld(priv, or, args, size);
74 break;
75 case NV84_DISP_SOR_HDMI_PWR:
76 ret = priv->sor.hdmi(priv, head, or, data);
77 break;
78 case NV50_DISP_SOR_LVDS_SCRIPT:
79 priv->sor.lvdsconf = data & NV50_DISP_SOR_LVDS_SCRIPT_ID;
80 ret = 0;
81 break;
82 case NV94_DISP_SOR_DP_TRAIN:
83 switch (data & NV94_DISP_SOR_DP_TRAIN_OP) {
84 case NV94_DISP_SOR_DP_TRAIN_OP_PATTERN:
85 ret = priv->sor.dp_train(priv, or, link, type, mask, data, &outp);
86 break;
87 case NV94_DISP_SOR_DP_TRAIN_OP_INIT:
88 ret = priv->sor.dp_train_init(priv, or, link, head, type, mask, data, &outp);
89 break;
90 case NV94_DISP_SOR_DP_TRAIN_OP_FINI:
91 ret = priv->sor.dp_train_fini(priv, or, link, head, type, mask, data, &outp);
92 break;
93 default:
94 break;
95 }
96 break;
97 case NV94_DISP_SOR_DP_LNKCTL:
98 ret = priv->sor.dp_lnkctl(priv, or, link, head, type, mask, data, &outp);
99 break;
100 case NV94_DISP_SOR_DP_DRVCTL(0):
101 case NV94_DISP_SOR_DP_DRVCTL(1):
102 case NV94_DISP_SOR_DP_DRVCTL(2):
103 case NV94_DISP_SOR_DP_DRVCTL(3):
104 ret = priv->sor.dp_drvctl(priv, or, link, (mthd & 0xc0) >> 6,
105 type, mask, data, &outp);
106 break;
107 default:
108 BUG_ON(1);
109 }
110
111 return ret;
112}
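Annotation: nv50_sor_mthd() above routes on a method token whose low bits encode which output is addressed. The field positions follow from the shifts in the code; the exact mask widths and the example token below are assumptions for illustration only. A standalone decode:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mthd = 0x0000300e;	/* hypothetical: type 3, head 1, link 1, or 2 */
	uint16_t type = (mthd & 0x0000f000) >> 12;	/* assumed 4-bit field */
	uint8_t  head = (mthd & 0x00000018) >> 3;	/* assumed 2-bit field */
	uint8_t  link = (mthd & 0x00000004) >> 2;
	uint8_t  sor  = (mthd & 0x00000003);
	/* the DCB lookup then accepts an output matching any of these bits */
	uint16_t mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << sor);

	printf("type %u head %u link %u or %u -> dcb mask 0x%04x\n",
	       type, head, link, sor, mask);	/* mask is 0x0284 here */
	return 0;
}

The `mthd & ~0x3f` switch in the driver works because all of head/link/or live in those low six bits, so stripping them recovers the base method.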
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
new file mode 100644
index 000000000000..f6edd009762e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv94.c
@@ -0,0 +1,190 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27
28#include <subdev/bios.h>
29#include <subdev/bios/dcb.h>
30#include <subdev/bios/dp.h>
31#include <subdev/bios/init.h>
32
33#include "nv50.h"
34
35static inline u32
36nv94_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
37{
38 static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
39 static const u8 nv94[] = { 16, 8, 0, 24 };
40 if (nv_device(priv)->chipset == 0xaf)
41 return nvaf[lane];
42 return nv94[lane];
43}
44
45int
46nv94_sor_dp_train_init(struct nv50_disp_priv *priv, int or, int link, int head,
47 u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
48{
49 struct nouveau_bios *bios = nouveau_bios(priv);
50 struct nvbios_dpout info;
51 u8 ver, hdr, cnt, len;
52 u16 outp;
53
54 outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
55 if (outp) {
56 struct nvbios_init init = {
57 .subdev = nv_subdev(priv),
58 .bios = bios,
59 .outp = dcbo,
60 .crtc = head,
61 .execute = 1,
62 };
63
64 if (data & NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON)
65 init.offset = info.script[2];
66 else
67 init.offset = info.script[3];
68 nvbios_exec(&init);
69
70 init.offset = info.script[0];
71 nvbios_exec(&init);
72 }
73
74 return 0;
75}
76
77int
78nv94_sor_dp_train_fini(struct nv50_disp_priv *priv, int or, int link, int head,
79 u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
80{
81 struct nouveau_bios *bios = nouveau_bios(priv);
82 struct nvbios_dpout info;
83 u8 ver, hdr, cnt, len;
84 u16 outp;
85
86 outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
87 if (outp) {
88 struct nvbios_init init = {
89 .subdev = nv_subdev(priv),
90 .bios = bios,
91 .offset = info.script[1],
92 .outp = dcbo,
93 .crtc = head,
94 .execute = 1,
95 };
96
97 nvbios_exec(&init);
98 }
99
100 return 0;
101}
102
103int
104nv94_sor_dp_train(struct nv50_disp_priv *priv, int or, int link,
105 u16 type, u16 mask, u32 data, struct dcb_output *info)
106{
107 const u32 loff = (or * 0x800) + (link * 0x80);
108 const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN);
109 nv_mask(priv, 0x61c10c + loff, 0x0f000000, patt << 24);
110 return 0;
111}
112
113int
114nv94_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
115 u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
116{
117 struct nouveau_bios *bios = nouveau_bios(priv);
118 const u32 loff = (or * 0x800) + (link * 0x80);
119 const u32 soff = (or * 0x800);
120 u16 link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8;
121 u8 link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT);
122 u32 dpctrl = 0x00000000;
123 u32 clksor = 0x00000000;
124 u32 outp, lane = 0;
125 u8 ver, hdr, cnt, len;
126 struct nvbios_dpout info;
127 int i;
128
129 /* -> 10Khz units */
130 link_bw *= 2700;
131
132 outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
133 if (outp && info.lnkcmp) {
134 struct nvbios_init init = {
135 .subdev = nv_subdev(priv),
136 .bios = bios,
137 .offset = 0x0000,
138 .outp = dcbo,
139 .crtc = head,
140 .execute = 1,
141 };
142
143 while (link_bw < nv_ro16(bios, info.lnkcmp))
144 info.lnkcmp += 4;
145 init.offset = nv_ro16(bios, info.lnkcmp + 2);
146
147 nvbios_exec(&init);
148 }
149
150 dpctrl |= ((1 << link_nr) - 1) << 16;
151 if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH)
152 dpctrl |= 0x00004000;
153 if (link_bw > 16200)
154 clksor |= 0x00040000;
155
156 for (i = 0; i < link_nr; i++)
157 lane |= 1 << (nv94_sor_dp_lane_map(priv, i) >> 3);
158
159 nv_mask(priv, 0x614300 + soff, 0x000c0000, clksor);
160 nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
161 nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane);
162 return 0;
163}
164
165int
166nv94_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
167 u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
168{
169 struct nouveau_bios *bios = nouveau_bios(priv);
170 const u32 loff = (or * 0x800) + (link * 0x80);
171 const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8;
172 const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE);
173 u32 addr, shift = nv94_sor_dp_lane_map(priv, lane);
174 u8 ver, hdr, cnt, len;
175 struct nvbios_dpout outp;
176 struct nvbios_dpcfg ocfg;
177
178 addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp);
179 if (!addr)
180 return -ENODEV;
181
182 addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg);
183 if (!addr)
184 return -EINVAL;
185
186 nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift);
187 nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift);
188 nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
189 return 0;
190}
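Annotation: the lnkcmp walk in nv94_sor_dp_lnkctl() in concrete numbers. The LNKCTL_WIDTH field carries the DPCD-style rate code, so 0x0a (2.7 Gb/s) scales to 0x0a * 2700 = 27000 in 10 kHz units; lnkcmp entries (modeled here as 4-byte pairs, fastest first) are skipped while they demand more bandwidth than requested, and the selected entry's init-script offset is executed. The table contents below are invented:

#include <stdint.h>
#include <stdio.h>

struct lnkcmp { uint16_t bw; uint16_t script; };	/* models the 4-byte stride */

int main(void)
{
	static const struct lnkcmp table[] = {
		{ 27000, 0x1234 },	/* 2.70 Gb/s entry, made-up script offset */
		{ 16200, 0x2345 },	/* 1.62 Gb/s entry, made-up script offset */
	};
	uint16_t link_bw = 0x0a * 2700;		/* 27000 * 10 kHz = 270 MHz */
	const struct lnkcmp *e = table;

	while (link_bw < e->bw)			/* same test as the nv_ro16() loop */
		e++;
	printf("rate %u0 kHz -> script 0x%04x, 270 MHz clksor bit %s\n",
	       link_bw, e->script, link_bw > 16200 ? "set" : "clear");
	return 0;
}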
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
new file mode 100644
index 000000000000..c37ce7e29f5d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
@@ -0,0 +1,126 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26#include <core/class.h>
27
28#include <subdev/bios.h>
29#include <subdev/bios/dcb.h>
30#include <subdev/bios/dp.h>
31#include <subdev/bios/init.h>
32
33#include "nv50.h"
34
35static inline u32
36nvd0_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
37{
38 static const u8 nvd0[] = { 16, 8, 0, 24 };
39 return nvd0[lane];
40}
41
42int
43nvd0_sor_dp_train(struct nv50_disp_priv *priv, int or, int link,
44 u16 type, u16 mask, u32 data, struct dcb_output *info)
45{
46 const u32 loff = (or * 0x800) + (link * 0x80);
47 const u32 patt = (data & NV94_DISP_SOR_DP_TRAIN_PATTERN);
48 nv_mask(priv, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * patt);
49 return 0;
50}
51
52int
53nvd0_sor_dp_lnkctl(struct nv50_disp_priv *priv, int or, int link, int head,
54 u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
55{
56 struct nouveau_bios *bios = nouveau_bios(priv);
57 const u32 loff = (or * 0x800) + (link * 0x80);
58 const u32 soff = (or * 0x800);
59 const u8 link_bw = (data & NV94_DISP_SOR_DP_LNKCTL_WIDTH) >> 8;
60 const u8 link_nr = (data & NV94_DISP_SOR_DP_LNKCTL_COUNT);
61 u32 dpctrl = 0x00000000;
62 u32 clksor = 0x00000000;
63 u32 outp, lane = 0;
64 u8 ver, hdr, cnt, len;
65 struct nvbios_dpout info;
66 int i;
67
68 outp = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &info);
69 if (outp && info.lnkcmp) {
70 struct nvbios_init init = {
71 .subdev = nv_subdev(priv),
72 .bios = bios,
73 .offset = 0x0000,
74 .outp = dcbo,
75 .crtc = head,
76 .execute = 1,
77 };
78
79 while (nv_ro08(bios, info.lnkcmp) < link_bw)
80 info.lnkcmp += 3;
81 init.offset = nv_ro16(bios, info.lnkcmp + 1);
82
83 nvbios_exec(&init);
84 }
85
86 clksor |= link_bw << 18;
87 dpctrl |= ((1 << link_nr) - 1) << 16;
88 if (data & NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH)
89 dpctrl |= 0x00004000;
90
91 for (i = 0; i < link_nr; i++)
92 lane |= 1 << (nvd0_sor_dp_lane_map(priv, i) >> 3);
93
94 nv_mask(priv, 0x612300 + soff, 0x007c0000, clksor);
95 nv_mask(priv, 0x61c10c + loff, 0x001f4000, dpctrl);
96 nv_mask(priv, 0x61c130 + loff, 0x0000000f, lane);
97 return 0;
98}
99
100int
101nvd0_sor_dp_drvctl(struct nv50_disp_priv *priv, int or, int link, int lane,
102 u16 type, u16 mask, u32 data, struct dcb_output *dcbo)
103{
104 struct nouveau_bios *bios = nouveau_bios(priv);
105 const u32 loff = (or * 0x800) + (link * 0x80);
106 const u8 swing = (data & NV94_DISP_SOR_DP_DRVCTL_VS) >> 8;
107 const u8 preem = (data & NV94_DISP_SOR_DP_DRVCTL_PE);
108 u32 addr, shift = nvd0_sor_dp_lane_map(priv, lane);
109 u8 ver, hdr, cnt, len;
110 struct nvbios_dpout outp;
111 struct nvbios_dpcfg ocfg;
112
113 addr = nvbios_dpout_match(bios, type, mask, &ver, &hdr, &cnt, &len, &outp);
114 if (!addr)
115 return -ENODEV;
116
117 addr = nvbios_dpcfg_match(bios, addr, 0, swing, preem, &ver, &hdr, &cnt, &len, &ocfg);
118 if (!addr)
119 return -EINVAL;
120
121 nv_mask(priv, 0x61c118 + loff, 0x000000ff << shift, ocfg.drv << shift);
122 nv_mask(priv, 0x61c120 + loff, 0x000000ff << shift, ocfg.pre << shift);
123 nv_mask(priv, 0x61c130 + loff, 0x0000ff00, ocfg.unk << 8);
124 nv_mask(priv, 0x61c13c + loff, 0x00000000, 0x00000000);
125 return 0;
126}
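Annotation: both lane-map helpers return the bit offset of a lane's drive/pre-emphasis byte inside the 0x61c118/0x61c120 registers, and the lnkctl functions reuse the same values, divided by 8, to build the 4-bit lane-enable mask. Worked standalone:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	static const uint8_t map[] = { 16, 8, 0, 24 };	/* the nvd0[] table above */
	uint32_t lane = 0;
	int link_nr = 2, i;

	for (i = 0; i < link_nr; i++)
		lane |= 1u << (map[i] >> 3);	/* 16 -> bit 2, 8 -> bit 1 */

	printf("link_nr %d -> lane mask 0x%x\n", link_nr, lane);	/* 0x6 */
	return 0;
}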
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
index e1f013d39768..5103e88d1877 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/base.c
@@ -28,37 +28,39 @@
 #include <subdev/fb.h>
 #include <engine/dmaobj.h>
 
-int
-nouveau_dmaobj_create_(struct nouveau_object *parent,
+static int
+nouveau_dmaobj_ctor(struct nouveau_object *parent,
		       struct nouveau_object *engine,
-		       struct nouveau_oclass *oclass,
-		       void *data, u32 size, int len, void **pobject)
+		       struct nouveau_oclass *oclass, void *data, u32 size,
+		       struct nouveau_object **pobject)
 {
+	struct nouveau_dmaeng *dmaeng = (void *)engine;
+	struct nouveau_dmaobj *dmaobj;
+	struct nouveau_gpuobj *gpuobj;
 	struct nv_dma_class *args = data;
-	struct nouveau_dmaobj *object;
 	int ret;
 
 	if (size < sizeof(*args))
 		return -EINVAL;
 
-	ret = nouveau_object_create_(parent, engine, oclass, 0, len, pobject);
-	object = *pobject;
+	ret = nouveau_object_create(parent, engine, oclass, 0, &dmaobj);
+	*pobject = nv_object(dmaobj);
 	if (ret)
 		return ret;
 
 	switch (args->flags & NV_DMA_TARGET_MASK) {
 	case NV_DMA_TARGET_VM:
-		object->target = NV_MEM_TARGET_VM;
+		dmaobj->target = NV_MEM_TARGET_VM;
 		break;
 	case NV_DMA_TARGET_VRAM:
-		object->target = NV_MEM_TARGET_VRAM;
+		dmaobj->target = NV_MEM_TARGET_VRAM;
 		break;
 	case NV_DMA_TARGET_PCI:
-		object->target = NV_MEM_TARGET_PCI;
+		dmaobj->target = NV_MEM_TARGET_PCI;
 		break;
 	case NV_DMA_TARGET_PCI_US:
 	case NV_DMA_TARGET_AGP:
-		object->target = NV_MEM_TARGET_PCI_NOSNOOP;
+		dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
 		break;
 	default:
 		return -EINVAL;
@@ -66,22 +68,53 @@ nouveau_dmaobj_create_(struct nouveau_object *parent,
 
 	switch (args->flags & NV_DMA_ACCESS_MASK) {
 	case NV_DMA_ACCESS_VM:
-		object->access = NV_MEM_ACCESS_VM;
+		dmaobj->access = NV_MEM_ACCESS_VM;
 		break;
 	case NV_DMA_ACCESS_RD:
-		object->access = NV_MEM_ACCESS_RO;
+		dmaobj->access = NV_MEM_ACCESS_RO;
 		break;
 	case NV_DMA_ACCESS_WR:
-		object->access = NV_MEM_ACCESS_WO;
+		dmaobj->access = NV_MEM_ACCESS_WO;
 		break;
 	case NV_DMA_ACCESS_RDWR:
-		object->access = NV_MEM_ACCESS_RW;
+		dmaobj->access = NV_MEM_ACCESS_RW;
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	object->start = args->start;
-	object->limit = args->limit;
-	return 0;
+	dmaobj->start = args->start;
+	dmaobj->limit = args->limit;
+	dmaobj->conf0 = args->conf0;
+
+	switch (nv_mclass(parent)) {
+	case NV_DEVICE_CLASS:
+		/* delayed, or no, binding */
+		break;
+	default:
+		ret = dmaeng->bind(dmaeng, *pobject, dmaobj, &gpuobj);
+		if (ret == 0) {
+			nouveau_object_ref(NULL, pobject);
+			*pobject = nv_object(gpuobj);
+		}
+		break;
+	}
+
+	return ret;
 }
+
+static struct nouveau_ofuncs
+nouveau_dmaobj_ofuncs = {
+	.ctor = nouveau_dmaobj_ctor,
+	.dtor = nouveau_object_destroy,
+	.init = nouveau_object_init,
+	.fini = nouveau_object_fini,
+};
+
+struct nouveau_oclass
+nouveau_dmaobj_sclass[] = {
+	{ NV_DMA_FROM_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
+	{ NV_DMA_TO_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
+	{ NV_DMA_IN_MEMORY_CLASS, &nouveau_dmaobj_ofuncs },
	{}
};
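Annotation: the net effect of the rework above is that binding moved out of every per-chipset ctor into this common one. Creating a DMA object with the device as parent yields an unbound software object ("delayed, or no, binding"); any other parent gets the object handed straight to the engine's bind() hook, and the caller ends up holding the resulting gpuobj instead. Minimal standalone model, all names invented:

#include <stdio.h>

enum parent { PARENT_DEVICE, PARENT_CHANNEL };

static const char *ctor(enum parent p)
{
	const char *object = "dmaobj (unbound)";

	if (p != PARENT_DEVICE)		/* bind immediately, replace the object */
		object = "gpuobj (bound for the parent's engine)";
	return object;
}

int main(void)
{
	printf("device parent : %s\n", ctor(PARENT_DEVICE));
	printf("channel parent: %s\n", ctor(PARENT_CHANNEL));
	return 0;
}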
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
index 9f4cc2f31994..027d8217c0fa 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv04.c
@@ -34,10 +34,6 @@ struct nv04_dmaeng_priv {
 	struct nouveau_dmaeng base;
 };
 
-struct nv04_dmaobj_priv {
-	struct nouveau_dmaobj base;
-};
-
 static int
 nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
		 struct nouveau_object *parent,
@@ -53,6 +49,18 @@ nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
 	u32 length = dmaobj->limit - dmaobj->start;
 	int ret;
 
+	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+		switch (nv_mclass(parent->parent)) {
+		case NV03_CHANNEL_DMA_CLASS:
+		case NV10_CHANNEL_DMA_CLASS:
+		case NV17_CHANNEL_DMA_CLASS:
+		case NV40_CHANNEL_DMA_CLASS:
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
 	if (dmaobj->target == NV_MEM_TARGET_VM) {
 		if (nv_object(vmm)->oclass == &nv04_vmmgr_oclass) {
 			struct nouveau_gpuobj *pgt = vmm->vm->pgt[0].obj[0];
@@ -106,56 +114,6 @@ nv04_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
 }
 
 static int
-nv04_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-		 struct nouveau_oclass *oclass, void *data, u32 size,
-		 struct nouveau_object **pobject)
-{
-	struct nouveau_dmaeng *dmaeng = (void *)engine;
-	struct nv04_dmaobj_priv *dmaobj;
-	struct nouveau_gpuobj *gpuobj;
-	int ret;
-
-	ret = nouveau_dmaobj_create(parent, engine, oclass,
-				    data, size, &dmaobj);
-	*pobject = nv_object(dmaobj);
-	if (ret)
-		return ret;
-
-	switch (nv_mclass(parent)) {
-	case NV_DEVICE_CLASS:
-		break;
-	case NV03_CHANNEL_DMA_CLASS:
-	case NV10_CHANNEL_DMA_CLASS:
-	case NV17_CHANNEL_DMA_CLASS:
-	case NV40_CHANNEL_DMA_CLASS:
-		ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
-		nouveau_object_ref(NULL, pobject);
-		*pobject = nv_object(gpuobj);
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return ret;
-}
-
-static struct nouveau_ofuncs
-nv04_dmaobj_ofuncs = {
-	.ctor = nv04_dmaobj_ctor,
-	.dtor = _nouveau_dmaobj_dtor,
-	.init = _nouveau_dmaobj_init,
-	.fini = _nouveau_dmaobj_fini,
-};
-
-static struct nouveau_oclass
-nv04_dmaobj_sclass[] = {
-	{ 0x0002, &nv04_dmaobj_ofuncs },
-	{ 0x0003, &nv04_dmaobj_ofuncs },
-	{ 0x003d, &nv04_dmaobj_ofuncs },
-	{}
-};
-
-static int
 nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		 struct nouveau_oclass *oclass, void *data, u32 size,
		 struct nouveau_object **pobject)
@@ -168,7 +126,7 @@ nv04_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	priv->base.base.sclass = nv04_dmaobj_sclass;
+	nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
 	priv->base.bind = nv04_dmaobj_bind;
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
index 045d2565e289..750183f7c057 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nv50.c
@@ -32,36 +32,74 @@ struct nv50_dmaeng_priv {
 	struct nouveau_dmaeng base;
 };
 
-struct nv50_dmaobj_priv {
-	struct nouveau_dmaobj base;
-};
-
 static int
 nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
		 struct nouveau_object *parent,
		 struct nouveau_dmaobj *dmaobj,
		 struct nouveau_gpuobj **pgpuobj)
 {
-	u32 flags = nv_mclass(dmaobj);
+	u32 flags0 = nv_mclass(dmaobj);
+	u32 flags5 = 0x00000000;
 	int ret;
 
+	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+		switch (nv_mclass(parent->parent)) {
+		case NV50_CHANNEL_DMA_CLASS:
+		case NV84_CHANNEL_DMA_CLASS:
+		case NV50_CHANNEL_IND_CLASS:
+		case NV84_CHANNEL_IND_CLASS:
+		case NV50_DISP_MAST_CLASS:
+		case NV84_DISP_MAST_CLASS:
+		case NV94_DISP_MAST_CLASS:
+		case NVA0_DISP_MAST_CLASS:
+		case NVA3_DISP_MAST_CLASS:
+		case NV50_DISP_SYNC_CLASS:
+		case NV84_DISP_SYNC_CLASS:
+		case NV94_DISP_SYNC_CLASS:
+		case NVA0_DISP_SYNC_CLASS:
+		case NVA3_DISP_SYNC_CLASS:
+		case NV50_DISP_OVLY_CLASS:
+		case NV84_DISP_OVLY_CLASS:
+		case NV94_DISP_OVLY_CLASS:
+		case NVA0_DISP_OVLY_CLASS:
+		case NVA3_DISP_OVLY_CLASS:
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	if (!(dmaobj->conf0 & NV50_DMA_CONF0_ENABLE)) {
+		if (dmaobj->target == NV_MEM_TARGET_VM) {
+			dmaobj->conf0 = NV50_DMA_CONF0_PRIV_VM;
+			dmaobj->conf0 |= NV50_DMA_CONF0_PART_VM;
+			dmaobj->conf0 |= NV50_DMA_CONF0_COMP_VM;
+			dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_VM;
+		} else {
+			dmaobj->conf0 = NV50_DMA_CONF0_PRIV_US;
+			dmaobj->conf0 |= NV50_DMA_CONF0_PART_256;
+			dmaobj->conf0 |= NV50_DMA_CONF0_COMP_NONE;
+			dmaobj->conf0 |= NV50_DMA_CONF0_TYPE_LINEAR;
+		}
+	}
+
+	flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_COMP) << 22;
+	flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_TYPE) << 22;
+	flags0 |= (dmaobj->conf0 & NV50_DMA_CONF0_PRIV);
+	flags5 |= (dmaobj->conf0 & NV50_DMA_CONF0_PART);
+
 	switch (dmaobj->target) {
 	case NV_MEM_TARGET_VM:
-		flags |= 0x00000000;
-		flags |= 0x60000000; /* COMPRESSION_USEVM */
-		flags |= 0x1fc00000; /* STORAGE_TYPE_USEVM */
+		flags0 |= 0x00000000;
 		break;
 	case NV_MEM_TARGET_VRAM:
-		flags |= 0x00010000;
-		flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+		flags0 |= 0x00010000;
 		break;
 	case NV_MEM_TARGET_PCI:
-		flags |= 0x00020000;
-		flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+		flags0 |= 0x00020000;
 		break;
 	case NV_MEM_TARGET_PCI_NOSNOOP:
-		flags |= 0x00030000;
-		flags |= 0x00100000; /* ACCESSUS_USER_SYSTEM */
+		flags0 |= 0x00030000;
 		break;
 	default:
 		return -EINVAL;
@@ -71,79 +109,29 @@ nv50_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
 	case NV_MEM_ACCESS_VM:
 		break;
 	case NV_MEM_ACCESS_RO:
-		flags |= 0x00040000;
+		flags0 |= 0x00040000;
 		break;
 	case NV_MEM_ACCESS_WO:
 	case NV_MEM_ACCESS_RW:
-		flags |= 0x00080000;
+		flags0 |= 0x00080000;
 		break;
 	}
 
 	ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
 	if (ret == 0) {
-		nv_wo32(*pgpuobj, 0x00, flags);
+		nv_wo32(*pgpuobj, 0x00, flags0);
 		nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
 		nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
 		nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
					upper_32_bits(dmaobj->start));
 		nv_wo32(*pgpuobj, 0x10, 0x00000000);
-		nv_wo32(*pgpuobj, 0x14, 0x00000000);
+		nv_wo32(*pgpuobj, 0x14, flags5);
 	}
 
 	return ret;
 }
 
 static int
-nv50_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-		 struct nouveau_oclass *oclass, void *data, u32 size,
-		 struct nouveau_object **pobject)
-{
-	struct nouveau_dmaeng *dmaeng = (void *)engine;
-	struct nv50_dmaobj_priv *dmaobj;
-	struct nouveau_gpuobj *gpuobj;
-	int ret;
-
-	ret = nouveau_dmaobj_create(parent, engine, oclass,
-				    data, size, &dmaobj);
-	*pobject = nv_object(dmaobj);
-	if (ret)
-		return ret;
-
-	switch (nv_mclass(parent)) {
-	case NV_DEVICE_CLASS:
-		break;
-	case NV50_CHANNEL_DMA_CLASS:
-	case NV84_CHANNEL_DMA_CLASS:
-	case NV50_CHANNEL_IND_CLASS:
-	case NV84_CHANNEL_IND_CLASS:
-		ret = dmaeng->bind(dmaeng, *pobject, &dmaobj->base, &gpuobj);
-		nouveau_object_ref(NULL, pobject);
-		*pobject = nv_object(gpuobj);
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return ret;
-}
-
-static struct nouveau_ofuncs
-nv50_dmaobj_ofuncs = {
-	.ctor = nv50_dmaobj_ctor,
-	.dtor = _nouveau_dmaobj_dtor,
-	.init = _nouveau_dmaobj_init,
-	.fini = _nouveau_dmaobj_fini,
-};
-
-static struct nouveau_oclass
-nv50_dmaobj_sclass[] = {
-	{ 0x0002, &nv50_dmaobj_ofuncs },
-	{ 0x0003, &nv50_dmaobj_ofuncs },
-	{ 0x003d, &nv50_dmaobj_ofuncs },
-	{}
-};
-
-static int
 nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		 struct nouveau_oclass *oclass, void *data, u32 size,
		 struct nouveau_object **pobject)
@@ -156,7 +144,7 @@ nv50_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	priv->base.base.sclass = nv50_dmaobj_sclass;
+	nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
 	priv->base.bind = nv50_dmaobj_bind;
 	return 0;
 }
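Annotation: the 24-byte object nv50_dmaobj_bind() emits, with made-up addresses, to show the split encoding of a 40-bit start/limit pair. flags0 here assumes a plain VRAM, read/write NV_DMA_IN_MEMORY object (class 0x003d | 0x00010000 | 0x00080000); the real conf0-derived bits are omitted:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start = 0x120000000ull, limit = 0x13fffffffull;
	uint32_t desc[6];

	desc[0] = 0x003d | 0x00010000 | 0x00080000;	/* class | VRAM | RW (illustrative) */
	desc[1] = (uint32_t)limit;			/* lower_32_bits(limit) */
	desc[2] = (uint32_t)start;			/* lower_32_bits(start) */
	desc[3] = (uint32_t)(limit >> 32) << 24 |	/* upper bits packed together */
		  (uint32_t)(start >> 32);
	desc[4] = 0x00000000;
	desc[5] = 0x00000000;				/* flags5 (PART bits on nv50) */

	printf("word3 = 0x%08" PRIx32 "\n", desc[3]);	/* prints 0x01000001 */
	return 0;
}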
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
index 5baa08695535..cd3970d03b80 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvc0.c
@@ -22,7 +22,9 @@
  * Authors: Ben Skeggs
  */
 
+#include <core/device.h>
 #include <core/gpuobj.h>
+#include <core/class.h>
 
 #include <subdev/fb.h>
 #include <engine/dmaobj.h>
@@ -31,44 +33,85 @@ struct nvc0_dmaeng_priv {
 	struct nouveau_dmaeng base;
 };
 
-struct nvc0_dmaobj_priv {
-	struct nouveau_dmaobj base;
-};
-
 static int
-nvc0_dmaobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
-		 struct nouveau_oclass *oclass, void *data, u32 size,
-		 struct nouveau_object **pobject)
+nvc0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
+		 struct nouveau_object *parent,
+		 struct nouveau_dmaobj *dmaobj,
+		 struct nouveau_gpuobj **pgpuobj)
 {
-	struct nvc0_dmaobj_priv *dmaobj;
+	u32 flags0 = nv_mclass(dmaobj);
+	u32 flags5 = 0x00000000;
 	int ret;
 
-	ret = nouveau_dmaobj_create(parent, engine, oclass, data, size, &dmaobj);
-	*pobject = nv_object(dmaobj);
-	if (ret)
-		return ret;
+	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
+		switch (nv_mclass(parent->parent)) {
+		case NVA3_DISP_MAST_CLASS:
+		case NVA3_DISP_SYNC_CLASS:
+		case NVA3_DISP_OVLY_CLASS:
+			break;
+		default:
+			return -EINVAL;
+		}
+	} else
+		return 0;
+
+	if (!(dmaobj->conf0 & NVC0_DMA_CONF0_ENABLE)) {
+		if (dmaobj->target == NV_MEM_TARGET_VM) {
+			dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_VM;
+			dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_VM;
+		} else {
+			dmaobj->conf0 = NVC0_DMA_CONF0_PRIV_US;
+			dmaobj->conf0 |= NVC0_DMA_CONF0_TYPE_LINEAR;
+			dmaobj->conf0 |= 0x00020000;
+		}
+	}
 
-	if (dmaobj->base.target != NV_MEM_TARGET_VM || dmaobj->base.start)
+	flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_TYPE) << 22;
+	flags0 |= (dmaobj->conf0 & NVC0_DMA_CONF0_PRIV);
+	flags5 |= (dmaobj->conf0 & NVC0_DMA_CONF0_UNKN);
+
+	switch (dmaobj->target) {
+	case NV_MEM_TARGET_VM:
+		flags0 |= 0x00000000;
+		break;
+	case NV_MEM_TARGET_VRAM:
+		flags0 |= 0x00010000;
+		break;
+	case NV_MEM_TARGET_PCI:
+		flags0 |= 0x00020000;
+		break;
+	case NV_MEM_TARGET_PCI_NOSNOOP:
+		flags0 |= 0x00030000;
+		break;
+	default:
 		return -EINVAL;
+	}
 
-	return 0;
-}
+	switch (dmaobj->access) {
+	case NV_MEM_ACCESS_VM:
+		break;
+	case NV_MEM_ACCESS_RO:
+		flags0 |= 0x00040000;
+		break;
+	case NV_MEM_ACCESS_WO:
+	case NV_MEM_ACCESS_RW:
+		flags0 |= 0x00080000;
+		break;
+	}
 
-static struct nouveau_ofuncs
-nvc0_dmaobj_ofuncs = {
-	.ctor = nvc0_dmaobj_ctor,
-	.dtor = _nouveau_dmaobj_dtor,
-	.init = _nouveau_dmaobj_init,
-	.fini = _nouveau_dmaobj_fini,
-};
+	ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
+	if (ret == 0) {
+		nv_wo32(*pgpuobj, 0x00, flags0);
+		nv_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->limit));
+		nv_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->start));
+		nv_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->limit) << 24 |
					upper_32_bits(dmaobj->start));
+		nv_wo32(*pgpuobj, 0x10, 0x00000000);
+		nv_wo32(*pgpuobj, 0x14, flags5);
+	}
 
-static struct nouveau_oclass
-nvc0_dmaobj_sclass[] = {
-	{ 0x0002, &nvc0_dmaobj_ofuncs },
-	{ 0x0003, &nvc0_dmaobj_ofuncs },
-	{ 0x003d, &nvc0_dmaobj_ofuncs },
-	{}
-};
+	return ret;
+}
 
 static int
 nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
@@ -83,7 +126,8 @@ nvc0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	priv->base.base.sclass = nvc0_dmaobj_sclass;
+	nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
+	priv->base.bind = nvc0_dmaobj_bind;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
new file mode 100644
index 000000000000..d1528752980c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
@@ -0,0 +1,122 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/device.h>
26#include <core/gpuobj.h>
27#include <core/class.h>
28
29#include <subdev/fb.h>
30#include <engine/dmaobj.h>
31
32struct nvd0_dmaeng_priv {
33 struct nouveau_dmaeng base;
34};
35
36static int
37nvd0_dmaobj_bind(struct nouveau_dmaeng *dmaeng,
38 struct nouveau_object *parent,
39 struct nouveau_dmaobj *dmaobj,
40 struct nouveau_gpuobj **pgpuobj)
41{
42 u32 flags0 = 0x00000000;
43 int ret;
44
45 if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
46 switch (nv_mclass(parent->parent)) {
47 case NVD0_DISP_MAST_CLASS:
48 case NVD0_DISP_SYNC_CLASS:
49 case NVD0_DISP_OVLY_CLASS:
50 case NVE0_DISP_MAST_CLASS:
51 case NVE0_DISP_SYNC_CLASS:
52 case NVE0_DISP_OVLY_CLASS:
53 break;
54 default:
55 return -EINVAL;
56 }
57 } else
58 return 0;
59
60 if (!(dmaobj->conf0 & NVD0_DMA_CONF0_ENABLE)) {
61 if (dmaobj->target == NV_MEM_TARGET_VM) {
62 dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_VM;
63 dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_LP;
64 } else {
65 dmaobj->conf0 |= NVD0_DMA_CONF0_TYPE_LINEAR;
66 dmaobj->conf0 |= NVD0_DMA_CONF0_PAGE_SP;
67 }
68 }
69
70 flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_TYPE) << 20;
71 flags0 |= (dmaobj->conf0 & NVD0_DMA_CONF0_PAGE) >> 4;
72
73 switch (dmaobj->target) {
74 case NV_MEM_TARGET_VRAM:
75 flags0 |= 0x00000009;
76 break;
77 default:
78 return -EINVAL;
79 break;
80 }
81
82 ret = nouveau_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
83 if (ret == 0) {
84 nv_wo32(*pgpuobj, 0x00, flags0);
85 nv_wo32(*pgpuobj, 0x04, dmaobj->start >> 8);
86 nv_wo32(*pgpuobj, 0x08, dmaobj->limit >> 8);
87 nv_wo32(*pgpuobj, 0x0c, 0x00000000);
88 nv_wo32(*pgpuobj, 0x10, 0x00000000);
89 nv_wo32(*pgpuobj, 0x14, 0x00000000);
90 }
91
92 return ret;
93}
94
95static int
96nvd0_dmaeng_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
97 struct nouveau_oclass *oclass, void *data, u32 size,
98 struct nouveau_object **pobject)
99{
100 struct nvd0_dmaeng_priv *priv;
101 int ret;
102
103 ret = nouveau_dmaeng_create(parent, engine, oclass, &priv);
104 *pobject = nv_object(priv);
105 if (ret)
106 return ret;
107
108 nv_engine(priv)->sclass = nouveau_dmaobj_sclass;
109 priv->base.bind = nvd0_dmaobj_bind;
110 return 0;
111}
112
113struct nouveau_oclass
114nvd0_dmaeng_oclass = {
115 .handle = NV_ENGINE(DMAOBJ, 0xd0),
116 .ofuncs = &(struct nouveau_ofuncs) {
117 .ctor = nvd0_dmaeng_ctor,
118 .dtor = _nouveau_dmaeng_dtor,
119 .init = _nouveau_dmaeng_init,
120 .fini = _nouveau_dmaeng_fini,
121 },
122};
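Annotation: the nvd0-family descriptor above drops the lower/upper split used by the nv50 bind entirely and instead stores start and limit right-shifted by 8, i.e. in 256-byte units, so a 40-bit address fits a single word. With made-up values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start = 0x120000000ull;
	uint64_t limit = 0x13fffffffull;

	printf("word1 (start >> 8) = 0x%08" PRIx64 "\n", start >> 8);	/* 0x01200000 */
	printf("word2 (limit >> 8) = 0x%08" PRIx64 "\n", limit >> 8);	/* 0x013fffff */
	return 0;
}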
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
index bbb43c67c2ae..c2b9db335816 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/base.c
@@ -24,6 +24,7 @@
 
 #include <core/object.h>
 #include <core/handle.h>
+#include <core/class.h>
 
 #include <engine/dmaobj.h>
 #include <engine/fifo.h>
@@ -33,7 +34,7 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
			     struct nouveau_object *engine,
			     struct nouveau_oclass *oclass,
			     int bar, u32 addr, u32 size, u32 pushbuf,
-			     u32 engmask, int len, void **ptr)
+			     u64 engmask, int len, void **ptr)
 {
 	struct nouveau_device *device = nv_device(engine);
 	struct nouveau_fifo *priv = (void *)engine;
@@ -56,18 +57,16 @@ nouveau_fifo_channel_create_(struct nouveau_object *parent,
 
 	dmaeng = (void *)chan->pushdma->base.engine;
 	switch (chan->pushdma->base.oclass->handle) {
-	case 0x0002:
-	case 0x003d:
+	case NV_DMA_FROM_MEMORY_CLASS:
+	case NV_DMA_IN_MEMORY_CLASS:
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	if (dmaeng->bind) {
-		ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu);
-		if (ret)
-			return ret;
-	}
+	ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu);
+	if (ret)
+		return ret;
 
 	/* find a free fifo channel */
 	spin_lock_irqsave(&priv->lock, flags);
@@ -119,14 +118,14 @@ _nouveau_fifo_channel_dtor(struct nouveau_object *object)
 }
 
 u32
-_nouveau_fifo_channel_rd32(struct nouveau_object *object, u32 addr)
+_nouveau_fifo_channel_rd32(struct nouveau_object *object, u64 addr)
 {
 	struct nouveau_fifo_chan *chan = (void *)object;
 	return ioread32_native(chan->user + addr);
 }
 
 void
-_nouveau_fifo_channel_wr32(struct nouveau_object *object, u32 addr, u32 data)
+_nouveau_fifo_channel_wr32(struct nouveau_object *object, u64 addr, u32 data)
 {
 	struct nouveau_fifo_chan *chan = (void *)object;
 	iowrite32_native(data, chan->user + addr);
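Annotation: the engmask parameter just widened from u32 to u64 explains the mechanical 1 << n to 1ULL << n churn in every channel constructor below. A plain 1 is an int, so the shift happens in 32 bits before any widening; engine indices of 31 and above (the headroom the u64 provides) would be undefined behaviour. Standalone:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int n = 40;			/* hypothetical future engine index */
	uint64_t ok = 1ULL << n;	/* shift performed in 64 bits */

	/* uint64_t bad = 1 << n;  -- undefined: int shifted by >= 32 bits */
	printf("1ULL << %d = 0x%llx\n", n, (unsigned long long)ok);
	return 0;
}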
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index ea76e3e8c9c2..a47a8548f9e0 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -126,9 +126,9 @@ nv04_fifo_chan_ctor(struct nouveau_object *parent,
 
 	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
					  0x10000, args->pushbuf,
-					  (1 << NVDEV_ENGINE_DMAOBJ) |
-					  (1 << NVDEV_ENGINE_SW) |
-					  (1 << NVDEV_ENGINE_GR), &chan);
+					  (1ULL << NVDEV_ENGINE_DMAOBJ) |
+					  (1ULL << NVDEV_ENGINE_SW) |
+					  (1ULL << NVDEV_ENGINE_GR), &chan);
 	*pobject = nv_object(chan);
 	if (ret)
 		return ret;
@@ -440,7 +440,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
 	}
 
 	if (!nv04_fifo_swmthd(priv, chid, mthd, data)) {
-		nv_info(priv, "CACHE_ERROR - Ch %d/%d "
+		nv_error(priv, "CACHE_ERROR - Ch %d/%d "
			"Mthd 0x%04x Data 0x%08x\n",
			chid, (mthd >> 13) & 7, mthd & 0x1ffc,
			data);
@@ -476,7 +476,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
 			u32 ib_get = nv_rd32(priv, 0x003334);
 			u32 ib_put = nv_rd32(priv, 0x003330);
 
-			nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
+			nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%02x%08x "
				"Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
				"State 0x%08x (err: %s) Push 0x%08x\n",
				chid, ho_get, dma_get, ho_put,
@@ -494,7 +494,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
				nv_wr32(priv, 0x003334, ib_put);
			}
 		} else {
-			nv_info(priv, "DMA_PUSHER - Ch %d Get 0x%08x "
+			nv_error(priv, "DMA_PUSHER - Ch %d Get 0x%08x "
				"Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
				chid, dma_get, dma_put, state,
				nv_dma_state_err(state), push);
@@ -525,14 +525,13 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
 
 		if (device->card_type == NV_50) {
			if (status & 0x00000010) {
-				nv50_fb_trap(nouveau_fb(priv), 1);
				status &= ~0x00000010;
				nv_wr32(priv, 0x002100, 0x00000010);
			}
 		}
 
 		if (status) {
-			nv_info(priv, "unknown intr 0x%08x, ch %d\n",
+			nv_warn(priv, "unknown intr 0x%08x, ch %d\n",
				status, chid);
			nv_wr32(priv, NV03_PFIFO_INTR_0, status);
			status = 0;
@@ -542,7 +541,7 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
 	}
 
 	if (status) {
-		nv_info(priv, "still angry after %d spins, halt\n", cnt);
+		nv_error(priv, "still angry after %d spins, halt\n", cnt);
 		nv_wr32(priv, 0x002140, 0);
 		nv_wr32(priv, 0x000140, 0);
 	}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
index 4ba75422b89d..2c927c1d173b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv10.c
@@ -69,9 +69,9 @@ nv10_fifo_chan_ctor(struct nouveau_object *parent,
 
 	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
					  0x10000, args->pushbuf,
-					  (1 << NVDEV_ENGINE_DMAOBJ) |
-					  (1 << NVDEV_ENGINE_SW) |
-					  (1 << NVDEV_ENGINE_GR), &chan);
+					  (1ULL << NVDEV_ENGINE_DMAOBJ) |
+					  (1ULL << NVDEV_ENGINE_SW) |
+					  (1ULL << NVDEV_ENGINE_GR), &chan);
 	*pobject = nv_object(chan);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
index b96e6b0ae2b1..a9cb51d38c57 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv17.c
@@ -74,10 +74,10 @@ nv17_fifo_chan_ctor(struct nouveau_object *parent,
 
 	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
					  0x10000, args->pushbuf,
-					  (1 << NVDEV_ENGINE_DMAOBJ) |
-					  (1 << NVDEV_ENGINE_SW) |
-					  (1 << NVDEV_ENGINE_GR) |
-					  (1 << NVDEV_ENGINE_MPEG), /* NV31- */
+					  (1ULL << NVDEV_ENGINE_DMAOBJ) |
+					  (1ULL << NVDEV_ENGINE_SW) |
+					  (1ULL << NVDEV_ENGINE_GR) |
+					  (1ULL << NVDEV_ENGINE_MPEG), /* NV31- */
					  &chan);
 	*pobject = nv_object(chan);
 	if (ret)
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
index 559c3b4e1b86..2b1f91721225 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv40.c
@@ -192,10 +192,10 @@ nv40_fifo_chan_ctor(struct nouveau_object *parent,
 
 	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
					  0x1000, args->pushbuf,
-					  (1 << NVDEV_ENGINE_DMAOBJ) |
-					  (1 << NVDEV_ENGINE_SW) |
-					  (1 << NVDEV_ENGINE_GR) |
-					  (1 << NVDEV_ENGINE_MPEG), &chan);
+					  (1ULL << NVDEV_ENGINE_DMAOBJ) |
+					  (1ULL << NVDEV_ENGINE_SW) |
+					  (1ULL << NVDEV_ENGINE_GR) |
+					  (1ULL << NVDEV_ENGINE_MPEG), &chan);
 	*pobject = nv_object(chan);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index 536e7634a00d..bd096364f680 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -112,14 +112,6 @@ nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
 		return -EINVAL;
 	}
 
-	nv_wo32(base->eng, addr + 0x00, 0x00000000);
-	nv_wo32(base->eng, addr + 0x04, 0x00000000);
-	nv_wo32(base->eng, addr + 0x08, 0x00000000);
-	nv_wo32(base->eng, addr + 0x0c, 0x00000000);
-	nv_wo32(base->eng, addr + 0x10, 0x00000000);
-	nv_wo32(base->eng, addr + 0x14, 0x00000000);
-	bar->flush(bar);
-
 	/* HW bug workaround:
	 *
	 * PFIFO will hang forever if the connected engines don't report
@@ -141,8 +133,18 @@ nv50_fifo_context_detach(struct nouveau_object *parent, bool suspend,
		if (suspend)
			ret = -EBUSY;
 	}
-
 	nv_wr32(priv, 0x00b860, me);
+
+	if (ret == 0) {
+		nv_wo32(base->eng, addr + 0x00, 0x00000000);
+		nv_wo32(base->eng, addr + 0x04, 0x00000000);
+		nv_wo32(base->eng, addr + 0x08, 0x00000000);
+		nv_wo32(base->eng, addr + 0x0c, 0x00000000);
+		nv_wo32(base->eng, addr + 0x10, 0x00000000);
+		nv_wo32(base->eng, addr + 0x14, 0x00000000);
+		bar->flush(bar);
+	}
+
 	return ret;
 }
 
@@ -194,10 +196,10 @@ nv50_fifo_chan_ctor_dma(struct nouveau_object *parent,
 
 	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
					  0x2000, args->pushbuf,
-					  (1 << NVDEV_ENGINE_DMAOBJ) |
-					  (1 << NVDEV_ENGINE_SW) |
-					  (1 << NVDEV_ENGINE_GR) |
-					  (1 << NVDEV_ENGINE_MPEG), &chan);
+					  (1ULL << NVDEV_ENGINE_DMAOBJ) |
+					  (1ULL << NVDEV_ENGINE_SW) |
+					  (1ULL << NVDEV_ENGINE_GR) |
+					  (1ULL << NVDEV_ENGINE_MPEG), &chan);
 	*pobject = nv_object(chan);
 	if (ret)
 		return ret;
@@ -247,10 +249,10 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
 
 	ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
					  0x2000, args->pushbuf,
-					  (1 << NVDEV_ENGINE_DMAOBJ) |
-					  (1 << NVDEV_ENGINE_SW) |
-					  (1 << NVDEV_ENGINE_GR) |
-					  (1 << NVDEV_ENGINE_MPEG), &chan);
+					  (1ULL << NVDEV_ENGINE_DMAOBJ) |
+					  (1ULL << NVDEV_ENGINE_SW) |
+					  (1ULL << NVDEV_ENGINE_GR) |
+					  (1ULL << NVDEV_ENGINE_MPEG), &chan);
 	*pobject = nv_object(chan);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index b4fd26d8f166..1eb1c512f503 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -95,14 +95,6 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
95 return -EINVAL; 95 return -EINVAL;
96 } 96 }
97 97
98 nv_wo32(base->eng, addr + 0x00, 0x00000000);
99 nv_wo32(base->eng, addr + 0x04, 0x00000000);
100 nv_wo32(base->eng, addr + 0x08, 0x00000000);
101 nv_wo32(base->eng, addr + 0x0c, 0x00000000);
102 nv_wo32(base->eng, addr + 0x10, 0x00000000);
103 nv_wo32(base->eng, addr + 0x14, 0x00000000);
104 bar->flush(bar);
105
106 save = nv_mask(priv, 0x002520, 0x0000003f, 1 << engn); 98 save = nv_mask(priv, 0x002520, 0x0000003f, 1 << engn);
107 nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12); 99 nv_wr32(priv, 0x0032fc, nv_gpuobj(base)->addr >> 12);
108 done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff); 100 done = nv_wait_ne(priv, 0x0032fc, 0xffffffff, 0xffffffff);
@@ -112,6 +104,14 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
112 if (suspend) 104 if (suspend)
113 return -EBUSY; 105 return -EBUSY;
114 } 106 }
107
108 nv_wo32(base->eng, addr + 0x00, 0x00000000);
109 nv_wo32(base->eng, addr + 0x04, 0x00000000);
110 nv_wo32(base->eng, addr + 0x08, 0x00000000);
111 nv_wo32(base->eng, addr + 0x0c, 0x00000000);
112 nv_wo32(base->eng, addr + 0x10, 0x00000000);
113 nv_wo32(base->eng, addr + 0x14, 0x00000000);
114 bar->flush(bar);
115 return 0; 115 return 0;
116} 116}
117 117
@@ -163,17 +163,17 @@ nv84_fifo_chan_ctor_dma(struct nouveau_object *parent,
163 163
164 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, 164 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
165 0x2000, args->pushbuf, 165 0x2000, args->pushbuf,
166 (1 << NVDEV_ENGINE_DMAOBJ) | 166 (1ULL << NVDEV_ENGINE_DMAOBJ) |
167 (1 << NVDEV_ENGINE_SW) | 167 (1ULL << NVDEV_ENGINE_SW) |
168 (1 << NVDEV_ENGINE_GR) | 168 (1ULL << NVDEV_ENGINE_GR) |
169 (1 << NVDEV_ENGINE_MPEG) | 169 (1ULL << NVDEV_ENGINE_MPEG) |
170 (1 << NVDEV_ENGINE_ME) | 170 (1ULL << NVDEV_ENGINE_ME) |
171 (1 << NVDEV_ENGINE_VP) | 171 (1ULL << NVDEV_ENGINE_VP) |
172 (1 << NVDEV_ENGINE_CRYPT) | 172 (1ULL << NVDEV_ENGINE_CRYPT) |
173 (1 << NVDEV_ENGINE_BSP) | 173 (1ULL << NVDEV_ENGINE_BSP) |
174 (1 << NVDEV_ENGINE_PPP) | 174 (1ULL << NVDEV_ENGINE_PPP) |
175 (1 << NVDEV_ENGINE_COPY0) | 175 (1ULL << NVDEV_ENGINE_COPY0) |
176 (1 << NVDEV_ENGINE_UNK1C1), &chan); 176 (1ULL << NVDEV_ENGINE_UNK1C1), &chan);
177 *pobject = nv_object(chan); 177 *pobject = nv_object(chan);
178 if (ret) 178 if (ret)
179 return ret; 179 return ret;
@@ -225,17 +225,17 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
225 225
226 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000, 226 ret = nouveau_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
227 0x2000, args->pushbuf, 227 0x2000, args->pushbuf,
228 (1 << NVDEV_ENGINE_DMAOBJ) | 228 (1ULL << NVDEV_ENGINE_DMAOBJ) |
229 (1 << NVDEV_ENGINE_SW) | 229 (1ULL << NVDEV_ENGINE_SW) |
230 (1 << NVDEV_ENGINE_GR) | 230 (1ULL << NVDEV_ENGINE_GR) |
231 (1 << NVDEV_ENGINE_MPEG) | 231 (1ULL << NVDEV_ENGINE_MPEG) |
232 (1 << NVDEV_ENGINE_ME) | 232 (1ULL << NVDEV_ENGINE_ME) |
233 (1 << NVDEV_ENGINE_VP) | 233 (1ULL << NVDEV_ENGINE_VP) |
234 (1 << NVDEV_ENGINE_CRYPT) | 234 (1ULL << NVDEV_ENGINE_CRYPT) |
235 (1 << NVDEV_ENGINE_BSP) | 235 (1ULL << NVDEV_ENGINE_BSP) |
236 (1 << NVDEV_ENGINE_PPP) | 236 (1ULL << NVDEV_ENGINE_PPP) |
237 (1 << NVDEV_ENGINE_COPY0) | 237 (1ULL << NVDEV_ENGINE_COPY0) |
238 (1 << NVDEV_ENGINE_UNK1C1), &chan); 238 (1ULL << NVDEV_ENGINE_UNK1C1), &chan);
239 *pobject = nv_object(chan); 239 *pobject = nv_object(chan);
240 if (ret) 240 if (ret)
241 return ret; 241 return ret;
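
In both the nv50 and nv84 hunks above, the nv_wo32() writes that zero the engine-context entries move from before the channel kick to after it has been acknowledged, so a context the engine refuses to release (the -EBUSY suspend path) is never left half-torn-down. A hedged sketch of the resulting ordering, with hypothetical helpers standing in for the register kick and the nv_wo32() loop:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the PFIFO kick and the nv_wo32() loop. */
    struct chan { int id; };

    static bool kick_channel(struct chan *c) { (void)c; return true; }
    static void clear_engine_ctx(struct chan *c, int addr)
    {
        printf("clearing ctx of chan %d at 0x%04x\n", c->id, addr);
    }

    /* The ordering the patch establishes: kick first, clear only once
     * the hardware has let go (or we are not suspending). */
    static int detach_engine_ctx(struct chan *chan, int addr, bool suspend)
    {
        int ret = 0;

        if (!kick_channel(chan) && suspend)
            ret = -EBUSY;               /* engine never acked the switch */

        if (ret == 0)
            clear_engine_ctx(chan, addr); /* safe only after the ack */

        return ret;
    }

    int main(void)
    {
        struct chan c = { .id = 3 };
        return detach_engine_ctx(&c, 0x0020, true) ? 1 : 0;
    }
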
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index 6f21be600557..b4365dde1859 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -103,6 +103,9 @@ nvc0_fifo_context_attach(struct nouveau_object *parent,
103 case NVDEV_ENGINE_GR : addr = 0x0210; break; 103 case NVDEV_ENGINE_GR : addr = 0x0210; break;
104 case NVDEV_ENGINE_COPY0: addr = 0x0230; break; 104 case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
105 case NVDEV_ENGINE_COPY1: addr = 0x0240; break; 105 case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
106 case NVDEV_ENGINE_BSP : addr = 0x0270; break;
107 case NVDEV_ENGINE_VP : addr = 0x0250; break;
108 case NVDEV_ENGINE_PPP : addr = 0x0260; break;
106 default: 109 default:
107 return -EINVAL; 110 return -EINVAL;
108 } 111 }
@@ -137,14 +140,13 @@ nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
137 case NVDEV_ENGINE_GR : addr = 0x0210; break; 140 case NVDEV_ENGINE_GR : addr = 0x0210; break;
138 case NVDEV_ENGINE_COPY0: addr = 0x0230; break; 141 case NVDEV_ENGINE_COPY0: addr = 0x0230; break;
139 case NVDEV_ENGINE_COPY1: addr = 0x0240; break; 142 case NVDEV_ENGINE_COPY1: addr = 0x0240; break;
143 case NVDEV_ENGINE_BSP : addr = 0x0270; break;
144 case NVDEV_ENGINE_VP : addr = 0x0250; break;
145 case NVDEV_ENGINE_PPP : addr = 0x0260; break;
140 default: 146 default:
141 return -EINVAL; 147 return -EINVAL;
142 } 148 }
143 149
144 nv_wo32(base, addr + 0x00, 0x00000000);
145 nv_wo32(base, addr + 0x04, 0x00000000);
146 bar->flush(bar);
147
148 nv_wr32(priv, 0x002634, chan->base.chid); 150 nv_wr32(priv, 0x002634, chan->base.chid);
149 if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) { 151 if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
150 nv_error(priv, "channel %d kick timeout\n", chan->base.chid); 152 nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
@@ -152,6 +154,9 @@ nvc0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
152 return -EBUSY; 154 return -EBUSY;
153 } 155 }
154 156
157 nv_wo32(base, addr + 0x00, 0x00000000);
158 nv_wo32(base, addr + 0x04, 0x00000000);
159 bar->flush(bar);
155 return 0; 160 return 0;
156} 161}
157 162
@@ -175,10 +180,13 @@ nvc0_fifo_chan_ctor(struct nouveau_object *parent,
175 ret = nouveau_fifo_channel_create(parent, engine, oclass, 1, 180 ret = nouveau_fifo_channel_create(parent, engine, oclass, 1,
176 priv->user.bar.offset, 0x1000, 181 priv->user.bar.offset, 0x1000,
177 args->pushbuf, 182 args->pushbuf,
178 (1 << NVDEV_ENGINE_SW) | 183 (1ULL << NVDEV_ENGINE_SW) |
179 (1 << NVDEV_ENGINE_GR) | 184 (1ULL << NVDEV_ENGINE_GR) |
180 (1 << NVDEV_ENGINE_COPY0) | 185 (1ULL << NVDEV_ENGINE_COPY0) |
181 (1 << NVDEV_ENGINE_COPY1), &chan); 186 (1ULL << NVDEV_ENGINE_COPY1) |
187 (1ULL << NVDEV_ENGINE_BSP) |
188 (1ULL << NVDEV_ENGINE_VP) |
189 (1ULL << NVDEV_ENGINE_PPP), &chan);
182 *pobject = nv_object(chan); 190 *pobject = nv_object(chan);
183 if (ret) 191 if (ret)
184 return ret; 192 return ret;
@@ -494,7 +502,7 @@ nvc0_fifo_intr(struct nouveau_subdev *subdev)
494 u32 stat = nv_rd32(priv, 0x002100) & mask; 502 u32 stat = nv_rd32(priv, 0x002100) & mask;
495 503
496 if (stat & 0x00000100) { 504 if (stat & 0x00000100) {
497 nv_info(priv, "unknown status 0x00000100\n"); 505 nv_warn(priv, "unknown status 0x00000100\n");
498 nv_wr32(priv, 0x002100, 0x00000100); 506 nv_wr32(priv, 0x002100, 0x00000100);
499 stat &= ~0x00000100; 507 stat &= ~0x00000100;
500 } 508 }
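
The nvc0 detach path above now issues the kick via nv_wr32(priv, 0x002634, chid) and polls with nv_wait() before touching instance memory. nv_wait() is a masked poll with a timeout; a self-contained userspace sketch of that shape, where a fake register variable stands in for real MMIO:

    #include <stdbool.h>
    #include <stdint.h>
    #include <time.h>

    /* Fake register backing store standing in for MMIO; the real driver
     * reads hardware through nv_rd32(). */
    static uint32_t fake_reg = 3;
    static uint32_t mmio_read(uint32_t reg) { (void)reg; return fake_reg; }

    static uint64_t ns_now(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    /* Masked poll with timeout — the shape of nouveau's nv_wait(). */
    static bool wait_reg(uint32_t reg, uint32_t mask, uint32_t val,
                         uint64_t timeout_ns)
    {
        uint64_t start = ns_now();

        while ((mmio_read(reg) & mask) != val)
            if (ns_now() - start > timeout_ns)
                return false;   /* caller logs "kick timeout" */
        return true;
    }

    int main(void)
    {
        /* Channel 3 is already resident, so this succeeds immediately. */
        return wait_reg(0x002634, 0xffffffff, 3, 2000000000ull) ? 0 : 1;
    }
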
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 36e81b6fafbc..c930da99c2c1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -38,12 +38,12 @@
38#include <engine/dmaobj.h> 38#include <engine/dmaobj.h>
39#include <engine/fifo.h> 39#include <engine/fifo.h>
40 40
41#define _(a,b) { (a), ((1 << (a)) | (b)) } 41#define _(a,b) { (a), ((1ULL << (a)) | (b)) }
42static const struct { 42static const struct {
43 int subdev; 43 u64 subdev;
44 u32 mask; 44 u64 mask;
45} fifo_engine[] = { 45} fifo_engine[] = {
46 _(NVDEV_ENGINE_GR , (1 << NVDEV_ENGINE_SW)), 46 _(NVDEV_ENGINE_GR , (1ULL << NVDEV_ENGINE_SW)),
47 _(NVDEV_ENGINE_VP , 0), 47 _(NVDEV_ENGINE_VP , 0),
48 _(NVDEV_ENGINE_PPP , 0), 48 _(NVDEV_ENGINE_PPP , 0),
49 _(NVDEV_ENGINE_BSP , 0), 49 _(NVDEV_ENGINE_BSP , 0),
@@ -138,6 +138,9 @@ nve0_fifo_context_attach(struct nouveau_object *parent,
138 case NVDEV_ENGINE_GR : 138 case NVDEV_ENGINE_GR :
139 case NVDEV_ENGINE_COPY0: 139 case NVDEV_ENGINE_COPY0:
140 case NVDEV_ENGINE_COPY1: addr = 0x0210; break; 140 case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
141 case NVDEV_ENGINE_BSP : addr = 0x0270; break;
142 case NVDEV_ENGINE_VP : addr = 0x0250; break;
143 case NVDEV_ENGINE_PPP : addr = 0x0260; break;
141 default: 144 default:
142 return -EINVAL; 145 return -EINVAL;
143 } 146 }
@@ -172,14 +175,13 @@ nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
172 case NVDEV_ENGINE_GR : 175 case NVDEV_ENGINE_GR :
173 case NVDEV_ENGINE_COPY0: 176 case NVDEV_ENGINE_COPY0:
174 case NVDEV_ENGINE_COPY1: addr = 0x0210; break; 177 case NVDEV_ENGINE_COPY1: addr = 0x0210; break;
178 case NVDEV_ENGINE_BSP : addr = 0x0270; break;
179 case NVDEV_ENGINE_VP : addr = 0x0250; break;
180 case NVDEV_ENGINE_PPP : addr = 0x0260; break;
175 default: 181 default:
176 return -EINVAL; 182 return -EINVAL;
177 } 183 }
178 184
179 nv_wo32(base, addr + 0x00, 0x00000000);
180 nv_wo32(base, addr + 0x04, 0x00000000);
181 bar->flush(bar);
182
183 nv_wr32(priv, 0x002634, chan->base.chid); 185 nv_wr32(priv, 0x002634, chan->base.chid);
184 if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) { 186 if (!nv_wait(priv, 0x002634, 0xffffffff, chan->base.chid)) {
185 nv_error(priv, "channel %d kick timeout\n", chan->base.chid); 187 nv_error(priv, "channel %d kick timeout\n", chan->base.chid);
@@ -187,6 +189,9 @@ nve0_fifo_context_detach(struct nouveau_object *parent, bool suspend,
187 return -EBUSY; 189 return -EBUSY;
188 } 190 }
189 191
192 nv_wo32(base, addr + 0x00, 0x00000000);
193 nv_wo32(base, addr + 0x04, 0x00000000);
194 bar->flush(bar);
190 return 0; 195 return 0;
191} 196}
192 197
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
index 618528248457..e30a9c5ff1fc 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv04.c
@@ -787,168 +787,168 @@ nv01_graph_mthd_bind_chroma(struct nouveau_object *object, u32 mthd,
787 787
788static struct nouveau_omthds 788static struct nouveau_omthds
789nv03_graph_gdi_omthds[] = { 789nv03_graph_gdi_omthds[] = {
790 { 0x0184, nv01_graph_mthd_bind_patt }, 790 { 0x0184, 0x0184, nv01_graph_mthd_bind_patt },
791 { 0x0188, nv04_graph_mthd_bind_rop }, 791 { 0x0188, 0x0188, nv04_graph_mthd_bind_rop },
792 { 0x018c, nv04_graph_mthd_bind_beta1 }, 792 { 0x018c, 0x018c, nv04_graph_mthd_bind_beta1 },
793 { 0x0190, nv04_graph_mthd_bind_surf_dst }, 793 { 0x0190, 0x0190, nv04_graph_mthd_bind_surf_dst },
794 { 0x02fc, nv04_graph_mthd_set_operation }, 794 { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
795 {} 795 {}
796}; 796};
797 797
798static struct nouveau_omthds 798static struct nouveau_omthds
799nv04_graph_gdi_omthds[] = { 799nv04_graph_gdi_omthds[] = {
800 { 0x0188, nv04_graph_mthd_bind_patt }, 800 { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
801 { 0x018c, nv04_graph_mthd_bind_rop }, 801 { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
802 { 0x0190, nv04_graph_mthd_bind_beta1 }, 802 { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
803 { 0x0194, nv04_graph_mthd_bind_beta4 }, 803 { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
804 { 0x0198, nv04_graph_mthd_bind_surf2d }, 804 { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
805 { 0x02fc, nv04_graph_mthd_set_operation }, 805 { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
806 {} 806 {}
807}; 807};
808 808
809static struct nouveau_omthds 809static struct nouveau_omthds
810nv01_graph_blit_omthds[] = { 810nv01_graph_blit_omthds[] = {
811 { 0x0184, nv01_graph_mthd_bind_chroma }, 811 { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
812 { 0x0188, nv01_graph_mthd_bind_clip }, 812 { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
813 { 0x018c, nv01_graph_mthd_bind_patt }, 813 { 0x018c, 0x018c, nv01_graph_mthd_bind_patt },
814 { 0x0190, nv04_graph_mthd_bind_rop }, 814 { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
815 { 0x0194, nv04_graph_mthd_bind_beta1 }, 815 { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
816 { 0x0198, nv04_graph_mthd_bind_surf_dst }, 816 { 0x0198, 0x0198, nv04_graph_mthd_bind_surf_dst },
817 { 0x019c, nv04_graph_mthd_bind_surf_src }, 817 { 0x019c, 0x019c, nv04_graph_mthd_bind_surf_src },
818 { 0x02fc, nv04_graph_mthd_set_operation }, 818 { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
819 {} 819 {}
820}; 820};
821 821
822static struct nouveau_omthds 822static struct nouveau_omthds
823nv04_graph_blit_omthds[] = { 823nv04_graph_blit_omthds[] = {
824 { 0x0184, nv01_graph_mthd_bind_chroma }, 824 { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
825 { 0x0188, nv01_graph_mthd_bind_clip }, 825 { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
826 { 0x018c, nv04_graph_mthd_bind_patt }, 826 { 0x018c, 0x018c, nv04_graph_mthd_bind_patt },
827 { 0x0190, nv04_graph_mthd_bind_rop }, 827 { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
828 { 0x0194, nv04_graph_mthd_bind_beta1 }, 828 { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
829 { 0x0198, nv04_graph_mthd_bind_beta4 }, 829 { 0x0198, 0x0198, nv04_graph_mthd_bind_beta4 },
830 { 0x019c, nv04_graph_mthd_bind_surf2d }, 830 { 0x019c, 0x019c, nv04_graph_mthd_bind_surf2d },
831 { 0x02fc, nv04_graph_mthd_set_operation }, 831 { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
832 {} 832 {}
833}; 833};
834 834
835static struct nouveau_omthds 835static struct nouveau_omthds
836nv04_graph_iifc_omthds[] = { 836nv04_graph_iifc_omthds[] = {
837 { 0x0188, nv01_graph_mthd_bind_chroma }, 837 { 0x0188, 0x0188, nv01_graph_mthd_bind_chroma },
838 { 0x018c, nv01_graph_mthd_bind_clip }, 838 { 0x018c, 0x018c, nv01_graph_mthd_bind_clip },
839 { 0x0190, nv04_graph_mthd_bind_patt }, 839 { 0x0190, 0x0190, nv04_graph_mthd_bind_patt },
840 { 0x0194, nv04_graph_mthd_bind_rop }, 840 { 0x0194, 0x0194, nv04_graph_mthd_bind_rop },
841 { 0x0198, nv04_graph_mthd_bind_beta1 }, 841 { 0x0198, 0x0198, nv04_graph_mthd_bind_beta1 },
842 { 0x019c, nv04_graph_mthd_bind_beta4 }, 842 { 0x019c, 0x019c, nv04_graph_mthd_bind_beta4 },
843 { 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf }, 843 { 0x01a0, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf },
844 { 0x03e4, nv04_graph_mthd_set_operation }, 844 { 0x03e4, 0x03e4, nv04_graph_mthd_set_operation },
845 {} 845 {}
846}; 846};
847 847
848static struct nouveau_omthds 848static struct nouveau_omthds
849nv01_graph_ifc_omthds[] = { 849nv01_graph_ifc_omthds[] = {
850 { 0x0184, nv01_graph_mthd_bind_chroma }, 850 { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
851 { 0x0188, nv01_graph_mthd_bind_clip }, 851 { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
852 { 0x018c, nv01_graph_mthd_bind_patt }, 852 { 0x018c, 0x018c, nv01_graph_mthd_bind_patt },
853 { 0x0190, nv04_graph_mthd_bind_rop }, 853 { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
854 { 0x0194, nv04_graph_mthd_bind_beta1 }, 854 { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
855 { 0x0198, nv04_graph_mthd_bind_surf_dst }, 855 { 0x0198, 0x0198, nv04_graph_mthd_bind_surf_dst },
856 { 0x02fc, nv04_graph_mthd_set_operation }, 856 { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
857 {} 857 {}
858}; 858};
859 859
860static struct nouveau_omthds 860static struct nouveau_omthds
861nv04_graph_ifc_omthds[] = { 861nv04_graph_ifc_omthds[] = {
862 { 0x0184, nv01_graph_mthd_bind_chroma }, 862 { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
863 { 0x0188, nv01_graph_mthd_bind_clip }, 863 { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
864 { 0x018c, nv04_graph_mthd_bind_patt }, 864 { 0x018c, 0x018c, nv04_graph_mthd_bind_patt },
865 { 0x0190, nv04_graph_mthd_bind_rop }, 865 { 0x0190, 0x0190, nv04_graph_mthd_bind_rop },
866 { 0x0194, nv04_graph_mthd_bind_beta1 }, 866 { 0x0194, 0x0194, nv04_graph_mthd_bind_beta1 },
867 { 0x0198, nv04_graph_mthd_bind_beta4 }, 867 { 0x0198, 0x0198, nv04_graph_mthd_bind_beta4 },
868 { 0x019c, nv04_graph_mthd_bind_surf2d }, 868 { 0x019c, 0x019c, nv04_graph_mthd_bind_surf2d },
869 { 0x02fc, nv04_graph_mthd_set_operation }, 869 { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
870 {} 870 {}
871}; 871};
872 872
873static struct nouveau_omthds 873static struct nouveau_omthds
874nv03_graph_sifc_omthds[] = { 874nv03_graph_sifc_omthds[] = {
875 { 0x0184, nv01_graph_mthd_bind_chroma }, 875 { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
876 { 0x0188, nv01_graph_mthd_bind_patt }, 876 { 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
877 { 0x018c, nv04_graph_mthd_bind_rop }, 877 { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
878 { 0x0190, nv04_graph_mthd_bind_beta1 }, 878 { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
879 { 0x0194, nv04_graph_mthd_bind_surf_dst }, 879 { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
880 { 0x02fc, nv04_graph_mthd_set_operation }, 880 { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
881 {} 881 {}
882}; 882};
883 883
884static struct nouveau_omthds 884static struct nouveau_omthds
885nv04_graph_sifc_omthds[] = { 885nv04_graph_sifc_omthds[] = {
886 { 0x0184, nv01_graph_mthd_bind_chroma }, 886 { 0x0184, 0x0184, nv01_graph_mthd_bind_chroma },
887 { 0x0188, nv04_graph_mthd_bind_patt }, 887 { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
888 { 0x018c, nv04_graph_mthd_bind_rop }, 888 { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
889 { 0x0190, nv04_graph_mthd_bind_beta1 }, 889 { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
890 { 0x0194, nv04_graph_mthd_bind_beta4 }, 890 { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
891 { 0x0198, nv04_graph_mthd_bind_surf2d }, 891 { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
892 { 0x02fc, nv04_graph_mthd_set_operation }, 892 { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
893 {} 893 {}
894}; 894};
895 895
896static struct nouveau_omthds 896static struct nouveau_omthds
897nv03_graph_sifm_omthds[] = { 897nv03_graph_sifm_omthds[] = {
898 { 0x0188, nv01_graph_mthd_bind_patt }, 898 { 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
899 { 0x018c, nv04_graph_mthd_bind_rop }, 899 { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
900 { 0x0190, nv04_graph_mthd_bind_beta1 }, 900 { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
901 { 0x0194, nv04_graph_mthd_bind_surf_dst }, 901 { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
902 { 0x0304, nv04_graph_mthd_set_operation }, 902 { 0x0304, 0x0304, nv04_graph_mthd_set_operation },
903 {} 903 {}
904}; 904};
905 905
906static struct nouveau_omthds 906static struct nouveau_omthds
907nv04_graph_sifm_omthds[] = { 907nv04_graph_sifm_omthds[] = {
908 { 0x0188, nv04_graph_mthd_bind_patt }, 908 { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
909 { 0x018c, nv04_graph_mthd_bind_rop }, 909 { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
910 { 0x0190, nv04_graph_mthd_bind_beta1 }, 910 { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
911 { 0x0194, nv04_graph_mthd_bind_beta4 }, 911 { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
912 { 0x0198, nv04_graph_mthd_bind_surf2d }, 912 { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
913 { 0x0304, nv04_graph_mthd_set_operation }, 913 { 0x0304, 0x0304, nv04_graph_mthd_set_operation },
914 {} 914 {}
915}; 915};
916 916
917static struct nouveau_omthds 917static struct nouveau_omthds
918nv04_graph_surf3d_omthds[] = { 918nv04_graph_surf3d_omthds[] = {
919 { 0x02f8, nv04_graph_mthd_surf3d_clip_h }, 919 { 0x02f8, 0x02f8, nv04_graph_mthd_surf3d_clip_h },
920 { 0x02fc, nv04_graph_mthd_surf3d_clip_v }, 920 { 0x02fc, 0x02fc, nv04_graph_mthd_surf3d_clip_v },
921 {} 921 {}
922}; 922};
923 923
924static struct nouveau_omthds 924static struct nouveau_omthds
925nv03_graph_ttri_omthds[] = { 925nv03_graph_ttri_omthds[] = {
926 { 0x0188, nv01_graph_mthd_bind_clip }, 926 { 0x0188, 0x0188, nv01_graph_mthd_bind_clip },
927 { 0x018c, nv04_graph_mthd_bind_surf_color }, 927 { 0x018c, 0x018c, nv04_graph_mthd_bind_surf_color },
928 { 0x0190, nv04_graph_mthd_bind_surf_zeta }, 928 { 0x0190, 0x0190, nv04_graph_mthd_bind_surf_zeta },
929 {} 929 {}
930}; 930};
931 931
932static struct nouveau_omthds 932static struct nouveau_omthds
933nv01_graph_prim_omthds[] = { 933nv01_graph_prim_omthds[] = {
934 { 0x0184, nv01_graph_mthd_bind_clip }, 934 { 0x0184, 0x0184, nv01_graph_mthd_bind_clip },
935 { 0x0188, nv01_graph_mthd_bind_patt }, 935 { 0x0188, 0x0188, nv01_graph_mthd_bind_patt },
936 { 0x018c, nv04_graph_mthd_bind_rop }, 936 { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
937 { 0x0190, nv04_graph_mthd_bind_beta1 }, 937 { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
938 { 0x0194, nv04_graph_mthd_bind_surf_dst }, 938 { 0x0194, 0x0194, nv04_graph_mthd_bind_surf_dst },
939 { 0x02fc, nv04_graph_mthd_set_operation }, 939 { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
940 {} 940 {}
941}; 941};
942 942
943static struct nouveau_omthds 943static struct nouveau_omthds
944nv04_graph_prim_omthds[] = { 944nv04_graph_prim_omthds[] = {
945 { 0x0184, nv01_graph_mthd_bind_clip }, 945 { 0x0184, 0x0184, nv01_graph_mthd_bind_clip },
946 { 0x0188, nv04_graph_mthd_bind_patt }, 946 { 0x0188, 0x0188, nv04_graph_mthd_bind_patt },
947 { 0x018c, nv04_graph_mthd_bind_rop }, 947 { 0x018c, 0x018c, nv04_graph_mthd_bind_rop },
948 { 0x0190, nv04_graph_mthd_bind_beta1 }, 948 { 0x0190, 0x0190, nv04_graph_mthd_bind_beta1 },
949 { 0x0194, nv04_graph_mthd_bind_beta4 }, 949 { 0x0194, 0x0194, nv04_graph_mthd_bind_beta4 },
950 { 0x0198, nv04_graph_mthd_bind_surf2d }, 950 { 0x0198, 0x0198, nv04_graph_mthd_bind_surf2d },
951 { 0x02fc, nv04_graph_mthd_set_operation }, 951 { 0x02fc, 0x02fc, nv04_graph_mthd_set_operation },
952 {} 952 {}
953}; 953};
954 954
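
Every nouveau_omthds entry in the tables above grows a second offset, turning single-method bindings into inclusive method ranges (here each range happens to cover exactly one method). A sketch of what the range dispatch plausibly looks like; the field names are hypothetical, only the two-offset shape is taken from the tables:

    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical method-range table entry: one handler now covers the
     * inclusive range [start, limit] instead of a single method. */
    struct omthd {
        unsigned start;
        unsigned limit;
        int (*call)(unsigned mthd, unsigned data);
    };

    static int bind_rop(unsigned mthd, unsigned data)
    {
        printf("bind_rop: mthd 0x%04x data 0x%08x\n", mthd, data);
        return 0;
    }

    static const struct omthd methods[] = {
        { 0x0188, 0x0188, bind_rop },
        { 0 }
    };

    static int dispatch(unsigned mthd, unsigned data)
    {
        for (size_t i = 0; methods[i].call; i++)
            if (mthd >= methods[i].start && mthd <= methods[i].limit)
                return methods[i].call(mthd, data);
        return -1; /* unhandled method */
    }

    int main(void) { return dispatch(0x0188, 0xdeadbeef); }
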
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
index 92521c89e77f..5c0f843ea249 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv10.c
@@ -570,11 +570,11 @@ nv17_graph_mthd_lma_enable(struct nouveau_object *object, u32 mthd,
570 570
571static struct nouveau_omthds 571static struct nouveau_omthds
572nv17_celcius_omthds[] = { 572nv17_celcius_omthds[] = {
573 { 0x1638, nv17_graph_mthd_lma_window }, 573 { 0x1638, 0x1638, nv17_graph_mthd_lma_window },
574 { 0x163c, nv17_graph_mthd_lma_window }, 574 { 0x163c, 0x163c, nv17_graph_mthd_lma_window },
575 { 0x1640, nv17_graph_mthd_lma_window }, 575 { 0x1640, 0x1640, nv17_graph_mthd_lma_window },
576 { 0x1644, nv17_graph_mthd_lma_window }, 576 { 0x1644, 0x1644, nv17_graph_mthd_lma_window },
577 { 0x1658, nv17_graph_mthd_lma_enable }, 577 { 0x1658, 0x1658, nv17_graph_mthd_lma_enable },
578 {} 578 {}
579}; 579};
580 580
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
index 8f3f619c4a78..5b20401bf911 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv20.c
@@ -183,7 +183,7 @@ nv20_graph_tile_prog(struct nouveau_engine *engine, int i)
183 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i); 183 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
184 nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->addr); 184 nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->addr);
185 185
186 if (nv_device(engine)->card_type == NV_20) { 186 if (nv_device(engine)->chipset != 0x34) {
187 nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp); 187 nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
188 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i); 188 nv_wr32(priv, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
189 nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->zcomp); 189 nv_wr32(priv, NV10_PGRAPH_RDI_DATA, tile->zcomp);
@@ -224,14 +224,14 @@ nv20_graph_intr(struct nouveau_subdev *subdev)
224 nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001); 224 nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
225 225
226 if (show) { 226 if (show) {
227 nv_info(priv, ""); 227 nv_error(priv, "");
228 nouveau_bitfield_print(nv10_graph_intr_name, show); 228 nouveau_bitfield_print(nv10_graph_intr_name, show);
229 printk(" nsource:"); 229 printk(" nsource:");
230 nouveau_bitfield_print(nv04_graph_nsource, nsource); 230 nouveau_bitfield_print(nv04_graph_nsource, nsource);
231 printk(" nstatus:"); 231 printk(" nstatus:");
232 nouveau_bitfield_print(nv10_graph_nstatus, nstatus); 232 nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
233 printk("\n"); 233 printk("\n");
234 nv_info(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n", 234 nv_error(priv, "ch %d/%d class 0x%04x mthd 0x%04x data 0x%08x\n",
235 chid, subc, class, mthd, data); 235 chid, subc, class, mthd, data);
236 } 236 }
237 237
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
index cc6574eeb80e..0b36dd3deebd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
@@ -216,10 +216,10 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
216 216
217 switch (nv_device(priv)->chipset) { 217 switch (nv_device(priv)->chipset) {
218 case 0x40: 218 case 0x40:
219 case 0x41: /* guess */ 219 case 0x41:
220 case 0x42: 220 case 0x42:
221 case 0x43: 221 case 0x43:
222 case 0x45: /* guess */ 222 case 0x45:
223 case 0x4e: 223 case 0x4e:
224 nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch); 224 nv_wr32(priv, NV20_PGRAPH_TSIZE(i), tile->pitch);
225 nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit); 225 nv_wr32(priv, NV20_PGRAPH_TLIMIT(i), tile->limit);
@@ -227,6 +227,21 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
227 nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch); 227 nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
228 nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit); 228 nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
229 nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr); 229 nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
230 switch (nv_device(priv)->chipset) {
231 case 0x40:
232 case 0x45:
233 nv_wr32(priv, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
234 nv_wr32(priv, NV40_PGRAPH_ZCOMP1(i), tile->zcomp);
235 break;
236 case 0x41:
237 case 0x42:
238 case 0x43:
239 nv_wr32(priv, NV41_PGRAPH_ZCOMP0(i), tile->zcomp);
240 nv_wr32(priv, NV41_PGRAPH_ZCOMP1(i), tile->zcomp);
241 break;
242 default:
243 break;
244 }
230 break; 245 break;
231 case 0x44: 246 case 0x44:
232 case 0x4a: 247 case 0x4a:
@@ -235,18 +250,31 @@ nv40_graph_tile_prog(struct nouveau_engine *engine, int i)
235 nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr); 250 nv_wr32(priv, NV20_PGRAPH_TILE(i), tile->addr);
236 break; 251 break;
237 case 0x46: 252 case 0x46:
253 case 0x4c:
238 case 0x47: 254 case 0x47:
239 case 0x49: 255 case 0x49:
240 case 0x4b: 256 case 0x4b:
241 case 0x4c: 257 case 0x63:
242 case 0x67: 258 case 0x67:
243 default: 259 case 0x68:
244 nv_wr32(priv, NV47_PGRAPH_TSIZE(i), tile->pitch); 260 nv_wr32(priv, NV47_PGRAPH_TSIZE(i), tile->pitch);
245 nv_wr32(priv, NV47_PGRAPH_TLIMIT(i), tile->limit); 261 nv_wr32(priv, NV47_PGRAPH_TLIMIT(i), tile->limit);
246 nv_wr32(priv, NV47_PGRAPH_TILE(i), tile->addr); 262 nv_wr32(priv, NV47_PGRAPH_TILE(i), tile->addr);
247 nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch); 263 nv_wr32(priv, NV40_PGRAPH_TSIZE1(i), tile->pitch);
248 nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit); 264 nv_wr32(priv, NV40_PGRAPH_TLIMIT1(i), tile->limit);
249 nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr); 265 nv_wr32(priv, NV40_PGRAPH_TILE1(i), tile->addr);
266 switch (nv_device(priv)->chipset) {
267 case 0x47:
268 case 0x49:
269 case 0x4b:
270 nv_wr32(priv, NV47_PGRAPH_ZCOMP0(i), tile->zcomp);
271 nv_wr32(priv, NV47_PGRAPH_ZCOMP1(i), tile->zcomp);
272 break;
273 default:
274 break;
275 }
276 break;
277 default:
250 break; 278 break;
251 } 279 }
252 280
@@ -293,7 +321,7 @@ nv40_graph_intr(struct nouveau_subdev *subdev)
293 nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001); 321 nv_wr32(priv, NV04_PGRAPH_FIFO, 0x00000001);
294 322
295 if (show) { 323 if (show) {
296 nv_info(priv, ""); 324 nv_error(priv, "");
297 nouveau_bitfield_print(nv10_graph_intr_name, show); 325 nouveau_bitfield_print(nv10_graph_intr_name, show);
298 printk(" nsource:"); 326 printk(" nsource:");
299 nouveau_bitfield_print(nv04_graph_nsource, nsource); 327 nouveau_bitfield_print(nv04_graph_nsource, nsource);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index ab3b9dcaf478..b1c3d835b4c2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -184,6 +184,65 @@ nv50_graph_tlb_flush(struct nouveau_engine *engine)
184 return 0; 184 return 0;
185} 185}
186 186
187static const struct nouveau_bitfield nv50_pgraph_status[] = {
188 { 0x00000001, "BUSY" }, /* set when any bit is set */
189 { 0x00000002, "DISPATCH" },
190 { 0x00000004, "UNK2" },
191 { 0x00000008, "UNK3" },
192 { 0x00000010, "UNK4" },
193 { 0x00000020, "UNK5" },
194 { 0x00000040, "M2MF" },
195 { 0x00000080, "UNK7" },
196 { 0x00000100, "CTXPROG" },
197 { 0x00000200, "VFETCH" },
198 { 0x00000400, "CCACHE_UNK4" },
199 { 0x00000800, "STRMOUT_GSCHED_UNK5" },
200 { 0x00001000, "UNK14XX" },
201 { 0x00002000, "UNK24XX_CSCHED" },
202 { 0x00004000, "UNK1CXX" },
203 { 0x00008000, "CLIPID" },
204 { 0x00010000, "ZCULL" },
205 { 0x00020000, "ENG2D" },
206 { 0x00040000, "UNK34XX" },
207 { 0x00080000, "TPRAST" },
208 { 0x00100000, "TPROP" },
209 { 0x00200000, "TEX" },
210 { 0x00400000, "TPVP" },
211 { 0x00800000, "MP" },
212 { 0x01000000, "ROP" },
213 {}
214};
215
216static const char *const nv50_pgraph_vstatus_0[] = {
217 "VFETCH", "CCACHE", "UNK4", "UNK5", "GSCHED", "STRMOUT", "UNK14XX", NULL
218};
219
220static const char *const nv50_pgraph_vstatus_1[] = {
221 "TPRAST", "TPROP", "TEXTURE", "TPVP", "MP", NULL
222};
223
224static const char *const nv50_pgraph_vstatus_2[] = {
225 "UNK24XX", "CSCHED", "UNK1CXX", "CLIPID", "ZCULL", "ENG2D", "UNK34XX",
226 "ROP", NULL
227};
228
229static void nouveau_pgraph_vstatus_print(struct nv50_graph_priv *priv, int r,
230 const char *const units[], u32 status)
231{
232 int i;
233
234 nv_error(priv, "PGRAPH_VSTATUS%d: 0x%08x", r, status);
235
236 for (i = 0; units[i] && status; i++) {
237 if ((status & 7) == 1)
238 pr_cont(" %s", units[i]);
239 status >>= 3;
240 }
241 if (status)
242 pr_cont(" (invalid: 0x%x)", status);
243 pr_cont("\n");
244}
245
187static int 246static int
188nv84_graph_tlb_flush(struct nouveau_engine *engine) 247nv84_graph_tlb_flush(struct nouveau_engine *engine)
189{ 248{
@@ -219,10 +278,19 @@ nv84_graph_tlb_flush(struct nouveau_engine *engine)
219 !(timeout = ptimer->read(ptimer) - start > 2000000000)); 278 !(timeout = ptimer->read(ptimer) - start > 2000000000));
220 279
221 if (timeout) { 280 if (timeout) {
222 nv_error(priv, "PGRAPH TLB flush idle timeout fail: " 281 nv_error(priv, "PGRAPH TLB flush idle timeout fail\n");
223 "0x%08x 0x%08x 0x%08x 0x%08x\n", 282
224 nv_rd32(priv, 0x400700), nv_rd32(priv, 0x400380), 283 tmp = nv_rd32(priv, 0x400700);
225 nv_rd32(priv, 0x400384), nv_rd32(priv, 0x400388)); 284 nv_error(priv, "PGRAPH_STATUS : 0x%08x", tmp);
285 nouveau_bitfield_print(nv50_pgraph_status, tmp);
286 pr_cont("\n");
287
288 nouveau_pgraph_vstatus_print(priv, 0, nv50_pgraph_vstatus_0,
289 nv_rd32(priv, 0x400380));
290 nouveau_pgraph_vstatus_print(priv, 1, nv50_pgraph_vstatus_1,
291 nv_rd32(priv, 0x400384));
292 nouveau_pgraph_vstatus_print(priv, 2, nv50_pgraph_vstatus_2,
293 nv_rd32(priv, 0x400388));
226 } 294 }
227 295
228 nv50_vm_flush_engine(&engine->base, 0x00); 296 nv50_vm_flush_engine(&engine->base, 0x00);
@@ -453,13 +521,13 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
453 } 521 }
454 if (ustatus) { 522 if (ustatus) {
455 if (display) 523 if (display)
456 nv_info(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus); 524 nv_error(priv, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
457 } 525 }
458 nv_wr32(priv, ustatus_addr, 0xc0000000); 526 nv_wr32(priv, ustatus_addr, 0xc0000000);
459 } 527 }
460 528
461 if (!tps && display) 529 if (!tps && display)
462 nv_info(priv, "%s - No TPs claiming errors?\n", name); 530 nv_warn(priv, "%s - No TPs claiming errors?\n", name);
463} 531}
464 532
465static int 533static int
@@ -718,13 +786,12 @@ nv50_graph_intr(struct nouveau_subdev *subdev)
718 nv_wr32(priv, 0x400500, 0x00010001); 786 nv_wr32(priv, 0x400500, 0x00010001);
719 787
720 if (show) { 788 if (show) {
721 nv_info(priv, ""); 789 nv_error(priv, "");
722 nouveau_bitfield_print(nv50_graph_intr_name, show); 790 nouveau_bitfield_print(nv50_graph_intr_name, show);
723 printk("\n"); 791 printk("\n");
724 nv_error(priv, "ch %d [0x%010llx] subc %d class 0x%04x " 792 nv_error(priv, "ch %d [0x%010llx] subc %d class 0x%04x "
725 "mthd 0x%04x data 0x%08x\n", 793 "mthd 0x%04x data 0x%08x\n",
726 chid, (u64)inst << 12, subc, class, mthd, data); 794 chid, (u64)inst << 12, subc, class, mthd, data);
727 nv50_fb_trap(nouveau_fb(priv), 1);
728 } 795 }
729 796
730 if (nv_rd32(priv, 0x400824) & (1 << 31)) 797 if (nv_rd32(priv, 0x400824) & (1 << 31))
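
The new nouveau_pgraph_vstatus_print() above treats each PGRAPH_VSTATUSn word as a sequence of 3-bit per-unit fields, naming a unit when its field reads 1 and flagging leftover bits as invalid. The same walk as a standalone program, run on a made-up sample value:

    #include <stdio.h>

    /* Standalone version of the 3-bit-per-unit VSTATUS walk; the sample
     * value below is invented for illustration. */
    static const char *const units[] = {
        "VFETCH", "CCACHE", "UNK4", "UNK5", "GSCHED", "STRMOUT",
        "UNK14XX", NULL
    };

    int main(void)
    {
        unsigned status = 0x9;  /* fields 0 and 1 both read 1 */
        printf("PGRAPH_VSTATUS0: 0x%08x", status);

        for (int i = 0; units[i] && status; i++) {
            if ((status & 7) == 1)
                printf(" %s", units[i]);
            status >>= 3;
        }
        if (status)
            printf(" (invalid: 0x%x)", status);
        printf("\n");   /* prints: ... 0x00000009 VFETCH CCACHE */
        return 0;
    }
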
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index c62f2d0f5f0a..47a02081d708 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -814,7 +814,7 @@ nvc0_graph_init_ctxctl(struct nvc0_graph_priv *priv)
814 nv_wr32(priv, 0x41a100, 0x00000002); 814 nv_wr32(priv, 0x41a100, 0x00000002);
815 nv_wr32(priv, 0x409100, 0x00000002); 815 nv_wr32(priv, 0x409100, 0x00000002);
816 if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001)) 816 if (!nv_wait(priv, 0x409800, 0x00000001, 0x00000001))
817 nv_info(priv, "0x409800 wait failed\n"); 817 nv_warn(priv, "0x409800 wait failed\n");
818 818
819 nv_wr32(priv, 0x409840, 0xffffffff); 819 nv_wr32(priv, 0x409840, 0xffffffff);
820 nv_wr32(priv, 0x409500, 0x7fffffff); 820 nv_wr32(priv, 0x409500, 0x7fffffff);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/regs.h b/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
index 9c715a25cecb..fde8e24415e4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/regs.h
@@ -205,6 +205,7 @@
205#define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16)) 205#define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16))
206#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16)) 206#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16))
207#define NV20_PGRAPH_ZCOMP(i) (0x00400980 + 4*(i)) 207#define NV20_PGRAPH_ZCOMP(i) (0x00400980 + 4*(i))
208#define NV41_PGRAPH_ZCOMP0(i) (0x004009c0 + 4*(i))
208#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16)) 209#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16))
209#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16)) 210#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16))
210#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16)) 211#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16))
@@ -216,6 +217,7 @@
216#define NV47_PGRAPH_TSTATUS(i) (0x00400D0C + (i*16)) 217#define NV47_PGRAPH_TSTATUS(i) (0x00400D0C + (i*16))
217#define NV04_PGRAPH_V_RAM 0x00400D40 218#define NV04_PGRAPH_V_RAM 0x00400D40
218#define NV04_PGRAPH_W_RAM 0x00400D80 219#define NV04_PGRAPH_W_RAM 0x00400D80
220#define NV47_PGRAPH_ZCOMP0(i) (0x00400e00 + 4*(i))
219#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40 221#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40
220#define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44 222#define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44
221#define NV10_PGRAPH_COMBINER0_IN_RGB 0x00400E48 223#define NV10_PGRAPH_COMBINER0_IN_RGB 0x00400E48
@@ -261,9 +263,12 @@
261#define NV04_PGRAPH_DMA_B_OFFSET 0x00401098 263#define NV04_PGRAPH_DMA_B_OFFSET 0x00401098
262#define NV04_PGRAPH_DMA_B_SIZE 0x0040109C 264#define NV04_PGRAPH_DMA_B_SIZE 0x0040109C
263#define NV04_PGRAPH_DMA_B_Y_SIZE 0x004010A0 265#define NV04_PGRAPH_DMA_B_Y_SIZE 0x004010A0
266#define NV47_PGRAPH_ZCOMP1(i) (0x004068c0 + 4*(i))
264#define NV40_PGRAPH_TILE1(i) (0x00406900 + (i*16)) 267#define NV40_PGRAPH_TILE1(i) (0x00406900 + (i*16))
265#define NV40_PGRAPH_TLIMIT1(i) (0x00406904 + (i*16)) 268#define NV40_PGRAPH_TLIMIT1(i) (0x00406904 + (i*16))
266#define NV40_PGRAPH_TSIZE1(i) (0x00406908 + (i*16)) 269#define NV40_PGRAPH_TSIZE1(i) (0x00406908 + (i*16))
267#define NV40_PGRAPH_TSTATUS1(i) (0x0040690C + (i*16)) 270#define NV40_PGRAPH_TSTATUS1(i) (0x0040690C + (i*16))
271#define NV40_PGRAPH_ZCOMP1(i) (0x00406980 + 4*(i))
272#define NV41_PGRAPH_ZCOMP1(i) (0x004069c0 + 4*(i))
268 273
269#endif 274#endif
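
The new ZCOMP macros follow the two indexing conventions already used in regs.h: a 4-byte stride for per-region words (4*(i)) and a 16-byte stride for the tile register banks ((i*16)). A two-macro sketch that prints the computed addresses:

    #include <stdio.h>

    /* Indexed-register conventions copied from regs.h above. */
    #define NV41_PGRAPH_ZCOMP0(i) (0x004009c0 + 4 * (i))
    #define NV40_PGRAPH_TILE1(i)  (0x00406900 + (i) * 16)

    int main(void)
    {
        for (int i = 0; i < 3; i++)
            printf("ZCOMP0(%d)=0x%08x TILE1(%d)=0x%08x\n",
                   i, (unsigned)NV41_PGRAPH_ZCOMP0(i),
                   i, (unsigned)NV40_PGRAPH_TILE1(i));
        return 0;
    }
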
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
index 1f394a2629e7..9fd86375f4c4 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -121,9 +121,9 @@ nv31_mpeg_ofuncs = {
121 121
122static struct nouveau_omthds 122static struct nouveau_omthds
123nv31_mpeg_omthds[] = { 123nv31_mpeg_omthds[] = {
124 { 0x0190, nv31_mpeg_mthd_dma }, 124 { 0x0190, 0x0190, nv31_mpeg_mthd_dma },
125 { 0x01a0, nv31_mpeg_mthd_dma }, 125 { 0x01a0, 0x01a0, nv31_mpeg_mthd_dma },
126 { 0x01b0, nv31_mpeg_mthd_dma }, 126 { 0x01b0, 0x01b0, nv31_mpeg_mthd_dma },
127 {} 127 {}
128}; 128};
129 129
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
index 8678a9996d57..bc7d12b30fc1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv50.c
@@ -157,7 +157,6 @@ nv50_mpeg_intr(struct nouveau_subdev *subdev)
157 157
158 nv_wr32(priv, 0x00b100, stat); 158 nv_wr32(priv, 0x00b100, stat);
159 nv_wr32(priv, 0x00b230, 0x00000001); 159 nv_wr32(priv, 0x00b230, 0x00000001);
160 nv50_fb_trap(nouveau_fb(priv), 1);
161} 160}
162 161
163static void 162static void
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
index 50e7e0da1981..5a5b2a773ed7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
@@ -22,18 +22,18 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/os.h> 25#include <core/engine.h>
26#include <core/class.h>
27#include <core/engctx.h> 26#include <core/engctx.h>
27#include <core/class.h>
28 28
29#include <engine/ppp.h> 29#include <engine/ppp.h>
30 30
31struct nv98_ppp_priv { 31struct nv98_ppp_priv {
32 struct nouveau_ppp base; 32 struct nouveau_engine base;
33}; 33};
34 34
35struct nv98_ppp_chan { 35struct nv98_ppp_chan {
36 struct nouveau_ppp_chan base; 36 struct nouveau_engctx base;
37}; 37};
38 38
39/******************************************************************************* 39/*******************************************************************************
@@ -49,61 +49,16 @@ nv98_ppp_sclass[] = {
49 * PPPP context 49 * PPPP context
50 ******************************************************************************/ 50 ******************************************************************************/
51 51
52static int
53nv98_ppp_context_ctor(struct nouveau_object *parent,
54 struct nouveau_object *engine,
55 struct nouveau_oclass *oclass, void *data, u32 size,
56 struct nouveau_object **pobject)
57{
58 struct nv98_ppp_chan *priv;
59 int ret;
60
61 ret = nouveau_ppp_context_create(parent, engine, oclass, NULL,
62 0, 0, 0, &priv);
63 *pobject = nv_object(priv);
64 if (ret)
65 return ret;
66
67 return 0;
68}
69
70static void
71nv98_ppp_context_dtor(struct nouveau_object *object)
72{
73 struct nv98_ppp_chan *priv = (void *)object;
74 nouveau_ppp_context_destroy(&priv->base);
75}
76
77static int
78nv98_ppp_context_init(struct nouveau_object *object)
79{
80 struct nv98_ppp_chan *priv = (void *)object;
81 int ret;
82
83 ret = nouveau_ppp_context_init(&priv->base);
84 if (ret)
85 return ret;
86
87 return 0;
88}
89
90static int
91nv98_ppp_context_fini(struct nouveau_object *object, bool suspend)
92{
93 struct nv98_ppp_chan *priv = (void *)object;
94 return nouveau_ppp_context_fini(&priv->base, suspend);
95}
96
97static struct nouveau_oclass 52static struct nouveau_oclass
98nv98_ppp_cclass = { 53nv98_ppp_cclass = {
99 .handle = NV_ENGCTX(PPP, 0x98), 54 .handle = NV_ENGCTX(PPP, 0x98),
100 .ofuncs = &(struct nouveau_ofuncs) { 55 .ofuncs = &(struct nouveau_ofuncs) {
101 .ctor = nv98_ppp_context_ctor, 56 .ctor = _nouveau_engctx_ctor,
102 .dtor = nv98_ppp_context_dtor, 57 .dtor = _nouveau_engctx_dtor,
103 .init = nv98_ppp_context_init, 58 .init = _nouveau_engctx_init,
104 .fini = nv98_ppp_context_fini, 59 .fini = _nouveau_engctx_fini,
105 .rd32 = _nouveau_ppp_context_rd32, 60 .rd32 = _nouveau_engctx_rd32,
106 .wr32 = _nouveau_ppp_context_wr32, 61 .wr32 = _nouveau_engctx_wr32,
107 }, 62 },
108}; 63};
109 64
@@ -111,11 +66,6 @@ nv98_ppp_cclass = {
111 * PPPP engine/subdev functions 66 * PPPP engine/subdev functions
112 ******************************************************************************/ 67 ******************************************************************************/
113 68
114static void
115nv98_ppp_intr(struct nouveau_subdev *subdev)
116{
117}
118
119static int 69static int
120nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 70nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
121 struct nouveau_oclass *oclass, void *data, u32 size, 71 struct nouveau_oclass *oclass, void *data, u32 size,
@@ -124,52 +74,25 @@ nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
124 struct nv98_ppp_priv *priv; 74 struct nv98_ppp_priv *priv;
125 int ret; 75 int ret;
126 76
127 ret = nouveau_ppp_create(parent, engine, oclass, &priv); 77 ret = nouveau_engine_create(parent, engine, oclass, true,
78 "PPPP", "ppp", &priv);
128 *pobject = nv_object(priv); 79 *pobject = nv_object(priv);
129 if (ret) 80 if (ret)
130 return ret; 81 return ret;
131 82
132 nv_subdev(priv)->unit = 0x00400002; 83 nv_subdev(priv)->unit = 0x00400002;
133 nv_subdev(priv)->intr = nv98_ppp_intr;
134 nv_engine(priv)->cclass = &nv98_ppp_cclass; 84 nv_engine(priv)->cclass = &nv98_ppp_cclass;
135 nv_engine(priv)->sclass = nv98_ppp_sclass; 85 nv_engine(priv)->sclass = nv98_ppp_sclass;
136 return 0; 86 return 0;
137} 87}
138 88
139static void
140nv98_ppp_dtor(struct nouveau_object *object)
141{
142 struct nv98_ppp_priv *priv = (void *)object;
143 nouveau_ppp_destroy(&priv->base);
144}
145
146static int
147nv98_ppp_init(struct nouveau_object *object)
148{
149 struct nv98_ppp_priv *priv = (void *)object;
150 int ret;
151
152 ret = nouveau_ppp_init(&priv->base);
153 if (ret)
154 return ret;
155
156 return 0;
157}
158
159static int
160nv98_ppp_fini(struct nouveau_object *object, bool suspend)
161{
162 struct nv98_ppp_priv *priv = (void *)object;
163 return nouveau_ppp_fini(&priv->base, suspend);
164}
165
166struct nouveau_oclass 89struct nouveau_oclass
167nv98_ppp_oclass = { 90nv98_ppp_oclass = {
168 .handle = NV_ENGINE(PPP, 0x98), 91 .handle = NV_ENGINE(PPP, 0x98),
169 .ofuncs = &(struct nouveau_ofuncs) { 92 .ofuncs = &(struct nouveau_ofuncs) {
170 .ctor = nv98_ppp_ctor, 93 .ctor = nv98_ppp_ctor,
171 .dtor = nv98_ppp_dtor, 94 .dtor = _nouveau_engine_dtor,
172 .init = nv98_ppp_init, 95 .init = _nouveau_engine_init,
173 .fini = nv98_ppp_fini, 96 .fini = _nouveau_engine_fini,
174 }, 97 },
175}; 98};
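
The nv98 PPP rewrite above (and the matching vp/nv84.c change below) deletes ctor/dtor/init/fini wrappers whose only job was to forward to the generic engine and engctx helpers, pointing the ofuncs tables at the shared _nouveau_engctx_* and _nouveau_engine_* defaults instead. A sketch of that pattern with illustrative names, not the real nouveau symbols:

    #include <stdio.h>

    /* Ops table whose slots point at shared defaults rather than at
     * per-engine wrappers that only forwarded to them. */
    struct ofuncs {
        int  (*init)(void *obj);
        void (*fini)(void *obj);
    };

    static int generic_init(void *obj)
    {
        (void)obj;
        puts("generic init");
        return 0;
    }

    static void generic_fini(void *obj)
    {
        (void)obj;
        puts("generic fini");
    }

    /* Before: static int ppp_init(void *o) { return generic_init(o); }
     * After: reference the default directly. */
    static const struct ofuncs ppp_ofuncs = {
        .init = generic_init,
        .fini = generic_fini,
    };

    int main(void)
    {
        ppp_ofuncs.init(NULL);
        ppp_ofuncs.fini(NULL);
        return 0;
    }
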
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
new file mode 100644
index 000000000000..ebf0d860e2dd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
@@ -0,0 +1,110 @@
1/*
2 * Copyright 2012 Maarten Lankhorst
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Maarten Lankhorst
23 */
24
25#include <core/falcon.h>
26
27#include <engine/ppp.h>
28
29struct nvc0_ppp_priv {
30 struct nouveau_falcon base;
31};
32
33/*******************************************************************************
34 * PPP object classes
35 ******************************************************************************/
36
37static struct nouveau_oclass
38nvc0_ppp_sclass[] = {
39 { 0x90b3, &nouveau_object_ofuncs },
40 {},
41};
42
43/*******************************************************************************
44 * PPPP context
45 ******************************************************************************/
46
47static struct nouveau_oclass
48nvc0_ppp_cclass = {
49 .handle = NV_ENGCTX(PPP, 0xc0),
50 .ofuncs = &(struct nouveau_ofuncs) {
51 .ctor = _nouveau_falcon_context_ctor,
52 .dtor = _nouveau_falcon_context_dtor,
53 .init = _nouveau_falcon_context_init,
54 .fini = _nouveau_falcon_context_fini,
55 .rd32 = _nouveau_falcon_context_rd32,
56 .wr32 = _nouveau_falcon_context_wr32,
57 },
58};
59
60/*******************************************************************************
61 * PPPP engine/subdev functions
62 ******************************************************************************/
63
64static int
65nvc0_ppp_init(struct nouveau_object *object)
66{
67 struct nvc0_ppp_priv *priv = (void *)object;
68 int ret;
69
70 ret = nouveau_falcon_init(&priv->base);
71 if (ret)
72 return ret;
73
74 nv_wr32(priv, 0x086010, 0x0000fff2);
75 nv_wr32(priv, 0x08601c, 0x0000fff2);
76 return 0;
77}
78
79static int
80nvc0_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
81 struct nouveau_oclass *oclass, void *data, u32 size,
82 struct nouveau_object **pobject)
83{
84 struct nvc0_ppp_priv *priv;
85 int ret;
86
87 ret = nouveau_falcon_create(parent, engine, oclass, 0x086000, true,
88 "PPPP", "ppp", &priv);
89 *pobject = nv_object(priv);
90 if (ret)
91 return ret;
92
93 nv_subdev(priv)->unit = 0x00000002;
94 nv_engine(priv)->cclass = &nvc0_ppp_cclass;
95 nv_engine(priv)->sclass = nvc0_ppp_sclass;
96 return 0;
97}
98
99struct nouveau_oclass
100nvc0_ppp_oclass = {
101 .handle = NV_ENGINE(PPP, 0xc0),
102 .ofuncs = &(struct nouveau_ofuncs) {
103 .ctor = nvc0_ppp_ctor,
104 .dtor = _nouveau_falcon_dtor,
105 .init = nvc0_ppp_init,
106 .fini = _nouveau_falcon_fini,
107 .rd32 = _nouveau_falcon_rd32,
108 .wr32 = _nouveau_falcon_wr32,
109 },
110};
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
index 3ca4c3aa90b7..2a859a31c30d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv04.c
@@ -63,8 +63,8 @@ nv04_software_flip(struct nouveau_object *object, u32 mthd,
63 63
64static struct nouveau_omthds 64static struct nouveau_omthds
65nv04_software_omthds[] = { 65nv04_software_omthds[] = {
66 { 0x0150, nv04_software_set_ref }, 66 { 0x0150, 0x0150, nv04_software_set_ref },
67 { 0x0500, nv04_software_flip }, 67 { 0x0500, 0x0500, nv04_software_flip },
68 {} 68 {}
69}; 69};
70 70
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
index 6e699afbfdb7..a019364b1e13 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv10.c
@@ -52,7 +52,7 @@ nv10_software_flip(struct nouveau_object *object, u32 mthd,
52 52
53static struct nouveau_omthds 53static struct nouveau_omthds
54nv10_software_omthds[] = { 54nv10_software_omthds[] = {
55 { 0x0500, nv10_software_flip }, 55 { 0x0500, 0x0500, nv10_software_flip },
56 {} 56 {}
57}; 57};
58 58
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
index a2edcd38544a..b0e7e1c01ce6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nv50.c
@@ -117,11 +117,11 @@ nv50_software_mthd_flip(struct nouveau_object *object, u32 mthd,
117 117
118static struct nouveau_omthds 118static struct nouveau_omthds
119nv50_software_omthds[] = { 119nv50_software_omthds[] = {
120 { 0x018c, nv50_software_mthd_dma_vblsem }, 120 { 0x018c, 0x018c, nv50_software_mthd_dma_vblsem },
121 { 0x0400, nv50_software_mthd_vblsem_offset }, 121 { 0x0400, 0x0400, nv50_software_mthd_vblsem_offset },
122 { 0x0404, nv50_software_mthd_vblsem_value }, 122 { 0x0404, 0x0404, nv50_software_mthd_vblsem_value },
123 { 0x0408, nv50_software_mthd_vblsem_release }, 123 { 0x0408, 0x0408, nv50_software_mthd_vblsem_release },
124 { 0x0500, nv50_software_mthd_flip }, 124 { 0x0500, 0x0500, nv50_software_mthd_flip },
125 {} 125 {}
126}; 126};
127 127
diff --git a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
index b7b0d7e330d6..282a1cd1bc2f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/software/nvc0.c
@@ -99,11 +99,11 @@ nvc0_software_mthd_flip(struct nouveau_object *object, u32 mthd,
99 99
100static struct nouveau_omthds 100static struct nouveau_omthds
101nvc0_software_omthds[] = { 101nvc0_software_omthds[] = {
102 { 0x0400, nvc0_software_mthd_vblsem_offset }, 102 { 0x0400, 0x0400, nvc0_software_mthd_vblsem_offset },
103 { 0x0404, nvc0_software_mthd_vblsem_offset }, 103 { 0x0404, 0x0404, nvc0_software_mthd_vblsem_offset },
104 { 0x0408, nvc0_software_mthd_vblsem_value }, 104 { 0x0408, 0x0408, nvc0_software_mthd_vblsem_value },
105 { 0x040c, nvc0_software_mthd_vblsem_release }, 105 { 0x040c, 0x040c, nvc0_software_mthd_vblsem_release },
106 { 0x0500, nvc0_software_mthd_flip }, 106 { 0x0500, 0x0500, nvc0_software_mthd_flip },
107 {} 107 {}
108}; 108};
109 109
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
index dd23c80e5405..261cd96e6951 100644
--- a/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nv84.c
@@ -22,18 +22,13 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <core/os.h>
26#include <core/class.h>
27#include <core/engctx.h> 25#include <core/engctx.h>
26#include <core/class.h>
28 27
29#include <engine/vp.h> 28#include <engine/vp.h>
30 29
31struct nv84_vp_priv { 30struct nv84_vp_priv {
32 struct nouveau_vp base; 31 struct nouveau_engine base;
33};
34
35struct nv84_vp_chan {
36 struct nouveau_vp_chan base;
37}; 32};
38 33
39/******************************************************************************* 34/*******************************************************************************
@@ -49,61 +44,16 @@ nv84_vp_sclass[] = {
49 * PVP context 44 * PVP context
50 ******************************************************************************/ 45 ******************************************************************************/
51 46
52static int
53nv84_vp_context_ctor(struct nouveau_object *parent,
54 struct nouveau_object *engine,
55 struct nouveau_oclass *oclass, void *data, u32 size,
56 struct nouveau_object **pobject)
57{
58 struct nv84_vp_chan *priv;
59 int ret;
60
61 ret = nouveau_vp_context_create(parent, engine, oclass, NULL,
62 0, 0, 0, &priv);
63 *pobject = nv_object(priv);
64 if (ret)
65 return ret;
66
67 return 0;
68}
69
70static void
71nv84_vp_context_dtor(struct nouveau_object *object)
72{
73 struct nv84_vp_chan *priv = (void *)object;
74 nouveau_vp_context_destroy(&priv->base);
75}
76
77static int
78nv84_vp_context_init(struct nouveau_object *object)
79{
80 struct nv84_vp_chan *priv = (void *)object;
81 int ret;
82
83 ret = nouveau_vp_context_init(&priv->base);
84 if (ret)
85 return ret;
86
87 return 0;
88}
89
90static int
91nv84_vp_context_fini(struct nouveau_object *object, bool suspend)
92{
93 struct nv84_vp_chan *priv = (void *)object;
94 return nouveau_vp_context_fini(&priv->base, suspend);
95}
96
97static struct nouveau_oclass 47static struct nouveau_oclass
98nv84_vp_cclass = { 48nv84_vp_cclass = {
99 .handle = NV_ENGCTX(VP, 0x84), 49 .handle = NV_ENGCTX(VP, 0x84),
100 .ofuncs = &(struct nouveau_ofuncs) { 50 .ofuncs = &(struct nouveau_ofuncs) {
101 .ctor = nv84_vp_context_ctor, 51 .ctor = _nouveau_engctx_ctor,
102 .dtor = nv84_vp_context_dtor, 52 .dtor = _nouveau_engctx_dtor,
103 .init = nv84_vp_context_init, 53 .init = _nouveau_engctx_init,
104 .fini = nv84_vp_context_fini, 54 .fini = _nouveau_engctx_fini,
105 .rd32 = _nouveau_vp_context_rd32, 55 .rd32 = _nouveau_engctx_rd32,
106 .wr32 = _nouveau_vp_context_wr32, 56 .wr32 = _nouveau_engctx_wr32,
107 }, 57 },
108}; 58};
109 59
@@ -111,11 +61,6 @@ nv84_vp_cclass = {
111 * PVP engine/subdev functions 61 * PVP engine/subdev functions
112 ******************************************************************************/ 62 ******************************************************************************/
113 63
114static void
115nv84_vp_intr(struct nouveau_subdev *subdev)
116{
117}
118
119static int 64static int
120nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 65nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
121 struct nouveau_oclass *oclass, void *data, u32 size, 66 struct nouveau_oclass *oclass, void *data, u32 size,
@@ -124,52 +69,25 @@ nv84_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
124 struct nv84_vp_priv *priv; 69 struct nv84_vp_priv *priv;
125 int ret; 70 int ret;
126 71
127 ret = nouveau_vp_create(parent, engine, oclass, &priv); 72 ret = nouveau_engine_create(parent, engine, oclass, true,
73 "PVP", "vp", &priv);
128 *pobject = nv_object(priv); 74 *pobject = nv_object(priv);
129 if (ret) 75 if (ret)
130 return ret; 76 return ret;
131 77
132 nv_subdev(priv)->unit = 0x01020000; 78 nv_subdev(priv)->unit = 0x01020000;
133 nv_subdev(priv)->intr = nv84_vp_intr;
134 nv_engine(priv)->cclass = &nv84_vp_cclass; 79 nv_engine(priv)->cclass = &nv84_vp_cclass;
135 nv_engine(priv)->sclass = nv84_vp_sclass; 80 nv_engine(priv)->sclass = nv84_vp_sclass;
136 return 0; 81 return 0;
137} 82}
138 83
139static void
140nv84_vp_dtor(struct nouveau_object *object)
141{
142 struct nv84_vp_priv *priv = (void *)object;
143 nouveau_vp_destroy(&priv->base);
144}
145
146static int
147nv84_vp_init(struct nouveau_object *object)
148{
149 struct nv84_vp_priv *priv = (void *)object;
150 int ret;
151
152 ret = nouveau_vp_init(&priv->base);
153 if (ret)
154 return ret;
155
156 return 0;
157}
158
159static int
160nv84_vp_fini(struct nouveau_object *object, bool suspend)
161{
162 struct nv84_vp_priv *priv = (void *)object;
163 return nouveau_vp_fini(&priv->base, suspend);
164}
165
166struct nouveau_oclass 84struct nouveau_oclass
167nv84_vp_oclass = { 85nv84_vp_oclass = {
168 .handle = NV_ENGINE(VP, 0x84), 86 .handle = NV_ENGINE(VP, 0x84),
169 .ofuncs = &(struct nouveau_ofuncs) { 87 .ofuncs = &(struct nouveau_ofuncs) {
170 .ctor = nv84_vp_ctor, 88 .ctor = nv84_vp_ctor,
171 .dtor = nv84_vp_dtor, 89 .dtor = _nouveau_engine_dtor,
172 .init = nv84_vp_init, 90 .init = _nouveau_engine_init,
173 .fini = nv84_vp_fini, 91 .fini = _nouveau_engine_fini,
174 }, 92 },
175}; 93};
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
new file mode 100644
index 000000000000..f761949d7039
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
@@ -0,0 +1,110 @@
1/*
2 * Copyright 2012 Maarten Lankhorst
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Maarten Lankhorst
23 */
24
25#include <core/falcon.h>
26
27#include <engine/vp.h>
28
29struct nvc0_vp_priv {
30 struct nouveau_falcon base;
31};
32
33/*******************************************************************************
34 * VP object classes
35 ******************************************************************************/
36
37static struct nouveau_oclass
38nvc0_vp_sclass[] = {
39 { 0x90b2, &nouveau_object_ofuncs },
40 {},
41};
42
43/*******************************************************************************
44 * PVP context
45 ******************************************************************************/
46
47static struct nouveau_oclass
48nvc0_vp_cclass = {
49 .handle = NV_ENGCTX(VP, 0xc0),
50 .ofuncs = &(struct nouveau_ofuncs) {
51 .ctor = _nouveau_falcon_context_ctor,
52 .dtor = _nouveau_falcon_context_dtor,
53 .init = _nouveau_falcon_context_init,
54 .fini = _nouveau_falcon_context_fini,
55 .rd32 = _nouveau_falcon_context_rd32,
56 .wr32 = _nouveau_falcon_context_wr32,
57 },
58};
59
60/*******************************************************************************
61 * PVP engine/subdev functions
62 ******************************************************************************/
63
64static int
65nvc0_vp_init(struct nouveau_object *object)
66{
67 struct nvc0_vp_priv *priv = (void *)object;
68 int ret;
69
70 ret = nouveau_falcon_init(&priv->base);
71 if (ret)
72 return ret;
73
74 nv_wr32(priv, 0x085010, 0x0000fff2);
75 nv_wr32(priv, 0x08501c, 0x0000fff2);
76 return 0;
77}
78
79static int
80nvc0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
81 struct nouveau_oclass *oclass, void *data, u32 size,
82 struct nouveau_object **pobject)
83{
84 struct nvc0_vp_priv *priv;
85 int ret;
86
87 ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true,
88 "PVP", "vp", &priv);
89 *pobject = nv_object(priv);
90 if (ret)
91 return ret;
92
93 nv_subdev(priv)->unit = 0x00020000;
94 nv_engine(priv)->cclass = &nvc0_vp_cclass;
95 nv_engine(priv)->sclass = nvc0_vp_sclass;
96 return 0;
97}
98
99struct nouveau_oclass
100nvc0_vp_oclass = {
101 .handle = NV_ENGINE(VP, 0xc0),
102 .ofuncs = &(struct nouveau_ofuncs) {
103 .ctor = nvc0_vp_ctor,
104 .dtor = _nouveau_falcon_dtor,
105 .init = nvc0_vp_init,
106 .fini = _nouveau_falcon_fini,
107 .rd32 = _nouveau_falcon_rd32,
108 .wr32 = _nouveau_falcon_wr32,
109 },
110};
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
new file mode 100644
index 000000000000..2384ce5dbe16
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
@@ -0,0 +1,110 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/falcon.h>
26
27#include <engine/vp.h>
28
29struct nve0_vp_priv {
30 struct nouveau_falcon base;
31};
32
33/*******************************************************************************
34 * VP object classes
35 ******************************************************************************/
36
37static struct nouveau_oclass
38nve0_vp_sclass[] = {
39 { 0x95b2, &nouveau_object_ofuncs },
40 {},
41};
42
43/*******************************************************************************
44 * PVP context
45 ******************************************************************************/
46
47static struct nouveau_oclass
48nve0_vp_cclass = {
49 .handle = NV_ENGCTX(VP, 0xe0),
50 .ofuncs = &(struct nouveau_ofuncs) {
51 .ctor = _nouveau_falcon_context_ctor,
52 .dtor = _nouveau_falcon_context_dtor,
53 .init = _nouveau_falcon_context_init,
54 .fini = _nouveau_falcon_context_fini,
55 .rd32 = _nouveau_falcon_context_rd32,
56 .wr32 = _nouveau_falcon_context_wr32,
57 },
58};
59
60/*******************************************************************************
61 * PVP engine/subdev functions
62 ******************************************************************************/
63
64static int
65nve0_vp_init(struct nouveau_object *object)
66{
67 struct nve0_vp_priv *priv = (void *)object;
68 int ret;
69
70 ret = nouveau_falcon_init(&priv->base);
71 if (ret)
72 return ret;
73
74 nv_wr32(priv, 0x085010, 0x0000fff2);
75 nv_wr32(priv, 0x08501c, 0x0000fff2);
76 return 0;
77}
78
79static int
80nve0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
81 struct nouveau_oclass *oclass, void *data, u32 size,
82 struct nouveau_object **pobject)
83{
84 struct nve0_vp_priv *priv;
85 int ret;
86
87 ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true,
88 "PVP", "vp", &priv);
89 *pobject = nv_object(priv);
90 if (ret)
91 return ret;
92
93 nv_subdev(priv)->unit = 0x00020000;
94 nv_engine(priv)->cclass = &nve0_vp_cclass;
95 nv_engine(priv)->sclass = nve0_vp_sclass;
96 return 0;
97}
98
99struct nouveau_oclass
100nve0_vp_oclass = {
101 .handle = NV_ENGINE(VP, 0xe0),
102 .ofuncs = &(struct nouveau_ofuncs) {
103 .ctor = nve0_vp_ctor,
104 .dtor = _nouveau_falcon_dtor,
105 .init = nve0_vp_init,
106 .fini = _nouveau_falcon_fini,
107 .rd32 = _nouveau_falcon_rd32,
108 .wr32 = _nouveau_falcon_wr32,
109 },
110};
diff --git a/drivers/gpu/drm/nouveau/core/include/core/class.h b/drivers/gpu/drm/nouveau/core/include/core/class.h
index 6180ae9800fc..47c4b3a5bd3a 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/class.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/class.h
@@ -23,6 +23,7 @@
 #define NV_DEVICE_DISABLE_COPY0 0x0000008000000000ULL
 #define NV_DEVICE_DISABLE_COPY1 0x0000010000000000ULL
 #define NV_DEVICE_DISABLE_UNK1C1 0x0000020000000000ULL
+#define NV_DEVICE_DISABLE_VENC 0x0000040000000000ULL
 
 struct nv_device_class {
 	u64 device;	/* device identifier, ~0 for client default */
@@ -52,11 +53,49 @@ struct nv_device_class {
 #define NV_DMA_ACCESS_WR 0x00000200
 #define NV_DMA_ACCESS_RDWR 0x00000300
 
+/* NV50:NVC0 */
+#define NV50_DMA_CONF0_ENABLE 0x80000000
+#define NV50_DMA_CONF0_PRIV 0x00300000
+#define NV50_DMA_CONF0_PRIV_VM 0x00000000
+#define NV50_DMA_CONF0_PRIV_US 0x00100000
+#define NV50_DMA_CONF0_PRIV__S 0x00200000
+#define NV50_DMA_CONF0_PART 0x00030000
+#define NV50_DMA_CONF0_PART_VM 0x00000000
+#define NV50_DMA_CONF0_PART_256 0x00010000
+#define NV50_DMA_CONF0_PART_1KB 0x00020000
+#define NV50_DMA_CONF0_COMP 0x00000180
+#define NV50_DMA_CONF0_COMP_NONE 0x00000000
+#define NV50_DMA_CONF0_COMP_VM 0x00000180
+#define NV50_DMA_CONF0_TYPE 0x0000007f
+#define NV50_DMA_CONF0_TYPE_LINEAR 0x00000000
+#define NV50_DMA_CONF0_TYPE_VM 0x0000007f
+
+/* NVC0:NVD9 */
+#define NVC0_DMA_CONF0_ENABLE 0x80000000
+#define NVC0_DMA_CONF0_PRIV 0x00300000
+#define NVC0_DMA_CONF0_PRIV_VM 0x00000000
+#define NVC0_DMA_CONF0_PRIV_US 0x00100000
+#define NVC0_DMA_CONF0_PRIV__S 0x00200000
+#define NVC0_DMA_CONF0_UNKN /* PART? */ 0x00030000
+#define NVC0_DMA_CONF0_TYPE 0x000000ff
+#define NVC0_DMA_CONF0_TYPE_LINEAR 0x00000000
+#define NVC0_DMA_CONF0_TYPE_VM 0x000000ff
+
+/* NVD9- */
+#define NVD0_DMA_CONF0_ENABLE 0x80000000
+#define NVD0_DMA_CONF0_PAGE 0x00000400
+#define NVD0_DMA_CONF0_PAGE_LP 0x00000000
+#define NVD0_DMA_CONF0_PAGE_SP 0x00000400
+#define NVD0_DMA_CONF0_TYPE 0x000000ff
+#define NVD0_DMA_CONF0_TYPE_LINEAR 0x00000000
+#define NVD0_DMA_CONF0_TYPE_VM 0x000000ff
+
 struct nv_dma_class {
 	u32 flags;
 	u32 pad0;
 	u64 start;
 	u64 limit;
+	u32 conf0;
 };
 
 /* DMA FIFO channel classes
@@ -115,4 +154,190 @@ struct nve0_channel_ind_class {
 	u32 engine;
 };
 
+/* 5070: NV50_DISP
+ * 8270: NV84_DISP
+ * 8370: NVA0_DISP
+ * 8870: NV94_DISP
+ * 8570: NVA3_DISP
+ * 9070: NVD0_DISP
+ * 9170: NVE0_DISP
+ */
+
+#define NV50_DISP_CLASS 0x00005070
+#define NV84_DISP_CLASS 0x00008270
+#define NVA0_DISP_CLASS 0x00008370
+#define NV94_DISP_CLASS 0x00008870
+#define NVA3_DISP_CLASS 0x00008570
+#define NVD0_DISP_CLASS 0x00009070
+#define NVE0_DISP_CLASS 0x00009170
+
+#define NV50_DISP_SOR_MTHD 0x00010000
+#define NV50_DISP_SOR_MTHD_TYPE 0x0000f000
+#define NV50_DISP_SOR_MTHD_HEAD 0x00000018
+#define NV50_DISP_SOR_MTHD_LINK 0x00000004
+#define NV50_DISP_SOR_MTHD_OR 0x00000003
+
+#define NV50_DISP_SOR_PWR 0x00010000
+#define NV50_DISP_SOR_PWR_STATE 0x00000001
+#define NV50_DISP_SOR_PWR_STATE_ON 0x00000001
+#define NV50_DISP_SOR_PWR_STATE_OFF 0x00000000
+#define NVA3_DISP_SOR_HDA_ELD 0x00010100
+#define NV84_DISP_SOR_HDMI_PWR 0x00012000
+#define NV84_DISP_SOR_HDMI_PWR_STATE 0x40000000
+#define NV84_DISP_SOR_HDMI_PWR_STATE_OFF 0x00000000
+#define NV84_DISP_SOR_HDMI_PWR_STATE_ON 0x40000000
+#define NV84_DISP_SOR_HDMI_PWR_MAX_AC_PACKET 0x001f0000
+#define NV84_DISP_SOR_HDMI_PWR_REKEY 0x0000007f
+#define NV50_DISP_SOR_LVDS_SCRIPT 0x00013000
+#define NV50_DISP_SOR_LVDS_SCRIPT_ID 0x0000ffff
+#define NV94_DISP_SOR_DP_TRAIN 0x00016000
+#define NV94_DISP_SOR_DP_TRAIN_OP 0xf0000000
+#define NV94_DISP_SOR_DP_TRAIN_OP_PATTERN 0x00000000
+#define NV94_DISP_SOR_DP_TRAIN_OP_INIT 0x10000000
+#define NV94_DISP_SOR_DP_TRAIN_OP_FINI 0x20000000
+#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD 0x00000001
+#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF 0x00000000
+#define NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON 0x00000001
+#define NV94_DISP_SOR_DP_TRAIN_PATTERN 0x00000003
+#define NV94_DISP_SOR_DP_TRAIN_PATTERN_DISABLED 0x00000000
+#define NV94_DISP_SOR_DP_LNKCTL 0x00016040
+#define NV94_DISP_SOR_DP_LNKCTL_FRAME 0x80000000
+#define NV94_DISP_SOR_DP_LNKCTL_FRAME_STD 0x00000000
+#define NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH 0x80000000
+#define NV94_DISP_SOR_DP_LNKCTL_WIDTH 0x00001f00
+#define NV94_DISP_SOR_DP_LNKCTL_COUNT 0x00000007
+#define NV94_DISP_SOR_DP_DRVCTL(l) ((l) * 0x40 + 0x00016100)
+#define NV94_DISP_SOR_DP_DRVCTL_VS 0x00000300
+#define NV94_DISP_SOR_DP_DRVCTL_PE 0x00000003
+
+#define NV50_DISP_DAC_MTHD 0x00020000
+#define NV50_DISP_DAC_MTHD_TYPE 0x0000f000
+#define NV50_DISP_DAC_MTHD_OR 0x00000003
+
+#define NV50_DISP_DAC_PWR 0x00020000
+#define NV50_DISP_DAC_PWR_HSYNC 0x00000001
+#define NV50_DISP_DAC_PWR_HSYNC_ON 0x00000000
+#define NV50_DISP_DAC_PWR_HSYNC_LO 0x00000001
+#define NV50_DISP_DAC_PWR_VSYNC 0x00000004
+#define NV50_DISP_DAC_PWR_VSYNC_ON 0x00000000
+#define NV50_DISP_DAC_PWR_VSYNC_LO 0x00000004
+#define NV50_DISP_DAC_PWR_DATA 0x00000010
+#define NV50_DISP_DAC_PWR_DATA_ON 0x00000000
+#define NV50_DISP_DAC_PWR_DATA_LO 0x00000010
+#define NV50_DISP_DAC_PWR_STATE 0x00000040
+#define NV50_DISP_DAC_PWR_STATE_ON 0x00000000
+#define NV50_DISP_DAC_PWR_STATE_OFF 0x00000040
+#define NV50_DISP_DAC_LOAD 0x0002000c
+#define NV50_DISP_DAC_LOAD_VALUE 0x00000007
+
+struct nv50_display_class {
+};
+
+/* 507a: NV50_DISP_CURS
+ * 827a: NV84_DISP_CURS
+ * 837a: NVA0_DISP_CURS
+ * 887a: NV94_DISP_CURS
+ * 857a: NVA3_DISP_CURS
+ * 907a: NVD0_DISP_CURS
+ * 917a: NVE0_DISP_CURS
+ */
+
+#define NV50_DISP_CURS_CLASS 0x0000507a
+#define NV84_DISP_CURS_CLASS 0x0000827a
+#define NVA0_DISP_CURS_CLASS 0x0000837a
+#define NV94_DISP_CURS_CLASS 0x0000887a
+#define NVA3_DISP_CURS_CLASS 0x0000857a
+#define NVD0_DISP_CURS_CLASS 0x0000907a
+#define NVE0_DISP_CURS_CLASS 0x0000917a
+
+struct nv50_display_curs_class {
+	u32 head;
+};
+
+/* 507b: NV50_DISP_OIMM
+ * 827b: NV84_DISP_OIMM
+ * 837b: NVA0_DISP_OIMM
+ * 887b: NV94_DISP_OIMM
+ * 857b: NVA3_DISP_OIMM
+ * 907b: NVD0_DISP_OIMM
+ * 917b: NVE0_DISP_OIMM
+ */
+
+#define NV50_DISP_OIMM_CLASS 0x0000507b
+#define NV84_DISP_OIMM_CLASS 0x0000827b
+#define NVA0_DISP_OIMM_CLASS 0x0000837b
+#define NV94_DISP_OIMM_CLASS 0x0000887b
+#define NVA3_DISP_OIMM_CLASS 0x0000857b
+#define NVD0_DISP_OIMM_CLASS 0x0000907b
+#define NVE0_DISP_OIMM_CLASS 0x0000917b
+
+struct nv50_display_oimm_class {
+	u32 head;
+};
+
+/* 507c: NV50_DISP_SYNC
+ * 827c: NV84_DISP_SYNC
+ * 837c: NVA0_DISP_SYNC
+ * 887c: NV94_DISP_SYNC
+ * 857c: NVA3_DISP_SYNC
+ * 907c: NVD0_DISP_SYNC
+ * 917c: NVE0_DISP_SYNC
+ */
+
+#define NV50_DISP_SYNC_CLASS 0x0000507c
+#define NV84_DISP_SYNC_CLASS 0x0000827c
+#define NVA0_DISP_SYNC_CLASS 0x0000837c
+#define NV94_DISP_SYNC_CLASS 0x0000887c
+#define NVA3_DISP_SYNC_CLASS 0x0000857c
+#define NVD0_DISP_SYNC_CLASS 0x0000907c
+#define NVE0_DISP_SYNC_CLASS 0x0000917c
+
+struct nv50_display_sync_class {
+	u32 pushbuf;
+	u32 head;
+};
+
+/* 507d: NV50_DISP_MAST
+ * 827d: NV84_DISP_MAST
+ * 837d: NVA0_DISP_MAST
+ * 887d: NV94_DISP_MAST
+ * 857d: NVA3_DISP_MAST
+ * 907d: NVD0_DISP_MAST
+ * 917d: NVE0_DISP_MAST
+ */
+
+#define NV50_DISP_MAST_CLASS 0x0000507d
+#define NV84_DISP_MAST_CLASS 0x0000827d
+#define NVA0_DISP_MAST_CLASS 0x0000837d
+#define NV94_DISP_MAST_CLASS 0x0000887d
+#define NVA3_DISP_MAST_CLASS 0x0000857d
+#define NVD0_DISP_MAST_CLASS 0x0000907d
+#define NVE0_DISP_MAST_CLASS 0x0000917d
+
+struct nv50_display_mast_class {
+	u32 pushbuf;
+};
+
+/* 507e: NV50_DISP_OVLY
+ * 827e: NV84_DISP_OVLY
+ * 837e: NVA0_DISP_OVLY
+ * 887e: NV94_DISP_OVLY
+ * 857e: NVA3_DISP_OVLY
+ * 907e: NVD0_DISP_OVLY
+ * 917e: NVE0_DISP_OVLY
+ */
+
+#define NV50_DISP_OVLY_CLASS 0x0000507e
+#define NV84_DISP_OVLY_CLASS 0x0000827e
+#define NVA0_DISP_OVLY_CLASS 0x0000837e
+#define NV94_DISP_OVLY_CLASS 0x0000887e
+#define NVA3_DISP_OVLY_CLASS 0x0000857e
+#define NVD0_DISP_OVLY_CLASS 0x0000907e
+#define NVE0_DISP_OVLY_CLASS 0x0000917e
+
+struct nv50_display_ovly_class {
+	u32 pushbuf;
+	u32 head;
+};
+
 #endif
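For orientation, the method defines above are consumed through the object method interface (nv_exec()/nv_call(); see the object.h hunk later in this diff). A hypothetical sketch, with the disp object and error handling illustrative only:

	/* Hypothetical sketch: power up SOR 'or' using the encodings above.
	 * The 'disp' object provenance is an assumption for illustration. */
	static int
	example_sor_power_on(struct nouveau_object *disp, int or)
	{
		u32 mthd = NV50_DISP_SOR_PWR | (or & NV50_DISP_SOR_MTHD_OR);
		return nv_call(disp, mthd, NV50_DISP_SOR_PWR_STATE_ON);
	}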
diff --git a/drivers/gpu/drm/nouveau/core/include/core/engctx.h b/drivers/gpu/drm/nouveau/core/include/core/engctx.h
index 8a947b6872eb..2fd48b564c7d 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/engctx.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/engctx.h
@@ -39,6 +39,9 @@ void nouveau_engctx_destroy(struct nouveau_engctx *);
 int nouveau_engctx_init(struct nouveau_engctx *);
 int nouveau_engctx_fini(struct nouveau_engctx *, bool suspend);
 
+int _nouveau_engctx_ctor(struct nouveau_object *, struct nouveau_object *,
+			 struct nouveau_oclass *, void *, u32,
+			 struct nouveau_object **);
 void _nouveau_engctx_dtor(struct nouveau_object *);
 int _nouveau_engctx_init(struct nouveau_object *);
 int _nouveau_engctx_fini(struct nouveau_object *, bool suspend);
diff --git a/drivers/gpu/drm/nouveau/core/include/core/falcon.h b/drivers/gpu/drm/nouveau/core/include/core/falcon.h
new file mode 100644
index 000000000000..1edec386ab36
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/core/falcon.h
@@ -0,0 +1,81 @@
1#ifndef __NOUVEAU_FALCON_H__
2#define __NOUVEAU_FALCON_H__
3
4#include <core/engine.h>
5#include <core/engctx.h>
6#include <core/gpuobj.h>
7
8struct nouveau_falcon_chan {
9 struct nouveau_engctx base;
10};
11
12#define nouveau_falcon_context_create(p,e,c,g,s,a,f,d) \
13 nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
14#define nouveau_falcon_context_destroy(d) \
15 nouveau_engctx_destroy(&(d)->base)
16#define nouveau_falcon_context_init(d) \
17 nouveau_engctx_init(&(d)->base)
18#define nouveau_falcon_context_fini(d,s) \
19 nouveau_engctx_fini(&(d)->base, (s))
20
21#define _nouveau_falcon_context_ctor _nouveau_engctx_ctor
22#define _nouveau_falcon_context_dtor _nouveau_engctx_dtor
23#define _nouveau_falcon_context_init _nouveau_engctx_init
24#define _nouveau_falcon_context_fini _nouveau_engctx_fini
25#define _nouveau_falcon_context_rd32 _nouveau_engctx_rd32
26#define _nouveau_falcon_context_wr32 _nouveau_engctx_wr32
27
28struct nouveau_falcon_data {
29 bool external;
30};
31
32struct nouveau_falcon {
33 struct nouveau_engine base;
34
35 u32 addr;
36 u8 version;
37 u8 secret;
38
39 struct nouveau_gpuobj *core;
40 bool external;
41
42 struct {
43 u32 limit;
44 u32 *data;
45 u32 size;
46 } code;
47
48 struct {
49 u32 limit;
50 u32 *data;
51 u32 size;
52 } data;
53};
54
55#define nv_falcon(priv) (&(priv)->base)
56
57#define nouveau_falcon_create(p,e,c,b,d,i,f,r) \
58 nouveau_falcon_create_((p), (e), (c), (b), (d), (i), (f), \
59 sizeof(**r),(void **)r)
60#define nouveau_falcon_destroy(p) \
61 nouveau_engine_destroy(&(p)->base)
62#define nouveau_falcon_init(p) ({ \
63 struct nouveau_falcon *falcon = (p); \
64 _nouveau_falcon_init(nv_object(falcon)); \
65})
66#define nouveau_falcon_fini(p,s) ({ \
67 struct nouveau_falcon *falcon = (p); \
68 _nouveau_falcon_fini(nv_object(falcon), (s)); \
69})
70
71int nouveau_falcon_create_(struct nouveau_object *, struct nouveau_object *,
72 struct nouveau_oclass *, u32, bool, const char *,
73 const char *, int, void **);
74
75#define _nouveau_falcon_dtor _nouveau_engine_dtor
76int _nouveau_falcon_init(struct nouveau_object *);
77int _nouveau_falcon_fini(struct nouveau_object *, bool);
78u32 _nouveau_falcon_rd32(struct nouveau_object *, u64);
79void _nouveau_falcon_wr32(struct nouveau_object *, u64, u32);
80
81#endif
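The create/init/fini macros above give falcon-based engines the same boilerplate-free shape as plain engines; the statement-expression form of nouveau_falcon_init()/fini() type-checks the pointer before forwarding to the generic object hooks. A hedged sketch of an engine-specific init hook chaining up through them, mirroring the nvc0/nve0 PVP code earlier in this diff:

	/* Hedged sketch mirroring nvc0_vp_init() above: chain up to the
	 * common falcon init, then do engine-specific setup.  The register
	 * write is a placeholder, not a real hardware offset. */
	static int
	example_falcon_init(struct nouveau_object *object)
	{
		struct nouveau_falcon *falcon = (void *)object;
		int ret;

		ret = nouveau_falcon_init(falcon);
		if (ret)
			return ret;

		nv_wr32(falcon, 0x000000, 0x00000000); /* placeholder */
		return 0;
	}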
diff --git a/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h b/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
index 6eaff79377ae..b3b9ce4e9d38 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/gpuobj.h
@@ -65,7 +65,7 @@ nouveau_gpuobj_ref(struct nouveau_gpuobj *obj, struct nouveau_gpuobj **ref)
 void _nouveau_gpuobj_dtor(struct nouveau_object *);
 int _nouveau_gpuobj_init(struct nouveau_object *);
 int _nouveau_gpuobj_fini(struct nouveau_object *, bool);
-u32 _nouveau_gpuobj_rd32(struct nouveau_object *, u32);
-void _nouveau_gpuobj_wr32(struct nouveau_object *, u32, u32);
+u32 _nouveau_gpuobj_rd32(struct nouveau_object *, u64);
+void _nouveau_gpuobj_wr32(struct nouveau_object *, u64, u32);
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/core/mm.h b/drivers/gpu/drm/nouveau/core/include/core/mm.h
index 975137ba34a6..2514e81ade02 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/mm.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/mm.h
@@ -21,6 +21,12 @@ struct nouveau_mm {
 	int heap_nodes;
 };
 
+static inline bool
+nouveau_mm_initialised(struct nouveau_mm *mm)
+{
+	return mm->block_size != 0;
+}
+
 int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
 int nouveau_mm_fini(struct nouveau_mm *);
 int nouveau_mm_head(struct nouveau_mm *, u8 type, u32 size_max, u32 size_min,
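nouveau_mm_initialised() lets callers make allocator setup idempotent. A small illustrative guard (the surrounding function is hypothetical, not from this patch):

	/* Illustrative only: initialise an allocator exactly once. */
	static int
	example_mm_setup(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
	{
		if (nouveau_mm_initialised(mm))
			return 0;
		return nouveau_mm_init(mm, offset, length, block);
	}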
diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h
index 486f1a9217fd..5982935ee23a 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/object.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/object.h
@@ -70,7 +70,8 @@ nv_pclass(struct nouveau_object *parent, u32 oclass)
 }
 
 struct nouveau_omthds {
-	u32 method;
+	u32 start;
+	u32 limit;
 	int (*call)(struct nouveau_object *, u32, void *, u32);
 };
 
@@ -81,12 +82,12 @@ struct nouveau_ofuncs {
 	void (*dtor)(struct nouveau_object *);
 	int (*init)(struct nouveau_object *);
 	int (*fini)(struct nouveau_object *, bool suspend);
-	u8 (*rd08)(struct nouveau_object *, u32 offset);
-	u16 (*rd16)(struct nouveau_object *, u32 offset);
-	u32 (*rd32)(struct nouveau_object *, u32 offset);
-	void (*wr08)(struct nouveau_object *, u32 offset, u8 data);
-	void (*wr16)(struct nouveau_object *, u32 offset, u16 data);
-	void (*wr32)(struct nouveau_object *, u32 offset, u32 data);
+	u8 (*rd08)(struct nouveau_object *, u64 offset);
+	u16 (*rd16)(struct nouveau_object *, u64 offset);
+	u32 (*rd32)(struct nouveau_object *, u64 offset);
+	void (*wr08)(struct nouveau_object *, u64 offset, u8 data);
+	void (*wr16)(struct nouveau_object *, u64 offset, u16 data);
+	void (*wr32)(struct nouveau_object *, u64 offset, u32 data);
 };
 
 static inline struct nouveau_ofuncs *
@@ -109,21 +110,27 @@ int nouveau_object_del(struct nouveau_object *, u32 parent, u32 handle);
 void nouveau_object_debug(void);
 
 static inline int
-nv_call(void *obj, u32 mthd, u32 data)
+nv_exec(void *obj, u32 mthd, void *data, u32 size)
 {
 	struct nouveau_omthds *method = nv_oclass(obj)->omthds;
 
 	while (method && method->call) {
-		if (method->method == mthd)
-			return method->call(obj, mthd, &data, sizeof(data));
+		if (mthd >= method->start && mthd <= method->limit)
+			return method->call(obj, mthd, data, size);
 		method++;
 	}
 
 	return -EINVAL;
 }
 
+static inline int
+nv_call(void *obj, u32 mthd, u32 data)
+{
+	return nv_exec(obj, mthd, &data, sizeof(data));
+}
+
 static inline u8
-nv_ro08(void *obj, u32 addr)
+nv_ro08(void *obj, u64 addr)
 {
 	u8 data = nv_ofuncs(obj)->rd08(obj, addr);
 	nv_spam(obj, "nv_ro08 0x%08x 0x%02x\n", addr, data);
@@ -131,7 +138,7 @@ nv_ro08(void *obj, u32 addr)
 }
 
 static inline u16
-nv_ro16(void *obj, u32 addr)
+nv_ro16(void *obj, u64 addr)
 {
 	u16 data = nv_ofuncs(obj)->rd16(obj, addr);
 	nv_spam(obj, "nv_ro16 0x%08x 0x%04x\n", addr, data);
@@ -139,7 +146,7 @@ nv_ro16(void *obj, u32 addr)
 }
 
 static inline u32
-nv_ro32(void *obj, u32 addr)
+nv_ro32(void *obj, u64 addr)
 {
 	u32 data = nv_ofuncs(obj)->rd32(obj, addr);
 	nv_spam(obj, "nv_ro32 0x%08x 0x%08x\n", addr, data);
@@ -147,28 +154,28 @@ nv_ro32(void *obj, u32 addr)
 }
 
 static inline void
-nv_wo08(void *obj, u32 addr, u8 data)
+nv_wo08(void *obj, u64 addr, u8 data)
 {
 	nv_spam(obj, "nv_wo08 0x%08x 0x%02x\n", addr, data);
 	nv_ofuncs(obj)->wr08(obj, addr, data);
 }
 
 static inline void
-nv_wo16(void *obj, u32 addr, u16 data)
+nv_wo16(void *obj, u64 addr, u16 data)
 {
 	nv_spam(obj, "nv_wo16 0x%08x 0x%04x\n", addr, data);
 	nv_ofuncs(obj)->wr16(obj, addr, data);
 }
 
 static inline void
-nv_wo32(void *obj, u32 addr, u32 data)
+nv_wo32(void *obj, u64 addr, u32 data)
 {
 	nv_spam(obj, "nv_wo32 0x%08x 0x%08x\n", addr, data);
 	nv_ofuncs(obj)->wr32(obj, addr, data);
 }
 
 static inline u32
-nv_mo32(void *obj, u32 addr, u32 mask, u32 data)
+nv_mo32(void *obj, u64 addr, u32 mask, u32 data)
 {
 	u32 temp = nv_ro32(obj, addr);
 	nv_wo32(obj, addr, (temp & ~mask) | data);
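The omthds change replaces single-method matching with [start, limit] ranges, and nv_exec() lets a handler receive an arbitrary payload while nv_call() keeps the old single-u32 convenience. A hypothetical method table using the new fields (names and the range are illustrative, not from a real class):

	/* Hypothetical: one handler now serves every method in 0x0100..0x01ff. */
	static int
	example_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
	{
		/* mthd is guaranteed by nv_exec() to fall inside [start, limit] */
		return 0;
	}

	static struct nouveau_omthds
	example_omthds[] = {
		{ 0x0100, 0x01ff, example_mthd },
		{}
	};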
diff --git a/drivers/gpu/drm/nouveau/core/include/core/parent.h b/drivers/gpu/drm/nouveau/core/include/core/parent.h
index 3c2e940eb0f8..31cd852c96df 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/parent.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/parent.h
@@ -14,7 +14,7 @@ struct nouveau_parent {
 	struct nouveau_object base;
 
 	struct nouveau_sclass *sclass;
-	u32 engine;
+	u64 engine;
 
 	int (*context_attach)(struct nouveau_object *,
 			      struct nouveau_object *);
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/bsp.h b/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
index 75d1ed5f85fd..13ccdf54dfad 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/bsp.h
@@ -1,45 +1,8 @@
 #ifndef __NOUVEAU_BSP_H__
 #define __NOUVEAU_BSP_H__
 
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_bsp_chan {
-	struct nouveau_engctx base;
-};
-
-#define nouveau_bsp_context_create(p,e,c,g,s,a,f,d) \
-	nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_bsp_context_destroy(d) \
-	nouveau_engctx_destroy(&(d)->base)
-#define nouveau_bsp_context_init(d) \
-	nouveau_engctx_init(&(d)->base)
-#define nouveau_bsp_context_fini(d,s) \
-	nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_bsp_context_dtor _nouveau_engctx_dtor
-#define _nouveau_bsp_context_init _nouveau_engctx_init
-#define _nouveau_bsp_context_fini _nouveau_engctx_fini
-#define _nouveau_bsp_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_bsp_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_bsp {
-	struct nouveau_engine base;
-};
-
-#define nouveau_bsp_create(p,e,c,d) \
-	nouveau_engine_create((p), (e), (c), true, "PBSP", "bsp", (d))
-#define nouveau_bsp_destroy(d) \
-	nouveau_engine_destroy(&(d)->base)
-#define nouveau_bsp_init(d) \
-	nouveau_engine_init(&(d)->base)
-#define nouveau_bsp_fini(d,s) \
-	nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_bsp_dtor _nouveau_engine_dtor
-#define _nouveau_bsp_init _nouveau_engine_init
-#define _nouveau_bsp_fini _nouveau_engine_fini
-
 extern struct nouveau_oclass nv84_bsp_oclass;
+extern struct nouveau_oclass nvc0_bsp_oclass;
+extern struct nouveau_oclass nve0_bsp_oclass;
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/copy.h b/drivers/gpu/drm/nouveau/core/include/engine/copy.h
index 70b9d8c5fcf5..8cad2cf28cef 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/copy.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/copy.h
@@ -1,44 +1,7 @@
 #ifndef __NOUVEAU_COPY_H__
 #define __NOUVEAU_COPY_H__
 
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_copy_chan {
-	struct nouveau_engctx base;
-};
-
-#define nouveau_copy_context_create(p,e,c,g,s,a,f,d) \
-	nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_copy_context_destroy(d) \
-	nouveau_engctx_destroy(&(d)->base)
-#define nouveau_copy_context_init(d) \
-	nouveau_engctx_init(&(d)->base)
-#define nouveau_copy_context_fini(d,s) \
-	nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_copy_context_dtor _nouveau_engctx_dtor
-#define _nouveau_copy_context_init _nouveau_engctx_init
-#define _nouveau_copy_context_fini _nouveau_engctx_fini
-#define _nouveau_copy_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_copy_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_copy {
-	struct nouveau_engine base;
-};
-
-#define nouveau_copy_create(p,e,c,y,i,d) \
-	nouveau_engine_create((p), (e), (c), (y), "PCE"#i, "copy"#i, (d))
-#define nouveau_copy_destroy(d) \
-	nouveau_engine_destroy(&(d)->base)
-#define nouveau_copy_init(d) \
-	nouveau_engine_init(&(d)->base)
-#define nouveau_copy_fini(d,s) \
-	nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_copy_dtor _nouveau_engine_dtor
-#define _nouveau_copy_init _nouveau_engine_init
-#define _nouveau_copy_fini _nouveau_engine_fini
+void nva3_copy_intr(struct nouveau_subdev *);
 
 extern struct nouveau_oclass nva3_copy_oclass;
 extern struct nouveau_oclass nvc0_copy0_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/crypt.h b/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
index e3674743baaa..db975618e937 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/crypt.h
@@ -1,45 +1,6 @@
 #ifndef __NOUVEAU_CRYPT_H__
 #define __NOUVEAU_CRYPT_H__
 
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_crypt_chan {
-	struct nouveau_engctx base;
-};
-
-#define nouveau_crypt_context_create(p,e,c,g,s,a,f,d) \
-	nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_crypt_context_destroy(d) \
-	nouveau_engctx_destroy(&(d)->base)
-#define nouveau_crypt_context_init(d) \
-	nouveau_engctx_init(&(d)->base)
-#define nouveau_crypt_context_fini(d,s) \
-	nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_crypt_context_dtor _nouveau_engctx_dtor
-#define _nouveau_crypt_context_init _nouveau_engctx_init
-#define _nouveau_crypt_context_fini _nouveau_engctx_fini
-#define _nouveau_crypt_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_crypt_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_crypt {
-	struct nouveau_engine base;
-};
-
-#define nouveau_crypt_create(p,e,c,d) \
-	nouveau_engine_create((p), (e), (c), true, "PCRYPT", "crypt", (d))
-#define nouveau_crypt_destroy(d) \
-	nouveau_engine_destroy(&(d)->base)
-#define nouveau_crypt_init(d) \
-	nouveau_engine_init(&(d)->base)
-#define nouveau_crypt_fini(d,s) \
-	nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_crypt_dtor _nouveau_engine_dtor
-#define _nouveau_crypt_init _nouveau_engine_init
-#define _nouveau_crypt_fini _nouveau_engine_fini
-
 extern struct nouveau_oclass nv84_crypt_oclass;
 extern struct nouveau_oclass nv98_crypt_oclass;
 
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
index 38ec1252cbaa..46948285f3e7 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -39,6 +39,11 @@ nouveau_disp(void *obj)
 
 extern struct nouveau_oclass nv04_disp_oclass;
 extern struct nouveau_oclass nv50_disp_oclass;
+extern struct nouveau_oclass nv84_disp_oclass;
+extern struct nouveau_oclass nva0_disp_oclass;
+extern struct nouveau_oclass nv94_disp_oclass;
+extern struct nouveau_oclass nva3_disp_oclass;
 extern struct nouveau_oclass nvd0_disp_oclass;
+extern struct nouveau_oclass nve0_disp_oclass;
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
index 700ccbb1941f..b28914ed1752 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/dmaobj.h
@@ -12,29 +12,17 @@ struct nouveau_dmaobj {
 	u32 access;
 	u64 start;
 	u64 limit;
+	u32 conf0;
 };
 
-#define nouveau_dmaobj_create(p,e,c,a,s,d) \
-	nouveau_dmaobj_create_((p), (e), (c), (a), (s), sizeof(**d), (void **)d)
-#define nouveau_dmaobj_destroy(p) \
-	nouveau_object_destroy(&(p)->base)
-#define nouveau_dmaobj_init(p) \
-	nouveau_object_init(&(p)->base)
-#define nouveau_dmaobj_fini(p,s) \
-	nouveau_object_fini(&(p)->base, (s))
-
-int nouveau_dmaobj_create_(struct nouveau_object *, struct nouveau_object *,
-			   struct nouveau_oclass *, void *data, u32 size,
-			   int length, void **);
-
-#define _nouveau_dmaobj_dtor nouveau_object_destroy
-#define _nouveau_dmaobj_init nouveau_object_init
-#define _nouveau_dmaobj_fini nouveau_object_fini
-
 struct nouveau_dmaeng {
 	struct nouveau_engine base;
-	int (*bind)(struct nouveau_dmaeng *, struct nouveau_object *parent,
-		    struct nouveau_dmaobj *, struct nouveau_gpuobj **);
+
+	/* creates a "physical" dma object from a struct nouveau_dmaobj */
+	int (*bind)(struct nouveau_dmaeng *dmaeng,
+		    struct nouveau_object *parent,
+		    struct nouveau_dmaobj *dmaobj,
+		    struct nouveau_gpuobj **);
 };
 
 #define nouveau_dmaeng_create(p,e,c,d) \
@@ -53,5 +41,8 @@ struct nouveau_dmaeng {
 extern struct nouveau_oclass nv04_dmaeng_oclass;
 extern struct nouveau_oclass nv50_dmaeng_oclass;
 extern struct nouveau_oclass nvc0_dmaeng_oclass;
+extern struct nouveau_oclass nvd0_dmaeng_oclass;
+
+extern struct nouveau_oclass nouveau_dmaobj_sclass[];
 
 #endif
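With dmaobj creation moved behind the shared nouveau_dmaobj_sclass, the per-chipset engine's remaining duty is the documented bind() hook. A hedged sketch of filling the new conf0 field when requesting a DMA object (only the ACCESS flag bits shown in this diff are used; the limit and remaining flag bits are illustrative):

	/* Illustrative request using the new conf0 field (NVD0 encoding from
	 * the class.h hunk above). */
	struct nv_dma_class args = {
		.flags = NV_DMA_ACCESS_RDWR,
		.start = 0,
		.limit = 0xffffffffffULL,
		.conf0 = NVD0_DMA_CONF0_ENABLE | NVD0_DMA_CONF0_PAGE_SP |
			 NVD0_DMA_CONF0_TYPE_LINEAR,
	};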
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
index d67fed1e3970..f18846c8c6fe 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/fifo.h
@@ -33,15 +33,15 @@ int nouveau_fifo_channel_create_(struct nouveau_object *,
 				 struct nouveau_object *,
 				 struct nouveau_oclass *,
 				 int bar, u32 addr, u32 size, u32 push,
-				 u32 engmask, int len, void **);
+				 u64 engmask, int len, void **);
 void nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *);
 
 #define _nouveau_fifo_channel_init _nouveau_namedb_init
 #define _nouveau_fifo_channel_fini _nouveau_namedb_fini
 
 void _nouveau_fifo_channel_dtor(struct nouveau_object *);
-u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u32);
-void _nouveau_fifo_channel_wr32(struct nouveau_object *, u32, u32);
+u32 _nouveau_fifo_channel_rd32(struct nouveau_object *, u64);
+void _nouveau_fifo_channel_wr32(struct nouveau_object *, u64, u32);
 
 struct nouveau_fifo_base {
 	struct nouveau_gpuobj base;
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/ppp.h b/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
index 74d554fb3281..0a66781e8cf1 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/ppp.h
@@ -1,45 +1,7 @@
 #ifndef __NOUVEAU_PPP_H__
 #define __NOUVEAU_PPP_H__
 
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_ppp_chan {
-	struct nouveau_engctx base;
-};
-
-#define nouveau_ppp_context_create(p,e,c,g,s,a,f,d) \
-	nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_ppp_context_destroy(d) \
-	nouveau_engctx_destroy(&(d)->base)
-#define nouveau_ppp_context_init(d) \
-	nouveau_engctx_init(&(d)->base)
-#define nouveau_ppp_context_fini(d,s) \
-	nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_ppp_context_dtor _nouveau_engctx_dtor
-#define _nouveau_ppp_context_init _nouveau_engctx_init
-#define _nouveau_ppp_context_fini _nouveau_engctx_fini
-#define _nouveau_ppp_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_ppp_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_ppp {
-	struct nouveau_engine base;
-};
-
-#define nouveau_ppp_create(p,e,c,d) \
-	nouveau_engine_create((p), (e), (c), true, "PPPP", "ppp", (d))
-#define nouveau_ppp_destroy(d) \
-	nouveau_engine_destroy(&(d)->base)
-#define nouveau_ppp_init(d) \
-	nouveau_engine_init(&(d)->base)
-#define nouveau_ppp_fini(d,s) \
-	nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_ppp_dtor _nouveau_engine_dtor
-#define _nouveau_ppp_init _nouveau_engine_init
-#define _nouveau_ppp_fini _nouveau_engine_fini
-
 extern struct nouveau_oclass nv98_ppp_oclass;
+extern struct nouveau_oclass nvc0_ppp_oclass;
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/vp.h b/drivers/gpu/drm/nouveau/core/include/engine/vp.h
index 05cd08fba377..d7b287b115bf 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/vp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/vp.h
@@ -1,45 +1,8 @@
 #ifndef __NOUVEAU_VP_H__
 #define __NOUVEAU_VP_H__
 
-#include <core/engine.h>
-#include <core/engctx.h>
-
-struct nouveau_vp_chan {
-	struct nouveau_engctx base;
-};
-
-#define nouveau_vp_context_create(p,e,c,g,s,a,f,d) \
-	nouveau_engctx_create((p), (e), (c), (g), (s), (a), (f), (d))
-#define nouveau_vp_context_destroy(d) \
-	nouveau_engctx_destroy(&(d)->base)
-#define nouveau_vp_context_init(d) \
-	nouveau_engctx_init(&(d)->base)
-#define nouveau_vp_context_fini(d,s) \
-	nouveau_engctx_fini(&(d)->base, (s))
-
-#define _nouveau_vp_context_dtor _nouveau_engctx_dtor
-#define _nouveau_vp_context_init _nouveau_engctx_init
-#define _nouveau_vp_context_fini _nouveau_engctx_fini
-#define _nouveau_vp_context_rd32 _nouveau_engctx_rd32
-#define _nouveau_vp_context_wr32 _nouveau_engctx_wr32
-
-struct nouveau_vp {
-	struct nouveau_engine base;
-};
-
-#define nouveau_vp_create(p,e,c,d) \
-	nouveau_engine_create((p), (e), (c), true, "PVP", "vp", (d))
-#define nouveau_vp_destroy(d) \
-	nouveau_engine_destroy(&(d)->base)
-#define nouveau_vp_init(d) \
-	nouveau_engine_init(&(d)->base)
-#define nouveau_vp_fini(d,s) \
-	nouveau_engine_fini(&(d)->base, (s))
-
-#define _nouveau_vp_dtor _nouveau_engine_dtor
-#define _nouveau_vp_init _nouveau_engine_init
-#define _nouveau_vp_fini _nouveau_engine_fini
-
 extern struct nouveau_oclass nv84_vp_oclass;
+extern struct nouveau_oclass nvc0_vp_oclass;
+extern struct nouveau_oclass nve0_vp_oclass;
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
index d682fb625833..b79025da581e 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dcb.h
@@ -23,6 +23,7 @@ struct dcb_output {
 	uint8_t bus;
 	uint8_t location;
 	uint8_t or;
+	uint8_t link;
 	bool duallink_possible;
 	union {
 		struct sor_conf {
@@ -55,36 +56,11 @@ struct dcb_output {
 
 u16 dcb_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *ent, u8 *len);
 u16 dcb_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len);
+u16 dcb_outp_parse(struct nouveau_bios *, u8 idx, u8 *, u8 *,
+		   struct dcb_output *);
+u16 dcb_outp_match(struct nouveau_bios *, u16 type, u16 mask, u8 *, u8 *,
+		   struct dcb_output *);
 int dcb_outp_foreach(struct nouveau_bios *, void *data, int (*exec)
 		     (struct nouveau_bios *, void *, int index, u16 entry));
 
-
-/* BIT 'U'/'d' table encoder subtables have hashes matching them to
- * a particular set of encoders.
- *
- * This function returns true if a particular DCB entry matches.
- */
-static inline bool
-dcb_hash_match(struct dcb_output *dcb, u32 hash)
-{
-	if ((hash & 0x000000f0) != (dcb->location << 4))
-		return false;
-	if ((hash & 0x0000000f) != dcb->type)
-		return false;
-	if (!(hash & (dcb->or << 16)))
-		return false;
-
-	switch (dcb->type) {
-	case DCB_OUTPUT_TMDS:
-	case DCB_OUTPUT_LVDS:
-	case DCB_OUTPUT_DP:
-		if (hash & 0x00c00000) {
-			if (!(hash & (dcb->sorconf.link << 22)))
-				return false;
-		}
-	default:
-		return true;
-	}
-}
-
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h
new file mode 100644
index 000000000000..c35937e2f6a4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/disp.h
@@ -0,0 +1,48 @@
1#ifndef __NVBIOS_DISP_H__
2#define __NVBIOS_DISP_H__
3
4u16 nvbios_disp_table(struct nouveau_bios *,
5 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub);
6
7struct nvbios_disp {
8 u16 data;
9};
10
11u16 nvbios_disp_entry(struct nouveau_bios *, u8 idx,
12 u8 *ver, u8 *hdr__, u8 *sub);
13u16 nvbios_disp_parse(struct nouveau_bios *, u8 idx,
14 u8 *ver, u8 *hdr__, u8 *sub,
15 struct nvbios_disp *);
16
17struct nvbios_outp {
18 u16 type;
19 u16 mask;
20 u16 script[3];
21};
22
23u16 nvbios_outp_entry(struct nouveau_bios *, u8 idx,
24 u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
25u16 nvbios_outp_parse(struct nouveau_bios *, u8 idx,
26 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
27 struct nvbios_outp *);
28u16 nvbios_outp_match(struct nouveau_bios *, u16 type, u16 mask,
29 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
30 struct nvbios_outp *);
31
32
33struct nvbios_ocfg {
34 u16 match;
35 u16 clkcmp[2];
36};
37
38u16 nvbios_ocfg_entry(struct nouveau_bios *, u16 outp, u8 idx,
39 u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
40u16 nvbios_ocfg_parse(struct nouveau_bios *, u16 outp, u8 idx,
41 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
42 struct nvbios_ocfg *);
43u16 nvbios_ocfg_match(struct nouveau_bios *, u16 outp, u16 type,
44 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
45 struct nvbios_ocfg *);
46u16 nvbios_oclk_match(struct nouveau_bios *, u16 cmp, u32 khz);
47
48#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
index 73b5e5d3e75a..6e54218b55fc 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/dp.h
@@ -1,8 +1,34 @@
 #ifndef __NVBIOS_DP_H__
 #define __NVBIOS_DP_H__
 
-u16 dp_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
-u16 dp_outp(struct nouveau_bios *, u8 idx, u8 *ver, u8 *len);
-u16 dp_outp_match(struct nouveau_bios *, struct dcb_output *, u8 *ver, u8 *len);
+struct nvbios_dpout {
+	u16 type;
+	u16 mask;
+	u8 flags;
+	u32 script[5];
+	u32 lnkcmp;
+};
+
+u16 nvbios_dpout_parse(struct nouveau_bios *, u8 idx,
+		       u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		       struct nvbios_dpout *);
+u16 nvbios_dpout_match(struct nouveau_bios *, u16 type, u16 mask,
+		       u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		       struct nvbios_dpout *);
+
+struct nvbios_dpcfg {
+	u8 drv;
+	u8 pre;
+	u8 unk;
+};
+
+u16
+nvbios_dpcfg_parse(struct nouveau_bios *, u16 outp, u8 idx,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		   struct nvbios_dpcfg *);
+u16
+nvbios_dpcfg_match(struct nouveau_bios *, u16 outp, u8 un, u8 vs, u8 pe,
+		   u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
+		   struct nvbios_dpcfg *);
 
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index 5c1b5e1904f9..da470e6851b1 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -69,8 +69,11 @@ struct nouveau_fb {
 	} type;
 	u64 stolen;
 	u64 size;
+
 	int ranks;
+	int parts;
 
+	int (*init)(struct nouveau_fb *);
 	int (*get)(struct nouveau_fb *, u64 size, u32 align,
 		   u32 size_nc, u32 type, struct nouveau_mem **);
 	void (*put)(struct nouveau_fb *, struct nouveau_mem **);
@@ -84,6 +87,8 @@ struct nouveau_fb {
 	int regions;
 	void (*init)(struct nouveau_fb *, int i, u32 addr, u32 size,
 		     u32 pitch, u32 flags, struct nouveau_fb_tile *);
+	void (*comp)(struct nouveau_fb *, int i, u32 size, u32 flags,
+		     struct nouveau_fb_tile *);
 	void (*fini)(struct nouveau_fb *, int i,
 		     struct nouveau_fb_tile *);
 	void (*prog)(struct nouveau_fb *, int i,
@@ -99,7 +104,7 @@ nouveau_fb(void *obj)
 
 #define nouveau_fb_create(p,e,c,d) \
 	nouveau_subdev_create((p), (e), (c), 0, "PFB", "fb", (d))
-int nouveau_fb_created(struct nouveau_fb *);
+int nouveau_fb_preinit(struct nouveau_fb *);
 void nouveau_fb_destroy(struct nouveau_fb *);
 int nouveau_fb_init(struct nouveau_fb *);
 #define nouveau_fb_fini(p,s) \
@@ -111,9 +116,19 @@ int _nouveau_fb_init(struct nouveau_object *);
 
 extern struct nouveau_oclass nv04_fb_oclass;
 extern struct nouveau_oclass nv10_fb_oclass;
+extern struct nouveau_oclass nv1a_fb_oclass;
 extern struct nouveau_oclass nv20_fb_oclass;
+extern struct nouveau_oclass nv25_fb_oclass;
 extern struct nouveau_oclass nv30_fb_oclass;
+extern struct nouveau_oclass nv35_fb_oclass;
+extern struct nouveau_oclass nv36_fb_oclass;
 extern struct nouveau_oclass nv40_fb_oclass;
+extern struct nouveau_oclass nv41_fb_oclass;
+extern struct nouveau_oclass nv44_fb_oclass;
+extern struct nouveau_oclass nv46_fb_oclass;
+extern struct nouveau_oclass nv47_fb_oclass;
+extern struct nouveau_oclass nv49_fb_oclass;
+extern struct nouveau_oclass nv4e_fb_oclass;
 extern struct nouveau_oclass nv50_fb_oclass;
 extern struct nouveau_oclass nvc0_fb_oclass;
 
@@ -122,13 +137,35 @@ int nouveau_fb_bios_memtype(struct nouveau_bios *);
 
 bool nv04_fb_memtype_valid(struct nouveau_fb *, u32 memtype);
 
+void nv10_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
+void nv10_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
 void nv10_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
 
+int nv20_fb_vram_init(struct nouveau_fb *);
+void nv20_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
+void nv20_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
+void nv20_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+int nv30_fb_init(struct nouveau_object *);
 void nv30_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
 		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
-void nv30_fb_tile_fini(struct nouveau_fb *, int i, struct nouveau_fb_tile *);
+
+void nv40_fb_tile_comp(struct nouveau_fb *, int i, u32 size, u32 flags,
+		       struct nouveau_fb_tile *);
+
+int nv41_fb_vram_init(struct nouveau_fb *);
+int nv41_fb_init(struct nouveau_object *);
+void nv41_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+int nv44_fb_vram_init(struct nouveau_fb *);
+int nv44_fb_init(struct nouveau_object *);
+void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
+
+void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
+		       u32 pitch, u32 flags, struct nouveau_fb_tile *);
 
 void nv50_fb_vram_del(struct nouveau_fb *, struct nouveau_mem **);
-void nv50_fb_trap(struct nouveau_fb *, int display);
 
 #endif
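The newly exported helpers above exist so chipset fb implementations can be assembled from shared pieces instead of private copies. A hedged sketch of the wiring inside a ctor (illustrative only, not a specific chipset's actual code; the region count is made up):

	/* Hedged ctor fragment: hook up the shared vram/tile helpers
	 * declared above. */
	priv->base.init = nv20_fb_vram_init;
	priv->base.tile.regions = 8;
	priv->base.tile.init = nv20_fb_tile_init;
	priv->base.tile.comp = nv40_fb_tile_comp;
	priv->base.tile.fini = nv20_fb_tile_fini;
	priv->base.tile.prog = nv20_fb_tile_prog;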
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
index cd01c533007a..d70ba342aa2e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
@@ -65,14 +65,14 @@ nouveau_barobj_dtor(struct nouveau_object *object)
 }
 
 static u32
-nouveau_barobj_rd32(struct nouveau_object *object, u32 addr)
+nouveau_barobj_rd32(struct nouveau_object *object, u64 addr)
 {
 	struct nouveau_barobj *barobj = (void *)object;
 	return ioread32_native(barobj->iomem + addr);
 }
 
 static void
-nouveau_barobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nouveau_barobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
 {
 	struct nouveau_barobj *barobj = (void *)object;
 	iowrite32_native(data, barobj->iomem + addr);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index 70ca7d5a1aa1..dd111947eb86 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -63,7 +63,7 @@ nouveau_bios_shadow_of(struct nouveau_bios *bios)
 	struct pci_dev *pdev = nv_device(bios)->pdev;
 	struct device_node *dn;
 	const u32 *data;
-	int size, i;
+	int size;
 
 	dn = pci_device_to_OF_node(pdev);
 	if (!dn) {
@@ -210,11 +210,19 @@ nouveau_bios_shadow_acpi(struct nouveau_bios *bios)
 		return;
 
 	bios->data = kmalloc(bios->size, GFP_KERNEL);
-	for (i = 0; bios->data && i < bios->size; i += cnt) {
-		cnt = min((bios->size - i), (u32)4096);
-		ret = nouveau_acpi_get_bios_chunk(bios->data, i, cnt);
-		if (ret != cnt)
-			break;
+	if (bios->data) {
+		/* disobey the acpi spec - much faster on at least w530 ... */
+		ret = nouveau_acpi_get_bios_chunk(bios->data, 0, bios->size);
+		if (ret != bios->size ||
+		    nvbios_checksum(bios->data, bios->size)) {
+			/* ... that didn't work, ok, i'll be good now */
+			for (i = 0; i < bios->size; i += cnt) {
+				cnt = min((bios->size - i), (u32)4096);
+				ret = nouveau_acpi_get_bios_chunk(bios->data, i, cnt);
+				if (ret != cnt)
+					break;
+			}
+		}
 	}
 }
 
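The rewritten shadow path trades spec compliance for speed: one large ACPI read, validated by checksum, with the 4KiB chunk loop kept as fallback. Distilled into a hedged sketch (nvbios_checksum() is assumed here to return non-zero for a corrupt image):

	/* Hedged sketch of the fast path above, in isolation. */
	static bool
	example_shadow_fast(struct nouveau_bios *bios)
	{
		int ret = nouveau_acpi_get_bios_chunk(bios->data, 0, bios->size);
		return ret == bios->size &&
		       !nvbios_checksum(bios->data, bios->size);
	}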
@@ -358,42 +366,42 @@ nouveau_bios_shadow(struct nouveau_bios *bios)
 }
 
 static u8
-nouveau_bios_rd08(struct nouveau_object *object, u32 addr)
+nouveau_bios_rd08(struct nouveau_object *object, u64 addr)
 {
 	struct nouveau_bios *bios = (void *)object;
 	return bios->data[addr];
 }
 
 static u16
-nouveau_bios_rd16(struct nouveau_object *object, u32 addr)
+nouveau_bios_rd16(struct nouveau_object *object, u64 addr)
 {
 	struct nouveau_bios *bios = (void *)object;
 	return get_unaligned_le16(&bios->data[addr]);
 }
 
 static u32
-nouveau_bios_rd32(struct nouveau_object *object, u32 addr)
+nouveau_bios_rd32(struct nouveau_object *object, u64 addr)
 {
 	struct nouveau_bios *bios = (void *)object;
 	return get_unaligned_le32(&bios->data[addr]);
 }
 
 static void
-nouveau_bios_wr08(struct nouveau_object *object, u32 addr, u8 data)
+nouveau_bios_wr08(struct nouveau_object *object, u64 addr, u8 data)
 {
 	struct nouveau_bios *bios = (void *)object;
 	bios->data[addr] = data;
 }
 
 static void
-nouveau_bios_wr16(struct nouveau_object *object, u32 addr, u16 data)
+nouveau_bios_wr16(struct nouveau_object *object, u64 addr, u16 data)
 {
 	struct nouveau_bios *bios = (void *)object;
 	put_unaligned_le16(data, &bios->data[addr]);
 }
 
 static void
-nouveau_bios_wr32(struct nouveau_object *object, u32 addr, u32 data)
+nouveau_bios_wr32(struct nouveau_object *object, u64 addr, u32 data)
 {
 	struct nouveau_bios *bios = (void *)object;
 	put_unaligned_le32(data, &bios->data[addr]);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
index c51197157749..0fd87df99dd6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
@@ -107,6 +107,69 @@ dcb_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len)
107 return 0x0000; 107 return 0x0000;
108} 108}
109 109
110u16
111dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
112 struct dcb_output *outp)
113{
114 u16 dcb = dcb_outp(bios, idx, ver, len);
115 if (dcb) {
116 if (*ver >= 0x20) {
117 u32 conn = nv_ro32(bios, dcb + 0x00);
118 outp->or = (conn & 0x0f000000) >> 24;
119 outp->location = (conn & 0x00300000) >> 20;
120 outp->bus = (conn & 0x000f0000) >> 16;
121 outp->connector = (conn & 0x0000f000) >> 12;
122 outp->heads = (conn & 0x00000f00) >> 8;
123 outp->i2c_index = (conn & 0x000000f0) >> 4;
124 outp->type = (conn & 0x0000000f);
125 outp->link = 0;
126 } else {
127 dcb = 0x0000;
128 }
129
130 if (*ver >= 0x40) {
131 u32 conf = nv_ro32(bios, dcb + 0x04);
132 switch (outp->type) {
133 case DCB_OUTPUT_TMDS:
134 case DCB_OUTPUT_LVDS:
135 case DCB_OUTPUT_DP:
136 outp->link = (conf & 0x00000030) >> 4;
137 outp->sorconf.link = outp->link; /*XXX*/
138 break;
139 default:
140 break;
141 }
142 }
143 }
144 return dcb;
145}
146
147static inline u16
148dcb_outp_hasht(struct dcb_output *outp)
149{
150 return outp->type;
151}
152
153static inline u16
154dcb_outp_hashm(struct dcb_output *outp)
155{
156 return (outp->heads << 8) | (outp->link << 6) | outp->or;
157}
158
159u16
160dcb_outp_match(struct nouveau_bios *bios, u16 type, u16 mask,
161 u8 *ver, u8 *len, struct dcb_output *outp)
162{
163 u16 dcb, idx = 0;
164 while ((dcb = dcb_outp_parse(bios, idx++, ver, len, outp))) {
165 if (dcb_outp_hasht(outp) == type) {
166 if ((dcb_outp_hashm(outp) & mask) == mask)
167 break;
168 }
169 }
170 return dcb;
171}
172
110int 173int
111dcb_outp_foreach(struct nouveau_bios *bios, void *data, 174dcb_outp_foreach(struct nouveau_bios *bios, void *data,
112 int (*exec)(struct nouveau_bios *, void *, int, u16)) 175 int (*exec)(struct nouveau_bios *, void *, int, u16))
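A note on the new helpers above: dcb_outp_parse() decodes the raw DCB connector dword into dcb_output fields, and dcb_outp_match() walks the table until both the type and the (heads, link, or) hash agree with the caller's mask. A minimal, hypothetical caller, assuming a struct nouveau_bios *bios is in scope (none of this is part of the patch):

	struct dcb_output outp;
	u8 ver, len;
	/* find a TMDS output driven by OR 1; the mask layout follows
	 * dcb_outp_hashm(): (heads << 8) | (link << 6) | or */
	u16 data = dcb_outp_match(bios, DCB_OUTPUT_TMDS, 0x0001,
	                          &ver, &len, &outp);
	if (data)
		/* outp.connector, outp.i2c_index, etc. are now valid */;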
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
new file mode 100644
index 000000000000..7f16e52d9bea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
@@ -0,0 +1,178 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/disp.h>
28
29u16
30nvbios_disp_table(struct nouveau_bios *bios,
31 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *sub)
32{
33 struct bit_entry U;
34
35 if (!bit_entry(bios, 'U', &U)) {
36 if (U.version == 1) {
37 u16 data = nv_ro16(bios, U.offset);
38 if (data) {
39 *ver = nv_ro08(bios, data + 0x00);
40 switch (*ver) {
41 case 0x20:
42 case 0x21:
43 *hdr = nv_ro08(bios, data + 0x01);
44 *len = nv_ro08(bios, data + 0x02);
45 *cnt = nv_ro08(bios, data + 0x03);
46 *sub = nv_ro08(bios, data + 0x04);
47 return data;
48 default:
49 break;
50 }
51 }
52 }
53 }
54
55 return 0x0000;
56}
57
58u16
59nvbios_disp_entry(struct nouveau_bios *bios, u8 idx,
60 u8 *ver, u8 *len, u8 *sub)
61{
62 u8 hdr, cnt;
63 u16 data = nvbios_disp_table(bios, ver, &hdr, &cnt, len, sub);
64 if (data && idx < cnt)
65 return data + hdr + (idx * *len);
66 *ver = 0x00;
67 return 0x0000;
68}
69
70u16
71nvbios_disp_parse(struct nouveau_bios *bios, u8 idx,
72 u8 *ver, u8 *len, u8 *sub,
73 struct nvbios_disp *info)
74{
75 u16 data = nvbios_disp_entry(bios, idx, ver, len, sub);
76 if (data && *len >= 2) {
77 info->data = nv_ro16(bios, data + 0);
78 return data;
79 }
80 return 0x0000;
81}
82
83u16
84nvbios_outp_entry(struct nouveau_bios *bios, u8 idx,
85 u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
86{
87 struct nvbios_disp info;
88 u16 data = nvbios_disp_parse(bios, idx, ver, len, hdr, &info);
89 if (data) {
90 *cnt = nv_ro08(bios, info.data + 0x05);
91 *len = 0x06;
92 data = info.data;
93 }
94 return data;
95}
96
97u16
98nvbios_outp_parse(struct nouveau_bios *bios, u8 idx,
99 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
100 struct nvbios_outp *info)
101{
102 u16 data = nvbios_outp_entry(bios, idx, ver, hdr, cnt, len);
103 if (data && *hdr >= 0x0a) {
104 info->type = nv_ro16(bios, data + 0x00);
105 info->mask = nv_ro32(bios, data + 0x02);
106 if (*ver <= 0x20) /* match any link */
107 info->mask |= 0x00c0;
108 info->script[0] = nv_ro16(bios, data + 0x06);
109 info->script[1] = nv_ro16(bios, data + 0x08);
110 info->script[2] = 0x0000;
111 if (*hdr >= 0x0c)
112 info->script[2] = nv_ro16(bios, data + 0x0a);
113 return data;
114 }
115 return 0x0000;
116}
117
118u16
119nvbios_outp_match(struct nouveau_bios *bios, u16 type, u16 mask,
120 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
121 struct nvbios_outp *info)
122{
123 u16 data, idx = 0;
124 while ((data = nvbios_outp_parse(bios, idx++, ver, hdr, cnt, len, info)) || *ver) {
125 if (data && info->type == type) {
126 if ((info->mask & mask) == mask)
127 break;
128 }
129 }
130 return data;
131}
132
133u16
134nvbios_ocfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx,
135 u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
136{
137 if (idx < *cnt)
138 return outp + *hdr + (idx * *len);
139 return 0x0000;
140}
141
142u16
143nvbios_ocfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
144 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
145 struct nvbios_ocfg *info)
146{
147 u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);
148 if (data) {
149 info->match = nv_ro16(bios, data + 0x00);
150 info->clkcmp[0] = nv_ro16(bios, data + 0x02);
151 info->clkcmp[1] = nv_ro16(bios, data + 0x04);
152 }
153 return data;
154}
155
156u16
157nvbios_ocfg_match(struct nouveau_bios *bios, u16 outp, u16 type,
158 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
159 struct nvbios_ocfg *info)
160{
161 u16 data, idx = 0;
162 while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) {
163 if (info->match == type)
164 break;
165 }
166 return data;
167}
168
169u16
170nvbios_oclk_match(struct nouveau_bios *bios, u16 cmp, u32 khz)
171{
172 while (cmp) {
173 if (khz / 10 >= nv_ro16(bios, cmp + 0x00))
174 return nv_ro16(bios, cmp + 0x02);
175 cmp += 0x04;
176 }
177 return 0x0000;
178}
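A note on nvbios_oclk_match(): the table it walks stores clock thresholds in units of 10 kHz (hence the khz / 10) and yields the script pointer from the first entry whose threshold the requested clock meets. A hedged sketch of chaining the new helpers, where bios, outp, type and the u8 out-parameters are assumed to be in scope and 165000 kHz is an arbitrary illustration:

	struct nvbios_ocfg info;
	u16 data = nvbios_ocfg_match(bios, outp, type,
	                             &ver, &hdr, &cnt, &len, &info);
	if (data) {
		/* clkcmp[0] points at (threshold, script) pairs */
		u16 script = nvbios_oclk_match(bios, info.clkcmp[0], 165000);
	}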
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
index 3cbc0f3e8d5e..663853bcca82 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
@@ -25,23 +25,29 @@
25 25
26#include "subdev/bios.h" 26#include "subdev/bios.h"
27#include "subdev/bios/bit.h" 27#include "subdev/bios/bit.h"
28#include "subdev/bios/dcb.h"
29#include "subdev/bios/dp.h" 28#include "subdev/bios/dp.h"
30 29
31u16 30static u16
32dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) 31nvbios_dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
33{ 32{
34 struct bit_entry bit_d; 33 struct bit_entry d;
35 34
36 if (!bit_entry(bios, 'd', &bit_d)) { 35 if (!bit_entry(bios, 'd', &d)) {
37 if (bit_d.version == 1) { 36 if (d.version == 1 && d.length >= 2) {
38 u16 data = nv_ro16(bios, bit_d.offset); 37 u16 data = nv_ro16(bios, d.offset);
39 if (data) { 38 if (data) {
40 *ver = nv_ro08(bios, data + 0); 39 *ver = nv_ro08(bios, data + 0x00);
41 *hdr = nv_ro08(bios, data + 1); 40 switch (*ver) {
42 *len = nv_ro08(bios, data + 2); 41 case 0x21:
43 *cnt = nv_ro08(bios, data + 3); 42 case 0x30:
44 return data; 43 case 0x40:
44 *hdr = nv_ro08(bios, data + 0x01);
45 *len = nv_ro08(bios, data + 0x02);
46 *cnt = nv_ro08(bios, data + 0x03);
47 return data;
48 default:
49 break;
50 }
45 } 51 }
46 } 52 }
47 } 53 }
@@ -49,28 +55,150 @@ dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
49 return 0x0000; 55 return 0x0000;
50} 56}
51 57
58static u16
59nvbios_dpout_entry(struct nouveau_bios *bios, u8 idx,
60 u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
61{
62 u16 data = nvbios_dp_table(bios, ver, hdr, cnt, len);
63 if (data && idx < *cnt) {
64 u16 outp = nv_ro16(bios, data + *hdr + idx * *len);
65 switch (*ver * !!outp) {
66 case 0x21:
67 case 0x30:
68 *hdr = nv_ro08(bios, data + 0x04);
69 *len = nv_ro08(bios, data + 0x05);
70 *cnt = nv_ro08(bios, outp + 0x04);
71 break;
72 case 0x40:
73 *hdr = nv_ro08(bios, data + 0x04);
74 *cnt = 0;
75 *len = 0;
76 break;
77 default:
78 break;
79 }
80 return outp;
81 }
82 *ver = 0x00;
83 return 0x0000;
84}
85
52u16 86u16
53dp_outp(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len) 87nvbios_dpout_parse(struct nouveau_bios *bios, u8 idx,
88 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
89 struct nvbios_dpout *info)
54{ 90{
55 u8 hdr, cnt; 91 u16 data = nvbios_dpout_entry(bios, idx, ver, hdr, cnt, len);
56 u16 table = dp_table(bios, ver, &hdr, &cnt, len); 92 if (data && *ver) {
57 if (table && idx < cnt) 93 info->type = nv_ro16(bios, data + 0x00);
58 return nv_ro16(bios, table + hdr + (idx * *len)); 94 info->mask = nv_ro16(bios, data + 0x02);
59 return 0xffff; 95 switch (*ver) {
96 case 0x21:
97 case 0x30:
98 info->flags = nv_ro08(bios, data + 0x05);
99 info->script[0] = nv_ro16(bios, data + 0x06);
100 info->script[1] = nv_ro16(bios, data + 0x08);
101 info->lnkcmp = nv_ro16(bios, data + 0x0a);
102 info->script[2] = nv_ro16(bios, data + 0x0c);
103 info->script[3] = nv_ro16(bios, data + 0x0e);
104 info->script[4] = nv_ro16(bios, data + 0x10);
105 break;
106 case 0x40:
107 info->flags = nv_ro08(bios, data + 0x04);
108 info->script[0] = nv_ro16(bios, data + 0x05);
109 info->script[1] = nv_ro16(bios, data + 0x07);
110 info->lnkcmp = nv_ro16(bios, data + 0x09);
111 info->script[2] = nv_ro16(bios, data + 0x0b);
112 info->script[3] = nv_ro16(bios, data + 0x0d);
113 info->script[4] = nv_ro16(bios, data + 0x0f);
114 break;
115 default:
116 data = 0x0000;
117 break;
118 }
119 }
120 return data;
60} 121}
61 122
62u16 123u16
63dp_outp_match(struct nouveau_bios *bios, struct dcb_output *outp, 124nvbios_dpout_match(struct nouveau_bios *bios, u16 type, u16 mask,
64 u8 *ver, u8 *len) 125 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
126 struct nvbios_dpout *info)
65{ 127{
66 u8 idx = 0; 128 u16 data, idx = 0;
67 u16 data; 129 while ((data = nvbios_dpout_parse(bios, idx++, ver, hdr, cnt, len, info)) || *ver) {
68 while ((data = dp_outp(bios, idx++, ver, len)) != 0xffff) { 130 if (data && info->type == type) {
69 if (data) { 131 if ((info->mask & mask) == mask)
70 u32 hash = nv_ro32(bios, data); 132 break;
71 if (dcb_hash_match(outp, hash))
72 return data;
73 } 133 }
74 } 134 }
135 return data;
136}
137
138static u16
139nvbios_dpcfg_entry(struct nouveau_bios *bios, u16 outp, u8 idx,
140 u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
141{
142 if (*ver >= 0x40) {
143 outp = nvbios_dp_table(bios, ver, hdr, cnt, len);
144	*hdr = *hdr + (*len * *cnt);
145 *len = nv_ro08(bios, outp + 0x06);
146 *cnt = nv_ro08(bios, outp + 0x07);
147 }
148
149 if (idx < *cnt)
150 return outp + *hdr + (idx * *len);
151
75 return 0x0000; 152 return 0x0000;
76} 153}
154
155u16
156nvbios_dpcfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
157 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
158 struct nvbios_dpcfg *info)
159{
160 u16 data = nvbios_dpcfg_entry(bios, outp, idx, ver, hdr, cnt, len);
161 if (data) {
162 switch (*ver) {
163 case 0x21:
164 info->drv = nv_ro08(bios, data + 0x02);
165 info->pre = nv_ro08(bios, data + 0x03);
166 info->unk = nv_ro08(bios, data + 0x04);
167 break;
168 case 0x30:
169 case 0x40:
170 info->drv = nv_ro08(bios, data + 0x01);
171 info->pre = nv_ro08(bios, data + 0x02);
172 info->unk = nv_ro08(bios, data + 0x03);
173 break;
174 default:
175 data = 0x0000;
176 break;
177 }
178 }
179 return data;
180}
181
182u16
183nvbios_dpcfg_match(struct nouveau_bios *bios, u16 outp, u8 un, u8 vs, u8 pe,
184 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
185 struct nvbios_dpcfg *info)
186{
187	u8 idx = 0;
188 u16 data;
189
190 if (*ver >= 0x30) {
191 const u8 vsoff[] = { 0, 4, 7, 9 };
192 idx = (un * 10) + vsoff[vs] + pe;
193 } else {
194 while ((data = nvbios_dpcfg_entry(bios, outp, idx,
195 ver, hdr, cnt, len))) {
196 if (nv_ro08(bios, data + 0x00) == vs &&
197 nv_ro08(bios, data + 0x01) == pe)
198 break;
199 idx++;
200 }
201 }
202
203	return nvbios_dpcfg_parse(bios, outp, idx, ver, hdr, cnt, len, info);
204}
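A note on the index arithmetic in nvbios_dpcfg_match(): for DP table revision 0x30 and later the drive/pre-emphasis entries are laid out consecutively, each voltage-swing group contributing one fewer pre-emphasis level than the last (4, 3, 2, 1; hence vsoff[] = { 0, 4, 7, 9 } and ten entries per un group). For example, un = 0, vs = 2, pe = 1 selects idx = 0 * 10 + vsoff[2] + 1 = 8.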
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
index 4c9f1e508165..c90d4aa3ae4f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/gpio.c
@@ -101,8 +101,8 @@ dcb_gpio_parse(struct nouveau_bios *bios, int idx, u8 func, u8 line,
101 } 101 }
102 102
103 /* DCB 2.2, fixed TVDAC GPIO data */ 103 /* DCB 2.2, fixed TVDAC GPIO data */
104 if ((entry = dcb_table(bios, &ver, &hdr, &cnt, &len)) && ver >= 0x22) { 104 if ((entry = dcb_table(bios, &ver, &hdr, &cnt, &len))) {
105 if (func == DCB_GPIO_TVDAC0) { 105 if (ver >= 0x22 && ver < 0x30 && func == DCB_GPIO_TVDAC0) {
106 u8 conf = nv_ro08(bios, entry - 5); 106 u8 conf = nv_ro08(bios, entry - 5);
107 u8 addr = nv_ro08(bios, entry - 4); 107 u8 addr = nv_ro08(bios, entry - 4);
108 if (conf & 0x01) { 108 if (conf & 0x01) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 6be8c32f6e4c..ae168bbb86d8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -743,9 +743,10 @@ static void
743init_dp_condition(struct nvbios_init *init) 743init_dp_condition(struct nvbios_init *init)
744{ 744{
745 struct nouveau_bios *bios = init->bios; 745 struct nouveau_bios *bios = init->bios;
746 struct nvbios_dpout info;
746 u8 cond = nv_ro08(bios, init->offset + 1); 747 u8 cond = nv_ro08(bios, init->offset + 1);
747 u8 unkn = nv_ro08(bios, init->offset + 2); 748 u8 unkn = nv_ro08(bios, init->offset + 2);
748 u8 ver, len; 749 u8 ver, hdr, cnt, len;
749 u16 data; 750 u16 data;
750 751
751 trace("DP_CONDITION\t0x%02x 0x%02x\n", cond, unkn); 752 trace("DP_CONDITION\t0x%02x 0x%02x\n", cond, unkn);
@@ -759,10 +760,12 @@ init_dp_condition(struct nvbios_init *init)
759 case 1: 760 case 1:
760 case 2: 761 case 2:
761 if ( init->outp && 762 if ( init->outp &&
762 (data = dp_outp_match(bios, init->outp, &ver, &len))) { 763 (data = nvbios_dpout_match(bios, DCB_OUTPUT_DP,
763 if (ver <= 0x40 && !(nv_ro08(bios, data + 5) & cond)) 764 (init->outp->or << 0) |
764 init_exec_set(init, false); 765 (init->outp->sorconf.link << 6),
765 if (ver == 0x40 && !(nv_ro08(bios, data + 4) & cond)) 766 &ver, &hdr, &cnt, &len, &info)))
767 {
768 if (!(info.flags & cond))
766 init_exec_set(init, false); 769 init_exec_set(init, false);
767 break; 770 break;
768 } 771 }
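A note on the rewritten INIT_DP_CONDITION handler: instead of poking version-specific byte offsets, it now asks nvbios_dpout_match() for the parsed entry and tests info.flags. The mask it builds mirrors dcb_outp_hashm() from dcb.c; written out for a hypothetical SOR 2 on link B:

	/* or index in bits 3:0, link selector in bits 7:6 */
	u16 mask = (2 << 0) | (1 << 6);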
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/base.c b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
index ca9a4648bd8a..f8a7ed4166cf 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/base.c
@@ -25,7 +25,6 @@
25#include <core/object.h> 25#include <core/object.h>
26#include <core/device.h> 26#include <core/device.h>
27#include <core/client.h> 27#include <core/client.h>
28#include <core/device.h>
29#include <core/option.h> 28#include <core/option.h>
30 29
31#include <core/class.h> 30#include <core/class.h>
@@ -61,19 +60,24 @@ struct nouveau_devobj {
61 60
62static const u64 disable_map[] = { 61static const u64 disable_map[] = {
63 [NVDEV_SUBDEV_VBIOS] = NV_DEVICE_DISABLE_VBIOS, 62 [NVDEV_SUBDEV_VBIOS] = NV_DEVICE_DISABLE_VBIOS,
63 [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_DISABLE_CORE,
64 [NVDEV_SUBDEV_GPIO] = NV_DEVICE_DISABLE_CORE, 64 [NVDEV_SUBDEV_GPIO] = NV_DEVICE_DISABLE_CORE,
65 [NVDEV_SUBDEV_I2C] = NV_DEVICE_DISABLE_CORE, 65 [NVDEV_SUBDEV_I2C] = NV_DEVICE_DISABLE_CORE,
66 [NVDEV_SUBDEV_DEVINIT] = NV_DEVICE_DISABLE_CORE, 66 [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_DISABLE_CORE,
67 [NVDEV_SUBDEV_MXM] = NV_DEVICE_DISABLE_CORE,
67 [NVDEV_SUBDEV_MC] = NV_DEVICE_DISABLE_CORE, 68 [NVDEV_SUBDEV_MC] = NV_DEVICE_DISABLE_CORE,
68 [NVDEV_SUBDEV_TIMER] = NV_DEVICE_DISABLE_CORE, 69 [NVDEV_SUBDEV_TIMER] = NV_DEVICE_DISABLE_CORE,
69 [NVDEV_SUBDEV_FB] = NV_DEVICE_DISABLE_CORE, 70 [NVDEV_SUBDEV_FB] = NV_DEVICE_DISABLE_CORE,
70 [NVDEV_SUBDEV_VM] = NV_DEVICE_DISABLE_CORE, 71 [NVDEV_SUBDEV_LTCG] = NV_DEVICE_DISABLE_CORE,
72 [NVDEV_SUBDEV_IBUS] = NV_DEVICE_DISABLE_CORE,
71 [NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_DISABLE_CORE, 73 [NVDEV_SUBDEV_INSTMEM] = NV_DEVICE_DISABLE_CORE,
74 [NVDEV_SUBDEV_VM] = NV_DEVICE_DISABLE_CORE,
72 [NVDEV_SUBDEV_BAR] = NV_DEVICE_DISABLE_CORE, 75 [NVDEV_SUBDEV_BAR] = NV_DEVICE_DISABLE_CORE,
73 [NVDEV_SUBDEV_VOLT] = NV_DEVICE_DISABLE_CORE, 76 [NVDEV_SUBDEV_VOLT] = NV_DEVICE_DISABLE_CORE,
74 [NVDEV_SUBDEV_CLOCK] = NV_DEVICE_DISABLE_CORE,
75 [NVDEV_SUBDEV_THERM] = NV_DEVICE_DISABLE_CORE, 77 [NVDEV_SUBDEV_THERM] = NV_DEVICE_DISABLE_CORE,
76 [NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_DISABLE_CORE, 78 [NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_DISABLE_CORE,
79 [NVDEV_ENGINE_FIFO] = NV_DEVICE_DISABLE_FIFO,
80 [NVDEV_ENGINE_SW] = NV_DEVICE_DISABLE_FIFO,
77 [NVDEV_ENGINE_GR] = NV_DEVICE_DISABLE_GRAPH, 81 [NVDEV_ENGINE_GR] = NV_DEVICE_DISABLE_GRAPH,
78 [NVDEV_ENGINE_MPEG] = NV_DEVICE_DISABLE_MPEG, 82 [NVDEV_ENGINE_MPEG] = NV_DEVICE_DISABLE_MPEG,
79 [NVDEV_ENGINE_ME] = NV_DEVICE_DISABLE_ME, 83 [NVDEV_ENGINE_ME] = NV_DEVICE_DISABLE_ME,
@@ -84,7 +88,7 @@ static const u64 disable_map[] = {
84 [NVDEV_ENGINE_COPY0] = NV_DEVICE_DISABLE_COPY0, 88 [NVDEV_ENGINE_COPY0] = NV_DEVICE_DISABLE_COPY0,
85 [NVDEV_ENGINE_COPY1] = NV_DEVICE_DISABLE_COPY1, 89 [NVDEV_ENGINE_COPY1] = NV_DEVICE_DISABLE_COPY1,
86 [NVDEV_ENGINE_UNK1C1] = NV_DEVICE_DISABLE_UNK1C1, 90 [NVDEV_ENGINE_UNK1C1] = NV_DEVICE_DISABLE_UNK1C1,
87 [NVDEV_ENGINE_FIFO] = NV_DEVICE_DISABLE_FIFO, 91 [NVDEV_ENGINE_VENC] = NV_DEVICE_DISABLE_VENC,
88 [NVDEV_ENGINE_DISP] = NV_DEVICE_DISABLE_DISP, 92 [NVDEV_ENGINE_DISP] = NV_DEVICE_DISABLE_DISP,
89 [NVDEV_SUBDEV_NR] = 0, 93 [NVDEV_SUBDEV_NR] = 0,
90}; 94};
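A note on disable_map: it translates each fine-grained subdev/engine index into the coarse NV_DEVICE_DISABLE_* bit a client can pass when creating the device object. A rough sketch of how the constructor consults it; args and the loop shape are assumptions, the real code being nouveau_devobj_ctor():

	int i;
	for (i = 0; i < NVDEV_SUBDEV_NR; i++) {
		if (args->disable & disable_map[i])
			continue; /* client asked for this unit to stay off */
		/* ...otherwise instantiate the subdev/engine... */
	}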
@@ -208,7 +212,7 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
208 212
209 /* determine frequency of timing crystal */ 213 /* determine frequency of timing crystal */
210 if ( device->chipset < 0x17 || 214 if ( device->chipset < 0x17 ||
211 (device->chipset >= 0x20 && device->chipset <= 0x25)) 215 (device->chipset >= 0x20 && device->chipset < 0x25))
212 strap &= 0x00000040; 216 strap &= 0x00000040;
213 else 217 else
214 strap &= 0x00400040; 218 strap &= 0x00400040;
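A note on the strap hunk: the range check tightens from <= 0x25 to < 0x25, so NV25 now samples both strap bits rather than bit 6 alone. The masked value then picks the crystal frequency; a sketch of the decode that follows, with the kHz values an assumption rather than a quotation of this patch:

	switch (strap) {
	case 0x00000000: device->crystal = 13500; break;
	case 0x00000040: device->crystal = 14318; break;
	case 0x00400000: device->crystal = 27000; break;
	case 0x00400040: device->crystal = 25000; break;
	}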
@@ -356,37 +360,37 @@ fail:
356} 360}
357 361
358static u8 362static u8
359nouveau_devobj_rd08(struct nouveau_object *object, u32 addr) 363nouveau_devobj_rd08(struct nouveau_object *object, u64 addr)
360{ 364{
361 return nv_rd08(object->engine, addr); 365 return nv_rd08(object->engine, addr);
362} 366}
363 367
364static u16 368static u16
365nouveau_devobj_rd16(struct nouveau_object *object, u32 addr) 369nouveau_devobj_rd16(struct nouveau_object *object, u64 addr)
366{ 370{
367 return nv_rd16(object->engine, addr); 371 return nv_rd16(object->engine, addr);
368} 372}
369 373
370static u32 374static u32
371nouveau_devobj_rd32(struct nouveau_object *object, u32 addr) 375nouveau_devobj_rd32(struct nouveau_object *object, u64 addr)
372{ 376{
373 return nv_rd32(object->engine, addr); 377 return nv_rd32(object->engine, addr);
374} 378}
375 379
376static void 380static void
377nouveau_devobj_wr08(struct nouveau_object *object, u32 addr, u8 data) 381nouveau_devobj_wr08(struct nouveau_object *object, u64 addr, u8 data)
378{ 382{
379 nv_wr08(object->engine, addr, data); 383 nv_wr08(object->engine, addr, data);
380} 384}
381 385
382static void 386static void
383nouveau_devobj_wr16(struct nouveau_object *object, u32 addr, u16 data) 387nouveau_devobj_wr16(struct nouveau_object *object, u64 addr, u16 data)
384{ 388{
385 nv_wr16(object->engine, addr, data); 389 nv_wr16(object->engine, addr, data);
386} 390}
387 391
388static void 392static void
389nouveau_devobj_wr32(struct nouveau_object *object, u32 addr, u32 data) 393nouveau_devobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
390{ 394{
391 nv_wr32(object->engine, addr, data); 395 nv_wr32(object->engine, addr, data);
392} 396}
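A note on the accessor hunk above: each function changes only its address argument, u32 to u64, matching the identical widening of nouveau_bios_rd*/wr* earlier in this series; object accessors can then address targets beyond 4 GiB (large BARs, VRAM-backed instance memory) instead of only 32-bit offsets. A sketch of the ops table these slot into, with the struct shape inferred from the rd/wr pairs rather than quoted from a header:

	struct nouveau_ofuncs {
		/* ctor/dtor/init/fini elided */
		u8   (*rd08)(struct nouveau_object *, u64 addr);
		u16  (*rd16)(struct nouveau_object *, u64 addr);
		u32  (*rd32)(struct nouveau_object *, u64 addr);
		void (*wr08)(struct nouveau_object *, u64 addr, u8 data);
		void (*wr16)(struct nouveau_object *, u64 addr, u16 data);
		void (*wr32)(struct nouveau_object *, u64 addr, u32 data);
	};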
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
index f09accfd0e31..9c40b0fb23f6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv10.c
@@ -105,7 +105,7 @@ nv10_identify(struct nouveau_device *device)
105 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 105 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
106 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 106 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
107 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 107 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
108 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; 108 device->oclass[NVDEV_SUBDEV_FB ] = &nv1a_fb_oclass;
109 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 109 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
110 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 110 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
111 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 111 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -159,7 +159,7 @@ nv10_identify(struct nouveau_device *device)
159 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 159 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
160 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 160 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
161 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 161 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
162 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass; 162 device->oclass[NVDEV_SUBDEV_FB ] = &nv1a_fb_oclass;
163 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 163 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
164 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 164 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
165 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 165 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
index 5fa58b7369b5..74f88f48e1c2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv20.c
@@ -72,7 +72,7 @@ nv20_identify(struct nouveau_device *device)
72 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; 72 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
73 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 73 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
74 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 74 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
75 device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass; 75 device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
76 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 76 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
77 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 77 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
78 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 78 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -90,7 +90,7 @@ nv20_identify(struct nouveau_device *device)
90 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; 90 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
91 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 91 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
92 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 92 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
93 device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass; 93 device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
94 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 94 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
95 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 95 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
96 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 96 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -108,7 +108,7 @@ nv20_identify(struct nouveau_device *device)
108 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; 108 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
109 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 109 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
110 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 110 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
111 device->oclass[NVDEV_SUBDEV_FB ] = &nv20_fb_oclass; 111 device->oclass[NVDEV_SUBDEV_FB ] = &nv25_fb_oclass;
112 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 112 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
113 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 113 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
114 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 114 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
index 7f4b8fe6cccc..0ac1b2c4f61d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv30.c
@@ -72,7 +72,7 @@ nv30_identify(struct nouveau_device *device)
72 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; 72 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
73 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 73 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
74 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 74 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
75 device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass; 75 device->oclass[NVDEV_SUBDEV_FB ] = &nv35_fb_oclass;
76 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 76 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
77 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 77 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
78 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 78 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -109,7 +109,7 @@ nv30_identify(struct nouveau_device *device)
109 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass; 109 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv20_devinit_oclass;
110 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 110 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
111 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 111 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
112 device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass; 112 device->oclass[NVDEV_SUBDEV_FB ] = &nv36_fb_oclass;
113 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 113 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
114 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 114 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
115 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 115 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -128,7 +128,7 @@ nv30_identify(struct nouveau_device *device)
128 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass; 128 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv10_devinit_oclass;
129 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 129 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
130 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 130 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
131 device->oclass[NVDEV_SUBDEV_FB ] = &nv30_fb_oclass; 131 device->oclass[NVDEV_SUBDEV_FB ] = &nv10_fb_oclass;
132 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass; 132 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv04_instmem_oclass;
133 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass; 133 device->oclass[NVDEV_SUBDEV_VM ] = &nv04_vmmgr_oclass;
134 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 134 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
index 42deadca0f0a..41d59689a021 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv40.c
@@ -76,7 +76,7 @@ nv40_identify(struct nouveau_device *device)
76 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 76 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
77 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 77 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
78 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 78 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
79 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 79 device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
80 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 80 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
81 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; 81 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
82 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 82 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -96,7 +96,7 @@ nv40_identify(struct nouveau_device *device)
96 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 96 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
97 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 97 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
98 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 98 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
99 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 99 device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
100 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 100 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
101 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; 101 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
102 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 102 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -116,7 +116,7 @@ nv40_identify(struct nouveau_device *device)
116 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 116 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
117 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 117 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
118 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 118 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
119 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 119 device->oclass[NVDEV_SUBDEV_FB ] = &nv41_fb_oclass;
120 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 120 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
121 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; 121 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
122 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 122 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -156,7 +156,7 @@ nv40_identify(struct nouveau_device *device)
156 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 156 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
157 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 157 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
158 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 158 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
159 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 159 device->oclass[NVDEV_SUBDEV_FB ] = &nv47_fb_oclass;
160 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 160 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
161 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; 161 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
162 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 162 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -176,7 +176,7 @@ nv40_identify(struct nouveau_device *device)
176 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 176 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
177 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 177 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
178 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 178 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
179 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 179 device->oclass[NVDEV_SUBDEV_FB ] = &nv49_fb_oclass;
180 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 180 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
181 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; 181 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
182 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 182 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -196,7 +196,7 @@ nv40_identify(struct nouveau_device *device)
196 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 196 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
197 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass; 197 device->oclass[NVDEV_SUBDEV_MC ] = &nv04_mc_oclass;
198 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 198 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
199 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 199 device->oclass[NVDEV_SUBDEV_FB ] = &nv49_fb_oclass;
200 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 200 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
201 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass; 201 device->oclass[NVDEV_SUBDEV_VM ] = &nv41_vmmgr_oclass;
202 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 202 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -216,7 +216,7 @@ nv40_identify(struct nouveau_device *device)
216 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 216 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
217 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 217 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
218 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 218 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
219 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 219 device->oclass[NVDEV_SUBDEV_FB ] = &nv44_fb_oclass;
220 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 220 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
221 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 221 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
222 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 222 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -236,7 +236,7 @@ nv40_identify(struct nouveau_device *device)
236 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 236 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
237 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 237 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
238 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 238 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
239 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 239 device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
240 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 240 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
241 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 241 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
242 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 242 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -256,7 +256,7 @@ nv40_identify(struct nouveau_device *device)
256 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 256 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
257 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 257 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
258 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 258 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
259 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 259 device->oclass[NVDEV_SUBDEV_FB ] = &nv44_fb_oclass;
260 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 260 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
261 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 261 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
262 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 262 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -276,7 +276,7 @@ nv40_identify(struct nouveau_device *device)
276 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 276 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
277 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 277 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
278 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 278 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
279 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 279 device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
280 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 280 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
281 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 281 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
282 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 282 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -296,7 +296,7 @@ nv40_identify(struct nouveau_device *device)
296 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 296 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
297 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 297 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
298 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 298 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
299 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 299 device->oclass[NVDEV_SUBDEV_FB ] = &nv4e_fb_oclass;
300 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 300 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
301 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 301 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
302 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 302 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -316,7 +316,7 @@ nv40_identify(struct nouveau_device *device)
316 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 316 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
317 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 317 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
318 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 318 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
319 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 319 device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
320 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 320 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
321 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 321 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
322 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 322 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -336,7 +336,7 @@ nv40_identify(struct nouveau_device *device)
336 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 336 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
337 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 337 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
338 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 338 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
339 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 339 device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
340 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 340 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
341 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 341 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
342 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 342 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
@@ -356,7 +356,7 @@ nv40_identify(struct nouveau_device *device)
356 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass; 356 device->oclass[NVDEV_SUBDEV_DEVINIT] = &nv1a_devinit_oclass;
357 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass; 357 device->oclass[NVDEV_SUBDEV_MC ] = &nv44_mc_oclass;
358 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; 358 device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
359 device->oclass[NVDEV_SUBDEV_FB ] = &nv40_fb_oclass; 359 device->oclass[NVDEV_SUBDEV_FB ] = &nv46_fb_oclass;
360 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass; 360 device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv40_instmem_oclass;
361 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass; 361 device->oclass[NVDEV_SUBDEV_VM ] = &nv44_vmmgr_oclass;
362 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass; 362 device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nv04_dmaeng_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
index fec3bcc9a6fc..6ccfd8585ba2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nv50.c
@@ -98,7 +98,7 @@ nv50_identify(struct nouveau_device *device)
98 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 98 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
99 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; 99 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
100 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 100 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
101 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 101 device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass;
102 break; 102 break;
103 case 0x86: 103 case 0x86:
104 device->cname = "G86"; 104 device->cname = "G86";
@@ -123,7 +123,7 @@ nv50_identify(struct nouveau_device *device)
123 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 123 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
124 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; 124 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
125 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 125 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
126 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 126 device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass;
127 break; 127 break;
128 case 0x92: 128 case 0x92:
129 device->cname = "G92"; 129 device->cname = "G92";
@@ -148,7 +148,7 @@ nv50_identify(struct nouveau_device *device)
148 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 148 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
149 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; 149 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
150 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 150 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
151 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 151 device->oclass[NVDEV_ENGINE_DISP ] = &nv84_disp_oclass;
152 break; 152 break;
153 case 0x94: 153 case 0x94:
154 device->cname = "G94"; 154 device->cname = "G94";
@@ -173,7 +173,7 @@ nv50_identify(struct nouveau_device *device)
173 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 173 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
174 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; 174 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
175 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 175 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
176 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 176 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
177 break; 177 break;
178 case 0x96: 178 case 0x96:
179 device->cname = "G96"; 179 device->cname = "G96";
@@ -198,7 +198,7 @@ nv50_identify(struct nouveau_device *device)
198 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 198 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
199 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; 199 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
200 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 200 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
201 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 201 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
202 break; 202 break;
203 case 0x98: 203 case 0x98:
204 device->cname = "G98"; 204 device->cname = "G98";
@@ -223,7 +223,7 @@ nv50_identify(struct nouveau_device *device)
223 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass; 223 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
224 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 224 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
225 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 225 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
226 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 226 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
227 break; 227 break;
228 case 0xa0: 228 case 0xa0:
229 device->cname = "G200"; 229 device->cname = "G200";
@@ -248,7 +248,7 @@ nv50_identify(struct nouveau_device *device)
248 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 248 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
249 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass; 249 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv84_crypt_oclass;
250 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 250 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
251 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 251 device->oclass[NVDEV_ENGINE_DISP ] = &nva0_disp_oclass;
252 break; 252 break;
253 case 0xaa: 253 case 0xaa:
254 device->cname = "MCP77/MCP78"; 254 device->cname = "MCP77/MCP78";
@@ -273,7 +273,7 @@ nv50_identify(struct nouveau_device *device)
273 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass; 273 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
274 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 274 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
275 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 275 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
276 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 276 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
277 break; 277 break;
278 case 0xac: 278 case 0xac:
279 device->cname = "MCP79/MCP7A"; 279 device->cname = "MCP79/MCP7A";
@@ -298,7 +298,7 @@ nv50_identify(struct nouveau_device *device)
298 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass; 298 device->oclass[NVDEV_ENGINE_CRYPT ] = &nv98_crypt_oclass;
299 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 299 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
300 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 300 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
301 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 301 device->oclass[NVDEV_ENGINE_DISP ] = &nv94_disp_oclass;
302 break; 302 break;
303 case 0xa3: 303 case 0xa3:
304 device->cname = "GT215"; 304 device->cname = "GT215";
@@ -324,7 +324,7 @@ nv50_identify(struct nouveau_device *device)
324 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 324 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
325 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 325 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
326 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass; 326 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
327 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 327 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
328 break; 328 break;
329 case 0xa5: 329 case 0xa5:
330 device->cname = "GT216"; 330 device->cname = "GT216";
@@ -349,7 +349,7 @@ nv50_identify(struct nouveau_device *device)
349 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 349 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
350 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 350 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
351 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass; 351 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
352 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 352 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
353 break; 353 break;
354 case 0xa8: 354 case 0xa8:
355 device->cname = "GT218"; 355 device->cname = "GT218";
@@ -374,7 +374,7 @@ nv50_identify(struct nouveau_device *device)
374 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 374 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
375 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 375 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
376 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass; 376 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
377 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 377 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
378 break; 378 break;
379 case 0xaf: 379 case 0xaf:
380 device->cname = "MCP89"; 380 device->cname = "MCP89";
@@ -399,7 +399,7 @@ nv50_identify(struct nouveau_device *device)
399 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 399 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
400 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 400 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
401 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass; 401 device->oclass[NVDEV_ENGINE_COPY0 ] = &nva3_copy_oclass;
402 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 402 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
403 break; 403 break;
404 default: 404 default:
405 nv_fatal(device, "unknown Tesla chipset\n"); 405 nv_fatal(device, "unknown Tesla chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
index 6697f0f9c293..f0461685a422 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nvc0.c
@@ -74,12 +74,12 @@ nvc0_identify(struct nouveau_device *device)
74 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; 74 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
75 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 75 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
76 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; 76 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
77 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 77 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
78 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 78 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
79 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 79 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
80 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 80 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
81 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; 81 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
82 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 82 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
83 break; 83 break;
84 case 0xc4: 84 case 0xc4:
85 device->cname = "GF104"; 85 device->cname = "GF104";
@@ -102,12 +102,12 @@ nvc0_identify(struct nouveau_device *device)
102 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; 102 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
103 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 103 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
104 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; 104 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
105 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 105 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
106 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 106 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
107 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 107 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
108 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 108 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
109 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; 109 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
110 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 110 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
111 break; 111 break;
112 case 0xc3: 112 case 0xc3:
113 device->cname = "GF106"; 113 device->cname = "GF106";
@@ -130,12 +130,12 @@ nvc0_identify(struct nouveau_device *device)
130 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; 130 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
131 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 131 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
132 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; 132 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
133 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 133 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
134 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 134 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
135 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 135 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
136 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 136 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
137 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; 137 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
138 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 138 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
139 break; 139 break;
140 case 0xce: 140 case 0xce:
141 device->cname = "GF114"; 141 device->cname = "GF114";
@@ -158,12 +158,12 @@ nvc0_identify(struct nouveau_device *device)
158 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; 158 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
159 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 159 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
160 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; 160 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
161 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 161 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
162 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 162 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
163 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 163 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
164 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 164 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
165 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; 165 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
166 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 166 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
167 break; 167 break;
168 case 0xcf: 168 case 0xcf:
169 device->cname = "GF116"; 169 device->cname = "GF116";
@@ -186,12 +186,12 @@ nvc0_identify(struct nouveau_device *device)
186 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass; 186 device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
187 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass; 187 device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
188 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass; 188 device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
189 device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass; 189 device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
190 device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass; 190 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
191 device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass; 191 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
192 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 192 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
193 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; 193 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
194 device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass; 194 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
195 break; 195 break;
196 case 0xc1: 196 case 0xc1:
197 device->cname = "GF108"; 197 device->cname = "GF108";
@@ -214,12 +214,12 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
 		device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
-		device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
-		device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
-		device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
 		break;
 	case 0xc8:
 		device->cname = "GF110";
@@ -242,12 +242,12 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
 		device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
-		device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
-		device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
-		device->oclass[NVDEV_ENGINE_DISP ] = &nv50_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
 		break;
 	case 0xd9:
 		device->cname = "GF119";
@@ -266,13 +266,13 @@ nvc0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
 		device->oclass[NVDEV_ENGINE_FIFO ] = &nvc0_fifo_oclass;
 		device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR ] = &nvc0_graph_oclass;
-		device->oclass[NVDEV_ENGINE_VP ] = &nv84_vp_oclass;
-		device->oclass[NVDEV_ENGINE_BSP ] = &nv84_bsp_oclass;
-		device->oclass[NVDEV_ENGINE_PPP ] = &nv98_ppp_oclass;
+		device->oclass[NVDEV_ENGINE_VP ] = &nvc0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
 		break;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
index 4a280b7ab853..9b7881e76634 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/device/nve0.c
@@ -45,6 +45,9 @@
 #include <engine/graph.h>
 #include <engine/disp.h>
 #include <engine/copy.h>
+#include <engine/bsp.h>
+#include <engine/vp.h>
+#include <engine/ppp.h>
 
 int
 nve0_identify(struct nouveau_device *device)
@@ -67,13 +70,16 @@ nve0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
 		device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
 		device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
-		device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+		device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
 		break;
 	case 0xe7:
 		device->cname = "GK107";
@@ -92,13 +98,16 @@ nve0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_SUBDEV_INSTMEM] = &nv50_instmem_oclass;
 		device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
 		device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
-		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvc0_dmaeng_oclass;
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = &nvd0_dmaeng_oclass;
 		device->oclass[NVDEV_ENGINE_FIFO ] = &nve0_fifo_oclass;
 		device->oclass[NVDEV_ENGINE_SW ] = &nvc0_software_oclass;
 		device->oclass[NVDEV_ENGINE_GR ] = &nve0_graph_oclass;
-		device->oclass[NVDEV_ENGINE_DISP ] = &nvd0_disp_oclass;
+		device->oclass[NVDEV_ENGINE_DISP ] = &nve0_disp_oclass;
 		device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
 		device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+		device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
 		break;
 	default:
 		nv_fatal(device, "unknown Kepler chipset\n");
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
index 61becfa732e9..ae7249b09797 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
@@ -22,6 +22,10 @@
  * Authors: Ben Skeggs
  */
 
+#include <subdev/bios.h>
+#include <subdev/bios/dcb.h>
+#include <subdev/bios/disp.h>
+#include <subdev/bios/init.h>
 #include <subdev/devinit.h>
 #include <subdev/vga.h>
 
@@ -55,7 +59,12 @@ nv50_devinit_dtor(struct nouveau_object *object)
 static int
 nv50_devinit_init(struct nouveau_object *object)
 {
+	struct nouveau_bios *bios = nouveau_bios(object);
 	struct nv50_devinit_priv *priv = (void *)object;
+	struct nvbios_outp info;
+	struct dcb_output outp;
+	u8 ver = 0xff, hdr, cnt, len;
+	int ret, i = 0;
 
 	if (!priv->base.post) {
 		if (!nv_rdvgac(priv, 0, 0x00) &&
@@ -65,7 +74,30 @@ nv50_devinit_init(struct nouveau_object *object)
 		}
 	}
 
-	return nouveau_devinit_init(&priv->base);
+	ret = nouveau_devinit_init(&priv->base);
+	if (ret)
+		return ret;
+
+	/* if we ran the init tables, execute first script pointer for each
+	 * display table output entry that has a matching dcb entry.
+	 */
+	while (priv->base.post && ver) {
+		u16 data = nvbios_outp_parse(bios, i++, &ver, &hdr, &cnt, &len, &info);
+		if (data && dcb_outp_match(bios, info.type, info.mask, &ver, &len, &outp)) {
+			struct nvbios_init init = {
+				.subdev = nv_subdev(priv),
+				.bios = bios,
+				.offset = info.script[0],
+				.outp = &outp,
+				.crtc = -1,
+				.execute = 1,
+			};
+
+			nvbios_exec(&init);
+		}
+	};
+
+	return 0;
 }
 
 static int
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
index f0086de8af31..d6d16007ec1a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
@@ -57,25 +57,45 @@ nouveau_fb_bios_memtype(struct nouveau_bios *bios)
 }
 
 int
-nouveau_fb_init(struct nouveau_fb *pfb)
+nouveau_fb_preinit(struct nouveau_fb *pfb)
 {
-	int ret, i;
+	static const char *name[] = {
+		[NV_MEM_TYPE_UNKNOWN] = "unknown",
+		[NV_MEM_TYPE_STOLEN ] = "stolen system memory",
+		[NV_MEM_TYPE_SGRAM ] = "SGRAM",
+		[NV_MEM_TYPE_SDRAM ] = "SDRAM",
+		[NV_MEM_TYPE_DDR1 ] = "DDR1",
+		[NV_MEM_TYPE_DDR2 ] = "DDR2",
+		[NV_MEM_TYPE_DDR3 ] = "DDR3",
+		[NV_MEM_TYPE_GDDR2 ] = "GDDR2",
+		[NV_MEM_TYPE_GDDR3 ] = "GDDR3",
+		[NV_MEM_TYPE_GDDR4 ] = "GDDR4",
+		[NV_MEM_TYPE_GDDR5 ] = "GDDR5",
+	};
+	int ret, tags;
 
-	ret = nouveau_subdev_init(&pfb->base);
-	if (ret)
-		return ret;
+	tags = pfb->ram.init(pfb);
+	if (tags < 0 || !pfb->ram.size) {
+		nv_fatal(pfb, "error detecting memory configuration!!\n");
+		return (tags < 0) ? tags : -ERANGE;
+	}
 
-	for (i = 0; i < pfb->tile.regions; i++)
-		pfb->tile.prog(pfb, i, &pfb->tile.region[i]);
+	if (!nouveau_mm_initialised(&pfb->vram)) {
+		ret = nouveau_mm_init(&pfb->vram, 0, pfb->ram.size >> 12, 1);
+		if (ret)
+			return ret;
+	}
 
-	return 0;
-}
+	if (!nouveau_mm_initialised(&pfb->tags) && tags) {
+		ret = nouveau_mm_init(&pfb->tags, 0, ++tags, 1);
+		if (ret)
+			return ret;
+	}
 
-int
-_nouveau_fb_init(struct nouveau_object *object)
-{
-	struct nouveau_fb *pfb = (void *)object;
-	return nouveau_fb_init(pfb);
+	nv_info(pfb, "RAM type: %s\n", name[pfb->ram.type]);
+	nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram.size >> 20));
+	nv_info(pfb, " ZCOMP: %d tags\n", tags);
+	return 0;
 }
 
 void
@@ -85,12 +105,8 @@ nouveau_fb_destroy(struct nouveau_fb *pfb)
 
 	for (i = 0; i < pfb->tile.regions; i++)
 		pfb->tile.fini(pfb, i, &pfb->tile.region[i]);
-
-	if (pfb->tags.block_size)
-		nouveau_mm_fini(&pfb->tags);
-
-	if (pfb->vram.block_size)
-		nouveau_mm_fini(&pfb->vram);
+	nouveau_mm_fini(&pfb->tags);
+	nouveau_mm_fini(&pfb->vram);
 
 	nouveau_subdev_destroy(&pfb->base);
 }
@@ -101,30 +117,24 @@ _nouveau_fb_dtor(struct nouveau_object *object)
 	struct nouveau_fb *pfb = (void *)object;
 	nouveau_fb_destroy(pfb);
 }
-
 int
-nouveau_fb_created(struct nouveau_fb *pfb)
+nouveau_fb_init(struct nouveau_fb *pfb)
 {
-	static const char *name[] = {
-		[NV_MEM_TYPE_UNKNOWN] = "unknown",
-		[NV_MEM_TYPE_STOLEN ] = "stolen system memory",
-		[NV_MEM_TYPE_SGRAM ] = "SGRAM",
-		[NV_MEM_TYPE_SDRAM ] = "SDRAM",
-		[NV_MEM_TYPE_DDR1 ] = "DDR1",
-		[NV_MEM_TYPE_DDR2 ] = "DDR2",
-		[NV_MEM_TYPE_DDR3 ] = "DDR3",
-		[NV_MEM_TYPE_GDDR2 ] = "GDDR2",
-		[NV_MEM_TYPE_GDDR3 ] = "GDDR3",
-		[NV_MEM_TYPE_GDDR4 ] = "GDDR4",
-		[NV_MEM_TYPE_GDDR5 ] = "GDDR5",
-	};
+	int ret, i;
 
-	if (pfb->ram.size == 0) {
-		nv_fatal(pfb, "no vram detected!!\n");
-		return -ERANGE;
-	}
+	ret = nouveau_subdev_init(&pfb->base);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < pfb->tile.regions; i++)
+		pfb->tile.prog(pfb, i, &pfb->tile.region[i]);
 
-	nv_info(pfb, "RAM type: %s\n", name[pfb->ram.type]);
-	nv_info(pfb, "RAM size: %d MiB\n", (int)(pfb->ram.size >> 20));
 	return 0;
 }
+
+int
+_nouveau_fb_init(struct nouveau_object *object)
+{
+	struct nouveau_fb *pfb = (void *)object;
+	return nouveau_fb_init(pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
index eb06836b69f7..6e369f85361e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv04.c
@@ -56,6 +56,37 @@ nv04_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
 }
 
 static int
+nv04_fb_vram_init(struct nouveau_fb *pfb)
+{
+	u32 boot0 = nv_rd32(pfb, NV04_PFB_BOOT_0);
+	if (boot0 & 0x00000100) {
+		pfb->ram.size = ((boot0 >> 12) & 0xf) * 2 + 2;
+		pfb->ram.size *= 1024 * 1024;
+	} else {
+		switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
+		case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
+			pfb->ram.size = 32 * 1024 * 1024;
+			break;
+		case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
+			pfb->ram.size = 16 * 1024 * 1024;
+			break;
+		case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
+			pfb->ram.size = 8 * 1024 * 1024;
+			break;
+		case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
+			pfb->ram.size = 4 * 1024 * 1024;
+			break;
+		}
+	}
+
+	if ((boot0 & 0x00000038) <= 0x10)
+		pfb->ram.type = NV_MEM_TYPE_SGRAM;
+	else
+		pfb->ram.type = NV_MEM_TYPE_SDRAM;
+	return 0;
+}
+
+static int
 nv04_fb_init(struct nouveau_object *object)
 {
 	struct nv04_fb_priv *priv = (void *)object;
@@ -79,7 +110,6 @@ nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	     struct nouveau_object **pobject)
 {
 	struct nv04_fb_priv *priv;
-	u32 boot0;
 	int ret;
 
 	ret = nouveau_fb_create(parent, engine, oclass, &priv);
@@ -87,35 +117,9 @@ nv04_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	boot0 = nv_rd32(priv, NV04_PFB_BOOT_0);
-	if (boot0 & 0x00000100) {
-		priv->base.ram.size = ((boot0 >> 12) & 0xf) * 2 + 2;
-		priv->base.ram.size *= 1024 * 1024;
-	} else {
-		switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
-		case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
-			priv->base.ram.size = 32 * 1024 * 1024;
-			break;
-		case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
-			priv->base.ram.size = 16 * 1024 * 1024;
-			break;
-		case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
-			priv->base.ram.size = 8 * 1024 * 1024;
-			break;
-		case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
-			priv->base.ram.size = 4 * 1024 * 1024;
-			break;
-		}
-	}
-
-	if ((boot0 & 0x00000038) <= 0x10)
-		priv->base.ram.type = NV_MEM_TYPE_SGRAM;
-	else
-		priv->base.ram.type = NV_MEM_TYPE_SDRAM;
-
-
 	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	return nouveau_fb_created(&priv->base);
+	priv->base.ram.init = nv04_fb_vram_init;
+	return nouveau_fb_preinit(&priv->base);
 }
 
 struct nouveau_oclass
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
index f037a422d2f4..edbbe26e858d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv10.c
@@ -30,7 +30,20 @@ struct nv10_fb_priv {
 	struct nouveau_fb base;
 };
 
-static void
+static int
+nv10_fb_vram_init(struct nouveau_fb *pfb)
+{
+	u32 cfg0 = nv_rd32(pfb, 0x100200);
+	if (cfg0 & 0x00000001)
+		pfb->ram.type = NV_MEM_TYPE_DDR1;
+	else
+		pfb->ram.type = NV_MEM_TYPE_SDRAM;
+
+	pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+	return 0;
+}
+
+void
 nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
 		  u32 flags, struct nouveau_fb_tile *tile)
 {
@@ -39,7 +52,7 @@ nv10_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
 	tile->pitch = pitch;
 }
 
-static void
+void
 nv10_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
 {
 	tile->addr = 0;
@@ -54,6 +67,7 @@ nv10_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
 	nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
 	nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
 	nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
+	nv_rd32(pfb, 0x100240 + (i * 0x10));
 }
 
 static int
@@ -61,7 +75,6 @@ nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	     struct nouveau_oclass *oclass, void *data, u32 size,
 	     struct nouveau_object **pobject)
 {
-	struct nouveau_device *device = nv_device(parent);
 	struct nv10_fb_priv *priv;
 	int ret;
 
@@ -70,42 +83,13 @@ nv10_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	if (device->chipset == 0x1a || device->chipset == 0x1f) {
-		struct pci_dev *bridge;
-		u32 mem, mib;
-
-		bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
-		if (!bridge) {
-			nv_fatal(device, "no bridge device\n");
-			return 0;
-		}
-
-		if (device->chipset == 0x1a) {
-			pci_read_config_dword(bridge, 0x7c, &mem);
-			mib = ((mem >> 6) & 31) + 1;
-		} else {
-			pci_read_config_dword(bridge, 0x84, &mem);
-			mib = ((mem >> 4) & 127) + 1;
-		}
-
-		priv->base.ram.type = NV_MEM_TYPE_STOLEN;
-		priv->base.ram.size = mib * 1024 * 1024;
-	} else {
-		u32 cfg0 = nv_rd32(priv, 0x100200);
-		if (cfg0 & 0x00000001)
-			priv->base.ram.type = NV_MEM_TYPE_DDR1;
-		else
-			priv->base.ram.type = NV_MEM_TYPE_SDRAM;
-
-		priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
-	}
-
 	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv10_fb_vram_init;
 	priv->base.tile.regions = 8;
 	priv->base.tile.init = nv10_fb_tile_init;
 	priv->base.tile.fini = nv10_fb_tile_fini;
 	priv->base.tile.prog = nv10_fb_tile_prog;
-	return nouveau_fb_created(&priv->base);
+	return nouveau_fb_preinit(&priv->base);
 }
 
 struct nouveau_oclass
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
new file mode 100644
index 000000000000..48366841db4a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv1a_fb_priv {
+	struct nouveau_fb base;
+};
+
+static int
+nv1a_fb_vram_init(struct nouveau_fb *pfb)
+{
+	struct pci_dev *bridge;
+	u32 mem, mib;
+
+	bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
+	if (!bridge) {
+		nv_fatal(pfb, "no bridge device\n");
+		return -ENODEV;
+	}
+
+	if (nv_device(pfb)->chipset == 0x1a) {
+		pci_read_config_dword(bridge, 0x7c, &mem);
+		mib = ((mem >> 6) & 31) + 1;
+	} else {
+		pci_read_config_dword(bridge, 0x84, &mem);
+		mib = ((mem >> 4) & 127) + 1;
+	}
+
+	pfb->ram.type = NV_MEM_TYPE_STOLEN;
+	pfb->ram.size = mib * 1024 * 1024;
+	return 0;
+}
+
+static int
+nv1a_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv1a_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv1a_fb_vram_init;
+	priv->base.tile.regions = 8;
+	priv->base.tile.init = nv10_fb_tile_init;
+	priv->base.tile.fini = nv10_fb_tile_fini;
+	priv->base.tile.prog = nv10_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv1a_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x1a),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv1a_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = _nouveau_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
index 4b3578fcb7fb..5d14612a2c8e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv20.c
@@ -30,43 +30,54 @@ struct nv20_fb_priv {
 	struct nouveau_fb base;
 };
 
-static void
+int
+nv20_fb_vram_init(struct nouveau_fb *pfb)
+{
+	u32 pbus1218 = nv_rd32(pfb, 0x001218);
+
+	switch (pbus1218 & 0x00000300) {
+	case 0x00000000: pfb->ram.type = NV_MEM_TYPE_SDRAM; break;
+	case 0x00000100: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+	case 0x00000200: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+	case 0x00000300: pfb->ram.type = NV_MEM_TYPE_GDDR2; break;
+	}
+	pfb->ram.size = (nv_rd32(pfb, 0x10020c) & 0xff000000);
+	pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+
+	return nv_rd32(pfb, 0x100320);
+}
+
+void
 nv20_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
 		  u32 flags, struct nouveau_fb_tile *tile)
 {
-	struct nouveau_device *device = nv_device(pfb);
-	int bpp = (flags & 2) ? 32 : 16;
-
 	tile->addr = 0x00000001 | addr;
 	tile->limit = max(1u, addr + size) - 1;
 	tile->pitch = pitch;
-
-	/* Allocate some of the on-die tag memory, used to store Z
-	 * compression meta-data (most likely just a bitmap determining
-	 * if a given tile is compressed or not).
-	 */
-	size /= 256;
 	if (flags & 4) {
-		if (!nouveau_mm_head(&pfb->tags, 1, size, size, 1, &tile->tag)) {
-			/* Enable Z compression */
-			tile->zcomp = tile->tag->offset;
-			if (device->chipset >= 0x25) {
-				if (bpp == 16)
-					tile->zcomp |= 0x00100000;
-				else
-					tile->zcomp |= 0x00200000;
-			} else {
-				tile->zcomp |= 0x80000000;
-				if (bpp != 16)
-					tile->zcomp |= 0x04000000;
-			}
-		}
-
+		pfb->tile.comp(pfb, i, size, flags, tile);
 		tile->addr |= 2;
 	}
 }
 
 static void
+nv20_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+		  struct nouveau_fb_tile *tile)
+{
+	u32 tiles = DIV_ROUND_UP(size, 0x40);
+	u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
+	if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+		if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */
+		else              tile->zcomp = 0x04000000; /* Z24S8 */
+		tile->zcomp |= tile->tag->offset;
+		tile->zcomp |= 0x80000000; /* enable */
+#ifdef __BIG_ENDIAN
+		tile->zcomp |= 0x08000000;
+#endif
+	}
+}
+
+void
 nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
 {
 	tile->addr = 0;
@@ -76,12 +87,13 @@ nv20_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
 	nouveau_mm_free(&pfb->tags, &tile->tag);
 }
 
-static void
+void
 nv20_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
 {
 	nv_wr32(pfb, 0x100244 + (i * 0x10), tile->limit);
 	nv_wr32(pfb, 0x100248 + (i * 0x10), tile->pitch);
 	nv_wr32(pfb, 0x100240 + (i * 0x10), tile->addr);
+	nv_rd32(pfb, 0x100240 + (i * 0x10));
 	nv_wr32(pfb, 0x100300 + (i * 0x04), tile->zcomp);
 }
 
@@ -90,9 +102,7 @@ nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	     struct nouveau_oclass *oclass, void *data, u32 size,
 	     struct nouveau_object **pobject)
 {
-	struct nouveau_device *device = nv_device(parent);
 	struct nv20_fb_priv *priv;
-	u32 pbus1218;
 	int ret;
 
 	ret = nouveau_fb_create(parent, engine, oclass, &priv);
@@ -100,28 +110,14 @@ nv20_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	pbus1218 = nv_rd32(priv, 0x001218);
-	switch (pbus1218 & 0x00000300) {
-	case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
-	case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
-	case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
-	case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_GDDR2; break;
-	}
-	priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
-
-	if (device->chipset >= 0x25)
-		ret = nouveau_mm_init(&priv->base.tags, 0, 64 * 1024, 1);
-	else
-		ret = nouveau_mm_init(&priv->base.tags, 0, 32 * 1024, 1);
-	if (ret)
-		return ret;
-
 	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv20_fb_vram_init;
 	priv->base.tile.regions = 8;
 	priv->base.tile.init = nv20_fb_tile_init;
+	priv->base.tile.comp = nv20_fb_tile_comp;
 	priv->base.tile.fini = nv20_fb_tile_fini;
 	priv->base.tile.prog = nv20_fb_tile_prog;
-	return nouveau_fb_created(&priv->base);
+	return nouveau_fb_preinit(&priv->base);
 }
 
 struct nouveau_oclass
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
new file mode 100644
index 000000000000..0042ace6bef9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv25.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv25_fb_priv {
+	struct nouveau_fb base;
+};
+
+static void
+nv25_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+		  struct nouveau_fb_tile *tile)
+{
+	u32 tiles = DIV_ROUND_UP(size, 0x40);
+	u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
+	if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+		if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */
+		else              tile->zcomp = 0x00200000; /* Z24S8 */
+		tile->zcomp |= tile->tag->offset;
+#ifdef __BIG_ENDIAN
+		tile->zcomp |= 0x01000000;
+#endif
+	}
+}
+
+static int
+nv25_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv25_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv20_fb_vram_init;
+	priv->base.tile.regions = 8;
+	priv->base.tile.init = nv20_fb_tile_init;
+	priv->base.tile.comp = nv25_fb_tile_comp;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv20_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv25_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x25),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv25_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = _nouveau_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
index cba67bc91390..a7ba0d048aec 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv30.c
@@ -34,17 +34,36 @@ void
 nv30_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
 		  u32 flags, struct nouveau_fb_tile *tile)
 {
-	tile->addr = addr | 1;
+	/* for performance, select alternate bank offset for zeta */
+	if (!(flags & 4)) {
+		tile->addr = (0 << 4);
+	} else {
+		if (pfb->tile.comp) /* z compression */
+			pfb->tile.comp(pfb, i, size, flags, tile);
+		tile->addr = (1 << 4);
+	}
+
+	tile->addr |= 0x00000001; /* enable */
+	tile->addr |= addr;
 	tile->limit = max(1u, addr + size) - 1;
 	tile->pitch = pitch;
 }
 
-void
-nv30_fb_tile_fini(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+static void
+nv30_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+		  struct nouveau_fb_tile *tile)
 {
-	tile->addr = 0;
-	tile->limit = 0;
-	tile->pitch = 0;
+	u32 tiles = DIV_ROUND_UP(size, 0x40);
+	u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
+	if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+		if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */
+		else           tile->zcomp |= 0x02000000; /* Z24S8 */
+		tile->zcomp |= ((tile->tag->offset ) >> 6);
+		tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 12;
+#ifdef __BIG_ENDIAN
+		tile->zcomp |= 0x10000000;
+#endif
+	}
 }
 
 static int
@@ -72,7 +91,7 @@ calc_ref(struct nv30_fb_priv *priv, int l, int k, int i)
 	return x;
 }
 
-static int
+int
 nv30_fb_init(struct nouveau_object *object)
 {
 	struct nouveau_device *device = nv_device(object);
@@ -111,7 +130,6 @@ nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	     struct nouveau_object **pobject)
 {
 	struct nv30_fb_priv *priv;
-	u32 pbus1218;
 	int ret;
 
 	ret = nouveau_fb_create(parent, engine, oclass, &priv);
@@ -119,21 +137,14 @@ nv30_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	pbus1218 = nv_rd32(priv, 0x001218);
-	switch (pbus1218 & 0x00000300) {
-	case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
-	case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
-	case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
-	case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_GDDR2; break;
-	}
-	priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
-
 	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv20_fb_vram_init;
 	priv->base.tile.regions = 8;
 	priv->base.tile.init = nv30_fb_tile_init;
-	priv->base.tile.fini = nv30_fb_tile_fini;
-	priv->base.tile.prog = nv10_fb_tile_prog;
-	return nouveau_fb_created(&priv->base);
+	priv->base.tile.comp = nv30_fb_tile_comp;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv20_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
 }
 
 struct nouveau_oclass
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
new file mode 100644
index 000000000000..092f6f4f3521
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv35.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv35_fb_priv {
+	struct nouveau_fb base;
+};
+
+static void
+nv35_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+		  struct nouveau_fb_tile *tile)
+{
+	u32 tiles = DIV_ROUND_UP(size, 0x40);
+	u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
+	if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+		if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */
+		else           tile->zcomp |= 0x08000000; /* Z24S8 */
+		tile->zcomp |= ((tile->tag->offset ) >> 6);
+		tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 13;
+#ifdef __BIG_ENDIAN
+		tile->zcomp |= 0x40000000;
+#endif
+	}
+}
+
+static int
+nv35_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv35_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv20_fb_vram_init;
+	priv->base.tile.regions = 8;
+	priv->base.tile.init = nv30_fb_tile_init;
+	priv->base.tile.comp = nv35_fb_tile_comp;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv20_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv35_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x35),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv35_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = nv30_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
new file mode 100644
index 000000000000..797ab3b821b9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv36.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv36_fb_priv {
+	struct nouveau_fb base;
+};
+
+static void
+nv36_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+		  struct nouveau_fb_tile *tile)
+{
+	u32 tiles = DIV_ROUND_UP(size, 0x40);
+	u32 tags = round_up(tiles / pfb->ram.parts, 0x40);
+	if (!nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+		if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */
+		else           tile->zcomp |= 0x20000000; /* Z24S8 */
+		tile->zcomp |= ((tile->tag->offset ) >> 6);
+		tile->zcomp |= ((tile->tag->offset + tags - 1) >> 6) << 14;
+#ifdef __BIG_ENDIAN
+		tile->zcomp |= 0x80000000;
+#endif
+	}
+}
+
+static int
+nv36_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv36_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv20_fb_vram_init;
+	priv->base.tile.regions = 8;
+	priv->base.tile.init = nv30_fb_tile_init;
+	priv->base.tile.comp = nv36_fb_tile_comp;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv20_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+struct nouveau_oclass
+nv36_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x36),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv36_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = nv30_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
index 347a496fcad8..65e131b90f37 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv40.c
@@ -30,34 +30,37 @@ struct nv40_fb_priv {
 	struct nouveau_fb base;
 };
 
-static inline int
-nv44_graph_class(struct nouveau_device *device)
-{
-	if ((device->chipset & 0xf0) == 0x60)
-		return 1;
-
-	return !(0x0baf & (1 << (device->chipset & 0x0f)));
-}
-
-static void
-nv40_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+static int
+nv40_fb_vram_init(struct nouveau_fb *pfb)
 {
-	nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
-	nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
-	nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
-}
+	u32 pbus1218 = nv_rd32(pfb, 0x001218);
+	switch (pbus1218 & 0x00000300) {
+	case 0x00000000: pfb->ram.type = NV_MEM_TYPE_SDRAM; break;
+	case 0x00000100: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
+	case 0x00000200: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
+	case 0x00000300: pfb->ram.type = NV_MEM_TYPE_DDR2; break;
+	}
 
-static void
-nv40_fb_init_gart(struct nv40_fb_priv *priv)
-{
-	nv_wr32(priv, 0x100800, 0x00000001);
+	pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+	pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+	return nv_rd32(pfb, 0x100320);
 }
 
-static void
-nv44_fb_init_gart(struct nv40_fb_priv *priv)
+void
+nv40_fb_tile_comp(struct nouveau_fb *pfb, int i, u32 size, u32 flags,
+		  struct nouveau_fb_tile *tile)
 {
-	nv_wr32(priv, 0x100850, 0x80000000);
-	nv_wr32(priv, 0x100800, 0x00000001);
+	u32 tiles = DIV_ROUND_UP(size, 0x80);
+	u32 tags = round_up(tiles / pfb->ram.parts, 0x100);
+	if ( (flags & 2) &&
+	    !nouveau_mm_head(&pfb->tags, 1, tags, tags, 1, &tile->tag)) {
+		tile->zcomp = 0x28000000; /* Z24S8_SPLIT_GRAD */
+		tile->zcomp |= ((tile->tag->offset ) >> 8);
+		tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13;
+#ifdef __BIG_ENDIAN
+		tile->zcomp |= 0x40000000;
+#endif
+	}
 }
 
 static int
@@ -70,19 +73,7 @@ nv40_fb_init(struct nouveau_object *object)
 	if (ret)
 		return ret;
 
-	switch (nv_device(priv)->chipset) {
-	case 0x40:
-	case 0x45:
-		nv_mask(priv, 0x10033c, 0x00008000, 0x00000000);
-		break;
-	default:
-		if (nv44_graph_class(nv_device(priv)))
-			nv44_fb_init_gart(priv);
-		else
-			nv40_fb_init_gart(priv);
-		break;
-	}
-
+	nv_mask(priv, 0x10033c, 0x00008000, 0x00000000);
 	return 0;
 }
 
@@ -91,7 +82,6 @@ nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	     struct nouveau_oclass *oclass, void *data, u32 size,
 	     struct nouveau_object **pobject)
 {
-	struct nouveau_device *device = nv_device(parent);
 	struct nv40_fb_priv *priv;
 	int ret;
 
@@ -100,69 +90,14 @@ nv40_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	/* 0x001218 is actually present on a few other NV4X I looked at,
-	 * and even contains sane values matching 0x100474. From looking
-	 * at various vbios images however, this isn't the case everywhere.
-	 * So, I chose to use the same regs I've seen NVIDIA reading around
-	 * the memory detection, hopefully that'll get us the right numbers
-	 */
-	if (device->chipset == 0x40) {
-		u32 pbus1218 = nv_rd32(priv, 0x001218);
-		switch (pbus1218 & 0x00000300) {
-		case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_SDRAM; break;
-		case 0x00000100: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
-		case 0x00000200: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
-		case 0x00000300: priv->base.ram.type = NV_MEM_TYPE_DDR2; break;
-		}
-	} else
-	if (device->chipset == 0x49 || device->chipset == 0x4b) {
-		u32 pfb914 = nv_rd32(priv, 0x100914);
-		switch (pfb914 & 0x00000003) {
-		case 0x00000000: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
-		case 0x00000001: priv->base.ram.type = NV_MEM_TYPE_DDR2; break;
-		case 0x00000002: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
-		case 0x00000003: break;
-		}
-	} else
-	if (device->chipset != 0x4e) {
-		u32 pfb474 = nv_rd32(priv, 0x100474);
-		if (pfb474 & 0x00000004)
-			priv->base.ram.type = NV_MEM_TYPE_GDDR3;
-		if (pfb474 & 0x00000002)
-			priv->base.ram.type = NV_MEM_TYPE_DDR2;
-		if (pfb474 & 0x00000001)
-			priv->base.ram.type = NV_MEM_TYPE_DDR1;
-	} else {
-		priv->base.ram.type = NV_MEM_TYPE_STOLEN;
-	}
-
-	priv->base.ram.size = nv_rd32(priv, 0x10020c) & 0xff000000;
-
 	priv->base.memtype_valid = nv04_fb_memtype_valid;
-	switch (device->chipset) {
-	case 0x40:
-	case 0x45:
-		priv->base.tile.regions = 8;
-		break;
-	case 0x46:
-	case 0x47:
-	case 0x49:
-	case 0x4b:
-	case 0x4c:
-		priv->base.tile.regions = 15;
-		break;
-	default:
-		priv->base.tile.regions = 12;
-		break;
-	}
+	priv->base.ram.init = nv40_fb_vram_init;
+	priv->base.tile.regions = 8;
 	priv->base.tile.init = nv30_fb_tile_init;
-	priv->base.tile.fini = nv30_fb_tile_fini;
-	if (device->chipset == 0x40)
-		priv->base.tile.prog = nv10_fb_tile_prog;
-	else
-		priv->base.tile.prog = nv40_fb_tile_prog;
-
-	return nouveau_fb_created(&priv->base);
+	priv->base.tile.comp = nv40_fb_tile_comp;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv20_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
 }
 
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
new file mode 100644
index 000000000000..e9e5a08c41a1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv41.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv41_fb_priv {
+	struct nouveau_fb base;
+};
+
+int
+nv41_fb_vram_init(struct nouveau_fb *pfb)
+{
+	u32 pfb474 = nv_rd32(pfb, 0x100474);
+	if (pfb474 & 0x00000004)
+		pfb->ram.type = NV_MEM_TYPE_GDDR3;
+	if (pfb474 & 0x00000002)
+		pfb->ram.type = NV_MEM_TYPE_DDR2;
+	if (pfb474 & 0x00000001)
+		pfb->ram.type = NV_MEM_TYPE_DDR1;
+
+	pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+	pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+	return nv_rd32(pfb, 0x100320);
+}
+
+void
+nv41_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+{
+	nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
+	nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
+	nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
+	nv_rd32(pfb, 0x100600 + (i * 0x10));
+	nv_wr32(pfb, 0x100700 + (i * 0x04), tile->zcomp);
+}
+
+int
+nv41_fb_init(struct nouveau_object *object)
+{
+	struct nv41_fb_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_fb_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x100800, 0x00000001);
+	return 0;
+}
+
+static int
+nv41_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv41_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv41_fb_vram_init;
+	priv->base.tile.regions = 12;
+	priv->base.tile.init = nv30_fb_tile_init;
+	priv->base.tile.comp = nv40_fb_tile_comp;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv41_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv41_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x41),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv41_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = nv41_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
new file mode 100644
index 000000000000..ae89b5006f7a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv44.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <subdev/fb.h>
+
+struct nv44_fb_priv {
+	struct nouveau_fb base;
+};
+
+int
+nv44_fb_vram_init(struct nouveau_fb *pfb)
+{
+	u32 pfb474 = nv_rd32(pfb, 0x100474);
+	if (pfb474 & 0x00000004)
+		pfb->ram.type = NV_MEM_TYPE_GDDR3;
+	if (pfb474 & 0x00000002)
+		pfb->ram.type = NV_MEM_TYPE_DDR2;
+	if (pfb474 & 0x00000001)
+		pfb->ram.type = NV_MEM_TYPE_DDR1;
+
+	pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+	return 0;
+}
+
+static void
+nv44_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
+		  u32 flags, struct nouveau_fb_tile *tile)
+{
+	tile->addr = 0x00000001; /* mode = vram */
+	tile->addr |= addr;
+	tile->limit = max(1u, addr + size) - 1;
+	tile->pitch = pitch;
+}
+
+void
+nv44_fb_tile_prog(struct nouveau_fb *pfb, int i, struct nouveau_fb_tile *tile)
+{
+	nv_wr32(pfb, 0x100604 + (i * 0x10), tile->limit);
+	nv_wr32(pfb, 0x100608 + (i * 0x10), tile->pitch);
+	nv_wr32(pfb, 0x100600 + (i * 0x10), tile->addr);
+	nv_rd32(pfb, 0x100600 + (i * 0x10));
+}
+
+int
+nv44_fb_init(struct nouveau_object *object)
+{
+	struct nv44_fb_priv *priv = (void *)object;
+	int ret;
+
+	ret = nouveau_fb_init(&priv->base);
+	if (ret)
+		return ret;
+
+	nv_wr32(priv, 0x100850, 0x80000000);
+	nv_wr32(priv, 0x100800, 0x00000001);
+	return 0;
+}
+
+static int
+nv44_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+	     struct nouveau_oclass *oclass, void *data, u32 size,
+	     struct nouveau_object **pobject)
+{
+	struct nv44_fb_priv *priv;
+	int ret;
+
+	ret = nouveau_fb_create(parent, engine, oclass, &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	priv->base.memtype_valid = nv04_fb_memtype_valid;
+	priv->base.ram.init = nv44_fb_vram_init;
+	priv->base.tile.regions = 12;
+	priv->base.tile.init = nv44_fb_tile_init;
+	priv->base.tile.fini = nv20_fb_tile_fini;
+	priv->base.tile.prog = nv44_fb_tile_prog;
+	return nouveau_fb_preinit(&priv->base);
+}
+
+
+struct nouveau_oclass
+nv44_fb_oclass = {
+	.handle = NV_SUBDEV(FB, 0x44),
+	.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = nv44_fb_ctor,
+		.dtor = _nouveau_fb_dtor,
+		.init = nv44_fb_init,
+		.fini = _nouveau_fb_fini,
+	},
+};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
new file mode 100644
index 000000000000..589b93ea2994
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv46.c
@@ -0,0 +1,79 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/fb.h>
28
29struct nv46_fb_priv {
30 struct nouveau_fb base;
31};
32
33void
34nv46_fb_tile_init(struct nouveau_fb *pfb, int i, u32 addr, u32 size, u32 pitch,
35 u32 flags, struct nouveau_fb_tile *tile)
36{
37 /* for performance, select alternate bank offset for zeta */
38 if (!(flags & 4)) tile->addr = (0 << 3);
39 else tile->addr = (1 << 3);
40
41 tile->addr |= 0x00000001; /* mode = vram */
42 tile->addr |= addr;
43 tile->limit = max(1u, addr + size) - 1;
44 tile->pitch = pitch;
45}
46
47static int
48nv46_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
49 struct nouveau_oclass *oclass, void *data, u32 size,
50 struct nouveau_object **pobject)
51{
52 struct nv46_fb_priv *priv;
53 int ret;
54
55 ret = nouveau_fb_create(parent, engine, oclass, &priv);
56 *pobject = nv_object(priv);
57 if (ret)
58 return ret;
59
60 priv->base.memtype_valid = nv04_fb_memtype_valid;
61 priv->base.ram.init = nv44_fb_vram_init;
62 priv->base.tile.regions = 15;
63 priv->base.tile.init = nv46_fb_tile_init;
64 priv->base.tile.fini = nv20_fb_tile_fini;
65 priv->base.tile.prog = nv44_fb_tile_prog;
66 return nouveau_fb_preinit(&priv->base);
67}
68
69
70struct nouveau_oclass
71nv46_fb_oclass = {
72 .handle = NV_SUBDEV(FB, 0x46),
73 .ofuncs = &(struct nouveau_ofuncs) {
74 .ctor = nv46_fb_ctor,
75 .dtor = _nouveau_fb_dtor,
76 .init = nv44_fb_init,
77 .fini = _nouveau_fb_fini,
78 },
79};
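
nv46_fb_tile_init packs two things into the low bits of the tile address: bit 0 flags VRAM mode, and bit 3 selects the alternate bank offset that zeta (depth) surfaces get for performance, keyed off bit 2 of flags. A compact restatement of that packing, assuming flags bit 2 really does mean zeta as the comment says:

#include <stdint.h>

/* Compose the nv46 tile address word from a base address and the
 * surface flags; returns what nv46_fb_tile_init stores in tile->addr. */
static uint32_t nv46_tile_addr(uint32_t addr, uint32_t flags)
{
	uint32_t bank = (flags & 4) ? (1u << 3) : 0;	/* alternate bank for zeta */
	return addr | bank | 0x00000001;		/* bit 0: mode = vram */
}
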
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
new file mode 100644
index 000000000000..818bba35b368
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv47.c
@@ -0,0 +1,66 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/fb.h>
28
29struct nv47_fb_priv {
30 struct nouveau_fb base;
31};
32
33static int
34nv47_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
35 struct nouveau_oclass *oclass, void *data, u32 size,
36 struct nouveau_object **pobject)
37{
38 struct nv47_fb_priv *priv;
39 int ret;
40
41 ret = nouveau_fb_create(parent, engine, oclass, &priv);
42 *pobject = nv_object(priv);
43 if (ret)
44 return ret;
45
46 priv->base.memtype_valid = nv04_fb_memtype_valid;
47 priv->base.ram.init = nv41_fb_vram_init;
48 priv->base.tile.regions = 15;
49 priv->base.tile.init = nv30_fb_tile_init;
50 priv->base.tile.comp = nv40_fb_tile_comp;
51 priv->base.tile.fini = nv20_fb_tile_fini;
52 priv->base.tile.prog = nv41_fb_tile_prog;
53 return nouveau_fb_preinit(&priv->base);
54}
55
56
57struct nouveau_oclass
58nv47_fb_oclass = {
59 .handle = NV_SUBDEV(FB, 0x47),
60 .ofuncs = &(struct nouveau_ofuncs) {
61 .ctor = nv47_fb_ctor,
62 .dtor = _nouveau_fb_dtor,
63 .init = nv41_fb_init,
64 .fini = _nouveau_fb_fini,
65 },
66};
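
nv47 brings no new hooks of its own; its ctor, like the others in this series, composes a per-chipset feature table out of whichever earlier generation's implementations still match the hardware (nv41 VRAM probe, nv30 tile setup, nv40 compression, nv20 teardown). The shape of that composition, with illustrative stub types rather than the real driver structures:

/* Sketch of the hook-table pattern these ctors follow; the real
 * struct nouveau_fb carries far more state than this. */
struct fb_hooks {
	int  (*vram_init)(void *pfb);
	void (*tile_init)(void *pfb, int i);
	void (*tile_comp)(void *pfb, int i);
	void (*tile_fini)(void *pfb, int i);
	void (*tile_prog)(void *pfb, int i);
};

static int  nv41_vram_init_stub(void *pfb)        { return 0; }
static void nv30_tile_init_stub(void *pfb, int i) { }
static void nv40_tile_comp_stub(void *pfb, int i) { }
static void nv20_tile_fini_stub(void *pfb, int i) { }
static void nv41_tile_prog_stub(void *pfb, int i) { }

static const struct fb_hooks nv47_hooks = {
	.vram_init = nv41_vram_init_stub,
	.tile_init = nv30_tile_init_stub,
	.tile_comp = nv40_tile_comp_stub,
	.tile_fini = nv20_tile_fini_stub,
	.tile_prog = nv41_tile_prog_stub,
};
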
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
new file mode 100644
index 000000000000..84a31af16ab4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv49.c
@@ -0,0 +1,84 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/fb.h>
28
29struct nv49_fb_priv {
30 struct nouveau_fb base;
31};
32
33static int
34nv49_fb_vram_init(struct nouveau_fb *pfb)
35{
36 u32 pfb914 = nv_rd32(pfb, 0x100914);
37
38 switch (pfb914 & 0x00000003) {
39 case 0x00000000: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
40 case 0x00000001: pfb->ram.type = NV_MEM_TYPE_DDR2; break;
41 case 0x00000002: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
42 case 0x00000003: break;
43 }
44
45 pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
46 pfb->ram.parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
47 return nv_rd32(pfb, 0x100320);
48}
49
50static int
51nv49_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
52 struct nouveau_oclass *oclass, void *data, u32 size,
53 struct nouveau_object **pobject)
54{
55 struct nv49_fb_priv *priv;
56 int ret;
57
58 ret = nouveau_fb_create(parent, engine, oclass, &priv);
59 *pobject = nv_object(priv);
60 if (ret)
61 return ret;
62
63 priv->base.memtype_valid = nv04_fb_memtype_valid;
64 priv->base.ram.init = nv49_fb_vram_init;
65 priv->base.tile.regions = 15;
66 priv->base.tile.init = nv30_fb_tile_init;
67 priv->base.tile.comp = nv40_fb_tile_comp;
68 priv->base.tile.fini = nv20_fb_tile_fini;
69 priv->base.tile.prog = nv41_fb_tile_prog;
70
71 return nouveau_fb_preinit(&priv->base);
72}
73
74
75struct nouveau_oclass
76nv49_fb_oclass = {
77 .handle = NV_SUBDEV(FB, 0x49),
78 .ofuncs = &(struct nouveau_ofuncs) {
79 .ctor = nv49_fb_ctor,
80 .dtor = _nouveau_fb_dtor,
81 .init = nv41_fb_init,
82 .fini = _nouveau_fb_fini,
83 },
84};
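
nv49_fb_vram_init keys the memory type off the low two bits of register 0x100914 and deliberately leaves strap value 3 undecoded. The same decode as a standalone function, with an explicit unknown value in place of the kernel's leave-it-unset behaviour:

#include <stdint.h>

enum ram_type { RAM_UNKNOWN, RAM_DDR1, RAM_DDR2, RAM_GDDR3 };

/* Decode the nv49 memory-type strap (0x100914, bits 1:0). */
static enum ram_type nv49_strap_type(uint32_t pfb914)
{
	switch (pfb914 & 0x00000003) {
	case 0x00000000: return RAM_DDR1;
	case 0x00000001: return RAM_DDR2;
	case 0x00000002: return RAM_GDDR3;
	default:         return RAM_UNKNOWN;	/* strap 3: kernel leaves type alone */
	}
}
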
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
new file mode 100644
index 000000000000..797fd558170b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv4e.c
@@ -0,0 +1,72 @@
1/*
2 * Copyright (C) 2010 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <subdev/fb.h>
28
29struct nv4e_fb_priv {
30 struct nouveau_fb base;
31};
32
33static int
34nv4e_fb_vram_init(struct nouveau_fb *pfb)
35{
36 pfb->ram.size = nv_rd32(pfb, 0x10020c) & 0xff000000;
37 pfb->ram.type = NV_MEM_TYPE_STOLEN;
38 return 0;
39}
40
41static int
42nv4e_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
43 struct nouveau_oclass *oclass, void *data, u32 size,
44 struct nouveau_object **pobject)
45{
46 struct nv4e_fb_priv *priv;
47 int ret;
48
49 ret = nouveau_fb_create(parent, engine, oclass, &priv);
50 *pobject = nv_object(priv);
51 if (ret)
52 return ret;
53
54 priv->base.memtype_valid = nv04_fb_memtype_valid;
55 priv->base.ram.init = nv4e_fb_vram_init;
56 priv->base.tile.regions = 12;
57 priv->base.tile.init = nv46_fb_tile_init;
58 priv->base.tile.fini = nv20_fb_tile_fini;
59 priv->base.tile.prog = nv44_fb_tile_prog;
60 return nouveau_fb_preinit(&priv->base);
61}
62
63struct nouveau_oclass
64nv4e_fb_oclass = {
65 .handle = NV_SUBDEV(FB, 0x4e),
66 .ofuncs = &(struct nouveau_ofuncs) {
67 .ctor = nv4e_fb_ctor,
68 .dtor = _nouveau_fb_dtor,
69 .init = nv44_fb_init,
70 .fini = _nouveau_fb_fini,
71 },
72};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
index 5f570806143a..487cb8c6c204 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv50.c
@@ -51,6 +51,101 @@ nv50_fb_memtype_valid(struct nouveau_fb *pfb, u32 memtype)
51 51	return types[(memtype & 0xff00) >> 8] != 0;
52 52}
53 53
54static u32
55nv50_fb_vram_rblock(struct nouveau_fb *pfb)
56{
57 int i, parts, colbits, rowbitsa, rowbitsb, banks;
58 u64 rowsize, predicted;
59 u32 r0, r4, rt, ru, rblock_size;
60
61 r0 = nv_rd32(pfb, 0x100200);
62 r4 = nv_rd32(pfb, 0x100204);
63 rt = nv_rd32(pfb, 0x100250);
64 ru = nv_rd32(pfb, 0x001540);
65 nv_debug(pfb, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
66
67 for (i = 0, parts = 0; i < 8; i++) {
68 if (ru & (0x00010000 << i))
69 parts++;
70 }
71
72 colbits = (r4 & 0x0000f000) >> 12;
73 rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
74 rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
75 banks = 1 << (((r4 & 0x03000000) >> 24) + 2);
76
77 rowsize = parts * banks * (1 << colbits) * 8;
78 predicted = rowsize << rowbitsa;
79 if (r0 & 0x00000004)
80 predicted += rowsize << rowbitsb;
81
82 if (predicted != pfb->ram.size) {
83 nv_warn(pfb, "memory controller reports %d MiB VRAM\n",
84 (u32)(pfb->ram.size >> 20));
85 }
86
87 rblock_size = rowsize;
88 if (rt & 1)
89 rblock_size *= 3;
90
91 nv_debug(pfb, "rblock %d bytes\n", rblock_size);
92 return rblock_size;
93}
94
95static int
96nv50_fb_vram_init(struct nouveau_fb *pfb)
97{
98 struct nouveau_device *device = nv_device(pfb);
99 struct nouveau_bios *bios = nouveau_bios(device);
100 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
101 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
102 u32 size;
103 int ret;
104
105 pfb->ram.size = nv_rd32(pfb, 0x10020c);
106 pfb->ram.size = (pfb->ram.size & 0xffffff00) |
107 ((pfb->ram.size & 0x000000ff) << 32);
108
109 size = (pfb->ram.size >> 12) - rsvd_head - rsvd_tail;
110 switch (device->chipset) {
111 case 0xaa:
112 case 0xac:
113 case 0xaf: /* IGPs, no reordering, no real VRAM */
114 ret = nouveau_mm_init(&pfb->vram, rsvd_head, size, 1);
115 if (ret)
116 return ret;
117
118 pfb->ram.type = NV_MEM_TYPE_STOLEN;
119 pfb->ram.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
120 break;
121 default:
122 switch (nv_rd32(pfb, 0x100714) & 0x00000007) {
123 case 0: pfb->ram.type = NV_MEM_TYPE_DDR1; break;
124 case 1:
125 if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
126 pfb->ram.type = NV_MEM_TYPE_DDR3;
127 else
128 pfb->ram.type = NV_MEM_TYPE_DDR2;
129 break;
130 case 2: pfb->ram.type = NV_MEM_TYPE_GDDR3; break;
131 case 3: pfb->ram.type = NV_MEM_TYPE_GDDR4; break;
132 case 4: pfb->ram.type = NV_MEM_TYPE_GDDR5; break;
133 default:
134 break;
135 }
136
137 ret = nouveau_mm_init(&pfb->vram, rsvd_head, size,
138 nv50_fb_vram_rblock(pfb) >> 12);
139 if (ret)
140 return ret;
141
142 pfb->ram.ranks = (nv_rd32(pfb, 0x100200) & 0x4) ? 2 : 1;
143 break;
144 }
145
146 return nv_rd32(pfb, 0x100320);
147}
148
54 149static int
55 150nv50_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
56 151		 u32 memtype, struct nouveau_mem **pmem)
@@ -140,195 +235,6 @@ nv50_fb_vram_del(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
140 235	kfree(mem);
141 236}
142 237
143static u32
144nv50_vram_rblock(struct nv50_fb_priv *priv)
145{
146 int i, parts, colbits, rowbitsa, rowbitsb, banks;
147 u64 rowsize, predicted;
148 u32 r0, r4, rt, ru, rblock_size;
149
150 r0 = nv_rd32(priv, 0x100200);
151 r4 = nv_rd32(priv, 0x100204);
152 rt = nv_rd32(priv, 0x100250);
153 ru = nv_rd32(priv, 0x001540);
154 nv_debug(priv, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
155
156 for (i = 0, parts = 0; i < 8; i++) {
157 if (ru & (0x00010000 << i))
158 parts++;
159 }
160
161 colbits = (r4 & 0x0000f000) >> 12;
162 rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
163 rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
164 banks = 1 << (((r4 & 0x03000000) >> 24) + 2);
165
166 rowsize = parts * banks * (1 << colbits) * 8;
167 predicted = rowsize << rowbitsa;
168 if (r0 & 0x00000004)
169 predicted += rowsize << rowbitsb;
170
171 if (predicted != priv->base.ram.size) {
172 nv_warn(priv, "memory controller reports %d MiB VRAM\n",
173 (u32)(priv->base.ram.size >> 20));
174 }
175
176 rblock_size = rowsize;
177 if (rt & 1)
178 rblock_size *= 3;
179
180 nv_debug(priv, "rblock %d bytes\n", rblock_size);
181 return rblock_size;
182}
183
184static int
185nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
186 struct nouveau_oclass *oclass, void *data, u32 size,
187 struct nouveau_object **pobject)
188{
189 struct nouveau_device *device = nv_device(parent);
190 struct nouveau_bios *bios = nouveau_bios(device);
191 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
192 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
193 struct nv50_fb_priv *priv;
194 u32 tags;
195 int ret;
196
197 ret = nouveau_fb_create(parent, engine, oclass, &priv);
198 *pobject = nv_object(priv);
199 if (ret)
200 return ret;
201
202 switch (nv_rd32(priv, 0x100714) & 0x00000007) {
203 case 0: priv->base.ram.type = NV_MEM_TYPE_DDR1; break;
204 case 1:
205 if (nouveau_fb_bios_memtype(bios) == NV_MEM_TYPE_DDR3)
206 priv->base.ram.type = NV_MEM_TYPE_DDR3;
207 else
208 priv->base.ram.type = NV_MEM_TYPE_DDR2;
209 break;
210 case 2: priv->base.ram.type = NV_MEM_TYPE_GDDR3; break;
211 case 3: priv->base.ram.type = NV_MEM_TYPE_GDDR4; break;
212 case 4: priv->base.ram.type = NV_MEM_TYPE_GDDR5; break;
213 default:
214 break;
215 }
216
217 priv->base.ram.size = nv_rd32(priv, 0x10020c);
218 priv->base.ram.size = (priv->base.ram.size & 0xffffff00) |
219 ((priv->base.ram.size & 0x000000ff) << 32);
220
221 tags = nv_rd32(priv, 0x100320);
222 ret = nouveau_mm_init(&priv->base.tags, 0, tags, 1);
223 if (ret)
224 return ret;
225
226 nv_debug(priv, "%d compression tags\n", tags);
227
228 size = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail;
229 switch (device->chipset) {
230 case 0xaa:
231 case 0xac:
232 case 0xaf: /* IGPs, no reordering, no real VRAM */
233 ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size, 1);
234 if (ret)
235 return ret;
236
237 priv->base.ram.stolen = (u64)nv_rd32(priv, 0x100e10) << 12;
238 priv->base.ram.type = NV_MEM_TYPE_STOLEN;
239 break;
240 default:
241 ret = nouveau_mm_init(&priv->base.vram, rsvd_head, size,
242 nv50_vram_rblock(priv) >> 12);
243 if (ret)
244 return ret;
245
246 priv->base.ram.ranks = (nv_rd32(priv, 0x100200) & 0x4) ? 2 : 1;
247 break;
248 }
249
250 priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
251 if (priv->r100c08_page) {
252 priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page,
253 0, PAGE_SIZE,
254 PCI_DMA_BIDIRECTIONAL);
255 if (pci_dma_mapping_error(device->pdev, priv->r100c08))
256 nv_warn(priv, "failed 0x100c08 page map\n");
257 } else {
258 nv_warn(priv, "failed 0x100c08 page alloc\n");
259 }
260
261 priv->base.memtype_valid = nv50_fb_memtype_valid;
262 priv->base.ram.get = nv50_fb_vram_new;
263 priv->base.ram.put = nv50_fb_vram_del;
264 return nouveau_fb_created(&priv->base);
265}
266
267static void
268nv50_fb_dtor(struct nouveau_object *object)
269{
270 struct nouveau_device *device = nv_device(object);
271 struct nv50_fb_priv *priv = (void *)object;
272
273 if (priv->r100c08_page) {
274 pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE,
275 PCI_DMA_BIDIRECTIONAL);
276 __free_page(priv->r100c08_page);
277 }
278
279 nouveau_fb_destroy(&priv->base);
280}
281
282static int
283nv50_fb_init(struct nouveau_object *object)
284{
285 struct nouveau_device *device = nv_device(object);
286 struct nv50_fb_priv *priv = (void *)object;
287 int ret;
288
289 ret = nouveau_fb_init(&priv->base);
290 if (ret)
291 return ret;
292
293 /* Not a clue what this is exactly. Without pointing it at a
294 * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
295 * cause IOMMU "read from address 0" errors (rh#561267)
296 */
297 nv_wr32(priv, 0x100c08, priv->r100c08 >> 8);
298
299 /* This is needed to get meaningful information from 100c90
300 * on traps. No idea what these values mean exactly. */
301 switch (device->chipset) {
302 case 0x50:
303 nv_wr32(priv, 0x100c90, 0x000707ff);
304 break;
305 case 0xa3:
306 case 0xa5:
307 case 0xa8:
308 nv_wr32(priv, 0x100c90, 0x000d0fff);
309 break;
310 case 0xaf:
311 nv_wr32(priv, 0x100c90, 0x089d1fff);
312 break;
313 default:
314 nv_wr32(priv, 0x100c90, 0x001d07ff);
315 break;
316 }
317
318 return 0;
319}
320
321struct nouveau_oclass
322nv50_fb_oclass = {
323 .handle = NV_SUBDEV(FB, 0x50),
324 .ofuncs = &(struct nouveau_ofuncs) {
325 .ctor = nv50_fb_ctor,
326 .dtor = nv50_fb_dtor,
327 .init = nv50_fb_init,
328 .fini = _nouveau_fb_fini,
329 },
330};
331
332 238static const struct nouveau_enum vm_dispatch_subclients[] = {
333 239	{ 0x00000000, "GRCTX", NULL },
334 240	{ 0x00000001, "NOTIFY", NULL },
@@ -424,11 +330,11 @@ static const struct nouveau_enum vm_fault[] = {
424 330	{}
425 331};
426 332
427void
333static void
428nv50_fb_trap(struct nouveau_fb *pfb, int display)
334nv50_fb_intr(struct nouveau_subdev *subdev)
429 335{
430	struct nouveau_device *device = nv_device(pfb);
336	struct nouveau_device *device = nv_device(subdev);
431	struct nv50_fb_priv *priv = (void *)pfb;
337	struct nv50_fb_priv *priv = (void *)subdev;
432 338	const struct nouveau_enum *en, *cl;
433 339	u32 trap[6], idx, chan;
434 340	u8 st0, st1, st2, st3;
@@ -445,9 +351,6 @@ nv50_fb_trap(struct nouveau_fb *pfb, int display)
445 351	}
446 352	nv_wr32(priv, 0x100c90, idx | 0x80000000);
447 353
448 if (!display)
449 return;
450
451 354	/* decode status bits into something more useful */
452 355	if (device->chipset < 0xa3 ||
453 356	    device->chipset == 0xaa || device->chipset == 0xac) {
@@ -494,3 +397,101 @@ nv50_fb_trap(struct nouveau_fb *pfb, int display)
494 397	else
495 398		printk("0x%08x\n", st1);
496 399}
400
401static int
402nv50_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
403 struct nouveau_oclass *oclass, void *data, u32 size,
404 struct nouveau_object **pobject)
405{
406 struct nouveau_device *device = nv_device(parent);
407 struct nv50_fb_priv *priv;
408 int ret;
409
410 ret = nouveau_fb_create(parent, engine, oclass, &priv);
411 *pobject = nv_object(priv);
412 if (ret)
413 return ret;
414
415 priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
416 if (priv->r100c08_page) {
417 priv->r100c08 = pci_map_page(device->pdev, priv->r100c08_page,
418 0, PAGE_SIZE,
419 PCI_DMA_BIDIRECTIONAL);
420 if (pci_dma_mapping_error(device->pdev, priv->r100c08))
421 nv_warn(priv, "failed 0x100c08 page map\n");
422 } else {
423 nv_warn(priv, "failed 0x100c08 page alloc\n");
424 }
425
426 priv->base.memtype_valid = nv50_fb_memtype_valid;
427 priv->base.ram.init = nv50_fb_vram_init;
428 priv->base.ram.get = nv50_fb_vram_new;
429 priv->base.ram.put = nv50_fb_vram_del;
430 nv_subdev(priv)->intr = nv50_fb_intr;
431 return nouveau_fb_preinit(&priv->base);
432}
433
434static void
435nv50_fb_dtor(struct nouveau_object *object)
436{
437 struct nouveau_device *device = nv_device(object);
438 struct nv50_fb_priv *priv = (void *)object;
439
440 if (priv->r100c08_page) {
441 pci_unmap_page(device->pdev, priv->r100c08, PAGE_SIZE,
442 PCI_DMA_BIDIRECTIONAL);
443 __free_page(priv->r100c08_page);
444 }
445
446 nouveau_fb_destroy(&priv->base);
447}
448
449static int
450nv50_fb_init(struct nouveau_object *object)
451{
452 struct nouveau_device *device = nv_device(object);
453 struct nv50_fb_priv *priv = (void *)object;
454 int ret;
455
456 ret = nouveau_fb_init(&priv->base);
457 if (ret)
458 return ret;
459
460 /* Not a clue what this is exactly. Without pointing it at a
461 * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
462 * cause IOMMU "read from address 0" errors (rh#561267)
463 */
464 nv_wr32(priv, 0x100c08, priv->r100c08 >> 8);
465
466 /* This is needed to get meaningful information from 100c90
467 * on traps. No idea what these values mean exactly. */
468 switch (device->chipset) {
469 case 0x50:
470 nv_wr32(priv, 0x100c90, 0x000707ff);
471 break;
472 case 0xa3:
473 case 0xa5:
474 case 0xa8:
475 nv_wr32(priv, 0x100c90, 0x000d0fff);
476 break;
477 case 0xaf:
478 nv_wr32(priv, 0x100c90, 0x089d1fff);
479 break;
480 default:
481 nv_wr32(priv, 0x100c90, 0x001d07ff);
482 break;
483 }
484
485 return 0;
486}
487
488struct nouveau_oclass
489nv50_fb_oclass = {
490 .handle = NV_SUBDEV(FB, 0x50),
491 .ofuncs = &(struct nouveau_ofuncs) {
492 .ctor = nv50_fb_ctor,
493 .dtor = nv50_fb_dtor,
494 .init = nv50_fb_init,
495 .fini = _nouveau_fb_fini,
496 },
497};
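
The nv50_fb_vram_rblock logic moved above computes the reorder-block size from the partition mask, bank count, and column bits. Factored out of the driver with the same field decodes (the x3 for rt bit 0 is kept as-is; the kernel offers no explanation for it either):

#include <stdint.h>

/* Row/reorder-block size as nv50_fb_vram_rblock derives it from the
 * config registers; callers supply the raw register values. */
static uint32_t nv50_rblock(uint32_t r4, uint32_t rt, uint32_t ru)
{
	int parts = 0;
	for (int i = 0; i < 8; i++)
		if (ru & (0x00010000 << i))
			parts++;

	int colbits = (r4 & 0x0000f000) >> 12;
	int banks   = 1 << (((r4 & 0x03000000) >> 24) + 2);

	/* r0 and the row bits only feed the size sanity check, not the block */
	uint32_t rowsize = parts * banks * (1 << colbits) * 8;
	return (rt & 1) ? rowsize * 3 : rowsize;
}
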
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
index 9f59f2bf0079..306bdf121452 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nvc0.c
@@ -62,6 +62,65 @@ nvc0_fb_memtype_valid(struct nouveau_fb *pfb, u32 tile_flags)
62 62}
63 63
64 64static int
65nvc0_fb_vram_init(struct nouveau_fb *pfb)
66{
67 struct nouveau_bios *bios = nouveau_bios(pfb);
68 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
69 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
70 u32 parts = nv_rd32(pfb, 0x022438);
71 u32 pmask = nv_rd32(pfb, 0x022554);
72 u32 bsize = nv_rd32(pfb, 0x10f20c);
73 u32 offset, length;
74 bool uniform = true;
75 int ret, part;
76
77 nv_debug(pfb, "0x100800: 0x%08x\n", nv_rd32(pfb, 0x100800));
78 nv_debug(pfb, "parts 0x%08x mask 0x%08x\n", parts, pmask);
79
80 pfb->ram.type = nouveau_fb_bios_memtype(bios);
81 pfb->ram.ranks = (nv_rd32(pfb, 0x10f200) & 0x00000004) ? 2 : 1;
82
83 /* read amount of vram attached to each memory controller */
84 for (part = 0; part < parts; part++) {
85 if (!(pmask & (1 << part))) {
86 u32 psize = nv_rd32(pfb, 0x11020c + (part * 0x1000));
87 if (psize != bsize) {
88 if (psize < bsize)
89 bsize = psize;
90 uniform = false;
91 }
92
93 nv_debug(pfb, "%d: mem_amount 0x%08x\n", part, psize);
94 pfb->ram.size += (u64)psize << 20;
95 }
96 }
97
98	/* if all controllers have the same amount attached, there are no holes */
99 if (uniform) {
100 offset = rsvd_head;
101 length = (pfb->ram.size >> 12) - rsvd_head - rsvd_tail;
102 return nouveau_mm_init(&pfb->vram, offset, length, 1);
103 }
104
105 /* otherwise, address lowest common amount from 0GiB */
106 ret = nouveau_mm_init(&pfb->vram, rsvd_head, (bsize << 8) * parts, 1);
107 if (ret)
108 return ret;
109
110 /* and the rest starting from (8GiB + common_size) */
111 offset = (0x0200000000ULL >> 12) + (bsize << 8);
112 length = (pfb->ram.size >> 12) - (bsize << 8) - rsvd_tail;
113
114 ret = nouveau_mm_init(&pfb->vram, offset, length, 0);
115 if (ret) {
116 nouveau_mm_fini(&pfb->vram);
117 return ret;
118 }
119
120 return 0;
121}
122
123static int
65 124nvc0_fb_vram_new(struct nouveau_fb *pfb, u64 size, u32 align, u32 ncmin,
66 125		 u32 memtype, struct nouveau_mem **pmem)
67 126{
@@ -139,66 +198,6 @@ nvc0_fb_dtor(struct nouveau_object *object)
139 198}
140 199
141 200static int
142nvc0_vram_detect(struct nvc0_fb_priv *priv)
143{
144 struct nouveau_bios *bios = nouveau_bios(priv);
145 struct nouveau_fb *pfb = &priv->base;
146 const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
147 const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
148 u32 parts = nv_rd32(priv, 0x022438);
149 u32 pmask = nv_rd32(priv, 0x022554);
150 u32 bsize = nv_rd32(priv, 0x10f20c);
151 u32 offset, length;
152 bool uniform = true;
153 int ret, part;
154
155 nv_debug(priv, "0x100800: 0x%08x\n", nv_rd32(priv, 0x100800));
156 nv_debug(priv, "parts 0x%08x mask 0x%08x\n", parts, pmask);
157
158 priv->base.ram.type = nouveau_fb_bios_memtype(bios);
159 priv->base.ram.ranks = (nv_rd32(priv, 0x10f200) & 0x00000004) ? 2 : 1;
160
161 /* read amount of vram attached to each memory controller */
162 for (part = 0; part < parts; part++) {
163 if (!(pmask & (1 << part))) {
164 u32 psize = nv_rd32(priv, 0x11020c + (part * 0x1000));
165 if (psize != bsize) {
166 if (psize < bsize)
167 bsize = psize;
168 uniform = false;
169 }
170
171 nv_debug(priv, "%d: mem_amount 0x%08x\n", part, psize);
172 priv->base.ram.size += (u64)psize << 20;
173 }
174 }
175
176	/* if all controllers have the same amount attached, there are no holes */
177 if (uniform) {
178 offset = rsvd_head;
179 length = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail;
180 return nouveau_mm_init(&pfb->vram, offset, length, 1);
181 }
182
183 /* otherwise, address lowest common amount from 0GiB */
184 ret = nouveau_mm_init(&pfb->vram, rsvd_head, (bsize << 8) * parts, 1);
185 if (ret)
186 return ret;
187
188 /* and the rest starting from (8GiB + common_size) */
189 offset = (0x0200000000ULL >> 12) + (bsize << 8);
190 length = (priv->base.ram.size >> 12) - (bsize << 8) - rsvd_tail;
191
192 ret = nouveau_mm_init(&pfb->vram, offset, length, 0);
193 if (ret) {
194 nouveau_mm_fini(&pfb->vram);
195 return ret;
196 }
197
198 return 0;
199}
200
201static int
202 201nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
203 202	     struct nouveau_oclass *oclass, void *data, u32 size,
204 203	     struct nouveau_object **pobject)
@@ -213,13 +212,10 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
213 212		return ret;
214 213
215 214	priv->base.memtype_valid = nvc0_fb_memtype_valid;
215	priv->base.ram.init = nvc0_fb_vram_init;
216 216	priv->base.ram.get = nvc0_fb_vram_new;
217 217	priv->base.ram.put = nv50_fb_vram_del;
218 218
219	ret = nvc0_vram_detect(priv);
220	if (ret)
221		return ret;
222
223 219	priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
224 220	if (!priv->r100c10_page)
225 221		return -ENOMEM;
@@ -229,7 +225,7 @@ nvc0_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
229 225	if (pci_dma_mapping_error(device->pdev, priv->r100c10))
230 226		return -EFAULT;
231 227
232	return nouveau_fb_created(&priv->base);
228	return nouveau_fb_preinit(&priv->base);
233 229}
234 230
235 231
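
When the per-partition sizes differ, nvc0_fb_vram_init carves VRAM into two mm regions: the amount common to every partition is addressed from zero, and the remainder reappears above the 8 GiB mark. The page-unit arithmetic, pulled out of the hunk above (partition sizes are in MiB, so << 8 converts to 4 KiB pages); the example figures are ours:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t rsvd_head = ( 256 * 1024) >> 12;	/* vga memory */
	const uint64_t rsvd_tail = (1024 * 1024) >> 12;	/* vbios etc */
	uint64_t parts = 2, bsize = 1024;		/* MiB per partition, example */
	uint64_t ram_pages = (3ULL << 30) >> 12;	/* 3 GiB total, example */

	/* lowest common amount, addressed from 0 */
	uint64_t lo_off = rsvd_head, lo_len = (bsize << 8) * parts;
	/* and the rest, starting from 8 GiB + common_size */
	uint64_t hi_off = (0x0200000000ULL >> 12) + (bsize << 8);
	uint64_t hi_len = ram_pages - (bsize << 8) - rsvd_tail;

	printf("lo: off 0x%llx len 0x%llx pages\n",
	       (unsigned long long)lo_off, (unsigned long long)lo_len);
	printf("hi: off 0x%llx len 0x%llx pages\n",
	       (unsigned long long)hi_off, (unsigned long long)hi_len);
	return 0;
}
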
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
index fe1ebf199ba9..dc27e794a851 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/aux.c
@@ -50,7 +50,7 @@ auxch_init(struct nouveau_i2c *aux, int ch)
50 50		ctrl = nv_rd32(aux, 0x00e4e4 + (ch * 0x50));
51 51		udelay(1);
52 52		if (!timeout--) {
53			AUX_ERR("begin idle timeout 0x%08x", ctrl);
53			AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
54 54			return -EBUSY;
55 55		}
56 56	} while (ctrl & 0x03010000);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
index ba4d28b50368..f5bbd3834116 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv04.c
@@ -63,14 +63,14 @@ nv04_instobj_dtor(struct nouveau_object *object)
63 63}
64 64
65 65static u32
66nv04_instobj_rd32(struct nouveau_object *object, u32 addr)
66nv04_instobj_rd32(struct nouveau_object *object, u64 addr)
67 67{
68 68	struct nv04_instobj_priv *node = (void *)object;
69 69	return nv_ro32(object->engine, node->mem->offset + addr);
70 70}
71 71
72 72static void
73nv04_instobj_wr32(struct nouveau_object *object, u32 addr, u32 data)
73nv04_instobj_wr32(struct nouveau_object *object, u64 addr, u32 data)
74 74{
75 75	struct nv04_instobj_priv *node = (void *)object;
76 76	nv_wo32(object->engine, node->mem->offset + addr, data);
@@ -173,13 +173,13 @@ nv04_instmem_dtor(struct nouveau_object *object)
173 173}
174 174
175 175static u32
176nv04_instmem_rd32(struct nouveau_object *object, u32 addr)
176nv04_instmem_rd32(struct nouveau_object *object, u64 addr)
177 177{
178 178	return nv_rd32(object, 0x700000 + addr);
179 179}
180 180
181 181static void
182nv04_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data)
182nv04_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
183 183{
184 184	return nv_wr32(object, 0x700000 + addr, data);
185 185}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
index 73c52ebd5932..da64253201ef 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
@@ -111,14 +111,14 @@ nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
111 111}
112 112
113 113static u32
114nv40_instmem_rd32(struct nouveau_object *object, u32 addr)
114nv40_instmem_rd32(struct nouveau_object *object, u64 addr)
115 115{
116 116	struct nv04_instmem_priv *priv = (void *)object;
117 117	return ioread32_native(priv->iomem + addr);
118 118}
119 119
120 120static void
121nv40_instmem_wr32(struct nouveau_object *object, u32 addr, u32 data)
121nv40_instmem_wr32(struct nouveau_object *object, u64 addr, u32 data)
122 122{
123 123	struct nv04_instmem_priv *priv = (void *)object;
124 124	iowrite32_native(data, priv->iomem + addr);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
index 27ef0891d10b..cfc7e31461de 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv50.c
@@ -76,7 +76,7 @@ nv50_instobj_dtor(struct nouveau_object *object)
76 76}
77 77
78 78static u32
79nv50_instobj_rd32(struct nouveau_object *object, u32 offset)
79nv50_instobj_rd32(struct nouveau_object *object, u64 offset)
80 80{
81 81	struct nv50_instmem_priv *priv = (void *)object->engine;
82 82	struct nv50_instobj_priv *node = (void *)object;
@@ -96,7 +96,7 @@ nv50_instobj_rd32(struct nouveau_object *object, u32 offset)
96 96}
97 97
98 98static void
99nv50_instobj_wr32(struct nouveau_object *object, u32 offset, u32 data)
99nv50_instobj_wr32(struct nouveau_object *object, u64 offset, u32 data)
100 100{
101 101	struct nv50_instmem_priv *priv = (void *)object->engine;
102 102	struct nv50_instobj_priv *node = (void *)object;
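
The three instmem diffs above make the same mechanical change: the rd32/wr32 accessors take a u64 offset instead of u32, so objects living above the 4 GiB boundary stay reachable through the same hooks. Reduced to its type signature (names illustrative, not the driver's):

#include <stdint.h>

struct inst_accessors {
	uint32_t (*rd32)(void *object, uint64_t addr);
	void     (*wr32)(void *object, uint64_t addr, uint32_t data);
};
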
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index de5721cfc4c2..8379aafa6e1b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -30,20 +30,20 @@ nouveau_mc_intr(struct nouveau_subdev *subdev)
30 30	struct nouveau_mc *pmc = nouveau_mc(subdev);
31 31	const struct nouveau_mc_intr *map = pmc->intr_map;
32 32	struct nouveau_subdev *unit;
33	u32 stat;
33	u32 stat, intr;
34 34
35	stat = nv_rd32(pmc, 0x000100);
35	intr = stat = nv_rd32(pmc, 0x000100);
36 36	while (stat && map->stat) {
37 37		if (stat & map->stat) {
38 38			unit = nouveau_subdev(subdev, map->unit);
39 39			if (unit && unit->intr)
40 40				unit->intr(unit);
41			stat &= ~map->stat;
41			intr &= ~map->stat;
42 42		}
43 43		map++;
44 44	}
45 45
46	if (stat) {
46	if (intr) {
47 47		nv_error(pmc, "unknown intr 0x%08x\n", stat);
48 48	}
49 49}
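
The point of the stat/intr split above: the old loop cleared handled bits out of stat itself, so a later map entry sharing any of those bits would never fire; the multi-bit 0x0000d101 FB entries added below overlap others such as FIFO's 0x00000100, making that real. Now the pending word stays intact for matching and a copy tracks what is left unhandled. A self-contained model of the reworked dispatch (note the kernel hunk still prints the whole stat word in its message, where this sketch prints only the leftover bits):

#include <stdint.h>
#include <stdio.h>

struct intr_map { uint32_t stat; const char *unit; };

static const struct intr_map map[] = {
	{ 0x00000100, "FIFO" },
	{ 0x00001000, "GR" },
	{ 0 }
};

static void mc_dispatch(uint32_t stat)
{
	const struct intr_map *m = map;
	uint32_t intr = stat;		/* copy: cleared as units are handled */

	while (stat && m->stat) {
		if (stat & m->stat) {
			printf("dispatch %s\n", m->unit);
			intr &= ~m->stat;
		}
		m++;
	}
	if (intr)
		printf("unknown intr 0x%08x\n", intr);
}

int main(void) { mc_dispatch(0x00001108); return 0; }
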
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index cedf33b02977..8d759f830323 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -39,6 +39,7 @@ nv50_mc_intr[] = {
39 39	{ 0x00200000, NVDEV_SUBDEV_GPIO },
40 40	{ 0x04000000, NVDEV_ENGINE_DISP },
41 41	{ 0x80000000, NVDEV_ENGINE_SW },
42	{ 0x0000d101, NVDEV_SUBDEV_FB },
42 43	{},
43 44};
44 45
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index a001e4c4d38d..ceb5c83f9459 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -40,6 +40,7 @@ nv98_mc_intr[] = {
40 40	{ 0x00400000, NVDEV_ENGINE_COPY0 },	/* NVA3-     */
41 41	{ 0x04000000, NVDEV_ENGINE_DISP },
42 42	{ 0x80000000, NVDEV_ENGINE_SW },
43	{ 0x0040d101, NVDEV_SUBDEV_FB },
43 44	{},
44 45};
45 46
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index c2b81e30a17d..92796682722d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -36,6 +36,7 @@ nvc0_mc_intr[] = {
36 36	{ 0x00000100, NVDEV_ENGINE_FIFO },
37 37	{ 0x00001000, NVDEV_ENGINE_GR },
38 38	{ 0x00008000, NVDEV_ENGINE_BSP },
39	{ 0x00020000, NVDEV_ENGINE_VP },
39 40	{ 0x00100000, NVDEV_SUBDEV_TIMER },
40 41	{ 0x00200000, NVDEV_SUBDEV_GPIO },
41 42	{ 0x02000000, NVDEV_SUBDEV_LTCG },
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index cbf1fc60a386..41241922263f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -246,14 +246,26 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
246 246		return nouveau_abi16_put(abi16, -ENODEV);
247 247
248 248	client = nv_client(abi16->client);
249
250	if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
251		return nouveau_abi16_put(abi16, -EINVAL);
252
253 249	device = nv_device(abi16->device);
254 250	imem = nouveau_instmem(device);
255 251	pfb = nouveau_fb(device);
256 252
253 /* hack to allow channel engine type specification on kepler */
254 if (device->card_type >= NV_E0) {
255 if (init->fb_ctxdma_handle != ~0)
256 init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR;
257 else
258 init->fb_ctxdma_handle = init->tt_ctxdma_handle;
259
260 /* allow flips to be executed if this is a graphics channel */
261 init->tt_ctxdma_handle = 0;
262 if (init->fb_ctxdma_handle == NVE0_CHANNEL_IND_ENGINE_GR)
263 init->tt_ctxdma_handle = 1;
264 }
265
266 if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
267 return nouveau_abi16_put(abi16, -EINVAL);
268
257 269	/* allocate "abi16 channel" data and make up a handle for it */
258 270	init->channel = ffsll(~abi16->handles);
259 271	if (!init->channel--)
@@ -268,11 +280,6 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
268 280	abi16->handles |= (1 << init->channel);
269 281
270 282	/* create channel object and initialise dma and fence management */
271	if (device->card_type >= NV_E0) {
272		init->fb_ctxdma_handle = NVE0_CHANNEL_IND_ENGINE_GR;
273		init->tt_ctxdma_handle = 0;
274	}
275
276 283	ret = nouveau_channel_new(drm, cli, NVDRM_DEVICE, NVDRM_CHAN |
277 284				  init->channel, init->fb_ctxdma_handle,
278 285				  init->tt_ctxdma_handle, &chan->chan);
@@ -382,7 +389,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
382 389	struct nouveau_abi16_chan *chan, *temp;
383 390	struct nouveau_abi16_ntfy *ntfy;
384 391	struct nouveau_object *object;
385	struct nv_dma_class args;
392	struct nv_dma_class args = {};
386 393	int ret;
387 394
388 395	if (unlikely(!abi16))
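
On Kepler (NV_E0 and up) the hunk above repurposes the two ctxdma words before the ~0 validity check: fb_ctxdma_handle becomes an engine selector (GR whenever userspace passed a real handle) and tt_ctxdma_handle collapses into a flips-allowed flag for graphics channels. The remap in isolation, with NVE0_CHANNEL_IND_ENGINE_GR reduced to an opaque stand-in constant:

#include <stdint.h>

#define ENGINE_GR 1	/* stand-in for NVE0_CHANNEL_IND_ENGINE_GR */

/* Mirrors the abi16 hack: rewrites *fb and *tt in place. */
static void kepler_ctxdma_remap(uint32_t *fb, uint32_t *tt)
{
	if (*fb != ~0u)
		*fb = ENGINE_GR;	/* any real handle selects GR */
	else
		*fb = *tt;		/* ~0 passes the engine type via tt */

	/* allow flips only on graphics channels */
	*tt = (*fb == ENGINE_GR) ? 1 : 0;
}
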
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 48783e14114c..d97f20069d3e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -35,6 +35,14 @@ static struct nouveau_dsm_priv {
35 35	acpi_handle rom_handle;
36 36} nouveau_dsm_priv;
37 37
38bool nouveau_is_optimus(void) {
39 return nouveau_dsm_priv.optimus_detected;
40}
41
42bool nouveau_is_v1_dsm(void) {
43 return nouveau_dsm_priv.dsm_detected;
44}
45
38 46#define NOUVEAU_DSM_HAS_MUX 0x1
39 47#define NOUVEAU_DSM_HAS_OPT 0x2
40 48
@@ -183,9 +191,7 @@ static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switchero
183 191
184 192static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
185 193{
186	/* perhaps the _DSM functions are mutually exclusive, but prepare for
187	 * the future */
188	if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected)
194	if (!nouveau_dsm_priv.dsm_detected)
189 195		return 0;
190 196	if (id == VGA_SWITCHEROO_IGD)
191 197		return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA);
@@ -201,7 +207,7 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
201 207
202 208	/* Optimus laptops have the card already disabled in
203 209	 * nouveau_switcheroo_set_state */
204	if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected)
210	if (!nouveau_dsm_priv.dsm_detected)
205 211		return 0;
206 212
207 213	return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
@@ -283,24 +289,24 @@ static bool nouveau_dsm_detect(void)
283 289		has_optimus = 1;
284 290	}
285 291
286	if (vga_count == 2 && has_dsm && guid_valid) {
287		acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
288			&buffer);
289		printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
290		       acpi_method_name);
291		nouveau_dsm_priv.dsm_detected = true;
292		ret = true;
293	}
294
295	if (has_optimus == 1) {
296		acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
297			&buffer);
298		printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n",
299		       acpi_method_name);
300		nouveau_dsm_priv.optimus_detected = true;
301		ret = true;
302	}
303
292	/* find the optimus DSM or the old v1 DSM */
293	if (has_optimus == 1) {
294		acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
295			&buffer);
296		printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n",
297		       acpi_method_name);
298		nouveau_dsm_priv.optimus_detected = true;
299		ret = true;
300	} else if (vga_count == 2 && has_dsm && guid_valid) {
301		acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
302			&buffer);
303		printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
304		       acpi_method_name);
305		nouveau_dsm_priv.dsm_detected = true;
306		ret = true;
307	}
308
309
304 310	return ret;
305 311}
306 312
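
After this reorder, nouveau_dsm_detect prefers the Optimus _DSM outright and only falls back to claiming the old v1 switching _DSM on dual-VGA systems, rather than potentially flagging both. The decision distilled (the inputs stand for what the PCI walk gathered earlier in the function):

#include <stdbool.h>

static bool dsm_detect(int vga_count, bool has_dsm, bool guid_valid,
		       bool has_optimus, bool *optimus, bool *v1)
{
	if (has_optimus) {		/* Optimus DSM wins outright */
		*optimus = true;
		return true;
	}
	if (vga_count == 2 && has_dsm && guid_valid) {
		*v1 = true;		/* old mux-style DSM, dual-GPU only */
		return true;
	}
	return false;
}
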
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.h b/drivers/gpu/drm/nouveau/nouveau_acpi.h
index 08af67722b57..d0da230d7706 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.h
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.h
@@ -4,6 +4,8 @@
4 4#define ROM_BIOS_PAGE 4096
5 5
6 6#if defined(CONFIG_ACPI)
7bool nouveau_is_optimus(void);
8bool nouveau_is_v1_dsm(void);
7 9void nouveau_register_dsm_handler(void);
8 10void nouveau_unregister_dsm_handler(void);
9 11void nouveau_switcheroo_optimus_dsm(void);
@@ -11,6 +13,8 @@ int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
11 13bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
12 14void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
13 15#else
16static inline bool nouveau_is_optimus(void) { return false; };
17static inline bool nouveau_is_v1_dsm(void) { return false; };
14 18static inline void nouveau_register_dsm_handler(void) {}
15 19static inline void nouveau_unregister_dsm_handler(void) {}
16 20static inline void nouveau_switcheroo_optimus_dsm(void) {}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 09fdef235882..865eddfa30a7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -624,206 +624,6 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
624 624	return 0;
625 625}
626 626
627/* BIT 'U'/'d' table encoder subtables have hashes matching them to
628 * a particular set of encoders.
629 *
630 * This function returns true if a particular DCB entry matches.
631 */
632bool
633bios_encoder_match(struct dcb_output *dcb, u32 hash)
634{
635 if ((hash & 0x000000f0) != (dcb->location << 4))
636 return false;
637 if ((hash & 0x0000000f) != dcb->type)
638 return false;
639 if (!(hash & (dcb->or << 16)))
640 return false;
641
642 switch (dcb->type) {
643 case DCB_OUTPUT_TMDS:
644 case DCB_OUTPUT_LVDS:
645 case DCB_OUTPUT_DP:
646 if (hash & 0x00c00000) {
647 if (!(hash & (dcb->sorconf.link << 22)))
648 return false;
649 }
650 default:
651 return true;
652 }
653}
654
655int
656nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
657 struct dcb_output *dcbent, int crtc)
658{
659 /*
660 * The display script table is located by the BIT 'U' table.
661 *
662 * It contains an array of pointers to various tables describing
663 * a particular output type. The first 32-bits of the output
664 * tables contains similar information to a DCB entry, and is
665 * used to decide whether that particular table is suitable for
666 * the output you want to access.
667 *
668 * The "record header length" field here seems to indicate the
669 * offset of the first configuration entry in the output tables.
670 * This is 10 on most cards I've seen, but 12 has been witnessed
671 * on DP cards, and there's another script pointer within the
672 * header.
673 *
674 * offset + 0 ( 8 bits): version
675 * offset + 1 ( 8 bits): header length
676 * offset + 2 ( 8 bits): record length
677 * offset + 3 ( 8 bits): number of records
678 * offset + 4 ( 8 bits): record header length
679 * offset + 5 (16 bits): pointer to first output script table
680 */
681
682 struct nouveau_drm *drm = nouveau_drm(dev);
683 struct nvbios *bios = &drm->vbios;
684 uint8_t *table = &bios->data[bios->display.script_table_ptr];
685 uint8_t *otable = NULL;
686 uint16_t script;
687 int i;
688
689 if (!bios->display.script_table_ptr) {
690 NV_ERROR(drm, "No pointer to output script table\n");
691 return 1;
692 }
693
694 /*
695 * Nothing useful has been in any of the pre-2.0 tables I've seen,
696 * so until they are, we really don't need to care.
697 */
698 if (table[0] < 0x20)
699 return 1;
700
701 if (table[0] != 0x20 && table[0] != 0x21) {
702 NV_ERROR(drm, "Output script table version 0x%02x unknown\n",
703 table[0]);
704 return 1;
705 }
706
707 /*
708 * The output script tables describing a particular output type
709 * look as follows:
710 *
711 * offset + 0 (32 bits): output this table matches (hash of DCB)
712 * offset + 4 ( 8 bits): unknown
713 * offset + 5 ( 8 bits): number of configurations
714 * offset + 6 (16 bits): pointer to some script
715 * offset + 8 (16 bits): pointer to some script
716 *
717 * headerlen == 10
718 * offset + 10 : configuration 0
719 *
720 * headerlen == 12
721 * offset + 10 : pointer to some script
722 * offset + 12 : configuration 0
723 *
724 * Each config entry is as follows:
725 *
726 * offset + 0 (16 bits): unknown, assumed to be a match value
727 * offset + 2 (16 bits): pointer to script table (clock set?)
728 * offset + 4 (16 bits): pointer to script table (reset?)
729 *
730 * There doesn't appear to be a count value to say how many
731 * entries exist in each script table, instead, a 0 value in
732 * the first 16-bit word seems to indicate both the end of the
733 * list and the default entry. The second 16-bit word in the
734 * script tables is a pointer to the script to execute.
735 */
736
737 NV_DEBUG(drm, "Searching for output entry for %d %d %d\n",
738 dcbent->type, dcbent->location, dcbent->or);
739 for (i = 0; i < table[3]; i++) {
740 otable = ROMPTR(dev, table[table[1] + (i * table[2])]);
741 if (otable && bios_encoder_match(dcbent, ROM32(otable[0])))
742 break;
743 }
744
745 if (!otable) {
746 NV_DEBUG(drm, "failed to match any output table\n");
747 return 1;
748 }
749
750 if (pclk < -2 || pclk > 0) {
751 /* Try to find matching script table entry */
752 for (i = 0; i < otable[5]; i++) {
753 if (ROM16(otable[table[4] + i*6]) == type)
754 break;
755 }
756
757 if (i == otable[5]) {
758 NV_ERROR(drm, "Table 0x%04x not found for %d/%d, "
759 "using first\n",
760 type, dcbent->type, dcbent->or);
761 i = 0;
762 }
763 }
764
765 if (pclk == 0) {
766 script = ROM16(otable[6]);
767 if (!script) {
768 NV_DEBUG(drm, "output script 0 not found\n");
769 return 1;
770 }
771
772 NV_DEBUG(drm, "0x%04X: parsing output script 0\n", script);
773 nouveau_bios_run_init_table(dev, script, dcbent, crtc);
774 } else
775 if (pclk == -1) {
776 script = ROM16(otable[8]);
777 if (!script) {
778 NV_DEBUG(drm, "output script 1 not found\n");
779 return 1;
780 }
781
782 NV_DEBUG(drm, "0x%04X: parsing output script 1\n", script);
783 nouveau_bios_run_init_table(dev, script, dcbent, crtc);
784 } else
785 if (pclk == -2) {
786 if (table[4] >= 12)
787 script = ROM16(otable[10]);
788 else
789 script = 0;
790 if (!script) {
791 NV_DEBUG(drm, "output script 2 not found\n");
792 return 1;
793 }
794
795 NV_DEBUG(drm, "0x%04X: parsing output script 2\n", script);
796 nouveau_bios_run_init_table(dev, script, dcbent, crtc);
797 } else
798 if (pclk > 0) {
799 script = ROM16(otable[table[4] + i*6 + 2]);
800 if (script)
801 script = clkcmptable(bios, script, pclk);
802 if (!script) {
803 NV_DEBUG(drm, "clock script 0 not found\n");
804 return 1;
805 }
806
807 NV_DEBUG(drm, "0x%04X: parsing clock script 0\n", script);
808 nouveau_bios_run_init_table(dev, script, dcbent, crtc);
809 } else
810 if (pclk < 0) {
811 script = ROM16(otable[table[4] + i*6 + 4]);
812 if (script)
813 script = clkcmptable(bios, script, -pclk);
814 if (!script) {
815 NV_DEBUG(drm, "clock script 1 not found\n");
816 return 1;
817 }
818
819 NV_DEBUG(drm, "0x%04X: parsing clock script 1\n", script);
820 nouveau_bios_run_init_table(dev, script, dcbent, crtc);
821 }
822
823 return 0;
824}
825
826
827 627int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head, int pxclk)
828 628{
829 629	/*
@@ -1212,31 +1012,6 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
1212 1012	return 0;
1213 1013}
1214 1014
1215static int
1216parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
1217 struct bit_entry *bitentry)
1218{
1219 /*
1220 * Parses the pointer to the G80 output script tables
1221 *
1222 * Starting at bitentry->offset:
1223 *
1224 * offset + 0 (16 bits): output script table pointer
1225 */
1226
1227 struct nouveau_drm *drm = nouveau_drm(dev);
1228 uint16_t outputscripttableptr;
1229
1230 if (bitentry->length != 3) {
1231 NV_ERROR(drm, "Do not understand BIT U table\n");
1232 return -EINVAL;
1233 }
1234
1235 outputscripttableptr = ROM16(bios->data[bitentry->offset]);
1236 bios->display.script_table_ptr = outputscripttableptr;
1237 return 0;
1238}
1239
1240 1015struct bit_table {
1241 1016	const char id;
1242 1017	int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
@@ -1313,7 +1088,6 @@ parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset)
1313 1088	parse_bit_table(bios, bitoffset, &BIT_TABLE('M', M)); /* memory? */
1314 1089	parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds));
1315 1090	parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds));
1316	parse_bit_table(bios, bitoffset, &BIT_TABLE('U', U));
1317 1091
1318 1092	return 0;
1319 1093}
@@ -2324,7 +2098,7 @@ nouveau_run_vbios_init(struct drm_device *dev)
2324 2098{
2325 2099	struct nouveau_drm *drm = nouveau_drm(dev);
2326 2100	struct nvbios *bios = &drm->vbios;
2327	int i, ret = 0;
2101	int ret = 0;
2328 2102
2329 2103	/* Reset the BIOS head to 0. */
2330 2104	bios->state.crtchead = 0;
@@ -2337,13 +2111,6 @@ nouveau_run_vbios_init(struct drm_device *dev)
2337 2111		bios->fp.lvds_init_run = false;
2338 2112	}
2339 2113
2340	if (nv_device(drm->device)->card_type >= NV_50) {
2341		for (i = 0; bios->execute && i < bios->dcb.entries; i++) {
2342			nouveau_bios_run_display_table(dev, 0, 0,
2343						       &bios->dcb.entry[i], -1);
2344		}
2345	}
2346
2347 2114	return ret;
2348 2115}
2349 2116
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 3befbb821a56..f68c54ca422f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -128,12 +128,6 @@ struct nvbios {
128 128	} state;
129 129
130 130	struct {
131		struct dcb_output *output;
132		int crtc;
133		uint16_t script_table_ptr;
134	} display;
135
136	struct {
137 131		uint16_t fptablepointer;	/* also used by tmds */
138 132		uint16_t fpxlatetableptr;
139 133		int xlatwidth;
@@ -185,8 +179,6 @@ void nouveau_bios_takedown(struct drm_device *dev);
185 179int nouveau_run_vbios_init(struct drm_device *);
186 180struct dcb_connector_table_entry *
187 181nouveau_bios_connector_entry(struct drm_device *, int index);
188int nouveau_bios_run_display_table(struct drm_device *, u16 id, int clk,
189				   struct dcb_output *, int crtc);
190 182bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *);
191 183uint8_t *nouveau_bios_embedded_edid(struct drm_device *);
192 184int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk,
@@ -195,6 +187,5 @@ int run_tmds_table(struct drm_device *, struct dcb_output *,
195 187			  int head, int pxclk);
196 188int call_lvds_script(struct drm_device *, struct dcb_output *, int head,
197 189		     enum LVDS_script, int pxclk);
198bool bios_encoder_match(struct dcb_output *, u32 hash);
199 190
200 191#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 35ac57f0aab6..5614c89148cb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -225,7 +225,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
225 225
226 226	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
227 227			  type, &nvbo->placement,
228			  align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
228			  align >> PAGE_SHIFT, false, NULL, acc_size, sg,
229 229			  nouveau_bo_del_ttm);
230 230	if (ret) {
231 231		/* ttm will call nouveau_bo_del_ttm if it fails.. */
@@ -315,7 +315,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
315 315
316 316	nouveau_bo_placement_set(nvbo, memtype, 0);
317 317
318	ret = nouveau_bo_validate(nvbo, false, false, false);
318	ret = nouveau_bo_validate(nvbo, false, false);
319 319	if (ret == 0) {
320 320		switch (bo->mem.mem_type) {
321 321		case TTM_PL_VRAM:
@@ -351,7 +351,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
351 351
352 352	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
353 353
354	ret = nouveau_bo_validate(nvbo, false, false, false);
354	ret = nouveau_bo_validate(nvbo, false, false);
355 355	if (ret == 0) {
356 356		switch (bo->mem.mem_type) {
357 357		case TTM_PL_VRAM:
@@ -392,12 +392,12 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo)
392 392
393int 393int
394nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible, 394nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
395 bool no_wait_reserve, bool no_wait_gpu) 395 bool no_wait_gpu)
396{ 396{
397 int ret; 397 int ret;
398 398
399 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible, 399 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
400 no_wait_reserve, no_wait_gpu); 400 interruptible, no_wait_gpu);
401 if (ret) 401 if (ret)
402 return ret; 402 return ret;
403 403
@@ -556,8 +556,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
556static int 556static int
557nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, 557nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
558 struct nouveau_bo *nvbo, bool evict, 558 struct nouveau_bo *nvbo, bool evict,
559 bool no_wait_reserve, bool no_wait_gpu, 559 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
560 struct ttm_mem_reg *new_mem)
561{ 560{
562 struct nouveau_fence *fence = NULL; 561 struct nouveau_fence *fence = NULL;
563 int ret; 562 int ret;
@@ -566,8 +565,8 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
566 if (ret) 565 if (ret)
567 return ret; 566 return ret;
568 567
569 ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict, 568 ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict,
570 no_wait_reserve, no_wait_gpu, new_mem); 569 no_wait_gpu, new_mem);
571 nouveau_fence_unref(&fence); 570 nouveau_fence_unref(&fence);
572 return ret; 571 return ret;
573} 572}
@@ -965,8 +964,7 @@ nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
965 964
966static int 965static int
967nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, 966nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
968 bool no_wait_reserve, bool no_wait_gpu, 967 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
969 struct ttm_mem_reg *new_mem)
970{ 968{
971 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 969 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
972 struct nouveau_channel *chan = drm->channel; 970 struct nouveau_channel *chan = drm->channel;
@@ -995,7 +993,6 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
995 ret = drm->ttm.move(chan, bo, &bo->mem, new_mem); 993 ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
996 if (ret == 0) { 994 if (ret == 0) {
997 ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict, 995 ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
998 no_wait_reserve,
999 no_wait_gpu, new_mem); 996 no_wait_gpu, new_mem);
1000 } 997 }
1001 998
@@ -1064,8 +1061,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
1064 1061
1065static int 1062static int
1066nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, 1063nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
1067 bool no_wait_reserve, bool no_wait_gpu, 1064 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
1068 struct ttm_mem_reg *new_mem)
1069{ 1065{
1070 u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; 1066 u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
1071 struct ttm_placement placement; 1067 struct ttm_placement placement;
@@ -1078,7 +1074,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
1078 1074
1079 tmp_mem = *new_mem; 1075 tmp_mem = *new_mem;
1080 tmp_mem.mm_node = NULL; 1076 tmp_mem.mm_node = NULL;
1081 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu); 1077 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
1082 if (ret) 1078 if (ret)
1083 return ret; 1079 return ret;
1084 1080
@@ -1086,11 +1082,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
1086 if (ret) 1082 if (ret)
1087 goto out; 1083 goto out;
1088 1084
1089 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem); 1085 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
1090 if (ret) 1086 if (ret)
1091 goto out; 1087 goto out;
1092 1088
1093 ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem); 1089 ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
1094out: 1090out:
1095 ttm_bo_mem_put(bo, &tmp_mem); 1091 ttm_bo_mem_put(bo, &tmp_mem);
1096 return ret; 1092 return ret;
@@ -1098,8 +1094,7 @@ out:
1098 1094
1099static int 1095static int
1100nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, 1096nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
1101 bool no_wait_reserve, bool no_wait_gpu, 1097 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
1102 struct ttm_mem_reg *new_mem)
1103{ 1098{
1104 u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; 1099 u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
1105 struct ttm_placement placement; 1100 struct ttm_placement placement;
@@ -1112,15 +1107,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
1112 1107
1113 tmp_mem = *new_mem; 1108 tmp_mem = *new_mem;
1114 tmp_mem.mm_node = NULL; 1109 tmp_mem.mm_node = NULL;
1115 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu); 1110 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
1116 if (ret) 1111 if (ret)
1117 return ret; 1112 return ret;
1118 1113
1119 ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem); 1114 ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
1120 if (ret) 1115 if (ret)
1121 goto out; 1116 goto out;
1122 1117
1123 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem); 1118 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
1124 if (ret) 1119 if (ret)
1125 goto out; 1120 goto out;
1126 1121
@@ -1195,8 +1190,7 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
1195 1190
1196static int 1191static int
1197nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, 1192nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1198 bool no_wait_reserve, bool no_wait_gpu, 1193 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
1199 struct ttm_mem_reg *new_mem)
1200{ 1194{
1201 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 1195 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1202 struct nouveau_bo *nvbo = nouveau_bo(bo); 1196 struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -1220,23 +1214,26 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1220 1214
1221 /* CPU copy if we have no accelerated method available */ 1215 /* CPU copy if we have no accelerated method available */
1222 if (!drm->ttm.move) { 1216 if (!drm->ttm.move) {
1223 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 1217 ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
1224 goto out; 1218 goto out;
1225 } 1219 }
1226 1220
1227 /* Hardware assisted copy. */ 1221 /* Hardware assisted copy. */
1228 if (new_mem->mem_type == TTM_PL_SYSTEM) 1222 if (new_mem->mem_type == TTM_PL_SYSTEM)
1229 ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); 1223 ret = nouveau_bo_move_flipd(bo, evict, intr,
1224 no_wait_gpu, new_mem);
1230 else if (old_mem->mem_type == TTM_PL_SYSTEM) 1225 else if (old_mem->mem_type == TTM_PL_SYSTEM)
1231 ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); 1226 ret = nouveau_bo_move_flips(bo, evict, intr,
1227 no_wait_gpu, new_mem);
1232 else 1228 else
1233 ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); 1229 ret = nouveau_bo_move_m2mf(bo, evict, intr,
1230 no_wait_gpu, new_mem);
1234 1231
1235 if (!ret) 1232 if (!ret)
1236 goto out; 1233 goto out;
1237 1234
1238 /* Fallback to software copy. */ 1235 /* Fallback to software copy. */
1239 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 1236 ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
1240 1237
1241out: 1238out:
1242 if (nv_device(drm->device)->card_type < NV_50) { 1239 if (nv_device(drm->device)->card_type < NV_50) {
@@ -1343,7 +1340,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1343 nvbo->placement.fpfn = 0; 1340 nvbo->placement.fpfn = 0;
1344 nvbo->placement.lpfn = mappable; 1341 nvbo->placement.lpfn = mappable;
1345 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0); 1342 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
1346 return nouveau_bo_validate(nvbo, false, true, false); 1343 return nouveau_bo_validate(nvbo, false, false);
1347} 1344}
1348 1345
1349static int 1346static int
@@ -1472,19 +1469,19 @@ nouveau_bo_fence_ref(void *sync_obj)
1472} 1469}
1473 1470
1474static bool 1471static bool
1475nouveau_bo_fence_signalled(void *sync_obj, void *sync_arg) 1472nouveau_bo_fence_signalled(void *sync_obj)
1476{ 1473{
1477 return nouveau_fence_done(sync_obj); 1474 return nouveau_fence_done(sync_obj);
1478} 1475}
1479 1476
1480static int 1477static int
1481nouveau_bo_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr) 1478nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr)
1482{ 1479{
1483 return nouveau_fence_wait(sync_obj, lazy, intr); 1480 return nouveau_fence_wait(sync_obj, lazy, intr);
1484} 1481}
1485 1482
1486static int 1483static int
1487nouveau_bo_fence_flush(void *sync_obj, void *sync_arg) 1484nouveau_bo_fence_flush(void *sync_obj)
1488{ 1485{
1489 return 0; 1486 return 0;
1490} 1487}
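The hunks above track a TTM interface change: the no_wait_reserve flag is dropped from nouveau_bo_validate() and from every TTM entry point it feeds (ttm_bo_validate(), ttm_bo_mem_space(), ttm_bo_move_ttm(), ttm_bo_move_memcpy(), ttm_bo_move_accel_cleanup()), leaving only interruptible and no_wait_gpu. A minimal sketch of a caller crossing the change; pin_for_display() is a hypothetical helper, not a function in this patch:

static int
pin_for_display(struct nouveau_bo *nvbo)
{
        /* before this series: nouveau_bo_validate(nvbo, false, true, false);
         * the third argument was no_wait_reserve, now gone */
        return nouveau_bo_validate(nvbo, false /* interruptible */,
                                   false /* no_wait_gpu */);
}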
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index dec51b1098fe..25ca37989d2c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -76,7 +76,7 @@ u32 nouveau_bo_rd32(struct nouveau_bo *, unsigned index);
76void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val); 76void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
77void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *); 77void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *);
78int nouveau_bo_validate(struct nouveau_bo *, bool interruptible, 78int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
79 bool no_wait_reserve, bool no_wait_gpu); 79 bool no_wait_gpu);
80 80
81struct nouveau_vma * 81struct nouveau_vma *
82nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *); 82nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index c1d7301c0e9c..174300b6a02e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -76,6 +76,8 @@ nouveau_channel_del(struct nouveau_channel **pchan)
76 nouveau_object_del(client, NVDRM_DEVICE, chan->push.handle); 76 nouveau_object_del(client, NVDRM_DEVICE, chan->push.handle);
77 nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma); 77 nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
78 nouveau_bo_unmap(chan->push.buffer); 78 nouveau_bo_unmap(chan->push.buffer);
79 if (chan->push.buffer && chan->push.buffer->pin_refcnt)
80 nouveau_bo_unpin(chan->push.buffer);
79 nouveau_bo_ref(NULL, &chan->push.buffer); 81 nouveau_bo_ref(NULL, &chan->push.buffer);
80 kfree(chan); 82 kfree(chan);
81 } 83 }
@@ -267,7 +269,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
267 struct nouveau_fb *pfb = nouveau_fb(device); 269 struct nouveau_fb *pfb = nouveau_fb(device);
268 struct nouveau_software_chan *swch; 270 struct nouveau_software_chan *swch;
269 struct nouveau_object *object; 271 struct nouveau_object *object;
270 struct nv_dma_class args; 272 struct nv_dma_class args = {};
271 int ret, i; 273 int ret, i;
272 274
273 /* allocate dma objects to cover all allowed vram, and gart */ 275 /* allocate dma objects to cover all allowed vram, and gart */
@@ -346,7 +348,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
346 /* allocate software object class (used for fences on <= nv05, and 348 /* allocate software object class (used for fences on <= nv05, and
347 * to signal flip completion), bind it to a subchannel. 349 * to signal flip completion), bind it to a subchannel.
348 */ 350 */
349 if (chan != chan->drm->cechan) { 351 if ((device->card_type < NV_E0) || gart /* nve0: want_nvsw */) {
350 ret = nouveau_object_new(nv_object(client), chan->handle, 352 ret = nouveau_object_new(nv_object(client), chan->handle,
351 NvSw, nouveau_abi16_swclass(chan->drm), 353 NvSw, nouveau_abi16_swclass(chan->drm),
352 NULL, 0, &object); 354 NULL, 0, &object);
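Two small robustness fixes sit in the channel code above: nv_dma_class args is now zero-initialized so fields the caller never sets cannot leak stack garbage into the object constructor, and channel teardown drops the push buffer's pin before releasing the final reference. A condensed sketch of that teardown sequence, using the field names from the hunk; push_buffer_fini() is a hypothetical helper:

static void
push_buffer_fini(struct nouveau_channel *chan)
{
        nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
        nouveau_bo_unmap(chan->push.buffer);
        /* pin_refcnt guards against a channel that failed creation
         * before its push buffer was ever pinned */
        if (chan->push.buffer && chan->push.buffer->pin_refcnt)
                nouveau_bo_unpin(chan->push.buffer);
        nouveau_bo_ref(NULL, &chan->push.buffer);       /* drop last ref */
}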
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index d3595b23434a..ac340ba32017 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -110,7 +110,6 @@ nouveau_connector_destroy(struct drm_connector *connector)
110 dev = nv_connector->base.dev; 110 dev = nv_connector->base.dev;
111 drm = nouveau_drm(dev); 111 drm = nouveau_drm(dev);
112 gpio = nouveau_gpio(drm->device); 112 gpio = nouveau_gpio(drm->device);
113 NV_DEBUG(drm, "\n");
114 113
115 if (gpio && nv_connector->hpd != DCB_GPIO_UNUSED) { 114 if (gpio && nv_connector->hpd != DCB_GPIO_UNUSED) {
116 gpio->isr_del(gpio, 0, nv_connector->hpd, 0xff, 115 gpio->isr_del(gpio, 0, nv_connector->hpd, 0xff,
@@ -221,7 +220,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
221 } 220 }
222 221
223 if (nv_connector->type == DCB_CONNECTOR_DVI_I) { 222 if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
224 drm_connector_property_set_value(connector, 223 drm_object_property_set_value(&connector->base,
225 dev->mode_config.dvi_i_subconnector_property, 224 dev->mode_config.dvi_i_subconnector_property,
226 nv_encoder->dcb->type == DCB_OUTPUT_TMDS ? 225 nv_encoder->dcb->type == DCB_OUTPUT_TMDS ?
227 DRM_MODE_SUBCONNECTOR_DVID : 226 DRM_MODE_SUBCONNECTOR_DVID :
@@ -929,8 +928,6 @@ nouveau_connector_create(struct drm_device *dev, int index)
929 int type, ret = 0; 928 int type, ret = 0;
930 bool dummy; 929 bool dummy;
931 930
932 NV_DEBUG(drm, "\n");
933
934 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 931 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
935 nv_connector = nouveau_connector(connector); 932 nv_connector = nouveau_connector(connector);
936 if (nv_connector->index == index) 933 if (nv_connector->index == index)
@@ -1043,7 +1040,7 @@ nouveau_connector_create(struct drm_device *dev, int index)
1043 1040
1044 /* Init DVI-I specific properties */ 1041 /* Init DVI-I specific properties */
1045 if (nv_connector->type == DCB_CONNECTOR_DVI_I) 1042 if (nv_connector->type == DCB_CONNECTOR_DVI_I)
1046 drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0); 1043 drm_object_attach_property(&connector->base, dev->mode_config.dvi_i_subconnector_property, 0);
1047 1044
1048 /* Add overscan compensation options to digital outputs */ 1045 /* Add overscan compensation options to digital outputs */
1049 if (disp->underscan_property && 1046 if (disp->underscan_property &&
@@ -1051,31 +1048,31 @@ nouveau_connector_create(struct drm_device *dev, int index)
1051 type == DRM_MODE_CONNECTOR_DVII || 1048 type == DRM_MODE_CONNECTOR_DVII ||
1052 type == DRM_MODE_CONNECTOR_HDMIA || 1049 type == DRM_MODE_CONNECTOR_HDMIA ||
1053 type == DRM_MODE_CONNECTOR_DisplayPort)) { 1050 type == DRM_MODE_CONNECTOR_DisplayPort)) {
1054 drm_connector_attach_property(connector, 1051 drm_object_attach_property(&connector->base,
1055 disp->underscan_property, 1052 disp->underscan_property,
1056 UNDERSCAN_OFF); 1053 UNDERSCAN_OFF);
1057 drm_connector_attach_property(connector, 1054 drm_object_attach_property(&connector->base,
1058 disp->underscan_hborder_property, 1055 disp->underscan_hborder_property,
1059 0); 1056 0);
1060 drm_connector_attach_property(connector, 1057 drm_object_attach_property(&connector->base,
1061 disp->underscan_vborder_property, 1058 disp->underscan_vborder_property,
1062 0); 1059 0);
1063 } 1060 }
1064 1061
1065 /* Add hue and saturation options */ 1062 /* Add hue and saturation options */
1066 if (disp->vibrant_hue_property) 1063 if (disp->vibrant_hue_property)
1067 drm_connector_attach_property(connector, 1064 drm_object_attach_property(&connector->base,
1068 disp->vibrant_hue_property, 1065 disp->vibrant_hue_property,
1069 90); 1066 90);
1070 if (disp->color_vibrance_property) 1067 if (disp->color_vibrance_property)
1071 drm_connector_attach_property(connector, 1068 drm_object_attach_property(&connector->base,
1072 disp->color_vibrance_property, 1069 disp->color_vibrance_property,
1073 150); 1070 150);
1074 1071
1075 switch (nv_connector->type) { 1072 switch (nv_connector->type) {
1076 case DCB_CONNECTOR_VGA: 1073 case DCB_CONNECTOR_VGA:
1077 if (nv_device(drm->device)->card_type >= NV_50) { 1074 if (nv_device(drm->device)->card_type >= NV_50) {
1078 drm_connector_attach_property(connector, 1075 drm_object_attach_property(&connector->base,
1079 dev->mode_config.scaling_mode_property, 1076 dev->mode_config.scaling_mode_property,
1080 nv_connector->scaling_mode); 1077 nv_connector->scaling_mode);
1081 } 1078 }
@@ -1088,18 +1085,18 @@ nouveau_connector_create(struct drm_device *dev, int index)
1088 default: 1085 default:
1089 nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN; 1086 nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
1090 1087
1091 drm_connector_attach_property(connector, 1088 drm_object_attach_property(&connector->base,
1092 dev->mode_config.scaling_mode_property, 1089 dev->mode_config.scaling_mode_property,
1093 nv_connector->scaling_mode); 1090 nv_connector->scaling_mode);
1094 if (disp->dithering_mode) { 1091 if (disp->dithering_mode) {
1095 nv_connector->dithering_mode = DITHERING_MODE_AUTO; 1092 nv_connector->dithering_mode = DITHERING_MODE_AUTO;
1096 drm_connector_attach_property(connector, 1093 drm_object_attach_property(&connector->base,
1097 disp->dithering_mode, 1094 disp->dithering_mode,
1098 nv_connector->dithering_mode); 1095 nv_connector->dithering_mode);
1099 } 1096 }
1100 if (disp->dithering_depth) { 1097 if (disp->dithering_depth) {
1101 nv_connector->dithering_depth = DITHERING_DEPTH_AUTO; 1098 nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
1102 drm_connector_attach_property(connector, 1099 drm_object_attach_property(&connector->base,
1103 disp->dithering_depth, 1100 disp->dithering_depth,
1104 nv_connector->dithering_depth); 1101 nv_connector->dithering_depth);
1105 } 1102 }
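The connector changes above are a mechanical rename driven by the DRM core: properties moved from struct drm_connector onto the generic mode object, so drm_connector_attach_property() and drm_connector_property_set_value() become drm_object_attach_property() and drm_object_property_set_value(), both taking &connector->base. A sketch of the new calling convention; the property and values are illustrative:

/* attach at init time, update later; both calls now address the
 * embedded drm_mode_object rather than the connector wrapper */
drm_object_attach_property(&connector->base,
                           dev->mode_config.scaling_mode_property,
                           DRM_MODE_SCALE_FULLSCREEN);
drm_object_property_set_value(&connector->base,
                              dev->mode_config.scaling_mode_property,
                              DRM_MODE_SCALE_CENTER);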
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index ebdb87670a8f..20eb84cce9e6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -28,6 +28,7 @@
28#define __NOUVEAU_CONNECTOR_H__ 28#define __NOUVEAU_CONNECTOR_H__
29 29
30#include <drm/drm_edid.h> 30#include <drm/drm_edid.h>
31#include "nouveau_crtc.h"
31 32
32struct nouveau_i2c_port; 33struct nouveau_i2c_port;
33 34
@@ -80,6 +81,21 @@ static inline struct nouveau_connector *nouveau_connector(
80 return container_of(con, struct nouveau_connector, base); 81 return container_of(con, struct nouveau_connector, base);
81} 82}
82 83
84static inline struct nouveau_connector *
85nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
86{
87 struct drm_device *dev = nv_crtc->base.dev;
88 struct drm_connector *connector;
89 struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
90
91 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
92 if (connector->encoder && connector->encoder->crtc == crtc)
93 return nouveau_connector(connector);
94 }
95
96 return NULL;
97}
98
83struct drm_connector * 99struct drm_connector *
84nouveau_connector_create(struct drm_device *, int index); 100nouveau_connector_create(struct drm_device *, int index);
85 101
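nouveau_crtc_connector_get() now lives here as a static inline (its declaration disappears from nouveau_crtc.h just below): it scans the mode config's connector list for the connector whose encoder currently drives the given CRTC. A usage sketch, assuming the caller holds the appropriate mode_config locking:

struct nouveau_connector *nv_connector =
        nouveau_crtc_connector_get(nv_crtc);
if (nv_connector && nv_connector->edid) {
        /* e.g. consult the attached monitor's EDID for mode limits */
}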
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index e6d0d1eb0133..d1e5890784d7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -82,16 +82,6 @@ static inline struct drm_crtc *to_drm_crtc(struct nouveau_crtc *crtc)
82 return &crtc->base; 82 return &crtc->base;
83} 83}
84 84
85int nv50_crtc_create(struct drm_device *dev, int index);
86int nv50_crtc_cursor_set(struct drm_crtc *drm_crtc, struct drm_file *file_priv,
87 uint32_t buffer_handle, uint32_t width,
88 uint32_t height);
89int nv50_crtc_cursor_move(struct drm_crtc *drm_crtc, int x, int y);
90
91int nv04_cursor_init(struct nouveau_crtc *); 85int nv04_cursor_init(struct nouveau_crtc *);
92int nv50_cursor_init(struct nouveau_crtc *);
93
94struct nouveau_connector *
95nouveau_crtc_connector_get(struct nouveau_crtc *crtc);
96 86
97#endif /* __NOUVEAU_CRTC_H__ */ 87#endif /* __NOUVEAU_CRTC_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 86124b131f4f..e4188f24fc75 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -98,12 +98,12 @@ nouveau_framebuffer_init(struct drm_device *dev,
98 nv_fb->r_dma = NvEvoVRAM_LP; 98 nv_fb->r_dma = NvEvoVRAM_LP;
99 99
100 switch (fb->depth) { 100 switch (fb->depth) {
101 case 8: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_8; break; 101 case 8: nv_fb->r_format = 0x1e00; break;
102 case 15: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_15; break; 102 case 15: nv_fb->r_format = 0xe900; break;
103 case 16: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_16; break; 103 case 16: nv_fb->r_format = 0xe800; break;
104 case 24: 104 case 24:
105 case 32: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_24; break; 105 case 32: nv_fb->r_format = 0xcf00; break;
106 case 30: nv_fb->r_format = NV50_EVO_CRTC_FB_DEPTH_30; break; 106 case 30: nv_fb->r_format = 0xd100; break;
107 default: 107 default:
108 NV_ERROR(drm, "unknown depth %d\n", fb->depth); 108 NV_ERROR(drm, "unknown depth %d\n", fb->depth);
109 return -EINVAL; 109 return -EINVAL;
@@ -324,7 +324,7 @@ nouveau_display_create(struct drm_device *dev)
324 disp->underscan_vborder_property = 324 disp->underscan_vborder_property =
325 drm_property_create_range(dev, 0, "underscan vborder", 0, 128); 325 drm_property_create_range(dev, 0, "underscan vborder", 0, 128);
326 326
327 if (gen == 1) { 327 if (gen >= 1) {
328 disp->vibrant_hue_property = 328 disp->vibrant_hue_property =
329 drm_property_create(dev, DRM_MODE_PROP_RANGE, 329 drm_property_create(dev, DRM_MODE_PROP_RANGE,
330 "vibrant hue", 2); 330 "vibrant hue", 2);
@@ -366,10 +366,7 @@ nouveau_display_create(struct drm_device *dev)
366 if (nv_device(drm->device)->card_type < NV_50) 366 if (nv_device(drm->device)->card_type < NV_50)
367 ret = nv04_display_create(dev); 367 ret = nv04_display_create(dev);
368 else 368 else
369 if (nv_device(drm->device)->card_type < NV_D0)
370 ret = nv50_display_create(dev); 369 ret = nv50_display_create(dev);
371 else
372 ret = nvd0_display_create(dev);
373 if (ret) 370 if (ret)
374 goto disp_create_err; 371 goto disp_create_err;
375 372
@@ -400,11 +397,12 @@ nouveau_display_destroy(struct drm_device *dev)
400 nouveau_backlight_exit(dev); 397 nouveau_backlight_exit(dev);
401 drm_vblank_cleanup(dev); 398 drm_vblank_cleanup(dev);
402 399
400 drm_kms_helper_poll_fini(dev);
401 drm_mode_config_cleanup(dev);
402
403 if (disp->dtor) 403 if (disp->dtor)
404 disp->dtor(dev); 404 disp->dtor(dev);
405 405
406 drm_kms_helper_poll_fini(dev);
407 drm_mode_config_cleanup(dev);
408 nouveau_drm(dev)->display = NULL; 406 nouveau_drm(dev)->display = NULL;
409 kfree(disp); 407 kfree(disp);
410} 408}
@@ -659,10 +657,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
659 657
660 /* Emit a page flip */ 658 /* Emit a page flip */
661 if (nv_device(drm->device)->card_type >= NV_50) { 659 if (nv_device(drm->device)->card_type >= NV_50) {
662 if (nv_device(drm->device)->card_type >= NV_D0) 660 ret = nv50_display_flip_next(crtc, fb, chan, 0);
663 ret = nvd0_display_flip_next(crtc, fb, chan, 0);
664 else
665 ret = nv50_display_flip_next(crtc, fb, chan);
666 if (ret) { 661 if (ret) {
667 mutex_unlock(&chan->cli->mutex); 662 mutex_unlock(&chan->cli->mutex);
668 goto fail_unreserve; 663 goto fail_unreserve;
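Two behavioural points in the display code above: the per-depth EVO framebuffer formats are now written as raw values instead of the deleted NV50_EVO_CRTC_FB_DEPTH_* names, and nouveau_display_destroy() tears down the KMS side before invoking the backend destructor, so poll workers and mode objects are gone while disp->dtor() can still reach live hardware state. A condensed sketch of the reordered teardown, assuming the structure shown in the hunk:

static void
display_destroy_sketch(struct drm_device *dev, struct nouveau_display *disp)
{
        /* KMS first: stop output polling and release mode objects ... */
        drm_kms_helper_poll_fini(dev);
        drm_mode_config_cleanup(dev);

        /* ... then let the hardware backend undo its own setup */
        if (disp->dtor)
                disp->dtor(dev);
}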
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 978a108ba7a1..59838651ee8f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -30,60 +30,17 @@
30#include "nouveau_encoder.h" 30#include "nouveau_encoder.h"
31#include "nouveau_crtc.h" 31#include "nouveau_crtc.h"
32 32
33#include <core/class.h>
34
33#include <subdev/gpio.h> 35#include <subdev/gpio.h>
34#include <subdev/i2c.h> 36#include <subdev/i2c.h>
35 37
36u8 *
37nouveau_dp_bios_data(struct drm_device *dev, struct dcb_output *dcb, u8 **entry)
38{
39 struct nouveau_drm *drm = nouveau_drm(dev);
40 struct bit_entry d;
41 u8 *table;
42 int i;
43
44 if (bit_table(dev, 'd', &d)) {
45 NV_ERROR(drm, "BIT 'd' table not found\n");
46 return NULL;
47 }
48
49 if (d.version != 1) {
50 NV_ERROR(drm, "BIT 'd' table version %d unknown\n", d.version);
51 return NULL;
52 }
53
54 table = ROMPTR(dev, d.data[0]);
55 if (!table) {
56 NV_ERROR(drm, "displayport table pointer invalid\n");
57 return NULL;
58 }
59
60 switch (table[0]) {
61 case 0x20:
62 case 0x21:
63 case 0x30:
64 case 0x40:
65 break;
66 default:
67 NV_ERROR(drm, "displayport table 0x%02x unknown\n", table[0]);
68 return NULL;
69 }
70
71 for (i = 0; i < table[3]; i++) {
72 *entry = ROMPTR(dev, table[table[1] + (i * table[2])]);
73 if (*entry && bios_encoder_match(dcb, ROM32((*entry)[0])))
74 return table;
75 }
76
77 NV_ERROR(drm, "displayport encoder table not found\n");
78 return NULL;
79}
80
81/****************************************************************************** 38/******************************************************************************
82 * link training 39 * link training
83 *****************************************************************************/ 40 *****************************************************************************/
84struct dp_state { 41struct dp_state {
85 struct nouveau_i2c_port *auxch; 42 struct nouveau_i2c_port *auxch;
86 struct dp_train_func *func; 43 struct nouveau_object *core;
87 struct dcb_output *dcb; 44 struct dcb_output *dcb;
88 int crtc; 45 int crtc;
89 u8 *dpcd; 46 u8 *dpcd;
@@ -97,13 +54,20 @@ static void
97dp_set_link_config(struct drm_device *dev, struct dp_state *dp) 54dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
98{ 55{
99 struct nouveau_drm *drm = nouveau_drm(dev); 56 struct nouveau_drm *drm = nouveau_drm(dev);
57 struct dcb_output *dcb = dp->dcb;
58 const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
59 const u32 moff = (dp->crtc << 3) | (link << 2) | or;
100 u8 sink[2]; 60 u8 sink[2];
61 u32 data;
101 62
102 NV_DEBUG(drm, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw); 63 NV_DEBUG(drm, "%d lanes at %d KB/s\n", dp->link_nr, dp->link_bw);
103 64
104 /* set desired link configuration on the source */ 65 /* set desired link configuration on the source */
105 dp->func->link_set(dev, dp->dcb, dp->crtc, dp->link_nr, dp->link_bw, 66 data = ((dp->link_bw / 27000) << 8) | dp->link_nr;
106 dp->dpcd[2] & DP_ENHANCED_FRAME_CAP); 67 if (dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)
68 data |= NV94_DISP_SOR_DP_LNKCTL_FRAME_ENH;
69
70 nv_call(dp->core, NV94_DISP_SOR_DP_LNKCTL + moff, data);
107 71
108 /* inform the sink of the new configuration */ 72 /* inform the sink of the new configuration */
109 sink[0] = dp->link_bw / 27000; 73 sink[0] = dp->link_bw / 27000;
@@ -118,11 +82,14 @@ static void
118dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 pattern) 82dp_set_training_pattern(struct drm_device *dev, struct dp_state *dp, u8 pattern)
119{ 83{
120 struct nouveau_drm *drm = nouveau_drm(dev); 84 struct nouveau_drm *drm = nouveau_drm(dev);
85 struct dcb_output *dcb = dp->dcb;
86 const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
87 const u32 moff = (dp->crtc << 3) | (link << 2) | or;
121 u8 sink_tp; 88 u8 sink_tp;
122 89
123 NV_DEBUG(drm, "training pattern %d\n", pattern); 90 NV_DEBUG(drm, "training pattern %d\n", pattern);
124 91
125 dp->func->train_set(dev, dp->dcb, pattern); 92 nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, pattern);
126 93
127 nv_rdaux(dp->auxch, DP_TRAINING_PATTERN_SET, &sink_tp, 1); 94 nv_rdaux(dp->auxch, DP_TRAINING_PATTERN_SET, &sink_tp, 1);
128 sink_tp &= ~DP_TRAINING_PATTERN_MASK; 95 sink_tp &= ~DP_TRAINING_PATTERN_MASK;
@@ -134,6 +101,9 @@ static int
134dp_link_train_commit(struct drm_device *dev, struct dp_state *dp) 101dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
135{ 102{
136 struct nouveau_drm *drm = nouveau_drm(dev); 103 struct nouveau_drm *drm = nouveau_drm(dev);
104 struct dcb_output *dcb = dp->dcb;
105 const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
106 const u32 moff = (dp->crtc << 3) | (link << 2) | or;
137 int i; 107 int i;
138 108
139 for (i = 0; i < dp->link_nr; i++) { 109 for (i = 0; i < dp->link_nr; i++) {
@@ -148,7 +118,8 @@ dp_link_train_commit(struct drm_device *dev, struct dp_state *dp)
148 dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; 118 dp->conf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
149 119
150 NV_DEBUG(drm, "config lane %d %02x\n", i, dp->conf[i]); 120 NV_DEBUG(drm, "config lane %d %02x\n", i, dp->conf[i]);
151 dp->func->train_adj(dev, dp->dcb, i, lvsw, lpre); 121
122 nv_call(dp->core, NV94_DISP_SOR_DP_DRVCTL(i) + moff, (lvsw << 8) | lpre);
152 } 123 }
153 124
154 return nv_wraux(dp->auxch, DP_TRAINING_LANE0_SET, dp->conf, 4); 125 return nv_wraux(dp->auxch, DP_TRAINING_LANE0_SET, dp->conf, 4);
@@ -234,59 +205,32 @@ dp_link_train_eq(struct drm_device *dev, struct dp_state *dp)
234} 205}
235 206
236static void 207static void
237dp_set_downspread(struct drm_device *dev, struct dp_state *dp, bool enable) 208dp_link_train_init(struct drm_device *dev, struct dp_state *dp, bool spread)
238{ 209{
239 u16 script = 0x0000; 210 struct dcb_output *dcb = dp->dcb;
240 u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry); 211 const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
241 if (table) { 212 const u32 moff = (dp->crtc << 3) | (link << 2) | or;
242 if (table[0] >= 0x20 && table[0] <= 0x30) { 213
243 if (enable) script = ROM16(entry[12]); 214 nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff, (spread ?
244 else script = ROM16(entry[14]); 215 NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON :
245 } else 216 NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_OFF) |
246 if (table[0] == 0x40) { 217 NV94_DISP_SOR_DP_TRAIN_OP_INIT);
247 if (enable) script = ROM16(entry[11]);
248 else script = ROM16(entry[13]);
249 }
250 }
251
252 nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
253}
254
255static void
256dp_link_train_init(struct drm_device *dev, struct dp_state *dp)
257{
258 u16 script = 0x0000;
259 u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry);
260 if (table) {
261 if (table[0] >= 0x20 && table[0] <= 0x30)
262 script = ROM16(entry[6]);
263 else
264 if (table[0] == 0x40)
265 script = ROM16(entry[5]);
266 }
267
268 nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc);
269} 218}
270 219
271static void 220static void
272dp_link_train_fini(struct drm_device *dev, struct dp_state *dp) 221dp_link_train_fini(struct drm_device *dev, struct dp_state *dp)
273{ 222{
274 u16 script = 0x0000; 223 struct dcb_output *dcb = dp->dcb;
275 u8 *entry, *table = nouveau_dp_bios_data(dev, dp->dcb, &entry); 224 const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
276 if (table) { 225 const u32 moff = (dp->crtc << 3) | (link << 2) | or;
277 if (table[0] >= 0x20 && table[0] <= 0x30)
278 script = ROM16(entry[8]);
279 else
280 if (table[0] == 0x40)
281 script = ROM16(entry[7]);
282 }
283 226
284 nouveau_bios_run_init_table(dev, script, dp->dcb, dp->crtc); 227 nv_call(dp->core, NV94_DISP_SOR_DP_TRAIN + moff,
228 NV94_DISP_SOR_DP_TRAIN_OP_FINI);
285} 229}
286 230
287static bool 231static bool
288nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate, 232nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
289 struct dp_train_func *func) 233 struct nouveau_object *core)
290{ 234{
291 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 235 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
292 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); 236 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
@@ -304,7 +248,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
304 if (!dp.auxch) 248 if (!dp.auxch)
305 return false; 249 return false;
306 250
307 dp.func = func; 251 dp.core = core;
308 dp.dcb = nv_encoder->dcb; 252 dp.dcb = nv_encoder->dcb;
309 dp.crtc = nv_crtc->index; 253 dp.crtc = nv_crtc->index;
310 dp.dpcd = nv_encoder->dp.dpcd; 254 dp.dpcd = nv_encoder->dp.dpcd;
@@ -318,11 +262,8 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
318 */ 262 */
319 gpio->irq(gpio, 0, nv_connector->hpd, 0xff, false); 263 gpio->irq(gpio, 0, nv_connector->hpd, 0xff, false);
320 264
321 /* enable down-spreading, if possible */ 265 /* enable down-spreading and execute pre-train script from vbios */
322 dp_set_downspread(dev, &dp, nv_encoder->dp.dpcd[3] & 1); 266 dp_link_train_init(dev, &dp, nv_encoder->dp.dpcd[3] & 1);
323
324 /* execute pre-train script from vbios */
325 dp_link_train_init(dev, &dp);
326 267
327 /* start off at highest link rate supported by encoder and display */ 268 /* start off at highest link rate supported by encoder and display */
328 while (*link_bw > nv_encoder->dp.link_bw) 269 while (*link_bw > nv_encoder->dp.link_bw)
@@ -365,7 +306,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate,
365 306
366void 307void
367nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate, 308nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
368 struct dp_train_func *func) 309 struct nouveau_object *core)
369{ 310{
370 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); 311 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
371 struct nouveau_drm *drm = nouveau_drm(encoder->dev); 312 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
@@ -385,7 +326,7 @@ nouveau_dp_dpms(struct drm_encoder *encoder, int mode, u32 datarate,
385 nv_wraux(auxch, DP_SET_POWER, &status, 1); 326 nv_wraux(auxch, DP_SET_POWER, &status, 1);
386 327
387 if (mode == DRM_MODE_DPMS_ON) 328 if (mode == DRM_MODE_DPMS_ON)
388 nouveau_dp_link_train(encoder, datarate, func); 329 nouveau_dp_link_train(encoder, datarate, core);
389} 330}
390 331
391static void 332static void
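The DisplayPort rework above removes both the dp_train_func callback table and the direct VBIOS table walking (nouveau_dp_bios_data(), dp_set_downspread() and friends); link training now issues methods on the display core object with nv_call(). The method offset packs the output resource, sublink and CRTC into the low bits. A sketch of that arithmetic, mirroring the hunks; dcb, crtc and core are assumed to come from the surrounding training state, and the values are illustrative:

/* method offset for per-SOR DP methods, as computed above */
const u32 or   = ffs(dcb->or) - 1;              /* output resource index */
const u32 link = !(dcb->sorconf.link & 1);      /* sublink select */
const u32 moff = (crtc << 3) | (link << 2) | or;

/* e.g. kick off training, letting the core run the VBIOS scripts */
nv_call(core, NV94_DISP_SOR_DP_TRAIN + moff,
        NV94_DISP_SOR_DP_TRAIN_INIT_SPREAD_ON |
        NV94_DISP_SOR_DP_TRAIN_OP_INIT);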
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 8503b2ea570a..01c403ddb99b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -49,8 +49,6 @@
49#include "nouveau_fbcon.h" 49#include "nouveau_fbcon.h"
50#include "nouveau_fence.h" 50#include "nouveau_fence.h"
51 51
52#include "nouveau_ttm.h"
53
54MODULE_PARM_DESC(config, "option string to pass to driver core"); 52MODULE_PARM_DESC(config, "option string to pass to driver core");
55static char *nouveau_config; 53static char *nouveau_config;
56module_param_named(config, nouveau_config, charp, 0400); 54module_param_named(config, nouveau_config, charp, 0400);
@@ -149,7 +147,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
149 NV_ERROR(drm, "failed to create ce channel, %d\n", ret); 147 NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
150 148
151 arg0 = NVE0_CHANNEL_IND_ENGINE_GR; 149 arg0 = NVE0_CHANNEL_IND_ENGINE_GR;
152 arg1 = 0; 150 arg1 = 1;
153 } else { 151 } else {
154 arg0 = NvDmaFB; 152 arg0 = NvDmaFB;
155 arg1 = NvDmaTT; 153 arg1 = NvDmaTT;
@@ -224,6 +222,7 @@ nouveau_drm_probe(struct pci_dev *pdev, const struct pci_device_id *pent)
224 boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; 222 boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
225#endif 223#endif
226 remove_conflicting_framebuffers(aper, "nouveaufb", boot); 224 remove_conflicting_framebuffers(aper, "nouveaufb", boot);
225 kfree(aper);
227 226
228 ret = nouveau_device_create(pdev, nouveau_name(pdev), pci_name(pdev), 227 ret = nouveau_device_create(pdev, nouveau_name(pdev), pci_name(pdev),
229 nouveau_config, nouveau_debug, &device); 228 nouveau_config, nouveau_debug, &device);
@@ -395,17 +394,12 @@ nouveau_drm_remove(struct pci_dev *pdev)
395} 394}
396 395
397int 396int
398nouveau_drm_suspend(struct pci_dev *pdev, pm_message_t pm_state) 397nouveau_do_suspend(struct drm_device *dev)
399{ 398{
400 struct drm_device *dev = pci_get_drvdata(pdev);
401 struct nouveau_drm *drm = nouveau_drm(dev); 399 struct nouveau_drm *drm = nouveau_drm(dev);
402 struct nouveau_cli *cli; 400 struct nouveau_cli *cli;
403 int ret; 401 int ret;
404 402
405 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
406 pm_state.event == PM_EVENT_PRETHAW)
407 return 0;
408
409 if (dev->mode_config.num_crtc) { 403 if (dev->mode_config.num_crtc) {
410 NV_INFO(drm, "suspending fbcon...\n"); 404 NV_INFO(drm, "suspending fbcon...\n");
411 nouveau_fbcon_set_suspend(dev, 1); 405 nouveau_fbcon_set_suspend(dev, 1);
@@ -436,13 +430,6 @@ nouveau_drm_suspend(struct pci_dev *pdev, pm_message_t pm_state)
436 goto fail_client; 430 goto fail_client;
437 431
438 nouveau_agp_fini(drm); 432 nouveau_agp_fini(drm);
439
440 pci_save_state(pdev);
441 if (pm_state.event == PM_EVENT_SUSPEND) {
442 pci_disable_device(pdev);
443 pci_set_power_state(pdev, PCI_D3hot);
444 }
445
446 return 0; 433 return 0;
447 434
448fail_client: 435fail_client:
@@ -457,24 +444,33 @@ fail_client:
457 return ret; 444 return ret;
458} 445}
459 446
460int 447int nouveau_pmops_suspend(struct device *dev)
461nouveau_drm_resume(struct pci_dev *pdev)
462{ 448{
463 struct drm_device *dev = pci_get_drvdata(pdev); 449 struct pci_dev *pdev = to_pci_dev(dev);
464 struct nouveau_drm *drm = nouveau_drm(dev); 450 struct drm_device *drm_dev = pci_get_drvdata(pdev);
465 struct nouveau_cli *cli;
466 int ret; 451 int ret;
467 452
468 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 453 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
469 return 0; 454 return 0;
470 455
471 NV_INFO(drm, "re-enabling device...\n"); 456 ret = nouveau_do_suspend(drm_dev);
472 pci_set_power_state(pdev, PCI_D0);
473 pci_restore_state(pdev);
474 ret = pci_enable_device(pdev);
475 if (ret) 457 if (ret)
476 return ret; 458 return ret;
477 pci_set_master(pdev); 459
460 pci_save_state(pdev);
461 pci_disable_device(pdev);
462 pci_set_power_state(pdev, PCI_D3hot);
463
464 return 0;
465}
466
467int
468nouveau_do_resume(struct drm_device *dev)
469{
470 struct nouveau_drm *drm = nouveau_drm(dev);
471 struct nouveau_cli *cli;
472
473 NV_INFO(drm, "re-enabling device...\n");
478 474
479 nouveau_agp_reset(drm); 475 nouveau_agp_reset(drm);
480 476
@@ -500,6 +496,42 @@ nouveau_drm_resume(struct pci_dev *pdev)
500 return 0; 496 return 0;
501} 497}
502 498
499int nouveau_pmops_resume(struct device *dev)
500{
501 struct pci_dev *pdev = to_pci_dev(dev);
502 struct drm_device *drm_dev = pci_get_drvdata(pdev);
503 int ret;
504
505 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
506 return 0;
507
508 pci_set_power_state(pdev, PCI_D0);
509 pci_restore_state(pdev);
510 ret = pci_enable_device(pdev);
511 if (ret)
512 return ret;
513 pci_set_master(pdev);
514
515 return nouveau_do_resume(drm_dev);
516}
517
518static int nouveau_pmops_freeze(struct device *dev)
519{
520 struct pci_dev *pdev = to_pci_dev(dev);
521 struct drm_device *drm_dev = pci_get_drvdata(pdev);
522
523 return nouveau_do_suspend(drm_dev);
524}
525
526static int nouveau_pmops_thaw(struct device *dev)
527{
528 struct pci_dev *pdev = to_pci_dev(dev);
529 struct drm_device *drm_dev = pci_get_drvdata(pdev);
530
531 return nouveau_do_resume(drm_dev);
532}
533
534
503static int 535static int
504nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) 536nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
505{ 537{
@@ -652,14 +684,22 @@ nouveau_drm_pci_table[] = {
652 {} 684 {}
653}; 685};
654 686
687static const struct dev_pm_ops nouveau_pm_ops = {
688 .suspend = nouveau_pmops_suspend,
689 .resume = nouveau_pmops_resume,
690 .freeze = nouveau_pmops_freeze,
691 .thaw = nouveau_pmops_thaw,
692 .poweroff = nouveau_pmops_freeze,
693 .restore = nouveau_pmops_resume,
694};
695
655static struct pci_driver 696static struct pci_driver
656nouveau_drm_pci_driver = { 697nouveau_drm_pci_driver = {
657 .name = "nouveau", 698 .name = "nouveau",
658 .id_table = nouveau_drm_pci_table, 699 .id_table = nouveau_drm_pci_table,
659 .probe = nouveau_drm_probe, 700 .probe = nouveau_drm_probe,
660 .remove = nouveau_drm_remove, 701 .remove = nouveau_drm_remove,
661 .suspend = nouveau_drm_suspend, 702 .driver.pm = &nouveau_pm_ops,
662 .resume = nouveau_drm_resume,
663}; 703};
664 704
665static int __init 705static int __init
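The suspend/resume rework above splits the legacy pci_driver .suspend/.resume entry points into device-agnostic halves (nouveau_do_suspend()/nouveau_do_resume()) plus thin PCI wrappers registered through dev_pm_ops. Note that freeze/thaw reuse the quiesce/revive halves without the PCI D-state transitions, since hibernation must quiesce the device but the core manages power differently. A minimal skeleton of the same pattern for a hypothetical PCI driver; all mydrv_* names are illustrative, assuming <linux/pci.h> and <linux/pm.h>:

static int mydrv_do_quiesce(struct pci_dev *pdev)
{
        /* driver-specific suspend work, no PCI power handling */
        return 0;
}

static int mydrv_do_revive(struct pci_dev *pdev)
{
        /* driver-specific resume work */
        return 0;
}

static int mydrv_suspend(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        int ret = mydrv_do_quiesce(pdev);
        if (ret)
                return ret;
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, PCI_D3hot);
        return 0;
}

static int mydrv_resume(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        int ret;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        ret = pci_enable_device(pdev);
        if (ret)
                return ret;
        pci_set_master(pdev);
        return mydrv_do_revive(pdev);
}

static int mydrv_freeze(struct device *dev)
{
        /* hibernation: quiesce only, the core handles power state */
        return mydrv_do_quiesce(to_pci_dev(dev));
}

static int mydrv_thaw(struct device *dev)
{
        return mydrv_do_revive(to_pci_dev(dev));
}

static const struct dev_pm_ops mydrv_pm_ops = {
        .suspend  = mydrv_suspend,
        .resume   = mydrv_resume,
        .freeze   = mydrv_freeze,
        .thaw     = mydrv_thaw,
        .poweroff = mydrv_freeze,
        .restore  = mydrv_resume,
};

/* wired up via .driver.pm = &mydrv_pm_ops in struct pci_driver,
 * replacing the legacy .suspend/.resume members */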
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index a10169927086..aa89eb938b47 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -129,8 +129,8 @@ nouveau_dev(struct drm_device *dev)
129 return nv_device(nouveau_drm(dev)->device); 129 return nv_device(nouveau_drm(dev)->device);
130} 130}
131 131
132int nouveau_drm_suspend(struct pci_dev *, pm_message_t); 132int nouveau_pmops_suspend(struct device *);
133int nouveau_drm_resume(struct pci_dev *); 133int nouveau_pmops_resume(struct device *);
134 134
135#define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args) 135#define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args)
136#define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args) 136#define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args)
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 6a17bf2ba9a4..d0d95bd511ab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -93,14 +93,9 @@ get_slave_funcs(struct drm_encoder *enc)
93/* nouveau_dp.c */ 93/* nouveau_dp.c */
94bool nouveau_dp_detect(struct drm_encoder *); 94bool nouveau_dp_detect(struct drm_encoder *);
95void nouveau_dp_dpms(struct drm_encoder *, int mode, u32 datarate, 95void nouveau_dp_dpms(struct drm_encoder *, int mode, u32 datarate,
96 struct dp_train_func *); 96 struct nouveau_object *);
97u8 *nouveau_dp_bios_data(struct drm_device *, struct dcb_output *, u8 **);
98 97
99struct nouveau_connector * 98struct nouveau_connector *
100nouveau_encoder_connector_get(struct nouveau_encoder *encoder); 99nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
101int nv50_sor_create(struct drm_connector *, struct dcb_output *);
102void nv50_sor_dp_calc_tu(struct drm_device *, int, int, u32, u32);
103int nv50_dac_create(struct drm_connector *, struct dcb_output *);
104
105 100
106#endif /* __NOUVEAU_ENCODER_H__ */ 101#endif /* __NOUVEAU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 5e2f52158f19..8bf695c52f95 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -433,7 +433,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
433 return ret; 433 return ret;
434 } 434 }
435 435
436 ret = nouveau_bo_validate(nvbo, true, false, false); 436 ret = nouveau_bo_validate(nvbo, true, false);
437 if (unlikely(ret)) { 437 if (unlikely(ret)) {
438 if (ret != -ERESTARTSYS) 438 if (ret != -ERESTARTSYS)
439 NV_ERROR(drm, "fail ttm_validate\n"); 439 NV_ERROR(drm, "fail ttm_validate\n");
diff --git a/drivers/gpu/drm/nouveau/nouveau_hdmi.c b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
deleted file mode 100644
index 2c672cebc889..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_hdmi.c
+++ /dev/null
@@ -1,261 +0,0 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26#include "nouveau_drm.h"
27#include "nouveau_connector.h"
28#include "nouveau_encoder.h"
29#include "nouveau_crtc.h"
30
31static bool
32hdmi_sor(struct drm_encoder *encoder)
33{
34 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
35 if (nv_device(drm->device)->chipset < 0xa3 ||
36 nv_device(drm->device)->chipset == 0xaa ||
37 nv_device(drm->device)->chipset == 0xac)
38 return false;
39 return true;
40}
41
42static inline u32
43hdmi_base(struct drm_encoder *encoder)
44{
45 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
46 struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
47 if (!hdmi_sor(encoder))
48 return 0x616500 + (nv_crtc->index * 0x800);
49 return 0x61c500 + (nv_encoder->or * 0x800);
50}
51
52static void
53hdmi_wr32(struct drm_encoder *encoder, u32 reg, u32 val)
54{
55 struct nouveau_device *device = nouveau_dev(encoder->dev);
56 nv_wr32(device, hdmi_base(encoder) + reg, val);
57}
58
59static u32
60hdmi_rd32(struct drm_encoder *encoder, u32 reg)
61{
62 struct nouveau_device *device = nouveau_dev(encoder->dev);
63 return nv_rd32(device, hdmi_base(encoder) + reg);
64}
65
66static u32
67hdmi_mask(struct drm_encoder *encoder, u32 reg, u32 mask, u32 val)
68{
69 u32 tmp = hdmi_rd32(encoder, reg);
70 hdmi_wr32(encoder, reg, (tmp & ~mask) | val);
71 return tmp;
72}
73
74static void
75nouveau_audio_disconnect(struct drm_encoder *encoder)
76{
77 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
78 struct nouveau_device *device = nouveau_dev(encoder->dev);
79 u32 or = nv_encoder->or * 0x800;
80
81 if (hdmi_sor(encoder))
82 nv_mask(device, 0x61c448 + or, 0x00000003, 0x00000000);
83}
84
85static void
86nouveau_audio_mode_set(struct drm_encoder *encoder,
87 struct drm_display_mode *mode)
88{
89 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
90 struct nouveau_device *device = nouveau_dev(encoder->dev);
91 struct nouveau_connector *nv_connector;
92 u32 or = nv_encoder->or * 0x800;
93 int i;
94
95 nv_connector = nouveau_encoder_connector_get(nv_encoder);
96 if (!drm_detect_monitor_audio(nv_connector->edid)) {
97 nouveau_audio_disconnect(encoder);
98 return;
99 }
100
101 if (hdmi_sor(encoder)) {
102 nv_mask(device, 0x61c448 + or, 0x00000001, 0x00000001);
103
104 drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
105 if (nv_connector->base.eld[0]) {
106 u8 *eld = nv_connector->base.eld;
107 for (i = 0; i < eld[2] * 4; i++)
108 nv_wr32(device, 0x61c440 + or, (i << 8) | eld[i]);
109 for (i = eld[2] * 4; i < 0x60; i++)
110 nv_wr32(device, 0x61c440 + or, (i << 8) | 0x00);
111 nv_mask(device, 0x61c448 + or, 0x00000002, 0x00000002);
112 }
113 }
114}
115
116static void
117nouveau_hdmi_infoframe(struct drm_encoder *encoder, u32 ctrl, u8 *frame)
118{
119 /* calculate checksum for the infoframe */
120 u8 sum = 0, i;
121 for (i = 0; i < frame[2]; i++)
122 sum += frame[i];
123 frame[3] = 256 - sum;
124
125 /* disable infoframe, and write header */
126 hdmi_mask(encoder, ctrl + 0x00, 0x00000001, 0x00000000);
127 hdmi_wr32(encoder, ctrl + 0x08, *(u32 *)frame & 0xffffff);
128
129 /* register scans tell me the audio infoframe has only one set of
130 * subpack regs, according to tegra (gee nvidia, it'd be nice if we
131 * could get those docs too!), the hdmi block pads out the rest of
132 * the packet on its own.
133 */
134 if (ctrl == 0x020)
135 frame[2] = 6;
136
137 /* write out checksum and data, weird 7 byte register pairs */
138 for (i = 0; i < frame[2] + 1; i += 7) {
139 u32 rsubpack = ctrl + 0x0c + ((i / 7) * 8);
140 u32 *subpack = (u32 *)&frame[3 + i];
141 hdmi_wr32(encoder, rsubpack + 0, subpack[0]);
142 hdmi_wr32(encoder, rsubpack + 4, subpack[1] & 0xffffff);
143 }
144
145 /* enable the infoframe */
146 hdmi_mask(encoder, ctrl, 0x00000001, 0x00000001);
147}
148
149static void
150nouveau_hdmi_video_infoframe(struct drm_encoder *encoder,
151 struct drm_display_mode *mode)
152{
153 const u8 Y = 0, A = 0, B = 0, S = 0, C = 0, M = 0, R = 0;
154 const u8 ITC = 0, EC = 0, Q = 0, SC = 0, VIC = 0, PR = 0;
155 const u8 bar_top = 0, bar_bottom = 0, bar_left = 0, bar_right = 0;
156 u8 frame[20];
157
158 frame[0x00] = 0x82; /* AVI infoframe */
159 frame[0x01] = 0x02; /* version */
160 frame[0x02] = 0x0d; /* length */
161 frame[0x03] = 0x00;
162 frame[0x04] = (Y << 5) | (A << 4) | (B << 2) | S;
163 frame[0x05] = (C << 6) | (M << 4) | R;
164 frame[0x06] = (ITC << 7) | (EC << 4) | (Q << 2) | SC;
165 frame[0x07] = VIC;
166 frame[0x08] = PR;
167 frame[0x09] = bar_top & 0xff;
168 frame[0x0a] = bar_top >> 8;
169 frame[0x0b] = bar_bottom & 0xff;
170 frame[0x0c] = bar_bottom >> 8;
171 frame[0x0d] = bar_left & 0xff;
172 frame[0x0e] = bar_left >> 8;
173 frame[0x0f] = bar_right & 0xff;
174 frame[0x10] = bar_right >> 8;
175 frame[0x11] = 0x00;
176 frame[0x12] = 0x00;
177 frame[0x13] = 0x00;
178
179 nouveau_hdmi_infoframe(encoder, 0x020, frame);
180}
181
182static void
183nouveau_hdmi_audio_infoframe(struct drm_encoder *encoder,
184 struct drm_display_mode *mode)
185{
186 const u8 CT = 0x00, CC = 0x01, ceaSS = 0x00, SF = 0x00, FMT = 0x00;
187 const u8 CA = 0x00, DM_INH = 0, LSV = 0x00;
188 u8 frame[12];
189
190 frame[0x00] = 0x84; /* Audio infoframe */
191 frame[0x01] = 0x01; /* version */
192 frame[0x02] = 0x0a; /* length */
193 frame[0x03] = 0x00;
194 frame[0x04] = (CT << 4) | CC;
195 frame[0x05] = (SF << 2) | ceaSS;
196 frame[0x06] = FMT;
197 frame[0x07] = CA;
198 frame[0x08] = (DM_INH << 7) | (LSV << 3);
199 frame[0x09] = 0x00;
200 frame[0x0a] = 0x00;
201 frame[0x0b] = 0x00;
202
203 nouveau_hdmi_infoframe(encoder, 0x000, frame);
204}
205
206static void
207nouveau_hdmi_disconnect(struct drm_encoder *encoder)
208{
209 nouveau_audio_disconnect(encoder);
210
211 /* disable audio and avi infoframes */
212 hdmi_mask(encoder, 0x000, 0x00000001, 0x00000000);
213 hdmi_mask(encoder, 0x020, 0x00000001, 0x00000000);
214
215 /* disable hdmi */
216 hdmi_mask(encoder, 0x0a4, 0x40000000, 0x00000000);
217}
218
219void
220nouveau_hdmi_mode_set(struct drm_encoder *encoder,
221 struct drm_display_mode *mode)
222{
223 struct nouveau_device *device = nouveau_dev(encoder->dev);
224 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
225 struct nouveau_connector *nv_connector;
226 u32 max_ac_packet, rekey;
227
228 nv_connector = nouveau_encoder_connector_get(nv_encoder);
229 if (!mode || !nv_connector || !nv_connector->edid ||
230 !drm_detect_hdmi_monitor(nv_connector->edid)) {
231 nouveau_hdmi_disconnect(encoder);
232 return;
233 }
234
235 nouveau_hdmi_video_infoframe(encoder, mode);
236 nouveau_hdmi_audio_infoframe(encoder, mode);
237
238 hdmi_mask(encoder, 0x0d0, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
239 hdmi_mask(encoder, 0x068, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
240 hdmi_mask(encoder, 0x078, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
241
242 nv_mask(device, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
243 nv_mask(device, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
244 nv_mask(device, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
245
246 /* value matches nvidia binary driver, and tegra constant */
247 rekey = 56;
248
249 max_ac_packet = mode->htotal - mode->hdisplay;
250 max_ac_packet -= rekey;
251 max_ac_packet -= 18; /* constant from tegra */
252 max_ac_packet /= 32;
253
254 /* enable hdmi */
255 hdmi_mask(encoder, 0x0a4, 0x5f1f003f, 0x40000000 | /* enable */
256 0x1f000000 | /* unknown */
257 max_ac_packet << 16 |
258 rekey);
259
260 nouveau_audio_mode_set(encoder, mode);
261}
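The deleted file built AVI and audio infoframes by hand, including the checksum byte. CEA-861 defines that checksum so the header, checksum and payload bytes sum to zero modulo 256 (the deleted loop only ran over frame[2] bytes, which works out for these zero-padded frames). A standalone plain-C restatement of the rule, following the spec's header-plus-checksum-plus-payload bound:

#include <stdint.h>
#include <stdio.h>

/* checksum slot is frame[3]; frame[2] holds the payload length */
static uint8_t infoframe_checksum(const uint8_t *frame)
{
        uint8_t sum = 0;
        int i;

        for (i = 0; i < 4 + frame[2]; i++)      /* header + csum + payload */
                sum += frame[i];                /* csum slot assumed 0 */
        return (uint8_t)(0x100 - sum);
}

int main(void)
{
        /* audio infoframe as built by the deleted code: type 0x84,
         * version 1, length 10, two-channel PCM defaults */
        uint8_t frame[14] = { 0x84, 0x01, 0x0a, 0x00, 0x01 };

        frame[3] = infoframe_checksum(frame);
        printf("checksum: 0x%02x\n", frame[3]); /* prints 0x70 */
        return 0;
}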
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 1d8cb506a28a..1303680affd3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -60,18 +60,6 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
60 return IRQ_NONE; 60 return IRQ_NONE;
61 61
62 nv_subdev(pmc)->intr(nv_subdev(pmc)); 62 nv_subdev(pmc)->intr(nv_subdev(pmc));
63
64 if (dev->mode_config.num_crtc) {
65 if (device->card_type >= NV_D0) {
66 if (nv_rd32(device, 0x000100) & 0x04000000)
67 nvd0_display_intr(dev);
68 } else
69 if (device->card_type >= NV_50) {
70 if (nv_rd32(device, 0x000100) & 0x04000000)
71 nv50_display_intr(dev);
72 }
73 }
74
75 return IRQ_HANDLED; 63 return IRQ_HANDLED;
76} 64}
77 65
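After the hunk above, the DRM-level interrupt handler no longer peeks at the master interrupt register (0x000100) for display bits; everything funnels through the PMC subdev, which presumably dispatches display interrupts inside the core engine code after this series. The surviving handler reduces to roughly this shape (a condensed sketch of the code above, not new logic):

static irqreturn_t
nouveau_irq_handler_sketch(struct nouveau_mc *pmc)
{
        /* single dispatch point: the master controller decodes the
         * pending bits and forwards to whichever subdev/engine raised
         * the interrupt, display included */
        nv_subdev(pmc)->intr(nv_subdev(pmc));
        return IRQ_HANDLED;
}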
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 366462cf8a2c..3543fec2355e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -155,10 +155,6 @@ nouveau_prime_new(struct drm_device *dev,
155 return ret; 155 return ret;
156 nvbo = *pnvbo; 156 nvbo = *pnvbo;
157 157
158 /* we restrict allowed domains on nv50+ to only the types
159 * that were requested at creation time. not possible on
160 * earlier chips without busting the ABI.
161 */
162 nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART; 158 nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
163 nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size); 159 nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
164 if (!nvbo->gem) { 160 if (!nvbo->gem) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 6f0ac64873df..25d3495725eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -31,12 +31,11 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
31 enum vga_switcheroo_state state) 31 enum vga_switcheroo_state state)
32{ 32{
33 struct drm_device *dev = pci_get_drvdata(pdev); 33 struct drm_device *dev = pci_get_drvdata(pdev);
34 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
35 34
36 if (state == VGA_SWITCHEROO_ON) { 35 if (state == VGA_SWITCHEROO_ON) {
37 printk(KERN_ERR "VGA switcheroo: switched nouveau on\n"); 36 printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
38 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 37 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
39 nouveau_drm_resume(pdev); 38 nouveau_pmops_resume(&pdev->dev);
40 drm_kms_helper_poll_enable(dev); 39 drm_kms_helper_poll_enable(dev);
41 dev->switch_power_state = DRM_SWITCH_POWER_ON; 40 dev->switch_power_state = DRM_SWITCH_POWER_ON;
42 } else { 41 } else {
@@ -44,7 +43,7 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
44 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 43 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
45 drm_kms_helper_poll_disable(dev); 44 drm_kms_helper_poll_disable(dev);
46 nouveau_switcheroo_optimus_dsm(); 45 nouveau_switcheroo_optimus_dsm();
47 nouveau_drm_suspend(pdev, pmm); 46 nouveau_pmops_suspend(&pdev->dev);
48 dev->switch_power_state = DRM_SWITCH_POWER_OFF; 47 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
49 } 48 }
50} 49}
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 82a0d9c6cda3..6578cd28c556 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -730,6 +730,7 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
730 drm_crtc_cleanup(crtc); 730 drm_crtc_cleanup(crtc);
731 731
732 nouveau_bo_unmap(nv_crtc->cursor.nvbo); 732 nouveau_bo_unmap(nv_crtc->cursor.nvbo);
733 nouveau_bo_unpin(nv_crtc->cursor.nvbo);
733 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); 734 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
734 kfree(nv_crtc); 735 kfree(nv_crtc);
735} 736}
@@ -1056,8 +1057,11 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
 			     0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
 	if (!ret) {
 		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
-		if (!ret)
+		if (!ret) {
 			ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
+			if (ret)
+				nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+		}
 		if (ret)
 			nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
 	}
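Both hunks above fix the same leak: on a map failure the buffer stayed pinned forever. The pattern, isolated, with the names taken from the hunk (the surrounding function is assumed):

/* Sketch of the allocate/pin/map ladder this hunk repairs: each
 * failing step undoes exactly the steps that already succeeded. */
	ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
			     0, 0x0000, NULL, &nvbo);
	if (!ret) {
		ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
		if (!ret) {
			ret = nouveau_bo_map(nvbo);
			if (ret)
				nouveau_bo_unpin(nvbo);	/* undo the pin */
		}
		if (ret)
			nouveau_bo_ref(NULL, &nvbo);	/* drop the object */
	}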
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 846050f04c23..2cd6fb8c548e 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -60,8 +60,6 @@ nv04_display_create(struct drm_device *dev)
 	struct nv04_display *disp;
 	int i, ret;
 
-	NV_DEBUG(drm, "\n");
-
 	disp = kzalloc(sizeof(*disp), GFP_KERNEL);
 	if (!disp)
 		return -ENOMEM;
@@ -132,13 +130,10 @@ nv04_display_create(struct drm_device *dev)
 void
 nv04_display_destroy(struct drm_device *dev)
 {
-	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nv04_display *disp = nv04_display(dev);
 	struct drm_encoder *encoder;
 	struct drm_crtc *crtc;
 
-	NV_DEBUG(drm, "\n");
-
 	/* Turn every CRTC off. */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct drm_mode_set modeset = {
diff --git a/drivers/gpu/drm/nouveau/nv10_fence.c b/drivers/gpu/drm/nouveau/nv10_fence.c
index ce752bf5cc4e..7ae7f97a6d4d 100644
--- a/drivers/gpu/drm/nouveau/nv10_fence.c
+++ b/drivers/gpu/drm/nouveau/nv10_fence.c
@@ -155,6 +155,8 @@ nv10_fence_destroy(struct nouveau_drm *drm)
 {
 	struct nv10_fence_priv *priv = drm->fence;
 	nouveau_bo_unmap(priv->bo);
+	if (priv->bo)
+		nouveau_bo_unpin(priv->bo);
 	nouveau_bo_ref(NULL, &priv->bo);
 	drm->fence = NULL;
 	kfree(priv);
@@ -183,8 +185,11 @@ nv10_fence_create(struct nouveau_drm *drm)
 			     0, 0x0000, NULL, &priv->bo);
 	if (!ret) {
 		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
-		if (!ret)
+		if (!ret) {
 			ret = nouveau_bo_map(priv->bo);
+			if (ret)
+				nouveau_bo_unpin(priv->bo);
+		}
 		if (ret)
 			nouveau_bo_ref(NULL, &priv->bo);
 	}
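The teardown side mirrors the creation ladder: unmap, unpin, then release the final reference. Note the NULL check before unpin, which covers a create that failed partway. The ordering, as in the hunk above:

/* Sketch: teardown in strict reverse order of setup.  priv->bo may
 * be NULL if creation failed partway, hence the guard. */
	nouveau_bo_unmap(priv->bo);
	if (priv->bo)
		nouveau_bo_unpin(priv->bo);
	nouveau_bo_ref(NULL, &priv->bo);	/* drops the last reference */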
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 897b63621e2d..2ca276ada507 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -195,7 +195,7 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
 		break;
 	}
 
-	drm_connector_property_set_value(connector,
+	drm_object_property_set_value(&connector->base,
 					 conf->tv_subconnector_property,
 					 tv_enc->subconnector);
 
@@ -672,25 +672,25 @@ static int nv17_tv_create_resources(struct drm_encoder *encoder,
 
 	drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names);
 
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				      conf->tv_select_subconnector_property,
 				      tv_enc->select_subconnector);
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				      conf->tv_subconnector_property,
 				      tv_enc->subconnector);
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				      conf->tv_mode_property,
 				      tv_enc->tv_norm);
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				      conf->tv_flicker_reduction_property,
 				      tv_enc->flicker);
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				      conf->tv_saturation_property,
 				      tv_enc->saturation);
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				      conf->tv_hue_property,
 				      tv_enc->hue);
-	drm_connector_attach_property(connector,
+	drm_object_attach_property(&connector->base,
 				      conf->tv_overscan_property,
 				      tv_enc->overscan);
 
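All seven call sites migrate from the connector-specific property helpers to the object-generic drm_object_attach_property()/drm_object_property_set_value(), which operate on the drm_mode_object embedded in every connector, CRTC, and plane; connector callers simply pass &connector->base. A minimal sketch, where prop and value are placeholders for any attached property:

/* Sketch: the object-generic property API this series migrates to. */
static void
attach_connector_property(struct drm_connector *connector,
			  struct drm_property *prop, uint64_t value)
{
	/* works for any mode object; connectors embed one at ->base */
	drm_object_attach_property(&connector->base, prop, value);
}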
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
deleted file mode 100644
index 222de77d6269..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ /dev/null
@@ -1,764 +0,0 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <drm/drmP.h>
28#include <drm/drm_crtc_helper.h>
29
30#include "nouveau_reg.h"
31#include "nouveau_drm.h"
32#include "nouveau_dma.h"
33#include "nouveau_gem.h"
34#include "nouveau_hw.h"
35#include "nouveau_encoder.h"
36#include "nouveau_crtc.h"
37#include "nouveau_connector.h"
38#include "nv50_display.h"
39
40#include <subdev/clock.h>
41
42static void
43nv50_crtc_lut_load(struct drm_crtc *crtc)
44{
45 struct nouveau_drm *drm = nouveau_drm(crtc->dev);
46 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
47 void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
48 int i;
49
50 NV_DEBUG(drm, "\n");
51
52 for (i = 0; i < 256; i++) {
53 writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0);
54 writew(nv_crtc->lut.g[i] >> 2, lut + 8*i + 2);
55 writew(nv_crtc->lut.b[i] >> 2, lut + 8*i + 4);
56 }
57
58 if (nv_crtc->lut.depth == 30) {
59 writew(nv_crtc->lut.r[i - 1] >> 2, lut + 8*i + 0);
60 writew(nv_crtc->lut.g[i - 1] >> 2, lut + 8*i + 2);
61 writew(nv_crtc->lut.b[i - 1] >> 2, lut + 8*i + 4);
62 }
63}
64
65int
66nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
67{
68 struct drm_device *dev = nv_crtc->base.dev;
69 struct nouveau_drm *drm = nouveau_drm(dev);
70 struct nouveau_channel *evo = nv50_display(dev)->master;
71 int index = nv_crtc->index, ret;
72
73 NV_DEBUG(drm, "index %d\n", nv_crtc->index);
74 NV_DEBUG(drm, "%s\n", blanked ? "blanked" : "unblanked");
75
76 if (blanked) {
77 nv_crtc->cursor.hide(nv_crtc, false);
78
79 ret = RING_SPACE(evo, nv_device(drm->device)->chipset != 0x50 ? 7 : 5);
80 if (ret) {
81 NV_ERROR(drm, "no space while blanking crtc\n");
82 return ret;
83 }
84 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
85 OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK);
86 OUT_RING(evo, 0);
87 if (nv_device(drm->device)->chipset != 0x50) {
88 BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
89 OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE);
90 }
91
92 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
93 OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
94 } else {
95 if (nv_crtc->cursor.visible)
96 nv_crtc->cursor.show(nv_crtc, false);
97 else
98 nv_crtc->cursor.hide(nv_crtc, false);
99
100 ret = RING_SPACE(evo, nv_device(drm->device)->chipset != 0x50 ? 10 : 8);
101 if (ret) {
102 NV_ERROR(drm, "no space while unblanking crtc\n");
103 return ret;
104 }
105 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
106 OUT_RING(evo, nv_crtc->lut.depth == 8 ?
107 NV50_EVO_CRTC_CLUT_MODE_OFF :
108 NV50_EVO_CRTC_CLUT_MODE_ON);
109 OUT_RING(evo, nv_crtc->lut.nvbo->bo.offset >> 8);
110 if (nv_device(drm->device)->chipset != 0x50) {
111 BEGIN_NV04(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
112 OUT_RING(evo, NvEvoVRAM);
113 }
114
115 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2);
116 OUT_RING(evo, nv_crtc->fb.offset >> 8);
117 OUT_RING(evo, 0);
118 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
119 if (nv_device(drm->device)->chipset != 0x50)
120 if (nv_crtc->fb.tile_flags == 0x7a00 ||
121 nv_crtc->fb.tile_flags == 0xfe00)
122 OUT_RING(evo, NvEvoFB32);
123 else
124 if (nv_crtc->fb.tile_flags == 0x7000)
125 OUT_RING(evo, NvEvoFB16);
126 else
127 OUT_RING(evo, NvEvoVRAM_LP);
128 else
129 OUT_RING(evo, NvEvoVRAM_LP);
130 }
131
132 nv_crtc->fb.blanked = blanked;
133 return 0;
134}
135
136static int
137nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
138{
139 struct nouveau_channel *evo = nv50_display(nv_crtc->base.dev)->master;
140 struct nouveau_connector *nv_connector;
141 struct drm_connector *connector;
142 int head = nv_crtc->index, ret;
143 u32 mode = 0x00;
144
145 nv_connector = nouveau_crtc_connector_get(nv_crtc);
146 connector = &nv_connector->base;
147 if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
148 if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
149 mode = DITHERING_MODE_DYNAMIC2X2;
150 } else {
151 mode = nv_connector->dithering_mode;
152 }
153
154 if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
155 if (connector->display_info.bpc >= 8)
156 mode |= DITHERING_DEPTH_8BPC;
157 } else {
158 mode |= nv_connector->dithering_depth;
159 }
160
161 ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
162 if (ret == 0) {
163 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1);
164 OUT_RING (evo, mode);
165 if (update) {
166 BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
167 OUT_RING (evo, 0);
168 FIRE_RING (evo);
169 }
170 }
171
172 return ret;
173}
174
175static int
176nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
177{
178 struct drm_device *dev = nv_crtc->base.dev;
179 struct nouveau_drm *drm = nouveau_drm(dev);
180 struct nouveau_channel *evo = nv50_display(dev)->master;
181 int ret;
182 int adj;
183 u32 hue, vib;
184
185 NV_DEBUG(drm, "vibrance = %i, hue = %i\n",
186 nv_crtc->color_vibrance, nv_crtc->vibrant_hue);
187
188 ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
189 if (ret) {
190 NV_ERROR(drm, "no space while setting color vibrance\n");
191 return ret;
192 }
193
194 adj = (nv_crtc->color_vibrance > 0) ? 50 : 0;
195 vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff;
196
197 hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
198
199 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
200 OUT_RING (evo, (hue << 20) | (vib << 8));
201
202 if (update) {
203 BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
204 OUT_RING (evo, 0);
205 FIRE_RING (evo);
206 }
207
208 return 0;
209}
210
211struct nouveau_connector *
212nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
213{
214 struct drm_device *dev = nv_crtc->base.dev;
215 struct drm_connector *connector;
216 struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
217
218 /* The safest approach is to find an encoder with the right crtc, that
219 * is also linked to a connector. */
220 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
221 if (connector->encoder)
222 if (connector->encoder->crtc == crtc)
223 return nouveau_connector(connector);
224 }
225
226 return NULL;
227}
228
229static int
230nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
231{
232 struct nouveau_connector *nv_connector;
233 struct drm_crtc *crtc = &nv_crtc->base;
234 struct drm_device *dev = crtc->dev;
235 struct nouveau_drm *drm = nouveau_drm(dev);
236 struct nouveau_channel *evo = nv50_display(dev)->master;
237 struct drm_display_mode *umode = &crtc->mode;
238 struct drm_display_mode *omode;
239 int scaling_mode, ret;
240 u32 ctrl = 0, oX, oY;
241
242 NV_DEBUG(drm, "\n");
243
244 nv_connector = nouveau_crtc_connector_get(nv_crtc);
245 if (!nv_connector || !nv_connector->native_mode) {
246 NV_ERROR(drm, "no native mode, forcing panel scaling\n");
247 scaling_mode = DRM_MODE_SCALE_NONE;
248 } else {
249 scaling_mode = nv_connector->scaling_mode;
250 }
251
252 /* start off at the resolution we programmed the crtc for, this
253 * effectively handles NONE/FULL scaling
254 */
255 if (scaling_mode != DRM_MODE_SCALE_NONE)
256 omode = nv_connector->native_mode;
257 else
258 omode = umode;
259
260 oX = omode->hdisplay;
261 oY = omode->vdisplay;
262 if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
263 oY *= 2;
264
265 /* add overscan compensation if necessary, will keep the aspect
266 * ratio the same as the backend mode unless overridden by the
267 * user setting both hborder and vborder properties.
268 */
269 if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
270 (nv_connector->underscan == UNDERSCAN_AUTO &&
271 nv_connector->edid &&
272 drm_detect_hdmi_monitor(nv_connector->edid)))) {
273 u32 bX = nv_connector->underscan_hborder;
274 u32 bY = nv_connector->underscan_vborder;
275 u32 aspect = (oY << 19) / oX;
276
277 if (bX) {
278 oX -= (bX * 2);
279 if (bY) oY -= (bY * 2);
280 else oY = ((oX * aspect) + (aspect / 2)) >> 19;
281 } else {
282 oX -= (oX >> 4) + 32;
283 if (bY) oY -= (bY * 2);
284 else oY = ((oX * aspect) + (aspect / 2)) >> 19;
285 }
286 }
287
288 /* handle CENTER/ASPECT scaling, taking into account the areas
289 * removed already for overscan compensation
290 */
291 switch (scaling_mode) {
292 case DRM_MODE_SCALE_CENTER:
293 oX = min((u32)umode->hdisplay, oX);
294 oY = min((u32)umode->vdisplay, oY);
295 /* fall-through */
296 case DRM_MODE_SCALE_ASPECT:
297 if (oY < oX) {
298 u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
299 oX = ((oY * aspect) + (aspect / 2)) >> 19;
300 } else {
301 u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
302 oY = ((oX * aspect) + (aspect / 2)) >> 19;
303 }
304 break;
305 default:
306 break;
307 }
308
309 if (umode->hdisplay != oX || umode->vdisplay != oY ||
310 umode->flags & DRM_MODE_FLAG_INTERLACE ||
311 umode->flags & DRM_MODE_FLAG_DBLSCAN)
312 ctrl |= NV50_EVO_CRTC_SCALE_CTRL_ACTIVE;
313
314 ret = RING_SPACE(evo, 5);
315 if (ret)
316 return ret;
317
318 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
319 OUT_RING (evo, ctrl);
320 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
321 OUT_RING (evo, oY << 16 | oX);
322 OUT_RING (evo, oY << 16 | oX);
323
324 if (update) {
325 nv50_display_flip_stop(crtc);
326 nv50_display_sync(dev);
327 nv50_display_flip_next(crtc, crtc->fb, NULL);
328 }
329
330 return 0;
331}
332
333int
334nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
335{
336 struct nouveau_device *device = nouveau_dev(dev);
337 struct nouveau_clock *clk = nouveau_clock(device);
338
339 return clk->pll_set(clk, PLL_VPLL0 + head, pclk);
340}
341
342static void
343nv50_crtc_destroy(struct drm_crtc *crtc)
344{
345 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
346 struct nouveau_drm *drm = nouveau_drm(crtc->dev);
347
348 NV_DEBUG(drm, "\n");
349
350 nouveau_bo_unmap(nv_crtc->lut.nvbo);
351 nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
352 nouveau_bo_unmap(nv_crtc->cursor.nvbo);
353 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
354 drm_crtc_cleanup(&nv_crtc->base);
355 kfree(nv_crtc);
356}
357
358int
359nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
360 uint32_t buffer_handle, uint32_t width, uint32_t height)
361{
362 struct drm_device *dev = crtc->dev;
363 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
364 struct nouveau_bo *cursor = NULL;
365 struct drm_gem_object *gem;
366 int ret = 0, i;
367
368 if (!buffer_handle) {
369 nv_crtc->cursor.hide(nv_crtc, true);
370 return 0;
371 }
372
373 if (width != 64 || height != 64)
374 return -EINVAL;
375
376 gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
377 if (!gem)
378 return -ENOENT;
379 cursor = nouveau_gem_object(gem);
380
381 ret = nouveau_bo_map(cursor);
382 if (ret)
383 goto out;
384
385 /* The simple will do for now. */
386 for (i = 0; i < 64 * 64; i++)
387 nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, nouveau_bo_rd32(cursor, i));
388
389 nouveau_bo_unmap(cursor);
390
391 nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset);
392 nv_crtc->cursor.show(nv_crtc, true);
393
394out:
395 drm_gem_object_unreference_unlocked(gem);
396 return ret;
397}
398
399int
400nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
401{
402 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
403
404 nv_crtc->cursor.set_pos(nv_crtc, x, y);
405 return 0;
406}
407
408static void
409nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
410 uint32_t start, uint32_t size)
411{
412 int end = (start + size > 256) ? 256 : start + size, i;
413 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
414
415 for (i = start; i < end; i++) {
416 nv_crtc->lut.r[i] = r[i];
417 nv_crtc->lut.g[i] = g[i];
418 nv_crtc->lut.b[i] = b[i];
419 }
420
421 /* We need to know the depth before we upload, but it's possible to
422 * get called before a framebuffer is bound. If this is the case,
423 * mark the lut values as dirty by setting depth==0, and it'll be
424 * uploaded on the first mode_set_base()
425 */
426 if (!nv_crtc->base.fb) {
427 nv_crtc->lut.depth = 0;
428 return;
429 }
430
431 nv50_crtc_lut_load(crtc);
432}
433
434static void
435nv50_crtc_save(struct drm_crtc *crtc)
436{
437 struct nouveau_drm *drm = nouveau_drm(crtc->dev);
438 NV_ERROR(drm, "!!\n");
439}
440
441static void
442nv50_crtc_restore(struct drm_crtc *crtc)
443{
444 struct nouveau_drm *drm = nouveau_drm(crtc->dev);
445 NV_ERROR(drm, "!!\n");
446}
447
448static const struct drm_crtc_funcs nv50_crtc_funcs = {
449 .save = nv50_crtc_save,
450 .restore = nv50_crtc_restore,
451 .cursor_set = nv50_crtc_cursor_set,
452 .cursor_move = nv50_crtc_cursor_move,
453 .gamma_set = nv50_crtc_gamma_set,
454 .set_config = drm_crtc_helper_set_config,
455 .page_flip = nouveau_crtc_page_flip,
456 .destroy = nv50_crtc_destroy,
457};
458
459static void
460nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
461{
462}
463
464static void
465nv50_crtc_prepare(struct drm_crtc *crtc)
466{
467 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
468 struct drm_device *dev = crtc->dev;
469 struct nouveau_drm *drm = nouveau_drm(dev);
470
471 NV_DEBUG(drm, "index %d\n", nv_crtc->index);
472
473 nv50_display_flip_stop(crtc);
474 drm_vblank_pre_modeset(dev, nv_crtc->index);
475 nv50_crtc_blank(nv_crtc, true);
476}
477
478static void
479nv50_crtc_commit(struct drm_crtc *crtc)
480{
481 struct drm_device *dev = crtc->dev;
482 struct nouveau_drm *drm = nouveau_drm(dev);
483 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
484
485 NV_DEBUG(drm, "index %d\n", nv_crtc->index);
486
487 nv50_crtc_blank(nv_crtc, false);
488 drm_vblank_post_modeset(dev, nv_crtc->index);
489 nv50_display_sync(dev);
490 nv50_display_flip_next(crtc, crtc->fb, NULL);
491}
492
493static bool
494nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
495 struct drm_display_mode *adjusted_mode)
496{
497 return true;
498}
499
500static int
501nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
502 struct drm_framebuffer *passed_fb,
503 int x, int y, bool atomic)
504{
505 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
506 struct drm_device *dev = nv_crtc->base.dev;
507 struct nouveau_drm *drm = nouveau_drm(dev);
508 struct nouveau_channel *evo = nv50_display(dev)->master;
509 struct drm_framebuffer *drm_fb;
510 struct nouveau_framebuffer *fb;
511 int ret;
512
513 NV_DEBUG(drm, "index %d\n", nv_crtc->index);
514
515 /* no fb bound */
516 if (!atomic && !crtc->fb) {
517 NV_DEBUG(drm, "No FB bound\n");
518 return 0;
519 }
520
521 /* If atomic, we want to switch to the fb we were passed, so
522 * now we update pointers to do that. (We don't pin; just
523 * assume we're already pinned and update the base address.)
524 */
525 if (atomic) {
526 drm_fb = passed_fb;
527 fb = nouveau_framebuffer(passed_fb);
528 } else {
529 drm_fb = crtc->fb;
530 fb = nouveau_framebuffer(crtc->fb);
531 /* If not atomic, we can go ahead and pin, and unpin the
532 * old fb we were passed.
533 */
534 ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
535 if (ret)
536 return ret;
537
538 if (passed_fb) {
539 struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb);
540 nouveau_bo_unpin(ofb->nvbo);
541 }
542 }
543
544 nv_crtc->fb.offset = fb->nvbo->bo.offset;
545 nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo);
546 nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
547 if (!nv_crtc->fb.blanked && nv_device(drm->device)->chipset != 0x50) {
548 ret = RING_SPACE(evo, 2);
549 if (ret)
550 return ret;
551
552 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
553 OUT_RING (evo, fb->r_dma);
554 }
555
556 ret = RING_SPACE(evo, 12);
557 if (ret)
558 return ret;
559
560 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
561 OUT_RING (evo, nv_crtc->fb.offset >> 8);
562 OUT_RING (evo, 0);
563 OUT_RING (evo, (drm_fb->height << 16) | drm_fb->width);
564 OUT_RING (evo, fb->r_pitch);
565 OUT_RING (evo, fb->r_format);
566
567 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
568 OUT_RING (evo, fb->base.depth == 8 ?
569 NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
570
571 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
572 OUT_RING (evo, (y << 16) | x);
573
574 if (nv_crtc->lut.depth != fb->base.depth) {
575 nv_crtc->lut.depth = fb->base.depth;
576 nv50_crtc_lut_load(crtc);
577 }
578
579 return 0;
580}
581
582static int
583nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
584 struct drm_display_mode *mode, int x, int y,
585 struct drm_framebuffer *old_fb)
586{
587 struct drm_device *dev = crtc->dev;
588 struct nouveau_channel *evo = nv50_display(dev)->master;
589 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
590 u32 head = nv_crtc->index * 0x400;
591 u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
592 u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
593 u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
594 u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
595 u32 vblan2e = 0, vblan2s = 1;
596 int ret;
597
598 /* hw timing description looks like this:
599 *
600 * <sync> <back porch> <---------display---------> <front porch>
601 * ______
602 * |____________|---------------------------|____________|
603 *
604 * ^ synce ^ blanke ^ blanks ^ active
605 *
606 * interlaced modes also have 2 additional values pointing at the end
607 * and start of the next field's blanking period.
608 */
609
610 hactive = mode->htotal;
611 hsynce = mode->hsync_end - mode->hsync_start - 1;
612 hbackp = mode->htotal - mode->hsync_end;
613 hblanke = hsynce + hbackp;
614 hfrontp = mode->hsync_start - mode->hdisplay;
615 hblanks = mode->htotal - hfrontp - 1;
616
617 vactive = mode->vtotal * vscan / ilace;
618 vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
619 vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
620 vblanke = vsynce + vbackp;
621 vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
622 vblanks = vactive - vfrontp - 1;
623 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
624 vblan2e = vactive + vsynce + vbackp;
625 vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
626 vactive = (vactive * 2) + 1;
627 }
628
629 ret = RING_SPACE(evo, 18);
630 if (ret == 0) {
631 BEGIN_NV04(evo, 0, 0x0804 + head, 2);
632 OUT_RING (evo, 0x00800000 | mode->clock);
633 OUT_RING (evo, (ilace == 2) ? 2 : 0);
634 BEGIN_NV04(evo, 0, 0x0810 + head, 6);
635 OUT_RING (evo, 0x00000000); /* border colour */
636 OUT_RING (evo, (vactive << 16) | hactive);
637 OUT_RING (evo, ( vsynce << 16) | hsynce);
638 OUT_RING (evo, (vblanke << 16) | hblanke);
639 OUT_RING (evo, (vblanks << 16) | hblanks);
640 OUT_RING (evo, (vblan2e << 16) | vblan2s);
641 BEGIN_NV04(evo, 0, 0x082c + head, 1);
642 OUT_RING (evo, 0x00000000);
643 BEGIN_NV04(evo, 0, 0x0900 + head, 1);
644 OUT_RING (evo, 0x00000311); /* makes sync channel work */
645 BEGIN_NV04(evo, 0, 0x08c8 + head, 1);
646 OUT_RING (evo, (umode->vdisplay << 16) | umode->hdisplay);
647 BEGIN_NV04(evo, 0, 0x08d4 + head, 1);
648 OUT_RING (evo, 0x00000000); /* screen position */
649 }
650
651 nv_crtc->set_dither(nv_crtc, false);
652 nv_crtc->set_scale(nv_crtc, false);
653 nv_crtc->set_color_vibrance(nv_crtc, false);
654
655 return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
656}
657
658static int
659nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
660 struct drm_framebuffer *old_fb)
661{
662 int ret;
663
664 nv50_display_flip_stop(crtc);
665 ret = nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
666 if (ret)
667 return ret;
668
669 ret = nv50_display_sync(crtc->dev);
670 if (ret)
671 return ret;
672
673 return nv50_display_flip_next(crtc, crtc->fb, NULL);
674}
675
676static int
677nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
678 struct drm_framebuffer *fb,
679 int x, int y, enum mode_set_atomic state)
680{
681 int ret;
682
683 nv50_display_flip_stop(crtc);
684 ret = nv50_crtc_do_mode_set_base(crtc, fb, x, y, true);
685 if (ret)
686 return ret;
687
688 return nv50_display_sync(crtc->dev);
689}
690
691static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
692 .dpms = nv50_crtc_dpms,
693 .prepare = nv50_crtc_prepare,
694 .commit = nv50_crtc_commit,
695 .mode_fixup = nv50_crtc_mode_fixup,
696 .mode_set = nv50_crtc_mode_set,
697 .mode_set_base = nv50_crtc_mode_set_base,
698 .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
699 .load_lut = nv50_crtc_lut_load,
700};
701
702int
703nv50_crtc_create(struct drm_device *dev, int index)
704{
705 struct nouveau_drm *drm = nouveau_drm(dev);
706 struct nouveau_crtc *nv_crtc = NULL;
707 int ret, i;
708
709 NV_DEBUG(drm, "\n");
710
711 nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
712 if (!nv_crtc)
713 return -ENOMEM;
714
715 nv_crtc->index = index;
716 nv_crtc->set_dither = nv50_crtc_set_dither;
717 nv_crtc->set_scale = nv50_crtc_set_scale;
718 nv_crtc->set_color_vibrance = nv50_crtc_set_color_vibrance;
719 nv_crtc->color_vibrance = 50;
720 nv_crtc->vibrant_hue = 0;
721 nv_crtc->lut.depth = 0;
722 for (i = 0; i < 256; i++) {
723 nv_crtc->lut.r[i] = i << 8;
724 nv_crtc->lut.g[i] = i << 8;
725 nv_crtc->lut.b[i] = i << 8;
726 }
727
728 drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
729 drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
730 drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
731
732 ret = nouveau_bo_new(dev, 4096, 0x100, TTM_PL_FLAG_VRAM,
733 0, 0x0000, NULL, &nv_crtc->lut.nvbo);
734 if (!ret) {
735 ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
736 if (!ret)
737 ret = nouveau_bo_map(nv_crtc->lut.nvbo);
738 if (ret)
739 nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
740 }
741
742 if (ret)
743 goto out;
744
745
746 ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
747 0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
748 if (!ret) {
749 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
750 if (!ret)
751 ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
752 if (ret)
753 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
754 }
755
756 if (ret)
757 goto out;
758
759 nv50_cursor_init(nv_crtc);
760out:
761 if (ret)
762 nv50_crtc_destroy(&nv_crtc->base);
763 return ret;
764}
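nv50_crtc.c disappears here; the rewritten nv50_display.c further down absorbs its CRTC handling. One detail worth preserving from the deleted scaler is its 19-bit fixed-point aspect arithmetic; a worked example with illustrative numbers:

/* Worked example (illustrative numbers) of the deleted scaler's
 * 19-bit fixed-point aspect math.  For a 1280x1024 output:
 *
 *   aspect = (1024 << 19) / 1280 = 419430          (~0.8 in Q19)
 *   oY     = ((1280 * 419430) + (419430 / 2)) >> 19 = 1024
 *
 * The "+ aspect/2" term rounds to nearest instead of truncating, so
 * converting a width back to a height reproduces the original value. */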
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
deleted file mode 100644
index 223da113ceee..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_cursor.c
+++ /dev/null
@@ -1,136 +0,0 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <drm/drmP.h>
28
29#include "nouveau_drm.h"
30#include "nouveau_dma.h"
31#include "nouveau_crtc.h"
32#include "nv50_display.h"
33
34static void
35nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
36{
37 struct drm_device *dev = nv_crtc->base.dev;
38 struct nouveau_drm *drm = nouveau_drm(dev);
39 struct nouveau_channel *evo = nv50_display(dev)->master;
40 int ret;
41
42 NV_DEBUG(drm, "\n");
43
44 if (update && nv_crtc->cursor.visible)
45 return;
46
47 ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 5 : 3) + update * 2);
48 if (ret) {
49 NV_ERROR(drm, "no space while unhiding cursor\n");
50 return;
51 }
52
53 if (nv_device(drm->device)->chipset != 0x50) {
54 BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
55 OUT_RING(evo, NvEvoVRAM);
56 }
57 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
58 OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_SHOW);
59 OUT_RING(evo, nv_crtc->cursor.offset >> 8);
60
61 if (update) {
62 BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
63 OUT_RING(evo, 0);
64 FIRE_RING(evo);
65 nv_crtc->cursor.visible = true;
66 }
67}
68
69static void
70nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
71{
72 struct drm_device *dev = nv_crtc->base.dev;
73 struct nouveau_drm *drm = nouveau_drm(dev);
74 struct nouveau_channel *evo = nv50_display(dev)->master;
75 int ret;
76
77 NV_DEBUG(drm, "\n");
78
79 if (update && !nv_crtc->cursor.visible)
80 return;
81
82 ret = RING_SPACE(evo, (nv_device(drm->device)->chipset != 0x50 ? 5 : 3) + update * 2);
83 if (ret) {
84 NV_ERROR(drm, "no space while hiding cursor\n");
85 return;
86 }
87 BEGIN_NV04(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
88 OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE);
89 OUT_RING(evo, 0);
90 if (nv_device(drm->device)->chipset != 0x50) {
91 BEGIN_NV04(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
92 OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE);
93 }
94
95 if (update) {
96 BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
97 OUT_RING(evo, 0);
98 FIRE_RING(evo);
99 nv_crtc->cursor.visible = false;
100 }
101}
102
103static void
104nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
105{
106 struct nouveau_device *device = nouveau_dev(nv_crtc->base.dev);
107
108 nv_crtc->cursor_saved_x = x; nv_crtc->cursor_saved_y = y;
109 nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index),
110 ((y & 0xFFFF) << 16) | (x & 0xFFFF));
111 /* Needed to make the cursor move. */
112 nv_wr32(device, NV50_PDISPLAY_CURSOR_USER_POS_CTRL(nv_crtc->index), 0);
113}
114
115static void
116nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
117{
118 if (offset == nv_crtc->cursor.offset)
119 return;
120
121 nv_crtc->cursor.offset = offset;
122 if (nv_crtc->cursor.visible) {
123 nv_crtc->cursor.visible = false;
124 nv_crtc->cursor.show(nv_crtc, true);
125 }
126}
127
128int
129nv50_cursor_init(struct nouveau_crtc *nv_crtc)
130{
131 nv_crtc->cursor.set_offset = nv50_cursor_set_offset;
132 nv_crtc->cursor.set_pos = nv50_cursor_set_pos;
133 nv_crtc->cursor.hide = nv50_cursor_hide;
134 nv_crtc->cursor.show = nv50_cursor_show;
135 return 0;
136}
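The deleted cursor code hid one subtlety in set_offset: the hardware latches the buffer offset when the cursor is shown, so moving the cursor image to a new offset requires a forced re-show. The trick, isolated from the removed file (surrounding function assumed):

/* Sketch from the removed nv50_cursor_set_offset(): clearing
 * 'visible' first defeats show()'s early-out, so the EVO methods are
 * resubmitted and the new offset actually takes effect. */
	if (offset != nv_crtc->cursor.offset) {
		nv_crtc->cursor.offset = offset;
		if (nv_crtc->cursor.visible) {
			nv_crtc->cursor.visible = false;
			nv_crtc->cursor.show(nv_crtc, true);
		}
	}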
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
deleted file mode 100644
index 6a30a1748573..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ /dev/null
@@ -1,321 +0,0 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <drm/drmP.h>
28#include <drm/drm_crtc_helper.h>
29
30#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
31#include "nouveau_reg.h"
32#include "nouveau_drm.h"
33#include "nouveau_dma.h"
34#include "nouveau_encoder.h"
35#include "nouveau_connector.h"
36#include "nouveau_crtc.h"
37#include "nv50_display.h"
38
39#include <subdev/timer.h>
40
41static void
42nv50_dac_disconnect(struct drm_encoder *encoder)
43{
44 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
45 struct drm_device *dev = encoder->dev;
46 struct nouveau_drm *drm = nouveau_drm(dev);
47 struct nouveau_channel *evo = nv50_display(dev)->master;
48 int ret;
49
50 if (!nv_encoder->crtc)
51 return;
52 nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);
53
54 NV_DEBUG(drm, "Disconnecting DAC %d\n", nv_encoder->or);
55
56 ret = RING_SPACE(evo, 4);
57 if (ret) {
58 NV_ERROR(drm, "no space while disconnecting DAC\n");
59 return;
60 }
61 BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
62 OUT_RING (evo, 0);
63 BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
64 OUT_RING (evo, 0);
65
66 nv_encoder->crtc = NULL;
67}
68
69static enum drm_connector_status
70nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
71{
72 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
73 struct nouveau_device *device = nouveau_dev(encoder->dev);
74 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
75 enum drm_connector_status status = connector_status_disconnected;
76 uint32_t dpms_state, load_pattern, load_state;
77 int or = nv_encoder->or;
78
79 nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL1(or), 0x00000001);
80 dpms_state = nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or));
81
82 nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
83 0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
84 if (!nv_wait(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
85 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
86 NV_ERROR(drm, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
87 NV_ERROR(drm, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
88 nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
89 return status;
90 }
91
92 /* Use bios provided value if possible. */
93 if (drm->vbios.dactestval) {
94 load_pattern = drm->vbios.dactestval;
95 NV_DEBUG(drm, "Using bios provided load_pattern of %d\n",
96 load_pattern);
97 } else {
98 load_pattern = 340;
99 NV_DEBUG(drm, "Using default load_pattern of %d\n",
100 load_pattern);
101 }
102
103 nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or),
104 NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern);
105 mdelay(45); /* give it some time to process */
106 load_state = nv_rd32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or));
107
108 nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0);
109 nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), dpms_state |
110 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
111
112 if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) ==
113 NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT)
114 status = connector_status_connected;
115
116 if (status == connector_status_connected)
117 NV_DEBUG(drm, "Load was detected on output with or %d\n", or);
118 else
119 NV_DEBUG(drm, "Load was not detected on output with or %d\n", or);
120
121 return status;
122}
123
124static void
125nv50_dac_dpms(struct drm_encoder *encoder, int mode)
126{
127 struct nouveau_device *device = nouveau_dev(encoder->dev);
128 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
129 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
130 uint32_t val;
131 int or = nv_encoder->or;
132
133 NV_DEBUG(drm, "or %d mode %d\n", or, mode);
134
135 /* wait for it to be done */
136 if (!nv_wait(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
137 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
138 NV_ERROR(drm, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
139 NV_ERROR(drm, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
140 nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
141 return;
142 }
143
144 val = nv_rd32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or)) & ~0x7F;
145
146 if (mode != DRM_MODE_DPMS_ON)
147 val |= NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED;
148
149 switch (mode) {
150 case DRM_MODE_DPMS_STANDBY:
151 val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
152 break;
153 case DRM_MODE_DPMS_SUSPEND:
154 val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
155 break;
156 case DRM_MODE_DPMS_OFF:
157 val |= NV50_PDISPLAY_DAC_DPMS_CTRL_OFF;
158 val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
159 val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
160 break;
161 default:
162 break;
163 }
164
165 nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(or), val |
166 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
167}
168
169static void
170nv50_dac_save(struct drm_encoder *encoder)
171{
172 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
173 NV_ERROR(drm, "!!\n");
174}
175
176static void
177nv50_dac_restore(struct drm_encoder *encoder)
178{
179 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
180 NV_ERROR(drm, "!!\n");
181}
182
183static bool
184nv50_dac_mode_fixup(struct drm_encoder *encoder,
185 const struct drm_display_mode *mode,
186 struct drm_display_mode *adjusted_mode)
187{
188 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
189 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
190 struct nouveau_connector *connector;
191
192 NV_DEBUG(drm, "or %d\n", nv_encoder->or);
193
194 connector = nouveau_encoder_connector_get(nv_encoder);
195 if (!connector) {
196 NV_ERROR(drm, "Encoder has no connector\n");
197 return false;
198 }
199
200 if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
201 connector->native_mode)
202 drm_mode_copy(adjusted_mode, connector->native_mode);
203
204 return true;
205}
206
207static void
208nv50_dac_commit(struct drm_encoder *encoder)
209{
210}
211
212static void
213nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
214 struct drm_display_mode *adjusted_mode)
215{
216 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
217 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
218 struct drm_device *dev = encoder->dev;
219 struct nouveau_channel *evo = nv50_display(dev)->master;
220 struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
221 uint32_t mode_ctl = 0, mode_ctl2 = 0;
222 int ret;
223
224 NV_DEBUG(drm, "or %d type %d crtc %d\n",
225 nv_encoder->or, nv_encoder->dcb->type, crtc->index);
226
227 nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
228
229 if (crtc->index == 1)
230 mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC1;
231 else
232 mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC0;
233
234 /* Lacking a working tv-out, this is not a 100% sure. */
235 if (nv_encoder->dcb->type == DCB_OUTPUT_ANALOG)
236 mode_ctl |= 0x40;
237 else
238 if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
239 mode_ctl |= 0x100;
240
241 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
242 mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NHSYNC;
243
244 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
245 mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NVSYNC;
246
247 ret = RING_SPACE(evo, 3);
248 if (ret) {
249 NV_ERROR(drm, "no space while connecting DAC\n");
250 return;
251 }
252 BEGIN_NV04(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
253 OUT_RING(evo, mode_ctl);
254 OUT_RING(evo, mode_ctl2);
255
256 nv_encoder->crtc = encoder->crtc;
257}
258
259static struct drm_crtc *
260nv50_dac_crtc_get(struct drm_encoder *encoder)
261{
262 return nouveau_encoder(encoder)->crtc;
263}
264
265static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
266 .dpms = nv50_dac_dpms,
267 .save = nv50_dac_save,
268 .restore = nv50_dac_restore,
269 .mode_fixup = nv50_dac_mode_fixup,
270 .prepare = nv50_dac_disconnect,
271 .commit = nv50_dac_commit,
272 .mode_set = nv50_dac_mode_set,
273 .get_crtc = nv50_dac_crtc_get,
274 .detect = nv50_dac_detect,
275 .disable = nv50_dac_disconnect
276};
277
278static void
279nv50_dac_destroy(struct drm_encoder *encoder)
280{
281 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
282 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
283
284 if (!encoder)
285 return;
286
287 NV_DEBUG(drm, "\n");
288
289 drm_encoder_cleanup(encoder);
290 kfree(nv_encoder);
291}
292
293static const struct drm_encoder_funcs nv50_dac_encoder_funcs = {
294 .destroy = nv50_dac_destroy,
295};
296
297int
298nv50_dac_create(struct drm_connector *connector, struct dcb_output *entry)
299{
300 struct nouveau_encoder *nv_encoder;
301 struct drm_encoder *encoder;
302
303 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
304 if (!nv_encoder)
305 return -ENOMEM;
306 encoder = to_drm_encoder(nv_encoder);
307
308 nv_encoder->dcb = entry;
309 nv_encoder->or = ffs(entry->or) - 1;
310
311 drm_encoder_init(connector->dev, encoder, &nv50_dac_encoder_funcs,
312 DRM_MODE_ENCODER_DAC);
313 drm_encoder_helper_add(encoder, &nv50_dac_helper_funcs);
314
315 encoder->possible_crtcs = entry->heads;
316 encoder->possible_clones = 0;
317
318 drm_mode_connector_attach_encoder(connector, encoder);
319 return 0;
320}
321
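nv50_dac.c carried the analog load-detect routine: power the DAC up, drive a known test level onto the outputs, give the sense circuit time to settle, and read back whether a load (a connected monitor) pulled the line. The core of the removed sequence:

/* Sketch of the deleted load-detect core (register names as in the
 * removed file; 'device', 'or' and 'load_pattern' come from context;
 * load_pattern is the BIOS dactestval when present, else 340). */
	nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or),
		NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern);
	mdelay(45);	/* let the sense circuit settle */
	load_state = nv_rd32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or));
	nv_wr32(device, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0);

	if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) ==
	    NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT)
		status = connector_status_connected;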
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index f97b42cbb6bb..35874085a61e 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1,969 +1,2058 @@
1/* 1 /*
2 * Copyright (C) 2008 Maarten Maathuis. 2 * Copyright 2011 Red Hat Inc.
3 * All Rights Reserved.
4 * 3 *
5 * Permission is hereby granted, free of charge, to any person obtaining 4 * Permission is hereby granted, free of charge, to any person obtaining a
6 * a copy of this software and associated documentation files (the 5 * copy of this software and associated documentation files (the "Software"),
7 * "Software"), to deal in the Software without restriction, including 6 * to deal in the Software without restriction, including without limitation
8 * without limitation the rights to use, copy, modify, merge, publish, 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * distribute, sublicense, and/or sell copies of the Software, and to 8 * and/or sell copies of the Software, and to permit persons to whom the
10 * permit persons to whom the Software is furnished to do so, subject to 9 * Software is furnished to do so, subject to the following conditions:
11 * the following conditions:
12 * 10 *
13 * The above copyright notice and this permission notice (including the 11 * The above copyright notice and this permission notice shall be included in
14 * next paragraph) shall be included in all copies or substantial 12 * all copies or substantial portions of the Software.
15 * portions of the Software.
16 * 13 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 20 * OTHER DEALINGS IN THE SOFTWARE.
24 * 21 *
22 * Authors: Ben Skeggs
25 */ 23 */
26 24
25#include <linux/dma-mapping.h>
26
27#include <drm/drmP.h>
28#include <drm/drm_crtc_helper.h>
29
27#include "nouveau_drm.h" 30#include "nouveau_drm.h"
28#include "nouveau_dma.h" 31#include "nouveau_dma.h"
29 32#include "nouveau_gem.h"
30#include "nv50_display.h"
31#include "nouveau_crtc.h"
32#include "nouveau_encoder.h"
33#include "nouveau_connector.h" 33#include "nouveau_connector.h"
34#include "nouveau_fbcon.h" 34#include "nouveau_encoder.h"
35#include <drm/drm_crtc_helper.h> 35#include "nouveau_crtc.h"
36#include "nouveau_fence.h" 36#include "nouveau_fence.h"
37#include "nv50_display.h"
37 38
39#include <core/client.h>
38#include <core/gpuobj.h> 40#include <core/gpuobj.h>
39#include <subdev/timer.h> 41#include <core/class.h>
40 42
41static void nv50_display_bh(unsigned long); 43#include <subdev/timer.h>
42 44#include <subdev/bar.h>
43static inline int 45#include <subdev/fb.h>
44nv50_sor_nr(struct drm_device *dev) 46
47#define EVO_DMA_NR 9
48
49#define EVO_MASTER (0x00)
50#define EVO_FLIP(c) (0x01 + (c))
51#define EVO_OVLY(c) (0x05 + (c))
52#define EVO_OIMM(c) (0x09 + (c))
53#define EVO_CURS(c) (0x0d + (c))
54
55/* offsets in shared sync bo of various structures */
56#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
57#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
58#define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00)
59#define EVO_FLIP_SEM1(c) EVO_SYNC((c), 0x10)
60
61#define EVO_CORE_HANDLE (0xd1500000)
62#define EVO_CHAN_HANDLE(t,i) (0xd15c0000 | (((t) & 0x00ff) << 8) | (i))
63#define EVO_CHAN_OCLASS(t,c) ((nv_hclass(c) & 0xff00) | ((t) & 0x00ff))
64#define EVO_PUSH_HANDLE(t,i) (0xd15b0000 | (i) | \
65 (((NV50_DISP_##t##_CLASS) & 0x00ff) << 8))
66
67/******************************************************************************
68 * EVO channel
69 *****************************************************************************/
70
71struct nv50_chan {
72 struct nouveau_object *user;
73 u32 handle;
74};
75
76static int
77nv50_chan_create(struct nouveau_object *core, u32 bclass, u8 head,
78 void *data, u32 size, struct nv50_chan *chan)
45{ 79{
46 struct nouveau_device *device = nouveau_dev(dev); 80 struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
81 const u32 oclass = EVO_CHAN_OCLASS(bclass, core);
82 const u32 handle = EVO_CHAN_HANDLE(bclass, head);
83 int ret;
47 84
48 if (device->chipset < 0x90 || 85 ret = nouveau_object_new(client, EVO_CORE_HANDLE, handle,
49 device->chipset == 0x92 || 86 oclass, data, size, &chan->user);
50 device->chipset == 0xa0) 87 if (ret)
51 return 2; 88 return ret;
52 89
53 return 4; 90 chan->handle = handle;
91 return 0;
54} 92}
55 93
56u32 94static void
57nv50_display_active_crtcs(struct drm_device *dev) 95nv50_chan_destroy(struct nouveau_object *core, struct nv50_chan *chan)
58{ 96{
59 struct nouveau_device *device = nouveau_dev(dev); 97 struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
60 u32 mask = 0; 98 if (chan->handle)
61 int i; 99 nouveau_object_del(client, EVO_CORE_HANDLE, chan->handle);
62 100}
63 if (device->chipset < 0x90 ||
64 device->chipset == 0x92 ||
65 device->chipset == 0xa0) {
66 for (i = 0; i < 2; i++)
67 mask |= nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
68 } else {
69 for (i = 0; i < 4; i++)
70 mask |= nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
71 }
72 101
73 for (i = 0; i < 3; i++) 102/******************************************************************************
74 mask |= nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_C(i)); 103 * PIO EVO channel
104 *****************************************************************************/
75 105
76 return mask & 3; 106struct nv50_pioc {
77} 107 struct nv50_chan base;
108};
78 109
79int 110static void
80nv50_display_early_init(struct drm_device *dev) 111nv50_pioc_destroy(struct nouveau_object *core, struct nv50_pioc *pioc)
81{ 112{
82 return 0; 113 nv50_chan_destroy(core, &pioc->base);
83} 114}
84 115
85void 116static int
86nv50_display_late_takedown(struct drm_device *dev) 117nv50_pioc_create(struct nouveau_object *core, u32 bclass, u8 head,
118 void *data, u32 size, struct nv50_pioc *pioc)
87{ 119{
120 return nv50_chan_create(core, bclass, head, data, size, &pioc->base);
88} 121}
89 122
90int 123/******************************************************************************
91nv50_display_sync(struct drm_device *dev) 124 * DMA EVO channel
92{ 125 *****************************************************************************/
93 struct nv50_display *disp = nv50_display(dev);
94 struct nouveau_channel *evo = disp->master;
95 int ret;
96 126
97 ret = RING_SPACE(evo, 6); 127struct nv50_dmac {
98 if (ret == 0) { 128 struct nv50_chan base;
99 BEGIN_NV04(evo, 0, 0x0084, 1); 129 dma_addr_t handle;
100 OUT_RING (evo, 0x80000000); 130 u32 *ptr;
101 BEGIN_NV04(evo, 0, 0x0080, 1); 131};
102 OUT_RING (evo, 0);
103 BEGIN_NV04(evo, 0, 0x0084, 1);
104 OUT_RING (evo, 0x00000000);
105 132
106 nv_wo32(disp->ramin, 0x2000, 0x00000000); 133static void
107 FIRE_RING (evo); 134nv50_dmac_destroy(struct nouveau_object *core, struct nv50_dmac *dmac)
108 135{
109 if (nv_wait_ne(disp->ramin, 0x2000, 0xffffffff, 0x00000000)) 136 if (dmac->ptr) {
110 return 0; 137 struct pci_dev *pdev = nv_device(core)->pdev;
138 pci_free_consistent(pdev, PAGE_SIZE, dmac->ptr, dmac->handle);
111 } 139 }
112 140
113 return 0; 141 nv50_chan_destroy(core, &dmac->base);
114} 142}
115 143
116int 144static int
117nv50_display_init(struct drm_device *dev) 145nv50_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
118{ 146{
119 struct nouveau_drm *drm = nouveau_drm(dev); 147 struct nouveau_fb *pfb = nouveau_fb(core);
120 struct nouveau_device *device = nouveau_dev(dev); 148 struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
121 struct nouveau_channel *evo; 149 struct nouveau_object *object;
122 int ret, i; 150 int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
123 u32 val; 151 NV_DMA_IN_MEMORY_CLASS,
124 152 &(struct nv_dma_class) {
125 NV_DEBUG(drm, "\n"); 153 .flags = NV_DMA_TARGET_VRAM |
126 154 NV_DMA_ACCESS_RDWR,
127 nv_wr32(device, 0x00610184, nv_rd32(device, 0x00614004)); 155 .start = 0,
128 156 .limit = pfb->ram.size - 1,
129 /* 157 .conf0 = NV50_DMA_CONF0_ENABLE |
130 * I think the 0x006101XX range is some kind of main control area 158 NV50_DMA_CONF0_PART_256,
131 * that enables things. 159 }, sizeof(struct nv_dma_class), &object);
132 */
133 /* CRTC? */
134 for (i = 0; i < 2; i++) {
135 val = nv_rd32(device, 0x00616100 + (i * 0x800));
136 nv_wr32(device, 0x00610190 + (i * 0x10), val);
137 val = nv_rd32(device, 0x00616104 + (i * 0x800));
138 nv_wr32(device, 0x00610194 + (i * 0x10), val);
139 val = nv_rd32(device, 0x00616108 + (i * 0x800));
140 nv_wr32(device, 0x00610198 + (i * 0x10), val);
141 val = nv_rd32(device, 0x0061610c + (i * 0x800));
142 nv_wr32(device, 0x0061019c + (i * 0x10), val);
143 }
144
145 /* DAC */
146 for (i = 0; i < 3; i++) {
147 val = nv_rd32(device, 0x0061a000 + (i * 0x800));
148 nv_wr32(device, 0x006101d0 + (i * 0x04), val);
149 }
150
151 /* SOR */
152 for (i = 0; i < nv50_sor_nr(dev); i++) {
153 val = nv_rd32(device, 0x0061c000 + (i * 0x800));
154 nv_wr32(device, 0x006101e0 + (i * 0x04), val);
155 }
156
157 /* EXT */
158 for (i = 0; i < 3; i++) {
159 val = nv_rd32(device, 0x0061e000 + (i * 0x800));
160 nv_wr32(device, 0x006101f0 + (i * 0x04), val);
161 }
162
163 for (i = 0; i < 3; i++) {
164 nv_wr32(device, NV50_PDISPLAY_DAC_DPMS_CTRL(i), 0x00550000 |
165 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
166 nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
167 }
168
169 /* The precise purpose is unknown, i suspect it has something to do
170 * with text mode.
171 */
172 if (nv_rd32(device, NV50_PDISPLAY_INTR_1) & 0x100) {
173 nv_wr32(device, NV50_PDISPLAY_INTR_1, 0x100);
174 nv_wr32(device, 0x006194e8, nv_rd32(device, 0x006194e8) & ~1);
175 if (!nv_wait(device, 0x006194e8, 2, 0)) {
176 NV_ERROR(drm, "timeout: (0x6194e8 & 2) != 0\n");
177 NV_ERROR(drm, "0x6194e8 = 0x%08x\n",
178 nv_rd32(device, 0x6194e8));
179 return -EBUSY;
180 }
181 }
182
183 for (i = 0; i < 2; i++) {
184 nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
185 if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
186 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
187 NV_ERROR(drm, "timeout: CURSOR_CTRL2_STATUS == 0\n");
188 NV_ERROR(drm, "CURSOR_CTRL2 = 0x%08x\n",
189 nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
190 return -EBUSY;
191 }
192
193 nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
194 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON);
195 if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
196 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS,
197 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) {
198 NV_ERROR(drm, "timeout: "
199 "CURSOR_CTRL2_STATUS_ACTIVE(%d)\n", i);
200 NV_ERROR(drm, "CURSOR_CTRL2(%d) = 0x%08x\n", i,
201 nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
202 return -EBUSY;
203 }
204 }
205
206 nv_wr32(device, NV50_PDISPLAY_PIO_CTRL, 0x00000000);
207 nv_mask(device, NV50_PDISPLAY_INTR_0, 0x00000000, 0x00000000);
208 nv_wr32(device, NV50_PDISPLAY_INTR_EN_0, 0x00000000);
209 nv_mask(device, NV50_PDISPLAY_INTR_1, 0x00000000, 0x00000000);
210 nv_wr32(device, NV50_PDISPLAY_INTR_EN_1,
211 NV50_PDISPLAY_INTR_EN_1_CLK_UNK10 |
212 NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
213 NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);
214
215 ret = nv50_evo_init(dev);
216 if (ret) 160 if (ret)
217 return ret; 161 return ret;
218 evo = nv50_display(dev)->master;
219
220 nv_wr32(device, NV50_PDISPLAY_OBJECTS, (nv50_display(dev)->ramin->addr >> 8) | 9);
221 162
222 ret = RING_SPACE(evo, 3); 163 ret = nouveau_object_new(client, parent, NvEvoFB16,
164 NV_DMA_IN_MEMORY_CLASS,
165 &(struct nv_dma_class) {
166 .flags = NV_DMA_TARGET_VRAM |
167 NV_DMA_ACCESS_RDWR,
168 .start = 0,
169 .limit = pfb->ram.size - 1,
170 .conf0 = NV50_DMA_CONF0_ENABLE | 0x70 |
171 NV50_DMA_CONF0_PART_256,
172 }, sizeof(struct nv_dma_class), &object);
223 if (ret) 173 if (ret)
224 return ret; 174 return ret;
225 BEGIN_NV04(evo, 0, NV50_EVO_UNK84, 2);
226 OUT_RING (evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
227 OUT_RING (evo, NvEvoSync);
228 175
229 return nv50_display_sync(dev); 176 ret = nouveau_object_new(client, parent, NvEvoFB32,
177 NV_DMA_IN_MEMORY_CLASS,
178 &(struct nv_dma_class) {
179 .flags = NV_DMA_TARGET_VRAM |
180 NV_DMA_ACCESS_RDWR,
181 .start = 0,
182 .limit = pfb->ram.size - 1,
183 .conf0 = NV50_DMA_CONF0_ENABLE | 0x7a |
184 NV50_DMA_CONF0_PART_256,
185 }, sizeof(struct nv_dma_class), &object);
186 return ret;
230} 187}
-
-void
-nv50_display_fini(struct drm_device *dev)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nv50_display *disp = nv50_display(dev);
-	struct nouveau_channel *evo = disp->master;
-	struct drm_crtc *drm_crtc;
-	int ret, i;
-
-	NV_DEBUG(drm, "\n");
-
-	list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
-		struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
-
-		nv50_crtc_blank(crtc, true);
-	}
-
-	ret = RING_SPACE(evo, 2);
-	if (ret == 0) {
-		BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
-		OUT_RING(evo, 0);
-	}
-	FIRE_RING(evo);
-
-	/* Almost like ack'ing a vblank interrupt, maybe in the spirit of
-	 * cleaning up?
-	 */
-	list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
-		struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
-		uint32_t mask = NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(crtc->index);
-
-		if (!crtc->base.enabled)
-			continue;
-
-		nv_wr32(device, NV50_PDISPLAY_INTR_1, mask);
-		if (!nv_wait(device, NV50_PDISPLAY_INTR_1, mask, mask)) {
-			NV_ERROR(drm, "timeout: (0x610024 & 0x%08x) == "
-				 "0x%08x\n", mask, mask);
-			NV_ERROR(drm, "0x610024 = 0x%08x\n",
-				 nv_rd32(device, NV50_PDISPLAY_INTR_1));
-		}
-	}
-
-	for (i = 0; i < 2; i++) {
-		nv_wr32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0);
-		if (!nv_wait(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
-			     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
-			NV_ERROR(drm, "timeout: CURSOR_CTRL2_STATUS == 0\n");
-			NV_ERROR(drm, "CURSOR_CTRL2 = 0x%08x\n",
-				 nv_rd32(device, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
-		}
-	}
-
-	nv50_evo_fini(dev);
-
-	for (i = 0; i < 3; i++) {
-		if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_STATE(i),
-			     NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
-			NV_ERROR(drm, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i);
-			NV_ERROR(drm, "SOR_DPMS_STATE(%d) = 0x%08x\n", i,
-				 nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_STATE(i)));
-		}
-	}
-
-	/* disable interrupts. */
-	nv_wr32(device, NV50_PDISPLAY_INTR_EN_1, 0x00000000);
-}
+
+static int
+nvc0_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
+{
+	struct nouveau_fb *pfb = nouveau_fb(core);
+	struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+	struct nouveau_object *object;
+	int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
+				     NV_DMA_IN_MEMORY_CLASS,
+				     &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = 0,
+					.limit = pfb->ram.size - 1,
+					.conf0 = NVC0_DMA_CONF0_ENABLE,
+				     }, sizeof(struct nv_dma_class), &object);
+	if (ret)
+		return ret;
+
+	ret = nouveau_object_new(client, parent, NvEvoFB16,
+				 NV_DMA_IN_MEMORY_CLASS,
+				 &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = 0,
+					.limit = pfb->ram.size - 1,
+					.conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe,
+				 }, sizeof(struct nv_dma_class), &object);
+	if (ret)
+		return ret;
+
+	ret = nouveau_object_new(client, parent, NvEvoFB32,
+				 NV_DMA_IN_MEMORY_CLASS,
+				 &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = 0,
+					.limit = pfb->ram.size - 1,
+					.conf0 = NVC0_DMA_CONF0_ENABLE | 0xfe,
+				 }, sizeof(struct nv_dma_class), &object);
+	return ret;
+}
+
+static int
+nvd0_dmac_create_fbdma(struct nouveau_object *core, u32 parent)
+{
+	struct nouveau_fb *pfb = nouveau_fb(core);
+	struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+	struct nouveau_object *object;
+	int ret = nouveau_object_new(client, parent, NvEvoVRAM_LP,
+				     NV_DMA_IN_MEMORY_CLASS,
+				     &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = 0,
+					.limit = pfb->ram.size - 1,
+					.conf0 = NVD0_DMA_CONF0_ENABLE |
+						 NVD0_DMA_CONF0_PAGE_LP,
+				     }, sizeof(struct nv_dma_class), &object);
+	if (ret)
+		return ret;
+
+	ret = nouveau_object_new(client, parent, NvEvoFB32,
+				 NV_DMA_IN_MEMORY_CLASS,
+				 &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = 0,
+					.limit = pfb->ram.size - 1,
+					.conf0 = NVD0_DMA_CONF0_ENABLE | 0xfe |
+						 NVD0_DMA_CONF0_PAGE_LP,
+				 }, sizeof(struct nv_dma_class), &object);
+	return ret;
+}
+
+static int
+nv50_dmac_create(struct nouveau_object *core, u32 bclass, u8 head,
+		 void *data, u32 size, u64 syncbuf,
+		 struct nv50_dmac *dmac)
+{
+	struct nouveau_fb *pfb = nouveau_fb(core);
+	struct nouveau_object *client = nv_pclass(core, NV_CLIENT_CLASS);
+	struct nouveau_object *object;
+	u32 pushbuf = *(u32 *)data;
+	int ret;
+
+	dmac->ptr = pci_alloc_consistent(nv_device(core)->pdev, PAGE_SIZE,
+					 &dmac->handle);
+	if (!dmac->ptr)
+		return -ENOMEM;
+
+	ret = nouveau_object_new(client, NVDRM_DEVICE, pushbuf,
+				 NV_DMA_FROM_MEMORY_CLASS,
+				 &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_PCI_US |
+						 NV_DMA_ACCESS_RD,
+					.start = dmac->handle + 0x0000,
+					.limit = dmac->handle + 0x0fff,
+				 }, sizeof(struct nv_dma_class), &object);
+	if (ret)
+		return ret;
+
+	ret = nv50_chan_create(core, bclass, head, data, size, &dmac->base);
+	if (ret)
+		return ret;
+
+	ret = nouveau_object_new(client, dmac->base.handle, NvEvoSync,
+				 NV_DMA_IN_MEMORY_CLASS,
+				 &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = syncbuf + 0x0000,
+					.limit = syncbuf + 0x0fff,
+				 }, sizeof(struct nv_dma_class), &object);
+	if (ret)
+		return ret;
+
+	ret = nouveau_object_new(client, dmac->base.handle, NvEvoVRAM,
+				 NV_DMA_IN_MEMORY_CLASS,
+				 &(struct nv_dma_class) {
+					.flags = NV_DMA_TARGET_VRAM |
+						 NV_DMA_ACCESS_RDWR,
+					.start = 0,
+					.limit = pfb->ram.size - 1,
+				 }, sizeof(struct nv_dma_class), &object);
+	if (ret)
+		return ret;
+
+	if (nv_device(core)->card_type < NV_C0)
+		ret = nv50_dmac_create_fbdma(core, dmac->base.handle);
+	else
+	if (nv_device(core)->card_type < NV_D0)
+		ret = nvc0_dmac_create_fbdma(core, dmac->base.handle);
+	else
+		ret = nvd0_dmac_create_fbdma(core, dmac->base.handle);
+	return ret;
+}
-
-int
-nv50_display_create(struct drm_device *dev)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct dcb_table *dcb = &drm->vbios.dcb;
-	struct drm_connector *connector, *ct;
-	struct nv50_display *priv;
-	int ret, i;
-
-	NV_DEBUG(drm, "\n");
-
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-
-	nouveau_display(dev)->priv = priv;
-	nouveau_display(dev)->dtor = nv50_display_destroy;
-	nouveau_display(dev)->init = nv50_display_init;
-	nouveau_display(dev)->fini = nv50_display_fini;
-
-	/* Create CRTC objects */
-	for (i = 0; i < 2; i++) {
-		ret = nv50_crtc_create(dev, i);
-		if (ret)
-			return ret;
-	}
-
-	/* We setup the encoders from the BIOS table */
-	for (i = 0 ; i < dcb->entries; i++) {
-		struct dcb_output *entry = &dcb->entry[i];
-
-		if (entry->location != DCB_LOC_ON_CHIP) {
-			NV_WARN(drm, "Off-chip encoder %d/%d unsupported\n",
-				entry->type, ffs(entry->or) - 1);
-			continue;
-		}
-
-		connector = nouveau_connector_create(dev, entry->connector);
-		if (IS_ERR(connector))
-			continue;
-
-		switch (entry->type) {
-		case DCB_OUTPUT_TMDS:
-		case DCB_OUTPUT_LVDS:
-		case DCB_OUTPUT_DP:
-			nv50_sor_create(connector, entry);
-			break;
-		case DCB_OUTPUT_ANALOG:
-			nv50_dac_create(connector, entry);
-			break;
-		default:
-			NV_WARN(drm, "DCB encoder %d unknown\n", entry->type);
-			continue;
-		}
-	}
-
-	list_for_each_entry_safe(connector, ct,
-				 &dev->mode_config.connector_list, head) {
-		if (!connector->encoder_ids[0]) {
-			NV_WARN(drm, "%s has no encoders, removing\n",
-				drm_get_connector_name(connector));
-			connector->funcs->destroy(connector);
-		}
-	}
-
-	tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev);
-
-	ret = nv50_evo_create(dev);
-	if (ret) {
-		nv50_display_destroy(dev);
-		return ret;
-	}
-
-	return 0;
-}
-
-void
-nv50_display_destroy(struct drm_device *dev)
-{
-	struct nv50_display *disp = nv50_display(dev);
-
-	nv50_evo_destroy(dev);
-	kfree(disp);
-}
-
+
+struct nv50_mast {
+	struct nv50_dmac base;
+};
+
+struct nv50_curs {
+	struct nv50_pioc base;
+};
+
+struct nv50_sync {
+	struct nv50_dmac base;
+	struct {
+		u32 offset;
+		u16 value;
+	} sem;
+};
+
+struct nv50_ovly {
+	struct nv50_dmac base;
+};
+
+struct nv50_oimm {
+	struct nv50_pioc base;
+};
+
+struct nv50_head {
+	struct nouveau_crtc base;
+	struct nv50_curs curs;
+	struct nv50_sync sync;
+	struct nv50_ovly ovly;
+	struct nv50_oimm oimm;
+};
+
+#define nv50_head(c) ((struct nv50_head *)nouveau_crtc(c))
+#define nv50_curs(c) (&nv50_head(c)->curs)
+#define nv50_sync(c) (&nv50_head(c)->sync)
+#define nv50_ovly(c) (&nv50_head(c)->ovly)
+#define nv50_oimm(c) (&nv50_head(c)->oimm)
+#define nv50_chan(c) (&(c)->base.base)
+#define nv50_vers(c) nv_mclass(nv50_chan(c)->user)
+
+struct nv50_disp {
+	struct nouveau_object *core;
+	struct nv50_mast mast;
+
+	u32 modeset;
+
+	struct nouveau_bo *sync;
+};
+
+static struct nv50_disp *
+nv50_disp(struct drm_device *dev)
+{
+	return nouveau_display(dev)->priv;
+}
+
+#define nv50_mast(d) (&nv50_disp(d)->mast)
+
+static struct drm_crtc *
+nv50_display_crtc_get(struct drm_encoder *encoder)
+{
+	return nouveau_encoder(encoder)->crtc;
+}
+
+/******************************************************************************
+ * EVO channel helpers
+ *****************************************************************************/
+static u32 *
+evo_wait(void *evoc, int nr)
+{
+	struct nv50_dmac *dmac = evoc;
+	u32 put = nv_ro32(dmac->base.user, 0x0000) / 4;
+
+	if (put + nr >= (PAGE_SIZE / 4) - 8) {
+		dmac->ptr[put] = 0x20000000;
+
+		nv_wo32(dmac->base.user, 0x0000, 0x00000000);
+		if (!nv_wait(dmac->base.user, 0x0004, ~0, 0x00000000)) {
+			NV_ERROR(dmac->base.user, "channel stalled\n");
+			return NULL;
+		}
+
+		put = 0;
+	}
+
+	return dmac->ptr + put;
+}
+
+static void
+evo_kick(u32 *push, void *evoc)
+{
+	struct nv50_dmac *dmac = evoc;
+	nv_wo32(dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
+}
+
+#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
+#define evo_data(p,d) *((p)++) = (d)
+
+static bool
+evo_sync_wait(void *data)
+{
+	return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000;
+}
+
+static int
+evo_sync(struct drm_device *dev)
+{
+	struct nouveau_device *device = nouveau_dev(dev);
+	struct nv50_disp *disp = nv50_disp(dev);
+	struct nv50_mast *mast = nv50_mast(dev);
+	u32 *push = evo_wait(mast, 8);
+	if (push) {
+		nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
+		evo_mthd(push, 0x0084, 1);
+		evo_data(push, 0x80000000 | EVO_MAST_NTFY);
+		evo_mthd(push, 0x0080, 2);
+		evo_data(push, 0x00000000);
+		evo_data(push, 0x00000000);
+		evo_kick(push, mast);
+		if (nv_wait_cb(device, evo_sync_wait, disp->sync))
+			return 0;
+	}
+
+	return -EBUSY;
+}
+
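The four helpers above (evo_wait(), evo_mthd(), evo_data(), evo_kick()) are the entire push-buffer interface the rest of the new code is written against: evo_wait() reserves `nr` dwords in the channel's DMA page (wrapping with a jump token when near the end), the two macros append a method header and its arguments, and evo_kick() advances the PUT pointer so the hardware consumes the words. A minimal sketch of the calling pattern, using a hypothetical helper name but the real "update" method (0x0080) issued throughout this file:

	static int
	example_evo_update(struct nv50_mast *mast)
	{
		u32 *push = evo_wait(mast, 2);	/* reserve 2 dwords, NULL on stall */
		if (!push)
			return -EBUSY;
		evo_mthd(push, 0x0080, 1);	/* method 0x0080, 1 data dword */
		evo_data(push, 0x00000000);	/* commit pending display state */
		evo_kick(push, mast);		/* publish via the PUT pointer */
		return 0;
	}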
-struct nouveau_bo *
-nv50_display_crtc_sema(struct drm_device *dev, int crtc)
-{
-	return nv50_display(dev)->crtc[crtc].sem.bo;
-}
-
-void
-nv50_display_flip_stop(struct drm_crtc *crtc)
-{
-	struct nv50_display *disp = nv50_display(crtc->dev);
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
-	struct nouveau_channel *evo = dispc->sync;
-	int ret;
-
-	ret = RING_SPACE(evo, 8);
-	if (ret) {
-		WARN_ON(1);
-		return;
-	}
-
-	BEGIN_NV04(evo, 0, 0x0084, 1);
-	OUT_RING (evo, 0x00000000);
-	BEGIN_NV04(evo, 0, 0x0094, 1);
-	OUT_RING (evo, 0x00000000);
-	BEGIN_NV04(evo, 0, 0x00c0, 1);
-	OUT_RING (evo, 0x00000000);
-	BEGIN_NV04(evo, 0, 0x0080, 1);
-	OUT_RING (evo, 0x00000000);
-	FIRE_RING (evo);
-}
-
-int
-nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
-		       struct nouveau_channel *chan)
-{
-	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
-	struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
-	struct nv50_display *disp = nv50_display(crtc->dev);
-	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
-	struct nv50_display_crtc *dispc = &disp->crtc[nv_crtc->index];
-	struct nouveau_channel *evo = dispc->sync;
-	int ret;
-
-	ret = RING_SPACE(evo, chan ? 25 : 27);
-	if (unlikely(ret))
-		return ret;
-
-	/* synchronise with the rendering channel, if necessary */
-	if (likely(chan)) {
-		ret = RING_SPACE(chan, 10);
-		if (ret) {
-			WIND_RING(evo);
-			return ret;
-		}
-
-		if (nv_device(drm->device)->chipset < 0xc0) {
-			BEGIN_NV04(chan, 0, 0x0060, 2);
-			OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
-			OUT_RING (chan, dispc->sem.offset);
-			BEGIN_NV04(chan, 0, 0x006c, 1);
-			OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
-			BEGIN_NV04(chan, 0, 0x0064, 2);
-			OUT_RING (chan, dispc->sem.offset ^ 0x10);
-			OUT_RING (chan, 0x74b1e000);
-			BEGIN_NV04(chan, 0, 0x0060, 1);
-			if (nv_device(drm->device)->chipset < 0x84)
-				OUT_RING (chan, NvSema);
-			else
-				OUT_RING (chan, chan->vram);
-		} else {
-			u64 offset = nvc0_fence_crtc(chan, nv_crtc->index);
-			offset += dispc->sem.offset;
-			BEGIN_NVC0(chan, 0, 0x0010, 4);
-			OUT_RING (chan, upper_32_bits(offset));
-			OUT_RING (chan, lower_32_bits(offset));
-			OUT_RING (chan, 0xf00d0000 | dispc->sem.value);
-			OUT_RING (chan, 0x1002);
-			BEGIN_NVC0(chan, 0, 0x0010, 4);
-			OUT_RING (chan, upper_32_bits(offset));
-			OUT_RING (chan, lower_32_bits(offset ^ 0x10));
-			OUT_RING (chan, 0x74b1e000);
-			OUT_RING (chan, 0x1001);
-		}
-		FIRE_RING (chan);
-	} else {
-		nouveau_bo_wr32(dispc->sem.bo, dispc->sem.offset / 4,
-				0xf00d0000 | dispc->sem.value);
-	}
-
-	/* queue the flip on the crtc's "display sync" channel */
-	BEGIN_NV04(evo, 0, 0x0100, 1);
-	OUT_RING (evo, 0xfffe0000);
-	if (chan) {
-		BEGIN_NV04(evo, 0, 0x0084, 1);
-		OUT_RING (evo, 0x00000100);
-	} else {
-		BEGIN_NV04(evo, 0, 0x0084, 1);
-		OUT_RING (evo, 0x00000010);
-		/* allows gamma somehow, PDISP will bitch at you if
-		 * you don't wait for vblank before changing this..
-		 */
-		BEGIN_NV04(evo, 0, 0x00e0, 1);
-		OUT_RING (evo, 0x40000000);
-	}
-	BEGIN_NV04(evo, 0, 0x0088, 4);
-	OUT_RING (evo, dispc->sem.offset);
-	OUT_RING (evo, 0xf00d0000 | dispc->sem.value);
-	OUT_RING (evo, 0x74b1e000);
-	OUT_RING (evo, NvEvoSync);
-	BEGIN_NV04(evo, 0, 0x00a0, 2);
-	OUT_RING (evo, 0x00000000);
-	OUT_RING (evo, 0x00000000);
-	BEGIN_NV04(evo, 0, 0x00c0, 1);
-	OUT_RING (evo, nv_fb->r_dma);
-	BEGIN_NV04(evo, 0, 0x0110, 2);
-	OUT_RING (evo, 0x00000000);
-	OUT_RING (evo, 0x00000000);
-	BEGIN_NV04(evo, 0, 0x0800, 5);
-	OUT_RING (evo, nv_fb->nvbo->bo.offset >> 8);
-	OUT_RING (evo, 0);
-	OUT_RING (evo, (fb->height << 16) | fb->width);
-	OUT_RING (evo, nv_fb->r_pitch);
-	OUT_RING (evo, nv_fb->r_format);
-	BEGIN_NV04(evo, 0, 0x0080, 1);
-	OUT_RING (evo, 0x00000000);
-	FIRE_RING (evo);
-
-	dispc->sem.offset ^= 0x10;
-	dispc->sem.value++;
-	return 0;
-}
+/******************************************************************************
+ * Page flipping channel
+ *****************************************************************************/
+struct nouveau_bo *
+nv50_display_crtc_sema(struct drm_device *dev, int crtc)
+{
+	return nv50_disp(dev)->sync;
+}
+
+void
+nv50_display_flip_stop(struct drm_crtc *crtc)
+{
+	struct nv50_sync *sync = nv50_sync(crtc);
+	u32 *push;
+
+	push = evo_wait(sync, 8);
+	if (push) {
+		evo_mthd(push, 0x0084, 1);
+		evo_data(push, 0x00000000);
+		evo_mthd(push, 0x0094, 1);
+		evo_data(push, 0x00000000);
+		evo_mthd(push, 0x00c0, 1);
+		evo_data(push, 0x00000000);
+		evo_mthd(push, 0x0080, 1);
+		evo_data(push, 0x00000000);
+		evo_kick(push, sync);
+	}
+}
+
+int
+nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+		       struct nouveau_channel *chan, u32 swap_interval)
+{
+	struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
+	struct nv50_disp *disp = nv50_disp(crtc->dev);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nv50_sync *sync = nv50_sync(crtc);
+	u32 *push;
+	int ret;
+
+	swap_interval <<= 4;
+	if (swap_interval == 0)
+		swap_interval |= 0x100;
+
+	push = evo_wait(sync, 128);
+	if (unlikely(push == NULL))
+		return -EBUSY;
+
+	/* synchronise with the rendering channel, if necessary */
+	if (likely(chan)) {
+		ret = RING_SPACE(chan, 10);
+		if (ret)
+			return ret;
+
+		if (nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) {
+			BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
+			OUT_RING (chan, NvEvoSema0 + nv_crtc->index);
+			OUT_RING (chan, sync->sem.offset);
+			BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
+			OUT_RING (chan, 0xf00d0000 | sync->sem.value);
+			BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_OFFSET, 2);
+			OUT_RING (chan, sync->sem.offset ^ 0x10);
+			OUT_RING (chan, 0x74b1e000);
+			BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
+			if (nv_mclass(chan->object) < NV84_CHANNEL_DMA_CLASS)
+				OUT_RING (chan, NvSema);
+			else
+				OUT_RING (chan, chan->vram);
+		} else {
+			u64 offset = nvc0_fence_crtc(chan, nv_crtc->index);
+			offset += sync->sem.offset;
+
+			BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+			OUT_RING (chan, upper_32_bits(offset));
+			OUT_RING (chan, lower_32_bits(offset));
+			OUT_RING (chan, 0xf00d0000 | sync->sem.value);
+			OUT_RING (chan, 0x1002);
+			BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
+			OUT_RING (chan, upper_32_bits(offset));
+			OUT_RING (chan, lower_32_bits(offset ^ 0x10));
+			OUT_RING (chan, 0x74b1e000);
+			OUT_RING (chan, 0x1001);
+		}
+
+		FIRE_RING (chan);
+	} else {
+		nouveau_bo_wr32(disp->sync, sync->sem.offset / 4,
+				0xf00d0000 | sync->sem.value);
+		evo_sync(crtc->dev);
+	}
+
+	/* queue the flip */
+	evo_mthd(push, 0x0100, 1);
+	evo_data(push, 0xfffe0000);
+	evo_mthd(push, 0x0084, 1);
+	evo_data(push, swap_interval);
+	if (!(swap_interval & 0x00000100)) {
+		evo_mthd(push, 0x00e0, 1);
+		evo_data(push, 0x40000000);
+	}
+	evo_mthd(push, 0x0088, 4);
+	evo_data(push, sync->sem.offset);
+	evo_data(push, 0xf00d0000 | sync->sem.value);
+	evo_data(push, 0x74b1e000);
+	evo_data(push, NvEvoSync);
+	evo_mthd(push, 0x00a0, 2);
+	evo_data(push, 0x00000000);
+	evo_data(push, 0x00000000);
+	evo_mthd(push, 0x00c0, 1);
+	evo_data(push, nv_fb->r_dma);
+	evo_mthd(push, 0x0110, 2);
+	evo_data(push, 0x00000000);
+	evo_data(push, 0x00000000);
+	if (nv50_vers(sync) < NVD0_DISP_SYNC_CLASS) {
+		evo_mthd(push, 0x0800, 5);
+		evo_data(push, nv_fb->nvbo->bo.offset >> 8);
+		evo_data(push, 0);
+		evo_data(push, (fb->height << 16) | fb->width);
+		evo_data(push, nv_fb->r_pitch);
+		evo_data(push, nv_fb->r_format);
+	} else {
+		evo_mthd(push, 0x0400, 5);
+		evo_data(push, nv_fb->nvbo->bo.offset >> 8);
+		evo_data(push, 0);
+		evo_data(push, (fb->height << 16) | fb->width);
+		evo_data(push, nv_fb->r_pitch);
+		evo_data(push, nv_fb->r_format);
+	}
+	evo_mthd(push, 0x0080, 1);
+	evo_data(push, 0x00000000);
+	evo_kick(push, sync);
+
+	sync->sem.offset ^= 0x10;
+	sync->sem.value++;
+	return 0;
+}
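Two details of the new flip path are easy to miss. The semaphore area is a pair of 16-byte slots used in ping-pong fashion: the renderer (or the CPU fallback) releases 0xf00d0000 | sync->sem.value into the current slot, EVO acquires that value before flipping, then writes 0x74b1e000 into the other slot; the trailing `sync->sem.offset ^= 0x10; sync->sem.value++;` swaps the slots' roles for the next flip. The swap interval is likewise packed before being sent as method 0x0084; a sketch of just that encoding, under the assumption (from the code above) that bits 7:4 carry the interval and bit 8 means "don't wait for vblank":

	static u32
	example_swap_interval_bits(u32 swap_interval)
	{
		swap_interval <<= 4;		/* interval in bits 7:4 */
		if (swap_interval == 0)
			swap_interval |= 0x100;	/* tear immediately */
		return swap_interval;		/* 1 -> 0x010, 0 -> 0x100 */
	}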
-
-static u16
-nv50_display_script_select(struct drm_device *dev, struct dcb_output *dcb,
-			   u32 mc, int pxclk)
-{
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_connector *nv_connector = NULL;
-	struct drm_encoder *encoder;
-	struct nvbios *bios = &drm->vbios;
-	u32 script = 0, or;
-
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-
-		if (nv_encoder->dcb != dcb)
-			continue;
-
-		nv_connector = nouveau_encoder_connector_get(nv_encoder);
-		break;
-	}
-
-	or = ffs(dcb->or) - 1;
-	switch (dcb->type) {
-	case DCB_OUTPUT_LVDS:
-		script = (mc >> 8) & 0xf;
-		if (bios->fp_no_ddc) {
-			if (bios->fp.dual_link)
-				script |= 0x0100;
-			if (bios->fp.if_is_24bit)
-				script |= 0x0200;
-		} else {
-			/* determine number of lvds links */
-			if (nv_connector && nv_connector->edid &&
-			    nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
-				/* http://www.spwg.org */
-				if (((u8 *)nv_connector->edid)[121] == 2)
-					script |= 0x0100;
-			} else
-			if (pxclk >= bios->fp.duallink_transition_clk) {
-				script |= 0x0100;
-			}
-
-			/* determine panel depth */
-			if (script & 0x0100) {
-				if (bios->fp.strapless_is_24bit & 2)
-					script |= 0x0200;
-			} else {
-				if (bios->fp.strapless_is_24bit & 1)
-					script |= 0x0200;
-			}
-
-			if (nv_connector && nv_connector->edid &&
-			    (nv_connector->edid->revision >= 4) &&
-			    (nv_connector->edid->input & 0x70) >= 0x20)
-				script |= 0x0200;
-		}
-		break;
-	case DCB_OUTPUT_TMDS:
-		script = (mc >> 8) & 0xf;
-		if (pxclk >= 165000)
-			script |= 0x0100;
-		break;
-	case DCB_OUTPUT_DP:
-		script = (mc >> 8) & 0xf;
-		break;
-	case DCB_OUTPUT_ANALOG:
-		script = 0xff;
-		break;
-	default:
-		NV_ERROR(drm, "modeset on unsupported output type!\n");
-		break;
-	}
-
-	return script;
-}
-
+
+/******************************************************************************
+ * CRTC
+ *****************************************************************************/
+static int
+nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
+{
+	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+	struct nouveau_connector *nv_connector;
+	struct drm_connector *connector;
+	u32 *push, mode = 0x00;
+
+	nv_connector = nouveau_crtc_connector_get(nv_crtc);
+	connector = &nv_connector->base;
+	if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
+		if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
+			mode = DITHERING_MODE_DYNAMIC2X2;
+	} else {
+		mode = nv_connector->dithering_mode;
+	}
+
+	if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
+		if (connector->display_info.bpc >= 8)
+			mode |= DITHERING_DEPTH_8BPC;
+	} else {
+		mode |= nv_connector->dithering_depth;
+	}
+
+	push = evo_wait(mast, 4);
+	if (push) {
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x08a0 + (nv_crtc->index * 0x0400), 1);
+			evo_data(push, mode);
+		} else
+		if (nv50_vers(mast) < NVE0_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0490 + (nv_crtc->index * 0x0300), 1);
+			evo_data(push, mode);
+		} else {
+			evo_mthd(push, 0x04a0 + (nv_crtc->index * 0x0300), 1);
+			evo_data(push, mode);
+		}
+
+		if (update) {
+			evo_mthd(push, 0x0080, 1);
+			evo_data(push, 0x00000000);
+		}
+		evo_kick(push, mast);
+	}
+
+	return 0;
+}
+
+static int
+nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
+{
+	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+	struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
+	struct drm_crtc *crtc = &nv_crtc->base;
+	struct nouveau_connector *nv_connector;
+	int mode = DRM_MODE_SCALE_NONE;
+	u32 oX, oY, *push;
+
+	/* start off at the resolution we programmed the crtc for, this
+	 * effectively handles NONE/FULL scaling
+	 */
+	nv_connector = nouveau_crtc_connector_get(nv_crtc);
+	if (nv_connector && nv_connector->native_mode)
+		mode = nv_connector->scaling_mode;
+
+	if (mode != DRM_MODE_SCALE_NONE)
+		omode = nv_connector->native_mode;
+	else
+		omode = umode;
+
+	oX = omode->hdisplay;
+	oY = omode->vdisplay;
+	if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
+		oY *= 2;
+
+	/* add overscan compensation if necessary, will keep the aspect
+	 * ratio the same as the backend mode unless overridden by the
+	 * user setting both hborder and vborder properties.
+	 */
+	if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
+			     (nv_connector->underscan == UNDERSCAN_AUTO &&
+			      nv_connector->edid &&
+			      drm_detect_hdmi_monitor(nv_connector->edid)))) {
+		u32 bX = nv_connector->underscan_hborder;
+		u32 bY = nv_connector->underscan_vborder;
+		u32 aspect = (oY << 19) / oX;
+
+		if (bX) {
+			oX -= (bX * 2);
+			if (bY) oY -= (bY * 2);
+			else oY = ((oX * aspect) + (aspect / 2)) >> 19;
+		} else {
+			oX -= (oX >> 4) + 32;
+			if (bY) oY -= (bY * 2);
+			else oY = ((oX * aspect) + (aspect / 2)) >> 19;
+		}
+	}
+
+	/* handle CENTER/ASPECT scaling, taking into account the areas
+	 * removed already for overscan compensation
+	 */
+	switch (mode) {
+	case DRM_MODE_SCALE_CENTER:
+		oX = min((u32)umode->hdisplay, oX);
+		oY = min((u32)umode->vdisplay, oY);
+		/* fall-through */
+	case DRM_MODE_SCALE_ASPECT:
+		if (oY < oX) {
+			u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
+			oX = ((oY * aspect) + (aspect / 2)) >> 19;
+		} else {
+			u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
+			oY = ((oX * aspect) + (aspect / 2)) >> 19;
+		}
+		break;
+	default:
+		break;
+	}
+
+	push = evo_wait(mast, 8);
+	if (push) {
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			/*XXX: SCALE_CTRL_ACTIVE??? */
+			evo_mthd(push, 0x08d8 + (nv_crtc->index * 0x400), 2);
+			evo_data(push, (oY << 16) | oX);
+			evo_data(push, (oY << 16) | oX);
+			evo_mthd(push, 0x08a4 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x00000000);
+			evo_mthd(push, 0x08c8 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, umode->vdisplay << 16 | umode->hdisplay);
+		} else {
+			evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
+			evo_data(push, (oY << 16) | oX);
+			evo_data(push, (oY << 16) | oX);
+			evo_data(push, (oY << 16) | oX);
+			evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, 0x00000000);
+			evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, umode->vdisplay << 16 | umode->hdisplay);
+		}
+
+		evo_kick(push, mast);
+
+		if (update) {
+			nv50_display_flip_stop(crtc);
+			nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+		}
+	}
+
+	return 0;
+}
+
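nv50_crtc_set_scale() does its aspect arithmetic in unsigned .19 fixed point. A worked example, assuming a hypothetical 1920x1200 native panel mode showing a 1280x1024 user mode with DRM_MODE_SCALE_ASPECT:

	/* oX/oY start at the native mode, so oY (1200) < oX (1920) */
	u32 aspect = (1280 << 19) / 1024;	/* 655360 == 1.25 in .19 fixed point */
	u32 oX = ((1200 * aspect) + (aspect / 2)) >> 19;	/* rounds to 1500 */
	/* output window becomes 1500x1200: 1500/1200 == 1280/1024 == 1.25 */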
-static void
-nv50_display_unk10_handler(struct drm_device *dev)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nv50_display *disp = nv50_display(dev);
-	u32 unk30 = nv_rd32(device, 0x610030), mc;
-	int i, crtc, or = 0, type = DCB_OUTPUT_ANY;
-
-	NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
-	disp->irq.dcb = NULL;
-
-	nv_wr32(device, 0x619494, nv_rd32(device, 0x619494) & ~8);
-
-	/* Determine which CRTC we're dealing with, only 1 ever will be
-	 * signalled at the same time with the current nouveau code.
-	 */
-	crtc = ffs((unk30 & 0x00000060) >> 5) - 1;
-	if (crtc < 0)
-		goto ack;
-
-	/* Nothing needs to be done for the encoder */
-	crtc = ffs((unk30 & 0x00000180) >> 7) - 1;
-	if (crtc < 0)
-		goto ack;
-
-	/* Find which encoder was connected to the CRTC */
-	for (i = 0; type == DCB_OUTPUT_ANY && i < 3; i++) {
-		mc = nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
-		NV_DEBUG(drm, "DAC-%d mc: 0x%08x\n", i, mc);
-		if (!(mc & (1 << crtc)))
-			continue;
-
-		switch ((mc & 0x00000f00) >> 8) {
-		case 0: type = DCB_OUTPUT_ANALOG; break;
-		case 1: type = DCB_OUTPUT_TV; break;
-		default:
-			NV_ERROR(drm, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
-			goto ack;
-		}
-
-		or = i;
-	}
-
-	for (i = 0; type == DCB_OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
-		if (nv_device(drm->device)->chipset < 0x90 ||
-		    nv_device(drm->device)->chipset == 0x92 ||
-		    nv_device(drm->device)->chipset == 0xa0)
-			mc = nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
-		else
-			mc = nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
-
-		NV_DEBUG(drm, "SOR-%d mc: 0x%08x\n", i, mc);
-		if (!(mc & (1 << crtc)))
-			continue;
-
-		switch ((mc & 0x00000f00) >> 8) {
-		case 0: type = DCB_OUTPUT_LVDS; break;
-		case 1: type = DCB_OUTPUT_TMDS; break;
-		case 2: type = DCB_OUTPUT_TMDS; break;
-		case 5: type = DCB_OUTPUT_TMDS; break;
-		case 8: type = DCB_OUTPUT_DP; break;
-		case 9: type = DCB_OUTPUT_DP; break;
-		default:
-			NV_ERROR(drm, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
-			goto ack;
-		}
-
-		or = i;
-	}
-
-	/* There was no encoder to disable */
-	if (type == DCB_OUTPUT_ANY)
-		goto ack;
-
-	/* Disable the encoder */
-	for (i = 0; i < drm->vbios.dcb.entries; i++) {
-		struct dcb_output *dcb = &drm->vbios.dcb.entry[i];
-
-		if (dcb->type == type && (dcb->or & (1 << or))) {
-			nouveau_bios_run_display_table(dev, 0, -1, dcb, -1);
-			disp->irq.dcb = dcb;
-			goto ack;
-		}
-	}
-
-	NV_ERROR(drm, "no dcb for %d %d 0x%08x\n", or, type, mc);
-ack:
-	nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10);
-	nv_wr32(device, 0x610030, 0x80000000);
+static int
+nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
+{
+	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+	u32 *push, hue, vib;
+	int adj;
+
+	adj = (nv_crtc->color_vibrance > 0) ? 50 : 0;
+	vib = ((nv_crtc->color_vibrance * 2047 + adj) / 100) & 0xfff;
+	hue = ((nv_crtc->vibrant_hue * 2047) / 100) & 0xfff;
+
+	push = evo_wait(mast, 16);
+	if (push) {
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x08a8 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, (hue << 20) | (vib << 8));
+		} else {
+			evo_mthd(push, 0x0498 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, (hue << 20) | (vib << 8));
+		}
+
+		if (update) {
+			evo_mthd(push, 0x0080, 1);
+			evo_data(push, 0x00000000);
+		}
+		evo_kick(push, mast);
+	}
+
+	return 0;
+}
+
+static int
+nv50_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
+		    int x, int y, bool update)
+{
+	struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
+	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+	u32 *push;
+
+	push = evo_wait(mast, 16);
+	if (push) {
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0860 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, nvfb->nvbo->bo.offset >> 8);
+			evo_mthd(push, 0x0868 + (nv_crtc->index * 0x400), 3);
+			evo_data(push, (fb->height << 16) | fb->width);
+			evo_data(push, nvfb->r_pitch);
+			evo_data(push, nvfb->r_format);
+			evo_mthd(push, 0x08c0 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, (y << 16) | x);
+			if (nv50_vers(mast) > NV50_DISP_MAST_CLASS) {
+				evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+				evo_data(push, nvfb->r_dma);
+			}
+		} else {
+			evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, nvfb->nvbo->bo.offset >> 8);
+			evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4);
+			evo_data(push, (fb->height << 16) | fb->width);
+			evo_data(push, nvfb->r_pitch);
+			evo_data(push, nvfb->r_format);
+			evo_data(push, nvfb->r_dma);
+			evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, (y << 16) | x);
+		}
+
+		if (update) {
+			evo_mthd(push, 0x0080, 1);
+			evo_data(push, 0x00000000);
+		}
+		evo_kick(push, mast);
+	}
+
+	nv_crtc->fb.tile_flags = nvfb->r_dma;
+	return 0;
+}
+
+static void
+nv50_crtc_cursor_show(struct nouveau_crtc *nv_crtc)
+{
+	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+	u32 *push = evo_wait(mast, 16);
+	if (push) {
+		if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
+			evo_data(push, 0x85000000);
+			evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+		} else
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
+			evo_data(push, 0x85000000);
+			evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+			evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
+			evo_data(push, NvEvoVRAM);
+		} else {
+			evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
+			evo_data(push, 0x85000000);
+			evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+			evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
+			evo_data(push, NvEvoVRAM);
+		}
+		evo_kick(push, mast);
+	}
+}
+
+static void
+nv50_crtc_cursor_hide(struct nouveau_crtc *nv_crtc)
+{
+	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+	u32 *push = evo_wait(mast, 16);
+	if (push) {
+		if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x05000000);
+		} else
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x05000000);
+			evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x00000000);
+		} else {
+			evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, 0x05000000);
+			evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
+			evo_data(push, 0x00000000);
+		}
+		evo_kick(push, mast);
+	}
+}
+
+static void
+nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
+{
+	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+
+	if (show)
+		nv50_crtc_cursor_show(nv_crtc);
+	else
+		nv50_crtc_cursor_hide(nv_crtc);
+
+	if (update) {
+		u32 *push = evo_wait(mast, 2);
+		if (push) {
+			evo_mthd(push, 0x0080, 1);
+			evo_data(push, 0x00000000);
+			evo_kick(push, mast);
+		}
+	}
+}
+
+static void
+nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+}
+
+static void
+nv50_crtc_prepare(struct drm_crtc *crtc)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nv50_mast *mast = nv50_mast(crtc->dev);
+	u32 *push;
+
+	nv50_display_flip_stop(crtc);
+
+	push = evo_wait(mast, 2);
+	if (push) {
+		if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x00000000);
+			evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x40000000);
+		} else
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x00000000);
+			evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x40000000);
+			evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x00000000);
+		} else {
+			evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, 0x00000000);
+			evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, 0x03000000);
+			evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
+			evo_data(push, 0x00000000);
+		}
+
+		evo_kick(push, mast);
+	}
+
+	nv50_crtc_cursor_show_hide(nv_crtc, false, false);
+}
+
+static void
+nv50_crtc_commit(struct drm_crtc *crtc)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nv50_mast *mast = nv50_mast(crtc->dev);
+	u32 *push;
+
+	push = evo_wait(mast, 32);
+	if (push) {
+		if (nv50_vers(mast) < NV84_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, NvEvoVRAM_LP);
+			evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
+			evo_data(push, 0xc0000000);
+			evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+		} else
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0874 + (nv_crtc->index * 0x400), 1);
+			evo_data(push, nv_crtc->fb.tile_flags);
+			evo_mthd(push, 0x0840 + (nv_crtc->index * 0x400), 2);
+			evo_data(push, 0xc0000000);
+			evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+			evo_mthd(push, 0x085c + (nv_crtc->index * 0x400), 1);
+			evo_data(push, NvEvoVRAM);
+		} else {
+			evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, nv_crtc->fb.tile_flags);
+			evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
+			evo_data(push, 0x83000000);
+			evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
+			evo_data(push, 0x00000000);
+			evo_data(push, 0x00000000);
+			evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
+			evo_data(push, NvEvoVRAM);
+			evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
+			evo_data(push, 0xffffff00);
+		}
+
+		evo_kick(push, mast);
+	}
+
+	nv50_crtc_cursor_show_hide(nv_crtc, nv_crtc->cursor.visible, true);
+	nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+}
+
+static bool
+nv50_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
+		     struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static int
+nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
+{
+	struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
+	int ret;
+
+	ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
+	if (ret)
+		return ret;
+
+	if (old_fb) {
+		nvfb = nouveau_framebuffer(old_fb);
+		nouveau_bo_unpin(nvfb->nvbo);
+	}
+
+	return 0;
+}
+
+static int
+nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
+		   struct drm_display_mode *mode, int x, int y,
+		   struct drm_framebuffer *old_fb)
+{
+	struct nv50_mast *mast = nv50_mast(crtc->dev);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nouveau_connector *nv_connector;
+	u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
+	u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
+	u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
+	u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
+	u32 vblan2e = 0, vblan2s = 1;
+	u32 *push;
+	int ret;
+
+	hactive = mode->htotal;
+	hsynce  = mode->hsync_end - mode->hsync_start - 1;
+	hbackp  = mode->htotal - mode->hsync_end;
+	hblanke = hsynce + hbackp;
+	hfrontp = mode->hsync_start - mode->hdisplay;
+	hblanks = mode->htotal - hfrontp - 1;
+
+	vactive = mode->vtotal * vscan / ilace;
+	vsynce  = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
+	vbackp  = (mode->vtotal - mode->vsync_end) * vscan / ilace;
+	vblanke = vsynce + vbackp;
+	vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
+	vblanks = vactive - vfrontp - 1;
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		vblan2e = vactive + vsynce + vbackp;
+		vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
+		vactive = (vactive * 2) + 1;
+	}
+
+	ret = nv50_crtc_swap_fbs(crtc, old_fb);
+	if (ret)
+		return ret;
+
+	push = evo_wait(mast, 64);
+	if (push) {
+		if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
+			evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2);
+			evo_data(push, 0x00800000 | mode->clock);
+			evo_data(push, (ilace == 2) ? 2 : 0);
+			evo_mthd(push, 0x0810 + (nv_crtc->index * 0x400), 6);
+			evo_data(push, 0x00000000);
+			evo_data(push, (vactive << 16) | hactive);
+			evo_data(push, ( vsynce << 16) | hsynce);
+			evo_data(push, (vblanke << 16) | hblanke);
+			evo_data(push, (vblanks << 16) | hblanks);
+			evo_data(push, (vblan2e << 16) | vblan2s);
+			evo_mthd(push, 0x082c + (nv_crtc->index * 0x400), 1);
+			evo_data(push, 0x00000000);
+			evo_mthd(push, 0x0900 + (nv_crtc->index * 0x400), 2);
+			evo_data(push, 0x00000311);
+			evo_data(push, 0x00000100);
+		} else {
+			evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
+			evo_data(push, 0x00000000);
+			evo_data(push, (vactive << 16) | hactive);
+			evo_data(push, ( vsynce << 16) | hsynce);
+			evo_data(push, (vblanke << 16) | hblanke);
+			evo_data(push, (vblanks << 16) | hblanks);
+			evo_data(push, (vblan2e << 16) | vblan2s);
+			evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
+			evo_data(push, 0x00000000); /* ??? */
+			evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
+			evo_data(push, mode->clock * 1000);
+			evo_data(push, 0x00200000); /* ??? */
+			evo_data(push, mode->clock * 1000);
+			evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
+			evo_data(push, 0x00000311);
+			evo_data(push, 0x00000100);
+		}
+
+		evo_kick(push, mast);
+	}
+
+	nv_connector = nouveau_crtc_connector_get(nv_crtc);
+	nv50_crtc_set_dither(nv_crtc, false);
+	nv50_crtc_set_scale(nv_crtc, false);
+	nv50_crtc_set_color_vibrance(nv_crtc, false);
+	nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
+	return 0;
+}
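The blanking values programmed by nv50_crtc_mode_set() are distances measured back from the end of the sync pulse rather than the raw DRM timings. Plugging in a common 1920x1080@60 mode (htotal 2200, hsync 2008-2052, vtotal 1125, vsync 1084-1089, progressive) gives, following the expressions above:

	/* hactive = 2200 (htotal)
	 * hsynce  = 2052 - 2008 - 1 = 43
	 * hbackp  = 2200 - 2052     = 148
	 * hblanke = 43 + 148        = 191
	 * hfrontp = 2008 - 1920     = 88
	 * hblanks = 2200 - 88 - 1   = 2111
	 * vactive = 1125, vsynce = 4, vbackp = 36, vblanke = 40,
	 * vfrontp = 4, vblanks = 1120; vblan2e/vblan2s stay 0/1 (interlace only)
	 */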
-}
-
-static void
-nv50_display_unk20_handler(struct drm_device *dev)
-{
-	struct nouveau_device *device = nouveau_dev(dev);
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nv50_display *disp = nv50_display(dev);
-	u32 unk30 = nv_rd32(device, 0x610030), tmp, pclk, script, mc = 0;
-	struct dcb_output *dcb;
-	int i, crtc, or = 0, type = DCB_OUTPUT_ANY;
-
-	NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
-	dcb = disp->irq.dcb;
-	if (dcb) {
-		nouveau_bios_run_display_table(dev, 0, -2, dcb, -1);
-		disp->irq.dcb = NULL;
-	}
-
-	/* CRTC clock change requested? */
-	crtc = ffs((unk30 & 0x00000600) >> 9) - 1;
-	if (crtc >= 0) {
-		pclk = nv_rd32(device, NV50_PDISPLAY_CRTC_P(crtc, CLOCK));
-		pclk &= 0x003fffff;
-		if (pclk)
-			nv50_crtc_set_clock(dev, crtc, pclk);
-
-		tmp = nv_rd32(device, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc));
-		tmp &= ~0x000000f;
-		nv_wr32(device, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc), tmp);
-	}
-
-	/* Nothing needs to be done for the encoder */
-	crtc = ffs((unk30 & 0x00000180) >> 7) - 1;
-	if (crtc < 0)
-		goto ack;
-	pclk = nv_rd32(device, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)) & 0x003fffff;
-
-	/* Find which encoder is connected to the CRTC */
-	for (i = 0; type == DCB_OUTPUT_ANY && i < 3; i++) {
-		mc = nv_rd32(device, NV50_PDISPLAY_DAC_MODE_CTRL_P(i));
-		NV_DEBUG(drm, "DAC-%d mc: 0x%08x\n", i, mc);
-		if (!(mc & (1 << crtc)))
-			continue;
-
-		switch ((mc & 0x00000f00) >> 8) {
-		case 0: type = DCB_OUTPUT_ANALOG; break;
-		case 1: type = DCB_OUTPUT_TV; break;
-		default:
-			NV_ERROR(drm, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
-			goto ack;
+
+static int
+nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+			struct drm_framebuffer *old_fb)
+{
+	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	int ret;
+
+	if (!crtc->fb) {
+		NV_DEBUG(drm, "No FB bound\n");
+		return 0;
+	}
+
+	ret = nv50_crtc_swap_fbs(crtc, old_fb);
+	if (ret)
+		return ret;
+
+	nv50_display_flip_stop(crtc);
+	nv50_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
+	nv50_display_flip_next(crtc, crtc->fb, NULL, 1);
+	return 0;
+}
+
+static int
+nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
+			       struct drm_framebuffer *fb, int x, int y,
+			       enum mode_set_atomic state)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	nv50_display_flip_stop(crtc);
+	nv50_crtc_set_image(nv_crtc, fb, x, y, true);
+	return 0;
+}
+
+static void
+nv50_crtc_lut_load(struct drm_crtc *crtc)
+{
+	struct nv50_disp *disp = nv50_disp(crtc->dev);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
+	int i;
+
+	for (i = 0; i < 256; i++) {
+		u16 r = nv_crtc->lut.r[i] >> 2;
+		u16 g = nv_crtc->lut.g[i] >> 2;
+		u16 b = nv_crtc->lut.b[i] >> 2;
+
+		if (nv_mclass(disp->core) < NVD0_DISP_CLASS) {
+			writew(r + 0x0000, lut + (i * 0x08) + 0);
+			writew(g + 0x0000, lut + (i * 0x08) + 2);
+			writew(b + 0x0000, lut + (i * 0x08) + 4);
+		} else {
+			writew(r + 0x6000, lut + (i * 0x20) + 0);
+			writew(g + 0x6000, lut + (i * 0x20) + 2);
+			writew(b + 0x6000, lut + (i * 0x20) + 4);
+		}
+	}
+}
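nv50_crtc_lut_load() also documents the two gamma-table layouts in passing: pre-NVD0 display cores take 8-byte entries of three 14-bit components (hence the >> 2 from DRM's 16-bit values), while NVD0+ takes 32-byte entries with each component biased by 0x6000. A quick sanity check for the brightest entry, as a sketch:

	u16 v = 0xffff >> 2;	/* 16-bit 65535 -> 14-bit 0x3fff */
	/* pre-NVD0: written at lut + i * 0x08 (+0/+2/+4 for r/g/b) */
	/* NVD0+:    written at lut + i * 0x20, as 0x3fff + 0x6000  */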
-		}
-
-		or = i;
-	}
-
-	for (i = 0; type == DCB_OUTPUT_ANY && i < nv50_sor_nr(dev); i++) {
-		if (nv_device(drm->device)->chipset < 0x90 ||
-		    nv_device(drm->device)->chipset == 0x92 ||
-		    nv_device(drm->device)->chipset == 0xa0)
-			mc = nv_rd32(device, NV50_PDISPLAY_SOR_MODE_CTRL_P(i));
-		else
-			mc = nv_rd32(device, NV90_PDISPLAY_SOR_MODE_CTRL_P(i));
-
-		NV_DEBUG(drm, "SOR-%d mc: 0x%08x\n", i, mc);
-		if (!(mc & (1 << crtc)))
-			continue;
-
-		switch ((mc & 0x00000f00) >> 8) {
-		case 0: type = DCB_OUTPUT_LVDS; break;
-		case 1: type = DCB_OUTPUT_TMDS; break;
-		case 2: type = DCB_OUTPUT_TMDS; break;
-		case 5: type = DCB_OUTPUT_TMDS; break;
-		case 8: type = DCB_OUTPUT_DP; break;
-		case 9: type = DCB_OUTPUT_DP; break;
-		default:
-			NV_ERROR(drm, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
-			goto ack;
-		}
-
-		or = i;
-	}
-
-	if (type == DCB_OUTPUT_ANY)
-		goto ack;
-
-	/* Enable the encoder */
-	for (i = 0; i < drm->vbios.dcb.entries; i++) {
-		dcb = &drm->vbios.dcb.entry[i];
-		if (dcb->type == type && (dcb->or & (1 << or)))
-			break;
-	}
-
-	if (i == drm->vbios.dcb.entries) {
-		NV_ERROR(drm, "no dcb for %d %d 0x%08x\n", or, type, mc);
-		goto ack;
-	}
-
-	script = nv50_display_script_select(dev, dcb, mc, pclk);
-	nouveau_bios_run_display_table(dev, script, pclk, dcb, -1);
-
-	if (type == DCB_OUTPUT_DP) {
-		int link = !(dcb->dpconf.sor.link & 1);
-		if ((mc & 0x000f0000) == 0x00020000)
-			nv50_sor_dp_calc_tu(dev, or, link, pclk, 18);
-		else
-			nv50_sor_dp_calc_tu(dev, or, link, pclk, 24);
-	}
-
-	if (dcb->type != DCB_OUTPUT_ANALOG) {
-		tmp = nv_rd32(device, NV50_PDISPLAY_SOR_CLK_CTRL2(or));
-		tmp &= ~0x00000f0f;
-		if (script & 0x0100)
-			tmp |= 0x00000101;
-		nv_wr32(device, NV50_PDISPLAY_SOR_CLK_CTRL2(or), tmp);
-	} else {
-		nv_wr32(device, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
+
+static int
+nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
+		     uint32_t handle, uint32_t width, uint32_t height)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_gem_object *gem;
+	struct nouveau_bo *nvbo;
+	bool visible = (handle != 0);
+	int i, ret = 0;
+
+	if (visible) {
+		if (width != 64 || height != 64)
+			return -EINVAL;
+
+		gem = drm_gem_object_lookup(dev, file_priv, handle);
+		if (unlikely(!gem))
+			return -ENOENT;
+		nvbo = nouveau_gem_object(gem);
+
+		ret = nouveau_bo_map(nvbo);
+		if (ret == 0) {
+			for (i = 0; i < 64 * 64; i++) {
+				u32 v = nouveau_bo_rd32(nvbo, i);
+				nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v);
+			}
+			nouveau_bo_unmap(nvbo);
+		}
+
+		drm_gem_object_unreference_unlocked(gem);
+	}
+
+	if (visible != nv_crtc->cursor.visible) {
+		nv50_crtc_cursor_show_hide(nv_crtc, visible, true);
+		nv_crtc->cursor.visible = visible;
+	}
+
+	return ret;
+}
+
+static int
+nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+	struct nv50_curs *curs = nv50_curs(crtc);
+	struct nv50_chan *chan = nv50_chan(curs);
+	nv_wo32(chan->user, 0x0084, (y << 16) | (x & 0xffff));
+	nv_wo32(chan->user, 0x0080, 0x00000000);
+	return 0;
+}
+
+static void
+nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
+		    uint32_t start, uint32_t size)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	u32 end = max(start + size, (u32)256);
+	u32 i;
+
+	for (i = start; i < end; i++) {
+		nv_crtc->lut.r[i] = r[i];
+		nv_crtc->lut.g[i] = g[i];
+		nv_crtc->lut.b[i] = b[i];
+	}
+
+	nv50_crtc_lut_load(crtc);
+}
+
+static void
+nv50_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nv50_disp *disp = nv50_disp(crtc->dev);
+	struct nv50_head *head = nv50_head(crtc);
+	nv50_dmac_destroy(disp->core, &head->ovly.base);
+	nv50_pioc_destroy(disp->core, &head->oimm.base);
+	nv50_dmac_destroy(disp->core, &head->sync.base);
+	nv50_pioc_destroy(disp->core, &head->curs.base);
+	nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+	if (nv_crtc->cursor.nvbo)
+		nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+	nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+	nouveau_bo_unmap(nv_crtc->lut.nvbo);
+	if (nv_crtc->lut.nvbo)
+		nouveau_bo_unpin(nv_crtc->lut.nvbo);
+	nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
+	drm_crtc_cleanup(crtc);
+	kfree(crtc);
+}
+
+static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = {
+	.dpms = nv50_crtc_dpms,
+	.prepare = nv50_crtc_prepare,
+	.commit = nv50_crtc_commit,
+	.mode_fixup = nv50_crtc_mode_fixup,
+	.mode_set = nv50_crtc_mode_set,
+	.mode_set_base = nv50_crtc_mode_set_base,
+	.mode_set_base_atomic = nv50_crtc_mode_set_base_atomic,
+	.load_lut = nv50_crtc_lut_load,
+};
+
+static const struct drm_crtc_funcs nv50_crtc_func = {
+	.cursor_set = nv50_crtc_cursor_set,
+	.cursor_move = nv50_crtc_cursor_move,
+	.gamma_set = nv50_crtc_gamma_set,
+	.set_config = drm_crtc_helper_set_config,
+	.destroy = nv50_crtc_destroy,
+	.page_flip = nouveau_crtc_page_flip,
+};
+
+static void
+nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
+{
+}
+
+static void
+nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
+{
+}
+
+static int
+nv50_crtc_create(struct drm_device *dev, struct nouveau_object *core, int index)
+{
+	struct nv50_disp *disp = nv50_disp(dev);
+	struct nv50_head *head;
+	struct drm_crtc *crtc;
+	int ret, i;
+
+	head = kzalloc(sizeof(*head), GFP_KERNEL);
+	if (!head)
+		return -ENOMEM;
+
+	head->base.index = index;
+	head->base.set_dither = nv50_crtc_set_dither;
+	head->base.set_scale = nv50_crtc_set_scale;
+	head->base.set_color_vibrance = nv50_crtc_set_color_vibrance;
+	head->base.color_vibrance = 50;
+	head->base.vibrant_hue = 0;
+	head->base.cursor.set_offset = nv50_cursor_set_offset;
+	head->base.cursor.set_pos = nv50_cursor_set_pos;
+	for (i = 0; i < 256; i++) {
+		head->base.lut.r[i] = i << 8;
+		head->base.lut.g[i] = i << 8;
+		head->base.lut.b[i] = i << 8;
+	}
+
+	crtc = &head->base.base;
+	drm_crtc_init(dev, crtc, &nv50_crtc_func);
+	drm_crtc_helper_add(crtc, &nv50_crtc_hfunc);
+	drm_mode_crtc_set_gamma_size(crtc, 256);
+
+	ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
+			     0, 0x0000, NULL, &head->base.lut.nvbo);
+	if (!ret) {
+		ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM);
+		if (!ret) {
+			ret = nouveau_bo_map(head->base.lut.nvbo);
+			if (ret)
+				nouveau_bo_unpin(head->base.lut.nvbo);
+		}
+		if (ret)
+			nouveau_bo_ref(NULL, &head->base.lut.nvbo);
+	}
+
+	if (ret)
+		goto out;
+
+	nv50_crtc_lut_load(crtc);
+
+	/* allocate cursor resources */
+	ret = nv50_pioc_create(disp->core, NV50_DISP_CURS_CLASS, index,
+			      &(struct nv50_display_curs_class) {
+					.head = index,
+			      }, sizeof(struct nv50_display_curs_class),
+			      &head->curs.base);
+	if (ret)
+		goto out;
+
+	ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
+			     0, 0x0000, NULL, &head->base.cursor.nvbo);
+	if (!ret) {
+		ret = nouveau_bo_pin(head->base.cursor.nvbo, TTM_PL_FLAG_VRAM);
+		if (!ret) {
+			ret = nouveau_bo_map(head->base.cursor.nvbo);
+			if (ret)
+				nouveau_bo_unpin(head->base.lut.nvbo);
+		}
+		if (ret)
+			nouveau_bo_ref(NULL, &head->base.cursor.nvbo);
+	}
+
+	if (ret)
+		goto out;
+
+	/* allocate page flip / sync resources */
+	ret = nv50_dmac_create(disp->core, NV50_DISP_SYNC_CLASS, index,
+			      &(struct nv50_display_sync_class) {
+					.pushbuf = EVO_PUSH_HANDLE(SYNC, index),
+					.head = index,
+			      }, sizeof(struct nv50_display_sync_class),
+			      disp->sync->bo.offset, &head->sync.base);
+	if (ret)
+		goto out;
+
+	head->sync.sem.offset = EVO_SYNC(1 + index, 0x00);
+
+	/* allocate overlay resources */
+	ret = nv50_pioc_create(disp->core, NV50_DISP_OIMM_CLASS, index,
+			      &(struct nv50_display_oimm_class) {
+					.head = index,
+			      }, sizeof(struct nv50_display_oimm_class),
+			      &head->oimm.base);
+	if (ret)
+		goto out;
+
+	ret = nv50_dmac_create(disp->core, NV50_DISP_OVLY_CLASS, index,
+			      &(struct nv50_display_ovly_class) {
+					.pushbuf = EVO_PUSH_HANDLE(OVLY, index),
+					.head = index,
+			      }, sizeof(struct nv50_display_ovly_class),
+			      disp->sync->bo.offset, &head->ovly.base);
+	if (ret)
+		goto out;
+
+out:
+	if (ret)
+		nv50_crtc_destroy(crtc);
+	return ret;
+}
+
+/******************************************************************************
+ * DAC
+ *****************************************************************************/
+static void
+nv50_dac_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+	struct nv50_disp *disp = nv50_disp(encoder->dev);
+	int or = nv_encoder->or;
+	u32 dpms_ctrl;
+
+	dpms_ctrl = 0x00000000;
+	if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF)
+		dpms_ctrl |= 0x00000001;
+	if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF)
+		dpms_ctrl |= 0x00000004;
+
+	nv_call(disp->core, NV50_DISP_DAC_PWR + or, dpms_ctrl);
+}
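nv50_dac_dpms() collapses the four DRM DPMS levels into two bits of the NV50_DISP_DAC_PWR method argument; reading the conditions above, bit 0 presumably gates the hsync side (STANDBY) and bit 2 the vsync side (SUSPEND), with OFF asserting both:

	/* DRM_MODE_DPMS_ON      -> dpms_ctrl = 0x0
	 * DRM_MODE_DPMS_STANDBY -> dpms_ctrl = 0x1
	 * DRM_MODE_DPMS_SUSPEND -> dpms_ctrl = 0x4
	 * DRM_MODE_DPMS_OFF     -> dpms_ctrl = 0x5
	 */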
1401
1402static bool
1403nv50_dac_mode_fixup(struct drm_encoder *encoder,
1404 const struct drm_display_mode *mode,
1405 struct drm_display_mode *adjusted_mode)
1406{
1407 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1408 struct nouveau_connector *nv_connector;
1409
1410 nv_connector = nouveau_encoder_connector_get(nv_encoder);
1411 if (nv_connector && nv_connector->native_mode) {
1412 if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
1413 int id = adjusted_mode->base.id;
1414 *adjusted_mode = *nv_connector->native_mode;
1415 adjusted_mode->base.id = id;
1416 }
801 } 1417 }
802 1418
803 disp->irq.dcb = dcb; 1419 return true;
804 disp->irq.pclk = pclk; 1420}
805 disp->irq.script = script; 1421
1422static void
1423nv50_dac_commit(struct drm_encoder *encoder)
1424{
1425}
1426
1427static void
1428nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
1429 struct drm_display_mode *adjusted_mode)
1430{
1431 struct nv50_mast *mast = nv50_mast(encoder->dev);
1432 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1433 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
1434 u32 *push;
1435
1436 nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
1437
1438 push = evo_wait(mast, 8);
1439 if (push) {
1440 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
1441 u32 syncs = 0x00000000;
1442
1443 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
1444 syncs |= 0x00000001;
1445 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
1446 syncs |= 0x00000002;
1447
1448 evo_mthd(push, 0x0400 + (nv_encoder->or * 0x080), 2);
1449 evo_data(push, 1 << nv_crtc->index);
1450 evo_data(push, syncs);
1451 } else {
1452 u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
1453 u32 syncs = 0x00000001;
1454
1455 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
1456 syncs |= 0x00000008;
1457 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
1458 syncs |= 0x00000010;
1459
1460 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1461 magic |= 0x00000001;
1462
1463 evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
1464 evo_data(push, syncs);
1465 evo_data(push, magic);
1466 evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 1);
1467 evo_data(push, 1 << nv_crtc->index);
1468 }
1469
1470 evo_kick(push, mast);
1471 }
806 1472
807ack: 1473 nv_encoder->crtc = encoder->crtc;
808 nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
809 nv_wr32(device, 0x610030, 0x80000000);
810} 1474}
811 1475
812/* If programming a TMDS output on a SOR that can also be configured for
813 * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
814 *
815 * It looks like the VBIOS TMDS scripts make an attempt at this, however,
816 * the VBIOS scripts on at least one board I have only switch it off on
817 * link 0, causing a blank display if the output has previously been
818 * programmed for DisplayPort.
819 */
820static void 1476static void
821nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_output *dcb) 1477nv50_dac_disconnect(struct drm_encoder *encoder)
822{ 1478{
823 struct nouveau_device *device = nouveau_dev(dev); 1479 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
824 int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1); 1480 struct nv50_mast *mast = nv50_mast(encoder->dev);
1481 const int or = nv_encoder->or;
1482 u32 *push;
1483
1484 if (nv_encoder->crtc) {
1485 nv50_crtc_prepare(nv_encoder->crtc);
1486
1487 push = evo_wait(mast, 4);
1488 if (push) {
1489 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
1490 evo_mthd(push, 0x0400 + (or * 0x080), 1);
1491 evo_data(push, 0x00000000);
1492 } else {
1493 evo_mthd(push, 0x0180 + (or * 0x020), 1);
1494 evo_data(push, 0x00000000);
1495 }
1496
1497 evo_mthd(push, 0x0080, 1);
1498 evo_data(push, 0x00000000);
1499 evo_kick(push, mast);
1500 }
1501 }
1502
1503 nv_encoder->crtc = NULL;
1504}
1505
1506static enum drm_connector_status
1507nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
1508{
1509 struct nv50_disp *disp = nv50_disp(encoder->dev);
1510 int ret, or = nouveau_encoder(encoder)->or;
1511 u32 load = 0;
1512
1513 ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load));
1514 if (ret || load != 7)
1515 return connector_status_disconnected;
1516
1517 return connector_status_connected;
1518}
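[Editor's sketch of the load-detect test in nv50_dac_detect above: the NV50_DISP_DAC_LOAD method fills in a load status word, and only the value 7 — presumably one sense bit per R/G/B line — is treated as a connected analog monitor. A minimal restatement with a hypothetical helper name:]

#include <stdbool.h>
#include <stdio.h>

static bool dac_load_is_monitor(unsigned int load)
{
	return load == 7;	/* all three colour lines sensed (assumed) */
}

int main(void)
{
	printf("%d %d\n", dac_load_is_monitor(7), dac_load_is_monitor(0));	/* 1 0 */
	return 0;
}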
1519
1520static void
1521nv50_dac_destroy(struct drm_encoder *encoder)
1522{
1523 drm_encoder_cleanup(encoder);
1524 kfree(encoder);
1525}
1526
1527static const struct drm_encoder_helper_funcs nv50_dac_hfunc = {
1528 .dpms = nv50_dac_dpms,
1529 .mode_fixup = nv50_dac_mode_fixup,
1530 .prepare = nv50_dac_disconnect,
1531 .commit = nv50_dac_commit,
1532 .mode_set = nv50_dac_mode_set,
1533 .disable = nv50_dac_disconnect,
1534 .get_crtc = nv50_display_crtc_get,
1535 .detect = nv50_dac_detect
1536};
1537
1538static const struct drm_encoder_funcs nv50_dac_func = {
1539 .destroy = nv50_dac_destroy,
1540};
1541
1542static int
1543nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
1544{
1545 struct drm_device *dev = connector->dev;
1546 struct nouveau_encoder *nv_encoder;
825 	struct drm_encoder *encoder;
826 	u32 tmp;
827
828 	if (dcb->type != DCB_OUTPUT_TMDS)
1547 	struct drm_encoder *encoder;
1548
1549 	nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
1550 if (!nv_encoder)
1551 return -ENOMEM;
1552 nv_encoder->dcb = dcbe;
1553 nv_encoder->or = ffs(dcbe->or) - 1;
1554
1555 encoder = to_drm_encoder(nv_encoder);
1556 encoder->possible_crtcs = dcbe->heads;
1557 encoder->possible_clones = 0;
1558 drm_encoder_init(dev, encoder, &nv50_dac_func, DRM_MODE_ENCODER_DAC);
1559 drm_encoder_helper_add(encoder, &nv50_dac_hfunc);
1560
1561 drm_mode_connector_attach_encoder(connector, encoder);
1562 return 0;
1563}
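[Editor's note on the ffs(dcbe->or) - 1 idiom used here and again in nv50_sor_create below: the DCB table stores the output resource as a one-hot mask, and ffs() converts it to the OR index used to address the per-output display methods. A quick standalone check:]

#include <stdio.h>
#include <strings.h>

int main(void)
{
	unsigned int or_mask = 0x4;	/* DCB 'or' mask with only bit 2 set */
	printf("or index = %d\n", ffs(or_mask) - 1);	/* prints 2 */
	return 0;
}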
1564
1565/******************************************************************************
1566 * Audio
1567 *****************************************************************************/
1568static void
1569nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
1570{
1571 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1572 struct nouveau_connector *nv_connector;
1573 struct nv50_disp *disp = nv50_disp(encoder->dev);
1574
1575 nv_connector = nouveau_encoder_connector_get(nv_encoder);
1576 if (!drm_detect_monitor_audio(nv_connector->edid))
1577 return;
1578
1579 drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
1580
1581 nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or,
1582 nv_connector->base.eld,
1583 nv_connector->base.eld[2] * 4);
1584}
1585
1586static void
1587nv50_audio_disconnect(struct drm_encoder *encoder)
1588{
1589 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1590 struct nv50_disp *disp = nv50_disp(encoder->dev);
1591
1592 nv_exec(disp->core, NVA3_DISP_SOR_HDA_ELD + nv_encoder->or, NULL, 0);
1593}
1594
1595/******************************************************************************
1596 * HDMI
1597 *****************************************************************************/
1598static void
1599nv50_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
1600{
1601 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1602 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
1603 struct nouveau_connector *nv_connector;
1604 struct nv50_disp *disp = nv50_disp(encoder->dev);
1605 const u32 moff = (nv_crtc->index << 3) | nv_encoder->or;
1606 u32 rekey = 56; /* binary driver, and tegra constant */
1607 u32 max_ac_packet;
1608
1609 nv_connector = nouveau_encoder_connector_get(nv_encoder);
1610 if (!drm_detect_hdmi_monitor(nv_connector->edid))
829 		return;
830
831 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
832 		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
833
834 		if (nv_encoder->dcb->type == DCB_OUTPUT_DP &&
835 		    nv_encoder->dcb->or & (1 << or)) {
836 			tmp = nv_rd32(device, NV50_SOR_DP_CTRL(or, link));
837 			tmp &= ~NV50_SOR_DP_CTRL_ENABLED;
838 			nv_wr32(device, NV50_SOR_DP_CTRL(or, link), tmp);
1611 		return;
1612
1613 	max_ac_packet = mode->htotal - mode->hdisplay;
1614 	max_ac_packet -= rekey;
1615 	max_ac_packet -= 18; /* constant from tegra */
1616 	max_ac_packet /= 32;
1617
1618 	nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff,
1619 		NV84_DISP_SOR_HDMI_PWR_STATE_ON |
1620 		(max_ac_packet << 16) | rekey);
1621
1622 	nv50_audio_mode_set(encoder, mode);
1623}
1624
1625static void
1626nv50_hdmi_disconnect(struct drm_encoder *encoder)
1627{
1628 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1629 struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
1630 struct nv50_disp *disp = nv50_disp(encoder->dev);
1631 const u32 moff = (nv_crtc->index << 3) | nv_encoder->or;
1632
1633 nv50_audio_disconnect(encoder);
1634
1635 nv_call(disp->core, NV84_DISP_SOR_HDMI_PWR + moff, 0x00000000);
1636}
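[Editor's worked example for the max_ac_packet arithmetic in nv50_hdmi_mode_set above — a sketch assuming standard CEA 1080p timings (htotal 2200, hdisplay 1920); the helper name is made up:]

#include <stdio.h>

static unsigned int max_ac_packet(unsigned int htotal, unsigned int hdisplay,
				  unsigned int rekey)
{
	/* horizontal blanking, minus the rekey value and a fixed 18-pixel
	 * overhead, in 32-pixel units -- mirrors the computation above */
	return (htotal - hdisplay - rekey - 18) / 32;
}

int main(void)
{
	printf("%u\n", max_ac_packet(2200, 1920, 56));	/* prints 6 */
	return 0;
}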
1637
1638/******************************************************************************
1639 * SOR
1640 *****************************************************************************/
1641static void
1642nv50_sor_dpms(struct drm_encoder *encoder, int mode)
1643{
1644 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1645 struct drm_device *dev = encoder->dev;
1646 struct nv50_disp *disp = nv50_disp(dev);
1647 struct drm_encoder *partner;
1648 int or = nv_encoder->or;
1649
1650 nv_encoder->last_dpms = mode;
1651
1652 list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
1653 struct nouveau_encoder *nv_partner = nouveau_encoder(partner);
1654
1655 if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
1656 continue;
1657
1658 if (nv_partner != nv_encoder &&
1659 nv_partner->dcb->or == nv_encoder->dcb->or) {
1660 if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
1661 return;
839 			break;
840 		}
841 	}
1662 			break;
1663 		}
1664 	}
1665
1666 nv_call(disp->core, NV50_DISP_SOR_PWR + or, (mode == DRM_MODE_DPMS_ON));
1667
1668 if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
1669 nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, disp->core);
1670}
1671
1672static bool
1673nv50_sor_mode_fixup(struct drm_encoder *encoder,
1674 const struct drm_display_mode *mode,
1675 struct drm_display_mode *adjusted_mode)
1676{
1677 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1678 struct nouveau_connector *nv_connector;
1679
1680 nv_connector = nouveau_encoder_connector_get(nv_encoder);
1681 if (nv_connector && nv_connector->native_mode) {
1682 if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
1683 int id = adjusted_mode->base.id;
1684 *adjusted_mode = *nv_connector->native_mode;
1685 adjusted_mode->base.id = id;
1686 }
1687 }
1688
1689 return true;
842}
843
844static void
845nv50_display_unk40_handler(struct drm_device *dev)
846{
847 	struct nouveau_device *device = nouveau_dev(dev);
848 	struct nouveau_drm *drm = nouveau_drm(dev);
849 	struct nv50_display *disp = nv50_display(dev);
850 	struct dcb_output *dcb = disp->irq.dcb;
851 	u16 script = disp->irq.script;
852 	u32 unk30 = nv_rd32(device, 0x610030), pclk = disp->irq.pclk;
1690}
1691
1692static void
1693nv50_sor_disconnect(struct drm_encoder *encoder)
1694{
1695 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1696 	struct nv50_mast *mast = nv50_mast(encoder->dev);
1697 	const int or = nv_encoder->or;
1698 	u32 *push;
1699
1700 	if (nv_encoder->crtc) {
1701 nv50_crtc_prepare(nv_encoder->crtc);
1702
1703 push = evo_wait(mast, 4);
1704 if (push) {
1705 if (nv50_vers(mast) < NVD0_DISP_MAST_CLASS) {
1706 evo_mthd(push, 0x0600 + (or * 0x40), 1);
1707 evo_data(push, 0x00000000);
1708 } else {
1709 evo_mthd(push, 0x0200 + (or * 0x20), 1);
1710 evo_data(push, 0x00000000);
1711 }
1712
1713 evo_mthd(push, 0x0080, 1);
1714 evo_data(push, 0x00000000);
1715 evo_kick(push, mast);
1716 }
853
854 	NV_DEBUG(drm, "0x610030: 0x%08x\n", unk30);
855 	disp->irq.dcb = NULL;
856 	if (!dcb)
857 		goto ack;
858
859 	nouveau_bios_run_display_table(dev, script, -pclk, dcb, -1);
860 	nv50_display_unk40_dp_set_tmds(dev, dcb);
861
862ack:
863 	nv_wr32(device, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40);
864 	nv_wr32(device, 0x610030, 0x80000000);
865 	nv_wr32(device, 0x619494, nv_rd32(device, 0x619494) | 8);
866}
1717
1718 		nv50_hdmi_disconnect(encoder);
1719 	}
1720
1721 	nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
1722 	nv_encoder->crtc = NULL;
1723}
1724
1725static void
1726nv50_sor_prepare(struct drm_encoder *encoder)
1727{
1728 	nv50_sor_disconnect(encoder);
1729 	if (nouveau_encoder(encoder)->dcb->type == DCB_OUTPUT_DP)
1730 		evo_sync(encoder->dev);
1731}
867
868static void
869nv50_display_bh(unsigned long data)
870{
871 	struct drm_device *dev = (struct drm_device *)data;
872 	struct nouveau_device *device = nouveau_dev(dev);
873 	struct nouveau_drm *drm = nouveau_drm(dev);
874
875 	for (;;) {
876 		uint32_t intr0 = nv_rd32(device, NV50_PDISPLAY_INTR_0);
877 		uint32_t intr1 = nv_rd32(device, NV50_PDISPLAY_INTR_1);
878
879 		NV_DEBUG(drm, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1);
880
881 		if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10)
882 			nv50_display_unk10_handler(dev);
883 		else
884 		if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK20)
885 			nv50_display_unk20_handler(dev);
886 		else
887 		if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK40)
888 			nv50_display_unk40_handler(dev);
1732
1733static void
1734nv50_sor_commit(struct drm_encoder *encoder)
1735{
1736}
1737
1738static void
1739nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
1740 		  struct drm_display_mode *mode)
1741{
1742 	struct nv50_disp *disp = nv50_disp(encoder->dev);
1743 	struct nv50_mast *mast = nv50_mast(encoder->dev);
1744 	struct drm_device *dev = encoder->dev;
1745 	struct nouveau_drm *drm = nouveau_drm(dev);
1746 	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1747 	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
1748 	struct nouveau_connector *nv_connector;
1749 	struct nvbios *bios = &drm->vbios;
1750 	u32 *push, lvds = 0;
1751 	u8 owner = 1 << nv_crtc->index;
1752 	u8 proto = 0xf;
1753 	u8 depth = 0x0;
1754
1755 	nv_connector = nouveau_encoder_connector_get(nv_encoder);
1756 	switch (nv_encoder->dcb->type) {
1757 	case DCB_OUTPUT_TMDS:
1758 		if (nv_encoder->dcb->sorconf.link & 1) {
1759 			if (mode->clock < 165000)
1760 				proto = 0x1;
1761 			else
1762 				proto = 0x5;
1763 		} else {
1764 			proto = 0x2;
1765 		}
1766
1767 		nv50_hdmi_mode_set(encoder, mode);
1768 		break;
1769 	case DCB_OUTPUT_LVDS:
1770 		proto = 0x0;
1771
1772 		if (bios->fp_no_ddc) {
1773 			if (bios->fp.dual_link)
1774 				lvds |= 0x0100;
1775 			if (bios->fp.if_is_24bit)
1776 				lvds |= 0x0200;
1777 		} else {
1778 			if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
1779 				if (((u8 *)nv_connector->edid)[121] == 2)
1780 lvds |= 0x0100;
1781 } else
1782 if (mode->clock >= bios->fp.duallink_transition_clk) {
1783 lvds |= 0x0100;
1784 }
1785
1786 if (lvds & 0x0100) {
1787 if (bios->fp.strapless_is_24bit & 2)
1788 lvds |= 0x0200;
1789 } else {
1790 if (bios->fp.strapless_is_24bit & 1)
1791 lvds |= 0x0200;
1792 }
1793
1794 if (nv_connector->base.display_info.bpc == 8)
1795 lvds |= 0x0200;
1796 }
1797
1798 nv_call(disp->core, NV50_DISP_SOR_LVDS_SCRIPT + nv_encoder->or, lvds);
1799 break;
1800 case DCB_OUTPUT_DP:
1801 if (nv_connector->base.display_info.bpc == 6) {
1802 nv_encoder->dp.datarate = mode->clock * 18 / 8;
1803 depth = 0x2;
1804 } else
1805 if (nv_connector->base.display_info.bpc == 8) {
1806 nv_encoder->dp.datarate = mode->clock * 24 / 8;
1807 depth = 0x5;
1808 } else {
1809 nv_encoder->dp.datarate = mode->clock * 30 / 8;
1810 depth = 0x6;
1811 }
1812
1813 if (nv_encoder->dcb->sorconf.link & 1)
1814 proto = 0x8;
889 		else
890 			break;
891 	}
892
893 	nv_wr32(device, NV03_PMC_INTR_EN_0, 1);
1815 		else
1816 			proto = 0x9;
1817 		break;
1818 	default:
1819 		BUG_ON(1);
1820 		break;
1821 	}
1822
1823 	nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
1824
1825 push = evo_wait(nv50_mast(dev), 8);
1826 if (push) {
1827 if (nv50_vers(mast) < NVD0_DISP_CLASS) {
1828 evo_mthd(push, 0x0600 + (nv_encoder->or * 0x040), 1);
1829 evo_data(push, (depth << 16) | (proto << 8) | owner);
1830 } else {
1831 u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
1832 u32 syncs = 0x00000001;
1833
1834 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
1835 syncs |= 0x00000008;
1836 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
1837 syncs |= 0x00000010;
1838
1839 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1840 magic |= 0x00000001;
1841
1842 evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
1843 evo_data(push, syncs | (depth << 6));
1844 evo_data(push, magic);
1845 evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 1);
1846 evo_data(push, owner | (proto << 8));
1847 }
1848
1849 evo_kick(push, mast);
1850 }
1851
1852 nv_encoder->crtc = encoder->crtc;
894}
895
896static void
897nv50_display_error_handler(struct drm_device *dev)
898{
899 	struct nouveau_device *device = nouveau_dev(dev);
900 	struct nouveau_drm *drm = nouveau_drm(dev);
901 	u32 channels = (nv_rd32(device, NV50_PDISPLAY_INTR_0) & 0x001f0000) >> 16;
902 	u32 addr, data;
903 	int chid;
904
905 	for (chid = 0; chid < 5; chid++) {
906 		if (!(channels & (1 << chid)))
907 			continue;
1853}
1854
1855static void
1856nv50_sor_destroy(struct drm_encoder *encoder)
1857{
1858 	drm_encoder_cleanup(encoder);
1859 	kfree(encoder);
1860}
1861
1862static const struct drm_encoder_helper_funcs nv50_sor_hfunc = {
1863 	.dpms = nv50_sor_dpms,
1864 	.mode_fixup = nv50_sor_mode_fixup,
1865 .prepare = nv50_sor_prepare,
1866 .commit = nv50_sor_commit,
1867 .mode_set = nv50_sor_mode_set,
1868 .disable = nv50_sor_disconnect,
1869 .get_crtc = nv50_display_crtc_get,
1870};
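[Editor's note on the DCB_OUTPUT_DP case in nv50_sor_mode_set above: dp.datarate is the payload rate in kB/s — pixel clock in kHz times bits per pixel, divided by 8 — while depth picks the 6/8/10-bpc format code. A hedged standalone restatement:]

#include <stdio.h>

/* illustrative only: clock in kHz, bpc in bits per colour component */
static unsigned int dp_datarate(unsigned int clock_khz, unsigned int bpc)
{
	return clock_khz * (bpc * 3) / 8;
}

int main(void)
{
	/* 148500 kHz (1080p60) at 8 bpc -> 148500 * 24 / 8 = 445500 */
	printf("%u\n", dp_datarate(148500, 8));
	return 0;
}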
1871
1872static const struct drm_encoder_funcs nv50_sor_func = {
1873 .destroy = nv50_sor_destroy,
1874};
1875
1876static int
1877nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
1878{
1879 struct drm_device *dev = connector->dev;
1880 struct nouveau_encoder *nv_encoder;
1881 struct drm_encoder *encoder;
1882
1883 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
1884 if (!nv_encoder)
1885 return -ENOMEM;
1886 nv_encoder->dcb = dcbe;
1887 nv_encoder->or = ffs(dcbe->or) - 1;
1888 nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
908
909 		nv_wr32(device, NV50_PDISPLAY_INTR_0, 0x00010000 << chid);
910 		addr = nv_rd32(device, NV50_PDISPLAY_TRAPPED_ADDR(chid));
911 		data = nv_rd32(device, NV50_PDISPLAY_TRAPPED_DATA(chid));
912 		NV_ERROR(drm, "EvoCh %d Mthd 0x%04x Data 0x%08x "
913 			      "(0x%04x 0x%02x)\n", chid,
914 			 addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
1889
1890 	encoder = to_drm_encoder(nv_encoder);
1891 	encoder->possible_crtcs = dcbe->heads;
1892 	encoder->possible_clones = 0;
1893 	drm_encoder_init(dev, encoder, &nv50_sor_func, DRM_MODE_ENCODER_TMDS);
1894 	drm_encoder_helper_add(encoder, &nv50_sor_hfunc);
1895
1896 drm_mode_connector_attach_encoder(connector, encoder);
1897 return 0;
1898}
1899
1900/******************************************************************************
1901 * Init
1902 *****************************************************************************/
1903void
1904nv50_display_fini(struct drm_device *dev)
1905{
1906}
915
916 		nv_wr32(device, NV50_PDISPLAY_TRAPPED_ADDR(chid), 0x90000000);
1907
1908int
1909nv50_display_init(struct drm_device *dev)
1910{
1911 u32 *push = evo_wait(nv50_mast(dev), 32);
1912 if (push) {
1913 evo_mthd(push, 0x0088, 1);
1914 evo_data(push, NvEvoSync);
1915 evo_kick(push, nv50_mast(dev));
1916 return evo_sync(dev);
917 	}
1917 	}
1918
1919 return -EBUSY;
918}
1920}
919 1921
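[Editor's note: the 0x0088 method that nv50_display_init points at NvEvoSync above appears to correspond to NV50_EVO_DMA_NOTIFY in the removed nv50_evo.h further down this patch, i.e. init binds the core channel's notifier context object before the first evo_sync(). This is a cross-reference, not part of the change itself.]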
920void
921nv50_display_intr(struct drm_device *dev)
1922void
1923nv50_display_destroy(struct drm_device *dev)
1924{
1925 struct nv50_disp *disp = nv50_disp(dev);
1926
1927 nv50_dmac_destroy(disp->core, &disp->mast.base);
1928
1929 nouveau_bo_unmap(disp->sync);
1930 if (disp->sync)
1931 nouveau_bo_unpin(disp->sync);
1932 nouveau_bo_ref(NULL, &disp->sync);
1933
1934 nouveau_display(dev)->priv = NULL;
1935 kfree(disp);
1936}
1937
1938int
1939nv50_display_create(struct drm_device *dev)
922{
1940{
1941 static const u16 oclass[] = {
1942 NVE0_DISP_CLASS,
1943 NVD0_DISP_CLASS,
1944 NVA3_DISP_CLASS,
1945 NV94_DISP_CLASS,
1946 NVA0_DISP_CLASS,
1947 NV84_DISP_CLASS,
1948 NV50_DISP_CLASS,
1949 };
923 	struct nouveau_device *device = nouveau_dev(dev);
924 	struct nouveau_drm *drm = nouveau_drm(dev);
925 	struct nv50_display *disp = nv50_display(dev);
926 	uint32_t delayed = 0;
927
928 	while (nv_rd32(device, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
929 		uint32_t intr0 = nv_rd32(device, NV50_PDISPLAY_INTR_0);
930 		uint32_t intr1 = nv_rd32(device, NV50_PDISPLAY_INTR_1);
931 		uint32_t clock;
932
933 		NV_DEBUG(drm, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1);
1950 	struct nouveau_device *device = nouveau_dev(dev);
1951 	struct nouveau_drm *drm = nouveau_drm(dev);
1952 	struct dcb_table *dcb = &drm->vbios.dcb;
1953 	struct drm_connector *connector, *tmp;
1954 	struct nv50_disp *disp;
1955 	struct dcb_output *dcbe;
1956 	int crtcs, ret, i;
1957
1958 	disp = kzalloc(sizeof(*disp), GFP_KERNEL);
1959 if (!disp)
1960 return -ENOMEM;
934
935 		if (!intr0 && !(intr1 & ~delayed))
936 			break;
1961
1962 	nouveau_display(dev)->priv = disp;
1963 	nouveau_display(dev)->dtor = nv50_display_destroy;
1964 nouveau_display(dev)->init = nv50_display_init;
1965 nouveau_display(dev)->fini = nv50_display_fini;
937
938 		if (intr0 & 0x001f0000) {
939 			nv50_display_error_handler(dev);
940 			intr0 &= ~0x001f0000;
1966
1967 	/* small shared memory area we use for notifiers and semaphores */
1968 	ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
1969 			     0, 0x0000, NULL, &disp->sync);
1970 if (!ret) {
1971 ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
1972 if (!ret) {
1973 ret = nouveau_bo_map(disp->sync);
1974 if (ret)
1975 nouveau_bo_unpin(disp->sync);
941 		}
1976 		}
1977 if (ret)
1978 nouveau_bo_ref(NULL, &disp->sync);
1979 }
942
943 		if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
944 			intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
945 			delayed |= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
946 		}
1980
1981 	if (ret)
1982 		goto out;
1983
1984 	/* attempt to allocate a supported evo display class */
1985 ret = -ENODEV;
1986 for (i = 0; ret && i < ARRAY_SIZE(oclass); i++) {
1987 ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE,
1988 0xd1500000, oclass[i], NULL, 0,
1989 &disp->core);
1990 }
947
948 		clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 |
949 				  NV50_PDISPLAY_INTR_1_CLK_UNK20 |
950 				  NV50_PDISPLAY_INTR_1_CLK_UNK40));
951 		if (clock) {
952 			nv_wr32(device, NV03_PMC_INTR_EN_0, 0);
953 			tasklet_schedule(&disp->tasklet);
954 			delayed |= clock;
955 			intr1 &= ~clock;
956 		}
1991
1992 	if (ret)
1993 		goto out;
1994
1995 	/* allocate master evo channel */
1996 	ret = nv50_dmac_create(disp->core, NV50_DISP_MAST_CLASS, 0,
1997 			      &(struct nv50_display_mast_class) {
1998 				      .pushbuf = EVO_PUSH_HANDLE(MAST, 0),
1999 			      }, sizeof(struct nv50_display_mast_class),
2000 			      disp->sync->bo.offset, &disp->mast.base);
2001 if (ret)
2002 goto out;
2003
2004 /* create crtc objects to represent the hw heads */
2005 if (nv_mclass(disp->core) >= NVD0_DISP_CLASS)
2006 crtcs = nv_rd32(device, 0x022448);
2007 else
2008 crtcs = 2;
2009
2010 for (i = 0; i < crtcs; i++) {
2011 ret = nv50_crtc_create(dev, disp->core, i);
2012 if (ret)
2013 goto out;
2014 }
2015
2016 /* create encoder/connector objects based on VBIOS DCB table */
2017 for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
2018 connector = nouveau_connector_create(dev, dcbe->connector);
2019 if (IS_ERR(connector))
2020 continue;
957
958 		if (intr0) {
959 			NV_ERROR(drm, "unknown PDISPLAY_INTR_0: 0x%08x\n", intr0);
960 			nv_wr32(device, NV50_PDISPLAY_INTR_0, intr0);
961 		}
2021
2022 		if (dcbe->location != DCB_LOC_ON_CHIP) {
2023 			NV_WARN(drm, "skipping off-chip encoder %d/%d\n",
2024 				dcbe->type, ffs(dcbe->or) - 1);
2025 			continue;
2026 		}
962
963 		if (intr1) {
964 			NV_ERROR(drm,
965 				 "unknown PDISPLAY_INTR_1: 0x%08x\n", intr1);
966 			nv_wr32(device, NV50_PDISPLAY_INTR_1, intr1);
2027
2028 		switch (dcbe->type) {
2029 		case DCB_OUTPUT_TMDS:
2030 		case DCB_OUTPUT_LVDS:
2031 		case DCB_OUTPUT_DP:
2032 nv50_sor_create(connector, dcbe);
2033 break;
2034 case DCB_OUTPUT_ANALOG:
2035 nv50_dac_create(connector, dcbe);
2036 break;
2037 default:
2038 NV_WARN(drm, "skipping unsupported encoder %d/%d\n",
2039 dcbe->type, ffs(dcbe->or) - 1);
2040 continue;
967 		}
968 	}
2041 		}
2042 	}
2043
2044 /* cull any connectors we created that don't have an encoder */
2045 list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
2046 if (connector->encoder_ids[0])
2047 continue;
2048
2049 NV_WARN(drm, "%s has no encoders, removing\n",
2050 drm_get_connector_name(connector));
2051 connector->funcs->destroy(connector);
2052 }
2053
2054out:
2055 if (ret)
2056 nv50_display_destroy(dev);
2057 return ret;
969}
2058}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 973554d8a7a6..70da347aa8c5 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -30,77 +30,16 @@
30#include "nouveau_display.h" 30#include "nouveau_display.h"
31#include "nouveau_crtc.h" 31#include "nouveau_crtc.h"
32#include "nouveau_reg.h" 32#include "nouveau_reg.h"
33#include "nv50_evo.h"
34 33
35struct nv50_display_crtc {
36 	struct nouveau_channel *sync;
37 	struct {
38 		struct nouveau_bo *bo;
39 		u32 offset;
40 		u16 value;
41 	} sem;
42};
43
34int nv50_display_create(struct drm_device *);
35void nv50_display_destroy(struct drm_device *);
36int nv50_display_init(struct drm_device *);
37void nv50_display_fini(struct drm_device *);
38
44struct nv50_display {
45 struct nouveau_channel *master;
46
47 struct nouveau_gpuobj *ramin;
48 u32 dmao;
49 u32 hash;
50
51 struct nv50_display_crtc crtc[2];
52
53 struct tasklet_struct tasklet;
54 struct {
55 struct dcb_output *dcb;
56 u16 script;
57 u32 pclk;
58 } irq;
59};
60
61static inline struct nv50_display *
62nv50_display(struct drm_device *dev)
63{
64 return nouveau_display(dev)->priv;
65}
66
67int nv50_display_early_init(struct drm_device *dev);
68void nv50_display_late_takedown(struct drm_device *dev);
69int nv50_display_create(struct drm_device *dev);
70int nv50_display_init(struct drm_device *dev);
71void nv50_display_fini(struct drm_device *dev);
72void nv50_display_destroy(struct drm_device *dev);
73void nv50_display_intr(struct drm_device *);
74int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
75int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
76
77u32 nv50_display_active_crtcs(struct drm_device *);
78
79int nv50_display_sync(struct drm_device *);
80int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
81 struct nouveau_channel *chan);
82void nv50_display_flip_stop(struct drm_crtc *); 39void nv50_display_flip_stop(struct drm_crtc *);
83
40int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
84int nv50_evo_create(struct drm_device *dev);
85void nv50_evo_destroy(struct drm_device *dev);
86int nv50_evo_init(struct drm_device *dev);
87void nv50_evo_fini(struct drm_device *dev);
88void nv50_evo_dmaobj_init(struct nouveau_gpuobj *, u32 memtype, u64 base,
89 u64 size);
90int nv50_evo_dmaobj_new(struct nouveau_channel *, u32 handle, u32 memtype,
91 u64 base, u64 size, struct nouveau_gpuobj **);
92
93int nvd0_display_create(struct drm_device *);
94void nvd0_display_destroy(struct drm_device *);
95int nvd0_display_init(struct drm_device *);
96void nvd0_display_fini(struct drm_device *);
97void nvd0_display_intr(struct drm_device *);
98
99void nvd0_display_flip_stop(struct drm_crtc *);
100int nvd0_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
101 struct nouveau_channel *, u32 swap_interval); 41 struct nouveau_channel *, u32 swap_interval);
102 42
103struct nouveau_bo *nv50_display_crtc_sema(struct drm_device *, int head); 43struct nouveau_bo *nv50_display_crtc_sema(struct drm_device *, int head);
104struct nouveau_bo *nvd0_display_crtc_sema(struct drm_device *, int head);
105 44
106#endif /* __NV50_DISPLAY_H__ */ 45#endif /* __NV50_DISPLAY_H__ */
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
deleted file mode 100644
index 9f6f55cdfa77..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ /dev/null
@@ -1,403 +0,0 @@
1/*
2 * Copyright 2010 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <drm/drmP.h>
26
27#include "nouveau_drm.h"
28#include "nouveau_dma.h"
29#include "nv50_display.h"
30
31#include <core/gpuobj.h>
32
33#include <subdev/timer.h>
34#include <subdev/fb.h>
35
36static u32
37nv50_evo_rd32(struct nouveau_object *object, u32 addr)
38{
39 void __iomem *iomem = object->oclass->ofuncs->rd08;
40 return ioread32_native(iomem + addr);
41}
42
43static void
44nv50_evo_wr32(struct nouveau_object *object, u32 addr, u32 data)
45{
46 void __iomem *iomem = object->oclass->ofuncs->rd08;
47 iowrite32_native(data, iomem + addr);
48}
49
50static void
51nv50_evo_channel_del(struct nouveau_channel **pevo)
52{
53 struct nouveau_channel *evo = *pevo;
54
55 if (!evo)
56 return;
57 *pevo = NULL;
58
59 nouveau_bo_unmap(evo->push.buffer);
60 nouveau_bo_ref(NULL, &evo->push.buffer);
61
62 if (evo->object)
63 iounmap(evo->object->oclass->ofuncs);
64
65 kfree(evo);
66}
67
68int
69nv50_evo_dmaobj_new(struct nouveau_channel *evo, u32 handle, u32 memtype,
70 u64 base, u64 size, struct nouveau_gpuobj **pobj)
71{
72 struct drm_device *dev = evo->fence;
73 struct nouveau_drm *drm = nouveau_drm(dev);
74 struct nv50_display *disp = nv50_display(dev);
75 u32 dmao = disp->dmao;
76 u32 hash = disp->hash;
77 u32 flags5;
78
79 if (nv_device(drm->device)->chipset < 0xc0) {
80 /* not supported on 0x50, specified in format mthd */
81 if (nv_device(drm->device)->chipset == 0x50)
82 memtype = 0;
83 flags5 = 0x00010000;
84 } else {
85 if (memtype & 0x80000000)
86 flags5 = 0x00000000; /* large pages */
87 else
88 flags5 = 0x00020000;
89 }
90
91 nv_wo32(disp->ramin, dmao + 0x00, 0x0019003d | (memtype << 22));
92 nv_wo32(disp->ramin, dmao + 0x04, lower_32_bits(base + size - 1));
93 nv_wo32(disp->ramin, dmao + 0x08, lower_32_bits(base));
94 nv_wo32(disp->ramin, dmao + 0x0c, upper_32_bits(base + size - 1) << 24 |
95 upper_32_bits(base));
96 nv_wo32(disp->ramin, dmao + 0x10, 0x00000000);
97 nv_wo32(disp->ramin, dmao + 0x14, flags5);
98
99 nv_wo32(disp->ramin, hash + 0x00, handle);
100 nv_wo32(disp->ramin, hash + 0x04, (evo->handle << 28) | (dmao << 10) |
101 evo->handle);
102
103 disp->dmao += 0x20;
104 disp->hash += 0x08;
105 return 0;
106}
107
108static int
109nv50_evo_channel_new(struct drm_device *dev, int chid,
110 struct nouveau_channel **pevo)
111{
112 struct nouveau_drm *drm = nouveau_drm(dev);
113 struct nv50_display *disp = nv50_display(dev);
114 struct nouveau_channel *evo;
115 int ret;
116
117 evo = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
118 if (!evo)
119 return -ENOMEM;
120 *pevo = evo;
121
122 evo->drm = drm;
123 evo->handle = chid;
124 evo->fence = dev;
125 evo->user_get = 4;
126 evo->user_put = 0;
127
128 ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0, NULL,
129 &evo->push.buffer);
130 if (ret == 0)
131 ret = nouveau_bo_pin(evo->push.buffer, TTM_PL_FLAG_VRAM);
132 if (ret) {
133 NV_ERROR(drm, "Error creating EVO DMA push buffer: %d\n", ret);
134 nv50_evo_channel_del(pevo);
135 return ret;
136 }
137
138 ret = nouveau_bo_map(evo->push.buffer);
139 if (ret) {
140 NV_ERROR(drm, "Error mapping EVO DMA push buffer: %d\n", ret);
141 nv50_evo_channel_del(pevo);
142 return ret;
143 }
144
145 evo->object = kzalloc(sizeof(*evo->object), GFP_KERNEL);
146#ifdef NOUVEAU_OBJECT_MAGIC
147 evo->object->_magic = NOUVEAU_OBJECT_MAGIC;
148#endif
149 evo->object->parent = nv_object(disp->ramin)->parent;
150 evo->object->engine = nv_object(disp->ramin)->engine;
151 evo->object->oclass =
152 kzalloc(sizeof(*evo->object->oclass), GFP_KERNEL);
153 evo->object->oclass->ofuncs =
154 kzalloc(sizeof(*evo->object->oclass->ofuncs), GFP_KERNEL);
155 evo->object->oclass->ofuncs->rd32 = nv50_evo_rd32;
156 evo->object->oclass->ofuncs->wr32 = nv50_evo_wr32;
157 evo->object->oclass->ofuncs->rd08 =
158 ioremap(pci_resource_start(dev->pdev, 0) +
159 NV50_PDISPLAY_USER(evo->handle), PAGE_SIZE);
160 return 0;
161}
162
163static int
164nv50_evo_channel_init(struct nouveau_channel *evo)
165{
166 struct nouveau_drm *drm = evo->drm;
167 struct nouveau_device *device = nv_device(drm->device);
168 int id = evo->handle, ret, i;
169 u64 pushbuf = evo->push.buffer->bo.offset;
170 u32 tmp;
171
172 tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id));
173 if ((tmp & 0x009f0000) == 0x00020000)
174 nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00800000);
175
176 tmp = nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id));
177 if ((tmp & 0x003f0000) == 0x00030000)
178 nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), tmp | 0x00600000);
179
180 /* initialise fifo */
181 nv_wr32(device, NV50_PDISPLAY_EVO_DMA_CB(id), pushbuf >> 8 |
182 NV50_PDISPLAY_EVO_DMA_CB_LOCATION_VRAM |
183 NV50_PDISPLAY_EVO_DMA_CB_VALID);
184 nv_wr32(device, NV50_PDISPLAY_EVO_UNK2(id), 0x00010000);
185 nv_wr32(device, NV50_PDISPLAY_EVO_HASH_TAG(id), id);
186 nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), NV50_PDISPLAY_EVO_CTRL_DMA,
187 NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
188
189 nv_wr32(device, NV50_PDISPLAY_USER_PUT(id), 0x00000000);
190 nv_wr32(device, NV50_PDISPLAY_EVO_CTRL(id), 0x01000003 |
191 NV50_PDISPLAY_EVO_CTRL_DMA_ENABLED);
192 if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x80000000, 0x00000000)) {
193 NV_ERROR(drm, "EvoCh %d init timeout: 0x%08x\n", id,
194 nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)));
195 return -EBUSY;
196 }
197
198 /* enable error reporting on the channel */
199 nv_mask(device, 0x610028, 0x00000000, 0x00010001 << id);
200
201 evo->dma.max = (4096/4) - 2;
202 evo->dma.max &= ~7;
203 evo->dma.put = 0;
204 evo->dma.cur = evo->dma.put;
205 evo->dma.free = evo->dma.max - evo->dma.cur;
206
207 ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
208 if (ret)
209 return ret;
210
211 for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
212 OUT_RING(evo, 0);
213
214 return 0;
215}
216
217static void
218nv50_evo_channel_fini(struct nouveau_channel *evo)
219{
220 struct nouveau_drm *drm = evo->drm;
221 struct nouveau_device *device = nv_device(drm->device);
222 int id = evo->handle;
223
224 nv_mask(device, 0x610028, 0x00010001 << id, 0x00000000);
225 nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00001010, 0x00001000);
226 nv_wr32(device, NV50_PDISPLAY_INTR_0, (1 << id));
227 nv_mask(device, NV50_PDISPLAY_EVO_CTRL(id), 0x00000003, 0x00000000);
228 if (!nv_wait(device, NV50_PDISPLAY_EVO_CTRL(id), 0x001e0000, 0x00000000)) {
229 NV_ERROR(drm, "EvoCh %d takedown timeout: 0x%08x\n", id,
230 nv_rd32(device, NV50_PDISPLAY_EVO_CTRL(id)));
231 }
232}
233
234void
235nv50_evo_destroy(struct drm_device *dev)
236{
237 struct nv50_display *disp = nv50_display(dev);
238 int i;
239
240 for (i = 0; i < 2; i++) {
241 if (disp->crtc[i].sem.bo) {
242 nouveau_bo_unmap(disp->crtc[i].sem.bo);
243 nouveau_bo_ref(NULL, &disp->crtc[i].sem.bo);
244 }
245 nv50_evo_channel_del(&disp->crtc[i].sync);
246 }
247 nv50_evo_channel_del(&disp->master);
248 nouveau_gpuobj_ref(NULL, &disp->ramin);
249}
250
251int
252nv50_evo_create(struct drm_device *dev)
253{
254 struct nouveau_drm *drm = nouveau_drm(dev);
255 struct nouveau_fb *pfb = nouveau_fb(drm->device);
256 struct nv50_display *disp = nv50_display(dev);
257 struct nouveau_channel *evo;
258 int ret, i, j;
259
260 	/* set up object management on it; any other evo channel will
261 	 * use this too, as there's no per-channel support on the
262 	 * hardware
263 */
264 ret = nouveau_gpuobj_new(drm->device, NULL, 32768, 65536,
265 NVOBJ_FLAG_ZERO_ALLOC, &disp->ramin);
266 if (ret) {
267 NV_ERROR(drm, "Error allocating EVO channel memory: %d\n", ret);
268 goto err;
269 }
270
271 disp->hash = 0x0000;
272 disp->dmao = 0x1000;
273
274 /* create primary evo channel, the one we use for modesetting
275 	 * purposes
276 */
277 ret = nv50_evo_channel_new(dev, 0, &disp->master);
278 if (ret)
279 return ret;
280 evo = disp->master;
281
282 ret = nv50_evo_dmaobj_new(disp->master, NvEvoSync, 0x0000,
283 disp->ramin->addr + 0x2000, 0x1000, NULL);
284 if (ret)
285 goto err;
286
287 /* create some default objects for the scanout memtypes we support */
288 ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM, 0x0000,
289 0, pfb->ram.size, NULL);
290 if (ret)
291 goto err;
292
293 ret = nv50_evo_dmaobj_new(disp->master, NvEvoVRAM_LP, 0x80000000,
294 0, pfb->ram.size, NULL);
295 if (ret)
296 goto err;
297
298 ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB32, 0x80000000 |
299 (nv_device(drm->device)->chipset < 0xc0 ? 0x7a : 0xfe),
300 0, pfb->ram.size, NULL);
301 if (ret)
302 goto err;
303
304 ret = nv50_evo_dmaobj_new(disp->master, NvEvoFB16, 0x80000000 |
305 (nv_device(drm->device)->chipset < 0xc0 ? 0x70 : 0xfe),
306 0, pfb->ram.size, NULL);
307 if (ret)
308 goto err;
309
310 /* create "display sync" channels and other structures we need
311 * to implement page flipping
312 */
313 for (i = 0; i < 2; i++) {
314 struct nv50_display_crtc *dispc = &disp->crtc[i];
315 u64 offset;
316
317 ret = nv50_evo_channel_new(dev, 1 + i, &dispc->sync);
318 if (ret)
319 goto err;
320
321 ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
322 0, 0x0000, NULL, &dispc->sem.bo);
323 if (!ret) {
324 ret = nouveau_bo_pin(dispc->sem.bo, TTM_PL_FLAG_VRAM);
325 if (!ret)
326 ret = nouveau_bo_map(dispc->sem.bo);
327 if (ret)
328 nouveau_bo_ref(NULL, &dispc->sem.bo);
329 offset = dispc->sem.bo->bo.offset;
330 }
331
332 if (ret)
333 goto err;
334
335 ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoSync, 0x0000,
336 offset, 4096, NULL);
337 if (ret)
338 goto err;
339
340 ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoVRAM_LP, 0x80000000,
341 0, pfb->ram.size, NULL);
342 if (ret)
343 goto err;
344
345 ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB32, 0x80000000 |
346 (nv_device(drm->device)->chipset < 0xc0 ?
347 0x7a : 0xfe),
348 0, pfb->ram.size, NULL);
349 if (ret)
350 goto err;
351
352 ret = nv50_evo_dmaobj_new(dispc->sync, NvEvoFB16, 0x80000000 |
353 (nv_device(drm->device)->chipset < 0xc0 ?
354 0x70 : 0xfe),
355 0, pfb->ram.size, NULL);
356 if (ret)
357 goto err;
358
359 for (j = 0; j < 4096; j += 4)
360 nouveau_bo_wr32(dispc->sem.bo, j / 4, 0x74b1e000);
361 dispc->sem.offset = 0;
362 }
363
364 return 0;
365
366err:
367 nv50_evo_destroy(dev);
368 return ret;
369}
370
371int
372nv50_evo_init(struct drm_device *dev)
373{
374 struct nv50_display *disp = nv50_display(dev);
375 int ret, i;
376
377 ret = nv50_evo_channel_init(disp->master);
378 if (ret)
379 return ret;
380
381 for (i = 0; i < 2; i++) {
382 ret = nv50_evo_channel_init(disp->crtc[i].sync);
383 if (ret)
384 return ret;
385 }
386
387 return 0;
388}
389
390void
391nv50_evo_fini(struct drm_device *dev)
392{
393 struct nv50_display *disp = nv50_display(dev);
394 int i;
395
396 for (i = 0; i < 2; i++) {
397 if (disp->crtc[i].sync)
398 nv50_evo_channel_fini(disp->crtc[i].sync);
399 }
400
401 if (disp->master)
402 nv50_evo_channel_fini(disp->master);
403}
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
deleted file mode 100644
index 771d879bc834..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_evo.h
+++ /dev/null
@@ -1,120 +0,0 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __NV50_EVO_H__
28#define __NV50_EVO_H__
29
30#define NV50_EVO_UPDATE 0x00000080
31#define NV50_EVO_UNK84 0x00000084
32#define NV50_EVO_UNK84_NOTIFY 0x40000000
33#define NV50_EVO_UNK84_NOTIFY_DISABLED 0x00000000
34#define NV50_EVO_UNK84_NOTIFY_ENABLED 0x40000000
35#define NV50_EVO_DMA_NOTIFY 0x00000088
36#define NV50_EVO_DMA_NOTIFY_HANDLE 0xffffffff
37#define NV50_EVO_DMA_NOTIFY_HANDLE_NONE 0x00000000
38#define NV50_EVO_UNK8C 0x0000008C
39
40#define NV50_EVO_DAC(n, r) ((n) * 0x80 + NV50_EVO_DAC_##r)
41#define NV50_EVO_DAC_MODE_CTRL 0x00000400
42#define NV50_EVO_DAC_MODE_CTRL_CRTC0 0x00000001
43#define NV50_EVO_DAC_MODE_CTRL_CRTC1 0x00000002
44#define NV50_EVO_DAC_MODE_CTRL2 0x00000404
45#define NV50_EVO_DAC_MODE_CTRL2_NHSYNC 0x00000001
46#define NV50_EVO_DAC_MODE_CTRL2_NVSYNC 0x00000002
47
48#define NV50_EVO_SOR(n, r) ((n) * 0x40 + NV50_EVO_SOR_##r)
49#define NV50_EVO_SOR_MODE_CTRL 0x00000600
50#define NV50_EVO_SOR_MODE_CTRL_CRTC0 0x00000001
51#define NV50_EVO_SOR_MODE_CTRL_CRTC1 0x00000002
52#define NV50_EVO_SOR_MODE_CTRL_TMDS 0x00000100
53#define NV50_EVO_SOR_MODE_CTRL_TMDS_DUAL_LINK 0x00000400
54#define NV50_EVO_SOR_MODE_CTRL_NHSYNC 0x00001000
55#define NV50_EVO_SOR_MODE_CTRL_NVSYNC 0x00002000
56
57#define NV50_EVO_CRTC(n, r) ((n) * 0x400 + NV50_EVO_CRTC_##r)
58#define NV84_EVO_CRTC(n, r) ((n) * 0x400 + NV84_EVO_CRTC_##r)
59#define NV50_EVO_CRTC_UNK0800 0x00000800
60#define NV50_EVO_CRTC_CLOCK 0x00000804
61#define NV50_EVO_CRTC_INTERLACE 0x00000808
62#define NV50_EVO_CRTC_DISPLAY_START 0x00000810
63#define NV50_EVO_CRTC_DISPLAY_TOTAL 0x00000814
64#define NV50_EVO_CRTC_SYNC_DURATION 0x00000818
65#define NV50_EVO_CRTC_SYNC_START_TO_BLANK_END 0x0000081c
66#define NV50_EVO_CRTC_UNK0820 0x00000820
67#define NV50_EVO_CRTC_UNK0824 0x00000824
68#define NV50_EVO_CRTC_UNK082C 0x0000082c
69#define NV50_EVO_CRTC_CLUT_MODE 0x00000840
70/* You can't have a palette in 8 bit mode (=OFF) */
71#define NV50_EVO_CRTC_CLUT_MODE_BLANK 0x00000000
72#define NV50_EVO_CRTC_CLUT_MODE_OFF 0x80000000
73#define NV50_EVO_CRTC_CLUT_MODE_ON 0xC0000000
74#define NV50_EVO_CRTC_CLUT_OFFSET 0x00000844
75#define NV84_EVO_CRTC_CLUT_DMA 0x0000085C
76#define NV84_EVO_CRTC_CLUT_DMA_HANDLE 0xffffffff
77#define NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE 0x00000000
78#define NV50_EVO_CRTC_FB_OFFSET 0x00000860
79#define NV50_EVO_CRTC_FB_SIZE 0x00000868
80#define NV50_EVO_CRTC_FB_CONFIG 0x0000086c
81#define NV50_EVO_CRTC_FB_CONFIG_MODE 0x00100000
82#define NV50_EVO_CRTC_FB_CONFIG_MODE_TILE 0x00000000
83#define NV50_EVO_CRTC_FB_CONFIG_MODE_PITCH 0x00100000
84#define NV50_EVO_CRTC_FB_DEPTH 0x00000870
85#define NV50_EVO_CRTC_FB_DEPTH_8 0x00001e00
86#define NV50_EVO_CRTC_FB_DEPTH_15 0x0000e900
87#define NV50_EVO_CRTC_FB_DEPTH_16 0x0000e800
88#define NV50_EVO_CRTC_FB_DEPTH_24 0x0000cf00
89#define NV50_EVO_CRTC_FB_DEPTH_30 0x0000d100
90#define NV50_EVO_CRTC_FB_DMA 0x00000874
91#define NV50_EVO_CRTC_FB_DMA_HANDLE 0xffffffff
92#define NV50_EVO_CRTC_FB_DMA_HANDLE_NONE 0x00000000
93#define NV50_EVO_CRTC_CURSOR_CTRL 0x00000880
94#define NV50_EVO_CRTC_CURSOR_CTRL_HIDE 0x05000000
95#define NV50_EVO_CRTC_CURSOR_CTRL_SHOW 0x85000000
96#define NV50_EVO_CRTC_CURSOR_OFFSET 0x00000884
97#define NV84_EVO_CRTC_CURSOR_DMA 0x0000089c
98#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE 0xffffffff
99#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE 0x00000000
100#define NV50_EVO_CRTC_DITHER_CTRL 0x000008a0
101#define NV50_EVO_CRTC_DITHER_CTRL_OFF 0x00000000
102#define NV50_EVO_CRTC_DITHER_CTRL_ON 0x00000011
103#define NV50_EVO_CRTC_SCALE_CTRL 0x000008a4
104#define NV50_EVO_CRTC_SCALE_CTRL_INACTIVE 0x00000000
105#define NV50_EVO_CRTC_SCALE_CTRL_ACTIVE 0x00000009
106#define NV50_EVO_CRTC_COLOR_CTRL 0x000008a8
107#define NV50_EVO_CRTC_COLOR_CTRL_VIBRANCE 0x000fff00
108#define NV50_EVO_CRTC_COLOR_CTRL_HUE 0xfff00000
109#define NV50_EVO_CRTC_FB_POS 0x000008c0
110#define NV50_EVO_CRTC_REAL_RES 0x000008c8
111#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET 0x000008d4
112#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(x, y) \
113 ((((unsigned)y << 16) & 0xFFFF0000) | (((unsigned)x) & 0x0000FFFF))
114/* Both of these are needed, otherwise nothing happens. */
115#define NV50_EVO_CRTC_SCALE_RES1 0x000008d8
116#define NV50_EVO_CRTC_SCALE_RES2 0x000008dc
117#define NV50_EVO_CRTC_UNK900 0x00000900
118#define NV50_EVO_CRTC_UNK904 0x00000904
119
120#endif
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index e0763ea88ee2..c20f2727ea0b 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -110,8 +110,11 @@ nv50_fence_create(struct nouveau_drm *drm)
110 0, 0x0000, NULL, &priv->bo); 110 0, 0x0000, NULL, &priv->bo);
111 if (!ret) { 111 if (!ret) {
112 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM); 112 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
113 		if (!ret)
114 			ret = nouveau_bo_map(priv->bo);
115 		if (ret)
116 			nouveau_bo_ref(NULL, &priv->bo);
117 	}
113 		if (!ret) {
114 			ret = nouveau_bo_map(priv->bo);
115 			if (ret)
116 				nouveau_bo_unpin(priv->bo);
117 		}
118 		if (ret)
119 			nouveau_bo_ref(NULL, &priv->bo);
120 	}
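[Editor's note: this hunk fixes an error-path leak — previously, if nouveau_bo_map() failed the buffer reference was dropped while the BO was still pinned; the new inner branch unpins it before the reference is released.]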
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
index c4a65039b1ca..8bd5d2781baf 100644
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -546,7 +546,7 @@ calc_mclk(struct drm_device *dev, struct nouveau_pm_level *perflvl,
546{ 546{
547 struct nouveau_drm *drm = nouveau_drm(dev); 547 struct nouveau_drm *drm = nouveau_drm(dev);
548 struct nouveau_device *device = nouveau_dev(dev); 548 struct nouveau_device *device = nouveau_dev(dev);
549 	u32 crtc_mask = nv50_display_active_crtcs(dev);
549 	u32 crtc_mask = 0; /*XXX: nv50_display_active_crtcs(dev); */
550 struct nouveau_mem_exec_func exec = { 550 struct nouveau_mem_exec_func exec = {
551 .dev = dev, 551 .dev = dev,
552 .precharge = mclk_precharge, 552 .precharge = mclk_precharge,
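[Editor's note: crtc_mask is temporarily hardcoded to 0 because nv50_display_active_crtcs() is removed along with the old display code in this patch; the XXX comment marks it for reinstatement once the new implementation exposes an equivalent.]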
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
deleted file mode 100644
index b562b59e1326..000000000000
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ /dev/null
@@ -1,530 +0,0 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <drm/drmP.h>
28#include <drm/drm_crtc_helper.h>
29
30#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
31#include "nouveau_reg.h"
32#include "nouveau_drm.h"
33#include "nouveau_dma.h"
34#include "nouveau_encoder.h"
35#include "nouveau_connector.h"
36#include "nouveau_crtc.h"
37#include "nv50_display.h"
38
39#include <subdev/timer.h>
40
41static u32
42nv50_sor_dp_lane_map(struct drm_device *dev, struct dcb_output *dcb, u8 lane)
43{
44 struct nouveau_drm *drm = nouveau_drm(dev);
45 static const u8 nvaf[] = { 24, 16, 8, 0 }; /* thanks, apple.. */
46 static const u8 nv50[] = { 16, 8, 0, 24 };
47 if (nv_device(drm->device)->chipset == 0xaf)
48 return nvaf[lane];
49 return nv50[lane];
50}
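[Editor's sketch of the lane mapping above: each DP lane's drive/pre-emphasis settings occupy an 8-bit field in the UNK118/UNK120 registers, and the table gives the field's bit offset per lane — with the order changed on chipset 0xaf, hence the "thanks, apple" remark:]

#include <stdio.h>

static const int nv50_map[] = { 16, 8, 0, 24 };	/* copied from above */

int main(void)
{
	int lane;
	for (lane = 0; lane < 4; lane++)
		printf("lane %d -> bits %d..%d\n",
		       lane, nv50_map[lane], nv50_map[lane] + 7);
	return 0;
}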
51
52static void
53nv50_sor_dp_train_set(struct drm_device *dev, struct dcb_output *dcb, u8 pattern)
54{
55 struct nouveau_device *device = nouveau_dev(dev);
56 u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
57 nv_mask(device, NV50_SOR_DP_CTRL(or, link), 0x0f000000, pattern << 24);
58}
59
60static void
61nv50_sor_dp_train_adj(struct drm_device *dev, struct dcb_output *dcb,
62 u8 lane, u8 swing, u8 preem)
63{
64 struct nouveau_device *device = nouveau_dev(dev);
65 struct nouveau_drm *drm = nouveau_drm(dev);
66 u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
67 u32 shift = nv50_sor_dp_lane_map(dev, dcb, lane);
68 u32 mask = 0x000000ff << shift;
69 u8 *table, *entry, *config;
70
71 table = nouveau_dp_bios_data(dev, dcb, &entry);
72 if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
73 NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
74 return;
75 }
76
77 config = entry + table[4];
78 while (config[0] != swing || config[1] != preem) {
79 config += table[5];
80 if (config >= entry + table[4] + entry[4] * table[5])
81 return;
82 }
83
84 nv_mask(device, NV50_SOR_DP_UNK118(or, link), mask, config[2] << shift);
85 nv_mask(device, NV50_SOR_DP_UNK120(or, link), mask, config[3] << shift);
86 nv_mask(device, NV50_SOR_DP_UNK130(or, link), 0x0000ff00, config[4] << 8);
87}
88
89static void
90nv50_sor_dp_link_set(struct drm_device *dev, struct dcb_output *dcb, int crtc,
91 int link_nr, u32 link_bw, bool enhframe)
92{
93 struct nouveau_device *device = nouveau_dev(dev);
94 struct nouveau_drm *drm = nouveau_drm(dev);
95 u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
96 u32 dpctrl = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)) & ~0x001f4000;
97 u32 clksor = nv_rd32(device, 0x614300 + (or * 0x800)) & ~0x000c0000;
98 u8 *table, *entry, mask;
99 int i;
100
101 table = nouveau_dp_bios_data(dev, dcb, &entry);
102 if (!table || (table[0] != 0x20 && table[0] != 0x21)) {
103 NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
104 return;
105 }
106
107 entry = ROMPTR(dev, entry[10]);
108 if (entry) {
109 while (link_bw < ROM16(entry[0]) * 10)
110 entry += 4;
111
112 nouveau_bios_run_init_table(dev, ROM16(entry[2]), dcb, crtc);
113 }
114
115 dpctrl |= ((1 << link_nr) - 1) << 16;
116 if (enhframe)
117 dpctrl |= 0x00004000;
118
119 if (link_bw > 162000)
120 clksor |= 0x00040000;
121
122 nv_wr32(device, 0x614300 + (or * 0x800), clksor);
123 nv_wr32(device, NV50_SOR_DP_CTRL(or, link), dpctrl);
124
125 mask = 0;
126 for (i = 0; i < link_nr; i++)
127 mask |= 1 << (nv50_sor_dp_lane_map(dev, dcb, i) >> 3);
128 nv_mask(device, NV50_SOR_DP_UNK130(or, link), 0x0000000f, mask);
129}
130
131static void
132nv50_sor_dp_link_get(struct drm_device *dev, u32 or, u32 link, u32 *nr, u32 *bw)
133{
134 struct nouveau_device *device = nouveau_dev(dev);
135 u32 dpctrl = nv_rd32(device, NV50_SOR_DP_CTRL(or, link)) & 0x000f0000;
136 u32 clksor = nv_rd32(device, 0x614300 + (or * 0x800));
137 if (clksor & 0x000c0000)
138 *bw = 270000;
139 else
140 *bw = 162000;
141
142 if (dpctrl > 0x00030000) *nr = 4;
143 else if (dpctrl > 0x00010000) *nr = 2;
144 else *nr = 1;
145}
146
147void
148nv50_sor_dp_calc_tu(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
149{
150 struct nouveau_device *device = nouveau_dev(dev);
151 struct nouveau_drm *drm = nouveau_drm(dev);
152 const u32 symbol = 100000;
153 int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
154 int TU, VTUi, VTUf, VTUa;
155 u64 link_data_rate, link_ratio, unk;
156 u32 best_diff = 64 * symbol;
157 u32 link_nr, link_bw, r;
158
159 /* calculate packed data rate for each lane */
160 nv50_sor_dp_link_get(dev, or, link, &link_nr, &link_bw);
161 link_data_rate = (clk * bpp / 8) / link_nr;
162
163 /* calculate ratio of packed data rate to link symbol rate */
164 link_ratio = link_data_rate * symbol;
165 r = do_div(link_ratio, link_bw);
166
167 for (TU = 64; TU >= 32; TU--) {
168 /* calculate average number of valid symbols in each TU */
169 u32 tu_valid = link_ratio * TU;
170 u32 calc, diff;
171
172 /* find a hw representation for the fraction.. */
173 VTUi = tu_valid / symbol;
174 calc = VTUi * symbol;
175 diff = tu_valid - calc;
176 if (diff) {
177 if (diff >= (symbol / 2)) {
178 VTUf = symbol / (symbol - diff);
179 if (symbol - (VTUf * diff))
180 VTUf++;
181
182 if (VTUf <= 15) {
183 VTUa = 1;
184 calc += symbol - (symbol / VTUf);
185 } else {
186 VTUa = 0;
187 VTUf = 1;
188 calc += symbol;
189 }
190 } else {
191 VTUa = 0;
192 VTUf = min((int)(symbol / diff), 15);
193 calc += symbol / VTUf;
194 }
195
196 diff = calc - tu_valid;
197 } else {
198 /* no remainder, but the hw doesn't like the fractional
199 * part to be zero. decrement the integer part and
200 * have the fraction add a whole symbol back
201 */
202 VTUa = 0;
203 VTUf = 1;
204 VTUi--;
205 }
206
207 if (diff < best_diff) {
208 best_diff = diff;
209 bestTU = TU;
210 bestVTUa = VTUa;
211 bestVTUf = VTUf;
212 bestVTUi = VTUi;
213 if (diff == 0)
214 break;
215 }
216 }
217
218 if (!bestTU) {
219 NV_ERROR(drm, "DP: unable to find suitable config\n");
220 return;
221 }
222
223 /* XXX close to vbios numbers, but not right */
224 unk = (symbol - link_ratio) * bestTU;
225 unk *= link_ratio;
226 r = do_div(unk, symbol);
227 r = do_div(unk, symbol);
228 unk += 6;
229
230 nv_mask(device, NV50_SOR_DP_CTRL(or, link), 0x000001fc, bestTU << 2);
231 nv_mask(device, NV50_SOR_DP_SCFG(or, link), 0x010f7f3f, bestVTUa << 24 |
232 bestVTUf << 16 |
233 bestVTUi << 8 |
234 unk);
235}
236static void
237nv50_sor_disconnect(struct drm_encoder *encoder)
238{
239 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
240 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
241 struct drm_device *dev = encoder->dev;
242 struct nouveau_channel *evo = nv50_display(dev)->master;
243 int ret;
244
245 if (!nv_encoder->crtc)
246 return;
247 nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);
248
249 NV_DEBUG(drm, "Disconnecting SOR %d\n", nv_encoder->or);
250
251 ret = RING_SPACE(evo, 4);
252 if (ret) {
253 NV_ERROR(drm, "no space while disconnecting SOR\n");
254 return;
255 }
256 BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
257 OUT_RING (evo, 0);
258 BEGIN_NV04(evo, 0, NV50_EVO_UPDATE, 1);
259 OUT_RING (evo, 0);
260
261 nouveau_hdmi_mode_set(encoder, NULL);
262
263 nv_encoder->crtc = NULL;
264 nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
265}
266
267static void
268nv50_sor_dpms(struct drm_encoder *encoder, int mode)
269{
270 struct nouveau_device *device = nouveau_dev(encoder->dev);
271 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
272 struct drm_device *dev = encoder->dev;
273 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
274 struct drm_encoder *enc;
275 uint32_t val;
276 int or = nv_encoder->or;
277
278 NV_DEBUG(drm, "or %d type %d mode %d\n", or, nv_encoder->dcb->type, mode);
279
280 nv_encoder->last_dpms = mode;
281 list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
282 struct nouveau_encoder *nvenc = nouveau_encoder(enc);
283
284 if (nvenc == nv_encoder ||
285 (nvenc->dcb->type != DCB_OUTPUT_TMDS &&
286 nvenc->dcb->type != DCB_OUTPUT_LVDS &&
287 nvenc->dcb->type != DCB_OUTPUT_DP) ||
288 nvenc->dcb->or != nv_encoder->dcb->or)
289 continue;
290
291 if (nvenc->last_dpms == DRM_MODE_DPMS_ON)
292 return;
293 }
294
295 /* wait for it to be done */
296 if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or),
297 NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
298 NV_ERROR(drm, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or);
299 NV_ERROR(drm, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or,
300 nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or)));
301 }
302
303 val = nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or));
304
305 if (mode == DRM_MODE_DPMS_ON)
306 val |= NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
307 else
308 val &= ~NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
309
310 nv_wr32(device, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val |
311 NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING);
312 if (!nv_wait(device, NV50_PDISPLAY_SOR_DPMS_STATE(or),
313 NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
314 NV_ERROR(drm, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or);
315 NV_ERROR(drm, "SOR_DPMS_STATE(%d) = 0x%08x\n", or,
316 nv_rd32(device, NV50_PDISPLAY_SOR_DPMS_STATE(or)));
317 }
318
319 if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
320 struct dp_train_func func = {
321 .link_set = nv50_sor_dp_link_set,
322 .train_set = nv50_sor_dp_train_set,
323 .train_adj = nv50_sor_dp_train_adj
324 };
325
326 nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func);
327 }
328}
329
330static void
331nv50_sor_save(struct drm_encoder *encoder)
332{
333 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
334 NV_ERROR(drm, "!!\n");
335}
336
337static void
338nv50_sor_restore(struct drm_encoder *encoder)
339{
340 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
341 NV_ERROR(drm, "!!\n");
342}
343
344static bool
345nv50_sor_mode_fixup(struct drm_encoder *encoder,
346 const struct drm_display_mode *mode,
347 struct drm_display_mode *adjusted_mode)
348{
349 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
350 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
351 struct nouveau_connector *connector;
352
353 NV_DEBUG(drm, "or %d\n", nv_encoder->or);
354
355 connector = nouveau_encoder_connector_get(nv_encoder);
356 if (!connector) {
357 NV_ERROR(drm, "Encoder has no connector\n");
358 return false;
359 }
360
361 if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
362 connector->native_mode)
363 drm_mode_copy(adjusted_mode, connector->native_mode);
364
365 return true;
366}
367
368static void
369nv50_sor_prepare(struct drm_encoder *encoder)
370{
371 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
372 nv50_sor_disconnect(encoder);
373 if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
374 /* avoid race between link training and supervisor intr */
375 nv50_display_sync(encoder->dev);
376 }
377}
378
379static void
380nv50_sor_commit(struct drm_encoder *encoder)
381{
382}
383
384static void
385nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
386 struct drm_display_mode *mode)
387{
388 struct nouveau_channel *evo = nv50_display(encoder->dev)->master;
389 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
390 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
391 struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
392 struct nouveau_connector *nv_connector;
393 uint32_t mode_ctl = 0;
394 int ret;
395
396 NV_DEBUG(drm, "or %d type %d -> crtc %d\n",
397 nv_encoder->or, nv_encoder->dcb->type, crtc->index);
398 nv_encoder->crtc = encoder->crtc;
399
400 switch (nv_encoder->dcb->type) {
401 case DCB_OUTPUT_TMDS:
402 if (nv_encoder->dcb->sorconf.link & 1) {
403 if (mode->clock < 165000)
404 mode_ctl = 0x0100;
405 else
406 mode_ctl = 0x0500;
407 } else
408 mode_ctl = 0x0200;
409
410 nouveau_hdmi_mode_set(encoder, mode);
411 break;
412 case DCB_OUTPUT_DP:
413 nv_connector = nouveau_encoder_connector_get(nv_encoder);
414 if (nv_connector && nv_connector->base.display_info.bpc == 6) {
415 nv_encoder->dp.datarate = mode->clock * 18 / 8;
416 mode_ctl |= 0x00020000;
417 } else {
418 nv_encoder->dp.datarate = mode->clock * 24 / 8;
419 mode_ctl |= 0x00050000;
420 }
421
422 if (nv_encoder->dcb->sorconf.link & 1)
423 mode_ctl |= 0x00000800;
424 else
425 mode_ctl |= 0x00000900;
426 break;
427 default:
428 break;
429 }
430
431 if (crtc->index == 1)
432 mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC1;
433 else
434 mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0;
435
436 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
437 mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC;
438
439 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
440 mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC;
441
442 nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
443
444 ret = RING_SPACE(evo, 2);
445 if (ret) {
446 NV_ERROR(drm, "no space while connecting SOR\n");
447 nv_encoder->crtc = NULL;
448 return;
449 }
450 BEGIN_NV04(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
451 OUT_RING(evo, mode_ctl);
452}
453
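Editor's note: the magic values in the TMDS case above encode sublink and single- vs dual-link selection in the SOR_MODE_CTRL word (bit 0 of sorconf.link appears to select sublink A, with 165 MHz as the dual-link pixel-clock threshold). A minimal userspace sketch of just that selection logic, with illustrative names that are not part of the driver:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the TMDS branch of nv50_sor_mode_set(): sublink A vs B,
     * dual-link above the 165 MHz single-link TMDS limit. */
    static uint32_t tmds_mode_ctl(int link_a, int clock_khz)
    {
        if (link_a)
            return (clock_khz < 165000) ? 0x0100 : 0x0500;
        return 0x0200;
    }

    int main(void)
    {
        printf("link A, 148.5 MHz: 0x%04x\n", tmds_mode_ctl(1, 148500));
        printf("link A, 297.0 MHz: 0x%04x\n", tmds_mode_ctl(1, 297000));
        printf("link B, 148.5 MHz: 0x%04x\n", tmds_mode_ctl(0, 148500));
        return 0;
    }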
454static struct drm_crtc *
455nv50_sor_crtc_get(struct drm_encoder *encoder)
456{
457 return nouveau_encoder(encoder)->crtc;
458}
459
460static const struct drm_encoder_helper_funcs nv50_sor_helper_funcs = {
461 .dpms = nv50_sor_dpms,
462 .save = nv50_sor_save,
463 .restore = nv50_sor_restore,
464 .mode_fixup = nv50_sor_mode_fixup,
465 .prepare = nv50_sor_prepare,
466 .commit = nv50_sor_commit,
467 .mode_set = nv50_sor_mode_set,
468 .get_crtc = nv50_sor_crtc_get,
469 .detect = NULL,
470 .disable = nv50_sor_disconnect
471};
472
473static void
474nv50_sor_destroy(struct drm_encoder *encoder)
475{
476 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
477 struct nouveau_drm *drm = nouveau_drm(encoder->dev);
478
479 NV_DEBUG(drm, "\n");
480
481 drm_encoder_cleanup(encoder);
482
483 kfree(nv_encoder);
484}
485
486static const struct drm_encoder_funcs nv50_sor_encoder_funcs = {
487 .destroy = nv50_sor_destroy,
488};
489
490int
491nv50_sor_create(struct drm_connector *connector, struct dcb_output *entry)
492{
493 struct nouveau_encoder *nv_encoder = NULL;
494 struct drm_device *dev = connector->dev;
495 struct nouveau_drm *drm = nouveau_drm(dev);
496 struct drm_encoder *encoder;
497 int type;
498
499 NV_DEBUG(drm, "\n");
500
501 switch (entry->type) {
502 case DCB_OUTPUT_TMDS:
503 case DCB_OUTPUT_DP:
504 type = DRM_MODE_ENCODER_TMDS;
505 break;
506 case DCB_OUTPUT_LVDS:
507 type = DRM_MODE_ENCODER_LVDS;
508 break;
509 default:
510 return -EINVAL;
511 }
512
513 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
514 if (!nv_encoder)
515 return -ENOMEM;
516 encoder = to_drm_encoder(nv_encoder);
517
518 nv_encoder->dcb = entry;
519 nv_encoder->or = ffs(entry->or) - 1;
520 nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
521
522 drm_encoder_init(dev, encoder, &nv50_sor_encoder_funcs, type);
523 drm_encoder_helper_add(encoder, &nv50_sor_helper_funcs);
524
525 encoder->possible_crtcs = entry->heads;
526 encoder->possible_clones = 0;
527
528 drm_mode_connector_attach_encoder(connector, encoder);
529 return 0;
530}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
index 53299eac9676..2a56b1b551cb 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fence.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -114,17 +114,9 @@ nvc0_fence_context_del(struct nouveau_channel *chan)
 	struct nvc0_fence_chan *fctx = chan->fence;
 	int i;
 
-	if (nv_device(chan->drm->device)->card_type >= NV_D0) {
-		for (i = 0; i < dev->mode_config.num_crtc; i++) {
-			struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
-			nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
-		}
-	} else
-	if (nv_device(chan->drm->device)->card_type >= NV_50) {
-		for (i = 0; i < dev->mode_config.num_crtc; i++) {
-			struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
-			nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
-		}
-	}
+	for (i = 0; i < dev->mode_config.num_crtc; i++) {
+		struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+		nouveau_bo_vma_del(bo, &fctx->dispc_vma[i]);
+	}
 
 	nouveau_bo_vma_del(priv->bo, &fctx->vma);
@@ -154,12 +146,7 @@ nvc0_fence_context_new(struct nouveau_channel *chan)
 
 	/* map display semaphore buffers into channel's vm */
 	for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
-		struct nouveau_bo *bo;
-		if (nv_device(chan->drm->device)->card_type >= NV_D0)
-			bo = nvd0_display_crtc_sema(chan->drm->dev, i);
-		else
-			bo = nv50_display_crtc_sema(chan->drm->dev, i);
-
+		struct nouveau_bo *bo = nv50_display_crtc_sema(chan->drm->dev, i);
 		ret = nouveau_bo_vma_add(bo, client->vm, &fctx->dispc_vma[i]);
 	}
 
@@ -203,6 +190,8 @@ nvc0_fence_destroy(struct nouveau_drm *drm)
 {
 	struct nvc0_fence_priv *priv = drm->fence;
 	nouveau_bo_unmap(priv->bo);
+	if (priv->bo)
+		nouveau_bo_unpin(priv->bo);
 	nouveau_bo_ref(NULL, &priv->bo);
 	drm->fence = NULL;
 	kfree(priv);
@@ -232,8 +221,11 @@ nvc0_fence_create(struct nouveau_drm *drm)
 			     TTM_PL_FLAG_VRAM, 0, 0, NULL, &priv->bo);
 	if (ret == 0) {
 		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
-		if (ret == 0)
+		if (ret == 0) {
 			ret = nouveau_bo_map(priv->bo);
+			if (ret)
+				nouveau_bo_unpin(priv->bo);
+		}
 		if (ret)
 			nouveau_bo_ref(NULL, &priv->bo);
 	}
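Editor's note: the last two hunks tighten setup/teardown ordering, so that a successful pin is undone when the subsequent map fails, and destroy unpins before dropping the final reference. The same acquire-in-order, release-in-reverse pattern, sketched with stand-in helpers (hypothetical names, not the nouveau API):

    #include <stdio.h>

    /* Stand-ins for nouveau_bo_pin/map/unpin/ref; 0 means success. */
    static int  pin(void)   { puts("pin");   return 0; }
    static int  map(void)   { puts("map");   return -1; /* simulate failure */ }
    static void unpin(void) { puts("unpin"); }
    static void unref(void) { puts("unref"); }

    int main(void)
    {
        int ret = pin();
        if (ret == 0) {
            ret = map();
            if (ret)
                unpin();   /* undo only the step that succeeded */
        }
        if (ret)
            unref();       /* drop the allocation last */
        return ret ? 1 : 0;
    }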
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
deleted file mode 100644
index c402fca2b2b8..000000000000
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ /dev/null
@@ -1,2141 +0,0 @@
1/*
2 * Copyright 2011 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <linux/dma-mapping.h>
26
27#include <drm/drmP.h>
28#include <drm/drm_crtc_helper.h>
29
30#include "nouveau_drm.h"
31#include "nouveau_dma.h"
32#include "nouveau_gem.h"
33#include "nouveau_connector.h"
34#include "nouveau_encoder.h"
35#include "nouveau_crtc.h"
36#include "nouveau_fence.h"
37#include "nv50_display.h"
38
39#include <core/gpuobj.h>
40
41#include <subdev/timer.h>
42#include <subdev/bar.h>
43#include <subdev/fb.h>
44
45#define EVO_DMA_NR 9
46
47#define EVO_MASTER (0x00)
48#define EVO_FLIP(c) (0x01 + (c))
49#define EVO_OVLY(c) (0x05 + (c))
50#define EVO_OIMM(c) (0x09 + (c))
51#define EVO_CURS(c) (0x0d + (c))
52
53/* offsets in shared sync bo of various structures */
54#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
55#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
56#define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00)
57#define EVO_FLIP_SEM1(c) EVO_SYNC((c), 0x10)
58
59struct evo {
60 int idx;
61 dma_addr_t handle;
62 u32 *ptr;
63 struct {
64 u32 offset;
65 u16 value;
66 } sem;
67};
68
69struct nvd0_display {
70 struct nouveau_gpuobj *mem;
71 struct nouveau_bo *sync;
72 struct evo evo[9];
73
74 struct tasklet_struct tasklet;
75 u32 modeset;
76};
77
78static struct nvd0_display *
79nvd0_display(struct drm_device *dev)
80{
81 return nouveau_display(dev)->priv;
82}
83
84static struct drm_crtc *
85nvd0_display_crtc_get(struct drm_encoder *encoder)
86{
87 return nouveau_encoder(encoder)->crtc;
88}
89
90/******************************************************************************
91 * EVO channel helpers
92 *****************************************************************************/
93static inline int
94evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data)
95{
96 struct nouveau_device *device = nouveau_dev(dev);
97 int ret = 0;
98 nv_mask(device, 0x610700 + (id * 0x10), 0x00000001, 0x00000001);
99 nv_wr32(device, 0x610704 + (id * 0x10), data);
100 nv_mask(device, 0x610704 + (id * 0x10), 0x80000ffc, 0x80000000 | mthd);
101 if (!nv_wait(device, 0x610704 + (id * 0x10), 0x80000000, 0x00000000))
102 ret = -EBUSY;
103 nv_mask(device, 0x610700 + (id * 0x10), 0x00000001, 0x00000000);
104 return ret;
105}
106
107static u32 *
108evo_wait(struct drm_device *dev, int id, int nr)
109{
110 struct nouveau_device *device = nouveau_dev(dev);
111 struct nouveau_drm *drm = nouveau_drm(dev);
112 struct nvd0_display *disp = nvd0_display(dev);
113 u32 put = nv_rd32(device, 0x640000 + (id * 0x1000)) / 4;
114
115 if (put + nr >= (PAGE_SIZE / 4)) {
116 disp->evo[id].ptr[put] = 0x20000000;
117
118 nv_wr32(device, 0x640000 + (id * 0x1000), 0x00000000);
119 if (!nv_wait(device, 0x640004 + (id * 0x1000), ~0, 0x00000000)) {
120 NV_ERROR(drm, "evo %d dma stalled\n", id);
121 return NULL;
122 }
123
124 put = 0;
125 }
126
127 return disp->evo[id].ptr + put;
128}
129
130static void
131evo_kick(u32 *push, struct drm_device *dev, int id)
132{
133 struct nouveau_device *device = nouveau_dev(dev);
134 struct nvd0_display *disp = nvd0_display(dev);
135
136 nv_wr32(device, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
137}
138
139#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
140#define evo_data(p,d) *((p)++) = (d)
141
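Editor's note: evo_mthd() packs the data-word count into bits 18+ of a header word and evo_data() appends the payload; evo_kick() then advances PUT by the byte length of the stream. A standalone sketch showing what one push sequence looks like in memory:

    #include <stdint.h>
    #include <stdio.h>

    #define evo_mthd(p, m, s) (*((p)++) = (((uint32_t)(s) << 18) | (m)))
    #define evo_data(p, d)    (*((p)++) = (d))

    int main(void)
    {
        uint32_t buf[8], *push = buf;

        evo_mthd(push, 0x0080, 1);   /* header: 1 data word, method 0x0080 */
        evo_data(push, 0x00000000);  /* UPDATE with no flags */

        for (uint32_t *p = buf; p < push; p++)
            printf("0x%08x\n", *p);
        /* PUT would advance by (push - buf) << 2 bytes, as in evo_kick() */
        return 0;
    }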
142static int
143evo_init_dma(struct drm_device *dev, int ch)
144{
145 struct nouveau_device *device = nouveau_dev(dev);
146 struct nouveau_drm *drm = nouveau_drm(dev);
147 struct nvd0_display *disp = nvd0_display(dev);
148 u32 flags;
149
150 flags = 0x00000000;
151 if (ch == EVO_MASTER)
152 flags |= 0x01000000;
153
154 nv_wr32(device, 0x610494 + (ch * 0x0010), (disp->evo[ch].handle >> 8) | 3);
155 nv_wr32(device, 0x610498 + (ch * 0x0010), 0x00010000);
156 nv_wr32(device, 0x61049c + (ch * 0x0010), 0x00000001);
157 nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
158 nv_wr32(device, 0x640000 + (ch * 0x1000), 0x00000000);
159 nv_wr32(device, 0x610490 + (ch * 0x0010), 0x00000013 | flags);
160 if (!nv_wait(device, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000)) {
161 NV_ERROR(drm, "PDISP: ch%d 0x%08x\n", ch,
162 nv_rd32(device, 0x610490 + (ch * 0x0010)));
163 return -EBUSY;
164 }
165
166 nv_mask(device, 0x610090, (1 << ch), (1 << ch));
167 nv_mask(device, 0x6100a0, (1 << ch), (1 << ch));
168 return 0;
169}
170
171static void
172evo_fini_dma(struct drm_device *dev, int ch)
173{
174 struct nouveau_device *device = nouveau_dev(dev);
175
176 if (!(nv_rd32(device, 0x610490 + (ch * 0x0010)) & 0x00000010))
177 return;
178
179 nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000000);
180 nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000003, 0x00000000);
181 nv_wait(device, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000);
182 nv_mask(device, 0x610090, (1 << ch), 0x00000000);
183 nv_mask(device, 0x6100a0, (1 << ch), 0x00000000);
184}
185
186static inline void
187evo_piow(struct drm_device *dev, int ch, u16 mthd, u32 data)
188{
189 struct nouveau_device *device = nouveau_dev(dev);
190 nv_wr32(device, 0x640000 + (ch * 0x1000) + mthd, data);
191}
192
193static int
194evo_init_pio(struct drm_device *dev, int ch)
195{
196 struct nouveau_device *device = nouveau_dev(dev);
197 struct nouveau_drm *drm = nouveau_drm(dev);
198
199 nv_wr32(device, 0x610490 + (ch * 0x0010), 0x00000001);
200 if (!nv_wait(device, 0x610490 + (ch * 0x0010), 0x00010000, 0x00010000)) {
201 NV_ERROR(drm, "PDISP: ch%d 0x%08x\n", ch,
202 nv_rd32(device, 0x610490 + (ch * 0x0010)));
203 return -EBUSY;
204 }
205
206 nv_mask(device, 0x610090, (1 << ch), (1 << ch));
207 nv_mask(device, 0x6100a0, (1 << ch), (1 << ch));
208 return 0;
209}
210
211static void
212evo_fini_pio(struct drm_device *dev, int ch)
213{
214 struct nouveau_device *device = nouveau_dev(dev);
215
216 if (!(nv_rd32(device, 0x610490 + (ch * 0x0010)) & 0x00000001))
217 return;
218
219 nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
220 nv_mask(device, 0x610490 + (ch * 0x0010), 0x00000001, 0x00000000);
221 nv_wait(device, 0x610490 + (ch * 0x0010), 0x00010000, 0x00000000);
222 nv_mask(device, 0x610090, (1 << ch), 0x00000000);
223 nv_mask(device, 0x6100a0, (1 << ch), 0x00000000);
224}
225
226static bool
227evo_sync_wait(void *data)
228{
229 return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000;
230}
231
232static int
233evo_sync(struct drm_device *dev, int ch)
234{
235 struct nouveau_device *device = nouveau_dev(dev);
236 struct nvd0_display *disp = nvd0_display(dev);
237 u32 *push = evo_wait(dev, ch, 8);
238 if (push) {
239 nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
240 evo_mthd(push, 0x0084, 1);
241 evo_data(push, 0x80000000 | EVO_MAST_NTFY);
242 evo_mthd(push, 0x0080, 2);
243 evo_data(push, 0x00000000);
244 evo_data(push, 0x00000000);
245 evo_kick(push, dev, ch);
246 if (nv_wait_cb(device, evo_sync_wait, disp->sync))
247 return 0;
248 }
249
250 return -EBUSY;
251}
252
253/******************************************************************************
254 * Page flipping channel
255 *****************************************************************************/
256struct nouveau_bo *
257nvd0_display_crtc_sema(struct drm_device *dev, int crtc)
258{
259 return nvd0_display(dev)->sync;
260}
261
262void
263nvd0_display_flip_stop(struct drm_crtc *crtc)
264{
265 struct nvd0_display *disp = nvd0_display(crtc->dev);
266 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
267 struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
268 u32 *push;
269
270 push = evo_wait(crtc->dev, evo->idx, 8);
271 if (push) {
272 evo_mthd(push, 0x0084, 1);
273 evo_data(push, 0x00000000);
274 evo_mthd(push, 0x0094, 1);
275 evo_data(push, 0x00000000);
276 evo_mthd(push, 0x00c0, 1);
277 evo_data(push, 0x00000000);
278 evo_mthd(push, 0x0080, 1);
279 evo_data(push, 0x00000000);
280 evo_kick(push, crtc->dev, evo->idx);
281 }
282}
283
284int
285nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
286 struct nouveau_channel *chan, u32 swap_interval)
287{
288 struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
289 struct nvd0_display *disp = nvd0_display(crtc->dev);
290 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
291 struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
292 u64 offset;
293 u32 *push;
294 int ret;
295
296 swap_interval <<= 4;
297 if (swap_interval == 0)
298 swap_interval |= 0x100;
299
300 push = evo_wait(crtc->dev, evo->idx, 128);
301 if (unlikely(push == NULL))
302 return -EBUSY;
303
304 /* synchronise with the rendering channel, if necessary */
305 if (likely(chan)) {
306 ret = RING_SPACE(chan, 10);
307 if (ret)
308 return ret;
309
310
311 offset = nvc0_fence_crtc(chan, nv_crtc->index);
312 offset += evo->sem.offset;
313
314 BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
315 OUT_RING (chan, upper_32_bits(offset));
316 OUT_RING (chan, lower_32_bits(offset));
317 OUT_RING (chan, 0xf00d0000 | evo->sem.value);
318 OUT_RING (chan, 0x1002);
319 BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
320 OUT_RING (chan, upper_32_bits(offset));
321 OUT_RING (chan, lower_32_bits(offset ^ 0x10));
322 OUT_RING (chan, 0x74b1e000);
323 OUT_RING (chan, 0x1001);
324 FIRE_RING (chan);
325 } else {
326 nouveau_bo_wr32(disp->sync, evo->sem.offset / 4,
327 0xf00d0000 | evo->sem.value);
328 evo_sync(crtc->dev, EVO_MASTER);
329 }
330
331 /* queue the flip */
332 evo_mthd(push, 0x0100, 1);
333 evo_data(push, 0xfffe0000);
334 evo_mthd(push, 0x0084, 1);
335 evo_data(push, swap_interval);
336 if (!(swap_interval & 0x00000100)) {
337 evo_mthd(push, 0x00e0, 1);
338 evo_data(push, 0x40000000);
339 }
340 evo_mthd(push, 0x0088, 4);
341 evo_data(push, evo->sem.offset);
342 evo_data(push, 0xf00d0000 | evo->sem.value);
343 evo_data(push, 0x74b1e000);
344 evo_data(push, NvEvoSync);
345 evo_mthd(push, 0x00a0, 2);
346 evo_data(push, 0x00000000);
347 evo_data(push, 0x00000000);
348 evo_mthd(push, 0x00c0, 1);
349 evo_data(push, nv_fb->r_dma);
350 evo_mthd(push, 0x0110, 2);
351 evo_data(push, 0x00000000);
352 evo_data(push, 0x00000000);
353 evo_mthd(push, 0x0400, 5);
354 evo_data(push, nv_fb->nvbo->bo.offset >> 8);
355 evo_data(push, 0);
356 evo_data(push, (fb->height << 16) | fb->width);
357 evo_data(push, nv_fb->r_pitch);
358 evo_data(push, nv_fb->r_format);
359 evo_mthd(push, 0x0080, 1);
360 evo_data(push, 0x00000000);
361 evo_kick(push, crtc->dev, evo->idx);
362
363 evo->sem.offset ^= 0x10;
364 evo->sem.value++;
365 return 0;
366}
367
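Editor's note: the flip semaphore above is double-buffered. Each crtc owns a 0x100-byte slice of the shared sync bo, and flip_next() ping-pongs between the two 16-byte slots (EVO_FLIP_SEM0/SEM1) while bumping a monotonically increasing token; the render channel releases 0xf00d0000|value at one slot and waits for the display's 0x74b1e000 at the other. A sketch of just that bookkeeping:

    #include <stdint.h>
    #include <stdio.h>

    /* Offsets in the shared sync bo, per the EVO_SYNC()/EVO_FLIP_SEM* macros */
    #define EVO_SYNC(c, o)   ((c) * 0x0100 + (o))
    #define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00)

    int main(void)
    {
        uint32_t offset = EVO_FLIP_SEM0(0);  /* crtc 0 */
        uint16_t value = 0;

        for (int flip = 0; flip < 4; flip++) {
            printf("flip %d: token 0xf00d0000|%04x at 0x%02x, wait at 0x%02x\n",
                   flip, value, offset, offset ^ 0x10);
            offset ^= 0x10;  /* alternate SEM0/SEM1, as flip_next() does */
            value++;
        }
        return 0;
    }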
368/******************************************************************************
369 * CRTC
370 *****************************************************************************/
371static int
372nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
373{
374 struct nouveau_drm *drm = nouveau_drm(nv_crtc->base.dev);
375 struct drm_device *dev = nv_crtc->base.dev;
376 struct nouveau_connector *nv_connector;
377 struct drm_connector *connector;
378 u32 *push, mode = 0x00;
379 u32 mthd;
380
381 nv_connector = nouveau_crtc_connector_get(nv_crtc);
382 connector = &nv_connector->base;
383 if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
384 if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
385 mode = DITHERING_MODE_DYNAMIC2X2;
386 } else {
387 mode = nv_connector->dithering_mode;
388 }
389
390 if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
391 if (connector->display_info.bpc >= 8)
392 mode |= DITHERING_DEPTH_8BPC;
393 } else {
394 mode |= nv_connector->dithering_depth;
395 }
396
397 if (nv_device(drm->device)->card_type < NV_E0)
398 mthd = 0x0490 + (nv_crtc->index * 0x0300);
399 else
400 mthd = 0x04a0 + (nv_crtc->index * 0x0300);
401
402 push = evo_wait(dev, EVO_MASTER, 4);
403 if (push) {
404 evo_mthd(push, mthd, 1);
405 evo_data(push, mode);
406 if (update) {
407 evo_mthd(push, 0x0080, 1);
408 evo_data(push, 0x00000000);
409 }
410 evo_kick(push, dev, EVO_MASTER);
411 }
412
413 return 0;
414}
415
416static int
417nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
418{
419 struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
420 struct drm_device *dev = nv_crtc->base.dev;
421 struct drm_crtc *crtc = &nv_crtc->base;
422 struct nouveau_connector *nv_connector;
423 int mode = DRM_MODE_SCALE_NONE;
424 u32 oX, oY, *push;
425
426 /* start off at the resolution we programmed the crtc for, this
427 * effectively handles NONE/FULL scaling
428 */
429 nv_connector = nouveau_crtc_connector_get(nv_crtc);
430 if (nv_connector && nv_connector->native_mode)
431 mode = nv_connector->scaling_mode;
432
433 if (mode != DRM_MODE_SCALE_NONE)
434 omode = nv_connector->native_mode;
435 else
436 omode = umode;
437
438 oX = omode->hdisplay;
439 oY = omode->vdisplay;
440 if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
441 oY *= 2;
442
443 /* add overscan compensation if necessary, will keep the aspect
444 * ratio the same as the backend mode unless overridden by the
445 * user setting both hborder and vborder properties.
446 */
447 if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
448 (nv_connector->underscan == UNDERSCAN_AUTO &&
449 nv_connector->edid &&
450 drm_detect_hdmi_monitor(nv_connector->edid)))) {
451 u32 bX = nv_connector->underscan_hborder;
452 u32 bY = nv_connector->underscan_vborder;
453 u32 aspect = (oY << 19) / oX;
454
455 if (bX) {
456 oX -= (bX * 2);
457 if (bY) oY -= (bY * 2);
458 else oY = ((oX * aspect) + (aspect / 2)) >> 19;
459 } else {
460 oX -= (oX >> 4) + 32;
461 if (bY) oY -= (bY * 2);
462 else oY = ((oX * aspect) + (aspect / 2)) >> 19;
463 }
464 }
465
466 /* handle CENTER/ASPECT scaling, taking into account the areas
467 * removed already for overscan compensation
468 */
469 switch (mode) {
470 case DRM_MODE_SCALE_CENTER:
471 oX = min((u32)umode->hdisplay, oX);
472 oY = min((u32)umode->vdisplay, oY);
473 /* fall-through */
474 case DRM_MODE_SCALE_ASPECT:
475 if (oY < oX) {
476 u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
477 oX = ((oY * aspect) + (aspect / 2)) >> 19;
478 } else {
479 u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
480 oY = ((oX * aspect) + (aspect / 2)) >> 19;
481 }
482 break;
483 default:
484 break;
485 }
486
487 push = evo_wait(dev, EVO_MASTER, 8);
488 if (push) {
489 evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
490 evo_data(push, (oY << 16) | oX);
491 evo_data(push, (oY << 16) | oX);
492 evo_data(push, (oY << 16) | oX);
493 evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
494 evo_data(push, 0x00000000);
495 evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
496 evo_data(push, (umode->vdisplay << 16) | umode->hdisplay);
497 evo_kick(push, dev, EVO_MASTER);
498 if (update) {
499 nvd0_display_flip_stop(crtc);
500 nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
501 }
502 }
503
504 return 0;
505}
506
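Editor's note: the ASPECT branch above works in 13.19 fixed point, with aspect/2 added for rounding before the shift back down. A worked example in plain C, assuming a 4:3 user mode scaled onto a 16:9 panel:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* DRM_MODE_SCALE_ASPECT: 1024x768 user mode on a 1920x1080 panel,
         * using the same 19-bit fixed-point rounding as the code above. */
        uint32_t oX = 1920, oY = 1080;  /* output (native) mode */
        uint32_t uw = 1024, uh = 768;   /* user mode */

        if (oY < oX) {                  /* landscape: fit to height */
            uint32_t aspect = (uw << 19) / uh;
            oX = ((oY * aspect) + (aspect / 2)) >> 19;
        } else {                        /* portrait: fit to width */
            uint32_t aspect = (uh << 19) / uw;
            oY = ((oX * aspect) + (aspect / 2)) >> 19;
        }
        printf("viewport: %ux%u\n", oX, oY);  /* 1440x1080, 4:3 preserved */
        return 0;
    }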
507static int
508nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
509 int x, int y, bool update)
510{
511 struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
512 u32 *push;
513
514 push = evo_wait(fb->dev, EVO_MASTER, 16);
515 if (push) {
516 evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
517 evo_data(push, nvfb->nvbo->bo.offset >> 8);
518 evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4);
519 evo_data(push, (fb->height << 16) | fb->width);
520 evo_data(push, nvfb->r_pitch);
521 evo_data(push, nvfb->r_format);
522 evo_data(push, nvfb->r_dma);
523 evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
524 evo_data(push, (y << 16) | x);
525 if (update) {
526 evo_mthd(push, 0x0080, 1);
527 evo_data(push, 0x00000000);
528 }
529 evo_kick(push, fb->dev, EVO_MASTER);
530 }
531
532 nv_crtc->fb.tile_flags = nvfb->r_dma;
533 return 0;
534}
535
536static void
537nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update)
538{
539 struct drm_device *dev = nv_crtc->base.dev;
540 u32 *push = evo_wait(dev, EVO_MASTER, 16);
541 if (push) {
542 if (show) {
543 evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
544 evo_data(push, 0x85000000);
545 evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
546 evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
547 evo_data(push, NvEvoVRAM);
548 } else {
549 evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1);
550 evo_data(push, 0x05000000);
551 evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
552 evo_data(push, 0x00000000);
553 }
554
555 if (update) {
556 evo_mthd(push, 0x0080, 1);
557 evo_data(push, 0x00000000);
558 }
559
560 evo_kick(push, dev, EVO_MASTER);
561 }
562}
563
564static void
565nvd0_crtc_dpms(struct drm_crtc *crtc, int mode)
566{
567}
568
569static void
570nvd0_crtc_prepare(struct drm_crtc *crtc)
571{
572 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
573 u32 *push;
574
575 nvd0_display_flip_stop(crtc);
576
577 push = evo_wait(crtc->dev, EVO_MASTER, 2);
578 if (push) {
579 evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
580 evo_data(push, 0x00000000);
581 evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1);
582 evo_data(push, 0x03000000);
583 evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
584 evo_data(push, 0x00000000);
585 evo_kick(push, crtc->dev, EVO_MASTER);
586 }
587
588 nvd0_crtc_cursor_show(nv_crtc, false, false);
589}
590
591static void
592nvd0_crtc_commit(struct drm_crtc *crtc)
593{
594 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
595 u32 *push;
596
597 push = evo_wait(crtc->dev, EVO_MASTER, 32);
598 if (push) {
599 evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
600 evo_data(push, nv_crtc->fb.tile_flags);
601 evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
602 evo_data(push, 0x83000000);
603 evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
604 evo_data(push, 0x00000000);
605 evo_data(push, 0x00000000);
606 evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
607 evo_data(push, NvEvoVRAM);
608 evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
609 evo_data(push, 0xffffff00);
610 evo_kick(push, crtc->dev, EVO_MASTER);
611 }
612
613 nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, true);
614 nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
615}
616
617static bool
618nvd0_crtc_mode_fixup(struct drm_crtc *crtc, const struct drm_display_mode *mode,
619 struct drm_display_mode *adjusted_mode)
620{
621 return true;
622}
623
624static int
625nvd0_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
626{
627 struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
628 int ret;
629
630 ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
631 if (ret)
632 return ret;
633
634 if (old_fb) {
635 nvfb = nouveau_framebuffer(old_fb);
636 nouveau_bo_unpin(nvfb->nvbo);
637 }
638
639 return 0;
640}
641
642static int
643nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
644 struct drm_display_mode *mode, int x, int y,
645 struct drm_framebuffer *old_fb)
646{
647 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
648 struct nouveau_connector *nv_connector;
649 u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
650 u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
651 u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
652 u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
653 u32 vblan2e = 0, vblan2s = 1;
654 u32 *push;
655 int ret;
656
657 hactive = mode->htotal;
658 hsynce = mode->hsync_end - mode->hsync_start - 1;
659 hbackp = mode->htotal - mode->hsync_end;
660 hblanke = hsynce + hbackp;
661 hfrontp = mode->hsync_start - mode->hdisplay;
662 hblanks = mode->htotal - hfrontp - 1;
663
664 vactive = mode->vtotal * vscan / ilace;
665 vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
666 vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
667 vblanke = vsynce + vbackp;
668 vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
669 vblanks = vactive - vfrontp - 1;
670 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
671 vblan2e = vactive + vsynce + vbackp;
672 vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
673 vactive = (vactive * 2) + 1;
674 }
675
676 ret = nvd0_crtc_swap_fbs(crtc, old_fb);
677 if (ret)
678 return ret;
679
680 push = evo_wait(crtc->dev, EVO_MASTER, 64);
681 if (push) {
682 evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
683 evo_data(push, 0x00000000);
684 evo_data(push, (vactive << 16) | hactive);
685 evo_data(push, ( vsynce << 16) | hsynce);
686 evo_data(push, (vblanke << 16) | hblanke);
687 evo_data(push, (vblanks << 16) | hblanks);
688 evo_data(push, (vblan2e << 16) | vblan2s);
689 evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
690 evo_data(push, 0x00000000); /* ??? */
691 evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
692 evo_data(push, mode->clock * 1000);
693 evo_data(push, 0x00200000); /* ??? */
694 evo_data(push, mode->clock * 1000);
695 evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
696 evo_data(push, 0x00000311);
697 evo_data(push, 0x00000100);
698 evo_kick(push, crtc->dev, EVO_MASTER);
699 }
700
701 nv_connector = nouveau_crtc_connector_get(nv_crtc);
702 nvd0_crtc_set_dither(nv_crtc, false);
703 nvd0_crtc_set_scale(nv_crtc, false);
704 nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
705 return 0;
706}
707
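Editor's note: the blanking-period derivation above counts backwards from the DRM mode fields (sync width, back porch, front porch, all minus-one biased). The same arithmetic for a concrete mode, with interlace and doublescan factored out (ilace = vscan = 1):

    #include <stdio.h>

    int main(void)
    {
        /* CEA 1920x1080p60 timings, derived as in nvd0_crtc_mode_set() */
        int hdisplay = 1920, hsync_start = 2008, hsync_end = 2052, htotal = 2200;
        int vdisplay = 1080, vsync_start = 1084, vsync_end = 1089, vtotal = 1125;

        int hsynce  = hsync_end - hsync_start - 1;   /* sync width - 1  */
        int hbackp  = htotal - hsync_end;            /* back porch      */
        int hblanke = hsynce + hbackp;               /* blank end       */
        int hfrontp = hsync_start - hdisplay;        /* front porch     */
        int hblanks = htotal - hfrontp - 1;          /* blank start     */

        int vsynce  = vsync_end - vsync_start - 1;
        int vbackp  = vtotal - vsync_end;
        int vblanke = vsynce + vbackp;
        int vfrontp = vsync_start - vdisplay;
        int vblanks = vtotal - vfrontp - 1;

        printf("h: synce %d blanke %d blanks %d\n", hsynce, hblanke, hblanks);
        printf("v: synce %d blanke %d blanks %d\n", vsynce, vblanke, vblanks);
        return 0;
    }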
708static int
709nvd0_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
710 struct drm_framebuffer *old_fb)
711{
712 struct nouveau_drm *drm = nouveau_drm(crtc->dev);
713 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
714 int ret;
715
716 if (!crtc->fb) {
717 NV_DEBUG(drm, "No FB bound\n");
718 return 0;
719 }
720
721 ret = nvd0_crtc_swap_fbs(crtc, old_fb);
722 if (ret)
723 return ret;
724
725 nvd0_display_flip_stop(crtc);
726 nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
727 nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
728 return 0;
729}
730
731static int
732nvd0_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
733 struct drm_framebuffer *fb, int x, int y,
734 enum mode_set_atomic state)
735{
736 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
737 nvd0_display_flip_stop(crtc);
738 nvd0_crtc_set_image(nv_crtc, fb, x, y, true);
739 return 0;
740}
741
742static void
743nvd0_crtc_lut_load(struct drm_crtc *crtc)
744{
745 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
746 void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
747 int i;
748
749 for (i = 0; i < 256; i++) {
750 writew(0x6000 + (nv_crtc->lut.r[i] >> 2), lut + (i * 0x20) + 0);
751 writew(0x6000 + (nv_crtc->lut.g[i] >> 2), lut + (i * 0x20) + 2);
752 writew(0x6000 + (nv_crtc->lut.b[i] >> 2), lut + (i * 0x20) + 4);
753 }
754}
755
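Editor's note: each LUT entry occupies 0x20 bytes, with 16-bit R/G/B words at offsets 0/2/4, and the 16-bit gamma component is narrowed to what appears to be a 14-bit value with a 0x6000 bias. A sketch of that packing for the identity ramp installed by nvd0_crtc_create():

    #include <stdint.h>
    #include <stdio.h>

    /* One 16-bit gamma component -> the biased word lut_load() writes */
    static uint16_t lut_entry(uint16_t v16)
    {
        return 0x6000 + (v16 >> 2);
    }

    int main(void)
    {
        for (int i = 0; i < 256; i += 64) {
            uint16_t v = (uint16_t)(i << 8);  /* identity ramp entry */
            printf("i=%3d  bo offset 0x%04x  r/g/b word 0x%04x\n",
                   i, i * 0x20, lut_entry(v));
        }
        return 0;
    }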
756static int
757nvd0_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
758 uint32_t handle, uint32_t width, uint32_t height)
759{
760 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
761 struct drm_device *dev = crtc->dev;
762 struct drm_gem_object *gem;
763 struct nouveau_bo *nvbo;
764 bool visible = (handle != 0);
765 int i, ret = 0;
766
767 if (visible) {
768 if (width != 64 || height != 64)
769 return -EINVAL;
770
771 gem = drm_gem_object_lookup(dev, file_priv, handle);
772 if (unlikely(!gem))
773 return -ENOENT;
774 nvbo = nouveau_gem_object(gem);
775
776 ret = nouveau_bo_map(nvbo);
777 if (ret == 0) {
778 for (i = 0; i < 64 * 64; i++) {
779 u32 v = nouveau_bo_rd32(nvbo, i);
780 nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v);
781 }
782 nouveau_bo_unmap(nvbo);
783 }
784
785 drm_gem_object_unreference_unlocked(gem);
786 }
787
788 if (visible != nv_crtc->cursor.visible) {
789 nvd0_crtc_cursor_show(nv_crtc, visible, true);
790 nv_crtc->cursor.visible = visible;
791 }
792
793 return ret;
794}
795
796static int
797nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
798{
799 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
800 int ch = EVO_CURS(nv_crtc->index);
801
802 evo_piow(crtc->dev, ch, 0x0084, (y << 16) | (x & 0xffff));
803 evo_piow(crtc->dev, ch, 0x0080, 0x00000000);
804 return 0;
805}
806
807static void
808nvd0_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
809 uint32_t start, uint32_t size)
810{
811 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
812 u32 end = max(start + size, (u32)256);
813 u32 i;
814
815 for (i = start; i < end; i++) {
816 nv_crtc->lut.r[i] = r[i];
817 nv_crtc->lut.g[i] = g[i];
818 nv_crtc->lut.b[i] = b[i];
819 }
820
821 nvd0_crtc_lut_load(crtc);
822}
823
824static void
825nvd0_crtc_destroy(struct drm_crtc *crtc)
826{
827 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
828 nouveau_bo_unmap(nv_crtc->cursor.nvbo);
829 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
830 nouveau_bo_unmap(nv_crtc->lut.nvbo);
831 nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
832 drm_crtc_cleanup(crtc);
833 kfree(crtc);
834}
835
836static const struct drm_crtc_helper_funcs nvd0_crtc_hfunc = {
837 .dpms = nvd0_crtc_dpms,
838 .prepare = nvd0_crtc_prepare,
839 .commit = nvd0_crtc_commit,
840 .mode_fixup = nvd0_crtc_mode_fixup,
841 .mode_set = nvd0_crtc_mode_set,
842 .mode_set_base = nvd0_crtc_mode_set_base,
843 .mode_set_base_atomic = nvd0_crtc_mode_set_base_atomic,
844 .load_lut = nvd0_crtc_lut_load,
845};
846
847static const struct drm_crtc_funcs nvd0_crtc_func = {
848 .cursor_set = nvd0_crtc_cursor_set,
849 .cursor_move = nvd0_crtc_cursor_move,
850 .gamma_set = nvd0_crtc_gamma_set,
851 .set_config = drm_crtc_helper_set_config,
852 .destroy = nvd0_crtc_destroy,
853 .page_flip = nouveau_crtc_page_flip,
854};
855
856static void
857nvd0_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
858{
859}
860
861static void
862nvd0_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
863{
864}
865
866static int
867nvd0_crtc_create(struct drm_device *dev, int index)
868{
869 struct nouveau_crtc *nv_crtc;
870 struct drm_crtc *crtc;
871 int ret, i;
872
873 nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
874 if (!nv_crtc)
875 return -ENOMEM;
876
877 nv_crtc->index = index;
878 nv_crtc->set_dither = nvd0_crtc_set_dither;
879 nv_crtc->set_scale = nvd0_crtc_set_scale;
880 nv_crtc->cursor.set_offset = nvd0_cursor_set_offset;
881 nv_crtc->cursor.set_pos = nvd0_cursor_set_pos;
882 for (i = 0; i < 256; i++) {
883 nv_crtc->lut.r[i] = i << 8;
884 nv_crtc->lut.g[i] = i << 8;
885 nv_crtc->lut.b[i] = i << 8;
886 }
887
888 crtc = &nv_crtc->base;
889 drm_crtc_init(dev, crtc, &nvd0_crtc_func);
890 drm_crtc_helper_add(crtc, &nvd0_crtc_hfunc);
891 drm_mode_crtc_set_gamma_size(crtc, 256);
892
893 ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
894 0, 0x0000, NULL, &nv_crtc->cursor.nvbo);
895 if (!ret) {
896 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
897 if (!ret)
898 ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
899 if (ret)
900 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
901 }
902
903 if (ret)
904 goto out;
905
906 ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
907 0, 0x0000, NULL, &nv_crtc->lut.nvbo);
908 if (!ret) {
909 ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
910 if (!ret)
911 ret = nouveau_bo_map(nv_crtc->lut.nvbo);
912 if (ret)
913 nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
914 }
915
916 if (ret)
917 goto out;
918
919 nvd0_crtc_lut_load(crtc);
920
921out:
922 if (ret)
923 nvd0_crtc_destroy(crtc);
924 return ret;
925}
926
927/******************************************************************************
928 * DAC
929 *****************************************************************************/
930static void
931nvd0_dac_dpms(struct drm_encoder *encoder, int mode)
932{
933 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
934 struct drm_device *dev = encoder->dev;
935 struct nouveau_device *device = nouveau_dev(dev);
936 int or = nv_encoder->or;
937 u32 dpms_ctrl;
938
939 dpms_ctrl = 0x80000000;
940 if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF)
941 dpms_ctrl |= 0x00000001;
942 if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF)
943 dpms_ctrl |= 0x00000004;
944
945 nv_wait(device, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
946 nv_mask(device, 0x61a004 + (or * 0x0800), 0xc000007f, dpms_ctrl);
947 nv_wait(device, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
948}
949
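Editor's note: the DAC DPMS control word mirrors the usual DPMS semantics (presumably bit 0 gates hsync for STANDBY and bit 2 gates vsync for SUSPEND, with both set for OFF; bit 31 latches the update). A sketch of the word for each mode:

    #include <stdint.h>
    #include <stdio.h>

    enum { DPMS_ON, DPMS_STANDBY, DPMS_SUSPEND, DPMS_OFF };

    /* Mirrors nvd0_dac_dpms(); the bit meanings are an inference */
    static uint32_t dac_dpms_ctrl(int mode)
    {
        uint32_t ctrl = 0x80000000;
        if (mode == DPMS_STANDBY || mode == DPMS_OFF)
            ctrl |= 0x00000001;
        if (mode == DPMS_SUSPEND || mode == DPMS_OFF)
            ctrl |= 0x00000004;
        return ctrl;
    }

    int main(void)
    {
        const char *name[] = { "on", "standby", "suspend", "off" };
        for (int m = DPMS_ON; m <= DPMS_OFF; m++)
            printf("%-7s -> 0x%08x\n", name[m], dac_dpms_ctrl(m));
        return 0;
    }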
950static bool
951nvd0_dac_mode_fixup(struct drm_encoder *encoder,
952 const struct drm_display_mode *mode,
953 struct drm_display_mode *adjusted_mode)
954{
955 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
956 struct nouveau_connector *nv_connector;
957
958 nv_connector = nouveau_encoder_connector_get(nv_encoder);
959 if (nv_connector && nv_connector->native_mode) {
960 if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
961 int id = adjusted_mode->base.id;
962 *adjusted_mode = *nv_connector->native_mode;
963 adjusted_mode->base.id = id;
964 }
965 }
966
967 return true;
968}
969
970static void
971nvd0_dac_commit(struct drm_encoder *encoder)
972{
973}
974
975static void
976nvd0_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
977 struct drm_display_mode *adjusted_mode)
978{
979 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
980 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
981 u32 syncs, magic, *push;
982
983 syncs = 0x00000001;
984 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
985 syncs |= 0x00000008;
986 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
987 syncs |= 0x00000010;
988
989 magic = 0x31ec6000 | (nv_crtc->index << 25);
990 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
991 magic |= 0x00000001;
992
993 nvd0_dac_dpms(encoder, DRM_MODE_DPMS_ON);
994
995 push = evo_wait(encoder->dev, EVO_MASTER, 8);
996 if (push) {
997 evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
998 evo_data(push, syncs);
999 evo_data(push, magic);
1000 evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 2);
1001 evo_data(push, 1 << nv_crtc->index);
1002 evo_data(push, 0x00ff);
1003 evo_kick(push, encoder->dev, EVO_MASTER);
1004 }
1005
1006 nv_encoder->crtc = encoder->crtc;
1007}
1008
1009static void
1010nvd0_dac_disconnect(struct drm_encoder *encoder)
1011{
1012 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1013 struct drm_device *dev = encoder->dev;
1014 u32 *push;
1015
1016 if (nv_encoder->crtc) {
1017 nvd0_crtc_prepare(nv_encoder->crtc);
1018
1019 push = evo_wait(dev, EVO_MASTER, 4);
1020 if (push) {
1021 evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 1);
1022 evo_data(push, 0x00000000);
1023 evo_mthd(push, 0x0080, 1);
1024 evo_data(push, 0x00000000);
1025 evo_kick(push, dev, EVO_MASTER);
1026 }
1027
1028 nv_encoder->crtc = NULL;
1029 }
1030}
1031
1032static enum drm_connector_status
1033nvd0_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
1034{
1035 enum drm_connector_status status = connector_status_disconnected;
1036 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1037 struct drm_device *dev = encoder->dev;
1038 struct nouveau_device *device = nouveau_dev(dev);
1039 int or = nv_encoder->or;
1040 u32 load;
1041
1042 nv_wr32(device, 0x61a00c + (or * 0x800), 0x00100000);
1043 udelay(9500);
1044 nv_wr32(device, 0x61a00c + (or * 0x800), 0x80000000);
1045
1046 load = nv_rd32(device, 0x61a00c + (or * 0x800));
1047 if ((load & 0x38000000) == 0x38000000)
1048 status = connector_status_connected;
1049
1050 nv_wr32(device, 0x61a00c + (or * 0x800), 0x00000000);
1051 return status;
1052}
1053
1054static void
1055nvd0_dac_destroy(struct drm_encoder *encoder)
1056{
1057 drm_encoder_cleanup(encoder);
1058 kfree(encoder);
1059}
1060
1061static const struct drm_encoder_helper_funcs nvd0_dac_hfunc = {
1062 .dpms = nvd0_dac_dpms,
1063 .mode_fixup = nvd0_dac_mode_fixup,
1064 .prepare = nvd0_dac_disconnect,
1065 .commit = nvd0_dac_commit,
1066 .mode_set = nvd0_dac_mode_set,
1067 .disable = nvd0_dac_disconnect,
1068 .get_crtc = nvd0_display_crtc_get,
1069 .detect = nvd0_dac_detect
1070};
1071
1072static const struct drm_encoder_funcs nvd0_dac_func = {
1073 .destroy = nvd0_dac_destroy,
1074};
1075
1076static int
1077nvd0_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
1078{
1079 struct drm_device *dev = connector->dev;
1080 struct nouveau_encoder *nv_encoder;
1081 struct drm_encoder *encoder;
1082
1083 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
1084 if (!nv_encoder)
1085 return -ENOMEM;
1086 nv_encoder->dcb = dcbe;
1087 nv_encoder->or = ffs(dcbe->or) - 1;
1088
1089 encoder = to_drm_encoder(nv_encoder);
1090 encoder->possible_crtcs = dcbe->heads;
1091 encoder->possible_clones = 0;
1092 drm_encoder_init(dev, encoder, &nvd0_dac_func, DRM_MODE_ENCODER_DAC);
1093 drm_encoder_helper_add(encoder, &nvd0_dac_hfunc);
1094
1095 drm_mode_connector_attach_encoder(connector, encoder);
1096 return 0;
1097}
1098
1099/******************************************************************************
1100 * Audio
1101 *****************************************************************************/
1102static void
1103nvd0_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
1104{
1105 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1106 struct nouveau_connector *nv_connector;
1107 struct drm_device *dev = encoder->dev;
1108 struct nouveau_device *device = nouveau_dev(dev);
1109 int i, or = nv_encoder->or * 0x30;
1110
1111 nv_connector = nouveau_encoder_connector_get(nv_encoder);
1112 if (!drm_detect_monitor_audio(nv_connector->edid))
1113 return;
1114
1115 nv_mask(device, 0x10ec10 + or, 0x80000003, 0x80000001);
1116
1117 drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
1118 if (nv_connector->base.eld[0]) {
1119 u8 *eld = nv_connector->base.eld;
1120
1121 for (i = 0; i < eld[2] * 4; i++)
1122 nv_wr32(device, 0x10ec00 + or, (i << 8) | eld[i]);
1123 for (i = eld[2] * 4; i < 0x60; i++)
1124 nv_wr32(device, 0x10ec00 + or, (i << 8) | 0x00);
1125
1126 nv_mask(device, 0x10ec10 + or, 0x80000002, 0x80000002);
1127 }
1128}
1129
1130static void
1131nvd0_audio_disconnect(struct drm_encoder *encoder)
1132{
1133 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1134 struct drm_device *dev = encoder->dev;
1135 struct nouveau_device *device = nouveau_dev(dev);
1136 int or = nv_encoder->or * 0x30;
1137
1138 nv_mask(device, 0x10ec10 + or, 0x80000003, 0x80000000);
1139}
1140
1141/******************************************************************************
1142 * HDMI
1143 *****************************************************************************/
1144static void
1145nvd0_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
1146{
1147 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1148 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
1149 struct nouveau_connector *nv_connector;
1150 struct drm_device *dev = encoder->dev;
1151 struct nouveau_device *device = nouveau_dev(dev);
1152 int head = nv_crtc->index * 0x800;
1153 u32 rekey = 56; /* binary driver, and tegra constant */
1154 u32 max_ac_packet;
1155
1156 nv_connector = nouveau_encoder_connector_get(nv_encoder);
1157 if (!drm_detect_hdmi_monitor(nv_connector->edid))
1158 return;
1159
1160 max_ac_packet = mode->htotal - mode->hdisplay;
1161 max_ac_packet -= rekey;
1162 max_ac_packet -= 18; /* constant from tegra */
1163 max_ac_packet /= 32;
1164
1165 /* AVI InfoFrame */
1166 nv_mask(device, 0x616714 + head, 0x00000001, 0x00000000);
1167 nv_wr32(device, 0x61671c + head, 0x000d0282);
1168 nv_wr32(device, 0x616720 + head, 0x0000006f);
1169 nv_wr32(device, 0x616724 + head, 0x00000000);
1170 nv_wr32(device, 0x616728 + head, 0x00000000);
1171 nv_wr32(device, 0x61672c + head, 0x00000000);
1172 nv_mask(device, 0x616714 + head, 0x00000001, 0x00000001);
1173
1174 /* ??? InfoFrame? */
1175 nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000000);
1176 nv_wr32(device, 0x6167ac + head, 0x00000010);
1177 nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000001);
1178
1179 /* HDMI_CTRL */
1180 nv_mask(device, 0x616798 + head, 0x401f007f, 0x40000000 | rekey |
1181 max_ac_packet << 16);
1182
1183 /* NFI, audio doesn't work without it though.. */
1184 nv_mask(device, 0x616548 + head, 0x00000070, 0x00000000);
1185
1186 nvd0_audio_mode_set(encoder, mode);
1187}
1188
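Editor's note: max_ac_packet budgets how many 32-byte data-island packets fit in the horizontal blanking after the rekey window and an 18-pixel constant are subtracted. The HDMI_CTRL arithmetic worked through for a common mode:

    #include <stdio.h>

    int main(void)
    {
        /* CEA 1080p60: htotal 2200, hdisplay 1920 */
        int htotal = 2200, hdisplay = 1920;
        int rekey = 56;                          /* driver constant above */

        int max_ac_packet = htotal - hdisplay;   /* 280 pixels of hblank  */
        max_ac_packet -= rekey;                  /* 224                   */
        max_ac_packet -= 18;                     /* 206                   */
        max_ac_packet /= 32;                     /* 6 packets             */

        printf("HDMI_CTRL = 0x%08x\n",
               0x40000000 | rekey | (max_ac_packet << 16));
        return 0;
    }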
1189static void
1190nvd0_hdmi_disconnect(struct drm_encoder *encoder)
1191{
1192 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1193 struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
1194 struct drm_device *dev = encoder->dev;
1195 struct nouveau_device *device = nouveau_dev(dev);
1196 int head = nv_crtc->index * 0x800;
1197
1198 nvd0_audio_disconnect(encoder);
1199
1200 nv_mask(device, 0x616798 + head, 0x40000000, 0x00000000);
1201 nv_mask(device, 0x6167a4 + head, 0x00000001, 0x00000000);
1202 nv_mask(device, 0x616714 + head, 0x00000001, 0x00000000);
1203}
1204
1205/******************************************************************************
1206 * SOR
1207 *****************************************************************************/
1208static inline u32
1209nvd0_sor_dp_lane_map(struct drm_device *dev, struct dcb_output *dcb, u8 lane)
1210{
1211 static const u8 nvd0[] = { 16, 8, 0, 24 };
1212 return nvd0[lane];
1213}
1214
1215static void
1216nvd0_sor_dp_train_set(struct drm_device *dev, struct dcb_output *dcb, u8 pattern)
1217{
1218 struct nouveau_device *device = nouveau_dev(dev);
1219 const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
1220 const u32 loff = (or * 0x800) + (link * 0x80);
1221 nv_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
1222}
1223
1224static void
1225nvd0_sor_dp_train_adj(struct drm_device *dev, struct dcb_output *dcb,
1226 u8 lane, u8 swing, u8 preem)
1227{
1228 struct nouveau_device *device = nouveau_dev(dev);
1229 struct nouveau_drm *drm = nouveau_drm(dev);
1230 const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
1231 const u32 loff = (or * 0x800) + (link * 0x80);
1232 u32 shift = nvd0_sor_dp_lane_map(dev, dcb, lane);
1233 u32 mask = 0x000000ff << shift;
1234 u8 *table, *entry, *config = NULL;
1235
1236 switch (swing) {
1237 case 0: preem += 0; break;
1238 case 1: preem += 4; break;
1239 case 2: preem += 7; break;
1240 case 3: preem += 9; break;
1241 }
1242
1243 table = nouveau_dp_bios_data(dev, dcb, &entry);
1244 if (table) {
1245 if (table[0] == 0x30) {
1246 config = entry + table[4];
1247 config += table[5] * preem;
1248 } else
1249 if (table[0] == 0x40) {
1250 config = table + table[1];
1251 config += table[2] * table[3];
1252 config += table[6] * preem;
1253 }
1254 }
1255
1256 if (!config) {
1257 NV_ERROR(drm, "PDISP: unsupported DP table for chipset\n");
1258 return;
1259 }
1260
1261 nv_mask(device, 0x61c118 + loff, mask, config[1] << shift);
1262 nv_mask(device, 0x61c120 + loff, mask, config[2] << shift);
1263 nv_mask(device, 0x61c130 + loff, 0x0000ff00, config[3] << 8);
1264 nv_mask(device, 0x61c13c + loff, 0x00000000, 0x00000000);
1265}
1266
1267static void
1268nvd0_sor_dp_link_set(struct drm_device *dev, struct dcb_output *dcb, int crtc,
1269 int link_nr, u32 link_bw, bool enhframe)
1270{
1271 struct nouveau_device *device = nouveau_dev(dev);
1272 const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
1273 const u32 loff = (or * 0x800) + (link * 0x80);
1274 const u32 soff = (or * 0x800);
1275 u32 dpctrl = nv_rd32(device, 0x61c10c + loff) & ~0x001f4000;
1276 u32 clksor = nv_rd32(device, 0x612300 + soff) & ~0x007c0000;
1277 u32 script = 0x0000, lane_mask = 0;
1278 u8 *table, *entry;
1279 int i;
1280
1281 link_bw /= 27000;
1282
1283 table = nouveau_dp_bios_data(dev, dcb, &entry);
1284 if (table) {
1285 if (table[0] == 0x30) entry = ROMPTR(dev, entry[10]);
1286 else if (table[0] == 0x40) entry = ROMPTR(dev, entry[9]);
1287 else entry = NULL;
1288
1289 while (entry) {
1290 if (entry[0] >= link_bw)
1291 break;
1292 entry += 3;
1293 }
1294
1295 nouveau_bios_run_init_table(dev, script, dcb, crtc);
1296 }
1297
1298 clksor |= link_bw << 18;
1299 dpctrl |= ((1 << link_nr) - 1) << 16;
1300 if (enhframe)
1301 dpctrl |= 0x00004000;
1302
1303 for (i = 0; i < link_nr; i++)
1304 lane_mask |= 1 << (nvd0_sor_dp_lane_map(dev, dcb, i) >> 3);
1305
1306 nv_wr32(device, 0x612300 + soff, clksor);
1307 nv_wr32(device, 0x61c10c + loff, dpctrl);
1308 nv_mask(device, 0x61c130 + loff, 0x0000000f, lane_mask);
1309}
1310
1311static void
1312nvd0_sor_dp_link_get(struct drm_device *dev, struct dcb_output *dcb,
1313 u32 *link_nr, u32 *link_bw)
1314{
1315 struct nouveau_device *device = nouveau_dev(dev);
1316 const u32 or = ffs(dcb->or) - 1, link = !(dcb->sorconf.link & 1);
1317 const u32 loff = (or * 0x800) + (link * 0x80);
1318 const u32 soff = (or * 0x800);
1319 u32 dpctrl = nv_rd32(device, 0x61c10c + loff) & 0x000f0000;
1320 u32 clksor = nv_rd32(device, 0x612300 + soff);
1321
1322 if (dpctrl > 0x00030000) *link_nr = 4;
1323 else if (dpctrl > 0x00010000) *link_nr = 2;
1324 else *link_nr = 1;
1325
1326 *link_bw = (clksor & 0x007c0000) >> 18;
1327 *link_bw *= 27000;
1328}
1329
1330static void
1331nvd0_sor_dp_calc_tu(struct drm_device *dev, struct dcb_output *dcb,
1332 u32 crtc, u32 datarate)
1333{
1334 struct nouveau_device *device = nouveau_dev(dev);
1335 const u32 symbol = 100000;
1336 const u32 TU = 64;
1337 u32 link_nr, link_bw;
1338 u64 ratio, value;
1339
1340 nvd0_sor_dp_link_get(dev, dcb, &link_nr, &link_bw);
1341
1342 ratio = datarate;
1343 ratio *= symbol;
1344 do_div(ratio, link_nr * link_bw);
1345
1346 value = (symbol - ratio) * TU;
1347 value *= ratio;
1348 do_div(value, symbol);
1349 do_div(value, symbol);
1350
1351 value += 5;
1352 value |= 0x08000000;
1353
1354 nv_wr32(device, 0x616610 + (crtc * 0x800), value);
1355}
1356
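Editor's note: the TU calculation sizes the active portion of the 64-symbol transfer unit from the ratio of stream rate to total link rate, scaled by 100000 for precision. The same math in userspace (plain 64-bit division standing in for do_div), for 1080p60 at 24bpp over 4 lanes of 2.7 Gbps:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint64_t symbol = 100000, TU = 64;
        uint64_t datarate = 148500 * 24 / 8;     /* mode->clock * bpc*3/8 */
        uint64_t link_nr = 4, link_bw = 270000;  /* 2.7Gbps in 27000 units */

        uint64_t ratio = datarate * symbol / (link_nr * link_bw);
        uint64_t value = (symbol - ratio) * TU;
        value = value * ratio / symbol / symbol;
        value += 5;
        value |= 0x08000000;

        printf("TU register = 0x%08x\n", (uint32_t)value);
        return 0;
    }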
1357static void
1358nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
1359{
1360 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1361 struct drm_device *dev = encoder->dev;
1362 struct nouveau_device *device = nouveau_dev(dev);
1363 struct drm_encoder *partner;
1364 int or = nv_encoder->or;
1365 u32 dpms_ctrl;
1366
1367 nv_encoder->last_dpms = mode;
1368
1369 list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
1370 struct nouveau_encoder *nv_partner = nouveau_encoder(partner);
1371
1372 if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
1373 continue;
1374
1375 if (nv_partner != nv_encoder &&
1376 nv_partner->dcb->or == nv_encoder->dcb->or) {
1377 if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
1378 return;
1379 break;
1380 }
1381 }
1382
1383 dpms_ctrl = (mode == DRM_MODE_DPMS_ON);
1384 dpms_ctrl |= 0x80000000;
1385
1386 nv_wait(device, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
1387 nv_mask(device, 0x61c004 + (or * 0x0800), 0x80000001, dpms_ctrl);
1388 nv_wait(device, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
1389 nv_wait(device, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
1390
1391 if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
1392 struct dp_train_func func = {
1393 .link_set = nvd0_sor_dp_link_set,
1394 .train_set = nvd0_sor_dp_train_set,
1395 .train_adj = nvd0_sor_dp_train_adj
1396 };
1397
1398 nouveau_dp_dpms(encoder, mode, nv_encoder->dp.datarate, &func);
1399 }
1400}
1401
1402static bool
1403nvd0_sor_mode_fixup(struct drm_encoder *encoder,
1404 const struct drm_display_mode *mode,
1405 struct drm_display_mode *adjusted_mode)
1406{
1407 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1408 struct nouveau_connector *nv_connector;
1409
1410 nv_connector = nouveau_encoder_connector_get(nv_encoder);
1411 if (nv_connector && nv_connector->native_mode) {
1412 if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
1413 int id = adjusted_mode->base.id;
1414 *adjusted_mode = *nv_connector->native_mode;
1415 adjusted_mode->base.id = id;
1416 }
1417 }
1418
1419 return true;
1420}
1421
1422static void
1423nvd0_sor_disconnect(struct drm_encoder *encoder)
1424{
1425 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1426 struct drm_device *dev = encoder->dev;
1427 u32 *push;
1428
1429 if (nv_encoder->crtc) {
1430 nvd0_crtc_prepare(nv_encoder->crtc);
1431
1432 push = evo_wait(dev, EVO_MASTER, 4);
1433 if (push) {
1434 evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
1435 evo_data(push, 0x00000000);
1436 evo_mthd(push, 0x0080, 1);
1437 evo_data(push, 0x00000000);
1438 evo_kick(push, dev, EVO_MASTER);
1439 }
1440
1441 nvd0_hdmi_disconnect(encoder);
1442
1443 nv_encoder->crtc = NULL;
1444 nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
1445 }
1446}
1447
1448static void
1449nvd0_sor_prepare(struct drm_encoder *encoder)
1450{
1451 nvd0_sor_disconnect(encoder);
1452 if (nouveau_encoder(encoder)->dcb->type == DCB_OUTPUT_DP)
1453 evo_sync(encoder->dev, EVO_MASTER);
1454}
1455
1456static void
1457nvd0_sor_commit(struct drm_encoder *encoder)
1458{
1459}
1460
1461static void
1462nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
1463 struct drm_display_mode *mode)
1464{
1465 struct drm_device *dev = encoder->dev;
1466 struct nouveau_drm *drm = nouveau_drm(dev);
1467 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
1468 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
1469 struct nouveau_connector *nv_connector;
1470 struct nvbios *bios = &drm->vbios;
1471 u32 mode_ctrl = (1 << nv_crtc->index);
1472 u32 syncs, magic, *push;
1473 u32 or_config;
1474
1475 syncs = 0x00000001;
1476 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
1477 syncs |= 0x00000008;
1478 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
1479 syncs |= 0x00000010;
1480
1481 magic = 0x31ec6000 | (nv_crtc->index << 25);
1482 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1483 magic |= 0x00000001;
1484
1485 nv_connector = nouveau_encoder_connector_get(nv_encoder);
1486 switch (nv_encoder->dcb->type) {
1487 case DCB_OUTPUT_TMDS:
1488 if (nv_encoder->dcb->sorconf.link & 1) {
1489 if (mode->clock < 165000)
1490 mode_ctrl |= 0x00000100;
1491 else
1492 mode_ctrl |= 0x00000500;
1493 } else {
1494 mode_ctrl |= 0x00000200;
1495 }
1496
1497 or_config = (mode_ctrl & 0x00000f00) >> 8;
1498 if (mode->clock >= 165000)
1499 or_config |= 0x0100;
1500
1501 nvd0_hdmi_mode_set(encoder, mode);
1502 break;
1503 case DCB_OUTPUT_LVDS:
1504 or_config = (mode_ctrl & 0x00000f00) >> 8;
1505 if (bios->fp_no_ddc) {
1506 if (bios->fp.dual_link)
1507 or_config |= 0x0100;
1508 if (bios->fp.if_is_24bit)
1509 or_config |= 0x0200;
1510 } else {
1511 if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
1512 if (((u8 *)nv_connector->edid)[121] == 2)
1513 or_config |= 0x0100;
1514 } else
1515 if (mode->clock >= bios->fp.duallink_transition_clk) {
1516 or_config |= 0x0100;
1517 }
1518
1519 if (or_config & 0x0100) {
1520 if (bios->fp.strapless_is_24bit & 2)
1521 or_config |= 0x0200;
1522 } else {
1523 if (bios->fp.strapless_is_24bit & 1)
1524 or_config |= 0x0200;
1525 }
1526
1527 if (nv_connector->base.display_info.bpc == 8)
1528 or_config |= 0x0200;
1529
1530 }
1531 break;
1532 case DCB_OUTPUT_DP:
1533 if (nv_connector->base.display_info.bpc == 6) {
1534 nv_encoder->dp.datarate = mode->clock * 18 / 8;
1535 syncs |= 0x00000002 << 6;
1536 } else {
1537 nv_encoder->dp.datarate = mode->clock * 24 / 8;
1538 syncs |= 0x00000005 << 6;
1539 }
1540
1541 if (nv_encoder->dcb->sorconf.link & 1)
1542 mode_ctrl |= 0x00000800;
1543 else
1544 mode_ctrl |= 0x00000900;
1545
1546 or_config = (mode_ctrl & 0x00000f00) >> 8;
1547 break;
1548 default:
1549 BUG_ON(1);
1550 break;
1551 }
1552
1553 nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON);
1554
1555 if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
1556 nvd0_sor_dp_calc_tu(dev, nv_encoder->dcb, nv_crtc->index,
1557 nv_encoder->dp.datarate);
1558 }
1559
1560 push = evo_wait(dev, EVO_MASTER, 8);
1561 if (push) {
1562 evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
1563 evo_data(push, syncs);
1564 evo_data(push, magic);
1565 evo_mthd(push, 0x0200 + (nv_encoder->or * 0x020), 2);
1566 evo_data(push, mode_ctrl);
1567 evo_data(push, or_config);
1568 evo_kick(push, dev, EVO_MASTER);
1569 }
1570
1571 nv_encoder->crtc = encoder->crtc;
1572}
1573
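Editor's note: the syncs word above carries polarity in bits 3/4, and for DP the field at bits 6+ appears to encode the output depth (6bpc vs 8bpc). A sketch of its composition:

    #include <stdint.h>
    #include <stdio.h>

    #define FLAG_NHSYNC 0x1
    #define FLAG_NVSYNC 0x2

    /* Mirrors the syncs word in nvd0_sor_mode_set(); the bpc reading
     * of the bits-6+ field is an inference from the two DP cases. */
    static uint32_t sor_syncs(unsigned flags, int dp, int bpc)
    {
        uint32_t syncs = 0x00000001;
        if (flags & FLAG_NHSYNC)
            syncs |= 0x00000008;
        if (flags & FLAG_NVSYNC)
            syncs |= 0x00000010;
        if (dp)
            syncs |= (bpc == 6 ? 0x2 : 0x5) << 6;
        return syncs;
    }

    int main(void)
    {
        printf("TMDS +h+v: 0x%08x\n", sor_syncs(0, 0, 0));
        printf("DP 8bpc -h-v: 0x%08x\n",
               sor_syncs(FLAG_NHSYNC | FLAG_NVSYNC, 1, 8));
        return 0;
    }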
1574static void
1575nvd0_sor_destroy(struct drm_encoder *encoder)
1576{
1577 drm_encoder_cleanup(encoder);
1578 kfree(encoder);
1579}
1580
1581static const struct drm_encoder_helper_funcs nvd0_sor_hfunc = {
1582 .dpms = nvd0_sor_dpms,
1583 .mode_fixup = nvd0_sor_mode_fixup,
1584 .prepare = nvd0_sor_prepare,
1585 .commit = nvd0_sor_commit,
1586 .mode_set = nvd0_sor_mode_set,
1587 .disable = nvd0_sor_disconnect,
1588 .get_crtc = nvd0_display_crtc_get,
1589};
1590
1591static const struct drm_encoder_funcs nvd0_sor_func = {
1592 .destroy = nvd0_sor_destroy,
1593};
1594
1595static int
1596nvd0_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
1597{
1598 struct drm_device *dev = connector->dev;
1599 struct nouveau_encoder *nv_encoder;
1600 struct drm_encoder *encoder;
1601
1602 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
1603 if (!nv_encoder)
1604 return -ENOMEM;
1605 nv_encoder->dcb = dcbe;
1606 nv_encoder->or = ffs(dcbe->or) - 1;
1607 nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
1608
1609 encoder = to_drm_encoder(nv_encoder);
1610 encoder->possible_crtcs = dcbe->heads;
1611 encoder->possible_clones = 0;
1612 drm_encoder_init(dev, encoder, &nvd0_sor_func, DRM_MODE_ENCODER_TMDS);
1613 drm_encoder_helper_add(encoder, &nvd0_sor_hfunc);
1614
1615 drm_mode_connector_attach_encoder(connector, encoder);
1616 return 0;
1617}
1618
1619/******************************************************************************
1620 * IRQ
1621 *****************************************************************************/
1622static struct dcb_output *
1623lookup_dcb(struct drm_device *dev, int id, u32 mc)
1624{
1625 struct nouveau_drm *drm = nouveau_drm(dev);
1626 int type, or, i, link = -1;
1627
1628 if (id < 4) {
1629 type = DCB_OUTPUT_ANALOG;
1630 or = id;
1631 } else {
1632 switch (mc & 0x00000f00) {
1633 case 0x00000000: link = 0; type = DCB_OUTPUT_LVDS; break;
1634 case 0x00000100: link = 0; type = DCB_OUTPUT_TMDS; break;
1635 case 0x00000200: link = 1; type = DCB_OUTPUT_TMDS; break;
1636 case 0x00000500: link = 0; type = DCB_OUTPUT_TMDS; break;
1637 case 0x00000800: link = 0; type = DCB_OUTPUT_DP; break;
1638 case 0x00000900: link = 1; type = DCB_OUTPUT_DP; break;
1639 default:
1640 NV_ERROR(drm, "PDISP: unknown SOR mc 0x%08x\n", mc);
1641 return NULL;
1642 }
1643
1644 or = id - 4;
1645 }
1646
1647 for (i = 0; i < drm->vbios.dcb.entries; i++) {
1648 struct dcb_output *dcb = &drm->vbios.dcb.entry[i];
1649 if (dcb->type == type && (dcb->or & (1 << or)) &&
1650 (link < 0 || link == !(dcb->sorconf.link & 1)))
1651 return dcb;
1652 }
1653
1654 NV_ERROR(drm, "PDISP: DCB for %d/0x%08x not found\n", id, mc);
1655 return NULL;
1656}
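
The switch in lookup_dcb() classifies an SOR by the 0x00000f00 nibble of its mode-control word before walking the VBIOS DCB for a matching entry. Restated as a standalone sketch (the helper and its out-parameters are illustrative; DCB_OUTPUT_* are the driver's own enums):

/* Illustrative decode of the SOR mode-control class nibble, mirroring
 * the switch in lookup_dcb() above. */
static int decode_sor_mc(u32 mc, int *type, int *link)
{
	switch (mc & 0x00000f00) {
	case 0x00000000: *type = DCB_OUTPUT_LVDS; *link = 0; return 0;
	case 0x00000100: *type = DCB_OUTPUT_TMDS; *link = 0; return 0;
	case 0x00000200: *type = DCB_OUTPUT_TMDS; *link = 1; return 0;
	case 0x00000500: *type = DCB_OUTPUT_TMDS; *link = 0; return 0;
	case 0x00000800: *type = DCB_OUTPUT_DP;   *link = 0; return 0;
	case 0x00000900: *type = DCB_OUTPUT_DP;   *link = 1; return 0;
	default:         return -EINVAL;	/* unknown SOR mc */
	}
}

So id 5 with mc 0x00000900, for example, is looked up as a DP output on link 1 of OR 1.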
1657
1658static void
1659nvd0_display_unk1_handler(struct drm_device *dev, u32 crtc, u32 mask)
1660{
1661 struct nouveau_device *device = nouveau_dev(dev);
1662 struct dcb_output *dcb;
1663 int i;
1664
1665 for (i = 0; mask && i < 8; i++) {
1666 u32 mcc = nv_rd32(device, 0x640180 + (i * 0x20));
1667 if (!(mcc & (1 << crtc)))
1668 continue;
1669
1670 dcb = lookup_dcb(dev, i, mcc);
1671 if (!dcb)
1672 continue;
1673
1674 nouveau_bios_run_display_table(dev, 0x0000, -1, dcb, crtc);
1675 }
1676
1677 nv_wr32(device, 0x6101d4, 0x00000000);
1678 nv_wr32(device, 0x6109d4, 0x00000000);
1679 nv_wr32(device, 0x6101d0, 0x80000000);
1680}
1681
1682static void
1683nvd0_display_unk2_handler(struct drm_device *dev, u32 crtc, u32 mask)
1684{
1685 struct nouveau_device *device = nouveau_dev(dev);
1686 struct nouveau_drm *drm = nouveau_drm(dev);
1687 struct dcb_output *dcb;
1688 u32 or, tmp, pclk;
1689 int i;
1690
1691 for (i = 0; mask && i < 8; i++) {
1692 u32 mcc = nv_rd32(device, 0x640180 + (i * 0x20));
1693 if (!(mcc & (1 << crtc)))
1694 continue;
1695
1696 dcb = lookup_dcb(dev, i, mcc);
1697 if (!dcb)
1698 continue;
1699
1700 nouveau_bios_run_display_table(dev, 0x0000, -2, dcb, crtc);
1701 }
1702
1703 pclk = nv_rd32(device, 0x660450 + (crtc * 0x300)) / 1000;
1704 NV_DEBUG(drm, "PDISP: crtc %d pclk %d mask 0x%08x\n",
1705 crtc, pclk, mask);
1706 if (pclk && (mask & 0x00010000)) {
1707 nv50_crtc_set_clock(dev, crtc, pclk);
1708 }
1709
1710 for (i = 0; mask && i < 8; i++) {
1711 u32 mcp = nv_rd32(device, 0x660180 + (i * 0x20));
1712 u32 cfg = nv_rd32(device, 0x660184 + (i * 0x20));
1713 if (!(mcp & (1 << crtc)))
1714 continue;
1715
1716 dcb = lookup_dcb(dev, i, mcp);
1717 if (!dcb)
1718 continue;
1719 or = ffs(dcb->or) - 1;
1720
1721 nouveau_bios_run_display_table(dev, cfg, pclk, dcb, crtc);
1722
1723 nv_wr32(device, 0x612200 + (crtc * 0x800), 0x00000000);
1724 switch (dcb->type) {
1725 case DCB_OUTPUT_ANALOG:
1726 nv_wr32(device, 0x612280 + (or * 0x800), 0x00000000);
1727 break;
1728 case DCB_OUTPUT_TMDS:
1729 case DCB_OUTPUT_LVDS:
1730 case DCB_OUTPUT_DP:
1731 if (cfg & 0x00000100)
1732 tmp = 0x00000101;
1733 else
1734 tmp = 0x00000000;
1735
1736 nv_mask(device, 0x612300 + (or * 0x800), 0x00000707, tmp);
1737 break;
1738 default:
1739 break;
1740 }
1741
1742 break;
1743 }
1744
1745 nv_wr32(device, 0x6101d4, 0x00000000);
1746 nv_wr32(device, 0x6109d4, 0x00000000);
1747 nv_wr32(device, 0x6101d0, 0x80000000);
1748}
1749
1750static void
1751nvd0_display_unk4_handler(struct drm_device *dev, u32 crtc, u32 mask)
1752{
1753 struct nouveau_device *device = nouveau_dev(dev);
1754 struct dcb_output *dcb;
1755 int pclk, i;
1756
1757 pclk = nv_rd32(device, 0x660450 + (crtc * 0x300)) / 1000;
1758
1759 for (i = 0; mask && i < 8; i++) {
1760 u32 mcp = nv_rd32(device, 0x660180 + (i * 0x20));
1761 u32 cfg = nv_rd32(device, 0x660184 + (i * 0x20));
1762 if (!(mcp & (1 << crtc)))
1763 continue;
1764
1765 dcb = lookup_dcb(dev, i, mcp);
1766 if (!dcb)
1767 continue;
1768
1769 nouveau_bios_run_display_table(dev, cfg, -pclk, dcb, crtc);
1770 }
1771
1772 nv_wr32(device, 0x6101d4, 0x00000000);
1773 nv_wr32(device, 0x6109d4, 0x00000000);
1774 nv_wr32(device, 0x6101d0, 0x80000000);
1775}
1776
1777static void
1778nvd0_display_bh(unsigned long data)
1779{
1780 struct drm_device *dev = (struct drm_device *)data;
1781 struct nouveau_device *device = nouveau_dev(dev);
1782 struct nouveau_drm *drm = nouveau_drm(dev);
1783 struct nvd0_display *disp = nvd0_display(dev);
1784 u32 mask = 0, crtc = ~0;
1785 int i;
1786
1787 if (drm_debug & (DRM_UT_DRIVER | DRM_UT_KMS)) {
1788 NV_INFO(drm, "PDISP: modeset req %d\n", disp->modeset);
1789 NV_INFO(drm, " STAT: 0x%08x 0x%08x 0x%08x\n",
1790 nv_rd32(device, 0x6101d0),
1791 nv_rd32(device, 0x6101d4), nv_rd32(device, 0x6109d4));
1792 for (i = 0; i < 8; i++) {
1793 NV_INFO(drm, " %s%d: 0x%08x 0x%08x\n",
1794 i < 4 ? "DAC" : "SOR", i,
1795 nv_rd32(device, 0x640180 + (i * 0x20)),
1796 nv_rd32(device, 0x660180 + (i * 0x20)));
1797 }
1798 }
1799
1800 while (!mask && ++crtc < dev->mode_config.num_crtc)
1801 mask = nv_rd32(device, 0x6101d4 + (crtc * 0x800));
1802
1803 if (disp->modeset & 0x00000001)
1804 nvd0_display_unk1_handler(dev, crtc, mask);
1805 if (disp->modeset & 0x00000002)
1806 nvd0_display_unk2_handler(dev, crtc, mask);
1807 if (disp->modeset & 0x00000004)
1808 nvd0_display_unk4_handler(dev, crtc, mask);
1809}
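
The scan above relies on crtc starting at ~0 so the unsigned pre-increment begins at head 0, stopping at the first head whose 0x6101d4 status word is non-zero. The same idiom as a hedged standalone sketch, with the register read stubbed out:

/* Illustrative: return the first head with a pending status word, or -1.
 * rd_status() stands in for nv_rd32(device, 0x6101d4 + head * 0x800). */
static int first_pending_head(int num_heads, u32 (*rd_status)(int), u32 *mask)
{
	int head = -1;

	*mask = 0;
	while (!*mask && ++head < num_heads)
		*mask = rd_status(head);
	return *mask ? head : -1;
}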
1810
1811void
1812nvd0_display_intr(struct drm_device *dev)
1813{
1814 struct nvd0_display *disp = nvd0_display(dev);
1815 struct nouveau_device *device = nouveau_dev(dev);
1816 struct nouveau_drm *drm = nouveau_drm(dev);
1817 u32 intr = nv_rd32(device, 0x610088);
1818
1819 if (intr & 0x00000001) {
1820 u32 stat = nv_rd32(device, 0x61008c);
1821 nv_wr32(device, 0x61008c, stat);
1822 intr &= ~0x00000001;
1823 }
1824
1825 if (intr & 0x00000002) {
1826 u32 stat = nv_rd32(device, 0x61009c);
1827 int chid = ffs(stat) - 1;
1828 if (chid >= 0) {
1829 u32 mthd = nv_rd32(device, 0x6101f0 + (chid * 12));
1830 u32 data = nv_rd32(device, 0x6101f4 + (chid * 12));
1831 u32 unkn = nv_rd32(device, 0x6101f8 + (chid * 12));
1832
1833 NV_INFO(drm, "EvoCh: chid %d mthd 0x%04x data 0x%08x "
1834 "0x%08x 0x%08x\n",
1835 chid, (mthd & 0x0000ffc), data, mthd, unkn);
1836 nv_wr32(device, 0x61009c, (1 << chid));
1837 nv_wr32(device, 0x6101f0 + (chid * 12), 0x90000000);
1838 }
1839
1840 intr &= ~0x00000002;
1841 }
1842
1843 if (intr & 0x00100000) {
1844 u32 stat = nv_rd32(device, 0x6100ac);
1845
1846 if (stat & 0x00000007) {
1847 disp->modeset = stat;
1848 tasklet_schedule(&disp->tasklet);
1849
1850 nv_wr32(device, 0x6100ac, (stat & 0x00000007));
1851 stat &= ~0x00000007;
1852 }
1853
1854 if (stat) {
1855 NV_INFO(drm, "PDISP: unknown intr24 0x%08x\n", stat);
1856 nv_wr32(device, 0x6100ac, stat);
1857 }
1858
1859 intr &= ~0x00100000;
1860 }
1861
1862 intr &= ~0x0f000000; /* vblank, handled in core */
1863 if (intr)
1864 NV_INFO(drm, "PDISP: unknown intr 0x%08x\n", intr);
1865}
1866
1867/******************************************************************************
1868 * Init
1869 *****************************************************************************/
1870void
1871nvd0_display_fini(struct drm_device *dev)
1872{
1873 int i;
1874
1875 /* fini cursors + overlays + flips */
1876 for (i = 1; i >= 0; i--) {
1877 evo_fini_pio(dev, EVO_CURS(i));
1878 evo_fini_pio(dev, EVO_OIMM(i));
1879 evo_fini_dma(dev, EVO_OVLY(i));
1880 evo_fini_dma(dev, EVO_FLIP(i));
1881 }
1882
1883 /* fini master */
1884 evo_fini_dma(dev, EVO_MASTER);
1885}
1886
1887int
1888nvd0_display_init(struct drm_device *dev)
1889{
1890 struct nvd0_display *disp = nvd0_display(dev);
1891 struct nouveau_device *device = nouveau_dev(dev);
1892 struct nouveau_drm *drm = nouveau_drm(dev);
1893 int ret, i;
1894 u32 *push;
1895
1896 if (nv_rd32(device, 0x6100ac) & 0x00000100) {
1897 nv_wr32(device, 0x6100ac, 0x00000100);
1898 nv_mask(device, 0x6194e8, 0x00000001, 0x00000000);
1899 if (!nv_wait(device, 0x6194e8, 0x00000002, 0x00000000)) {
1900 NV_ERROR(drm, "PDISP: 0x6194e8 0x%08x\n",
1901 nv_rd32(device, 0x6194e8));
1902 return -EBUSY;
1903 }
1904 }
1905
1906 	/* It's unclear exactly what these registers are, but SOR_MODE_CTRL
1907 	 * won't work at all unless the SOR part below is done as well.
1908 	 */
1909 for (i = 0; i < 3; i++) {
1910 u32 dac = nv_rd32(device, 0x61a000 + (i * 0x800));
1911 nv_wr32(device, 0x6101c0 + (i * 0x800), dac);
1912 }
1913
1914 for (i = 0; i < 4; i++) {
1915 u32 sor = nv_rd32(device, 0x61c000 + (i * 0x800));
1916 nv_wr32(device, 0x6301c4 + (i * 0x800), sor);
1917 }
1918
1919 for (i = 0; i < dev->mode_config.num_crtc; i++) {
1920 u32 crtc0 = nv_rd32(device, 0x616104 + (i * 0x800));
1921 u32 crtc1 = nv_rd32(device, 0x616108 + (i * 0x800));
1922 u32 crtc2 = nv_rd32(device, 0x61610c + (i * 0x800));
1923 nv_wr32(device, 0x6101b4 + (i * 0x800), crtc0);
1924 nv_wr32(device, 0x6101b8 + (i * 0x800), crtc1);
1925 nv_wr32(device, 0x6101bc + (i * 0x800), crtc2);
1926 }
1927
1928 /* point at our hash table / objects, enable interrupts */
1929 nv_wr32(device, 0x610010, (disp->mem->addr >> 8) | 9);
1930 nv_mask(device, 0x6100b0, 0x00000307, 0x00000307);
1931
1932 /* init master */
1933 ret = evo_init_dma(dev, EVO_MASTER);
1934 if (ret)
1935 goto error;
1936
1937 /* init flips + overlays + cursors */
1938 for (i = 0; i < dev->mode_config.num_crtc; i++) {
1939 if ((ret = evo_init_dma(dev, EVO_FLIP(i))) ||
1940 (ret = evo_init_dma(dev, EVO_OVLY(i))) ||
1941 (ret = evo_init_pio(dev, EVO_OIMM(i))) ||
1942 (ret = evo_init_pio(dev, EVO_CURS(i))))
1943 goto error;
1944 }
1945
1946 push = evo_wait(dev, EVO_MASTER, 32);
1947 if (!push) {
1948 ret = -EBUSY;
1949 goto error;
1950 }
1951 evo_mthd(push, 0x0088, 1);
1952 evo_data(push, NvEvoSync);
1953 evo_mthd(push, 0x0084, 1);
1954 evo_data(push, 0x00000000);
1955 evo_mthd(push, 0x0084, 1);
1956 evo_data(push, 0x80000000);
1957 evo_mthd(push, 0x008c, 1);
1958 evo_data(push, 0x00000000);
1959 evo_kick(push, dev, EVO_MASTER);
1960
1961error:
1962 if (ret)
1963 nvd0_display_fini(dev);
1964 return ret;
1965}
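
All display programming in this file goes through the push-buffer pattern seen here: evo_wait() reserves space in the channel, evo_mthd() appends a method header, evo_data() appends the parameters, and evo_kick() publishes the new words to the hardware. A rough sketch of the emit step, assuming the (size << 18) | method header packing used by this file's evo_mthd() macro:

/* Sketch only: append one method plus its parameters to a push buffer,
 * assuming a (size << 18) | mthd header word. */
static u32 *emit_method(u32 *push, u32 mthd, const u32 *data, u32 size)
{
	u32 i;

	*push++ = (size << 18) | mthd;	/* header: word count + method */
	for (i = 0; i < size; i++)
		*push++ = data[i];	/* parameters follow the header */
	return push;
}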
1966
1967void
1968nvd0_display_destroy(struct drm_device *dev)
1969{
1970 struct nvd0_display *disp = nvd0_display(dev);
1971 struct pci_dev *pdev = dev->pdev;
1972 int i;
1973
1974 for (i = 0; i < EVO_DMA_NR; i++) {
1975 struct evo *evo = &disp->evo[i];
1976 pci_free_consistent(pdev, PAGE_SIZE, evo->ptr, evo->handle);
1977 }
1978
1979 nouveau_gpuobj_ref(NULL, &disp->mem);
1980 nouveau_bo_unmap(disp->sync);
1981 nouveau_bo_ref(NULL, &disp->sync);
1982
1983 nouveau_display(dev)->priv = NULL;
1984 kfree(disp);
1985}
1986
1987int
1988nvd0_display_create(struct drm_device *dev)
1989{
1990 struct nouveau_device *device = nouveau_dev(dev);
1991 struct nouveau_drm *drm = nouveau_drm(dev);
1992 struct nouveau_bar *bar = nouveau_bar(device);
1993 struct nouveau_fb *pfb = nouveau_fb(device);
1994 struct dcb_table *dcb = &drm->vbios.dcb;
1995 struct drm_connector *connector, *tmp;
1996 struct pci_dev *pdev = dev->pdev;
1997 struct nvd0_display *disp;
1998 struct dcb_output *dcbe;
1999 int crtcs, ret, i;
2000
2001 disp = kzalloc(sizeof(*disp), GFP_KERNEL);
2002 if (!disp)
2003 return -ENOMEM;
2004
2005 nouveau_display(dev)->priv = disp;
2006 nouveau_display(dev)->dtor = nvd0_display_destroy;
2007 nouveau_display(dev)->init = nvd0_display_init;
2008 nouveau_display(dev)->fini = nvd0_display_fini;
2009
2010 /* create crtc objects to represent the hw heads */
2011 crtcs = nv_rd32(device, 0x022448);
2012 for (i = 0; i < crtcs; i++) {
2013 ret = nvd0_crtc_create(dev, i);
2014 if (ret)
2015 goto out;
2016 }
2017
2018 /* create encoder/connector objects based on VBIOS DCB table */
2019 for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
2020 connector = nouveau_connector_create(dev, dcbe->connector);
2021 if (IS_ERR(connector))
2022 continue;
2023
2024 if (dcbe->location != DCB_LOC_ON_CHIP) {
2025 NV_WARN(drm, "skipping off-chip encoder %d/%d\n",
2026 dcbe->type, ffs(dcbe->or) - 1);
2027 continue;
2028 }
2029
2030 switch (dcbe->type) {
2031 case DCB_OUTPUT_TMDS:
2032 case DCB_OUTPUT_LVDS:
2033 case DCB_OUTPUT_DP:
2034 nvd0_sor_create(connector, dcbe);
2035 break;
2036 case DCB_OUTPUT_ANALOG:
2037 nvd0_dac_create(connector, dcbe);
2038 break;
2039 default:
2040 NV_WARN(drm, "skipping unsupported encoder %d/%d\n",
2041 dcbe->type, ffs(dcbe->or) - 1);
2042 continue;
2043 }
2044 }
2045
2046 /* cull any connectors we created that don't have an encoder */
2047 list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
2048 if (connector->encoder_ids[0])
2049 continue;
2050
2051 NV_WARN(drm, "%s has no encoders, removing\n",
2052 drm_get_connector_name(connector));
2053 connector->funcs->destroy(connector);
2054 }
2055
2056 /* setup interrupt handling */
2057 tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev);
2058
2059 /* small shared memory area we use for notifiers and semaphores */
2060 ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
2061 0, 0x0000, NULL, &disp->sync);
2062 if (!ret) {
2063 ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
2064 if (!ret)
2065 ret = nouveau_bo_map(disp->sync);
2066 if (ret)
2067 nouveau_bo_ref(NULL, &disp->sync);
2068 }
2069
2070 if (ret)
2071 goto out;
2072
2073 /* hash table and dma objects for the memory areas we care about */
2074 ret = nouveau_gpuobj_new(nv_object(device), NULL, 0x4000, 0x10000,
2075 NVOBJ_FLAG_ZERO_ALLOC, &disp->mem);
2076 if (ret)
2077 goto out;
2078
2079 /* create evo dma channels */
2080 for (i = 0; i < EVO_DMA_NR; i++) {
2081 struct evo *evo = &disp->evo[i];
2082 u64 offset = disp->sync->bo.offset;
2083 u32 dmao = 0x1000 + (i * 0x100);
2084 u32 hash = 0x0000 + (i * 0x040);
2085
2086 evo->idx = i;
2087 evo->sem.offset = EVO_SYNC(evo->idx, 0x00);
2088 evo->ptr = pci_alloc_consistent(pdev, PAGE_SIZE, &evo->handle);
2089 if (!evo->ptr) {
2090 ret = -ENOMEM;
2091 goto out;
2092 }
2093
2094 nv_wo32(disp->mem, dmao + 0x00, 0x00000049);
2095 nv_wo32(disp->mem, dmao + 0x04, (offset + 0x0000) >> 8);
2096 nv_wo32(disp->mem, dmao + 0x08, (offset + 0x0fff) >> 8);
2097 nv_wo32(disp->mem, dmao + 0x0c, 0x00000000);
2098 nv_wo32(disp->mem, dmao + 0x10, 0x00000000);
2099 nv_wo32(disp->mem, dmao + 0x14, 0x00000000);
2100 nv_wo32(disp->mem, hash + 0x00, NvEvoSync);
2101 nv_wo32(disp->mem, hash + 0x04, 0x00000001 | (i << 27) |
2102 ((dmao + 0x00) << 9));
2103
2104 nv_wo32(disp->mem, dmao + 0x20, 0x00000049);
2105 nv_wo32(disp->mem, dmao + 0x24, 0x00000000);
2106 nv_wo32(disp->mem, dmao + 0x28, (pfb->ram.size - 1) >> 8);
2107 nv_wo32(disp->mem, dmao + 0x2c, 0x00000000);
2108 nv_wo32(disp->mem, dmao + 0x30, 0x00000000);
2109 nv_wo32(disp->mem, dmao + 0x34, 0x00000000);
2110 nv_wo32(disp->mem, hash + 0x08, NvEvoVRAM);
2111 nv_wo32(disp->mem, hash + 0x0c, 0x00000001 | (i << 27) |
2112 ((dmao + 0x20) << 9));
2113
2114 nv_wo32(disp->mem, dmao + 0x40, 0x00000009);
2115 nv_wo32(disp->mem, dmao + 0x44, 0x00000000);
2116 nv_wo32(disp->mem, dmao + 0x48, (pfb->ram.size - 1) >> 8);
2117 nv_wo32(disp->mem, dmao + 0x4c, 0x00000000);
2118 nv_wo32(disp->mem, dmao + 0x50, 0x00000000);
2119 nv_wo32(disp->mem, dmao + 0x54, 0x00000000);
2120 nv_wo32(disp->mem, hash + 0x10, NvEvoVRAM_LP);
2121 nv_wo32(disp->mem, hash + 0x14, 0x00000001 | (i << 27) |
2122 ((dmao + 0x40) << 9));
2123
2124 nv_wo32(disp->mem, dmao + 0x60, 0x0fe00009);
2125 nv_wo32(disp->mem, dmao + 0x64, 0x00000000);
2126 nv_wo32(disp->mem, dmao + 0x68, (pfb->ram.size - 1) >> 8);
2127 nv_wo32(disp->mem, dmao + 0x6c, 0x00000000);
2128 nv_wo32(disp->mem, dmao + 0x70, 0x00000000);
2129 nv_wo32(disp->mem, dmao + 0x74, 0x00000000);
2130 nv_wo32(disp->mem, hash + 0x18, NvEvoFB32);
2131 nv_wo32(disp->mem, hash + 0x1c, 0x00000001 | (i << 27) |
2132 ((dmao + 0x60) << 9));
2133 }
2134
2135 bar->flush(bar);
2136
2137out:
2138 if (ret)
2139 nvd0_display_destroy(dev);
2140 return ret;
2141}
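
Each DMA object written above is paired with a hash-table entry whose second word packs a valid bit, the channel index and the object's offset, exactly as the nv_wo32() calls show. A small helper restating that packing (the function name is illustrative):

/* Illustrative: pack one display hash-table entry as written above:
 * bit 0 valid, channel index at bit 27, dma object offset at bit 9. */
static u32 evo_hash_entry(u32 chid, u32 dmao)
{
	return 0x00000001 | (chid << 27) | (dmao << 9);
}

For channel 0 and the first dma object at 0x1000 this yields 0x00200001.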
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 24d932f53203..9175615bbd8a 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -561,6 +561,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
561 /* use frac fb div on APUs */
562 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
563 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
564 if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
565 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
566 } else {
567 radeon_crtc->pll_flags |= RADEON_PLL_LEGACY;
568
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index d5699fe4f1e8..064023bed480 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -34,8 +34,7 @@
34
35 /* move these to drm_dp_helper.c/h */
36 #define DP_LINK_CONFIGURATION_SIZE 9
37 #define DP_LINK_STATUS_SIZE 6
38 #define DP_DPCD_SIZE 8
37 #define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE
38
39 static char *voltage_names[] = {
40 "0.4V", "0.6V", "0.8V", "1.2V"
@@ -290,78 +289,6 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
289
290 /***** general DP utility functions *****/
291
293static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
294{
295 return link_status[r - DP_LANE0_1_STATUS];
296}
297
298static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
299 int lane)
300{
301 int i = DP_LANE0_1_STATUS + (lane >> 1);
302 int s = (lane & 1) * 4;
303 u8 l = dp_link_status(link_status, i);
304 return (l >> s) & 0xf;
305}
306
307static bool dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
308 int lane_count)
309{
310 int lane;
311 u8 lane_status;
312
313 for (lane = 0; lane < lane_count; lane++) {
314 lane_status = dp_get_lane_status(link_status, lane);
315 if ((lane_status & DP_LANE_CR_DONE) == 0)
316 return false;
317 }
318 return true;
319}
320
321static bool dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
322 int lane_count)
323{
324 u8 lane_align;
325 u8 lane_status;
326 int lane;
327
328 lane_align = dp_link_status(link_status,
329 DP_LANE_ALIGN_STATUS_UPDATED);
330 if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
331 return false;
332 for (lane = 0; lane < lane_count; lane++) {
333 lane_status = dp_get_lane_status(link_status, lane);
334 if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
335 return false;
336 }
337 return true;
338}
339
340static u8 dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
341 int lane)
342
343{
344 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
345 int s = ((lane & 1) ?
346 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
347 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
348 u8 l = dp_link_status(link_status, i);
349
350 return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
351}
352
353static u8 dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
354 int lane)
355{
356 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
357 int s = ((lane & 1) ?
358 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
359 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
360 u8 l = dp_link_status(link_status, i);
361
362 return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
363}
364
292 #define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200
293 #define DP_PRE_EMPHASIS_MAX DP_TRAIN_PRE_EMPHASIS_9_5
294
@@ -374,8 +301,8 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
301 int lane;
302
303 for (lane = 0; lane < lane_count; lane++) {
377 u8 this_v = dp_get_adjust_request_voltage(link_status, lane);
378 u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane);
304 u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
305 u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
306
307 DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n",
308 lane,
@@ -420,37 +347,6 @@ static int dp_get_max_dp_pix_clock(int link_rate,
347 return (link_rate * lane_num * 8) / bpp;
348 }
349
423static int dp_get_max_link_rate(u8 dpcd[DP_DPCD_SIZE])
424{
425 switch (dpcd[DP_MAX_LINK_RATE]) {
426 case DP_LINK_BW_1_62:
427 default:
428 return 162000;
429 case DP_LINK_BW_2_7:
430 return 270000;
431 case DP_LINK_BW_5_4:
432 return 540000;
433 }
434}
435
436static u8 dp_get_max_lane_number(u8 dpcd[DP_DPCD_SIZE])
437{
438 return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
439}
440
441static u8 dp_get_dp_link_rate_coded(int link_rate)
442{
443 switch (link_rate) {
444 case 162000:
445 default:
446 return DP_LINK_BW_1_62;
447 case 270000:
448 return DP_LINK_BW_2_7;
449 case 540000:
450 return DP_LINK_BW_5_4;
451 }
452}
453
350 /***** radeon specific DP functions *****/
351
352 /* First get the min lane# when low rate is used according to pixel clock
@@ -462,8 +358,8 @@ static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
358 int pix_clock)
359 {
360 int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
465 int max_link_rate = dp_get_max_link_rate(dpcd);
466 int max_lane_num = dp_get_max_lane_number(dpcd);
361 int max_link_rate = drm_dp_max_link_rate(dpcd);
362 int max_lane_num = drm_dp_max_lane_count(dpcd);
363 int lane_num;
364 int max_dp_pix_clock;
365
@@ -500,7 +396,7 @@ static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
396 return 540000;
397 }
398
503 return dp_get_max_link_rate(dpcd);
399 return drm_dp_max_link_rate(dpcd);
400 }
401
402 static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
@@ -551,14 +447,15 @@ static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
447 bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
448 {
449 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
554 u8 msg[25];
450 u8 msg[DP_DPCD_SIZE];
451 int ret, i;
452
557 ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg, 8, 0);
453 ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg,
454 DP_DPCD_SIZE, 0);
455 if (ret > 0) {
559 memcpy(dig_connector->dpcd, msg, 8);
456 memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
457 DRM_DEBUG_KMS("DPCD: ");
561 for (i = 0; i < 8; i++)
458 for (i = 0; i < DP_DPCD_SIZE; i++)
459 DRM_DEBUG_KMS("%02x ", msg[i]);
460 DRM_DEBUG_KMS("\n");
461
@@ -664,7 +561,7 @@ bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
561
562 if (!radeon_dp_get_link_status(radeon_connector, link_status))
563 return false;
667 if (dp_channel_eq_ok(link_status, dig->dp_lane_count))
564 if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
565 return false;
566 return true;
567 }
@@ -677,9 +574,8 @@ struct radeon_dp_link_train_info {
574 int enc_id;
575 int dp_clock;
576 int dp_lane_count;
680 int rd_interval;
577 bool tp3_supported;
682 u8 dpcd[8];
578 u8 dpcd[DP_RECEIVER_CAP_SIZE];
579 u8 train_set[4];
580 u8 link_status[DP_LINK_STATUS_SIZE];
581 u8 tries;
@@ -765,7 +661,7 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
661 radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
662
663 /* set the link rate on the sink */
768 tmp = dp_get_dp_link_rate_coded(dp_info->dp_clock);
664 tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock);
665 radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp);
666
667 /* start training on the source */
@@ -821,17 +717,14 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
717 dp_info->tries = 0;
718 voltage = 0xff;
719 while (1) {
824 if (dp_info->rd_interval == 0)
825 udelay(100);
826 else
827 mdelay(dp_info->rd_interval * 4);
720 drm_dp_link_train_clock_recovery_delay(dp_info->dpcd);
721
722 if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
723 DRM_ERROR("displayport link status failed\n");
724 break;
725 }
726
834 if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
727 if (drm_dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
728 clock_recovery = true;
729 break;
730 }
@@ -886,17 +779,14 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
779 dp_info->tries = 0;
780 channel_eq = false;
781 while (1) {
889 if (dp_info->rd_interval == 0)
890 udelay(400);
891 else
892 mdelay(dp_info->rd_interval * 4);
782 drm_dp_link_train_channel_eq_delay(dp_info->dpcd);
783
784 if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
785 DRM_ERROR("displayport link status failed\n");
786 break;
787 }
788
899 if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
789 if (drm_dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
790 channel_eq = true;
791 break;
792 }
@@ -974,14 +864,13 @@ void radeon_dp_link_train(struct drm_encoder *encoder,
864 else
865 dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A;
866
977 dp_info.rd_interval = radeon_read_dpcd_reg(radeon_connector, DP_TRAINING_AUX_RD_INTERVAL);
867 tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT);
868 if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
869 dp_info.tp3_supported = true;
870 else
871 dp_info.tp3_supported = false;
872
984 memcpy(dp_info.dpcd, dig_connector->dpcd, 8);
873 memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE);
874 dp_info.rdev = rdev;
875 dp_info.encoder = encoder;
876 dp_info.connector = connector;
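
The rd_interval bookkeeping deleted above moves into the shared DRM helpers: drm_dp_link_train_clock_recovery_delay() and drm_dp_link_train_channel_eq_delay() read DP_TRAINING_AUX_RD_INTERVAL out of a cached DPCD and sleep accordingly, which is why dp_info now carries a full DP_RECEIVER_CAP_SIZE copy. A simplified model of the clock-recovery variant, matching the inline logic this patch removes (the real helper lives in drm_dp_helper.c):

/* Simplified model: a zero rd interval means the 100us spec minimum for
 * clock recovery; otherwise the DPCD value is in units of 4ms. */
static void clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
	u8 rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL];

	if (rd_interval == 0)
		udelay(100);
	else
		mdelay(rd_interval * 4);
}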
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 010bae19554a..4552d4aff317 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -340,7 +340,7 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
340 ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
341 (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) {
342 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
343 radeon_dp_set_link_config(connector, mode);
343 radeon_dp_set_link_config(connector, adjusted_mode);
344 }
345
346 return true;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 5d1d21a6dcdd..f95d7fc1f5e0 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1821,7 +1821,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1821 case CHIP_SUMO:
1822 rdev->config.evergreen.num_ses = 1;
1823 rdev->config.evergreen.max_pipes = 4;
1824 rdev->config.evergreen.max_tile_pipes = 2;
1824 rdev->config.evergreen.max_tile_pipes = 4;
1825 if (rdev->pdev->device == 0x9648)
1826 rdev->config.evergreen.max_simds = 3;
1827 else if ((rdev->pdev->device == 0x9647) ||
@@ -1844,7 +1844,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1844 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1845 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1846 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1847 gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
1847 gb_addr_config = SUMO_GB_ADDR_CONFIG_GOLDEN;
1848 break;
1849 case CHIP_SUMO2:
1850 rdev->config.evergreen.num_ses = 1;
@@ -1866,7 +1866,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1866 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1867 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1868 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1869 gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
1869 gb_addr_config = SUMO2_GB_ADDR_CONFIG_GOLDEN;
1870 break;
1871 case CHIP_BARTS:
1872 rdev->config.evergreen.num_ses = 2;
@@ -1914,7 +1914,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1914 break;
1915 case CHIP_CAICOS:
1916 rdev->config.evergreen.num_ses = 1;
1917 rdev->config.evergreen.max_pipes = 4;
1917 rdev->config.evergreen.max_pipes = 2;
1918 rdev->config.evergreen.max_tile_pipes = 2;
1919 rdev->config.evergreen.max_simds = 2;
1920 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
@@ -2034,6 +2034,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2034 WREG32(GB_ADDR_CONFIG, gb_addr_config);
2035 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
2036 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
2037 WREG32(DMA_TILING_CONFIG, gb_addr_config);
2038
2039 tmp = gb_addr_config & NUM_PIPES_MASK;
2040 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
@@ -2403,8 +2404,12 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
2404 CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
2405 cayman_cp_int_cntl_setup(rdev, 1, 0);
2406 cayman_cp_int_cntl_setup(rdev, 2, 0);
2407 tmp = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
2408 WREG32(CAYMAN_DMA1_CNTL, tmp);
2409 } else
2410 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
2411 tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
2412 WREG32(DMA_CNTL, tmp);
2413 WREG32(GRBM_INT_CNTL, 0);
2414 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
2415 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -2457,6 +2462,7 @@ int evergreen_irq_set(struct radeon_device *rdev)
2462 u32 grbm_int_cntl = 0;
2463 u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
2464 u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
2465 u32 dma_cntl, dma_cntl1 = 0;
2466
2467 if (!rdev->irq.installed) {
2468 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -2484,6 +2490,8 @@ int evergreen_irq_set(struct radeon_device *rdev)
2490 afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
2491 afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
2492
2493 dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
2494
2495 if (rdev->family >= CHIP_CAYMAN) {
2496 /* enable CP interrupts on all rings */
2497 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
@@ -2506,6 +2514,19 @@ int evergreen_irq_set(struct radeon_device *rdev)
2514 }
2515 }
2516
2517 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
2518 DRM_DEBUG("r600_irq_set: sw int dma\n");
2519 dma_cntl |= TRAP_ENABLE;
2520 }
2521
2522 if (rdev->family >= CHIP_CAYMAN) {
2523 dma_cntl1 = RREG32(CAYMAN_DMA1_CNTL) & ~TRAP_ENABLE;
2524 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
2525 DRM_DEBUG("r600_irq_set: sw int dma1\n");
2526 dma_cntl1 |= TRAP_ENABLE;
2527 }
2528 }
2529
2530 if (rdev->irq.crtc_vblank_int[0] ||
2531 atomic_read(&rdev->irq.pflip[0])) {
2532 DRM_DEBUG("evergreen_irq_set: vblank 0\n");
@@ -2591,6 +2612,12 @@ int evergreen_irq_set(struct radeon_device *rdev)
2612 cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
2613 } else
2614 WREG32(CP_INT_CNTL, cp_int_cntl);
2615
2616 WREG32(DMA_CNTL, dma_cntl);
2617
2618 if (rdev->family >= CHIP_CAYMAN)
2619 WREG32(CAYMAN_DMA1_CNTL, dma_cntl1);
2620
2621 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
2622
2623 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -3093,6 +3120,16 @@ restart_ih:
3120 break;
3121 }
3122 break;
3123 case 146:
3124 case 147:
3125 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
3126 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
3127 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
3128 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
3129 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
3130 /* reset addr and status */
3131 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
3132 break;
3133 case 176: /* CP_INT in ring buffer */
3134 case 177: /* CP_INT in IB1 */
3135 case 178: /* CP_INT in IB2 */
@@ -3116,9 +3153,19 @@ restart_ih:
3153 } else
3154 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3155 break;
3156 case 224: /* DMA trap event */
3157 DRM_DEBUG("IH: DMA trap\n");
3158 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
3159 break;
3160 case 233: /* GUI IDLE */
3161 DRM_DEBUG("IH: GUI idle\n");
3162 break;
3163 case 244: /* DMA trap event */
3164 if (rdev->family >= CHIP_CAYMAN) {
3165 DRM_DEBUG("IH: DMA1 trap\n");
3166 radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
3167 }
3168 break;
3169 default:
3170 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3171 break;
@@ -3144,6 +3191,143 @@ restart_ih:
3191 return IRQ_HANDLED;
3192 }
3193
3194/**
3195 * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
3196 *
3197 * @rdev: radeon_device pointer
3198 * @fence: radeon fence object
3199 *
3200 * Add a DMA fence packet to the ring to write
3201 * the fence seq number and DMA trap packet to generate
3202 * an interrupt if needed (evergreen-SI).
3203 */
3204void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
3205 struct radeon_fence *fence)
3206{
3207 struct radeon_ring *ring = &rdev->ring[fence->ring];
3208 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
3209 /* write the fence */
3210 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
3211 radeon_ring_write(ring, addr & 0xfffffffc);
3212 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
3213 radeon_ring_write(ring, fence->seq);
3214 /* generate an interrupt */
3215 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
3216 /* flush HDP */
3217 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
3218 radeon_ring_write(ring, (0xf << 16) | HDP_MEM_COHERENCY_FLUSH_CNTL);
3219 radeon_ring_write(ring, 1);
3220}
3221
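/* Worked example of the stream emitted above (illustrative values): with
 * the fence at GPU address 0x123456780 and seq 42, the ring receives
 *   FENCE header, 0x23456780, 0x00000001, 42,
 *   TRAP header,
 *   SRBM_WRITE header, (0xf << 16) | HDP_MEM_COHERENCY_FLUSH_CNTL, 1
 */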
3222/**
3223 * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
3224 *
3225 * @rdev: radeon_device pointer
3226 * @ib: IB object to schedule
3227 *
3228 * Schedule an IB in the DMA ring (evergreen).
3229 */
3230void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
3231 struct radeon_ib *ib)
3232{
3233 struct radeon_ring *ring = &rdev->ring[ib->ring];
3234
3235 if (rdev->wb.enabled) {
3236 u32 next_rptr = ring->wptr + 4;
3237 while ((next_rptr & 7) != 5)
3238 next_rptr++;
3239 next_rptr += 3;
3240 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
3241 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3242 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
3243 radeon_ring_write(ring, next_rptr);
3244 }
3245
3246 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
3247 * Pad as necessary with NOPs.
3248 */
3249 while ((ring->wptr & 7) != 5)
3250 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
3251 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
3252 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
3253 radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
3254
3255}
3256
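/* Alignment sketch for the NOP padding above (illustrative numbers): with
 * wptr == 18 the loop advances 18 -> 19 -> 20 -> 21 (21 & 7 == 5), so the
 * 3-dword INDIRECT_BUFFER packet ends at dword 24, an 8-dword boundary.
 */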
3257/**
3258 * evergreen_copy_dma - copy pages using the DMA engine
3259 *
3260 * @rdev: radeon_device pointer
3261 * @src_offset: src GPU address
3262 * @dst_offset: dst GPU address
3263 * @num_gpu_pages: number of GPU pages to xfer
3264 * @fence: radeon fence object
3265 *
3266 * Copy GPU paging using the DMA engine (evergreen-cayman).
3267 * Used by the radeon ttm implementation to move pages if
3268 * registered as the asic copy callback.
3269 */
3270int evergreen_copy_dma(struct radeon_device *rdev,
3271 uint64_t src_offset, uint64_t dst_offset,
3272 unsigned num_gpu_pages,
3273 struct radeon_fence **fence)
3274{
3275 struct radeon_semaphore *sem = NULL;
3276 int ring_index = rdev->asic->copy.dma_ring_index;
3277 struct radeon_ring *ring = &rdev->ring[ring_index];
3278 u32 size_in_dw, cur_size_in_dw;
3279 int i, num_loops;
3280 int r = 0;
3281
3282 r = radeon_semaphore_create(rdev, &sem);
3283 if (r) {
3284 DRM_ERROR("radeon: moving bo (%d).\n", r);
3285 return r;
3286 }
3287
3288 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
3289 num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
3290 r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
3291 if (r) {
3292 DRM_ERROR("radeon: moving bo (%d).\n", r);
3293 radeon_semaphore_free(rdev, &sem, NULL);
3294 return r;
3295 }
3296
3297 if (radeon_fence_need_sync(*fence, ring->idx)) {
3298 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
3299 ring->idx);
3300 radeon_fence_note_sync(*fence, ring->idx);
3301 } else {
3302 radeon_semaphore_free(rdev, &sem, NULL);
3303 }
3304
3305 for (i = 0; i < num_loops; i++) {
3306 cur_size_in_dw = size_in_dw;
3307 if (cur_size_in_dw > 0xFFFFF)
3308 cur_size_in_dw = 0xFFFFF;
3309 size_in_dw -= cur_size_in_dw;
3310 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
3311 radeon_ring_write(ring, dst_offset & 0xfffffffc);
3312 radeon_ring_write(ring, src_offset & 0xfffffffc);
3313 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
3314 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
3315 src_offset += cur_size_in_dw * 4;
3316 dst_offset += cur_size_in_dw * 4;
3317 }
3318
3319 r = radeon_fence_emit(rdev, fence, ring->idx);
3320 if (r) {
3321 radeon_ring_unlock_undo(rdev, ring);
3322 return r;
3323 }
3324
3325 radeon_ring_unlock_commit(rdev, ring);
3326 radeon_semaphore_free(rdev, &sem, *fence);
3327
3328 return r;
3329}
3330
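/* Chunking sketch for the copy loop above (illustrative numbers): an 8 MiB
 * copy is 0x200000 dwords, so num_loops = DIV_ROUND_UP(0x200000, 0xfffff)
 * = 3: two full 0xfffff-dword packets plus a 2-dword remainder, src/dst
 * advancing by cur_size_in_dw * 4 bytes after each packet.
 */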
3331 static int evergreen_startup(struct radeon_device *rdev)
3332 {
3333 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
@@ -3207,6 +3391,12 @@ static int evergreen_startup(struct radeon_device *rdev)
3391 return r;
3392 }
3393
3394 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
3395 if (r) {
3396 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
3397 return r;
3398 }
3399
3400 /* Enable IRQ */
3401 r = r600_irq_init(rdev);
3402 if (r) {
@@ -3221,12 +3411,23 @@ static int evergreen_startup(struct radeon_device *rdev)
3411 0, 0xfffff, RADEON_CP_PACKET2);
3412 if (r)
3413 return r;
3414
3415 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
3416 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
3417 DMA_RB_RPTR, DMA_RB_WPTR,
3418 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
3419 if (r)
3420 return r;
3421
3422 r = evergreen_cp_load_microcode(rdev);
3423 if (r)
3424 return r;
3425 r = evergreen_cp_resume(rdev);
3426 if (r)
3427 return r;
3428 r = r600_dma_resume(rdev);
3429 if (r)
3430 return r;
3431
3432 r = radeon_ib_pool_init(rdev);
3433 if (r) {
@@ -3273,11 +3474,9 @@ int evergreen_resume(struct radeon_device *rdev)
3474
3475 int evergreen_suspend(struct radeon_device *rdev)
3476 {
3276 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
3277
3477 r600_audio_fini(rdev);
3478 r700_cp_stop(rdev);
3280 ring->ready = false;
3479 r600_dma_stop(rdev);
3480 evergreen_irq_suspend(rdev);
3481 radeon_wb_disable(rdev);
3482 evergreen_pcie_gart_disable(rdev);
@@ -3354,6 +3553,9 @@ int evergreen_init(struct radeon_device *rdev)
3553 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
3554 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
3555
3556 rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
3557 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
3558
3559 rdev->ih.ring_obj = NULL;
3560 r600_ih_ring_init(rdev, 64 * 1024);
3561
@@ -3366,6 +3568,7 @@ int evergreen_init(struct radeon_device *rdev)
3568 if (r) {
3569 dev_err(rdev->dev, "disabling GPU acceleration\n");
3570 r700_cp_fini(rdev);
3571 r600_dma_fini(rdev);
3572 r600_irq_fini(rdev);
3573 radeon_wb_fini(rdev);
3574 radeon_ib_pool_fini(rdev);
@@ -3393,6 +3596,7 @@ void evergreen_fini(struct radeon_device *rdev)
3596 r600_audio_fini(rdev);
3597 r600_blit_fini(rdev);
3598 r700_cp_fini(rdev);
3599 r600_dma_fini(rdev);
3600 r600_irq_fini(rdev);
3601 radeon_wb_fini(rdev);
3602 radeon_ib_pool_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index c042e497e450..74c6b42d2597 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -34,6 +34,8 @@
34 #define MAX(a,b) (((a)>(b))?(a):(b))
35 #define MIN(a,b) (((a)<(b))?(a):(b))
36
37int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
38 struct radeon_cs_reloc **cs_reloc);
39 static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
40 struct radeon_cs_reloc **cs_reloc);
41
@@ -507,20 +509,28 @@ static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
509 /* height is npipes htiles aligned == npipes * 8 pixel aligned */
510 nby = round_up(nby, track->npipes * 8);
511 } else {
512 /* always assume 8x8 htile */
513 /* align is htile align * 8; htile align varies with the number
514 * of pipes, the tile width and nby
515 */
516 switch (track->npipes) {
517 case 8:
518 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
519 nbx = round_up(nbx, 64 * 8);
520 nby = round_up(nby, 64 * 8);
521 break;
522 case 4:
523 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
524 nbx = round_up(nbx, 64 * 8);
525 nby = round_up(nby, 32 * 8);
526 break;
527 case 2:
528 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
529 nbx = round_up(nbx, 32 * 8);
530 nby = round_up(nby, 32 * 8);
531 break;
532 case 1:
533 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
534 nbx = round_up(nbx, 32 * 8);
535 nby = round_up(nby, 16 * 8);
536 break;
@@ -531,9 +541,10 @@ static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
541 }
542 }
543 /* compute number of htile */
534 nbx = nbx / 8;
535 nby = nby / 8;
536 size = nbx * nby * 4;
544 nbx = nbx >> 3;
545 nby = nby >> 3;
546 /* size must be aligned on npipes * 2K boundary */
547 size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
548 size += track->htile_offset;
549
550 if (size > radeon_bo_size(track->htile_bo)) {
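
Worked through with concrete numbers (illustrative, not from the patch), the sizing above behaves as follows for a 1920x1080 surface on a 4-pipe part:

/* npipes = 4:
 *   nbx = round_up(1920, 64 * 8) = 2048;  nby = round_up(1080, 32 * 8) = 1280
 *   nbx >> 3 = 256 htiles;  nby >> 3 = 160 htiles
 *   size = roundup(256 * 160 * 4, 4 * (2 << 10)) = 163840 (already aligned)
 */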
@@ -1790,6 +1801,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1801 case DB_HTILE_SURFACE:
1802 /* 8x8 only */
1803 track->htile_surface = radeon_get_ib_value(p, idx);
1804 /* force 8x8 htile width and height */
1805 ib[idx] |= 3;
1806 track->db_dirty = true;
1807 break;
1808 case CB_IMMED0_BASE:
@@ -2232,6 +2245,107 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
2245 ib[idx+2] = upper_32_bits(offset) & 0xff;
2246 }
2247 break;
2248 case PACKET3_CP_DMA:
2249 {
2250 u32 command, size, info;
2251 u64 offset, tmp;
2252 if (pkt->count != 4) {
2253 DRM_ERROR("bad CP DMA\n");
2254 return -EINVAL;
2255 }
2256 command = radeon_get_ib_value(p, idx+4);
2257 size = command & 0x1fffff;
2258 info = radeon_get_ib_value(p, idx+1);
2259 if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
2260 (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
2261 ((((info & 0x00300000) >> 20) == 0) &&
2262 (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
2263 ((((info & 0x60000000) >> 29) == 0) &&
2264 (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
2265 /* non mem-to-mem copies require a dword-aligned count */
2266 if (size % 4) {
2267 DRM_ERROR("CP DMA command requires dw count alignment\n");
2268 return -EINVAL;
2269 }
2270 }
2271 if (command & PACKET3_CP_DMA_CMD_SAS) {
2272 /* src address space is register */
2273 /* GDS is ok */
2274 if (((info & 0x60000000) >> 29) != 1) {
2275 DRM_ERROR("CP DMA SAS not supported\n");
2276 return -EINVAL;
2277 }
2278 } else {
2279 if (command & PACKET3_CP_DMA_CMD_SAIC) {
2280 DRM_ERROR("CP DMA SAIC only supported for registers\n");
2281 return -EINVAL;
2282 }
2283 /* src address space is memory */
2284 if (((info & 0x60000000) >> 29) == 0) {
2285 r = evergreen_cs_packet_next_reloc(p, &reloc);
2286 if (r) {
2287 DRM_ERROR("bad CP DMA SRC\n");
2288 return -EINVAL;
2289 }
2290
2291 tmp = radeon_get_ib_value(p, idx) +
2292 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
2293
2294 offset = reloc->lobj.gpu_offset + tmp;
2295
2296 if ((tmp + size) > radeon_bo_size(reloc->robj)) {
2297 dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
2298 tmp + size, radeon_bo_size(reloc->robj));
2299 return -EINVAL;
2300 }
2301
2302 ib[idx] = offset;
2303 ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
2304 } else if (((info & 0x60000000) >> 29) != 2) {
2305 DRM_ERROR("bad CP DMA SRC_SEL\n");
2306 return -EINVAL;
2307 }
2308 }
2309 if (command & PACKET3_CP_DMA_CMD_DAS) {
2310 /* dst address space is register */
2311 /* GDS is ok */
2312 if (((info & 0x00300000) >> 20) != 1) {
2313 DRM_ERROR("CP DMA DAS not supported\n");
2314 return -EINVAL;
2315 }
2316 } else {
2317 /* dst address space is memory */
2318 if (command & PACKET3_CP_DMA_CMD_DAIC) {
2319 DRM_ERROR("CP DMA DAIC only supported for registers\n");
2320 return -EINVAL;
2321 }
2322 if (((info & 0x00300000) >> 20) == 0) {
2323 r = evergreen_cs_packet_next_reloc(p, &reloc);
2324 if (r) {
2325 DRM_ERROR("bad CP DMA DST\n");
2326 return -EINVAL;
2327 }
2328
2329 tmp = radeon_get_ib_value(p, idx+2) +
2330 ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
2331
2332 offset = reloc->lobj.gpu_offset + tmp;
2333
2334 if ((tmp + size) > radeon_bo_size(reloc->robj)) {
2335 dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
2336 tmp + size, radeon_bo_size(reloc->robj));
2337 return -EINVAL;
2338 }
2339
2340 ib[idx+2] = offset;
2341 ib[idx+3] = upper_32_bits(offset) & 0xff;
2342 } else {
2343 DRM_ERROR("bad CP DMA DST_SEL\n");
2344 return -EINVAL;
2345 }
2346 }
2347 break;
2348 }
2349 case PACKET3_SURFACE_SYNC:
2350 if (pkt->count != 3) {
2351 DRM_ERROR("bad SURFACE_SYNC\n");
@@ -2715,6 +2829,455 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
2829 return 0;
2830 }
2831
2832/*
2833 * DMA
2834 */
2835
2836#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
2837#define GET_DMA_COUNT(h) ((h) & 0x000fffff)
2838#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
2839#define GET_DMA_NEW(h) (((h) & 0x04000000) >> 26)
2840#define GET_DMA_MISC(h) (((h) & 0x0700000) >> 20)
2841
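/* Decode example for the macros above (illustrative header value):
 * 0x44900004 gives cmd = 0x4 (DMA_PACKET_COPY), count = 0x4, tiled = 1,
 * new_cmd = 1, misc = 0x1 (the L2T/T2L partial sub-op below).
 */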
2842/**
2843 * evergreen_dma_cs_parse() - parse the DMA IB
2844 * @p: parser structure holding parsing context.
2845 *
2846 * Parses the DMA IB from the CS ioctl and updates
2847 * the GPU addresses based on the reloc information and
2848 * checks for errors. (Evergreen-Cayman)
2849 * Returns 0 for success and an error on failure.
2850 **/
2851int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
2852{
2853 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
2854 struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc;
2855 u32 header, cmd, count, tiled, new_cmd, misc;
2856 volatile u32 *ib = p->ib.ptr;
2857 u32 idx, idx_value;
2858 u64 src_offset, dst_offset, dst2_offset;
2859 int r;
2860
2861 do {
2862 if (p->idx >= ib_chunk->length_dw) {
2863 DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
2864 p->idx, ib_chunk->length_dw);
2865 return -EINVAL;
2866 }
2867 idx = p->idx;
2868 header = radeon_get_ib_value(p, idx);
2869 cmd = GET_DMA_CMD(header);
2870 count = GET_DMA_COUNT(header);
2871 tiled = GET_DMA_T(header);
2872 new_cmd = GET_DMA_NEW(header);
2873 misc = GET_DMA_MISC(header);
2874
2875 switch (cmd) {
2876 case DMA_PACKET_WRITE:
2877 r = r600_dma_cs_next_reloc(p, &dst_reloc);
2878 if (r) {
2879 DRM_ERROR("bad DMA_PACKET_WRITE\n");
2880 return -EINVAL;
2881 }
2882 if (tiled) {
2883 dst_offset = ib[idx+1];
2884 dst_offset <<= 8;
2885
2886 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2887 p->idx += count + 7;
2888 } else {
2889 dst_offset = ib[idx+1];
2890 dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
2891
2892 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2893 ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2894 p->idx += count + 3;
2895 }
2896 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2897 dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
2898 dst_offset, radeon_bo_size(dst_reloc->robj));
2899 return -EINVAL;
2900 }
2901 break;
2902 case DMA_PACKET_COPY:
2903 r = r600_dma_cs_next_reloc(p, &src_reloc);
2904 if (r) {
2905 DRM_ERROR("bad DMA_PACKET_COPY\n");
2906 return -EINVAL;
2907 }
2908 r = r600_dma_cs_next_reloc(p, &dst_reloc);
2909 if (r) {
2910 DRM_ERROR("bad DMA_PACKET_COPY\n");
2911 return -EINVAL;
2912 }
2913 if (tiled) {
2914 idx_value = radeon_get_ib_value(p, idx + 2);
2915 if (new_cmd) {
2916 switch (misc) {
2917 case 0:
2918 /* L2T, frame to fields */
2919 if (idx_value & (1 << 31)) {
2920 DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
2921 return -EINVAL;
2922 }
2923 r = r600_dma_cs_next_reloc(p, &dst2_reloc);
2924 if (r) {
2925 DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
2926 return -EINVAL;
2927 }
2928 dst_offset = ib[idx+1];
2929 dst_offset <<= 8;
2930 dst2_offset = ib[idx+2];
2931 dst2_offset <<= 8;
2932 src_offset = ib[idx+8];
2933 src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
2934 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
2935 dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
2936 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
2937 return -EINVAL;
2938 }
2939 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2940 dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
2941 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2942 return -EINVAL;
2943 }
2944 if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
2945 dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n",
2946 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
2947 return -EINVAL;
2948 }
2949 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2950 ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
2951 ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2952 ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2953 p->idx += 10;
2954 break;
2955 case 1:
2956 /* L2T, T2L partial */
2957 if (p->family < CHIP_CAYMAN) {
2958						DRM_ERROR("L2T, T2L Partial is cayman only!\n");
2959 return -EINVAL;
2960 }
2961 /* detile bit */
2962 if (idx_value & (1 << 31)) {
2963 /* tiled src, linear dst */
2964 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
2965
2966 ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2967 ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2968 } else {
2969 /* linear src, tiled dst */
2970 ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2971 ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2972
2973 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2974 }
2975 p->idx += 12;
2976 break;
2977 case 3:
2978 /* L2T, broadcast */
2979 if (idx_value & (1 << 31)) {
2980 DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
2981 return -EINVAL;
2982 }
2983 r = r600_dma_cs_next_reloc(p, &dst2_reloc);
2984 if (r) {
2985 DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
2986 return -EINVAL;
2987 }
2988 dst_offset = ib[idx+1];
2989 dst_offset <<= 8;
2990 dst2_offset = ib[idx+2];
2991 dst2_offset <<= 8;
2992 src_offset = ib[idx+8];
2993 src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
2994 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
2995 dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
2996 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
2997 return -EINVAL;
2998 }
2999 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
3000 dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
3001 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
3002 return -EINVAL;
3003 }
3004 if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
3005 dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
3006 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
3007 return -EINVAL;
3008 }
3009 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
3010 ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
3011 ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
3012 ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3013 p->idx += 10;
3014 break;
3015 case 4:
3016 /* L2T, T2L */
3017 /* detile bit */
3018 if (idx_value & (1 << 31)) {
3019 /* tiled src, linear dst */
3020 src_offset = ib[idx+1];
3021 src_offset <<= 8;
3022 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
3023
3024 dst_offset = ib[idx+7];
3025 dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
3026 ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
3027 ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
3028 } else {
3029 /* linear src, tiled dst */
3030 src_offset = ib[idx+7];
3031 src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
3032 ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
3033 ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3034
3035 dst_offset = ib[idx+1];
3036 dst_offset <<= 8;
3037 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
3038 }
3039 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
3040 dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
3041 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
3042 return -EINVAL;
3043 }
3044 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
3045 dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
3046 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
3047 return -EINVAL;
3048 }
3049 p->idx += 9;
3050 break;
3051 case 5:
3052 /* T2T partial */
3053 if (p->family < CHIP_CAYMAN) {
3054						DRM_ERROR("T2T Partial is cayman only!\n");
3055 return -EINVAL;
3056 }
3057 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
3058 ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
3059 p->idx += 13;
3060 break;
3061 case 7:
3062 /* L2T, broadcast */
3063 if (idx_value & (1 << 31)) {
3064 DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
3065 return -EINVAL;
3066 }
3067 r = r600_dma_cs_next_reloc(p, &dst2_reloc);
3068 if (r) {
3069 DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
3070 return -EINVAL;
3071 }
3072 dst_offset = ib[idx+1];
3073 dst_offset <<= 8;
3074 dst2_offset = ib[idx+2];
3075 dst2_offset <<= 8;
3076 src_offset = ib[idx+8];
3077 src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
3078 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
3079 dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
3080 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
3081 return -EINVAL;
3082 }
3083 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
3084 dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
3085 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
3086 return -EINVAL;
3087 }
3088 if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
3089 dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
3090 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
3091 return -EINVAL;
3092 }
3093 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
3094 ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
3095 ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
3096 ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3097 p->idx += 10;
3098 break;
3099 default:
3100 DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
3101 return -EINVAL;
3102 }
3103 } else {
3104 switch (misc) {
3105 case 0:
3106 /* detile bit */
3107 if (idx_value & (1 << 31)) {
3108 /* tiled src, linear dst */
3109 src_offset = ib[idx+1];
3110 src_offset <<= 8;
3111 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
3112
3113 dst_offset = ib[idx+7];
3114 dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
3115 ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
3116 ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
3117 } else {
3118 /* linear src, tiled dst */
3119 src_offset = ib[idx+7];
3120 src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
3121 ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
3122 ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3123
3124 dst_offset = ib[idx+1];
3125 dst_offset <<= 8;
3126 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
3127 }
3128 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
3129						dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
3130 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
3131 return -EINVAL;
3132 }
3133 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
3134						dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
3135 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
3136 return -EINVAL;
3137 }
3138 p->idx += 9;
3139 break;
3140 default:
3141 DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
3142 return -EINVAL;
3143 }
3144 }
3145 } else {
3146 if (new_cmd) {
3147 switch (misc) {
3148 case 0:
3149 /* L2L, byte */
3150 src_offset = ib[idx+2];
3151 src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
3152 dst_offset = ib[idx+1];
3153 dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
3154 if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
3155 dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
3156 src_offset + count, radeon_bo_size(src_reloc->robj));
3157 return -EINVAL;
3158 }
3159 if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
3160 dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
3161 dst_offset + count, radeon_bo_size(dst_reloc->robj));
3162 return -EINVAL;
3163 }
3164 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
3165 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
3166 ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
3167 ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3168 p->idx += 5;
3169 break;
3170 case 1:
3171 /* L2L, partial */
3172 if (p->family < CHIP_CAYMAN) {
3173						DRM_ERROR("L2L Partial is cayman only!\n");
3174 return -EINVAL;
3175 }
3176 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
3177 ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3178 ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
3179 ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
3180
3181 p->idx += 9;
3182 break;
3183 case 4:
3184 /* L2L, dw, broadcast */
3185 r = r600_dma_cs_next_reloc(p, &dst2_reloc);
3186 if (r) {
3187 DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
3188 return -EINVAL;
3189 }
3190 dst_offset = ib[idx+1];
3191 dst_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
3192 dst2_offset = ib[idx+2];
3193 dst2_offset |= ((u64)(ib[idx+5] & 0xff)) << 32;
3194 src_offset = ib[idx+3];
3195 src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
3196 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
3197 dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
3198 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
3199 return -EINVAL;
3200 }
3201 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
3202 dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
3203 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
3204 return -EINVAL;
3205 }
3206 if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
3207 dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
3208 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
3209 return -EINVAL;
3210 }
3211 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
3212 ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
3213 ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
3214 ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
3215 ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
3216 ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3217 p->idx += 7;
3218 break;
3219 default:
3220 DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
3221 return -EINVAL;
3222 }
3223 } else {
3224 /* L2L, dw */
3225 src_offset = ib[idx+2];
3226 src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
3227 dst_offset = ib[idx+1];
3228 dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
3229 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
3230 dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
3231 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
3232 return -EINVAL;
3233 }
3234 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
3235 dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
3236 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
3237 return -EINVAL;
3238 }
3239 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
3240 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
3241 ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
3242 ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3243 p->idx += 5;
3244 }
3245 }
3246 break;
3247 case DMA_PACKET_CONSTANT_FILL:
3248 r = r600_dma_cs_next_reloc(p, &dst_reloc);
3249 if (r) {
3250 DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
3251 return -EINVAL;
3252 }
3253 dst_offset = ib[idx+1];
3254 dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
3255 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
3256 dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
3257					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
3258 return -EINVAL;
3259 }
3260 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
3261 ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
3262 p->idx += 4;
3263 break;
3264 case DMA_PACKET_NOP:
3265 p->idx += 1;
3266 break;
3267 default:
3268			DRM_ERROR("Unknown packet type %d at %d!\n", cmd, idx);
3269 return -EINVAL;
3270 }
3271 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
3272#if 0
3273	for (r = 0; r < p->ib.length_dw; r++) {
3274 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
3275 mdelay(1);
3276 }
3277#endif
3278 return 0;
3279}
3280
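[Editor's note: every branch above applies the same guard before patching addresses into the IB — the transfer's end offset must fit inside the relocated buffer object, with the length measured in bytes for the L2L byte variant and in count * 4 bytes everywhere else. A standalone restatement of that check, illustrative only; bo_size stands in for radeon_bo_size(reloc->robj):

	#include <stdbool.h>
	#include <stdint.h>

	/* mirrors the "offset + length > bo size" test used above */
	static bool dma_range_fits(uint64_t offset, uint32_t count,
				   bool dword_units, uint64_t bo_size)
	{
		uint64_t len = dword_units ? (uint64_t)count * 4 : count;

		return offset + len <= bo_size;
	}
]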
2718/* vm parser */ 3281/* vm parser */
2719static bool evergreen_vm_reg_valid(u32 reg) 3282static bool evergreen_vm_reg_valid(u32 reg)
2720{ 3283{
@@ -2843,6 +3406,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
2843 u32 idx = pkt->idx + 1; 3406 u32 idx = pkt->idx + 1;
2844 u32 idx_value = ib[idx]; 3407 u32 idx_value = ib[idx];
2845 u32 start_reg, end_reg, reg, i; 3408 u32 start_reg, end_reg, reg, i;
3409 u32 command, info;
2846 3410
2847 switch (pkt->opcode) { 3411 switch (pkt->opcode) {
2848 case PACKET3_NOP: 3412 case PACKET3_NOP:
@@ -2917,6 +3481,64 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
2917 return -EINVAL; 3481 return -EINVAL;
2918 } 3482 }
2919 break; 3483 break;
3484 case PACKET3_CP_DMA:
3485 command = ib[idx + 4];
3486 info = ib[idx + 1];
3487 if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
3488 (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
3489 ((((info & 0x00300000) >> 20) == 0) &&
3490 (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
3491 ((((info & 0x60000000) >> 29) == 0) &&
3492 (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
3493			/* non mem-to-mem copies require a dword-aligned byte count */
3494 if ((command & 0x1fffff) % 4) {
3495 DRM_ERROR("CP DMA command requires dw count alignment\n");
3496 return -EINVAL;
3497 }
3498 }
3499 if (command & PACKET3_CP_DMA_CMD_SAS) {
3500 /* src address space is register */
3501 if (((info & 0x60000000) >> 29) == 0) {
3502 start_reg = idx_value << 2;
3503 if (command & PACKET3_CP_DMA_CMD_SAIC) {
3504 reg = start_reg;
3505 if (!evergreen_vm_reg_valid(reg)) {
3506 DRM_ERROR("CP DMA Bad SRC register\n");
3507 return -EINVAL;
3508 }
3509 } else {
3510 for (i = 0; i < (command & 0x1fffff); i++) {
3511 reg = start_reg + (4 * i);
3512 if (!evergreen_vm_reg_valid(reg)) {
3513 DRM_ERROR("CP DMA Bad SRC register\n");
3514 return -EINVAL;
3515 }
3516 }
3517 }
3518 }
3519 }
3520 if (command & PACKET3_CP_DMA_CMD_DAS) {
3521 /* dst address space is register */
3522 if (((info & 0x00300000) >> 20) == 0) {
3523 start_reg = ib[idx + 2];
3524 if (command & PACKET3_CP_DMA_CMD_DAIC) {
3525 reg = start_reg;
3526 if (!evergreen_vm_reg_valid(reg)) {
3527 DRM_ERROR("CP DMA Bad DST register\n");
3528 return -EINVAL;
3529 }
3530 } else {
3531 for (i = 0; i < (command & 0x1fffff); i++) {
3532 reg = start_reg + (4 * i);
3533 if (!evergreen_vm_reg_valid(reg)) {
3534 DRM_ERROR("CP DMA Bad DST register\n");
3535 return -EINVAL;
3536 }
3537 }
3538 }
3539 }
3540 }
3541 break;
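[Editor's note: the field positions tested above come from the CP_DMA packet layout documented in evergreend.h later in this patch — SRC_SEL lives in info[30:29], DST_SEL in info[21:20], and the byte count in command[20:0]. A sketch of the decode with names local to this sketch:

	#include <stdint.h>

	struct cp_dma_fields {
		uint32_t src_sel;	/* 0 = SRC_ADDR, 1 = GDS, 2 = DATA */
		uint32_t dst_sel;	/* 0 = DST_ADDR, 1 = GDS */
		uint32_t byte_count;
	};

	static struct cp_dma_fields cp_dma_decode(uint32_t info, uint32_t command)
	{
		struct cp_dma_fields f = {
			.src_sel    = (info & 0x60000000) >> 29,
			.dst_sel    = (info & 0x00300000) >> 20,
			.byte_count = command & 0x1fffff,
		};
		return f;
	}
]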
2920 default: 3542 default:
2921 return -EINVAL; 3543 return -EINVAL;
2922 } 3544 }
@@ -2958,3 +3580,114 @@ int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
2958 3580
2959 return ret; 3581 return ret;
2960} 3582}
3583
3584/**
3585 * evergreen_dma_ib_parse() - parse the DMA IB for VM
3586 * @rdev: radeon_device pointer
3587 * @ib: radeon_ib pointer
3588 *
3589 * Parses the DMA IB from the VM CS ioctl and
3590 * checks for errors. (Cayman-SI)
3591 * Returns 0 for success and an error on failure.
3592 **/
3593int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
3594{
3595 u32 idx = 0;
3596 u32 header, cmd, count, tiled, new_cmd, misc;
3597
3598 do {
3599 header = ib->ptr[idx];
3600 cmd = GET_DMA_CMD(header);
3601 count = GET_DMA_COUNT(header);
3602 tiled = GET_DMA_T(header);
3603 new_cmd = GET_DMA_NEW(header);
3604 misc = GET_DMA_MISC(header);
3605
3606 switch (cmd) {
3607 case DMA_PACKET_WRITE:
3608 if (tiled)
3609 idx += count + 7;
3610 else
3611 idx += count + 3;
3612 break;
3613 case DMA_PACKET_COPY:
3614 if (tiled) {
3615 if (new_cmd) {
3616 switch (misc) {
3617 case 0:
3618 /* L2T, frame to fields */
3619 idx += 10;
3620 break;
3621 case 1:
3622 /* L2T, T2L partial */
3623 idx += 12;
3624 break;
3625 case 3:
3626 /* L2T, broadcast */
3627 idx += 10;
3628 break;
3629 case 4:
3630 /* L2T, T2L */
3631 idx += 9;
3632 break;
3633 case 5:
3634 /* T2T partial */
3635 idx += 13;
3636 break;
3637 case 7:
3638 /* L2T, broadcast */
3639 idx += 10;
3640 break;
3641 default:
3642 DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
3643 return -EINVAL;
3644 }
3645 } else {
3646 switch (misc) {
3647 case 0:
3648 idx += 9;
3649 break;
3650 default:
3651 DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
3652 return -EINVAL;
3653 }
3654 }
3655 } else {
3656 if (new_cmd) {
3657 switch (misc) {
3658 case 0:
3659 /* L2L, byte */
3660 idx += 5;
3661 break;
3662 case 1:
3663 /* L2L, partial */
3664 idx += 9;
3665 break;
3666 case 4:
3667 /* L2L, dw, broadcast */
3668 idx += 7;
3669 break;
3670 default:
3671 DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
3672 return -EINVAL;
3673 }
3674 } else {
3675 /* L2L, dw */
3676 idx += 5;
3677 }
3678 }
3679 break;
3680 case DMA_PACKET_CONSTANT_FILL:
3681 idx += 4;
3682 break;
3683 case DMA_PACKET_NOP:
3684 idx += 1;
3685 break;
3686 default:
3687			DRM_ERROR("Unknown packet type %d at %d!\n", cmd, idx);
3688 return -EINVAL;
3689 }
3690 } while (idx < ib->length_dw);
3691
3692 return 0;
3693}
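[Editor's note: since this VM variant of the parser leaves address checking to the GPU's page tables, all it needs is the stride of each packet. The tiled new-style copy strides walked above can be summarized as data — illustrative; unset entries mark encodings the parser rejects:

	#include <stdint.h>

	static const uint8_t l2t_new_copy_stride[8] = {
		[0] = 10,	/* L2T, frame to fields */
		[1] = 12,	/* L2T, T2L partial */
		[3] = 10,	/* L2T, broadcast */
		[4] = 9,	/* L2T, T2L */
		[5] = 13,	/* T2T partial */
		[7] = 10,	/* L2T, broadcast */
	};
]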
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 2bc0f6a1b428..cb9baaac9e85 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -45,6 +45,8 @@
45#define TURKS_GB_ADDR_CONFIG_GOLDEN 0x02010002 45#define TURKS_GB_ADDR_CONFIG_GOLDEN 0x02010002
46#define CEDAR_GB_ADDR_CONFIG_GOLDEN 0x02010001 46#define CEDAR_GB_ADDR_CONFIG_GOLDEN 0x02010001
47#define CAICOS_GB_ADDR_CONFIG_GOLDEN 0x02010001 47#define CAICOS_GB_ADDR_CONFIG_GOLDEN 0x02010001
48#define SUMO_GB_ADDR_CONFIG_GOLDEN 0x02010002
49#define SUMO2_GB_ADDR_CONFIG_GOLDEN 0x02010002
48 50
49/* Registers */ 51/* Registers */
50 52
@@ -355,6 +357,54 @@
355# define AFMT_MPEG_INFO_UPDATE (1 << 10) 357# define AFMT_MPEG_INFO_UPDATE (1 << 10)
356#define AFMT_GENERIC0_7 0x7138 358#define AFMT_GENERIC0_7 0x7138
357 359
360/* DCE4/5 ELD audio interface */
361#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x5f84 /* LPCM */
362#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x5f88 /* AC3 */
363#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x5f8c /* MPEG1 */
364#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3 0x5f90 /* MP3 */
365#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4 0x5f94 /* MPEG2 */
366#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5 0x5f98 /* AAC */
367#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6 0x5f9c /* DTS */
368#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7 0x5fa0 /* ATRAC */
369#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8 0x5fa4 /* one bit audio - leave at 0 (default) */
370#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9 0x5fa8 /* Dolby Digital */
371#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10 0x5fac /* DTS-HD */
372#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11 0x5fb0 /* MAT-MLP */
373#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12 0x5fb4 /* DTS */
374#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13 0x5fb8 /* WMA Pro */
375# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
376/* max channels minus one. 7 = 8 channels */
377# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
378# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
379# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
380/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
381 * bit0 = 32 kHz
382 * bit1 = 44.1 kHz
383 * bit2 = 48 kHz
384 * bit3 = 88.2 kHz
385 * bit4 = 96 kHz
386 * bit5 = 176.4 kHz
387 * bit6 = 192 kHz
388 */
389
390#define AZ_HOT_PLUG_CONTROL 0x5e78
391# define AZ_FORCE_CODEC_WAKE (1 << 0)
392# define PIN0_JACK_DETECTION_ENABLE (1 << 4)
393# define PIN1_JACK_DETECTION_ENABLE (1 << 5)
394# define PIN2_JACK_DETECTION_ENABLE (1 << 6)
395# define PIN3_JACK_DETECTION_ENABLE (1 << 7)
396# define PIN0_UNSOLICITED_RESPONSE_ENABLE (1 << 8)
397# define PIN1_UNSOLICITED_RESPONSE_ENABLE (1 << 9)
398# define PIN2_UNSOLICITED_RESPONSE_ENABLE (1 << 10)
399# define PIN3_UNSOLICITED_RESPONSE_ENABLE (1 << 11)
400# define CODEC_HOT_PLUG_ENABLE (1 << 12)
401# define PIN0_AUDIO_ENABLED (1 << 24)
402# define PIN1_AUDIO_ENABLED (1 << 25)
403# define PIN2_AUDIO_ENABLED (1 << 26)
404# define PIN3_AUDIO_ENABLED (1 << 27)
405# define AUDIO_ENABLED (1 << 31)
406
407
358#define GC_USER_SHADER_PIPE_CONFIG 0x8954 408#define GC_USER_SHADER_PIPE_CONFIG 0x8954
359#define INACTIVE_QD_PIPES(x) ((x) << 8) 409#define INACTIVE_QD_PIPES(x) ((x) << 8)
360#define INACTIVE_QD_PIPES_MASK 0x0000FF00 410#define INACTIVE_QD_PIPES_MASK 0x0000FF00
@@ -651,6 +701,7 @@
651#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1) 701#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
652#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4) 702#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
653#define VM_CONTEXT1_CNTL 0x1414 703#define VM_CONTEXT1_CNTL 0x1414
704#define VM_CONTEXT1_CNTL2 0x1434
654#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C 705#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153C
655#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C 706#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
656#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C 707#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155C
@@ -672,6 +723,8 @@
672#define CACHE_UPDATE_MODE(x) ((x) << 6) 723#define CACHE_UPDATE_MODE(x) ((x) << 6)
673#define VM_L2_STATUS 0x140C 724#define VM_L2_STATUS 0x140C
674#define L2_BUSY (1 << 0) 725#define L2_BUSY (1 << 0)
726#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
727#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
675 728
676#define WAIT_UNTIL 0x8040 729#define WAIT_UNTIL 0x8040
677 730
@@ -854,6 +907,37 @@
854# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16) 907# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
855# define DC_HPDx_EN (1 << 28) 908# define DC_HPDx_EN (1 << 28)
856 909
910/* ASYNC DMA */
911#define DMA_RB_RPTR 0xd008
912#define DMA_RB_WPTR 0xd00c
913
914#define DMA_CNTL 0xd02c
915# define TRAP_ENABLE (1 << 0)
916# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
917# define SEM_WAIT_INT_ENABLE (1 << 2)
918# define DATA_SWAP_ENABLE (1 << 3)
919# define FENCE_SWAP_ENABLE (1 << 4)
920# define CTXEMPTY_INT_ENABLE (1 << 28)
921#define DMA_TILING_CONFIG 0xD0B8
922
923#define CAYMAN_DMA1_CNTL 0xd82c
924
925/* async DMA packets */
926#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
927 (((t) & 0x1) << 23) | \
928 (((s) & 0x1) << 22) | \
929 (((n) & 0xFFFFF) << 0))
930/* async DMA Packet types */
931#define DMA_PACKET_WRITE 0x2
932#define DMA_PACKET_COPY 0x3
933#define DMA_PACKET_INDIRECT_BUFFER 0x4
934#define DMA_PACKET_SEMAPHORE 0x5
935#define DMA_PACKET_FENCE 0x6
936#define DMA_PACKET_TRAP 0x7
937#define DMA_PACKET_SRBM_WRITE 0x9
938#define DMA_PACKET_CONSTANT_FILL 0xd
939#define DMA_PACKET_NOP 0xf
940
857/* PCIE link stuff */ 941/* PCIE link stuff */
858#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */ 942#define PCIE_LC_TRAINING_CNTL 0xa1 /* PCIE_P */
859#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */ 943#define PCIE_LC_LINK_WIDTH_CNTL 0xa2 /* PCIE_P */
@@ -951,6 +1035,53 @@
951#define PACKET3_WAIT_REG_MEM 0x3C 1035#define PACKET3_WAIT_REG_MEM 0x3C
952#define PACKET3_MEM_WRITE 0x3D 1036#define PACKET3_MEM_WRITE 0x3D
953#define PACKET3_INDIRECT_BUFFER 0x32 1037#define PACKET3_INDIRECT_BUFFER 0x32
1038#define PACKET3_CP_DMA 0x41
1039/* 1. header
1040 * 2. SRC_ADDR_LO or DATA [31:0]
1041 * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
1042 * SRC_ADDR_HI [7:0]
1043 * 4. DST_ADDR_LO [31:0]
1044 * 5. DST_ADDR_HI [7:0]
1045 * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
1046 */
1047# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
1048 /* 0 - SRC_ADDR
1049 * 1 - GDS
1050 */
1051# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
1052 /* 0 - ME
1053 * 1 - PFP
1054 */
1055# define PACKET3_CP_DMA_SRC_SEL(x) ((x) << 29)
1056 /* 0 - SRC_ADDR
1057 * 1 - GDS
1058 * 2 - DATA
1059 */
1060# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
1061/* COMMAND */
1062# define PACKET3_CP_DMA_DIS_WC (1 << 21)
1063# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
1064 /* 0 - none
1065 * 1 - 8 in 16
1066 * 2 - 8 in 32
1067 * 3 - 8 in 64
1068 */
1069# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
1070 /* 0 - none
1071 * 1 - 8 in 16
1072 * 2 - 8 in 32
1073 * 3 - 8 in 64
1074 */
1075# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
1076 /* 0 - memory
1077 * 1 - register
1078 */
1079# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
1080 /* 0 - memory
1081 * 1 - register
1082 */
1083# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
1084# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
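[Editor's note: putting the new definitions together, a plain memory-to-memory CP_DMA copy packs into six dwords per the layout comment above. A hedged sketch — PACKET3() is assumed to be the standard PM4 type-3 header encoder from the existing radeon headers, and the helper name is local:

	#include <stdint.h>

	/* assumed PM4 type-3 header encoder, as in the existing headers */
	#define PACKET3(op, n)	((3u << 30) | (((n) & 0x3FFF) << 16) | \
				 (((op) & 0xFF) << 8))
	#define PACKET3_CP_DMA		0x41
	#define PACKET3_CP_DMA_CP_SYNC	(1u << 31)

	/* illustrative: mem-to-mem copy, all COMMAND switches left at 0 */
	static void cp_dma_pack(uint32_t pkt[6], uint64_t src, uint64_t dst,
				uint32_t byte_count)
	{
		pkt[0] = PACKET3(PACKET3_CP_DMA, 4);
		pkt[1] = (uint32_t)src;				/* SRC_ADDR_LO */
		pkt[2] = PACKET3_CP_DMA_CP_SYNC |
			 ((uint32_t)(src >> 32) & 0xff);	/* SRC_ADDR_HI */
		pkt[3] = (uint32_t)dst;				/* DST_ADDR_LO */
		pkt[4] = (uint32_t)(dst >> 32) & 0xff;		/* DST_ADDR_HI */
		pkt[5] = byte_count & 0x1fffff;			/* BYTE_COUNT */
	}
]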
954#define PACKET3_SURFACE_SYNC 0x43 1085#define PACKET3_SURFACE_SYNC 0x43
955# define PACKET3_CB0_DEST_BASE_ENA (1 << 6) 1086# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
956# define PACKET3_CB1_DEST_BASE_ENA (1 << 7) 1087# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index cda01f808f12..7bdbcb00aaf2 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -611,6 +611,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
611 WREG32(GB_ADDR_CONFIG, gb_addr_config); 611 WREG32(GB_ADDR_CONFIG, gb_addr_config);
612 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 612 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
613 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 613 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
614 WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
615 WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
614 616
615 tmp = gb_addr_config & NUM_PIPES_MASK; 617 tmp = gb_addr_config & NUM_PIPES_MASK;
616 tmp = r6xx_remap_render_backend(rdev, tmp, 618 tmp = r6xx_remap_render_backend(rdev, tmp,
@@ -784,10 +786,20 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
784 /* enable context1-7 */ 786 /* enable context1-7 */
785 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR, 787 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
786 (u32)(rdev->dummy_page.addr >> 12)); 788 (u32)(rdev->dummy_page.addr >> 12));
787 WREG32(VM_CONTEXT1_CNTL2, 0); 789 WREG32(VM_CONTEXT1_CNTL2, 4);
788 WREG32(VM_CONTEXT1_CNTL, 0);
789 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) | 790 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
790 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); 791 RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
792 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
793 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
794 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
795 PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
796 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
797 VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
798 VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
799 READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
800 READ_PROTECTION_FAULT_ENABLE_DEFAULT |
801 WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
802 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
791 803
792 cayman_pcie_gart_tlb_flush(rdev); 804 cayman_pcie_gart_tlb_flush(rdev);
793 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", 805 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -905,6 +917,7 @@ static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
905 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 917 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
906 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); 918 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
907 WREG32(SCRATCH_UMSK, 0); 919 WREG32(SCRATCH_UMSK, 0);
920 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
908 } 921 }
909} 922}
910 923
@@ -1118,6 +1131,181 @@ static int cayman_cp_resume(struct radeon_device *rdev)
1118 return 0; 1131 return 0;
1119} 1132}
1120 1133
1134/*
1135 * DMA
1136 * Starting with R600, the GPU has an asynchronous
1137 * DMA engine. The programming model is very similar
1138 * to the 3D engine (ring buffer, IBs, etc.), but the
1139 * DMA controller has its own packet format that is
1140 * different from the PM4 format used by the 3D engine.
1141 * It supports copying data, writing embedded data,
1142 * solid fills, and a number of other things. It also
1143 * has support for tiling/detiling of buffers.
1144 * Cayman and newer support two asynchronous DMA engines.
1145 */
1146/**
1147 * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
1148 *
1149 * @rdev: radeon_device pointer
1150 * @ib: IB object to schedule
1151 *
1152 * Schedule an IB in the DMA ring (cayman-SI).
1153 */
1154void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
1155 struct radeon_ib *ib)
1156{
1157 struct radeon_ring *ring = &rdev->ring[ib->ring];
1158
1159 if (rdev->wb.enabled) {
1160 u32 next_rptr = ring->wptr + 4;
1161 while ((next_rptr & 7) != 5)
1162 next_rptr++;
1163 next_rptr += 3;
1164 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
1165 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
1166 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
1167 radeon_ring_write(ring, next_rptr);
1168 }
1169
1170 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
1171 * Pad as necessary with NOPs.
1172 */
1173 while ((ring->wptr & 7) != 5)
1174 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
1175 radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
1176 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
1177 radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
1178
1179}
1180
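[Editor's note: the NOP padding in cayman_dma_ring_ib_execute() above hides a small piece of arithmetic — the INDIRECT_BUFFER packet is three dwords, so starting it at wptr == 5 (mod 8) is exactly what makes it end on the required 8-dword boundary. A trivial check of that invariant, values illustrative:

	#include <assert.h>

	int main(void)
	{
		unsigned int wptr = 5;		/* after the NOP padding loop */
		unsigned int end = wptr + 3;	/* 3-dword INDIRECT_BUFFER packet */

		assert((end & 7) == 0);		/* ends on an 8-dword boundary */
		return 0;
	}
]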
1181/**
1182 * cayman_dma_stop - stop the async dma engines
1183 *
1184 * @rdev: radeon_device pointer
1185 *
1186 * Stop the async dma engines (cayman-SI).
1187 */
1188void cayman_dma_stop(struct radeon_device *rdev)
1189{
1190 u32 rb_cntl;
1191
1192 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1193
1194 /* dma0 */
1195 rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
1196 rb_cntl &= ~DMA_RB_ENABLE;
1197 WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
1198
1199 /* dma1 */
1200 rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
1201 rb_cntl &= ~DMA_RB_ENABLE;
1202 WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
1203
1204 rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
1205 rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
1206}
1207
1208/**
1209 * cayman_dma_resume - setup and start the async dma engines
1210 *
1211 * @rdev: radeon_device pointer
1212 *
1213 * Set up the DMA ring buffers and enable them. (cayman-SI).
1214 * Returns 0 for success, error for failure.
1215 */
1216int cayman_dma_resume(struct radeon_device *rdev)
1217{
1218 struct radeon_ring *ring;
1219 u32 rb_cntl, dma_cntl;
1220 u32 rb_bufsz;
1221 u32 reg_offset, wb_offset;
1222 int i, r;
1223
1224 /* Reset dma */
1225 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
1226 RREG32(SRBM_SOFT_RESET);
1227 udelay(50);
1228 WREG32(SRBM_SOFT_RESET, 0);
1229
1230 for (i = 0; i < 2; i++) {
1231 if (i == 0) {
1232 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
1233 reg_offset = DMA0_REGISTER_OFFSET;
1234 wb_offset = R600_WB_DMA_RPTR_OFFSET;
1235 } else {
1236 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
1237 reg_offset = DMA1_REGISTER_OFFSET;
1238 wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
1239 }
1240
1241 WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
1242 WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
1243
1244 /* Set ring buffer size in dwords */
1245 rb_bufsz = drm_order(ring->ring_size / 4);
1246 rb_cntl = rb_bufsz << 1;
1247#ifdef __BIG_ENDIAN
1248 rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
1249#endif
1250 WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
1251
1252 /* Initialize the ring buffer's read and write pointers */
1253 WREG32(DMA_RB_RPTR + reg_offset, 0);
1254 WREG32(DMA_RB_WPTR + reg_offset, 0);
1255
1256 /* set the wb address whether it's enabled or not */
1257 WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
1258 upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
1259 WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
1260 ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
1261
1262 if (rdev->wb.enabled)
1263 rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
1264
1265 WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
1266
1267 /* enable DMA IBs */
1268 WREG32(DMA_IB_CNTL + reg_offset, DMA_IB_ENABLE | CMD_VMID_FORCE);
1269
1270 dma_cntl = RREG32(DMA_CNTL + reg_offset);
1271 dma_cntl &= ~CTXEMPTY_INT_ENABLE;
1272 WREG32(DMA_CNTL + reg_offset, dma_cntl);
1273
1274 ring->wptr = 0;
1275 WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
1276
1277 ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
1278
1279 WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
1280
1281 ring->ready = true;
1282
1283 r = radeon_ring_test(rdev, ring->idx, ring);
1284 if (r) {
1285 ring->ready = false;
1286 return r;
1287 }
1288 }
1289
1290 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
1291
1292 return 0;
1293}
1294
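[Editor's note: the rb_bufsz value programmed above is a log2 dword count. With the 64 KB DMA rings allocated in cayman_init() below, that works out to 14; a worked sketch, assuming drm_order() is the usual smallest-n-with-2^n >= x helper:

	#include <stdio.h>

	/* assumed behaviour of drm_order() */
	static unsigned int order(unsigned int x)
	{
		unsigned int n = 0;

		while ((1u << n) < x)
			n++;
		return n;
	}

	int main(void)
	{
		unsigned int ring_size = 64 * 1024;		/* bytes */
		unsigned int rb_bufsz = order(ring_size / 4);	/* 16384 dwords -> 14 */

		printf("rb_bufsz=%u, DMA_RB_CNTL size bits=0x%x\n",
		       rb_bufsz, rb_bufsz << 1);
		return 0;
	}
]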
1295/**
1296 * cayman_dma_fini - tear down the async dma engines
1297 *
1298 * @rdev: radeon_device pointer
1299 *
1300 * Stop the async dma engines and free the rings (cayman-SI).
1301 */
1302void cayman_dma_fini(struct radeon_device *rdev)
1303{
1304 cayman_dma_stop(rdev);
1305 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
1306 radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
1307}
1308
1121static int cayman_gpu_soft_reset(struct radeon_device *rdev) 1309static int cayman_gpu_soft_reset(struct radeon_device *rdev)
1122{ 1310{
1123 struct evergreen_mc_save save; 1311 struct evergreen_mc_save save;
@@ -1208,6 +1396,32 @@ int cayman_asic_reset(struct radeon_device *rdev)
1208 return cayman_gpu_soft_reset(rdev); 1396 return cayman_gpu_soft_reset(rdev);
1209} 1397}
1210 1398
1399/**
1400 * cayman_dma_is_lockup - Check if the DMA engine is locked up
1401 *
1402 * @rdev: radeon_device pointer
1403 * @ring: radeon_ring structure holding ring information
1404 *
1405 * Check if the async DMA engine is locked up (cayman-SI).
1406 * Returns true if the engine appears to be locked up, false if not.
1407 */
1408bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1409{
1410 u32 dma_status_reg;
1411
1412 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
1413 dma_status_reg = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
1414 else
1415 dma_status_reg = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
1416 if (dma_status_reg & DMA_IDLE) {
1417 radeon_ring_lockup_update(ring);
1418 return false;
1419 }
1420 /* force ring activities */
1421 radeon_ring_force_activity(rdev, ring);
1422 return radeon_ring_test_lockup(rdev, ring);
1423}
1424
1211static int cayman_startup(struct radeon_device *rdev) 1425static int cayman_startup(struct radeon_device *rdev)
1212{ 1426{
1213 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 1427 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
@@ -1289,6 +1503,18 @@ static int cayman_startup(struct radeon_device *rdev)
1289 return r; 1503 return r;
1290 } 1504 }
1291 1505
1506 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
1507 if (r) {
1508 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
1509 return r;
1510 }
1511
1512 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
1513 if (r) {
1514 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
1515 return r;
1516 }
1517
1292 /* Enable IRQ */ 1518 /* Enable IRQ */
1293 r = r600_irq_init(rdev); 1519 r = r600_irq_init(rdev);
1294 if (r) { 1520 if (r) {
@@ -1303,6 +1529,23 @@ static int cayman_startup(struct radeon_device *rdev)
1303 0, 0xfffff, RADEON_CP_PACKET2); 1529 0, 0xfffff, RADEON_CP_PACKET2);
1304 if (r) 1530 if (r)
1305 return r; 1531 return r;
1532
1533 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
1534 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
1535 DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
1536 DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
1537 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
1538 if (r)
1539 return r;
1540
1541 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
1542 r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
1543 DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
1544 DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
1545 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
1546 if (r)
1547 return r;
1548
1306 r = cayman_cp_load_microcode(rdev); 1549 r = cayman_cp_load_microcode(rdev);
1307 if (r) 1550 if (r)
1308 return r; 1551 return r;
@@ -1310,6 +1553,10 @@ static int cayman_startup(struct radeon_device *rdev)
1310 if (r) 1553 if (r)
1311 return r; 1554 return r;
1312 1555
1556 r = cayman_dma_resume(rdev);
1557 if (r)
1558 return r;
1559
1313 r = radeon_ib_pool_init(rdev); 1560 r = radeon_ib_pool_init(rdev);
1314 if (r) { 1561 if (r) {
1315 dev_err(rdev->dev, "IB initialization failed (%d).\n", r); 1562 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -1354,7 +1601,7 @@ int cayman_suspend(struct radeon_device *rdev)
1354{ 1601{
1355 r600_audio_fini(rdev); 1602 r600_audio_fini(rdev);
1356 cayman_cp_enable(rdev, false); 1603 cayman_cp_enable(rdev, false);
1357 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; 1604 cayman_dma_stop(rdev);
1358 evergreen_irq_suspend(rdev); 1605 evergreen_irq_suspend(rdev);
1359 radeon_wb_disable(rdev); 1606 radeon_wb_disable(rdev);
1360 cayman_pcie_gart_disable(rdev); 1607 cayman_pcie_gart_disable(rdev);
@@ -1421,6 +1668,14 @@ int cayman_init(struct radeon_device *rdev)
1421 ring->ring_obj = NULL; 1668 ring->ring_obj = NULL;
1422 r600_ring_init(rdev, ring, 1024 * 1024); 1669 r600_ring_init(rdev, ring, 1024 * 1024);
1423 1670
1671 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
1672 ring->ring_obj = NULL;
1673 r600_ring_init(rdev, ring, 64 * 1024);
1674
1675 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
1676 ring->ring_obj = NULL;
1677 r600_ring_init(rdev, ring, 64 * 1024);
1678
1424 rdev->ih.ring_obj = NULL; 1679 rdev->ih.ring_obj = NULL;
1425 r600_ih_ring_init(rdev, 64 * 1024); 1680 r600_ih_ring_init(rdev, 64 * 1024);
1426 1681
@@ -1433,6 +1688,7 @@ int cayman_init(struct radeon_device *rdev)
1433 if (r) { 1688 if (r) {
1434 dev_err(rdev->dev, "disabling GPU acceleration\n"); 1689 dev_err(rdev->dev, "disabling GPU acceleration\n");
1435 cayman_cp_fini(rdev); 1690 cayman_cp_fini(rdev);
1691 cayman_dma_fini(rdev);
1436 r600_irq_fini(rdev); 1692 r600_irq_fini(rdev);
1437 if (rdev->flags & RADEON_IS_IGP) 1693 if (rdev->flags & RADEON_IS_IGP)
1438 si_rlc_fini(rdev); 1694 si_rlc_fini(rdev);
@@ -1463,6 +1719,7 @@ void cayman_fini(struct radeon_device *rdev)
1463{ 1719{
1464 r600_blit_fini(rdev); 1720 r600_blit_fini(rdev);
1465 cayman_cp_fini(rdev); 1721 cayman_cp_fini(rdev);
1722 cayman_dma_fini(rdev);
1466 r600_irq_fini(rdev); 1723 r600_irq_fini(rdev);
1467 if (rdev->flags & RADEON_IS_IGP) 1724 if (rdev->flags & RADEON_IS_IGP)
1468 si_rlc_fini(rdev); 1725 si_rlc_fini(rdev);
@@ -1538,30 +1795,57 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
1538{ 1795{
1539 struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index]; 1796 struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
1540 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); 1797 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
1541 1798 uint64_t value;
1542 while (count) { 1799 unsigned ndw;
1543 unsigned ndw = 1 + count * 2; 1800
1544 if (ndw > 0x3FFF) 1801 if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
1545 ndw = 0x3FFF; 1802 while (count) {
1546 1803 ndw = 1 + count * 2;
1547 radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw)); 1804 if (ndw > 0x3FFF)
1548 radeon_ring_write(ring, pe); 1805 ndw = 0x3FFF;
1549 radeon_ring_write(ring, upper_32_bits(pe) & 0xff); 1806
1550 for (; ndw > 1; ndw -= 2, --count, pe += 8) { 1807 radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
1551 uint64_t value = 0; 1808 radeon_ring_write(ring, pe);
1552 if (flags & RADEON_VM_PAGE_SYSTEM) { 1809 radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
1553 value = radeon_vm_map_gart(rdev, addr); 1810 for (; ndw > 1; ndw -= 2, --count, pe += 8) {
1554 value &= 0xFFFFFFFFFFFFF000ULL; 1811 if (flags & RADEON_VM_PAGE_SYSTEM) {
1812 value = radeon_vm_map_gart(rdev, addr);
1813 value &= 0xFFFFFFFFFFFFF000ULL;
1814 } else if (flags & RADEON_VM_PAGE_VALID) {
1815 value = addr;
1816 } else {
1817 value = 0;
1818 }
1555 addr += incr; 1819 addr += incr;
1556 1820 value |= r600_flags;
1557 } else if (flags & RADEON_VM_PAGE_VALID) { 1821 radeon_ring_write(ring, value);
1558 value = addr; 1822 radeon_ring_write(ring, upper_32_bits(value));
1823 }
1824 }
1825 } else {
1826 while (count) {
1827 ndw = count * 2;
1828 if (ndw > 0xFFFFE)
1829 ndw = 0xFFFFE;
1830
1831 /* for non-physically contiguous pages (system) */
1832 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw));
1833 radeon_ring_write(ring, pe);
1834 radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
1835 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
1836 if (flags & RADEON_VM_PAGE_SYSTEM) {
1837 value = radeon_vm_map_gart(rdev, addr);
1838 value &= 0xFFFFFFFFFFFFF000ULL;
1839 } else if (flags & RADEON_VM_PAGE_VALID) {
1840 value = addr;
1841 } else {
1842 value = 0;
1843 }
1559 addr += incr; 1844 addr += incr;
1845 value |= r600_flags;
1846 radeon_ring_write(ring, value);
1847 radeon_ring_write(ring, upper_32_bits(value));
1560 } 1848 }
1561
1562 value |= r600_flags;
1563 radeon_ring_write(ring, value);
1564 radeon_ring_write(ring, upper_32_bits(value));
1565 } 1849 }
1566 } 1850 }
1567} 1851}
@@ -1596,3 +1880,26 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
1596 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 1880 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
1597 radeon_ring_write(ring, 0x0); 1881 radeon_ring_write(ring, 0x0);
1598} 1882}
1883
1884void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
1885{
1886 struct radeon_ring *ring = &rdev->ring[ridx];
1887
1888 if (vm == NULL)
1889 return;
1890
1891 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
1892 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
1893 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
1894
1895 /* flush hdp cache */
1896 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
1897 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
1898 radeon_ring_write(ring, 1);
1899
1900 /* bits 0-7 are the VM contexts0-7 */
1901 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
1902 radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
1903 radeon_ring_write(ring, 1 << vm->id);
1904}
1905
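[Editor's note: each SRBM_WRITE payload above packs the target register into one dword — the low 16 bits carry the register's dword index (byte address >> 2) and, going by the usage here, bits 19:16 carry a byte-enable mask (0xf enables all four bytes). A one-line helper capturing that encoding, name local to this sketch:

	#include <stdint.h>

	/* illustrative: encode the SRBM_WRITE address dword used above */
	static uint32_t srbm_write_addr(uint32_t reg_byte_addr)
	{
		return (0xfu << 16) | (reg_byte_addr >> 2);
	}
]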
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index cbef6815907a..b93186b8ee4b 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -50,6 +50,24 @@
50#define VMID(x) (((x) & 0x7) << 0) 50#define VMID(x) (((x) & 0x7) << 0)
51#define SRBM_STATUS 0x0E50 51#define SRBM_STATUS 0x0E50
52 52
53#define SRBM_SOFT_RESET 0x0E60
54#define SOFT_RESET_BIF (1 << 1)
55#define SOFT_RESET_CG (1 << 2)
56#define SOFT_RESET_DC (1 << 5)
57#define SOFT_RESET_DMA1 (1 << 6)
58#define SOFT_RESET_GRBM (1 << 8)
59#define SOFT_RESET_HDP (1 << 9)
60#define SOFT_RESET_IH (1 << 10)
61#define SOFT_RESET_MC (1 << 11)
62#define SOFT_RESET_RLC (1 << 13)
63#define SOFT_RESET_ROM (1 << 14)
64#define SOFT_RESET_SEM (1 << 15)
65#define SOFT_RESET_VMC (1 << 17)
66#define SOFT_RESET_DMA (1 << 20)
67#define SOFT_RESET_TST (1 << 21)
68#define SOFT_RESET_REGBB (1 << 22)
69#define SOFT_RESET_ORB (1 << 23)
70
53#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470 71#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
54#define REQUEST_TYPE(x) (((x) & 0xf) << 0) 72#define REQUEST_TYPE(x) (((x) & 0xf) << 0)
55#define RESPONSE_TYPE_MASK 0x000000F0 73#define RESPONSE_TYPE_MASK 0x000000F0
@@ -80,7 +98,18 @@
80#define VM_CONTEXT0_CNTL 0x1410 98#define VM_CONTEXT0_CNTL 0x1410
81#define ENABLE_CONTEXT (1 << 0) 99#define ENABLE_CONTEXT (1 << 0)
82#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1) 100#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
101#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3)
83#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4) 102#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
103#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6)
104#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7)
105#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9)
106#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10)
107#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12)
108#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13)
109#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15)
110#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
111#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
112#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
84#define VM_CONTEXT1_CNTL 0x1414 113#define VM_CONTEXT1_CNTL 0x1414
85#define VM_CONTEXT0_CNTL2 0x1430 114#define VM_CONTEXT0_CNTL2 0x1430
86#define VM_CONTEXT1_CNTL2 0x1434 115#define VM_CONTEXT1_CNTL2 0x1434
@@ -588,5 +617,62 @@
588#define PACKET3_SET_APPEND_CNT 0x75 617#define PACKET3_SET_APPEND_CNT 0x75
589#define PACKET3_ME_WRITE 0x7A 618#define PACKET3_ME_WRITE 0x7A
590 619
620/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
621#define DMA0_REGISTER_OFFSET 0x0 /* not a register */
622#define DMA1_REGISTER_OFFSET 0x800 /* not a register */
623
624#define DMA_RB_CNTL 0xd000
625# define DMA_RB_ENABLE (1 << 0)
626# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
627# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
628# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
629# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
630# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
631#define DMA_RB_BASE 0xd004
632#define DMA_RB_RPTR 0xd008
633#define DMA_RB_WPTR 0xd00c
634
635#define DMA_RB_RPTR_ADDR_HI 0xd01c
636#define DMA_RB_RPTR_ADDR_LO 0xd020
637
638#define DMA_IB_CNTL 0xd024
639# define DMA_IB_ENABLE (1 << 0)
640# define DMA_IB_SWAP_ENABLE (1 << 4)
641# define CMD_VMID_FORCE (1 << 31)
642#define DMA_IB_RPTR 0xd028
643#define DMA_CNTL 0xd02c
644# define TRAP_ENABLE (1 << 0)
645# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
646# define SEM_WAIT_INT_ENABLE (1 << 2)
647# define DATA_SWAP_ENABLE (1 << 3)
648# define FENCE_SWAP_ENABLE (1 << 4)
649# define CTXEMPTY_INT_ENABLE (1 << 28)
650#define DMA_STATUS_REG 0xd034
651# define DMA_IDLE (1 << 0)
652#define DMA_SEM_INCOMPLETE_TIMER_CNTL 0xd044
653#define DMA_SEM_WAIT_FAIL_TIMER_CNTL 0xd048
654#define DMA_TILING_CONFIG 0xd0b8
655#define DMA_MODE 0xd0bc
656
657#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
658 (((t) & 0x1) << 23) | \
659 (((s) & 0x1) << 22) | \
660 (((n) & 0xFFFFF) << 0))
661
662#define DMA_IB_PACKET(cmd, vmid, n) ((((cmd) & 0xF) << 28) | \
663 (((vmid) & 0xF) << 20) | \
664 (((n) & 0xFFFFF) << 0))
665
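[Editor's note: DMA_IB_PACKET() pairs with the three-dword indirect-buffer sequence emitted from cayman_dma_ring_ib_execute() in ni.c. A standalone encode of the header — macros copied from this hunk, vmid value is a sample:

	#include <stdint.h>
	#include <stdio.h>

	#define DMA_IB_PACKET(cmd, vmid, n) ((((cmd) & 0xF) << 28) |	\
					     (((vmid) & 0xF) << 20) |	\
					     (((n) & 0xFFFFF) << 0))
	#define DMA_PACKET_INDIRECT_BUFFER 0x4

	int main(void)
	{
		uint32_t hdr = DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, 1, 0);

		printf("IB header for vmid 1: 0x%08x\n", hdr);	/* 0x40100000 */
		return 0;
	}
]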
666/* async DMA Packet types */
667#define DMA_PACKET_WRITE 0x2
668#define DMA_PACKET_COPY 0x3
669#define DMA_PACKET_INDIRECT_BUFFER 0x4
670#define DMA_PACKET_SEMAPHORE 0x5
671#define DMA_PACKET_FENCE 0x6
672#define DMA_PACKET_TRAP 0x7
673#define DMA_PACKET_SRBM_WRITE 0x9
674#define DMA_PACKET_CONSTANT_FILL 0xd
675#define DMA_PACKET_NOP 0xf
676
591#endif 677#endif
592 678
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 376884f1bcd2..8ff7cac222dc 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -4135,23 +4135,36 @@ int r100_init(struct radeon_device *rdev)
4135 return 0; 4135 return 0;
4136} 4136}
4137 4137
4138uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) 4138uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
4139 bool always_indirect)
4139{ 4140{
4140 if (reg < rdev->rmmio_size) 4141 if (reg < rdev->rmmio_size && !always_indirect)
4141 return readl(((void __iomem *)rdev->rmmio) + reg); 4142 return readl(((void __iomem *)rdev->rmmio) + reg);
4142 else { 4143 else {
4144 unsigned long flags;
4145 uint32_t ret;
4146
4147 spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
4143 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); 4148 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
4144 return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); 4149 ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
4150 spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
4151
4152 return ret;
4145 } 4153 }
4146} 4154}
4147 4155
4148void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) 4156void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
4157 bool always_indirect)
4149{ 4158{
4150 if (reg < rdev->rmmio_size) 4159 if (reg < rdev->rmmio_size && !always_indirect)
4151 writel(v, ((void __iomem *)rdev->rmmio) + reg); 4160 writel(v, ((void __iomem *)rdev->rmmio) + reg);
4152 else { 4161 else {
4162 unsigned long flags;
4163
4164 spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
4153 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); 4165 writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
4154 writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); 4166 writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
4167 spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
4155 } 4168 }
4156} 4169}
4157 4170
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index cda280d157da..2aaf147969bd 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1370,6 +1370,29 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1370 return radeon_ring_test_lockup(rdev, ring); 1370 return radeon_ring_test_lockup(rdev, ring);
1371} 1371}
1372 1372
1373/**
1374 * r600_dma_is_lockup - Check if the DMA engine is locked up
1375 *
1376 * @rdev: radeon_device pointer
1377 * @ring: radeon_ring structure holding ring information
1378 *
1379 * Check if the async DMA engine is locked up (r6xx-evergreen).
1380 * Returns true if the engine appears to be locked up, false if not.
1381 */
1382bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1383{
1384 u32 dma_status_reg;
1385
1386 dma_status_reg = RREG32(DMA_STATUS_REG);
1387 if (dma_status_reg & DMA_IDLE) {
1388 radeon_ring_lockup_update(ring);
1389 return false;
1390 }
1391 /* force ring activities */
1392 radeon_ring_force_activity(rdev, ring);
1393 return radeon_ring_test_lockup(rdev, ring);
1394}
1395
1373int r600_asic_reset(struct radeon_device *rdev) 1396int r600_asic_reset(struct radeon_device *rdev)
1374{ 1397{
1375 return r600_gpu_soft_reset(rdev); 1398 return r600_gpu_soft_reset(rdev);
@@ -1424,13 +1447,7 @@ u32 r6xx_remap_render_backend(struct radeon_device *rdev,
1424 1447
1425int r600_count_pipe_bits(uint32_t val) 1448int r600_count_pipe_bits(uint32_t val)
1426{ 1449{
1427 int i, ret = 0; 1450 return hweight32(val);
1428
1429 for (i = 0; i < 32; i++) {
1430 ret += val & 1;
1431 val >>= 1;
1432 }
1433 return ret;
1434} 1451}
1435 1452
1436static void r600_gpu_init(struct radeon_device *rdev) 1453static void r600_gpu_init(struct radeon_device *rdev)
@@ -1594,6 +1611,7 @@ static void r600_gpu_init(struct radeon_device *rdev)
1594 WREG32(GB_TILING_CONFIG, tiling_config); 1611 WREG32(GB_TILING_CONFIG, tiling_config);
1595 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff); 1612 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
1596 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff); 1613 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
1614 WREG32(DMA_TILING_CONFIG, tiling_config & 0xffff);
1597 1615
1598 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); 1616 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
1599 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); 1617 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
@@ -1871,6 +1889,7 @@ void r600_cp_stop(struct radeon_device *rdev)
1871 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 1889 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1872 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); 1890 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1873 WREG32(SCRATCH_UMSK, 0); 1891 WREG32(SCRATCH_UMSK, 0);
1892 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1874} 1893}
1875 1894
1876int r600_init_microcode(struct radeon_device *rdev) 1895int r600_init_microcode(struct radeon_device *rdev)
@@ -2196,6 +2215,128 @@ void r600_cp_fini(struct radeon_device *rdev)
2196 radeon_scratch_free(rdev, ring->rptr_save_reg); 2215 radeon_scratch_free(rdev, ring->rptr_save_reg);
2197} 2216}
2198 2217
2218/*
2219 * DMA
2220 * Starting with R600, the GPU has an asynchronous
2221 * DMA engine. The programming model is very similar
2222 * to the 3D engine (ring buffer, IBs, etc.), but the
2223 * DMA controller has its own packet format that is
2224 * different from the PM4 format used by the 3D engine.
2225 * It supports copying data, writing embedded data,
2226 * solid fills, and a number of other things. It also
2227 * has support for tiling/detiling of buffers.
2228 */
2229/**
2230 * r600_dma_stop - stop the async dma engine
2231 *
2232 * @rdev: radeon_device pointer
2233 *
2234 * Stop the async dma engine (r6xx-evergreen).
2235 */
2236void r600_dma_stop(struct radeon_device *rdev)
2237{
2238 u32 rb_cntl = RREG32(DMA_RB_CNTL);
2239
2240 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
2241
2242 rb_cntl &= ~DMA_RB_ENABLE;
2243 WREG32(DMA_RB_CNTL, rb_cntl);
2244
2245 rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
2246}
2247
2248/**
2249 * r600_dma_resume - setup and start the async dma engine
2250 *
2251 * @rdev: radeon_device pointer
2252 *
2253 * Set up the DMA ring buffer and enable it. (r6xx-evergreen).
2254 * Returns 0 for success, error for failure.
2255 */
2256int r600_dma_resume(struct radeon_device *rdev)
2257{
2258 struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
2259 u32 rb_cntl, dma_cntl;
2260 u32 rb_bufsz;
2261 int r;
2262
2263 /* Reset dma */
2264 if (rdev->family >= CHIP_RV770)
2265 WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
2266 else
2267 WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
2268 RREG32(SRBM_SOFT_RESET);
2269 udelay(50);
2270 WREG32(SRBM_SOFT_RESET, 0);
2271
2272 WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
2273 WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
2274
2275 /* Set ring buffer size in dwords */
2276 rb_bufsz = drm_order(ring->ring_size / 4);
2277 rb_cntl = rb_bufsz << 1;
2278#ifdef __BIG_ENDIAN
2279 rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
2280#endif
2281 WREG32(DMA_RB_CNTL, rb_cntl);
2282
2283 /* Initialize the ring buffer's read and write pointers */
2284 WREG32(DMA_RB_RPTR, 0);
2285 WREG32(DMA_RB_WPTR, 0);
2286
2287 /* set the wb address whether it's enabled or not */
2288 WREG32(DMA_RB_RPTR_ADDR_HI,
2289 upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
2290 WREG32(DMA_RB_RPTR_ADDR_LO,
2291 ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
2292
2293 if (rdev->wb.enabled)
2294 rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
2295
2296 WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
2297
2298 /* enable DMA IBs */
2299 WREG32(DMA_IB_CNTL, DMA_IB_ENABLE);
2300
2301 dma_cntl = RREG32(DMA_CNTL);
2302 dma_cntl &= ~CTXEMPTY_INT_ENABLE;
2303 WREG32(DMA_CNTL, dma_cntl);
2304
2305 if (rdev->family >= CHIP_RV770)
2306 WREG32(DMA_MODE, 1);
2307
2308 ring->wptr = 0;
2309 WREG32(DMA_RB_WPTR, ring->wptr << 2);
2310
2311 ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
2312
2313 WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
2314
2315 ring->ready = true;
2316
2317 r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
2318 if (r) {
2319 ring->ready = false;
2320 return r;
2321 }
2322
2323 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
2324
2325 return 0;
2326}
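
As a hedged worked example of the size encoding above, assuming the 64 KiB DMA ring that r600_init() allocates later in this patch:

    u32 ring_size = 64 * 1024;                 /* bytes; matches r600_ring_init() below */
    u32 rb_bufsz  = drm_order(ring_size / 4);  /* log2 of 16384 dwords = 14 */
    u32 rb_cntl   = rb_bufsz << 1;             /* i.e. DMA_RB_SIZE(rb_bufsz) in r600d.h */
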
2327
2328/**
2329 * r600_dma_fini - tear down the async dma engine
2330 *
2331 * @rdev: radeon_device pointer
2332 *
2333 * Stop the async dma engine and free the ring (r6xx-evergreen).
2334 */
2335void r600_dma_fini(struct radeon_device *rdev)
2336{
2337 r600_dma_stop(rdev);
2338 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
2339}
2199 2340
2200/* 2341/*
2201 * GPU scratch registers helpers function. 2342 * GPU scratch registers helpers function.
@@ -2252,6 +2393,64 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
2252 return r; 2393 return r;
2253} 2394}
2254 2395
2396/**
2397 * r600_dma_ring_test - simple async dma engine test
2398 *
2399 * @rdev: radeon_device pointer
2400 * @ring: radeon_ring structure holding ring information
2401 *
2402 * Test the DMA engine by using it to write a
2403 * value to memory (r6xx-SI).
2404 * Returns 0 for success, error for failure.
2405 */
2406int r600_dma_ring_test(struct radeon_device *rdev,
2407 struct radeon_ring *ring)
2408{
2409 unsigned i;
2410 int r;
2411 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
2412 u32 tmp;
2413
2414 if (!ptr) {
2415 DRM_ERROR("invalid vram scratch pointer\n");
2416 return -EINVAL;
2417 }
2418
2419 tmp = 0xCAFEDEAD;
2420 writel(tmp, ptr);
2421
2422 r = radeon_ring_lock(rdev, ring, 4);
2423 if (r) {
2424 DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
2425 return r;
2426 }
2427 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
2428 radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
2429 radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
2430 radeon_ring_write(ring, 0xDEADBEEF);
2431 radeon_ring_unlock_commit(rdev, ring);
2432
2433 for (i = 0; i < rdev->usec_timeout; i++) {
2434 tmp = readl(ptr);
2435 if (tmp == 0xDEADBEEF)
2436 break;
2437 DRM_UDELAY(1);
2438 }
2439
2440 if (i < rdev->usec_timeout) {
2441 DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
2442 } else {
2443 DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
2444 ring->idx, tmp);
2445 r = -EINVAL;
2446 }
2447 return r;
2448}
2449
2450/*
2451 * CP fences/semaphores
2452 */
2453
2255void r600_fence_ring_emit(struct radeon_device *rdev, 2454void r600_fence_ring_emit(struct radeon_device *rdev,
2256 struct radeon_fence *fence) 2455 struct radeon_fence *fence)
2257{ 2456{
@@ -2315,6 +2514,59 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev,
2315 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel); 2514 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
2316} 2515}
2317 2516
2517/*
2518 * DMA fences/semaphores
2519 */
2520
2521/**
2522 * r600_dma_fence_ring_emit - emit a fence on the DMA ring
2523 *
2524 * @rdev: radeon_device pointer
2525 * @fence: radeon fence object
2526 *
2527 * Add a DMA fence packet to the ring to write
2528 * the fence seq number and a DMA trap packet to generate
2529 * an interrupt if needed (r6xx-r7xx).
2530 */
2531void r600_dma_fence_ring_emit(struct radeon_device *rdev,
2532 struct radeon_fence *fence)
2533{
2534 struct radeon_ring *ring = &rdev->ring[fence->ring];
2535 u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
2536
2537 /* write the fence */
2538 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
2539 radeon_ring_write(ring, addr & 0xfffffffc);
2540 radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
2541 radeon_ring_write(ring, lower_32_bits(fence->seq));
2542 /* generate an interrupt */
2543 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
2544}
2545
2546/**
2547 * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
2548 *
2549 * @rdev: radeon_device pointer
2550 * @ring: radeon_ring structure holding ring information
2551 * @semaphore: radeon semaphore object
2552 * @emit_wait: wait or signal semaphore
2553 *
2554 * Add a DMA semaphore packet to the ring to wait on or signal
2555 * other rings (r6xx-SI).
2556 */
2557void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
2558 struct radeon_ring *ring,
2559 struct radeon_semaphore *semaphore,
2560 bool emit_wait)
2561{
2562 u64 addr = semaphore->gpu_addr;
2563 u32 s = emit_wait ? 0 : 1;
2564
2565 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
2566 radeon_ring_write(ring, addr & 0xfffffffc);
2567 radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
2568}
2569
2318int r600_copy_blit(struct radeon_device *rdev, 2570int r600_copy_blit(struct radeon_device *rdev,
2319 uint64_t src_offset, 2571 uint64_t src_offset,
2320 uint64_t dst_offset, 2572 uint64_t dst_offset,
@@ -2334,6 +2586,80 @@ int r600_copy_blit(struct radeon_device *rdev,
2334 return 0; 2586 return 0;
2335} 2587}
2336 2588
2589/**
2590 * r600_copy_dma - copy pages using the DMA engine
2591 *
2592 * @rdev: radeon_device pointer
2593 * @src_offset: src GPU address
2594 * @dst_offset: dst GPU address
2595 * @num_gpu_pages: number of GPU pages to xfer
2596 * @fence: radeon fence object
2597 *
2598 * Copy GPU pages using the DMA engine (r6xx-r7xx).
2599 * Used by the radeon ttm implementation to move pages if
2600 * registered as the asic copy callback.
2601 */
2602int r600_copy_dma(struct radeon_device *rdev,
2603 uint64_t src_offset, uint64_t dst_offset,
2604 unsigned num_gpu_pages,
2605 struct radeon_fence **fence)
2606{
2607 struct radeon_semaphore *sem = NULL;
2608 int ring_index = rdev->asic->copy.dma_ring_index;
2609 struct radeon_ring *ring = &rdev->ring[ring_index];
2610 u32 size_in_dw, cur_size_in_dw;
2611 int i, num_loops;
2612 int r = 0;
2613
2614 r = radeon_semaphore_create(rdev, &sem);
2615 if (r) {
2616 DRM_ERROR("radeon: moving bo (%d).\n", r);
2617 return r;
2618 }
2619
2620 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
2621 num_loops = DIV_ROUND_UP(size_in_dw, 0xffff);
2622 r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
2623 if (r) {
2624 DRM_ERROR("radeon: moving bo (%d).\n", r);
2625 radeon_semaphore_free(rdev, &sem, NULL);
2626 return r;
2627 }
2628
2629 if (radeon_fence_need_sync(*fence, ring->idx)) {
2630 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
2631 ring->idx);
2632 radeon_fence_note_sync(*fence, ring->idx);
2633 } else {
2634 radeon_semaphore_free(rdev, &sem, NULL);
2635 }
2636
2637 for (i = 0; i < num_loops; i++) {
2638 cur_size_in_dw = size_in_dw;
2639 if (cur_size_in_dw > 0xFFFF)
2640 cur_size_in_dw = 0xFFFF;
2641 size_in_dw -= cur_size_in_dw;
2642 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
2643 radeon_ring_write(ring, dst_offset & 0xfffffffc);
2644 radeon_ring_write(ring, src_offset & 0xfffffffc);
2645 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
2646 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
2647 src_offset += cur_size_in_dw * 4;
2648 dst_offset += cur_size_in_dw * 4;
2649 }
2650
2651 r = radeon_fence_emit(rdev, fence, ring->idx);
2652 if (r) {
2653 radeon_ring_unlock_undo(rdev, ring);
2654 return r;
2655 }
2656
2657 radeon_ring_unlock_commit(rdev, ring);
2658 radeon_semaphore_free(rdev, &sem, *fence);
2659
2660 return r;
2661}
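
A worked example of the sizing arithmetic above, assuming 4 KiB GPU pages (RADEON_GPU_PAGE_SHIFT == 12); the page count is hypothetical:

    unsigned num_gpu_pages = 1024;                                 /* 4 MiB to copy */
    u32 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; /* 1048576 dwords */
    int num_loops  = DIV_ROUND_UP(size_in_dw, 0xffff);             /* 17 COPY packets */
    int ring_dw    = num_loops * 5 + 8;                            /* 93 ring dwords locked */
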
2662
2337int r600_set_surface_reg(struct radeon_device *rdev, int reg, 2663int r600_set_surface_reg(struct radeon_device *rdev, int reg,
2338 uint32_t tiling_flags, uint32_t pitch, 2664 uint32_t tiling_flags, uint32_t pitch,
2339 uint32_t offset, uint32_t obj_size) 2665 uint32_t offset, uint32_t obj_size)
@@ -2349,7 +2675,7 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
2349 2675
2350static int r600_startup(struct radeon_device *rdev) 2676static int r600_startup(struct radeon_device *rdev)
2351{ 2677{
2352 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 2678 struct radeon_ring *ring;
2353 int r; 2679 int r;
2354 2680
2355 /* enable pcie gen2 link */ 2681 /* enable pcie gen2 link */
@@ -2394,6 +2720,12 @@ static int r600_startup(struct radeon_device *rdev)
2394 return r; 2720 return r;
2395 } 2721 }
2396 2722
2723 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
2724 if (r) {
2725 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
2726 return r;
2727 }
2728
2397 /* Enable IRQ */ 2729 /* Enable IRQ */
2398 r = r600_irq_init(rdev); 2730 r = r600_irq_init(rdev);
2399 if (r) { 2731 if (r) {
@@ -2403,12 +2735,20 @@ static int r600_startup(struct radeon_device *rdev)
2403 } 2735 }
2404 r600_irq_set(rdev); 2736 r600_irq_set(rdev);
2405 2737
2738 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
2406 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, 2739 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
2407 R600_CP_RB_RPTR, R600_CP_RB_WPTR, 2740 R600_CP_RB_RPTR, R600_CP_RB_WPTR,
2408 0, 0xfffff, RADEON_CP_PACKET2); 2741 0, 0xfffff, RADEON_CP_PACKET2);
2742 if (r)
2743 return r;
2409 2744
2745 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
2746 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
2747 DMA_RB_RPTR, DMA_RB_WPTR,
2748 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
2410 if (r) 2749 if (r)
2411 return r; 2750 return r;
2751
2412 r = r600_cp_load_microcode(rdev); 2752 r = r600_cp_load_microcode(rdev);
2413 if (r) 2753 if (r)
2414 return r; 2754 return r;
@@ -2416,6 +2756,10 @@ static int r600_startup(struct radeon_device *rdev)
2416 if (r) 2756 if (r)
2417 return r; 2757 return r;
2418 2758
2759 r = r600_dma_resume(rdev);
2760 if (r)
2761 return r;
2762
2419 r = radeon_ib_pool_init(rdev); 2763 r = radeon_ib_pool_init(rdev);
2420 if (r) { 2764 if (r) {
2421 dev_err(rdev->dev, "IB initialization failed (%d).\n", r); 2765 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -2471,7 +2815,7 @@ int r600_suspend(struct radeon_device *rdev)
2471{ 2815{
2472 r600_audio_fini(rdev); 2816 r600_audio_fini(rdev);
2473 r600_cp_stop(rdev); 2817 r600_cp_stop(rdev);
2474 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; 2818 r600_dma_stop(rdev);
2475 r600_irq_suspend(rdev); 2819 r600_irq_suspend(rdev);
2476 radeon_wb_disable(rdev); 2820 radeon_wb_disable(rdev);
2477 r600_pcie_gart_disable(rdev); 2821 r600_pcie_gart_disable(rdev);
@@ -2544,6 +2888,9 @@ int r600_init(struct radeon_device *rdev)
2544 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; 2888 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
2545 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); 2889 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
2546 2890
2891 rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
2892 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
2893
2547 rdev->ih.ring_obj = NULL; 2894 rdev->ih.ring_obj = NULL;
2548 r600_ih_ring_init(rdev, 64 * 1024); 2895 r600_ih_ring_init(rdev, 64 * 1024);
2549 2896
@@ -2556,6 +2903,7 @@ int r600_init(struct radeon_device *rdev)
2556 if (r) { 2903 if (r) {
2557 dev_err(rdev->dev, "disabling GPU acceleration\n"); 2904 dev_err(rdev->dev, "disabling GPU acceleration\n");
2558 r600_cp_fini(rdev); 2905 r600_cp_fini(rdev);
2906 r600_dma_fini(rdev);
2559 r600_irq_fini(rdev); 2907 r600_irq_fini(rdev);
2560 radeon_wb_fini(rdev); 2908 radeon_wb_fini(rdev);
2561 radeon_ib_pool_fini(rdev); 2909 radeon_ib_pool_fini(rdev);
@@ -2572,6 +2920,7 @@ void r600_fini(struct radeon_device *rdev)
2572 r600_audio_fini(rdev); 2920 r600_audio_fini(rdev);
2573 r600_blit_fini(rdev); 2921 r600_blit_fini(rdev);
2574 r600_cp_fini(rdev); 2922 r600_cp_fini(rdev);
2923 r600_dma_fini(rdev);
2575 r600_irq_fini(rdev); 2924 r600_irq_fini(rdev);
2576 radeon_wb_fini(rdev); 2925 radeon_wb_fini(rdev);
2577 radeon_ib_pool_fini(rdev); 2926 radeon_ib_pool_fini(rdev);
@@ -2674,6 +3023,104 @@ free_scratch:
2674 return r; 3023 return r;
2675} 3024}
2676 3025
3026/**
3027 * r600_dma_ib_test - test an IB on the DMA engine
3028 *
3029 * @rdev: radeon_device pointer
3030 * @ring: radeon_ring structure holding ring information
3031 *
3032 * Test a simple IB in the DMA ring (r6xx-SI).
3033 * Returns 0 on success, error on failure.
3034 */
3035int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
3036{
3037 struct radeon_ib ib;
3038 unsigned i;
3039 int r;
3040 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
3041 u32 tmp = 0;
3042
3043 if (!ptr) {
3044 DRM_ERROR("invalid vram scratch pointer\n");
3045 return -EINVAL;
3046 }
3047
3048 tmp = 0xCAFEDEAD;
3049 writel(tmp, ptr);
3050
3051 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
3052 if (r) {
3053 DRM_ERROR("radeon: failed to get ib (%d).\n", r);
3054 return r;
3055 }
3056
3057 ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
3058 ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
3059 ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
3060 ib.ptr[3] = 0xDEADBEEF;
3061 ib.length_dw = 4;
3062
3063 r = radeon_ib_schedule(rdev, &ib, NULL);
3064 if (r) {
3065 radeon_ib_free(rdev, &ib);
3066 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
3067 return r;
3068 }
3069 r = radeon_fence_wait(ib.fence, false);
3070 if (r) {
3071 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
3072 return r;
3073 }
3074 for (i = 0; i < rdev->usec_timeout; i++) {
3075 tmp = readl(ptr);
3076 if (tmp == 0xDEADBEEF)
3077 break;
3078 DRM_UDELAY(1);
3079 }
3080 if (i < rdev->usec_timeout) {
3081 DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
3082 } else {
3083 DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
3084 r = -EINVAL;
3085 }
3086 radeon_ib_free(rdev, &ib);
3087 return r;
3088}
3089
3090/**
3091 * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
3092 *
3093 * @rdev: radeon_device pointer
3094 * @ib: IB object to schedule
3095 *
3096 * Schedule an IB in the DMA ring (r6xx-r7xx).
3097 */
3098void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3099{
3100 struct radeon_ring *ring = &rdev->ring[ib->ring];
3101
3102 if (rdev->wb.enabled) {
3103 u32 next_rptr = ring->wptr + 4;
3104 while ((next_rptr & 7) != 5)
3105 next_rptr++;
3106 next_rptr += 3;
3107 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
3108 radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3109 radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
3110 radeon_ring_write(ring, next_rptr);
3111 }
3112
3113 /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
3114 * Pad as necessary with NOPs.
3115 */
3116 while ((ring->wptr & 7) != 5)
3117 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
3118 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
3119 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
3120 radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
3121
3122}
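
To make the 8-dword boundary rule concrete, a small worked example with a hypothetical write pointer; the 3-dword INDIRECT_BUFFER packet must start at wptr % 8 == 5 so it ends exactly on the boundary:

    u32 wptr = 18;                 /* hypothetical write pointer, in dwords */
    while ((wptr & 7) != 5)        /* 18 -> 19 -> 20 -> 21: three NOP dwords */
            wptr++;
    /* the 3-dword IB packet then fills slots 21, 22 and 23, ending exactly
     * at the next 8-dword boundary (24), where the next packet may start */
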
3123
2677/* 3124/*
2678 * Interrupts 3125 * Interrupts
2679 * 3126 *
@@ -2865,6 +3312,8 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
2865 u32 tmp; 3312 u32 tmp;
2866 3313
2867 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 3314 WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
3315 tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
3316 WREG32(DMA_CNTL, tmp);
2868 WREG32(GRBM_INT_CNTL, 0); 3317 WREG32(GRBM_INT_CNTL, 0);
2869 WREG32(DxMODE_INT_MASK, 0); 3318 WREG32(DxMODE_INT_MASK, 0);
2870 WREG32(D1GRPH_INTERRUPT_CONTROL, 0); 3319 WREG32(D1GRPH_INTERRUPT_CONTROL, 0);
@@ -3006,6 +3455,7 @@ int r600_irq_set(struct radeon_device *rdev)
3006 u32 grbm_int_cntl = 0; 3455 u32 grbm_int_cntl = 0;
3007 u32 hdmi0, hdmi1; 3456 u32 hdmi0, hdmi1;
3008 u32 d1grph = 0, d2grph = 0; 3457 u32 d1grph = 0, d2grph = 0;
3458 u32 dma_cntl;
3009 3459
3010 if (!rdev->irq.installed) { 3460 if (!rdev->irq.installed) {
3011 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); 3461 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3040,12 +3490,19 @@ int r600_irq_set(struct radeon_device *rdev)
3040 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3490 hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3041 hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK; 3491 hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
3042 } 3492 }
3493 dma_cntl = RREG32(DMA_CNTL) & ~TRAP_ENABLE;
3043 3494
3044 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { 3495 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
3045 DRM_DEBUG("r600_irq_set: sw int\n"); 3496 DRM_DEBUG("r600_irq_set: sw int\n");
3046 cp_int_cntl |= RB_INT_ENABLE; 3497 cp_int_cntl |= RB_INT_ENABLE;
3047 cp_int_cntl |= TIME_STAMP_INT_ENABLE; 3498 cp_int_cntl |= TIME_STAMP_INT_ENABLE;
3048 } 3499 }
3500
3501 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
3502 DRM_DEBUG("r600_irq_set: sw int dma\n");
3503 dma_cntl |= TRAP_ENABLE;
3504 }
3505
3049 if (rdev->irq.crtc_vblank_int[0] || 3506 if (rdev->irq.crtc_vblank_int[0] ||
3050 atomic_read(&rdev->irq.pflip[0])) { 3507 atomic_read(&rdev->irq.pflip[0])) {
3051 DRM_DEBUG("r600_irq_set: vblank 0\n"); 3508 DRM_DEBUG("r600_irq_set: vblank 0\n");
@@ -3090,6 +3547,7 @@ int r600_irq_set(struct radeon_device *rdev)
3090 } 3547 }
3091 3548
3092 WREG32(CP_INT_CNTL, cp_int_cntl); 3549 WREG32(CP_INT_CNTL, cp_int_cntl);
3550 WREG32(DMA_CNTL, dma_cntl);
3093 WREG32(DxMODE_INT_MASK, mode_int); 3551 WREG32(DxMODE_INT_MASK, mode_int);
3094 WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph); 3552 WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
3095 WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph); 3553 WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
@@ -3469,6 +3927,10 @@ restart_ih:
3469 DRM_DEBUG("IH: CP EOP\n"); 3927 DRM_DEBUG("IH: CP EOP\n");
3470 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); 3928 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3471 break; 3929 break;
3930 case 224: /* DMA trap event */
3931 DRM_DEBUG("IH: DMA trap\n");
3932 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
3933 break;
3472 case 233: /* GUI IDLE */ 3934 case 233: /* GUI IDLE */
3473 DRM_DEBUG("IH: GUI idle\n"); 3935 DRM_DEBUG("IH: GUI idle\n");
3474 break; 3936 break;
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 2514123d2d00..be85f75aedda 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -721,12 +721,7 @@ static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
721 721
722static int r600_count_pipe_bits(uint32_t val) 722static int r600_count_pipe_bits(uint32_t val)
723{ 723{
724 int i, ret = 0; 724 return hweight32(val);
725 for (i = 0; i < 32; i++) {
726 ret += val & 1;
727 val >>= 1;
728 }
729 return ret;
730} 725}
731 726
732static void r600_gfx_init(struct drm_device *dev, 727static void r600_gfx_init(struct drm_device *dev,
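
For reference, hweight32() is the kernel's 32-bit population count, so the open-coded loop removed above collapses to one call; a trivial example:

    u32 val  = 0x000000F0;
    int bits = hweight32(val);  /* 4, the same result the removed loop computed bit by bit */
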
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 211c40252fe0..0be768be530c 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -657,87 +657,30 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
657 /* nby is npipes htiles aligned == npipes * 8 pixel aligned */ 657 /* nby is npipes htiles aligned == npipes * 8 pixel aligned */
658 nby = round_up(nby, track->npipes * 8); 658 nby = round_up(nby, track->npipes * 8);
659 } else { 659 } else {
660 /* htile widht & nby (8 or 4) make 2 bits number */ 660 /* always assume 8x8 htile */
661 tmp = track->htile_surface & 3;
662 /* align is htile align * 8, htile align vary according to 661 /* align is htile align * 8, htile align vary according to
663 * number of pipe and tile width and nby 662 * number of pipe and tile width and nby
664 */ 663 */
665 switch (track->npipes) { 664 switch (track->npipes) {
666 case 8: 665 case 8:
667 switch (tmp) { 666 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
668 case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ 667 nbx = round_up(nbx, 64 * 8);
669 nbx = round_up(nbx, 64 * 8); 668 nby = round_up(nby, 64 * 8);
670 nby = round_up(nby, 64 * 8);
671 break;
672 case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
673 case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
674 nbx = round_up(nbx, 64 * 8);
675 nby = round_up(nby, 32 * 8);
676 break;
677 case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
678 nbx = round_up(nbx, 32 * 8);
679 nby = round_up(nby, 32 * 8);
680 break;
681 default:
682 return -EINVAL;
683 }
684 break; 669 break;
685 case 4: 670 case 4:
686 switch (tmp) { 671 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
687 case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ 672 nbx = round_up(nbx, 64 * 8);
688 nbx = round_up(nbx, 64 * 8); 673 nby = round_up(nby, 32 * 8);
689 nby = round_up(nby, 32 * 8);
690 break;
691 case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
692 case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
693 nbx = round_up(nbx, 32 * 8);
694 nby = round_up(nby, 32 * 8);
695 break;
696 case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
697 nbx = round_up(nbx, 32 * 8);
698 nby = round_up(nby, 16 * 8);
699 break;
700 default:
701 return -EINVAL;
702 }
703 break; 674 break;
704 case 2: 675 case 2:
705 switch (tmp) { 676 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
706 case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ 677 nbx = round_up(nbx, 32 * 8);
707 nbx = round_up(nbx, 32 * 8); 678 nby = round_up(nby, 32 * 8);
708 nby = round_up(nby, 32 * 8);
709 break;
710 case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
711 case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
712 nbx = round_up(nbx, 32 * 8);
713 nby = round_up(nby, 16 * 8);
714 break;
715 case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
716 nbx = round_up(nbx, 16 * 8);
717 nby = round_up(nby, 16 * 8);
718 break;
719 default:
720 return -EINVAL;
721 }
722 break; 679 break;
723 case 1: 680 case 1:
724 switch (tmp) { 681 /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/
725 case 3: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8*/ 682 nbx = round_up(nbx, 32 * 8);
726 nbx = round_up(nbx, 32 * 8); 683 nby = round_up(nby, 16 * 8);
727 nby = round_up(nby, 16 * 8);
728 break;
729 case 2: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 8*/
730 case 1: /* HTILE_WIDTH = 8 & HTILE_HEIGHT = 4*/
731 nbx = round_up(nbx, 16 * 8);
732 nby = round_up(nby, 16 * 8);
733 break;
734 case 0: /* HTILE_WIDTH = 4 & HTILE_HEIGHT = 4*/
735 nbx = round_up(nbx, 16 * 8);
736 nby = round_up(nby, 8 * 8);
737 break;
738 default:
739 return -EINVAL;
740 }
741 break; 684 break;
742 default: 685 default:
743 dev_warn(p->dev, "%s:%d invalid num pipes %d\n", 686 dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
@@ -746,9 +689,10 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p)
746 } 689 }
747 } 690 }
748 /* compute number of htile */ 691 /* compute number of htile */
749 nbx = G_028D24_HTILE_WIDTH(track->htile_surface) ? nbx / 8 : nbx / 4; 692 nbx = nbx >> 3;
750 nby = G_028D24_HTILE_HEIGHT(track->htile_surface) ? nby / 8 : nby / 4; 693 nby = nby >> 3;
751 size = nbx * nby * 4; 694 /* size must be aligned on npipes * 2K boundary */
695 size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
752 size += track->htile_offset; 696 size += track->htile_offset;
753 697
754 if (size > radeon_bo_size(track->htile_bo)) { 698 if (size > radeon_bo_size(track->htile_bo)) {
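
To make the new htile sizing rule concrete, a hedged worked example with hypothetical values (a 2-pipe part, nbx = 1024 and nby = 768 entering the alignment above):

    unsigned npipes = 2;                            /* hypothetical 2-pipe part */
    unsigned nbx = round_up(1024, 32 * 8);          /* 1024, already aligned */
    unsigned nby = round_up(768, 32 * 8);           /* 768, already aligned */
    unsigned size = (nbx >> 3) * (nby >> 3) * 4;    /* 128 * 96 htiles * 4 B = 49152 */
    size = roundup(size, npipes * (2 << 10));       /* 4 KiB boundary -> stays 49152 */
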
@@ -1492,6 +1436,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1492 break; 1436 break;
1493 case DB_HTILE_SURFACE: 1437 case DB_HTILE_SURFACE:
1494 track->htile_surface = radeon_get_ib_value(p, idx); 1438 track->htile_surface = radeon_get_ib_value(p, idx);
1439 /* force 8x8 htile width and height */
1440 ib[idx] |= 3;
1495 track->db_dirty = true; 1441 track->db_dirty = true;
1496 break; 1442 break;
1497 case SQ_PGM_START_FS: 1443 case SQ_PGM_START_FS:
@@ -1949,6 +1895,78 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
1949 ib[idx+2] = upper_32_bits(offset) & 0xff; 1895 ib[idx+2] = upper_32_bits(offset) & 0xff;
1950 } 1896 }
1951 break; 1897 break;
1898 case PACKET3_CP_DMA:
1899 {
1900 u32 command, size;
1901 u64 offset, tmp;
1902 if (pkt->count != 4) {
1903 DRM_ERROR("bad CP DMA\n");
1904 return -EINVAL;
1905 }
1906 command = radeon_get_ib_value(p, idx+4);
1907 size = command & 0x1fffff;
1908 if (command & PACKET3_CP_DMA_CMD_SAS) {
1909 /* src address space is register */
1910 DRM_ERROR("CP DMA SAS not supported\n");
1911 return -EINVAL;
1912 } else {
1913 if (command & PACKET3_CP_DMA_CMD_SAIC) {
1914 DRM_ERROR("CP DMA SAIC only supported for registers\n");
1915 return -EINVAL;
1916 }
1917 /* src address space is memory */
1918 r = r600_cs_packet_next_reloc(p, &reloc);
1919 if (r) {
1920 DRM_ERROR("bad CP DMA SRC\n");
1921 return -EINVAL;
1922 }
1923
1924 tmp = radeon_get_ib_value(p, idx) +
1925 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);
1926
1927 offset = reloc->lobj.gpu_offset + tmp;
1928
1929 if ((tmp + size) > radeon_bo_size(reloc->robj)) {
1930 dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
1931 tmp + size, radeon_bo_size(reloc->robj));
1932 return -EINVAL;
1933 }
1934
1935 ib[idx] = offset;
1936 ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
1937 }
1938 if (command & PACKET3_CP_DMA_CMD_DAS) {
1939 /* dst address space is register */
1940 DRM_ERROR("CP DMA DAS not supported\n");
1941 return -EINVAL;
1942 } else {
1943 /* dst address space is memory */
1944 if (command & PACKET3_CP_DMA_CMD_DAIC) {
1945 DRM_ERROR("CP DMA DAIC only supported for registers\n");
1946 return -EINVAL;
1947 }
1948 r = r600_cs_packet_next_reloc(p, &reloc);
1949 if (r) {
1950 DRM_ERROR("bad CP DMA DST\n");
1951 return -EINVAL;
1952 }
1953
1954 tmp = radeon_get_ib_value(p, idx+2) +
1955 ((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);
1956
1957 offset = reloc->lobj.gpu_offset + tmp;
1958
1959 if ((tmp + size) > radeon_bo_size(reloc->robj)) {
1960 dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
1961 tmp + size, radeon_bo_size(reloc->robj));
1962 return -EINVAL;
1963 }
1964
1965 ib[idx+2] = offset;
1966 ib[idx+3] = upper_32_bits(offset) & 0xff;
1967 }
1968 break;
1969 }
1952 case PACKET3_SURFACE_SYNC: 1970 case PACKET3_SURFACE_SYNC:
1953 if (pkt->count != 3) { 1971 if (pkt->count != 3) {
1954 DRM_ERROR("bad SURFACE_SYNC\n"); 1972 DRM_ERROR("bad SURFACE_SYNC\n");
@@ -2496,3 +2514,196 @@ void r600_cs_legacy_init(void)
2496{ 2514{
2497 r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm; 2515 r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
2498} 2516}
2517
2518/*
2519 * DMA
2520 */
2521/**
2522 * r600_dma_cs_next_reloc() - parse next reloc
2523 * @p: parser structure holding parsing context.
2524 * @cs_reloc: reloc information
2525 *
2526 * Return the next reloc, do bo validation and compute
2527 * GPU offset using the provided start.
2528 **/
2529int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
2530 struct radeon_cs_reloc **cs_reloc)
2531{
2532 struct radeon_cs_chunk *relocs_chunk;
2533 unsigned idx;
2534
2535 if (p->chunk_relocs_idx == -1) {
2536 DRM_ERROR("No relocation chunk !\n");
2537 return -EINVAL;
2538 }
2539 *cs_reloc = NULL;
2540 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
2541 idx = p->dma_reloc_idx;
2542 if (idx >= relocs_chunk->length_dw) {
2543 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
2544 idx, relocs_chunk->length_dw);
2545 return -EINVAL;
2546 }
2547 *cs_reloc = p->relocs_ptr[idx];
2548 p->dma_reloc_idx++;
2549 return 0;
2550}
2551
2552#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
2553#define GET_DMA_COUNT(h) ((h) & 0x0000ffff)
2554#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
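
A small sketch (illustrative, not in the patch) of how these accessors split a header built with the DMA_PACKET macro from r600d.h:

    u32 header = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 4); /* 0x20000004 */
    u32 cmd    = GET_DMA_CMD(header);                   /* 0x2: write */
    u32 count  = GET_DMA_COUNT(header);                 /* 4 dwords */
    u32 tiled  = GET_DMA_T(header);                     /* 0: linear */
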
2555
2556/**
2557 * r600_dma_cs_parse() - parse the DMA IB
2558 * @p: parser structure holding parsing context.
2559 *
2560 * Parses the DMA IB from the CS ioctl and updates
2561 * the GPU addresses based on the reloc information and
2562 * checks for errors (R6xx-R7xx).
2563 * Returns 0 for success and an error on failure.
2564 **/
2565int r600_dma_cs_parse(struct radeon_cs_parser *p)
2566{
2567 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
2568 struct radeon_cs_reloc *src_reloc, *dst_reloc;
2569 u32 header, cmd, count, tiled;
2570 volatile u32 *ib = p->ib.ptr;
2571 u32 idx, idx_value;
2572 u64 src_offset, dst_offset;
2573 int r;
2574
2575 do {
2576 if (p->idx >= ib_chunk->length_dw) {
2577 DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
2578 p->idx, ib_chunk->length_dw);
2579 return -EINVAL;
2580 }
2581 idx = p->idx;
2582 header = radeon_get_ib_value(p, idx);
2583 cmd = GET_DMA_CMD(header);
2584 count = GET_DMA_COUNT(header);
2585 tiled = GET_DMA_T(header);
2586
2587 switch (cmd) {
2588 case DMA_PACKET_WRITE:
2589 r = r600_dma_cs_next_reloc(p, &dst_reloc);
2590 if (r) {
2591 DRM_ERROR("bad DMA_PACKET_WRITE\n");
2592 return -EINVAL;
2593 }
2594 if (tiled) {
2595 dst_offset = ib[idx+1];
2596 dst_offset <<= 8;
2597
2598 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2599 p->idx += count + 5;
2600 } else {
2601 dst_offset = ib[idx+1];
2602 dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;
2603
2604 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2605 ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2606 p->idx += count + 3;
2607 }
2608 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2609 dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
2610 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2611 return -EINVAL;
2612 }
2613 break;
2614 case DMA_PACKET_COPY:
2615 r = r600_dma_cs_next_reloc(p, &src_reloc);
2616 if (r) {
2617 DRM_ERROR("bad DMA_PACKET_COPY\n");
2618 return -EINVAL;
2619 }
2620 r = r600_dma_cs_next_reloc(p, &dst_reloc);
2621 if (r) {
2622 DRM_ERROR("bad DMA_PACKET_COPY\n");
2623 return -EINVAL;
2624 }
2625 if (tiled) {
2626 idx_value = radeon_get_ib_value(p, idx + 2);
2627 /* detile bit */
2628 if (idx_value & (1 << 31)) {
2629 /* tiled src, linear dst */
2630 src_offset = ib[idx+1];
2631 src_offset <<= 8;
2632 ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
2633
2634 dst_offset = ib[idx+5];
2635 dst_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
2636 ib[idx+5] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2637 ib[idx+6] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2638 } else {
2639 /* linear src, tiled dst */
2640 src_offset = ib[idx+5];
2641 src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
2642 ib[idx+5] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2643 ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2644
2645 dst_offset = ib[idx+1];
2646 dst_offset <<= 8;
2647 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
2648 }
2649 p->idx += 7;
2650 } else {
2651 src_offset = ib[idx+2];
2652 src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
2653 dst_offset = ib[idx+1];
2654 dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
2655
2656 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2657 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
2658 ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
2659 ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
2660 p->idx += 5;
2661 }
2662 if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
2663 dev_warn(p->dev, "DMA copy src buffer too small (%llu %lu)\n",
2664 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
2665 return -EINVAL;
2666 }
2667 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2668 dev_warn(p->dev, "DMA copy dst buffer too small (%llu %lu)\n",
2669 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2670 return -EINVAL;
2671 }
2672 break;
2673 case DMA_PACKET_CONSTANT_FILL:
2674 if (p->family < CHIP_RV770) {
2675 DRM_ERROR("Constant Fill is 7xx only !\n");
2676 return -EINVAL;
2677 }
2678 r = r600_dma_cs_next_reloc(p, &dst_reloc);
2679 if (r) {
2680 DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
2681 return -EINVAL;
2682 }
2683 dst_offset = ib[idx+1];
2684 dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
2685 if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
2686 dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
2687 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
2688 return -EINVAL;
2689 }
2690 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
2691 ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
2692 p->idx += 4;
2693 break;
2694 case DMA_PACKET_NOP:
2695 p->idx += 1;
2696 break;
2697 default:
2698 DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
2699 return -EINVAL;
2700 }
2701 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
2702#if 0
2703 for (r = 0; r < p->ib.length_dw; r++) {
2704 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
2705 mdelay(1);
2706 }
2707#endif
2708 return 0;
2709}
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index 2b960cb5c18a..909219b1bf80 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -96,6 +96,15 @@
96#define R600_CONFIG_F0_BASE 0x542C 96#define R600_CONFIG_F0_BASE 0x542C
97#define R600_CONFIG_APER_SIZE 0x5430 97#define R600_CONFIG_APER_SIZE 0x5430
98 98
99#define R600_BIF_FB_EN 0x5490
100#define R600_FB_READ_EN (1 << 0)
101#define R600_FB_WRITE_EN (1 << 1)
102
103#define R600_CITF_CNTL 0x200c
104#define R600_BLACKOUT_MASK 0x00000003
105
106#define R700_MC_CITF_CNTL 0x25c0
107
99#define R600_ROM_CNTL 0x1600 108#define R600_ROM_CNTL 0x1600
100# define R600_SCK_OVERWRITE (1 << 1) 109# define R600_SCK_OVERWRITE (1 << 1)
101# define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28 110# define R600_SCK_PRESCALE_CRYSTAL_CLK_SHIFT 28
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index fa6f37099ba9..4a53402b1852 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -590,9 +590,59 @@
590#define WAIT_2D_IDLECLEAN_bit (1 << 16) 590#define WAIT_2D_IDLECLEAN_bit (1 << 16)
591#define WAIT_3D_IDLECLEAN_bit (1 << 17) 591#define WAIT_3D_IDLECLEAN_bit (1 << 17)
592 592
593/* async DMA */
594#define DMA_TILING_CONFIG 0x3ec4
595#define DMA_CONFIG 0x3e4c
596
597#define DMA_RB_CNTL 0xd000
598# define DMA_RB_ENABLE (1 << 0)
599# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
600# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
601# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
602# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
603# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
604#define DMA_RB_BASE 0xd004
605#define DMA_RB_RPTR 0xd008
606#define DMA_RB_WPTR 0xd00c
607
608#define DMA_RB_RPTR_ADDR_HI 0xd01c
609#define DMA_RB_RPTR_ADDR_LO 0xd020
610
611#define DMA_IB_CNTL 0xd024
612# define DMA_IB_ENABLE (1 << 0)
613# define DMA_IB_SWAP_ENABLE (1 << 4)
614#define DMA_IB_RPTR 0xd028
615#define DMA_CNTL 0xd02c
616# define TRAP_ENABLE (1 << 0)
617# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
618# define SEM_WAIT_INT_ENABLE (1 << 2)
619# define DATA_SWAP_ENABLE (1 << 3)
620# define FENCE_SWAP_ENABLE (1 << 4)
621# define CTXEMPTY_INT_ENABLE (1 << 28)
622#define DMA_STATUS_REG 0xd034
623# define DMA_IDLE (1 << 0)
624#define DMA_SEM_INCOMPLETE_TIMER_CNTL 0xd044
625#define DMA_SEM_WAIT_FAIL_TIMER_CNTL 0xd048
626#define DMA_MODE 0xd0bc
627
628/* async DMA packets */
629#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
630 (((t) & 0x1) << 23) | \
631 (((s) & 0x1) << 22) | \
632 (((n) & 0xFFFF) << 0))
633/* async DMA Packet types */
634#define DMA_PACKET_WRITE 0x2
635#define DMA_PACKET_COPY 0x3
636#define DMA_PACKET_INDIRECT_BUFFER 0x4
637#define DMA_PACKET_SEMAPHORE 0x5
638#define DMA_PACKET_FENCE 0x6
639#define DMA_PACKET_TRAP 0x7
640#define DMA_PACKET_CONSTANT_FILL 0xd /* 7xx only */
641#define DMA_PACKET_NOP 0xf
642
593#define IH_RB_CNTL 0x3e00 643#define IH_RB_CNTL 0x3e00
594# define IH_RB_ENABLE (1 << 0) 644# define IH_RB_ENABLE (1 << 0)
595# define IH_IB_SIZE(x) ((x) << 1) /* log2 */ 645# define IH_RB_SIZE(x) ((x) << 1) /* log2 */
596# define IH_RB_FULL_DRAIN_ENABLE (1 << 6) 646# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
597# define IH_WPTR_WRITEBACK_ENABLE (1 << 8) 647# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
598# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */ 648# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
@@ -637,7 +687,9 @@
637#define TN_RLC_CLEAR_STATE_RESTORE_BASE 0x3f20 687#define TN_RLC_CLEAR_STATE_RESTORE_BASE 0x3f20
638 688
639#define SRBM_SOFT_RESET 0xe60 689#define SRBM_SOFT_RESET 0xe60
690# define SOFT_RESET_DMA (1 << 12)
640# define SOFT_RESET_RLC (1 << 13) 691# define SOFT_RESET_RLC (1 << 13)
692# define RV770_SOFT_RESET_DMA (1 << 20)
641 693
642#define CP_INT_CNTL 0xc124 694#define CP_INT_CNTL 0xc124
643# define CNTX_BUSY_INT_ENABLE (1 << 19) 695# define CNTX_BUSY_INT_ENABLE (1 << 19)
@@ -1134,6 +1186,38 @@
1134#define PACKET3_WAIT_REG_MEM 0x3C 1186#define PACKET3_WAIT_REG_MEM 0x3C
1135#define PACKET3_MEM_WRITE 0x3D 1187#define PACKET3_MEM_WRITE 0x3D
1136#define PACKET3_INDIRECT_BUFFER 0x32 1188#define PACKET3_INDIRECT_BUFFER 0x32
1189#define PACKET3_CP_DMA 0x41
1190/* 1. header
1191 * 2. SRC_ADDR_LO [31:0]
1192 * 3. CP_SYNC [31] | SRC_ADDR_HI [7:0]
1193 * 4. DST_ADDR_LO [31:0]
1194 * 5. DST_ADDR_HI [7:0]
1195 * 6. COMMAND [29:22] | BYTE_COUNT [20:0]
1196 */
1197# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
1198/* COMMAND */
1199# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
1200 /* 0 - none
1201 * 1 - 8 in 16
1202 * 2 - 8 in 32
1203 * 3 - 8 in 64
1204 */
1205# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
1206 /* 0 - none
1207 * 1 - 8 in 16
1208 * 2 - 8 in 32
1209 * 3 - 8 in 64
1210 */
1211# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
1212 /* 0 - memory
1213 * 1 - register
1214 */
1215# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
1216 /* 0 - memory
1217 * 1 - register
1218 */
1219# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
1220# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
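
Based on the layout comment above, a hedged sketch of emitting a memory-to-memory CP DMA copy; src, dst and size are assumed valid, PACKET3() is the usual PM4 type-3 header macro, and leaving COMMAND at 0 means memory source/destination with incrementing addresses and no swap:

    radeon_ring_write(ring, PACKET3(PACKET3_CP_DMA, 4));
    radeon_ring_write(ring, lower_32_bits(src));        /* SRC_ADDR_LO */
    radeon_ring_write(ring, upper_32_bits(src) & 0xff); /* SRC_ADDR_HI, CP_SYNC clear */
    radeon_ring_write(ring, lower_32_bits(dst));        /* DST_ADDR_LO */
    radeon_ring_write(ring, upper_32_bits(dst) & 0xff); /* DST_ADDR_HI */
    radeon_ring_write(ring, size & 0x1fffff);           /* COMMAND = 0 | BYTE_COUNT */
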
1137#define PACKET3_SURFACE_SYNC 0x43 1221#define PACKET3_SURFACE_SYNC 0x43
1138# define PACKET3_CB0_DEST_BASE_ENA (1 << 6) 1222# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
1139# define PACKET3_TC_ACTION_ENA (1 << 23) 1223# define PACKET3_TC_ACTION_ENA (1 << 23)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 8c42d54c2e26..5dc744d43d12 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -109,7 +109,7 @@ extern int radeon_lockup_timeout;
109#define RADEON_BIOS_NUM_SCRATCH 8 109#define RADEON_BIOS_NUM_SCRATCH 8
110 110
111/* max number of rings */ 111/* max number of rings */
112#define RADEON_NUM_RINGS 3 112#define RADEON_NUM_RINGS 5
113 113
114/* fence seq are set to this number when signaled */ 114/* fence seq are set to this number when signaled */
115#define RADEON_FENCE_SIGNALED_SEQ 0LL 115#define RADEON_FENCE_SIGNALED_SEQ 0LL
@@ -122,6 +122,11 @@ extern int radeon_lockup_timeout;
122#define CAYMAN_RING_TYPE_CP1_INDEX 1 122#define CAYMAN_RING_TYPE_CP1_INDEX 1
123#define CAYMAN_RING_TYPE_CP2_INDEX 2 123#define CAYMAN_RING_TYPE_CP2_INDEX 2
124 124
125/* R600+ has an async dma ring */
126#define R600_RING_TYPE_DMA_INDEX 3
127/* cayman adds a second async dma ring */
128#define CAYMAN_RING_TYPE_DMA1_INDEX 4
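
As an illustration, the new index selects both the ring storage and its per-ring asic callbacks, mirroring the r600_startup() and radeon_asic.c changes elsewhere in this patch:

    struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
    int r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
    /* dispatches to r600_dma_ring_test() via the asic ring table below */
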
129
125/* hardcode those limit for now */ 130/* hardcode those limit for now */
126#define RADEON_VA_IB_OFFSET (1 << 20) 131#define RADEON_VA_IB_OFFSET (1 << 20)
127#define RADEON_VA_RESERVED_SIZE (8 << 20) 132#define RADEON_VA_RESERVED_SIZE (8 << 20)
@@ -313,6 +318,7 @@ struct radeon_bo {
313 struct list_head list; 318 struct list_head list;
314 /* Protected by tbo.reserved */ 319 /* Protected by tbo.reserved */
315 u32 placements[3]; 320 u32 placements[3];
321 u32 busy_placements[3];
316 struct ttm_placement placement; 322 struct ttm_placement placement;
317 struct ttm_buffer_object tbo; 323 struct ttm_buffer_object tbo;
318 struct ttm_bo_kmap_obj kmap; 324 struct ttm_bo_kmap_obj kmap;
@@ -787,6 +793,15 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigne
787void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp); 793void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
788 794
789 795
796/* r600 async dma */
797void r600_dma_stop(struct radeon_device *rdev);
798int r600_dma_resume(struct radeon_device *rdev);
799void r600_dma_fini(struct radeon_device *rdev);
800
801void cayman_dma_stop(struct radeon_device *rdev);
802int cayman_dma_resume(struct radeon_device *rdev);
803void cayman_dma_fini(struct radeon_device *rdev);
804
790/* 805/*
791 * CS. 806 * CS.
792 */ 807 */
@@ -824,6 +839,7 @@ struct radeon_cs_parser {
824 struct radeon_cs_reloc *relocs; 839 struct radeon_cs_reloc *relocs;
825 struct radeon_cs_reloc **relocs_ptr; 840 struct radeon_cs_reloc **relocs_ptr;
826 struct list_head validated; 841 struct list_head validated;
842 unsigned dma_reloc_idx;
827 /* indices of various chunks */ 843 /* indices of various chunks */
828 int chunk_ib_idx; 844 int chunk_ib_idx;
829 int chunk_relocs_idx; 845 int chunk_relocs_idx;
@@ -883,7 +899,9 @@ struct radeon_wb {
883#define RADEON_WB_CP_RPTR_OFFSET 1024 899#define RADEON_WB_CP_RPTR_OFFSET 1024
884#define RADEON_WB_CP1_RPTR_OFFSET 1280 900#define RADEON_WB_CP1_RPTR_OFFSET 1280
885#define RADEON_WB_CP2_RPTR_OFFSET 1536 901#define RADEON_WB_CP2_RPTR_OFFSET 1536
902#define R600_WB_DMA_RPTR_OFFSET 1792
886#define R600_WB_IH_WPTR_OFFSET 2048 903#define R600_WB_IH_WPTR_OFFSET 2048
904#define CAYMAN_WB_DMA1_RPTR_OFFSET 2304
887#define R600_WB_EVENT_OFFSET 3072 905#define R600_WB_EVENT_OFFSET 3072
888 906
889/** 907/**
@@ -1539,6 +1557,8 @@ struct radeon_device {
1539 /* Register mmio */ 1557 /* Register mmio */
1540 resource_size_t rmmio_base; 1558 resource_size_t rmmio_base;
1541 resource_size_t rmmio_size; 1559 resource_size_t rmmio_size;
1560 /* protects concurrent MM_INDEX/DATA based register access */
1561 spinlock_t mmio_idx_lock;
1542 void __iomem *rmmio; 1562 void __iomem *rmmio;
1543 radeon_rreg_t mc_rreg; 1563 radeon_rreg_t mc_rreg;
1544 radeon_wreg_t mc_wreg; 1564 radeon_wreg_t mc_wreg;
@@ -1614,8 +1634,10 @@ int radeon_device_init(struct radeon_device *rdev,
1614void radeon_device_fini(struct radeon_device *rdev); 1634void radeon_device_fini(struct radeon_device *rdev);
1615int radeon_gpu_wait_for_idle(struct radeon_device *rdev); 1635int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
1616 1636
1617uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); 1637uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
1618void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 1638 bool always_indirect);
1639void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
1640 bool always_indirect);
1619u32 r100_io_rreg(struct radeon_device *rdev, u32 reg); 1641u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
1620void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v); 1642void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
1621 1643
@@ -1631,9 +1653,11 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
1631#define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg)) 1653#define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
1632#define RREG16(reg) readw((rdev->rmmio) + (reg)) 1654#define RREG16(reg) readw((rdev->rmmio) + (reg))
1633#define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg)) 1655#define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
1634#define RREG32(reg) r100_mm_rreg(rdev, (reg)) 1656#define RREG32(reg) r100_mm_rreg(rdev, (reg), false)
1635#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg))) 1657#define RREG32_IDX(reg) r100_mm_rreg(rdev, (reg), true)
1636#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v)) 1658#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg), false))
1659#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v), false)
1660#define WREG32_IDX(reg, v) r100_mm_wreg(rdev, (reg), (v), true)
1637#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) 1661#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
1638#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) 1662#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
1639#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg)) 1663#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
@@ -1658,7 +1682,7 @@ void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
1658 tmp_ |= ((val) & ~(mask)); \ 1682 tmp_ |= ((val) & ~(mask)); \
1659 WREG32_PLL(reg, tmp_); \ 1683 WREG32_PLL(reg, tmp_); \
1660 } while (0) 1684 } while (0)
1661#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg))) 1685#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg), false))
1662#define RREG32_IO(reg) r100_io_rreg(rdev, (reg)) 1686#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
1663#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v)) 1687#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
1664 1688
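
What the new always_indirect flag implies, as a hedged sketch: registers beyond the mapped aperture are reached through the conventional RADEON_MM_INDEX/RADEON_MM_DATA index/data pair, serialized by the mmio_idx_lock added above. The actual implementation lives in r100_mm_rreg()/r100_mm_wreg(), which this hunk only re-declares:

    unsigned long flags;
    u32 val;

    spin_lock_irqsave(&rdev->mmio_idx_lock, flags); /* serialize index/data access */
    writel(reg, rdev->rmmio + RADEON_MM_INDEX);     /* select the target register */
    val = readl(rdev->rmmio + RADEON_MM_DATA);      /* read its value indirectly */
    spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
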
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 654520b95ab7..596bcbe80ed0 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -947,6 +947,15 @@ static struct radeon_asic r600_asic = {
947 .ring_test = &r600_ring_test, 947 .ring_test = &r600_ring_test,
948 .ib_test = &r600_ib_test, 948 .ib_test = &r600_ib_test,
949 .is_lockup = &r600_gpu_is_lockup, 949 .is_lockup = &r600_gpu_is_lockup,
950 },
951 [R600_RING_TYPE_DMA_INDEX] = {
952 .ib_execute = &r600_dma_ring_ib_execute,
953 .emit_fence = &r600_dma_fence_ring_emit,
954 .emit_semaphore = &r600_dma_semaphore_ring_emit,
955 .cs_parse = &r600_dma_cs_parse,
956 .ring_test = &r600_dma_ring_test,
957 .ib_test = &r600_dma_ib_test,
958 .is_lockup = &r600_dma_is_lockup,
950 } 959 }
951 }, 960 },
952 .irq = { 961 .irq = {
@@ -963,10 +972,10 @@ static struct radeon_asic r600_asic = {
963 .copy = { 972 .copy = {
964 .blit = &r600_copy_blit, 973 .blit = &r600_copy_blit,
965 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 974 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
966 .dma = NULL, 975 .dma = &r600_copy_dma,
967 .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, 976 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
968 .copy = &r600_copy_blit, 977 .copy = &r600_copy_dma,
969 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, 978 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
970 }, 979 },
971 .surface = { 980 .surface = {
972 .set_reg = r600_set_surface_reg, 981 .set_reg = r600_set_surface_reg,
@@ -1022,6 +1031,15 @@ static struct radeon_asic rs780_asic = {
1022 .ring_test = &r600_ring_test, 1031 .ring_test = &r600_ring_test,
1023 .ib_test = &r600_ib_test, 1032 .ib_test = &r600_ib_test,
1024 .is_lockup = &r600_gpu_is_lockup, 1033 .is_lockup = &r600_gpu_is_lockup,
1034 },
1035 [R600_RING_TYPE_DMA_INDEX] = {
1036 .ib_execute = &r600_dma_ring_ib_execute,
1037 .emit_fence = &r600_dma_fence_ring_emit,
1038 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1039 .cs_parse = &r600_dma_cs_parse,
1040 .ring_test = &r600_dma_ring_test,
1041 .ib_test = &r600_dma_ib_test,
1042 .is_lockup = &r600_dma_is_lockup,
1025 } 1043 }
1026 }, 1044 },
1027 .irq = { 1045 .irq = {
@@ -1038,10 +1056,10 @@ static struct radeon_asic rs780_asic = {
1038 .copy = { 1056 .copy = {
1039 .blit = &r600_copy_blit, 1057 .blit = &r600_copy_blit,
1040 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1058 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1041 .dma = NULL, 1059 .dma = &r600_copy_dma,
1042 .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1060 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1043 .copy = &r600_copy_blit, 1061 .copy = &r600_copy_dma,
1044 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1062 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
1045 }, 1063 },
1046 .surface = { 1064 .surface = {
1047 .set_reg = r600_set_surface_reg, 1065 .set_reg = r600_set_surface_reg,
@@ -1097,6 +1115,15 @@ static struct radeon_asic rv770_asic = {
1097 .ring_test = &r600_ring_test, 1115 .ring_test = &r600_ring_test,
1098 .ib_test = &r600_ib_test, 1116 .ib_test = &r600_ib_test,
1099 .is_lockup = &r600_gpu_is_lockup, 1117 .is_lockup = &r600_gpu_is_lockup,
1118 },
1119 [R600_RING_TYPE_DMA_INDEX] = {
1120 .ib_execute = &r600_dma_ring_ib_execute,
1121 .emit_fence = &r600_dma_fence_ring_emit,
1122 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1123 .cs_parse = &r600_dma_cs_parse,
1124 .ring_test = &r600_dma_ring_test,
1125 .ib_test = &r600_dma_ib_test,
1126 .is_lockup = &r600_dma_is_lockup,
1100 } 1127 }
1101 }, 1128 },
1102 .irq = { 1129 .irq = {
@@ -1113,10 +1140,10 @@ static struct radeon_asic rv770_asic = {
1113 .copy = { 1140 .copy = {
1114 .blit = &r600_copy_blit, 1141 .blit = &r600_copy_blit,
1115 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1142 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1116 .dma = NULL, 1143 .dma = &r600_copy_dma,
1117 .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1144 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1118 .copy = &r600_copy_blit, 1145 .copy = &r600_copy_dma,
1119 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1146 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
1120 }, 1147 },
1121 .surface = { 1148 .surface = {
1122 .set_reg = r600_set_surface_reg, 1149 .set_reg = r600_set_surface_reg,
@@ -1172,6 +1199,15 @@ static struct radeon_asic evergreen_asic = {
1172 .ring_test = &r600_ring_test, 1199 .ring_test = &r600_ring_test,
1173 .ib_test = &r600_ib_test, 1200 .ib_test = &r600_ib_test,
1174 .is_lockup = &evergreen_gpu_is_lockup, 1201 .is_lockup = &evergreen_gpu_is_lockup,
1202 },
1203 [R600_RING_TYPE_DMA_INDEX] = {
1204 .ib_execute = &evergreen_dma_ring_ib_execute,
1205 .emit_fence = &evergreen_dma_fence_ring_emit,
1206 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1207 .cs_parse = &evergreen_dma_cs_parse,
1208 .ring_test = &r600_dma_ring_test,
1209 .ib_test = &r600_dma_ib_test,
1210 .is_lockup = &r600_dma_is_lockup,
1175 } 1211 }
1176 }, 1212 },
1177 .irq = { 1213 .irq = {
@@ -1188,10 +1224,10 @@ static struct radeon_asic evergreen_asic = {
1188 .copy = { 1224 .copy = {
1189 .blit = &r600_copy_blit, 1225 .blit = &r600_copy_blit,
1190 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1226 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1191 .dma = NULL, 1227 .dma = &evergreen_copy_dma,
1192 .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1228 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1193 .copy = &r600_copy_blit, 1229 .copy = &evergreen_copy_dma,
1194 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1230 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
1195 }, 1231 },
1196 .surface = { 1232 .surface = {
1197 .set_reg = r600_set_surface_reg, 1233 .set_reg = r600_set_surface_reg,
@@ -1248,6 +1284,15 @@ static struct radeon_asic sumo_asic = {
1248 .ib_test = &r600_ib_test, 1284 .ib_test = &r600_ib_test,
1249 .is_lockup = &evergreen_gpu_is_lockup, 1285 .is_lockup = &evergreen_gpu_is_lockup,
1250 }, 1286 },
1287 [R600_RING_TYPE_DMA_INDEX] = {
1288 .ib_execute = &evergreen_dma_ring_ib_execute,
1289 .emit_fence = &evergreen_dma_fence_ring_emit,
1290 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1291 .cs_parse = &evergreen_dma_cs_parse,
1292 .ring_test = &r600_dma_ring_test,
1293 .ib_test = &r600_dma_ib_test,
1294 .is_lockup = &r600_dma_is_lockup,
1295 }
1251 }, 1296 },
1252 .irq = { 1297 .irq = {
1253 .set = &evergreen_irq_set, 1298 .set = &evergreen_irq_set,
@@ -1263,10 +1308,10 @@ static struct radeon_asic sumo_asic = {
1263 .copy = { 1308 .copy = {
1264 .blit = &r600_copy_blit, 1309 .blit = &r600_copy_blit,
1265 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1310 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1266 .dma = NULL, 1311 .dma = &evergreen_copy_dma,
1267 .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1312 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1268 .copy = &r600_copy_blit, 1313 .copy = &evergreen_copy_dma,
1269 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1314 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
1270 }, 1315 },
1271 .surface = { 1316 .surface = {
1272 .set_reg = r600_set_surface_reg, 1317 .set_reg = r600_set_surface_reg,
@@ -1322,6 +1367,15 @@ static struct radeon_asic btc_asic = {
1322 .ring_test = &r600_ring_test, 1367 .ring_test = &r600_ring_test,
1323 .ib_test = &r600_ib_test, 1368 .ib_test = &r600_ib_test,
1324 .is_lockup = &evergreen_gpu_is_lockup, 1369 .is_lockup = &evergreen_gpu_is_lockup,
1370 },
1371 [R600_RING_TYPE_DMA_INDEX] = {
1372 .ib_execute = &evergreen_dma_ring_ib_execute,
1373 .emit_fence = &evergreen_dma_fence_ring_emit,
1374 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1375 .cs_parse = &evergreen_dma_cs_parse,
1376 .ring_test = &r600_dma_ring_test,
1377 .ib_test = &r600_dma_ib_test,
1378 .is_lockup = &r600_dma_is_lockup,
1325 } 1379 }
1326 }, 1380 },
1327 .irq = { 1381 .irq = {
@@ -1338,10 +1392,10 @@ static struct radeon_asic btc_asic = {
1338 .copy = { 1392 .copy = {
1339 .blit = &r600_copy_blit, 1393 .blit = &r600_copy_blit,
1340 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1394 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1341 .dma = NULL, 1395 .dma = &evergreen_copy_dma,
1342 .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1396 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1343 .copy = &r600_copy_blit, 1397 .copy = &evergreen_copy_dma,
1344 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1398 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
1345 }, 1399 },
1346 .surface = { 1400 .surface = {
1347 .set_reg = r600_set_surface_reg, 1401 .set_reg = r600_set_surface_reg,
@@ -1391,7 +1445,7 @@ static struct radeon_asic cayman_asic = {
1391 .vm = { 1445 .vm = {
1392 .init = &cayman_vm_init, 1446 .init = &cayman_vm_init,
1393 .fini = &cayman_vm_fini, 1447 .fini = &cayman_vm_fini,
1394 .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1448 .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
1395 .set_page = &cayman_vm_set_page, 1449 .set_page = &cayman_vm_set_page,
1396 }, 1450 },
1397 .ring = { 1451 .ring = {
@@ -1427,6 +1481,28 @@ static struct radeon_asic cayman_asic = {
1427 .ib_test = &r600_ib_test, 1481 .ib_test = &r600_ib_test,
1428 .is_lockup = &evergreen_gpu_is_lockup, 1482 .is_lockup = &evergreen_gpu_is_lockup,
1429 .vm_flush = &cayman_vm_flush, 1483 .vm_flush = &cayman_vm_flush,
1484 },
1485 [R600_RING_TYPE_DMA_INDEX] = {
1486 .ib_execute = &cayman_dma_ring_ib_execute,
1487 .ib_parse = &evergreen_dma_ib_parse,
1488 .emit_fence = &evergreen_dma_fence_ring_emit,
1489 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1490 .cs_parse = &evergreen_dma_cs_parse,
1491 .ring_test = &r600_dma_ring_test,
1492 .ib_test = &r600_dma_ib_test,
1493 .is_lockup = &cayman_dma_is_lockup,
1494 .vm_flush = &cayman_dma_vm_flush,
1495 },
1496 [CAYMAN_RING_TYPE_DMA1_INDEX] = {
1497 .ib_execute = &cayman_dma_ring_ib_execute,
1498 .ib_parse = &evergreen_dma_ib_parse,
1499 .emit_fence = &evergreen_dma_fence_ring_emit,
1500 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1501 .cs_parse = &evergreen_dma_cs_parse,
1502 .ring_test = &r600_dma_ring_test,
1503 .ib_test = &r600_dma_ib_test,
1504 .is_lockup = &cayman_dma_is_lockup,
1505 .vm_flush = &cayman_dma_vm_flush,
1430 } 1506 }
1431 }, 1507 },
1432 .irq = { 1508 .irq = {
@@ -1443,10 +1519,10 @@ static struct radeon_asic cayman_asic = {
1443 .copy = { 1519 .copy = {
1444 .blit = &r600_copy_blit, 1520 .blit = &r600_copy_blit,
1445 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1521 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1446 .dma = NULL, 1522 .dma = &evergreen_copy_dma,
1447 .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1523 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1448 .copy = &r600_copy_blit, 1524 .copy = &evergreen_copy_dma,
1449 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1525 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
1450 }, 1526 },
1451 .surface = { 1527 .surface = {
1452 .set_reg = r600_set_surface_reg, 1528 .set_reg = r600_set_surface_reg,
@@ -1496,7 +1572,7 @@ static struct radeon_asic trinity_asic = {
1496 .vm = { 1572 .vm = {
1497 .init = &cayman_vm_init, 1573 .init = &cayman_vm_init,
1498 .fini = &cayman_vm_fini, 1574 .fini = &cayman_vm_fini,
1499 .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1575 .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
1500 .set_page = &cayman_vm_set_page, 1576 .set_page = &cayman_vm_set_page,
1501 }, 1577 },
1502 .ring = { 1578 .ring = {
@@ -1532,6 +1608,28 @@ static struct radeon_asic trinity_asic = {
1532 .ib_test = &r600_ib_test, 1608 .ib_test = &r600_ib_test,
1533 .is_lockup = &evergreen_gpu_is_lockup, 1609 .is_lockup = &evergreen_gpu_is_lockup,
1534 .vm_flush = &cayman_vm_flush, 1610 .vm_flush = &cayman_vm_flush,
1611 },
1612 [R600_RING_TYPE_DMA_INDEX] = {
1613 .ib_execute = &cayman_dma_ring_ib_execute,
1614 .ib_parse = &evergreen_dma_ib_parse,
1615 .emit_fence = &evergreen_dma_fence_ring_emit,
1616 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1617 .cs_parse = &evergreen_dma_cs_parse,
1618 .ring_test = &r600_dma_ring_test,
1619 .ib_test = &r600_dma_ib_test,
1620 .is_lockup = &cayman_dma_is_lockup,
1621 .vm_flush = &cayman_dma_vm_flush,
1622 },
1623 [CAYMAN_RING_TYPE_DMA1_INDEX] = {
1624 .ib_execute = &cayman_dma_ring_ib_execute,
1625 .ib_parse = &evergreen_dma_ib_parse,
1626 .emit_fence = &evergreen_dma_fence_ring_emit,
1627 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1628 .cs_parse = &evergreen_dma_cs_parse,
1629 .ring_test = &r600_dma_ring_test,
1630 .ib_test = &r600_dma_ib_test,
1631 .is_lockup = &cayman_dma_is_lockup,
1632 .vm_flush = &cayman_dma_vm_flush,
1535 } 1633 }
1536 }, 1634 },
1537 .irq = { 1635 .irq = {
@@ -1548,10 +1646,10 @@ static struct radeon_asic trinity_asic = {
1548 .copy = { 1646 .copy = {
1549 .blit = &r600_copy_blit, 1647 .blit = &r600_copy_blit,
1550 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1648 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1551 .dma = NULL, 1649 .dma = &evergreen_copy_dma,
1552 .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1650 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1553 .copy = &r600_copy_blit, 1651 .copy = &evergreen_copy_dma,
1554 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1652 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
1555 }, 1653 },
1556 .surface = { 1654 .surface = {
1557 .set_reg = r600_set_surface_reg, 1655 .set_reg = r600_set_surface_reg,
@@ -1601,7 +1699,7 @@ static struct radeon_asic si_asic = {
1601 .vm = { 1699 .vm = {
1602 .init = &si_vm_init, 1700 .init = &si_vm_init,
1603 .fini = &si_vm_fini, 1701 .fini = &si_vm_fini,
1604 .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1702 .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
1605 .set_page = &si_vm_set_page, 1703 .set_page = &si_vm_set_page,
1606 }, 1704 },
1607 .ring = { 1705 .ring = {
@@ -1637,6 +1735,28 @@ static struct radeon_asic si_asic = {
1637 .ib_test = &r600_ib_test, 1735 .ib_test = &r600_ib_test,
1638 .is_lockup = &si_gpu_is_lockup, 1736 .is_lockup = &si_gpu_is_lockup,
1639 .vm_flush = &si_vm_flush, 1737 .vm_flush = &si_vm_flush,
1738 },
1739 [R600_RING_TYPE_DMA_INDEX] = {
1740 .ib_execute = &cayman_dma_ring_ib_execute,
1741 .ib_parse = &evergreen_dma_ib_parse,
1742 .emit_fence = &evergreen_dma_fence_ring_emit,
1743 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1744 .cs_parse = NULL,
1745 .ring_test = &r600_dma_ring_test,
1746 .ib_test = &r600_dma_ib_test,
1747 .is_lockup = &cayman_dma_is_lockup,
1748 .vm_flush = &si_dma_vm_flush,
1749 },
1750 [CAYMAN_RING_TYPE_DMA1_INDEX] = {
1751 .ib_execute = &cayman_dma_ring_ib_execute,
1752 .ib_parse = &evergreen_dma_ib_parse,
1753 .emit_fence = &evergreen_dma_fence_ring_emit,
1754 .emit_semaphore = &r600_dma_semaphore_ring_emit,
1755 .cs_parse = NULL,
1756 .ring_test = &r600_dma_ring_test,
1757 .ib_test = &r600_dma_ib_test,
1758 .is_lockup = &cayman_dma_is_lockup,
1759 .vm_flush = &si_dma_vm_flush,
1640 } 1760 }
1641 }, 1761 },
1642 .irq = { 1762 .irq = {
@@ -1653,10 +1773,10 @@ static struct radeon_asic si_asic = {
1653 .copy = { 1773 .copy = {
1654 .blit = NULL, 1774 .blit = NULL,
1655 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1775 .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
1656 .dma = NULL, 1776 .dma = &si_copy_dma,
1657 .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1777 .dma_ring_index = R600_RING_TYPE_DMA_INDEX,
1658 .copy = NULL, 1778 .copy = &si_copy_dma,
1659 .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, 1779 .copy_ring_index = R600_RING_TYPE_DMA_INDEX,
1660 }, 1780 },
1661 .surface = { 1781 .surface = {
1662 .set_reg = r600_set_surface_reg, 1782 .set_reg = r600_set_surface_reg,
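
The three hunks above give the cayman, trinity and si ASIC tables two async-DMA ring entries each and retarget .copy/.dma/.pt_ring_index at the new DMA engine. A stand-alone C sketch of the designated-initializer pattern these tables rely on, so new rings slot in without disturbing existing entries (all names below are hypothetical, not the radeon structs):

#include <stdio.h>

enum ring_index { GFX_INDEX, DMA0_INDEX, DMA1_INDEX, NUM_RINGS };

struct ring_funcs {
	const char *name;
	int (*ring_test)(void);
};

static int gfx_test(void) { return 0; }
static int dma_test(void) { return 0; }

/* entries are keyed by enum index, like the
 * .ring = { [R600_RING_TYPE_DMA_INDEX] = { ... } } blocks above */
static const struct ring_funcs rings[NUM_RINGS] = {
	[GFX_INDEX]  = { .name = "gfx",  .ring_test = gfx_test },
	[DMA0_INDEX] = { .name = "dma0", .ring_test = dma_test },
	[DMA1_INDEX] = { .name = "dma1", .ring_test = dma_test },
};

int main(void)
{
	int i;

	for (i = 0; i < NUM_RINGS; i++)
		printf("%s -> %d\n", rings[i].name, rings[i].ring_test());
	return 0;
}
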
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 5e3a0e5c6be1..5f4882cc2152 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -263,6 +263,7 @@ extern int rs690_mc_wait_for_idle(struct radeon_device *rdev);
263struct rv515_mc_save { 263struct rv515_mc_save {
264 u32 vga_render_control; 264 u32 vga_render_control;
265 u32 vga_hdp_control; 265 u32 vga_hdp_control;
266 bool crtc_enabled[2];
266}; 267};
267 268
268int rv515_init(struct radeon_device *rdev); 269int rv515_init(struct radeon_device *rdev);
@@ -303,12 +304,21 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
303uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg); 304uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
304void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 305void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
305int r600_cs_parse(struct radeon_cs_parser *p); 306int r600_cs_parse(struct radeon_cs_parser *p);
307int r600_dma_cs_parse(struct radeon_cs_parser *p);
306void r600_fence_ring_emit(struct radeon_device *rdev, 308void r600_fence_ring_emit(struct radeon_device *rdev,
307 struct radeon_fence *fence); 309 struct radeon_fence *fence);
308void r600_semaphore_ring_emit(struct radeon_device *rdev, 310void r600_semaphore_ring_emit(struct radeon_device *rdev,
309 struct radeon_ring *cp, 311 struct radeon_ring *cp,
310 struct radeon_semaphore *semaphore, 312 struct radeon_semaphore *semaphore,
311 bool emit_wait); 313 bool emit_wait);
314void r600_dma_fence_ring_emit(struct radeon_device *rdev,
315 struct radeon_fence *fence);
316void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
317 struct radeon_ring *ring,
318 struct radeon_semaphore *semaphore,
319 bool emit_wait);
320void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
321bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
312bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp); 322bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
313int r600_asic_reset(struct radeon_device *rdev); 323int r600_asic_reset(struct radeon_device *rdev);
314int r600_set_surface_reg(struct radeon_device *rdev, int reg, 324int r600_set_surface_reg(struct radeon_device *rdev, int reg,
@@ -316,11 +326,16 @@ int r600_set_surface_reg(struct radeon_device *rdev, int reg,
316 uint32_t offset, uint32_t obj_size); 326 uint32_t offset, uint32_t obj_size);
317void r600_clear_surface_reg(struct radeon_device *rdev, int reg); 327void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
318int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); 328int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
329int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
319void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); 330void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
320int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); 331int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
332int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
321int r600_copy_blit(struct radeon_device *rdev, 333int r600_copy_blit(struct radeon_device *rdev,
322 uint64_t src_offset, uint64_t dst_offset, 334 uint64_t src_offset, uint64_t dst_offset,
323 unsigned num_gpu_pages, struct radeon_fence **fence); 335 unsigned num_gpu_pages, struct radeon_fence **fence);
336int r600_copy_dma(struct radeon_device *rdev,
337 uint64_t src_offset, uint64_t dst_offset,
338 unsigned num_gpu_pages, struct radeon_fence **fence);
324void r600_hpd_init(struct radeon_device *rdev); 339void r600_hpd_init(struct radeon_device *rdev);
325void r600_hpd_fini(struct radeon_device *rdev); 340void r600_hpd_fini(struct radeon_device *rdev);
326bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); 341bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -416,6 +431,7 @@ u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc);
416int evergreen_irq_set(struct radeon_device *rdev); 431int evergreen_irq_set(struct radeon_device *rdev);
417int evergreen_irq_process(struct radeon_device *rdev); 432int evergreen_irq_process(struct radeon_device *rdev);
418extern int evergreen_cs_parse(struct radeon_cs_parser *p); 433extern int evergreen_cs_parse(struct radeon_cs_parser *p);
434extern int evergreen_dma_cs_parse(struct radeon_cs_parser *p);
419extern void evergreen_pm_misc(struct radeon_device *rdev); 435extern void evergreen_pm_misc(struct radeon_device *rdev);
420extern void evergreen_pm_prepare(struct radeon_device *rdev); 436extern void evergreen_pm_prepare(struct radeon_device *rdev);
421extern void evergreen_pm_finish(struct radeon_device *rdev); 437extern void evergreen_pm_finish(struct radeon_device *rdev);
@@ -428,6 +444,14 @@ extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
428void evergreen_disable_interrupt_state(struct radeon_device *rdev); 444void evergreen_disable_interrupt_state(struct radeon_device *rdev);
429int evergreen_blit_init(struct radeon_device *rdev); 445int evergreen_blit_init(struct radeon_device *rdev);
430int evergreen_mc_wait_for_idle(struct radeon_device *rdev); 446int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
447void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
448 struct radeon_fence *fence);
449void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
450 struct radeon_ib *ib);
451int evergreen_copy_dma(struct radeon_device *rdev,
452 uint64_t src_offset, uint64_t dst_offset,
453 unsigned num_gpu_pages,
454 struct radeon_fence **fence);
431 455
432/* 456/*
433 * cayman 457 * cayman
@@ -449,6 +473,11 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
449 uint64_t addr, unsigned count, 473 uint64_t addr, unsigned count,
450 uint32_t incr, uint32_t flags); 474 uint32_t incr, uint32_t flags);
451int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); 475int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
476int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
477void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
478 struct radeon_ib *ib);
479bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
480void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
452 481
453/* DCE6 - SI */ 482/* DCE6 - SI */
454void dce6_bandwidth_update(struct radeon_device *rdev); 483void dce6_bandwidth_update(struct radeon_device *rdev);
@@ -476,5 +505,10 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
476void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); 505void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
477int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); 506int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
478uint64_t si_get_gpu_clock(struct radeon_device *rdev); 507uint64_t si_get_gpu_clock(struct radeon_device *rdev);
508int si_copy_dma(struct radeon_device *rdev,
509 uint64_t src_offset, uint64_t dst_offset,
510 unsigned num_gpu_pages,
511 struct radeon_fence **fence);
512void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
479 513
480#endif 514#endif
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 45b660b27cfc..4af89126e223 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -3246,11 +3246,9 @@ static uint32_t combios_detect_ram(struct drm_device *dev, int ram,
3246 while (ram--) { 3246 while (ram--) {
3247 addr = ram * 1024 * 1024; 3247 addr = ram * 1024 * 1024;
3248 /* write to each page */ 3248 /* write to each page */
3249 WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER); 3249 WREG32_IDX((addr) | RADEON_MM_APER, 0xdeadbeef);
3250 WREG32(RADEON_MM_DATA, 0xdeadbeef);
3251 /* read back and verify */ 3250 /* read back and verify */
3252 WREG32(RADEON_MM_INDEX, (addr) | RADEON_MM_APER); 3251 if (RREG32_IDX((addr) | RADEON_MM_APER) != 0xdeadbeef)
3253 if (RREG32(RADEON_MM_DATA) != 0xdeadbeef)
3254 return 0; 3252 return 0;
3255 } 3253 }
3256 3254
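
The detect-RAM loop above collapses each open-coded RADEON_MM_INDEX/RADEON_MM_DATA pair into a single WREG32_IDX()/RREG32_IDX() call. Combined with the mmio_idx_lock spinlock initialised in radeon_device.c further down, this keeps the index write and the data access atomic against concurrent indexed users. A hedged sketch of what such a helper can look like in kernel context; the body is an assumption, not the verbatim radeon implementation:

static void wreg32_idx(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
	/* select the target register, then write its value */
	writel(reg, rdev->rmmio + RADEON_MM_INDEX);
	writel(v, rdev->rmmio + RADEON_MM_DATA);
	spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
}
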
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index b884c362a8c2..47bf162ab9c6 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1599,7 +1599,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1599 connector->interlace_allowed = true; 1599 connector->interlace_allowed = true;
1600 connector->doublescan_allowed = true; 1600 connector->doublescan_allowed = true;
1601 radeon_connector->dac_load_detect = true; 1601 radeon_connector->dac_load_detect = true;
1602 drm_connector_attach_property(&radeon_connector->base, 1602 drm_object_attach_property(&radeon_connector->base.base,
1603 rdev->mode_info.load_detect_property, 1603 rdev->mode_info.load_detect_property,
1604 1); 1604 1);
1605 break; 1605 break;
@@ -1608,13 +1608,13 @@ radeon_add_atom_connector(struct drm_device *dev,
1608 case DRM_MODE_CONNECTOR_HDMIA: 1608 case DRM_MODE_CONNECTOR_HDMIA:
1609 case DRM_MODE_CONNECTOR_HDMIB: 1609 case DRM_MODE_CONNECTOR_HDMIB:
1610 case DRM_MODE_CONNECTOR_DisplayPort: 1610 case DRM_MODE_CONNECTOR_DisplayPort:
1611 drm_connector_attach_property(&radeon_connector->base, 1611 drm_object_attach_property(&radeon_connector->base.base,
1612 rdev->mode_info.underscan_property, 1612 rdev->mode_info.underscan_property,
1613 UNDERSCAN_OFF); 1613 UNDERSCAN_OFF);
1614 drm_connector_attach_property(&radeon_connector->base, 1614 drm_object_attach_property(&radeon_connector->base.base,
1615 rdev->mode_info.underscan_hborder_property, 1615 rdev->mode_info.underscan_hborder_property,
1616 0); 1616 0);
1617 drm_connector_attach_property(&radeon_connector->base, 1617 drm_object_attach_property(&radeon_connector->base.base,
1618 rdev->mode_info.underscan_vborder_property, 1618 rdev->mode_info.underscan_vborder_property,
1619 0); 1619 0);
1620 subpixel_order = SubPixelHorizontalRGB; 1620 subpixel_order = SubPixelHorizontalRGB;
@@ -1625,14 +1625,14 @@ radeon_add_atom_connector(struct drm_device *dev,
1625 connector->doublescan_allowed = false; 1625 connector->doublescan_allowed = false;
1626 if (connector_type == DRM_MODE_CONNECTOR_DVII) { 1626 if (connector_type == DRM_MODE_CONNECTOR_DVII) {
1627 radeon_connector->dac_load_detect = true; 1627 radeon_connector->dac_load_detect = true;
1628 drm_connector_attach_property(&radeon_connector->base, 1628 drm_object_attach_property(&radeon_connector->base.base,
1629 rdev->mode_info.load_detect_property, 1629 rdev->mode_info.load_detect_property,
1630 1); 1630 1);
1631 } 1631 }
1632 break; 1632 break;
1633 case DRM_MODE_CONNECTOR_LVDS: 1633 case DRM_MODE_CONNECTOR_LVDS:
1634 case DRM_MODE_CONNECTOR_eDP: 1634 case DRM_MODE_CONNECTOR_eDP:
1635 drm_connector_attach_property(&radeon_connector->base, 1635 drm_object_attach_property(&radeon_connector->base.base,
1636 dev->mode_config.scaling_mode_property, 1636 dev->mode_config.scaling_mode_property,
1637 DRM_MODE_SCALE_FULLSCREEN); 1637 DRM_MODE_SCALE_FULLSCREEN);
1638 subpixel_order = SubPixelHorizontalRGB; 1638 subpixel_order = SubPixelHorizontalRGB;
@@ -1651,7 +1651,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1651 DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1651 DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1652 } 1652 }
1653 radeon_connector->dac_load_detect = true; 1653 radeon_connector->dac_load_detect = true;
1654 drm_connector_attach_property(&radeon_connector->base, 1654 drm_object_attach_property(&radeon_connector->base.base,
1655 rdev->mode_info.load_detect_property, 1655 rdev->mode_info.load_detect_property,
1656 1); 1656 1);
1657 /* no HPD on analog connectors */ 1657 /* no HPD on analog connectors */
@@ -1669,7 +1669,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1669 DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1669 DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1670 } 1670 }
1671 radeon_connector->dac_load_detect = true; 1671 radeon_connector->dac_load_detect = true;
1672 drm_connector_attach_property(&radeon_connector->base, 1672 drm_object_attach_property(&radeon_connector->base.base,
1673 rdev->mode_info.load_detect_property, 1673 rdev->mode_info.load_detect_property,
1674 1); 1674 1);
1675 /* no HPD on analog connectors */ 1675 /* no HPD on analog connectors */
@@ -1692,23 +1692,23 @@ radeon_add_atom_connector(struct drm_device *dev,
1692 DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1692 DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1693 } 1693 }
1694 subpixel_order = SubPixelHorizontalRGB; 1694 subpixel_order = SubPixelHorizontalRGB;
1695 drm_connector_attach_property(&radeon_connector->base, 1695 drm_object_attach_property(&radeon_connector->base.base,
1696 rdev->mode_info.coherent_mode_property, 1696 rdev->mode_info.coherent_mode_property,
1697 1); 1697 1);
1698 if (ASIC_IS_AVIVO(rdev)) { 1698 if (ASIC_IS_AVIVO(rdev)) {
1699 drm_connector_attach_property(&radeon_connector->base, 1699 drm_object_attach_property(&radeon_connector->base.base,
1700 rdev->mode_info.underscan_property, 1700 rdev->mode_info.underscan_property,
1701 UNDERSCAN_OFF); 1701 UNDERSCAN_OFF);
1702 drm_connector_attach_property(&radeon_connector->base, 1702 drm_object_attach_property(&radeon_connector->base.base,
1703 rdev->mode_info.underscan_hborder_property, 1703 rdev->mode_info.underscan_hborder_property,
1704 0); 1704 0);
1705 drm_connector_attach_property(&radeon_connector->base, 1705 drm_object_attach_property(&radeon_connector->base.base,
1706 rdev->mode_info.underscan_vborder_property, 1706 rdev->mode_info.underscan_vborder_property,
1707 0); 1707 0);
1708 } 1708 }
1709 if (connector_type == DRM_MODE_CONNECTOR_DVII) { 1709 if (connector_type == DRM_MODE_CONNECTOR_DVII) {
1710 radeon_connector->dac_load_detect = true; 1710 radeon_connector->dac_load_detect = true;
1711 drm_connector_attach_property(&radeon_connector->base, 1711 drm_object_attach_property(&radeon_connector->base.base,
1712 rdev->mode_info.load_detect_property, 1712 rdev->mode_info.load_detect_property,
1713 1); 1713 1);
1714 } 1714 }
@@ -1732,17 +1732,17 @@ radeon_add_atom_connector(struct drm_device *dev,
1732 if (!radeon_connector->ddc_bus) 1732 if (!radeon_connector->ddc_bus)
1733 DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1733 DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1734 } 1734 }
1735 drm_connector_attach_property(&radeon_connector->base, 1735 drm_object_attach_property(&radeon_connector->base.base,
1736 rdev->mode_info.coherent_mode_property, 1736 rdev->mode_info.coherent_mode_property,
1737 1); 1737 1);
1738 if (ASIC_IS_AVIVO(rdev)) { 1738 if (ASIC_IS_AVIVO(rdev)) {
1739 drm_connector_attach_property(&radeon_connector->base, 1739 drm_object_attach_property(&radeon_connector->base.base,
1740 rdev->mode_info.underscan_property, 1740 rdev->mode_info.underscan_property,
1741 UNDERSCAN_OFF); 1741 UNDERSCAN_OFF);
1742 drm_connector_attach_property(&radeon_connector->base, 1742 drm_object_attach_property(&radeon_connector->base.base,
1743 rdev->mode_info.underscan_hborder_property, 1743 rdev->mode_info.underscan_hborder_property,
1744 0); 1744 0);
1745 drm_connector_attach_property(&radeon_connector->base, 1745 drm_object_attach_property(&radeon_connector->base.base,
1746 rdev->mode_info.underscan_vborder_property, 1746 rdev->mode_info.underscan_vborder_property,
1747 0); 1747 0);
1748 } 1748 }
@@ -1771,17 +1771,17 @@ radeon_add_atom_connector(struct drm_device *dev,
1771 DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1771 DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1772 } 1772 }
1773 subpixel_order = SubPixelHorizontalRGB; 1773 subpixel_order = SubPixelHorizontalRGB;
1774 drm_connector_attach_property(&radeon_connector->base, 1774 drm_object_attach_property(&radeon_connector->base.base,
1775 rdev->mode_info.coherent_mode_property, 1775 rdev->mode_info.coherent_mode_property,
1776 1); 1776 1);
1777 if (ASIC_IS_AVIVO(rdev)) { 1777 if (ASIC_IS_AVIVO(rdev)) {
1778 drm_connector_attach_property(&radeon_connector->base, 1778 drm_object_attach_property(&radeon_connector->base.base,
1779 rdev->mode_info.underscan_property, 1779 rdev->mode_info.underscan_property,
1780 UNDERSCAN_OFF); 1780 UNDERSCAN_OFF);
1781 drm_connector_attach_property(&radeon_connector->base, 1781 drm_object_attach_property(&radeon_connector->base.base,
1782 rdev->mode_info.underscan_hborder_property, 1782 rdev->mode_info.underscan_hborder_property,
1783 0); 1783 0);
1784 drm_connector_attach_property(&radeon_connector->base, 1784 drm_object_attach_property(&radeon_connector->base.base,
1785 rdev->mode_info.underscan_vborder_property, 1785 rdev->mode_info.underscan_vborder_property,
1786 0); 1786 0);
1787 } 1787 }
@@ -1806,7 +1806,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1806 if (!radeon_connector->ddc_bus) 1806 if (!radeon_connector->ddc_bus)
1807 DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1807 DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1808 } 1808 }
1809 drm_connector_attach_property(&radeon_connector->base, 1809 drm_object_attach_property(&radeon_connector->base.base,
1810 dev->mode_config.scaling_mode_property, 1810 dev->mode_config.scaling_mode_property,
1811 DRM_MODE_SCALE_FULLSCREEN); 1811 DRM_MODE_SCALE_FULLSCREEN);
1812 subpixel_order = SubPixelHorizontalRGB; 1812 subpixel_order = SubPixelHorizontalRGB;
@@ -1819,10 +1819,10 @@ radeon_add_atom_connector(struct drm_device *dev,
1819 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); 1819 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
1820 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); 1820 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
1821 radeon_connector->dac_load_detect = true; 1821 radeon_connector->dac_load_detect = true;
1822 drm_connector_attach_property(&radeon_connector->base, 1822 drm_object_attach_property(&radeon_connector->base.base,
1823 rdev->mode_info.load_detect_property, 1823 rdev->mode_info.load_detect_property,
1824 1); 1824 1);
1825 drm_connector_attach_property(&radeon_connector->base, 1825 drm_object_attach_property(&radeon_connector->base.base,
1826 rdev->mode_info.tv_std_property, 1826 rdev->mode_info.tv_std_property,
1827 radeon_atombios_get_tv_info(rdev)); 1827 radeon_atombios_get_tv_info(rdev));
1828 /* no HPD on analog connectors */ 1828 /* no HPD on analog connectors */
@@ -1843,7 +1843,7 @@ radeon_add_atom_connector(struct drm_device *dev,
1843 if (!radeon_connector->ddc_bus) 1843 if (!radeon_connector->ddc_bus)
1844 DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1844 DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1845 } 1845 }
1846 drm_connector_attach_property(&radeon_connector->base, 1846 drm_object_attach_property(&radeon_connector->base.base,
1847 dev->mode_config.scaling_mode_property, 1847 dev->mode_config.scaling_mode_property,
1848 DRM_MODE_SCALE_FULLSCREEN); 1848 DRM_MODE_SCALE_FULLSCREEN);
1849 subpixel_order = SubPixelHorizontalRGB; 1849 subpixel_order = SubPixelHorizontalRGB;
@@ -1922,7 +1922,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
1922 DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1922 DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1923 } 1923 }
1924 radeon_connector->dac_load_detect = true; 1924 radeon_connector->dac_load_detect = true;
1925 drm_connector_attach_property(&radeon_connector->base, 1925 drm_object_attach_property(&radeon_connector->base.base,
1926 rdev->mode_info.load_detect_property, 1926 rdev->mode_info.load_detect_property,
1927 1); 1927 1);
1928 /* no HPD on analog connectors */ 1928 /* no HPD on analog connectors */
@@ -1940,7 +1940,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
1940 DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 1940 DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
1941 } 1941 }
1942 radeon_connector->dac_load_detect = true; 1942 radeon_connector->dac_load_detect = true;
1943 drm_connector_attach_property(&radeon_connector->base, 1943 drm_object_attach_property(&radeon_connector->base.base,
1944 rdev->mode_info.load_detect_property, 1944 rdev->mode_info.load_detect_property,
1945 1); 1945 1);
1946 /* no HPD on analog connectors */ 1946 /* no HPD on analog connectors */
@@ -1959,7 +1959,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
1959 } 1959 }
1960 if (connector_type == DRM_MODE_CONNECTOR_DVII) { 1960 if (connector_type == DRM_MODE_CONNECTOR_DVII) {
1961 radeon_connector->dac_load_detect = true; 1961 radeon_connector->dac_load_detect = true;
1962 drm_connector_attach_property(&radeon_connector->base, 1962 drm_object_attach_property(&radeon_connector->base.base,
1963 rdev->mode_info.load_detect_property, 1963 rdev->mode_info.load_detect_property,
1964 1); 1964 1);
1965 } 1965 }
@@ -1983,10 +1983,10 @@ radeon_add_legacy_connector(struct drm_device *dev,
1983 */ 1983 */
1984 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) 1984 if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
1985 radeon_connector->dac_load_detect = false; 1985 radeon_connector->dac_load_detect = false;
1986 drm_connector_attach_property(&radeon_connector->base, 1986 drm_object_attach_property(&radeon_connector->base.base,
1987 rdev->mode_info.load_detect_property, 1987 rdev->mode_info.load_detect_property,
1988 radeon_connector->dac_load_detect); 1988 radeon_connector->dac_load_detect);
1989 drm_connector_attach_property(&radeon_connector->base, 1989 drm_object_attach_property(&radeon_connector->base.base,
1990 rdev->mode_info.tv_std_property, 1990 rdev->mode_info.tv_std_property,
1991 radeon_combios_get_tv_info(rdev)); 1991 radeon_combios_get_tv_info(rdev));
1992 /* no HPD on analog connectors */ 1992 /* no HPD on analog connectors */
@@ -2002,7 +2002,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
2002 if (!radeon_connector->ddc_bus) 2002 if (!radeon_connector->ddc_bus)
2003 DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n"); 2003 DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
2004 } 2004 }
2005 drm_connector_attach_property(&radeon_connector->base, 2005 drm_object_attach_property(&radeon_connector->base.base,
2006 dev->mode_config.scaling_mode_property, 2006 dev->mode_config.scaling_mode_property,
2007 DRM_MODE_SCALE_FULLSCREEN); 2007 DRM_MODE_SCALE_FULLSCREEN);
2008 subpixel_order = SubPixelHorizontalRGB; 2008 subpixel_order = SubPixelHorizontalRGB;
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 8b2797dc7b64..9143fc45e35b 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -116,20 +116,6 @@ u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index)
116 } 116 }
117} 117}
118 118
119u32 RADEON_READ_MM(drm_radeon_private_t *dev_priv, int addr)
120{
121 u32 ret;
122
123 if (addr < 0x10000)
124 ret = DRM_READ32(dev_priv->mmio, addr);
125 else {
126 DRM_WRITE32(dev_priv->mmio, RADEON_MM_INDEX, addr);
127 ret = DRM_READ32(dev_priv->mmio, RADEON_MM_DATA);
128 }
129
130 return ret;
131}
132
133static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) 119static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
134{ 120{
135 u32 ret; 121 u32 ret;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 41672cc563fb..396baba0141a 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -43,6 +43,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
43 return 0; 43 return 0;
44 } 44 }
45 chunk = &p->chunks[p->chunk_relocs_idx]; 45 chunk = &p->chunks[p->chunk_relocs_idx];
46 p->dma_reloc_idx = 0;
46 /* FIXME: we assume that each relocs use 4 dwords */ 47 /* FIXME: we assume that each relocs use 4 dwords */
47 p->nrelocs = chunk->length_dw / 4; 48 p->nrelocs = chunk->length_dw / 4;
48 p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL); 49 p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
@@ -111,6 +112,18 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
111 } else 112 } else
112 p->ring = RADEON_RING_TYPE_GFX_INDEX; 113 p->ring = RADEON_RING_TYPE_GFX_INDEX;
113 break; 114 break;
115 case RADEON_CS_RING_DMA:
116 if (p->rdev->family >= CHIP_CAYMAN) {
117 if (p->priority > 0)
118 p->ring = R600_RING_TYPE_DMA_INDEX;
119 else
120 p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
121 } else if (p->rdev->family >= CHIP_R600) {
122 p->ring = R600_RING_TYPE_DMA_INDEX;
123 } else {
124 return -EINVAL;
125 }
126 break;
114 } 127 }
115 return 0; 128 return 0;
116} 129}
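
The new RADEON_CS_RING_DMA case lets a CS ioctl target the async DMA engine: on Cayman and newer the submitted priority picks between the two DMA rings, r600-class parts get the single ring, and older ASICs are rejected. The same rule restated as a self-contained pure function (family checks reduced to booleans; names are hypothetical):

#include <errno.h>

enum dma_ring { RING_DMA0, RING_DMA1 };

static int pick_dma_ring(int is_cayman_or_newer, int is_r600_or_newer,
			 int priority, enum dma_ring *out)
{
	if (is_cayman_or_newer) {
		*out = priority > 0 ? RING_DMA0 : RING_DMA1;
		return 0;
	}
	if (is_r600_or_newer) {
		*out = RING_DMA0;
		return 0;
	}
	return -EINVAL;	/* pre-r600 parts have no async DMA ring */
}
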
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 0fe56c9f64bd..ad6df625e8b8 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -66,24 +66,25 @@ static void radeon_hide_cursor(struct drm_crtc *crtc)
66 struct radeon_device *rdev = crtc->dev->dev_private; 66 struct radeon_device *rdev = crtc->dev->dev_private;
67 67
68 if (ASIC_IS_DCE4(rdev)) { 68 if (ASIC_IS_DCE4(rdev)) {
69 WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset); 69 WREG32_IDX(EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset,
70 WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) | 70 EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) |
71 EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2)); 71 EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2));
72 } else if (ASIC_IS_AVIVO(rdev)) { 72 } else if (ASIC_IS_AVIVO(rdev)) {
73 WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); 73 WREG32_IDX(AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset,
74 WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT)); 74 (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
75 } else { 75 } else {
76 u32 reg;
76 switch (radeon_crtc->crtc_id) { 77 switch (radeon_crtc->crtc_id) {
77 case 0: 78 case 0:
78 WREG32(RADEON_MM_INDEX, RADEON_CRTC_GEN_CNTL); 79 reg = RADEON_CRTC_GEN_CNTL;
79 break; 80 break;
80 case 1: 81 case 1:
81 WREG32(RADEON_MM_INDEX, RADEON_CRTC2_GEN_CNTL); 82 reg = RADEON_CRTC2_GEN_CNTL;
82 break; 83 break;
83 default: 84 default:
84 return; 85 return;
85 } 86 }
86 WREG32_P(RADEON_MM_DATA, 0, ~RADEON_CRTC_CUR_EN); 87 WREG32_IDX(reg, RREG32_IDX(reg) & ~RADEON_CRTC_CUR_EN);
87 } 88 }
88} 89}
89 90
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index e2f5f888c374..49b06590001e 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1059,6 +1059,7 @@ int radeon_device_init(struct radeon_device *rdev,
1059 1059
1060 /* Registers mapping */ 1060 /* Registers mapping */
1061 /* TODO: block userspace mapping of io register */ 1061 /* TODO: block userspace mapping of io register */
1062 spin_lock_init(&rdev->mmio_idx_lock);
1062 rdev->rmmio_base = pci_resource_start(rdev->pdev, 2); 1063 rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
1063 rdev->rmmio_size = pci_resource_len(rdev->pdev, 2); 1064 rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
1064 rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size); 1065 rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index bfa2a6015727..310c0e5254ba 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -378,8 +378,12 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
378 work->old_rbo = rbo; 378 work->old_rbo = rbo;
379 obj = new_radeon_fb->obj; 379 obj = new_radeon_fb->obj;
380 rbo = gem_to_radeon_bo(obj); 380 rbo = gem_to_radeon_bo(obj);
381
382 spin_lock(&rbo->tbo.bdev->fence_lock);
381 if (rbo->tbo.sync_obj) 383 if (rbo->tbo.sync_obj)
382 work->fence = radeon_fence_ref(rbo->tbo.sync_obj); 384 work->fence = radeon_fence_ref(rbo->tbo.sync_obj);
385 spin_unlock(&rbo->tbo.bdev->fence_lock);
386
383 INIT_WORK(&work->work, radeon_unpin_work_func); 387 INIT_WORK(&work->work, radeon_unpin_work_func);
384 388
385 /* We borrow the event spin lock for protecting unpin_work */ 389 /* We borrow the event spin lock for protecting unpin_work */
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 07eb84e8a8a4..9b1a727d3c9e 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -65,9 +65,12 @@
65 * 2.22.0 - r600 only: RESOLVE_BOX allowed 65 * 2.22.0 - r600 only: RESOLVE_BOX allowed
66 * 2.23.0 - allow STRMOUT_BASE_UPDATE on RS780 and RS880 66 * 2.23.0 - allow STRMOUT_BASE_UPDATE on RS780 and RS880
67 * 2.24.0 - eg only: allow MIP_ADDRESS=0 for MSAA textures 67 * 2.24.0 - eg only: allow MIP_ADDRESS=0 for MSAA textures
68 * 2.25.0 - eg+: new info request for num SE and num SH
69 * 2.26.0 - r600-eg: fix htile size computation
70 * 2.27.0 - r600-SI: Add CS ioctl support for async DMA
68 */ 71 */
69#define KMS_DRIVER_MAJOR 2 72#define KMS_DRIVER_MAJOR 2
70#define KMS_DRIVER_MINOR 24 73#define KMS_DRIVER_MINOR 27
71#define KMS_DRIVER_PATCHLEVEL 0 74#define KMS_DRIVER_PATCHLEVEL 0
72int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 75int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
73int radeon_driver_unload_kms(struct drm_device *dev); 76int radeon_driver_unload_kms(struct drm_device *dev);
@@ -281,12 +284,15 @@ static struct drm_driver driver_old = {
281 284
282static struct drm_driver kms_driver; 285static struct drm_driver kms_driver;
283 286
284static void radeon_kick_out_firmware_fb(struct pci_dev *pdev) 287static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
285{ 288{
286 struct apertures_struct *ap; 289 struct apertures_struct *ap;
287 bool primary = false; 290 bool primary = false;
288 291
289 ap = alloc_apertures(1); 292 ap = alloc_apertures(1);
293 if (!ap)
294 return -ENOMEM;
295
290 ap->ranges[0].base = pci_resource_start(pdev, 0); 296 ap->ranges[0].base = pci_resource_start(pdev, 0);
291 ap->ranges[0].size = pci_resource_len(pdev, 0); 297 ap->ranges[0].size = pci_resource_len(pdev, 0);
292 298
@@ -295,13 +301,19 @@ static void radeon_kick_out_firmware_fb(struct pci_dev *pdev)
295#endif 301#endif
296 remove_conflicting_framebuffers(ap, "radeondrmfb", primary); 302 remove_conflicting_framebuffers(ap, "radeondrmfb", primary);
297 kfree(ap); 303 kfree(ap);
304
305 return 0;
298} 306}
299 307
300static int __devinit 308static int __devinit
301radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 309radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
302{ 310{
311 int ret;
312
303 /* Get rid of things like offb */ 313 /* Get rid of things like offb */
304 radeon_kick_out_firmware_fb(pdev); 314 ret = radeon_kick_out_firmware_fb(pdev);
315 if (ret)
316 return ret;
305 317
306 return drm_get_pci_dev(pdev, ent, &kms_driver); 318 return drm_get_pci_dev(pdev, ent, &kms_driver);
307} 319}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index a1b59ca96d01..e7fdf163a8ca 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -366,7 +366,6 @@ extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file
366extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv); 366extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv);
367extern void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc); 367extern void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc);
368extern void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base); 368extern void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base);
369extern u32 RADEON_READ_MM(drm_radeon_private_t *dev_priv, int addr);
370 369
371extern void radeon_freelist_reset(struct drm_device * dev); 370extern void radeon_freelist_reset(struct drm_device * dev);
372extern struct drm_buf *radeon_freelist_get(struct drm_device * dev); 371extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 22bd6c2c2740..410a975a8eec 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -772,7 +772,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
772 int r; 772 int r;
773 773
774 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg); 774 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
775 if (rdev->wb.use_event) { 775 if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
776 rdev->fence_drv[ring].scratch_reg = 0; 776 rdev->fence_drv[ring].scratch_reg = 0;
777 index = R600_WB_EVENT_OFFSET + ring * 4; 777 index = R600_WB_EVENT_OFFSET + ring * 4;
778 } else { 778 } else {
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 4debd60e5aa6..6e24f84755b5 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -1237,7 +1237,6 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
1237{ 1237{
1238 struct radeon_bo_va *bo_va; 1238 struct radeon_bo_va *bo_va;
1239 1239
1240 BUG_ON(!atomic_read(&bo->tbo.reserved));
1241 list_for_each_entry(bo_va, &bo->va, bo_list) { 1240 list_for_each_entry(bo_va, &bo->va, bo_list) {
1242 bo_va->valid = false; 1241 bo_va->valid = false;
1243 } 1242 }
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index dc781c49b96b..9c312f9afb68 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -361,6 +361,22 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
361 return -EINVAL; 361 return -EINVAL;
362 } 362 }
363 break; 363 break;
364 case RADEON_INFO_MAX_SE:
365 if (rdev->family >= CHIP_TAHITI)
366 value = rdev->config.si.max_shader_engines;
367 else if (rdev->family >= CHIP_CAYMAN)
368 value = rdev->config.cayman.max_shader_engines;
369 else if (rdev->family >= CHIP_CEDAR)
370 value = rdev->config.evergreen.num_ses;
371 else
372 value = 1;
373 break;
374 case RADEON_INFO_MAX_SH_PER_SE:
375 if (rdev->family >= CHIP_TAHITI)
376 value = rdev->config.si.max_sh_per_se;
377 else
378 return -EINVAL;
379 break;
364 default: 380 default:
365 DRM_DEBUG_KMS("Invalid request %d\n", info->request); 381 DRM_DEBUG_KMS("Invalid request %d\n", info->request);
366 return -EINVAL; 382 return -EINVAL;
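
The two new info requests expose shader-engine topology to user space. A minimal sketch of querying one of them through the info ioctl; it assumes a libdrm radeon_drm.h new enough to define RADEON_INFO_MAX_SE and keeps error handling to a perror():

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/radeon_drm.h>

int main(void)
{
	uint32_t num_se = 0;
	struct drm_radeon_info info = { .request = RADEON_INFO_MAX_SE };
	int fd = open("/dev/dri/card0", O_RDWR);

	info.value = (uintptr_t)&num_se;	/* kernel writes the result here */
	if (fd < 0 || ioctl(fd, DRM_IOCTL_RADEON_INFO, &info))
		perror("RADEON_INFO_MAX_SE");
	else
		printf("shader engines: %u\n", num_se);
	return 0;
}
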
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 92c5f473cf08..d818b503b42f 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -427,7 +427,7 @@ struct radeon_connector_atom_dig {
427 uint32_t igp_lane_info; 427 uint32_t igp_lane_info;
428 /* displayport */ 428 /* displayport */
429 struct radeon_i2c_chan *dp_i2c_bus; 429 struct radeon_i2c_chan *dp_i2c_bus;
430 u8 dpcd[8]; 430 u8 dpcd[DP_RECEIVER_CAP_SIZE];
431 u8 dp_sink_type; 431 u8 dp_sink_type;
432 int dp_clock; 432 int dp_clock;
433 int dp_lane_count; 433 int dp_lane_count;
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index b91118ccef86..883c95d8d90f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -84,17 +84,34 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
84 rbo->placement.fpfn = 0; 84 rbo->placement.fpfn = 0;
85 rbo->placement.lpfn = 0; 85 rbo->placement.lpfn = 0;
86 rbo->placement.placement = rbo->placements; 86 rbo->placement.placement = rbo->placements;
87 rbo->placement.busy_placement = rbo->placements;
88 if (domain & RADEON_GEM_DOMAIN_VRAM) 87 if (domain & RADEON_GEM_DOMAIN_VRAM)
89 rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | 88 rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
90 TTM_PL_FLAG_VRAM; 89 TTM_PL_FLAG_VRAM;
91 if (domain & RADEON_GEM_DOMAIN_GTT) 90 if (domain & RADEON_GEM_DOMAIN_GTT) {
92 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; 91 if (rbo->rdev->flags & RADEON_IS_AGP) {
93 if (domain & RADEON_GEM_DOMAIN_CPU) 92 rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
94 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 93 } else {
94 rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
95 }
96 }
97 if (domain & RADEON_GEM_DOMAIN_CPU) {
98 if (rbo->rdev->flags & RADEON_IS_AGP) {
99 rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
100 } else {
101 rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
102 }
103 }
95 if (!c) 104 if (!c)
96 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 105 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
97 rbo->placement.num_placement = c; 106 rbo->placement.num_placement = c;
107
108 c = 0;
109 rbo->placement.busy_placement = rbo->busy_placements;
110 if (rbo->rdev->flags & RADEON_IS_AGP) {
111 rbo->busy_placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
112 } else {
113 rbo->busy_placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
114 }
98 rbo->placement.num_busy_placement = c; 115 rbo->placement.num_busy_placement = c;
99} 116}
100 117
@@ -140,7 +157,7 @@ int radeon_bo_create(struct radeon_device *rdev,
140 /* Kernel allocation are uninterruptible */ 157 /* Kernel allocation are uninterruptible */
141 down_read(&rdev->pm.mclk_lock); 158 down_read(&rdev->pm.mclk_lock);
142 r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, 159 r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
143 &bo->placement, page_align, 0, !kernel, NULL, 160 &bo->placement, page_align, !kernel, NULL,
144 acc_size, sg, &radeon_ttm_bo_destroy); 161 acc_size, sg, &radeon_ttm_bo_destroy);
145 up_read(&rdev->pm.mclk_lock); 162 up_read(&rdev->pm.mclk_lock);
146 if (unlikely(r != 0)) { 163 if (unlikely(r != 0)) {
@@ -240,7 +257,7 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
240 } 257 }
241 for (i = 0; i < bo->placement.num_placement; i++) 258 for (i = 0; i < bo->placement.num_placement; i++)
242 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; 259 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
243 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false); 260 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
244 if (likely(r == 0)) { 261 if (likely(r == 0)) {
245 bo->pin_count = 1; 262 bo->pin_count = 1;
246 if (gpu_addr != NULL) 263 if (gpu_addr != NULL)
@@ -269,7 +286,7 @@ int radeon_bo_unpin(struct radeon_bo *bo)
269 return 0; 286 return 0;
270 for (i = 0; i < bo->placement.num_placement; i++) 287 for (i = 0; i < bo->placement.num_placement; i++)
271 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT; 288 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
272 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false); 289 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
273 if (unlikely(r != 0)) 290 if (unlikely(r != 0))
274 dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo); 291 dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
275 return r; 292 return r;
@@ -340,7 +357,6 @@ int radeon_bo_list_validate(struct list_head *head)
340{ 357{
341 struct radeon_bo_list *lobj; 358 struct radeon_bo_list *lobj;
342 struct radeon_bo *bo; 359 struct radeon_bo *bo;
343 u32 domain;
344 int r; 360 int r;
345 361
346 r = ttm_eu_reserve_buffers(head); 362 r = ttm_eu_reserve_buffers(head);
@@ -350,17 +366,9 @@ int radeon_bo_list_validate(struct list_head *head)
350 list_for_each_entry(lobj, head, tv.head) { 366 list_for_each_entry(lobj, head, tv.head) {
351 bo = lobj->bo; 367 bo = lobj->bo;
352 if (!bo->pin_count) { 368 if (!bo->pin_count) {
353 domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
354
355 retry:
356 radeon_ttm_placement_from_domain(bo, domain);
357 r = ttm_bo_validate(&bo->tbo, &bo->placement, 369 r = ttm_bo_validate(&bo->tbo, &bo->placement,
358 true, false, false); 370 true, false);
359 if (unlikely(r)) { 371 if (unlikely(r)) {
360 if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
361 domain |= RADEON_GEM_DOMAIN_GTT;
362 goto retry;
363 }
364 return r; 372 return r;
365 } 373 }
366 } 374 }
@@ -384,7 +392,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
384 int steal; 392 int steal;
385 int i; 393 int i;
386 394
387 BUG_ON(!atomic_read(&bo->tbo.reserved)); 395 BUG_ON(!radeon_bo_is_reserved(bo));
388 396
389 if (!bo->tiling_flags) 397 if (!bo->tiling_flags)
390 return 0; 398 return 0;
@@ -510,7 +518,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
510 uint32_t *tiling_flags, 518 uint32_t *tiling_flags,
511 uint32_t *pitch) 519 uint32_t *pitch)
512{ 520{
513 BUG_ON(!atomic_read(&bo->tbo.reserved)); 521 BUG_ON(!radeon_bo_is_reserved(bo));
514 if (tiling_flags) 522 if (tiling_flags)
515 *tiling_flags = bo->tiling_flags; 523 *tiling_flags = bo->tiling_flags;
516 if (pitch) 524 if (pitch)
@@ -520,7 +528,7 @@ void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
520int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, 528int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
521 bool force_drop) 529 bool force_drop)
522{ 530{
523 BUG_ON(!atomic_read(&bo->tbo.reserved)); 531 BUG_ON(!radeon_bo_is_reserved(bo) && !force_drop);
524 532
525 if (!(bo->tiling_flags & RADEON_TILING_SURFACE)) 533 if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
526 return 0; 534 return 0;
@@ -575,7 +583,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
575 /* hurrah the memory is not visible ! */ 583 /* hurrah the memory is not visible ! */
576 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); 584 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
577 rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; 585 rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
578 r = ttm_bo_validate(bo, &rbo->placement, false, true, false); 586 r = ttm_bo_validate(bo, &rbo->placement, false, false);
579 if (unlikely(r != 0)) 587 if (unlikely(r != 0))
580 return r; 588 return r;
581 offset = bo->mem.start << PAGE_SHIFT; 589 offset = bo->mem.start << PAGE_SHIFT;
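
The placement rework above gives the GTT and CPU domains explicit caching attributes (write-combined over AGP, cached otherwise) and moves the busy list into a separate busy_placements array that always permits a GTT fallback; that standing fallback is what lets radeon_bo_list_validate() drop its manual VRAM-to-GTT retry loop. The population logic restated as a self-contained function (the flag values are invented for the sketch; the real ones live in TTM's placement headers):

enum {
	PL_VRAM = 1, PL_TT = 2, PL_SYSTEM = 4,	/* where */
	FL_WC = 8, FL_CACHED = 16		/* how */
};

static int fill_placements(unsigned int domain, int is_agp,
			   unsigned int place[4], unsigned int busy[1])
{
	int c = 0;

	if (domain & PL_VRAM)
		place[c++] = PL_VRAM | FL_WC;
	if (domain & PL_TT)	/* GTT: WC over AGP, cached otherwise */
		place[c++] = PL_TT | (is_agp ? FL_WC : FL_CACHED);
	if (domain & PL_SYSTEM)
		place[c++] = PL_SYSTEM | (is_agp ? FL_WC : FL_CACHED);
	if (!c)
		place[c++] = PL_SYSTEM | FL_CACHED;
	/* busy list: always allow falling back to GTT under pressure */
	busy[0] = PL_TT | (is_agp ? FL_WC : FL_CACHED);
	return c;
}
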
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 93cd491fff2e..5fc86b03043b 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -80,7 +80,7 @@ static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
80 80
81static inline bool radeon_bo_is_reserved(struct radeon_bo *bo) 81static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
82{ 82{
83 return !!atomic_read(&bo->tbo.reserved); 83 return ttm_bo_is_reserved(&bo->tbo);
84} 84}
85 85
86static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo) 86static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 587c09a00ba2..fda09c9ea689 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -26,16 +26,31 @@
26#include "radeon_reg.h" 26#include "radeon_reg.h"
27#include "radeon.h" 27#include "radeon.h"
28 28
29#define RADEON_TEST_COPY_BLIT 1
30#define RADEON_TEST_COPY_DMA 0
31
29 32
30/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */ 33/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
31void radeon_test_moves(struct radeon_device *rdev) 34static void radeon_do_test_moves(struct radeon_device *rdev, int flag)
32{ 35{
33 struct radeon_bo *vram_obj = NULL; 36 struct radeon_bo *vram_obj = NULL;
34 struct radeon_bo **gtt_obj = NULL; 37 struct radeon_bo **gtt_obj = NULL;
35 struct radeon_fence *fence = NULL; 38 struct radeon_fence *fence = NULL;
36 uint64_t gtt_addr, vram_addr; 39 uint64_t gtt_addr, vram_addr;
37 unsigned i, n, size; 40 unsigned i, n, size;
38 int r; 41 int r, ring;
42
43 switch (flag) {
44 case RADEON_TEST_COPY_DMA:
45 ring = radeon_copy_dma_ring_index(rdev);
46 break;
47 case RADEON_TEST_COPY_BLIT:
48 ring = radeon_copy_blit_ring_index(rdev);
49 break;
50 default:
51 DRM_ERROR("Unknown copy method\n");
52 return;
53 }
39 54
40 size = 1024 * 1024; 55 size = 1024 * 1024;
41 56
@@ -106,7 +121,10 @@ void radeon_test_moves(struct radeon_device *rdev)
106 121
107 radeon_bo_kunmap(gtt_obj[i]); 122 radeon_bo_kunmap(gtt_obj[i]);
108 123
109 r = radeon_copy(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence); 124 if (ring == R600_RING_TYPE_DMA_INDEX)
125 r = radeon_copy_dma(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
126 else
127 r = radeon_copy_blit(rdev, gtt_addr, vram_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
110 if (r) { 128 if (r) {
111 DRM_ERROR("Failed GTT->VRAM copy %d\n", i); 129 DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
112 goto out_cleanup; 130 goto out_cleanup;
@@ -149,7 +167,10 @@ void radeon_test_moves(struct radeon_device *rdev)
149 167
150 radeon_bo_kunmap(vram_obj); 168 radeon_bo_kunmap(vram_obj);
151 169
152 r = radeon_copy(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence); 170 if (ring == R600_RING_TYPE_DMA_INDEX)
171 r = radeon_copy_dma(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
172 else
173 r = radeon_copy_blit(rdev, vram_addr, gtt_addr, size / RADEON_GPU_PAGE_SIZE, &fence);
153 if (r) { 174 if (r) {
154 DRM_ERROR("Failed VRAM->GTT copy %d\n", i); 175 DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
155 goto out_cleanup; 176 goto out_cleanup;
@@ -223,6 +244,14 @@ out_cleanup:
223 } 244 }
224} 245}
225 246
247void radeon_test_moves(struct radeon_device *rdev)
248{
249 if (rdev->asic->copy.dma)
250 radeon_do_test_moves(rdev, RADEON_TEST_COPY_DMA);
251 if (rdev->asic->copy.blit)
252 radeon_do_test_moves(rdev, RADEON_TEST_COPY_BLIT);
253}
254
226void radeon_test_ring_sync(struct radeon_device *rdev, 255void radeon_test_ring_sync(struct radeon_device *rdev,
227 struct radeon_ring *ringA, 256 struct radeon_ring *ringA,
228 struct radeon_ring *ringB) 257 struct radeon_ring *ringB)
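
radeon_test_moves() now runs the whole GTT/VRAM round trip once per available copy engine, choosing the ring through the asic copy table. A stand-alone analogue of its fill/copy/verify loop, with memcpy standing in for the blit and DMA engines (an assumption made purely for illustration):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef int (*copy_fn)(void *dst, const void *src, size_t n);

static int copy_memcpy(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);	/* the "engine" under test */
	return 0;
}

static int do_test_moves(copy_fn copy, size_t size)
{
	uint32_t *src = malloc(size), *dst = malloc(size);
	size_t i, n = size / sizeof(*src);
	int bad = (!src || !dst);

	for (i = 0; !bad && i < n; i++)
		src[i] = (uint32_t)i ^ 0xdeadbeef;	/* recognizable pattern */
	if (!bad && copy(dst, src, size))
		bad = 1;
	for (i = 0; !bad && i < n; i++)	/* verify word by word */
		if (dst[i] != src[i])
			bad = 1;
	free(src);
	free(dst);
	return bad ? -1 : 0;
}

int main(void)
{
	printf("copy test: %s\n",
	       do_test_moves(copy_memcpy, 1 << 20) ? "FAIL" : "ok");
	return 0;
}
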
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 5ebe1b3e5db2..1d8ff2f850ba 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -216,7 +216,7 @@ static void radeon_move_null(struct ttm_buffer_object *bo,
216} 216}
217 217
218static int radeon_move_blit(struct ttm_buffer_object *bo, 218static int radeon_move_blit(struct ttm_buffer_object *bo,
219 bool evict, int no_wait_reserve, bool no_wait_gpu, 219 bool evict, bool no_wait_gpu,
220 struct ttm_mem_reg *new_mem, 220 struct ttm_mem_reg *new_mem,
221 struct ttm_mem_reg *old_mem) 221 struct ttm_mem_reg *old_mem)
222{ 222{
@@ -265,15 +265,15 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
265 new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */ 265 new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
266 &fence); 266 &fence);
267 /* FIXME: handle copy error */ 267 /* FIXME: handle copy error */
268 r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL, 268 r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
269 evict, no_wait_reserve, no_wait_gpu, new_mem); 269 evict, no_wait_gpu, new_mem);
270 radeon_fence_unref(&fence); 270 radeon_fence_unref(&fence);
271 return r; 271 return r;
272} 272}
273 273
274static int radeon_move_vram_ram(struct ttm_buffer_object *bo, 274static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
275 bool evict, bool interruptible, 275 bool evict, bool interruptible,
276 bool no_wait_reserve, bool no_wait_gpu, 276 bool no_wait_gpu,
277 struct ttm_mem_reg *new_mem) 277 struct ttm_mem_reg *new_mem)
278{ 278{
279 struct radeon_device *rdev; 279 struct radeon_device *rdev;
@@ -294,7 +294,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
294 placement.busy_placement = &placements; 294 placement.busy_placement = &placements;
295 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; 295 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
296 r = ttm_bo_mem_space(bo, &placement, &tmp_mem, 296 r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
297 interruptible, no_wait_reserve, no_wait_gpu); 297 interruptible, no_wait_gpu);
298 if (unlikely(r)) { 298 if (unlikely(r)) {
299 return r; 299 return r;
300 } 300 }
@@ -308,11 +308,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
308 if (unlikely(r)) { 308 if (unlikely(r)) {
309 goto out_cleanup; 309 goto out_cleanup;
310 } 310 }
311 r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem); 311 r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
312 if (unlikely(r)) { 312 if (unlikely(r)) {
313 goto out_cleanup; 313 goto out_cleanup;
314 } 314 }
315 r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem); 315 r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
316out_cleanup: 316out_cleanup:
317 ttm_bo_mem_put(bo, &tmp_mem); 317 ttm_bo_mem_put(bo, &tmp_mem);
318 return r; 318 return r;
@@ -320,7 +320,7 @@ out_cleanup:
320 320
321static int radeon_move_ram_vram(struct ttm_buffer_object *bo, 321static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
322 bool evict, bool interruptible, 322 bool evict, bool interruptible,
323 bool no_wait_reserve, bool no_wait_gpu, 323 bool no_wait_gpu,
324 struct ttm_mem_reg *new_mem) 324 struct ttm_mem_reg *new_mem)
325{ 325{
326 struct radeon_device *rdev; 326 struct radeon_device *rdev;
@@ -340,15 +340,16 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
340 placement.num_busy_placement = 1; 340 placement.num_busy_placement = 1;
341 placement.busy_placement = &placements; 341 placement.busy_placement = &placements;
342 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; 342 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
343 r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu); 343 r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
344 interruptible, no_wait_gpu);
344 if (unlikely(r)) { 345 if (unlikely(r)) {
345 return r; 346 return r;
346 } 347 }
347 r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem); 348 r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
348 if (unlikely(r)) { 349 if (unlikely(r)) {
349 goto out_cleanup; 350 goto out_cleanup;
350 } 351 }
351 r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem); 352 r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
352 if (unlikely(r)) { 353 if (unlikely(r)) {
353 goto out_cleanup; 354 goto out_cleanup;
354 } 355 }
@@ -359,7 +360,7 @@ out_cleanup:
359 360
360static int radeon_bo_move(struct ttm_buffer_object *bo, 361static int radeon_bo_move(struct ttm_buffer_object *bo,
361 bool evict, bool interruptible, 362 bool evict, bool interruptible,
362 bool no_wait_reserve, bool no_wait_gpu, 363 bool no_wait_gpu,
363 struct ttm_mem_reg *new_mem) 364 struct ttm_mem_reg *new_mem)
364{ 365{
365 struct radeon_device *rdev; 366 struct radeon_device *rdev;
@@ -388,18 +389,18 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
388 if (old_mem->mem_type == TTM_PL_VRAM && 389 if (old_mem->mem_type == TTM_PL_VRAM &&
389 new_mem->mem_type == TTM_PL_SYSTEM) { 390 new_mem->mem_type == TTM_PL_SYSTEM) {
390 r = radeon_move_vram_ram(bo, evict, interruptible, 391 r = radeon_move_vram_ram(bo, evict, interruptible,
391 no_wait_reserve, no_wait_gpu, new_mem); 392 no_wait_gpu, new_mem);
392 } else if (old_mem->mem_type == TTM_PL_SYSTEM && 393 } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
393 new_mem->mem_type == TTM_PL_VRAM) { 394 new_mem->mem_type == TTM_PL_VRAM) {
394 r = radeon_move_ram_vram(bo, evict, interruptible, 395 r = radeon_move_ram_vram(bo, evict, interruptible,
395 no_wait_reserve, no_wait_gpu, new_mem); 396 no_wait_gpu, new_mem);
396 } else { 397 } else {
397 r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem); 398 r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
398 } 399 }
399 400
400 if (r) { 401 if (r) {
401memcpy: 402memcpy:
402 r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 403 r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
403 } 404 }
404 return r; 405 return r;
405} 406}
@@ -471,13 +472,12 @@ static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
471{ 472{
472} 473}
473 474
474static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg, 475static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
475 bool lazy, bool interruptible)
476{ 476{
477 return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible); 477 return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
478} 478}
479 479
480static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg) 480static int radeon_sync_obj_flush(void *sync_obj)
481{ 481{
482 return 0; 482 return 0;
483} 483}
@@ -492,7 +492,7 @@ static void *radeon_sync_obj_ref(void *sync_obj)
492 return radeon_fence_ref((struct radeon_fence *)sync_obj); 492 return radeon_fence_ref((struct radeon_fence *)sync_obj);
493} 493}
494 494
495static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg) 495static bool radeon_sync_obj_signaled(void *sync_obj)
496{ 496{
497 return radeon_fence_signaled((struct radeon_fence *)sync_obj); 497 return radeon_fence_signaled((struct radeon_fence *)sync_obj);
498} 498}
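
The radeon_ttm.c hunks above are mechanical signature cleanups: the unused void *sync_arg is dropped from the TTM sync-object hooks, and no_wait_reserve disappears from the buffer-move paths. A minimal standalone sketch of the resulting callback shape (hypothetical names, not the TTM API itself), assuming only that the hooks now take the bare sync object:

    #include <stdbool.h>
    #include <stdio.h>

    struct fence_ops {
            /* new-style hooks: no more void *sync_arg */
            int  (*wait)(void *sync_obj, bool lazy, bool interruptible);
            bool (*signaled)(void *sync_obj);
    };

    static int demo_wait(void *sync_obj, bool lazy, bool interruptible)
    {
            (void)lazy; (void)interruptible;
            return *(int *)sync_obj ? 0 : -1;   /* 0: fence already signaled */
    }

    static bool demo_signaled(void *sync_obj)
    {
            return *(int *)sync_obj != 0;
    }

    int main(void)
    {
            struct fence_ops ops = { .wait = demo_wait, .signaled = demo_signaled };
            int fence_state = 1;                /* pretend the GPU signaled */

            printf("signaled=%d wait=%d\n",
                   ops.signaled(&fence_state),
                   ops.wait(&fence_state, false, true));
            return 0;
    }
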
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 785d09590b24..2bb6d0e84b3d 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -40,6 +40,12 @@ static int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
40static void rv515_gpu_init(struct radeon_device *rdev); 40static void rv515_gpu_init(struct radeon_device *rdev);
41int rv515_mc_wait_for_idle(struct radeon_device *rdev); 41int rv515_mc_wait_for_idle(struct radeon_device *rdev);
42 42
43static const u32 crtc_offsets[2] =
44{
45 0,
46 AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
47};
48
43void rv515_debugfs(struct radeon_device *rdev) 49void rv515_debugfs(struct radeon_device *rdev)
44{ 50{
45 if (r100_debugfs_rbbm_init(rdev)) { 51 if (r100_debugfs_rbbm_init(rdev)) {
@@ -281,30 +287,114 @@ static int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
281 287
282void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save) 288void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
283{ 289{
290 u32 crtc_enabled, tmp, frame_count, blackout;
291 int i, j;
292
284 save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL); 293 save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
285 save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL); 294 save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);
286 295
287 /* Stop all video */ 296 /* disable VGA render */
288 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
289 WREG32(R_000300_VGA_RENDER_CONTROL, 0); 297 WREG32(R_000300_VGA_RENDER_CONTROL, 0);
290 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1); 298 /* blank the display controllers */
291 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1); 299 for (i = 0; i < rdev->num_crtc; i++) {
292 WREG32(R_006080_D1CRTC_CONTROL, 0); 300 crtc_enabled = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]) & AVIVO_CRTC_EN;
293 WREG32(R_006880_D2CRTC_CONTROL, 0); 301 if (crtc_enabled) {
294 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0); 302 save->crtc_enabled[i] = true;
295 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); 303 tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
296 WREG32(R_000330_D1VGA_CONTROL, 0); 304 if (!(tmp & AVIVO_CRTC_DISP_READ_REQUEST_DISABLE)) {
297 WREG32(R_000338_D2VGA_CONTROL, 0); 305 radeon_wait_for_vblank(rdev, i);
306 tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
307 WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
308 }
309 /* wait for the next frame */
310 frame_count = radeon_get_vblank_counter(rdev, i);
311 for (j = 0; j < rdev->usec_timeout; j++) {
312 if (radeon_get_vblank_counter(rdev, i) != frame_count)
313 break;
314 udelay(1);
315 }
316 } else {
317 save->crtc_enabled[i] = false;
318 }
319 }
320
321 radeon_mc_wait_for_idle(rdev);
322
323 if (rdev->family >= CHIP_R600) {
324 if (rdev->family >= CHIP_RV770)
325 blackout = RREG32(R700_MC_CITF_CNTL);
326 else
327 blackout = RREG32(R600_CITF_CNTL);
328 if ((blackout & R600_BLACKOUT_MASK) != R600_BLACKOUT_MASK) {
329 /* Block CPU access */
330 WREG32(R600_BIF_FB_EN, 0);
331 /* blackout the MC */
332 blackout |= R600_BLACKOUT_MASK;
333 if (rdev->family >= CHIP_RV770)
334 WREG32(R700_MC_CITF_CNTL, blackout);
335 else
336 WREG32(R600_CITF_CNTL, blackout);
337 }
338 }
298} 339}
299 340
300void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) 341void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
301{ 342{
302 WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start); 343 u32 tmp, frame_count;
303 WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start); 344 int i, j;
304 WREG32(R_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start); 345
305 WREG32(R_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start); 346 /* update crtc base addresses */
306 WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start); 347 for (i = 0; i < rdev->num_crtc; i++) {
307 /* Unlock host access */ 348 if (rdev->family >= CHIP_RV770) {
349 if (i == 1) {
350 WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
351 upper_32_bits(rdev->mc.vram_start));
352 WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
353 upper_32_bits(rdev->mc.vram_start));
354 } else {
355 WREG32(R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
356 upper_32_bits(rdev->mc.vram_start));
357 WREG32(R700_D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
358 upper_32_bits(rdev->mc.vram_start));
359 }
360 }
361 WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
362 (u32)rdev->mc.vram_start);
363 WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
364 (u32)rdev->mc.vram_start);
365 }
366 WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
367
368 if (rdev->family >= CHIP_R600) {
369 /* unblackout the MC */
370 if (rdev->family >= CHIP_RV770)
371 tmp = RREG32(R700_MC_CITF_CNTL);
372 else
373 tmp = RREG32(R600_CITF_CNTL);
374 tmp &= ~R600_BLACKOUT_MASK;
375 if (rdev->family >= CHIP_RV770)
376 WREG32(R700_MC_CITF_CNTL, tmp);
377 else
378 WREG32(R600_CITF_CNTL, tmp);
379 /* allow CPU access */
380 WREG32(R600_BIF_FB_EN, R600_FB_READ_EN | R600_FB_WRITE_EN);
381 }
382
383 for (i = 0; i < rdev->num_crtc; i++) {
384 if (save->crtc_enabled[i]) {
385 tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
386 tmp &= ~AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
387 WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
388 /* wait for the next frame */
389 frame_count = radeon_get_vblank_counter(rdev, i);
390 for (j = 0; j < rdev->usec_timeout; j++) {
391 if (radeon_get_vblank_counter(rdev, i) != frame_count)
392 break;
393 udelay(1);
394 }
395 }
396 }
397 /* Unlock vga access */
308 WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control); 398 WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
309 mdelay(1); 399 mdelay(1);
310 WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control); 400 WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
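
The rewritten rv515_mc_stop()/rv515_mc_resume() pair replaces the old "stop all video" register pokes with two patterns: blank each enabled CRTC and wait for the vblank counter to advance one frame, then read-modify-write the memory-controller blackout bits. A standalone sketch of both patterns with stubbed register I/O (rreg32/wreg32 and the counters below are stand-ins, not the radeon helpers):

    #include <stdio.h>

    #define BLACKOUT_MASK 0x3          /* stand-in for R600_BLACKOUT_MASK */

    static unsigned mc_ctl;            /* fake MC control register */
    static unsigned vblank;            /* fake per-CRTC frame counter */

    static unsigned rreg32(void) { return mc_ctl; }
    static void wreg32(unsigned v) { mc_ctl = v; }
    static unsigned get_vblank_counter(void) { return vblank++; }

    static void wait_one_frame(void)
    {
            unsigned frame = get_vblank_counter();
            int t;

            /* bounded poll, like the usec_timeout loops in the hunk */
            for (t = 0; t < 100000; t++)
                    if (get_vblank_counter() != frame)
                            break;
    }

    int main(void)
    {
            unsigned blackout = rreg32();

            if ((blackout & BLACKOUT_MASK) != BLACKOUT_MASK) {
                    wait_one_frame();                  /* settle the display */
                    wreg32(blackout | BLACKOUT_MASK);  /* stop: black out MC */
            }
            wreg32(rreg32() & ~BLACKOUT_MASK);         /* resume: clear bits */
            printf("mc_ctl=%#x\n", mc_ctl);
            return 0;
    }
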
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 79814a08c8e5..87c979c4f721 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -316,6 +316,7 @@ void r700_cp_stop(struct radeon_device *rdev)
316 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 316 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
317 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); 317 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
318 WREG32(SCRATCH_UMSK, 0); 318 WREG32(SCRATCH_UMSK, 0);
319 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
319} 320}
320 321
321static int rv770_cp_load_microcode(struct radeon_device *rdev) 322static int rv770_cp_load_microcode(struct radeon_device *rdev)
@@ -583,6 +584,8 @@ static void rv770_gpu_init(struct radeon_device *rdev)
583 WREG32(GB_TILING_CONFIG, gb_tiling_config); 584 WREG32(GB_TILING_CONFIG, gb_tiling_config);
584 WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 585 WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
585 WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 586 WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
587 WREG32(DMA_TILING_CONFIG, (gb_tiling_config & 0xffff));
588 WREG32(DMA_TILING_CONFIG2, (gb_tiling_config & 0xffff));
586 589
587 WREG32(CGTS_SYS_TCC_DISABLE, 0); 590 WREG32(CGTS_SYS_TCC_DISABLE, 0);
588 WREG32(CGTS_TCC_DISABLE, 0); 591 WREG32(CGTS_TCC_DISABLE, 0);
@@ -886,7 +889,7 @@ static int rv770_mc_init(struct radeon_device *rdev)
886 889
887static int rv770_startup(struct radeon_device *rdev) 890static int rv770_startup(struct radeon_device *rdev)
888{ 891{
889 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; 892 struct radeon_ring *ring;
890 int r; 893 int r;
891 894
892 /* enable pcie gen2 link */ 895 /* enable pcie gen2 link */
@@ -932,6 +935,12 @@ static int rv770_startup(struct radeon_device *rdev)
932 return r; 935 return r;
933 } 936 }
934 937
938 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
939 if (r) {
940 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
941 return r;
942 }
943
935 /* Enable IRQ */ 944 /* Enable IRQ */
936 r = r600_irq_init(rdev); 945 r = r600_irq_init(rdev);
937 if (r) { 946 if (r) {
@@ -941,11 +950,20 @@ static int rv770_startup(struct radeon_device *rdev)
941 } 950 }
942 r600_irq_set(rdev); 951 r600_irq_set(rdev);
943 952
953 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
944 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET, 954 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
945 R600_CP_RB_RPTR, R600_CP_RB_WPTR, 955 R600_CP_RB_RPTR, R600_CP_RB_WPTR,
946 0, 0xfffff, RADEON_CP_PACKET2); 956 0, 0xfffff, RADEON_CP_PACKET2);
947 if (r) 957 if (r)
948 return r; 958 return r;
959
960 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
961 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
962 DMA_RB_RPTR, DMA_RB_WPTR,
963 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
964 if (r)
965 return r;
966
949 r = rv770_cp_load_microcode(rdev); 967 r = rv770_cp_load_microcode(rdev);
950 if (r) 968 if (r)
951 return r; 969 return r;
@@ -953,6 +971,10 @@ static int rv770_startup(struct radeon_device *rdev)
953 if (r) 971 if (r)
954 return r; 972 return r;
955 973
974 r = r600_dma_resume(rdev);
975 if (r)
976 return r;
977
956 r = radeon_ib_pool_init(rdev); 978 r = radeon_ib_pool_init(rdev);
957 if (r) { 979 if (r) {
958 dev_err(rdev->dev, "IB initialization failed (%d).\n", r); 980 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -995,7 +1017,7 @@ int rv770_suspend(struct radeon_device *rdev)
995{ 1017{
996 r600_audio_fini(rdev); 1018 r600_audio_fini(rdev);
997 r700_cp_stop(rdev); 1019 r700_cp_stop(rdev);
998 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; 1020 r600_dma_stop(rdev);
999 r600_irq_suspend(rdev); 1021 r600_irq_suspend(rdev);
1000 radeon_wb_disable(rdev); 1022 radeon_wb_disable(rdev);
1001 rv770_pcie_gart_disable(rdev); 1023 rv770_pcie_gart_disable(rdev);
@@ -1066,6 +1088,9 @@ int rv770_init(struct radeon_device *rdev)
1066 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL; 1088 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
1067 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024); 1089 r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
1068 1090
1091 rdev->ring[R600_RING_TYPE_DMA_INDEX].ring_obj = NULL;
1092 r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX], 64 * 1024);
1093
1069 rdev->ih.ring_obj = NULL; 1094 rdev->ih.ring_obj = NULL;
1070 r600_ih_ring_init(rdev, 64 * 1024); 1095 r600_ih_ring_init(rdev, 64 * 1024);
1071 1096
@@ -1078,6 +1103,7 @@ int rv770_init(struct radeon_device *rdev)
1078 if (r) { 1103 if (r) {
1079 dev_err(rdev->dev, "disabling GPU acceleration\n"); 1104 dev_err(rdev->dev, "disabling GPU acceleration\n");
1080 r700_cp_fini(rdev); 1105 r700_cp_fini(rdev);
1106 r600_dma_fini(rdev);
1081 r600_irq_fini(rdev); 1107 r600_irq_fini(rdev);
1082 radeon_wb_fini(rdev); 1108 radeon_wb_fini(rdev);
1083 radeon_ib_pool_fini(rdev); 1109 radeon_ib_pool_fini(rdev);
@@ -1093,6 +1119,7 @@ void rv770_fini(struct radeon_device *rdev)
1093{ 1119{
1094 r600_blit_fini(rdev); 1120 r600_blit_fini(rdev);
1095 r700_cp_fini(rdev); 1121 r700_cp_fini(rdev);
1122 r600_dma_fini(rdev);
1096 r600_irq_fini(rdev); 1123 r600_irq_fini(rdev);
1097 radeon_wb_fini(rdev); 1124 radeon_wb_fini(rdev);
1098 radeon_ib_pool_fini(rdev); 1125 radeon_ib_pool_fini(rdev);
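
rv770_startup() now brings up the async DMA ring with an alignment of 2 dwords and a pointer mask of 0x3fffc, i.e. bits [17:2] of the ring-pointer registers: the low two bits are forced off so the pointer always stays dword aligned. A trivial standalone check of what that mask does to an arbitrary value:

    #include <stdio.h>

    #define DMA_PTR_MASK 0x3fffc   /* bits [17:2]: dword-aligned byte offset */

    int main(void)
    {
            unsigned raw = 0xdeadbeef;

            /* masking clears bits [1:0] and everything above bit 17 */
            printf("%#x -> %#x\n", raw, raw & DMA_PTR_MASK);
            return 0;
    }
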
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index b0adfc595d75..20e29d23d348 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -109,6 +109,9 @@
109#define PIPE_TILING__SHIFT 1 109#define PIPE_TILING__SHIFT 1
110#define PIPE_TILING__MASK 0x0000000e 110#define PIPE_TILING__MASK 0x0000000e
111 111
112#define DMA_TILING_CONFIG 0x3ec8
113#define DMA_TILING_CONFIG2 0xd0b8
114
112#define GC_USER_SHADER_PIPE_CONFIG 0x8954 115#define GC_USER_SHADER_PIPE_CONFIG 0x8954
113#define INACTIVE_QD_PIPES(x) ((x) << 8) 116#define INACTIVE_QD_PIPES(x) ((x) << 8)
114#define INACTIVE_QD_PIPES_MASK 0x0000FF00 117#define INACTIVE_QD_PIPES_MASK 0x0000FF00
@@ -358,6 +361,26 @@
358 361
359#define WAIT_UNTIL 0x8040 362#define WAIT_UNTIL 0x8040
360 363
364/* async DMA */
365#define DMA_RB_RPTR 0xd008
366#define DMA_RB_WPTR 0xd00c
367
368/* async DMA packets */
369#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
370 (((t) & 0x1) << 23) | \
371 (((s) & 0x1) << 22) | \
372 (((n) & 0xFFFF) << 0))
373/* async DMA Packet types */
374#define DMA_PACKET_WRITE 0x2
375#define DMA_PACKET_COPY 0x3
376#define DMA_PACKET_INDIRECT_BUFFER 0x4
377#define DMA_PACKET_SEMAPHORE 0x5
378#define DMA_PACKET_FENCE 0x6
379#define DMA_PACKET_TRAP 0x7
380#define DMA_PACKET_CONSTANT_FILL 0xd
381#define DMA_PACKET_NOP 0xf
382
383
361#define SRBM_STATUS 0x0E50 384#define SRBM_STATUS 0x0E50
362 385
363/* DCE 3.2 HDMI */ 386/* DCE 3.2 HDMI */
@@ -551,6 +574,54 @@
551#define HDMI_OFFSET0 (0x7400 - 0x7400) 574#define HDMI_OFFSET0 (0x7400 - 0x7400)
552#define HDMI_OFFSET1 (0x7800 - 0x7400) 575#define HDMI_OFFSET1 (0x7800 - 0x7400)
553 576
577/* DCE3.2 ELD audio interface */
578#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x71c8 /* LPCM */
579#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x71cc /* AC3 */
580#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x71d0 /* MPEG1 */
581#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3 0x71d4 /* MP3 */
582#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4 0x71d8 /* MPEG2 */
583#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5 0x71dc /* AAC */
584#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6 0x71e0 /* DTS */
585#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7 0x71e4 /* ATRAC */
586#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8 0x71e8 /* one bit audio - leave at 0 (default) */
587#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9 0x71ec /* Dolby Digital */
588#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10 0x71f0 /* DTS-HD */
589#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11 0x71f4 /* MAT-MLP */
590#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12 0x71f8 /* DTS */
591#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13 0x71fc /* WMA Pro */
592# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
593/* max channels minus one. 7 = 8 channels */
594# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
595# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
596# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
597/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
598 * bit0 = 32 kHz
599 * bit1 = 44.1 kHz
600 * bit2 = 48 kHz
601 * bit3 = 88.2 kHz
602 * bit4 = 96 kHz
603 * bit5 = 176.4 kHz
604 * bit6 = 192 kHz
605 */
606
607#define AZ_HOT_PLUG_CONTROL 0x7300
608# define AZ_FORCE_CODEC_WAKE (1 << 0)
609# define PIN0_JACK_DETECTION_ENABLE (1 << 4)
610# define PIN1_JACK_DETECTION_ENABLE (1 << 5)
611# define PIN2_JACK_DETECTION_ENABLE (1 << 6)
612# define PIN3_JACK_DETECTION_ENABLE (1 << 7)
613# define PIN0_UNSOLICITED_RESPONSE_ENABLE (1 << 8)
614# define PIN1_UNSOLICITED_RESPONSE_ENABLE (1 << 9)
615# define PIN2_UNSOLICITED_RESPONSE_ENABLE (1 << 10)
616# define PIN3_UNSOLICITED_RESPONSE_ENABLE (1 << 11)
617# define CODEC_HOT_PLUG_ENABLE (1 << 12)
618# define PIN0_AUDIO_ENABLED (1 << 24)
619# define PIN1_AUDIO_ENABLED (1 << 25)
620# define PIN2_AUDIO_ENABLED (1 << 26)
621# define PIN3_AUDIO_ENABLED (1 << 27)
622# define AUDIO_ENABLED (1 << 31)
623
624
554#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110 625#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
555#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914 626#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
556#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114 627#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114
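
The new rv770d.h definitions encode an async DMA packet header as four bit fields in one dword. The sketch below copies the DMA_PACKET macro verbatim from the hunk and round-trips a COPY header to show where each field lands (standalone, nothing radeon-specific beyond the copied macro):

    #include <stdio.h>

    /* copied from the rv770d.h hunk above */
    #define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) |  \
                                      (((t) & 0x1) << 23) |    \
                                      (((s) & 0x1) << 22) |    \
                                      (((n) & 0xFFFF) << 0))
    #define DMA_PACKET_COPY 0x3

    int main(void)
    {
            unsigned hdr = DMA_PACKET(DMA_PACKET_COPY, 1, 0, 16);

            printf("header %#010x\n", hdr);
            printf("cmd=%#x t=%u s=%u n=%u\n",
                   hdr >> 28, (hdr >> 23) & 1, (hdr >> 22) & 1, hdr & 0xFFFF);
            return 0;
    }
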
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 010156dd949f..ef683653f0b7 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1660,6 +1660,8 @@ static void si_gpu_init(struct radeon_device *rdev)
1660 WREG32(GB_ADDR_CONFIG, gb_addr_config); 1660 WREG32(GB_ADDR_CONFIG, gb_addr_config);
1661 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 1661 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
1662 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 1662 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
1663 WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
1664 WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
1663 1665
1664 si_tiling_mode_table_init(rdev); 1666 si_tiling_mode_table_init(rdev);
1665 1667
@@ -1836,6 +1838,9 @@ static void si_cp_enable(struct radeon_device *rdev, bool enable)
1836 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); 1838 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1837 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT)); 1839 WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
1838 WREG32(SCRATCH_UMSK, 0); 1840 WREG32(SCRATCH_UMSK, 0);
1841 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1842 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
1843 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
1839 } 1844 }
1840 udelay(50); 1845 udelay(50);
1841} 1846}
@@ -2426,9 +2431,20 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
2426 /* enable context1-15 */ 2431 /* enable context1-15 */
2427 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR, 2432 WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
2428 (u32)(rdev->dummy_page.addr >> 12)); 2433 (u32)(rdev->dummy_page.addr >> 12));
2429 WREG32(VM_CONTEXT1_CNTL2, 0); 2434 WREG32(VM_CONTEXT1_CNTL2, 4);
2430 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) | 2435 WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
2431 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); 2436 RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
2437 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
2438 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
2439 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
2440 PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
2441 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
2442 VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
2443 VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
2444 READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
2445 READ_PROTECTION_FAULT_ENABLE_DEFAULT |
2446 WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
2447 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
2432 2448
2433 si_pcie_gart_tlb_flush(rdev); 2449 si_pcie_gart_tlb_flush(rdev);
2434 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", 2450 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -2534,6 +2550,7 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
2534 u32 idx = pkt->idx + 1; 2550 u32 idx = pkt->idx + 1;
2535 u32 idx_value = ib[idx]; 2551 u32 idx_value = ib[idx];
2536 u32 start_reg, end_reg, reg, i; 2552 u32 start_reg, end_reg, reg, i;
2553 u32 command, info;
2537 2554
2538 switch (pkt->opcode) { 2555 switch (pkt->opcode) {
2539 case PACKET3_NOP: 2556 case PACKET3_NOP:
@@ -2633,6 +2650,52 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
2633 return -EINVAL; 2650 return -EINVAL;
2634 } 2651 }
2635 break; 2652 break;
2653 case PACKET3_CP_DMA:
2654 command = ib[idx + 4];
2655 info = ib[idx + 1];
2656 if (command & PACKET3_CP_DMA_CMD_SAS) {
2657 /* src address space is register */
2658 if (((info & 0x60000000) >> 29) == 0) {
2659 start_reg = idx_value << 2;
2660 if (command & PACKET3_CP_DMA_CMD_SAIC) {
2661 reg = start_reg;
2662 if (!si_vm_reg_valid(reg)) {
2663 DRM_ERROR("CP DMA Bad SRC register\n");
2664 return -EINVAL;
2665 }
2666 } else {
2667 for (i = 0; i < (command & 0x1fffff); i++) {
2668 reg = start_reg + (4 * i);
2669 if (!si_vm_reg_valid(reg)) {
2670 DRM_ERROR("CP DMA Bad SRC register\n");
2671 return -EINVAL;
2672 }
2673 }
2674 }
2675 }
2676 }
2677 if (command & PACKET3_CP_DMA_CMD_DAS) {
2678 /* dst address space is register */
2679 if (((info & 0x00300000) >> 20) == 0) {
2680 start_reg = ib[idx + 2];
2681 if (command & PACKET3_CP_DMA_CMD_DAIC) {
2682 reg = start_reg;
2683 if (!si_vm_reg_valid(reg)) {
2684 DRM_ERROR("CP DMA Bad DST register\n");
2685 return -EINVAL;
2686 }
2687 } else {
2688 for (i = 0; i < (command & 0x1fffff); i++) {
2689 reg = start_reg + (4 * i);
2690 if (!si_vm_reg_valid(reg)) {
2691 DRM_ERROR("CP DMA Bad DST register\n");
2692 return -EINVAL;
2693 }
2694 }
2695 }
2696 }
2697 }
2698 break;
2636 default: 2699 default:
2637 DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode); 2700 DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
2638 return -EINVAL; 2701 return -EINVAL;
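
The new PACKET3_CP_DMA case validates register apertures: when SAS/DAS say an address is a register and auto-increment is not suppressed (SAIC/DAIC clear), every consecutive dword register the transfer would touch is checked. A standalone sketch of that walk; reg_valid() is a stand-in for si_vm_reg_valid(), and the loop bound mirrors the hunk's use of the COMMAND byte-count field:

    #include <stdbool.h>
    #include <stdio.h>

    static bool reg_valid(unsigned reg) { return reg < 0x10000; }

    int main(void)
    {
            unsigned command = 8;        /* BYTE_COUNT field, bits [20:0] */
            unsigned start_reg = 0xfffc;
            unsigned i, reg;

            for (i = 0; i < (command & 0x1fffff); i++) {
                    reg = start_reg + 4 * i;   /* consecutive dword registers */
                    if (!reg_valid(reg)) {
                            printf("bad register %#x\n", reg);
                            return 1;
                    }
            }
            printf("all registers ok\n");
            return 0;
    }
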
@@ -2809,30 +2872,86 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
2809{ 2872{
2810 struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index]; 2873 struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
2811 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags); 2874 uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
2812 2875 uint64_t value;
2813 while (count) { 2876 unsigned ndw;
2814 unsigned ndw = 2 + count * 2; 2877
2815 if (ndw > 0x3FFE) 2878 if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
2816 ndw = 0x3FFE; 2879 while (count) {
2817 2880 ndw = 2 + count * 2;
2818 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw)); 2881 if (ndw > 0x3FFE)
2819 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | 2882 ndw = 0x3FFE;
2820 WRITE_DATA_DST_SEL(1))); 2883
2821 radeon_ring_write(ring, pe); 2884 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
2822 radeon_ring_write(ring, upper_32_bits(pe)); 2885 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
2823 for (; ndw > 2; ndw -= 2, --count, pe += 8) { 2886 WRITE_DATA_DST_SEL(1)));
2824 uint64_t value; 2887 radeon_ring_write(ring, pe);
2825 if (flags & RADEON_VM_PAGE_SYSTEM) { 2888 radeon_ring_write(ring, upper_32_bits(pe));
2826 value = radeon_vm_map_gart(rdev, addr); 2889 for (; ndw > 2; ndw -= 2, --count, pe += 8) {
2827 value &= 0xFFFFFFFFFFFFF000ULL; 2890 if (flags & RADEON_VM_PAGE_SYSTEM) {
2828 } else if (flags & RADEON_VM_PAGE_VALID) 2891 value = radeon_vm_map_gart(rdev, addr);
2829 value = addr; 2892 value &= 0xFFFFFFFFFFFFF000ULL;
2830 else 2893 } else if (flags & RADEON_VM_PAGE_VALID) {
2831 value = 0; 2894 value = addr;
2832 addr += incr; 2895 } else {
2833 value |= r600_flags; 2896 value = 0;
2834 radeon_ring_write(ring, value); 2897 }
2835 radeon_ring_write(ring, upper_32_bits(value)); 2898 addr += incr;
2899 value |= r600_flags;
2900 radeon_ring_write(ring, value);
2901 radeon_ring_write(ring, upper_32_bits(value));
2902 }
2903 }
2904 } else {
2905 /* DMA */
2906 if (flags & RADEON_VM_PAGE_SYSTEM) {
2907 while (count) {
2908 ndw = count * 2;
2909 if (ndw > 0xFFFFE)
2910 ndw = 0xFFFFE;
2911
2912 /* for non-physically contiguous pages (system) */
2913 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw));
2914 radeon_ring_write(ring, pe);
2915 radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
2916 for (; ndw > 0; ndw -= 2, --count, pe += 8) {
2917 if (flags & RADEON_VM_PAGE_SYSTEM) {
2918 value = radeon_vm_map_gart(rdev, addr);
2919 value &= 0xFFFFFFFFFFFFF000ULL;
2920 } else if (flags & RADEON_VM_PAGE_VALID) {
2921 value = addr;
2922 } else {
2923 value = 0;
2924 }
2925 addr += incr;
2926 value |= r600_flags;
2927 radeon_ring_write(ring, value);
2928 radeon_ring_write(ring, upper_32_bits(value));
2929 }
2930 }
2931 } else {
2932 while (count) {
2933 ndw = count * 2;
2934 if (ndw > 0xFFFFE)
2935 ndw = 0xFFFFE;
2936
2937 if (flags & RADEON_VM_PAGE_VALID)
2938 value = addr;
2939 else
2940 value = 0;
2941 /* for physically contiguous pages (vram) */
2942 radeon_ring_write(ring, DMA_PTE_PDE_PACKET(ndw));
2943 radeon_ring_write(ring, pe); /* dst addr */
2944 radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
2945 radeon_ring_write(ring, r600_flags); /* mask */
2946 radeon_ring_write(ring, 0);
2947 radeon_ring_write(ring, value); /* value */
2948 radeon_ring_write(ring, upper_32_bits(value));
2949 radeon_ring_write(ring, incr); /* increment size */
2950 radeon_ring_write(ring, 0);
2951 pe += ndw * 4;
2952 addr += (ndw / 2) * incr;
2953 count -= ndw / 2;
2954 }
2836 } 2955 }
2837 } 2956 }
2838} 2957}
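
si_vm_set_page() now chunks page-table updates differently per engine: the CP path emits PACKET3_WRITE_DATA with at most 0x3FFE dwords (2 dwords of header/address plus 2 per PTE), while the DMA path allows up to 0xFFFFE dwords per packet. A standalone recreation of the CP-side chunking arithmetic, constants taken from the hunk:

    #include <stdio.h>

    int main(void)
    {
            unsigned count = 1 << 20;            /* PTEs to write */
            unsigned ndw, packets = 0;

            while (count) {
                    ndw = 2 + count * 2;         /* header + 2 dwords per PTE */
                    if (ndw > 0x3FFE)
                            ndw = 0x3FFE;
                    count -= (ndw - 2) / 2;      /* PTEs consumed this packet */
                    packets++;
            }
            printf("CP path: %u packets\n", packets);
            return 0;
    }
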
@@ -2880,6 +2999,32 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
2880 radeon_ring_write(ring, 0x0); 2999 radeon_ring_write(ring, 0x0);
2881} 3000}
2882 3001
3002void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
3003{
3004 struct radeon_ring *ring = &rdev->ring[ridx];
3005
3006 if (vm == NULL)
3007 return;
3008
3009 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
3010 if (vm->id < 8) {
3011 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
3012 } else {
3013 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
3014 }
3015 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
3016
3017 /* flush hdp cache */
3018 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
3019 radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
3020 radeon_ring_write(ring, 1);
3021
3022 /* bits 0-7 are the VM contexts0-7 */
3023 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
3024 radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
3025 radeon_ring_write(ring, 1 << vm->id);
3026}
3027
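
si_dma_vm_flush() drives SRBM registers from the DMA ring: each DMA_PACKET_SRBM_WRITE is followed by a dword that packs what appears to be a byte-enable nibble (0xf) in bits [19:16] above the register's dword index (byte offset >> 2). A minimal check of that encoding, using the VM_INVALIDATE_REQUEST offset from the sid.h hunk below:

    #include <stdio.h>

    #define VM_INVALIDATE_REQUEST 0x1478    /* from sid.h */

    int main(void)
    {
            unsigned dw = (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2);

            printf("payload=%#x (enable=%#x, dword index=%#x)\n",
                   dw, (dw >> 16) & 0xf, dw & 0xffff);
            return 0;
    }
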
2883/* 3028/*
2884 * RLC 3029 * RLC
2885 */ 3030 */
@@ -3048,6 +3193,10 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
3048 WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE); 3193 WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
3049 WREG32(CP_INT_CNTL_RING1, 0); 3194 WREG32(CP_INT_CNTL_RING1, 0);
3050 WREG32(CP_INT_CNTL_RING2, 0); 3195 WREG32(CP_INT_CNTL_RING2, 0);
3196 tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
3197 WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, tmp);
3198 tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
3199 WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
3051 WREG32(GRBM_INT_CNTL, 0); 3200 WREG32(GRBM_INT_CNTL, 0);
3052 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); 3201 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
3053 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); 3202 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -3167,6 +3316,7 @@ int si_irq_set(struct radeon_device *rdev)
3167 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; 3316 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
3168 u32 grbm_int_cntl = 0; 3317 u32 grbm_int_cntl = 0;
3169 u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; 3318 u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
3319 u32 dma_cntl, dma_cntl1;
3170 3320
3171 if (!rdev->irq.installed) { 3321 if (!rdev->irq.installed) {
3172 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); 3322 WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3187,6 +3337,9 @@ int si_irq_set(struct radeon_device *rdev)
3187 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; 3337 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
3188 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; 3338 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
3189 3339
3340 dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
3341 dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
3342
3190 /* enable CP interrupts on all rings */ 3343 /* enable CP interrupts on all rings */
3191 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) { 3344 if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
3192 DRM_DEBUG("si_irq_set: sw int gfx\n"); 3345 DRM_DEBUG("si_irq_set: sw int gfx\n");
@@ -3200,6 +3353,15 @@ int si_irq_set(struct radeon_device *rdev)
3200 DRM_DEBUG("si_irq_set: sw int cp2\n"); 3353 DRM_DEBUG("si_irq_set: sw int cp2\n");
3201 cp_int_cntl2 |= TIME_STAMP_INT_ENABLE; 3354 cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
3202 } 3355 }
3356 if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
3357 DRM_DEBUG("si_irq_set: sw int dma\n");
3358 dma_cntl |= TRAP_ENABLE;
3359 }
3360
3361 if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
3362 DRM_DEBUG("si_irq_set: sw int dma1\n");
3363 dma_cntl1 |= TRAP_ENABLE;
3364 }
3203 if (rdev->irq.crtc_vblank_int[0] || 3365 if (rdev->irq.crtc_vblank_int[0] ||
3204 atomic_read(&rdev->irq.pflip[0])) { 3366 atomic_read(&rdev->irq.pflip[0])) {
3205 DRM_DEBUG("si_irq_set: vblank 0\n"); 3367 DRM_DEBUG("si_irq_set: vblank 0\n");
@@ -3259,6 +3421,9 @@ int si_irq_set(struct radeon_device *rdev)
3259 WREG32(CP_INT_CNTL_RING1, cp_int_cntl1); 3421 WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
3260 WREG32(CP_INT_CNTL_RING2, cp_int_cntl2); 3422 WREG32(CP_INT_CNTL_RING2, cp_int_cntl2);
3261 3423
3424 WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, dma_cntl);
3425 WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, dma_cntl1);
3426
3262 WREG32(GRBM_INT_CNTL, grbm_int_cntl); 3427 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
3263 3428
3264 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); 3429 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -3684,6 +3849,16 @@ restart_ih:
3684 break; 3849 break;
3685 } 3850 }
3686 break; 3851 break;
3852 case 146:
3853 case 147:
3854 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
3855 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
3856 RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
3857 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
3858 RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
3859 /* reset addr and status */
3860 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
3861 break;
3687 case 176: /* RINGID0 CP_INT */ 3862 case 176: /* RINGID0 CP_INT */
3688 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); 3863 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
3689 break; 3864 break;
@@ -3707,9 +3882,17 @@ restart_ih:
3707 break; 3882 break;
3708 } 3883 }
3709 break; 3884 break;
3885 case 224: /* DMA trap event */
3886 DRM_DEBUG("IH: DMA trap\n");
3887 radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
3888 break;
3710 case 233: /* GUI IDLE */ 3889 case 233: /* GUI IDLE */
3711 DRM_DEBUG("IH: GUI idle\n"); 3890 DRM_DEBUG("IH: GUI idle\n");
3712 break; 3891 break;
 3892 case 244: /* DMA1 trap event */
3893 DRM_DEBUG("IH: DMA1 trap\n");
3894 radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
3895 break;
3713 default: 3896 default:
3714 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data); 3897 DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
3715 break; 3898 break;
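
The interrupt-handler hunk adds three groups of source IDs: 146/147 report VM protection faults (dumped and then acknowledged by setting bit 0 of VM_CONTEXT1_CNTL2), while 224 and 244 are the trap interrupts of the two async DMA engines and feed fence processing. A standalone sketch of that dispatch shape; the handlers just print, and the real driver's register traffic is omitted:

    #include <stdio.h>

    static void handle_src(int src_id)
    {
            switch (src_id) {
            case 146:
            case 147:
                    printf("GPU fault: dump addr/status, ack via CNTL2 bit 0\n");
                    break;
            case 224:
                    printf("DMA trap: process DMA ring fences\n");
                    break;
            case 244:
                    printf("DMA1 trap: process DMA1 ring fences\n");
                    break;
            default:
                    printf("unhandled src_id %d\n", src_id);
            }
    }

    int main(void)
    {
            int ids[] = { 146, 224, 244, 7 };
            unsigned i;

            for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
                    handle_src(ids[i]);
            return 0;
    }
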
@@ -3733,6 +3916,80 @@ restart_ih:
3733 return IRQ_HANDLED; 3916 return IRQ_HANDLED;
3734} 3917}
3735 3918
3919/**
3920 * si_copy_dma - copy pages using the DMA engine
3921 *
3922 * @rdev: radeon_device pointer
3923 * @src_offset: src GPU address
3924 * @dst_offset: dst GPU address
3925 * @num_gpu_pages: number of GPU pages to xfer
3926 * @fence: radeon fence object
3927 *
 3928 * Copy GPU pages using the DMA engine (SI).
3929 * Used by the radeon ttm implementation to move pages if
3930 * registered as the asic copy callback.
3931 */
3932int si_copy_dma(struct radeon_device *rdev,
3933 uint64_t src_offset, uint64_t dst_offset,
3934 unsigned num_gpu_pages,
3935 struct radeon_fence **fence)
3936{
3937 struct radeon_semaphore *sem = NULL;
3938 int ring_index = rdev->asic->copy.dma_ring_index;
3939 struct radeon_ring *ring = &rdev->ring[ring_index];
3940 u32 size_in_bytes, cur_size_in_bytes;
3941 int i, num_loops;
3942 int r = 0;
3943
3944 r = radeon_semaphore_create(rdev, &sem);
3945 if (r) {
3946 DRM_ERROR("radeon: moving bo (%d).\n", r);
3947 return r;
3948 }
3949
3950 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
3951 num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
3952 r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
3953 if (r) {
3954 DRM_ERROR("radeon: moving bo (%d).\n", r);
3955 radeon_semaphore_free(rdev, &sem, NULL);
3956 return r;
3957 }
3958
3959 if (radeon_fence_need_sync(*fence, ring->idx)) {
3960 radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
3961 ring->idx);
3962 radeon_fence_note_sync(*fence, ring->idx);
3963 } else {
3964 radeon_semaphore_free(rdev, &sem, NULL);
3965 }
3966
3967 for (i = 0; i < num_loops; i++) {
3968 cur_size_in_bytes = size_in_bytes;
3969 if (cur_size_in_bytes > 0xFFFFF)
3970 cur_size_in_bytes = 0xFFFFF;
3971 size_in_bytes -= cur_size_in_bytes;
3972 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
3973 radeon_ring_write(ring, dst_offset & 0xffffffff);
3974 radeon_ring_write(ring, src_offset & 0xffffffff);
3975 radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
3976 radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
3977 src_offset += cur_size_in_bytes;
3978 dst_offset += cur_size_in_bytes;
3979 }
3980
3981 r = radeon_fence_emit(rdev, fence, ring->idx);
3982 if (r) {
3983 radeon_ring_unlock_undo(rdev, ring);
3984 return r;
3985 }
3986
3987 radeon_ring_unlock_commit(rdev, ring);
3988 radeon_semaphore_free(rdev, &sem, *fence);
3989
3990 return r;
3991}
3992
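
si_copy_dma() splits a transfer into DMA_PACKET_COPY packets of at most 0xFFFFF bytes each and reserves ring space up front: five dwords per packet plus eleven for the semaphore/fence overhead. A standalone recreation of that sizing arithmetic (GPU_PAGE_SHIFT is a stand-in for RADEON_GPU_PAGE_SHIFT, assuming 4 KiB GPU pages):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
    #define GPU_PAGE_SHIFT 12            /* 4 KiB GPU pages assumed */

    int main(void)
    {
            unsigned num_pages = 4096;   /* 16 MiB worth of pages */
            unsigned size = num_pages << GPU_PAGE_SHIFT;
            unsigned loops = DIV_ROUND_UP(size, 0xfffff);

            printf("bytes=%u packets=%u ring dwords=%u\n",
                   size, loops, loops * 5 + 11);
            return 0;
    }
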
3736/* 3993/*
3737 * startup/shutdown callbacks 3994 * startup/shutdown callbacks
3738 */ 3995 */
@@ -3804,6 +4061,18 @@ static int si_startup(struct radeon_device *rdev)
3804 return r; 4061 return r;
3805 } 4062 }
3806 4063
4064 r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
4065 if (r) {
4066 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
4067 return r;
4068 }
4069
4070 r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
4071 if (r) {
4072 dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
4073 return r;
4074 }
4075
3807 /* Enable IRQ */ 4076 /* Enable IRQ */
3808 r = si_irq_init(rdev); 4077 r = si_irq_init(rdev);
3809 if (r) { 4078 if (r) {
@@ -3834,6 +4103,22 @@ static int si_startup(struct radeon_device *rdev)
3834 if (r) 4103 if (r)
3835 return r; 4104 return r;
3836 4105
4106 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
4107 r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
4108 DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
4109 DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
4110 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
4111 if (r)
4112 return r;
4113
4114 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
4115 r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
4116 DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
4117 DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
4118 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
4119 if (r)
4120 return r;
4121
3837 r = si_cp_load_microcode(rdev); 4122 r = si_cp_load_microcode(rdev);
3838 if (r) 4123 if (r)
3839 return r; 4124 return r;
@@ -3841,6 +4126,10 @@ static int si_startup(struct radeon_device *rdev)
3841 if (r) 4126 if (r)
3842 return r; 4127 return r;
3843 4128
4129 r = cayman_dma_resume(rdev);
4130 if (r)
4131 return r;
4132
3844 r = radeon_ib_pool_init(rdev); 4133 r = radeon_ib_pool_init(rdev);
3845 if (r) { 4134 if (r) {
3846 dev_err(rdev->dev, "IB initialization failed (%d).\n", r); 4135 dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -3882,9 +4171,7 @@ int si_resume(struct radeon_device *rdev)
3882int si_suspend(struct radeon_device *rdev) 4171int si_suspend(struct radeon_device *rdev)
3883{ 4172{
3884 si_cp_enable(rdev, false); 4173 si_cp_enable(rdev, false);
3885 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false; 4174 cayman_dma_stop(rdev);
3886 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
3887 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
3888 si_irq_suspend(rdev); 4175 si_irq_suspend(rdev);
3889 radeon_wb_disable(rdev); 4176 radeon_wb_disable(rdev);
3890 si_pcie_gart_disable(rdev); 4177 si_pcie_gart_disable(rdev);
@@ -3962,6 +4249,14 @@ int si_init(struct radeon_device *rdev)
3962 ring->ring_obj = NULL; 4249 ring->ring_obj = NULL;
3963 r600_ring_init(rdev, ring, 1024 * 1024); 4250 r600_ring_init(rdev, ring, 1024 * 1024);
3964 4251
4252 ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
4253 ring->ring_obj = NULL;
4254 r600_ring_init(rdev, ring, 64 * 1024);
4255
4256 ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
4257 ring->ring_obj = NULL;
4258 r600_ring_init(rdev, ring, 64 * 1024);
4259
3965 rdev->ih.ring_obj = NULL; 4260 rdev->ih.ring_obj = NULL;
3966 r600_ih_ring_init(rdev, 64 * 1024); 4261 r600_ih_ring_init(rdev, 64 * 1024);
3967 4262
@@ -3974,6 +4269,7 @@ int si_init(struct radeon_device *rdev)
3974 if (r) { 4269 if (r) {
3975 dev_err(rdev->dev, "disabling GPU acceleration\n"); 4270 dev_err(rdev->dev, "disabling GPU acceleration\n");
3976 si_cp_fini(rdev); 4271 si_cp_fini(rdev);
4272 cayman_dma_fini(rdev);
3977 si_irq_fini(rdev); 4273 si_irq_fini(rdev);
3978 si_rlc_fini(rdev); 4274 si_rlc_fini(rdev);
3979 radeon_wb_fini(rdev); 4275 radeon_wb_fini(rdev);
@@ -4002,6 +4298,7 @@ void si_fini(struct radeon_device *rdev)
4002 r600_blit_fini(rdev); 4298 r600_blit_fini(rdev);
4003#endif 4299#endif
4004 si_cp_fini(rdev); 4300 si_cp_fini(rdev);
4301 cayman_dma_fini(rdev);
4005 si_irq_fini(rdev); 4302 si_irq_fini(rdev);
4006 si_rlc_fini(rdev); 4303 si_rlc_fini(rdev);
4007 radeon_wb_fini(rdev); 4304 radeon_wb_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index a8871afc5b4e..62b46215d423 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -91,7 +91,18 @@
91#define VM_CONTEXT0_CNTL 0x1410 91#define VM_CONTEXT0_CNTL 0x1410
92#define ENABLE_CONTEXT (1 << 0) 92#define ENABLE_CONTEXT (1 << 0)
93#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1) 93#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
94#define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3)
94#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4) 95#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
96#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 6)
97#define DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 7)
98#define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 9)
99#define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 10)
100#define VALID_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 12)
101#define VALID_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 13)
102#define READ_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 15)
103#define READ_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 16)
104#define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
105#define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 19)
95#define VM_CONTEXT1_CNTL 0x1414 106#define VM_CONTEXT1_CNTL 0x1414
96#define VM_CONTEXT0_CNTL2 0x1430 107#define VM_CONTEXT0_CNTL2 0x1430
97#define VM_CONTEXT1_CNTL2 0x1434 108#define VM_CONTEXT1_CNTL2 0x1434
@@ -104,6 +115,9 @@
104#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR 0x1450 115#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR 0x1450
105#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR 0x1454 116#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR 0x1454
106 117
118#define VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x14FC
119#define VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x14DC
120
107#define VM_INVALIDATE_REQUEST 0x1478 121#define VM_INVALIDATE_REQUEST 0x1478
108#define VM_INVALIDATE_RESPONSE 0x147c 122#define VM_INVALIDATE_RESPONSE 0x147c
109 123
@@ -835,6 +849,54 @@
835#define PACKET3_WAIT_REG_MEM 0x3C 849#define PACKET3_WAIT_REG_MEM 0x3C
836#define PACKET3_MEM_WRITE 0x3D 850#define PACKET3_MEM_WRITE 0x3D
837#define PACKET3_COPY_DATA 0x40 851#define PACKET3_COPY_DATA 0x40
852#define PACKET3_CP_DMA 0x41
853/* 1. header
854 * 2. SRC_ADDR_LO or DATA [31:0]
855 * 3. CP_SYNC [31] | SRC_SEL [30:29] | ENGINE [27] | DST_SEL [21:20] |
856 * SRC_ADDR_HI [7:0]
857 * 4. DST_ADDR_LO [31:0]
858 * 5. DST_ADDR_HI [7:0]
859 * 6. COMMAND [30:21] | BYTE_COUNT [20:0]
860 */
861# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
862 /* 0 - SRC_ADDR
863 * 1 - GDS
864 */
865# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
866 /* 0 - ME
867 * 1 - PFP
868 */
869# define PACKET3_CP_DMA_SRC_SEL(x) ((x) << 29)
870 /* 0 - SRC_ADDR
871 * 1 - GDS
872 * 2 - DATA
873 */
874# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
875/* COMMAND */
876# define PACKET3_CP_DMA_DIS_WC (1 << 21)
877# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
878 /* 0 - none
879 * 1 - 8 in 16
880 * 2 - 8 in 32
881 * 3 - 8 in 64
882 */
883# define PACKET3_CP_DMA_CMD_DST_SWAP(x) ((x) << 24)
884 /* 0 - none
885 * 1 - 8 in 16
886 * 2 - 8 in 32
887 * 3 - 8 in 64
888 */
889# define PACKET3_CP_DMA_CMD_SAS (1 << 26)
890 /* 0 - memory
891 * 1 - register
892 */
893# define PACKET3_CP_DMA_CMD_DAS (1 << 27)
894 /* 0 - memory
895 * 1 - register
896 */
897# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
898# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
899# define PACKET3_CP_DMA_CMD_RAW_WAIT (1 << 30)
838#define PACKET3_PFP_SYNC_ME 0x42 900#define PACKET3_PFP_SYNC_ME 0x42
839#define PACKET3_SURFACE_SYNC 0x43 901#define PACKET3_SURFACE_SYNC 0x43
840# define PACKET3_DEST_BASE_0_ENA (1 << 0) 902# define PACKET3_DEST_BASE_0_ENA (1 << 0)
@@ -922,4 +984,61 @@
922#define PACKET3_WAIT_ON_AVAIL_BUFFER 0x8A 984#define PACKET3_WAIT_ON_AVAIL_BUFFER 0x8A
923#define PACKET3_SWITCH_BUFFER 0x8B 985#define PACKET3_SWITCH_BUFFER 0x8B
924 986
987/* ASYNC DMA - first instance at 0xd000, second at 0xd800 */
988#define DMA0_REGISTER_OFFSET 0x0 /* not a register */
989#define DMA1_REGISTER_OFFSET 0x800 /* not a register */
990
991#define DMA_RB_CNTL 0xd000
992# define DMA_RB_ENABLE (1 << 0)
993# define DMA_RB_SIZE(x) ((x) << 1) /* log2 */
994# define DMA_RB_SWAP_ENABLE (1 << 9) /* 8IN32 */
995# define DMA_RPTR_WRITEBACK_ENABLE (1 << 12)
996# define DMA_RPTR_WRITEBACK_SWAP_ENABLE (1 << 13) /* 8IN32 */
997# define DMA_RPTR_WRITEBACK_TIMER(x) ((x) << 16) /* log2 */
998#define DMA_RB_BASE 0xd004
999#define DMA_RB_RPTR 0xd008
1000#define DMA_RB_WPTR 0xd00c
1001
1002#define DMA_RB_RPTR_ADDR_HI 0xd01c
1003#define DMA_RB_RPTR_ADDR_LO 0xd020
1004
1005#define DMA_IB_CNTL 0xd024
1006# define DMA_IB_ENABLE (1 << 0)
1007# define DMA_IB_SWAP_ENABLE (1 << 4)
1008#define DMA_IB_RPTR 0xd028
1009#define DMA_CNTL 0xd02c
1010# define TRAP_ENABLE (1 << 0)
1011# define SEM_INCOMPLETE_INT_ENABLE (1 << 1)
1012# define SEM_WAIT_INT_ENABLE (1 << 2)
1013# define DATA_SWAP_ENABLE (1 << 3)
1014# define FENCE_SWAP_ENABLE (1 << 4)
1015# define CTXEMPTY_INT_ENABLE (1 << 28)
1016#define DMA_TILING_CONFIG 0xd0b8
1017
1018#define DMA_PACKET(cmd, b, t, s, n) ((((cmd) & 0xF) << 28) | \
1019 (((b) & 0x1) << 26) | \
1020 (((t) & 0x1) << 23) | \
1021 (((s) & 0x1) << 22) | \
1022 (((n) & 0xFFFFF) << 0))
1023
1024#define DMA_IB_PACKET(cmd, vmid, n) ((((cmd) & 0xF) << 28) | \
1025 (((vmid) & 0xF) << 20) | \
1026 (((n) & 0xFFFFF) << 0))
1027
1028#define DMA_PTE_PDE_PACKET(n) ((2 << 28) | \
1029 (1 << 26) | \
1030 (1 << 21) | \
1031 (((n) & 0xFFFFF) << 0))
1032
1033/* async DMA Packet types */
1034#define DMA_PACKET_WRITE 0x2
1035#define DMA_PACKET_COPY 0x3
1036#define DMA_PACKET_INDIRECT_BUFFER 0x4
1037#define DMA_PACKET_SEMAPHORE 0x5
1038#define DMA_PACKET_FENCE 0x6
1039#define DMA_PACKET_TRAP 0x7
1040#define DMA_PACKET_SRBM_WRITE 0x9
1041#define DMA_PACKET_CONSTANT_FILL 0xd
1042#define DMA_PACKET_NOP 0xf
1043
925#endif 1044#endif
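
The VM_CONTEXT1_CNTL additions in sid.h follow a regular layout: each fault class (range, dummy page, PDE0, valid, read, write) gets an _INTERRUPT bit with its _DEFAULT bit directly above, and classes sit three bits apart. Composing a mask from a few of the definitions (values copied verbatim from the hunk) makes the stride visible:

    #include <stdio.h>

    #define RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 3)
    #define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT   (1 << 4)
    #define PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT  (1 << 9)
    #define PDE0_PROTECTION_FAULT_ENABLE_DEFAULT    (1 << 10)
    #define WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT (1 << 18)
    #define WRITE_PROTECTION_FAULT_ENABLE_DEFAULT   (1 << 19)

    int main(void)
    {
            unsigned mask = RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
                            RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
                            PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
                            PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
                            WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
                            WRITE_PROTECTION_FAULT_ENABLE_DEFAULT;

            printf("partial fault-enable mask: %#x\n", mask);
            return 0;
    }
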
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 0e7a9306bd0c..d917a411ca85 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -748,7 +748,7 @@ int shmob_drm_connector_create(struct shmob_drm_device *sdev,
748 connector->encoder = encoder; 748 connector->encoder = encoder;
749 749
750 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 750 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
751 drm_connector_property_set_value(connector, 751 drm_object_property_set_value(&connector->base,
752 sdev->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF); 752 sdev->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
753 753
754 return 0; 754 return 0;
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
new file mode 100644
index 000000000000..be1daf7344d3
--- /dev/null
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -0,0 +1,23 @@
1config DRM_TEGRA
2 tristate "NVIDIA Tegra DRM"
3 depends on DRM && OF && ARCH_TEGRA
4 select DRM_KMS_HELPER
5 select DRM_GEM_CMA_HELPER
6 select DRM_KMS_CMA_HELPER
7 select FB_CFB_FILLRECT
8 select FB_CFB_COPYAREA
9 select FB_CFB_IMAGEBLIT
10 help
11 Choose this option if you have an NVIDIA Tegra SoC.
12
13 To compile this driver as a module, choose M here: the module
14 will be called tegra-drm.
15
16if DRM_TEGRA
17
18config DRM_TEGRA_DEBUG
19 bool "NVIDIA Tegra DRM debug support"
20 help
21 Say yes here to enable debugging support.
22
23endif
diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile
new file mode 100644
index 000000000000..80f73d1315d0
--- /dev/null
+++ b/drivers/gpu/drm/tegra/Makefile
@@ -0,0 +1,7 @@
1ccflags-y := -Iinclude/drm
2ccflags-$(CONFIG_DRM_TEGRA_DEBUG) += -DDEBUG
3
4tegra-drm-y := drm.o fb.o dc.o host1x.o
5tegra-drm-y += output.o rgb.o hdmi.o
6
7obj-$(CONFIG_DRM_TEGRA) += tegra-drm.o
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
new file mode 100644
index 000000000000..074410371e2a
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -0,0 +1,834 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/clk.h>
11#include <linux/debugfs.h>
12#include <linux/module.h>
13#include <linux/of.h>
14#include <linux/platform_device.h>
15
16#include <mach/clk.h>
17
18#include "drm.h"
19#include "dc.h"
20
21struct tegra_dc_window {
22 fixed20_12 x;
23 fixed20_12 y;
24 fixed20_12 w;
25 fixed20_12 h;
26 unsigned int outx;
27 unsigned int outy;
28 unsigned int outw;
29 unsigned int outh;
30 unsigned int stride;
31 unsigned int fmt;
32};
33
34static const struct drm_crtc_funcs tegra_crtc_funcs = {
35 .set_config = drm_crtc_helper_set_config,
36 .destroy = drm_crtc_cleanup,
37};
38
39static void tegra_crtc_dpms(struct drm_crtc *crtc, int mode)
40{
41}
42
43static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
44 const struct drm_display_mode *mode,
45 struct drm_display_mode *adjusted)
46{
47 return true;
48}
49
50static inline u32 compute_dda_inc(fixed20_12 inf, unsigned int out, bool v,
51 unsigned int bpp)
52{
53 fixed20_12 outf = dfixed_init(out);
54 u32 dda_inc;
55 int max;
56
57 if (v)
58 max = 15;
59 else {
60 switch (bpp) {
61 case 2:
62 max = 8;
63 break;
64
65 default:
66 WARN_ON_ONCE(1);
67 /* fallthrough */
68 case 4:
69 max = 4;
70 break;
71 }
72 }
73
74 outf.full = max_t(u32, outf.full - dfixed_const(1), dfixed_const(1));
75 inf.full -= dfixed_const(1);
76
77 dda_inc = dfixed_div(inf, outf);
78 dda_inc = min_t(u32, dda_inc, dfixed_const(max));
79
80 return dda_inc;
81}
82
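
compute_dda_inc() works in the 20.12 fixed-point format of radeon's dfixed helpers: subtract one from both extents, divide, and the quotient is the per-output-pixel step through the source. A standalone recreation of the core arithmetic (the dfixed helpers are reimplemented locally and truncate, where the kernel's dfixed_div rounds; the driver's clamping is omitted):

    #include <stdio.h>

    #define FIX(x) ((unsigned)(x) << 12)    /* 20.12, like dfixed_const */

    int main(void)
    {
            unsigned inf  = FIX(1920) - FIX(1);   /* source width - 1 */
            unsigned outf = FIX(1280) - FIX(1);   /* output width - 1 */
            unsigned dda  = (unsigned)(((unsigned long long)inf << 12) / outf);

            /* ~1.5 in 20.12: each output pixel advances 1.5 source pixels */
            printf("dda_inc=%#x (%.3f)\n", dda, dda / 4096.0);
            return 0;
    }
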
83static inline u32 compute_initial_dda(fixed20_12 in)
84{
85 return dfixed_frac(in);
86}
87
88static int tegra_dc_set_timings(struct tegra_dc *dc,
89 struct drm_display_mode *mode)
90{
91 /* TODO: For HDMI compliance, h & v ref_to_sync should be set to 1 */
92 unsigned int h_ref_to_sync = 0;
93 unsigned int v_ref_to_sync = 0;
94 unsigned long value;
95
96 tegra_dc_writel(dc, 0x0, DC_DISP_DISP_TIMING_OPTIONS);
97
98 value = (v_ref_to_sync << 16) | h_ref_to_sync;
99 tegra_dc_writel(dc, value, DC_DISP_REF_TO_SYNC);
100
101 value = ((mode->vsync_end - mode->vsync_start) << 16) |
102 ((mode->hsync_end - mode->hsync_start) << 0);
103 tegra_dc_writel(dc, value, DC_DISP_SYNC_WIDTH);
104
105 value = ((mode->vsync_start - mode->vdisplay) << 16) |
106 ((mode->hsync_start - mode->hdisplay) << 0);
107 tegra_dc_writel(dc, value, DC_DISP_BACK_PORCH);
108
109 value = ((mode->vtotal - mode->vsync_end) << 16) |
110 ((mode->htotal - mode->hsync_end) << 0);
111 tegra_dc_writel(dc, value, DC_DISP_FRONT_PORCH);
112
113 value = (mode->vdisplay << 16) | mode->hdisplay;
114 tegra_dc_writel(dc, value, DC_DISP_ACTIVE);
115
116 return 0;
117}
118
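
tegra_dc_set_timings() packs every timing pair as (vertical << 16) | horizontal before writing it to the sync-width, porch, and active registers. A standalone check with the standard 1080p60 CEA timing values (illustrative numbers, not taken from the driver):

    #include <stdio.h>

    int main(void)
    {
            int hdisplay = 1920, hsync_start = 2008, hsync_end = 2052, htotal = 2200;
            int vdisplay = 1080, vsync_start = 1084, vsync_end = 1089, vtotal = 1125;

            unsigned sync   = ((vsync_end - vsync_start) << 16) | (hsync_end - hsync_start);
            unsigned back   = ((vsync_start - vdisplay) << 16) | (hsync_start - hdisplay);
            unsigned front  = ((vtotal - vsync_end) << 16) | (htotal - hsync_end);
            unsigned active = ((unsigned)vdisplay << 16) | hdisplay;

            printf("sync=%#x back=%#x front=%#x active=%#x\n",
                   sync, back, front, active);
            return 0;
    }
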
119static int tegra_crtc_setup_clk(struct drm_crtc *crtc,
120 struct drm_display_mode *mode,
121 unsigned long *div)
122{
123 unsigned long pclk = mode->clock * 1000, rate;
124 struct tegra_dc *dc = to_tegra_dc(crtc);
125 struct tegra_output *output = NULL;
126 struct drm_encoder *encoder;
127 long err;
128
129 list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list, head)
130 if (encoder->crtc == crtc) {
131 output = encoder_to_output(encoder);
132 break;
133 }
134
135 if (!output)
136 return -ENODEV;
137
138 /*
139 * This assumes that the display controller will divide its parent
140 * clock by 2 to generate the pixel clock.
141 */
142 err = tegra_output_setup_clock(output, dc->clk, pclk * 2);
143 if (err < 0) {
144 dev_err(dc->dev, "failed to setup clock: %ld\n", err);
145 return err;
146 }
147
148 rate = clk_get_rate(dc->clk);
149 *div = (rate * 2 / pclk) - 2;
150
151 DRM_DEBUG_KMS("rate: %lu, div: %lu\n", rate, *div);
152
153 return 0;
154}
155
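
tegra_crtc_setup_clk() requests twice the pixel clock from the parent and derives the shift-clock divider from whatever rate was actually granted: div = rate * 2 / pclk - 2, which reads as (divisor - 1) counted in half steps. A standalone check that the formula round-trips:

    #include <stdio.h>

    int main(void)
    {
            unsigned long pclk = 148500000;      /* 148.5 MHz pixel clock */
            unsigned long rate = pclk * 2;       /* parent granted 2x exactly */
            unsigned long div = (rate * 2 / pclk) - 2;

            /* invert the formula: pclk = rate * 2 / (div + 2) */
            printf("div=%lu -> pclk=%lu\n", div, rate * 2 / (div + 2));
            return 0;
    }
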
156static int tegra_crtc_mode_set(struct drm_crtc *crtc,
157 struct drm_display_mode *mode,
158 struct drm_display_mode *adjusted,
159 int x, int y, struct drm_framebuffer *old_fb)
160{
161 struct tegra_framebuffer *fb = to_tegra_fb(crtc->fb);
162 struct tegra_dc *dc = to_tegra_dc(crtc);
163 unsigned int h_dda, v_dda, bpp;
164 struct tegra_dc_window win;
165 unsigned long div, value;
166 int err;
167
168 err = tegra_crtc_setup_clk(crtc, mode, &div);
169 if (err) {
170 dev_err(dc->dev, "failed to setup clock for CRTC: %d\n", err);
171 return err;
172 }
173
174 /* program display mode */
175 tegra_dc_set_timings(dc, mode);
176
177 value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL;
178 tegra_dc_writel(dc, value, DC_DISP_DATA_ENABLE_OPTIONS);
179
180 value = tegra_dc_readl(dc, DC_COM_PIN_OUTPUT_POLARITY(1));
181 value &= ~LVS_OUTPUT_POLARITY_LOW;
182 value &= ~LHS_OUTPUT_POLARITY_LOW;
183 tegra_dc_writel(dc, value, DC_COM_PIN_OUTPUT_POLARITY(1));
184
185 value = DISP_DATA_FORMAT_DF1P1C | DISP_ALIGNMENT_MSB |
186 DISP_ORDER_RED_BLUE;
187 tegra_dc_writel(dc, value, DC_DISP_DISP_INTERFACE_CONTROL);
188
189 tegra_dc_writel(dc, 0x00010001, DC_DISP_SHIFT_CLOCK_OPTIONS);
190
191 value = SHIFT_CLK_DIVIDER(div) | PIXEL_CLK_DIVIDER_PCD1;
192 tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
193
194 /* setup window parameters */
195 memset(&win, 0, sizeof(win));
196 win.x.full = dfixed_const(0);
197 win.y.full = dfixed_const(0);
198 win.w.full = dfixed_const(mode->hdisplay);
199 win.h.full = dfixed_const(mode->vdisplay);
200 win.outx = 0;
201 win.outy = 0;
202 win.outw = mode->hdisplay;
203 win.outh = mode->vdisplay;
204
205 switch (crtc->fb->pixel_format) {
206 case DRM_FORMAT_XRGB8888:
207 win.fmt = WIN_COLOR_DEPTH_B8G8R8A8;
208 break;
209
210 case DRM_FORMAT_RGB565:
211 win.fmt = WIN_COLOR_DEPTH_B5G6R5;
212 break;
213
214 default:
215 win.fmt = WIN_COLOR_DEPTH_B8G8R8A8;
216 WARN_ON(1);
217 break;
218 }
219
220 bpp = crtc->fb->bits_per_pixel / 8;
221 win.stride = crtc->fb->pitches[0];
222
223 /* program window registers */
224 value = tegra_dc_readl(dc, DC_CMD_DISPLAY_WINDOW_HEADER);
225 value |= WINDOW_A_SELECT;
226 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
227
228 tegra_dc_writel(dc, win.fmt, DC_WIN_COLOR_DEPTH);
229 tegra_dc_writel(dc, 0, DC_WIN_BYTE_SWAP);
230
231 value = V_POSITION(win.outy) | H_POSITION(win.outx);
232 tegra_dc_writel(dc, value, DC_WIN_POSITION);
233
234 value = V_SIZE(win.outh) | H_SIZE(win.outw);
235 tegra_dc_writel(dc, value, DC_WIN_SIZE);
236
237 value = V_PRESCALED_SIZE(dfixed_trunc(win.h)) |
238 H_PRESCALED_SIZE(dfixed_trunc(win.w) * bpp);
239 tegra_dc_writel(dc, value, DC_WIN_PRESCALED_SIZE);
240
241 h_dda = compute_dda_inc(win.w, win.outw, false, bpp);
242 v_dda = compute_dda_inc(win.h, win.outh, true, bpp);
243
244 value = V_DDA_INC(v_dda) | H_DDA_INC(h_dda);
245 tegra_dc_writel(dc, value, DC_WIN_DDA_INC);
246
247 h_dda = compute_initial_dda(win.x);
248 v_dda = compute_initial_dda(win.y);
249
250 tegra_dc_writel(dc, h_dda, DC_WIN_H_INITIAL_DDA);
251 tegra_dc_writel(dc, v_dda, DC_WIN_V_INITIAL_DDA);
252
253 tegra_dc_writel(dc, 0, DC_WIN_UV_BUF_STRIDE);
254 tegra_dc_writel(dc, 0, DC_WIN_BUF_STRIDE);
255
256 tegra_dc_writel(dc, fb->obj->paddr, DC_WINBUF_START_ADDR);
257 tegra_dc_writel(dc, win.stride, DC_WIN_LINE_STRIDE);
258 tegra_dc_writel(dc, dfixed_trunc(win.x) * bpp,
259 DC_WINBUF_ADDR_H_OFFSET);
260 tegra_dc_writel(dc, dfixed_trunc(win.y), DC_WINBUF_ADDR_V_OFFSET);
261
262 value = WIN_ENABLE;
263
264 if (bpp < 24)
265 value |= COLOR_EXPAND;
266
267 tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
268
269 tegra_dc_writel(dc, 0xff00, DC_WIN_BLEND_NOKEY);
270 tegra_dc_writel(dc, 0xff00, DC_WIN_BLEND_1WIN);
271
272 return 0;
273}
274
275static void tegra_crtc_prepare(struct drm_crtc *crtc)
276{
277 struct tegra_dc *dc = to_tegra_dc(crtc);
278 unsigned int syncpt;
279 unsigned long value;
280
281 /* hardware initialization */
282 tegra_periph_reset_deassert(dc->clk);
283 usleep_range(10000, 20000);
284
285 if (dc->pipe)
286 syncpt = SYNCPT_VBLANK1;
287 else
288 syncpt = SYNCPT_VBLANK0;
289
290 /* initialize display controller */
291 tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
292 tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC);
293
294 value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT;
295 tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
296
297 value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
298 WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
299 tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
300
301 value = PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
302 PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
303 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
304
305 value = tegra_dc_readl(dc, DC_CMD_DISPLAY_COMMAND);
306 value |= DISP_CTRL_MODE_C_DISPLAY;
307 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
308
309 /* initialize timer */
310 value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
311 WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
312 tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
313
314 value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
315 WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
316 tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
317
318 value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
319 tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
320
321 value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT;
322 tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
323}
324
325static void tegra_crtc_commit(struct drm_crtc *crtc)
326{
327 struct tegra_dc *dc = to_tegra_dc(crtc);
328 unsigned long update_mask;
329 unsigned long value;
330
331 update_mask = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
332
333 tegra_dc_writel(dc, update_mask << 8, DC_CMD_STATE_CONTROL);
334
335 value = tegra_dc_readl(dc, DC_CMD_INT_ENABLE);
336 value |= FRAME_END_INT;
337 tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
338
339 value = tegra_dc_readl(dc, DC_CMD_INT_MASK);
340 value |= FRAME_END_INT;
341 tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
342
343 tegra_dc_writel(dc, update_mask, DC_CMD_STATE_CONTROL);
344}
345
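
tegra_crtc_commit() uses DC_CMD_STATE_CONTROL as a two-phase latch: writing the request mask shifted left by 8 arms the shadowed registers, and writing the mask itself requests activation. A sketch of that pattern with a stubbed register write (bit names and values mirror the hunk but are stand-ins here):

    #include <stdio.h>

    #define GENERAL_ACT_REQ (1 << 0)
    #define WIN_A_ACT_REQ   (1 << 1)

    static unsigned state_control;             /* fake DC_CMD_STATE_CONTROL */

    static void dc_writel(unsigned v) { state_control = v; }

    int main(void)
    {
            unsigned update_mask = GENERAL_ACT_REQ | WIN_A_ACT_REQ;

            dc_writel(update_mask << 8);   /* arm: latch shadowed state */
            dc_writel(update_mask);        /* trigger: request activation */
            printf("last write %#x\n", state_control);
            return 0;
    }
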
346static void tegra_crtc_load_lut(struct drm_crtc *crtc)
347{
348}
349
350static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
351 .dpms = tegra_crtc_dpms,
352 .mode_fixup = tegra_crtc_mode_fixup,
353 .mode_set = tegra_crtc_mode_set,
354 .prepare = tegra_crtc_prepare,
355 .commit = tegra_crtc_commit,
356 .load_lut = tegra_crtc_load_lut,
357};
358
359static irqreturn_t tegra_drm_irq(int irq, void *data)
360{
361 struct tegra_dc *dc = data;
362 unsigned long status;
363
364 status = tegra_dc_readl(dc, DC_CMD_INT_STATUS);
365 tegra_dc_writel(dc, status, DC_CMD_INT_STATUS);
366
367 if (status & FRAME_END_INT) {
368 /*
369 dev_dbg(dc->dev, "%s(): frame end\n", __func__);
370 */
371 }
372
373 if (status & VBLANK_INT) {
374 /*
375 dev_dbg(dc->dev, "%s(): vertical blank\n", __func__);
376 */
377 drm_handle_vblank(dc->base.dev, dc->pipe);
378 }
379
380 if (status & (WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT)) {
381 /*
382 dev_dbg(dc->dev, "%s(): underflow\n", __func__);
383 */
384 }
385
386 return IRQ_HANDLED;
387}
388
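/*
 * Dump all known display controller registers to the debugfs "regs"
 * file; DUMP_REG() prints the register name, its word offset and the
 * current value.
 */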
389static int tegra_dc_show_regs(struct seq_file *s, void *data)
390{
391 struct drm_info_node *node = s->private;
392 struct tegra_dc *dc = node->info_ent->data;
393
394#define DUMP_REG(name) \
395 seq_printf(s, "%-40s %#05x %08lx\n", #name, name, \
396 tegra_dc_readl(dc, name))
397
398 DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT);
399 DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
400 DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT_ERROR);
401 DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT);
402 DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT_CNTRL);
403 DUMP_REG(DC_CMD_WIN_A_INCR_SYNCPT_ERROR);
404 DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT);
405 DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT_CNTRL);
406 DUMP_REG(DC_CMD_WIN_B_INCR_SYNCPT_ERROR);
407 DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT);
408 DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT_CNTRL);
409 DUMP_REG(DC_CMD_WIN_C_INCR_SYNCPT_ERROR);
410 DUMP_REG(DC_CMD_CONT_SYNCPT_VSYNC);
411 DUMP_REG(DC_CMD_DISPLAY_COMMAND_OPTION0);
412 DUMP_REG(DC_CMD_DISPLAY_COMMAND);
413 DUMP_REG(DC_CMD_SIGNAL_RAISE);
414 DUMP_REG(DC_CMD_DISPLAY_POWER_CONTROL);
415 DUMP_REG(DC_CMD_INT_STATUS);
416 DUMP_REG(DC_CMD_INT_MASK);
417 DUMP_REG(DC_CMD_INT_ENABLE);
418 DUMP_REG(DC_CMD_INT_TYPE);
419 DUMP_REG(DC_CMD_INT_POLARITY);
420 DUMP_REG(DC_CMD_SIGNAL_RAISE1);
421 DUMP_REG(DC_CMD_SIGNAL_RAISE2);
422 DUMP_REG(DC_CMD_SIGNAL_RAISE3);
423 DUMP_REG(DC_CMD_STATE_ACCESS);
424 DUMP_REG(DC_CMD_STATE_CONTROL);
425 DUMP_REG(DC_CMD_DISPLAY_WINDOW_HEADER);
426 DUMP_REG(DC_CMD_REG_ACT_CONTROL);
427 DUMP_REG(DC_COM_CRC_CONTROL);
428 DUMP_REG(DC_COM_CRC_CHECKSUM);
429 DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(0));
430 DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(1));
431 DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(2));
432 DUMP_REG(DC_COM_PIN_OUTPUT_ENABLE(3));
433 DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(0));
434 DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(1));
435 DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(2));
436 DUMP_REG(DC_COM_PIN_OUTPUT_POLARITY(3));
437 DUMP_REG(DC_COM_PIN_OUTPUT_DATA(0));
438 DUMP_REG(DC_COM_PIN_OUTPUT_DATA(1));
439 DUMP_REG(DC_COM_PIN_OUTPUT_DATA(2));
440 DUMP_REG(DC_COM_PIN_OUTPUT_DATA(3));
441 DUMP_REG(DC_COM_PIN_INPUT_ENABLE(0));
442 DUMP_REG(DC_COM_PIN_INPUT_ENABLE(1));
443 DUMP_REG(DC_COM_PIN_INPUT_ENABLE(2));
444 DUMP_REG(DC_COM_PIN_INPUT_ENABLE(3));
445 DUMP_REG(DC_COM_PIN_INPUT_DATA(0));
446 DUMP_REG(DC_COM_PIN_INPUT_DATA(1));
447 DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(0));
448 DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(1));
449 DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(2));
450 DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(3));
451 DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(4));
452 DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(5));
453 DUMP_REG(DC_COM_PIN_OUTPUT_SELECT(6));
454 DUMP_REG(DC_COM_PIN_MISC_CONTROL);
455 DUMP_REG(DC_COM_PIN_PM0_CONTROL);
456 DUMP_REG(DC_COM_PIN_PM0_DUTY_CYCLE);
457 DUMP_REG(DC_COM_PIN_PM1_CONTROL);
458 DUMP_REG(DC_COM_PIN_PM1_DUTY_CYCLE);
459 DUMP_REG(DC_COM_SPI_CONTROL);
460 DUMP_REG(DC_COM_SPI_START_BYTE);
461 DUMP_REG(DC_COM_HSPI_WRITE_DATA_AB);
462 DUMP_REG(DC_COM_HSPI_WRITE_DATA_CD);
463 DUMP_REG(DC_COM_HSPI_CS_DC);
464 DUMP_REG(DC_COM_SCRATCH_REGISTER_A);
465 DUMP_REG(DC_COM_SCRATCH_REGISTER_B);
466 DUMP_REG(DC_COM_GPIO_CTRL);
467 DUMP_REG(DC_COM_GPIO_DEBOUNCE_COUNTER);
468 DUMP_REG(DC_COM_CRC_CHECKSUM_LATCHED);
469 DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS0);
470 DUMP_REG(DC_DISP_DISP_SIGNAL_OPTIONS1);
471 DUMP_REG(DC_DISP_DISP_WIN_OPTIONS);
472 DUMP_REG(DC_DISP_DISP_MEM_HIGH_PRIORITY);
473 DUMP_REG(DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
474 DUMP_REG(DC_DISP_DISP_TIMING_OPTIONS);
475 DUMP_REG(DC_DISP_REF_TO_SYNC);
476 DUMP_REG(DC_DISP_SYNC_WIDTH);
477 DUMP_REG(DC_DISP_BACK_PORCH);
478 DUMP_REG(DC_DISP_ACTIVE);
479 DUMP_REG(DC_DISP_FRONT_PORCH);
480 DUMP_REG(DC_DISP_H_PULSE0_CONTROL);
481 DUMP_REG(DC_DISP_H_PULSE0_POSITION_A);
482 DUMP_REG(DC_DISP_H_PULSE0_POSITION_B);
483 DUMP_REG(DC_DISP_H_PULSE0_POSITION_C);
484 DUMP_REG(DC_DISP_H_PULSE0_POSITION_D);
485 DUMP_REG(DC_DISP_H_PULSE1_CONTROL);
486 DUMP_REG(DC_DISP_H_PULSE1_POSITION_A);
487 DUMP_REG(DC_DISP_H_PULSE1_POSITION_B);
488 DUMP_REG(DC_DISP_H_PULSE1_POSITION_C);
489 DUMP_REG(DC_DISP_H_PULSE1_POSITION_D);
490 DUMP_REG(DC_DISP_H_PULSE2_CONTROL);
491 DUMP_REG(DC_DISP_H_PULSE2_POSITION_A);
492 DUMP_REG(DC_DISP_H_PULSE2_POSITION_B);
493 DUMP_REG(DC_DISP_H_PULSE2_POSITION_C);
494 DUMP_REG(DC_DISP_H_PULSE2_POSITION_D);
495 DUMP_REG(DC_DISP_V_PULSE0_CONTROL);
496 DUMP_REG(DC_DISP_V_PULSE0_POSITION_A);
497 DUMP_REG(DC_DISP_V_PULSE0_POSITION_B);
498 DUMP_REG(DC_DISP_V_PULSE0_POSITION_C);
499 DUMP_REG(DC_DISP_V_PULSE1_CONTROL);
500 DUMP_REG(DC_DISP_V_PULSE1_POSITION_A);
501 DUMP_REG(DC_DISP_V_PULSE1_POSITION_B);
502 DUMP_REG(DC_DISP_V_PULSE1_POSITION_C);
503 DUMP_REG(DC_DISP_V_PULSE2_CONTROL);
504 DUMP_REG(DC_DISP_V_PULSE2_POSITION_A);
505 DUMP_REG(DC_DISP_V_PULSE3_CONTROL);
506 DUMP_REG(DC_DISP_V_PULSE3_POSITION_A);
507 DUMP_REG(DC_DISP_M0_CONTROL);
508 DUMP_REG(DC_DISP_M1_CONTROL);
509 DUMP_REG(DC_DISP_DI_CONTROL);
510 DUMP_REG(DC_DISP_PP_CONTROL);
511 DUMP_REG(DC_DISP_PP_SELECT_A);
512 DUMP_REG(DC_DISP_PP_SELECT_B);
513 DUMP_REG(DC_DISP_PP_SELECT_C);
514 DUMP_REG(DC_DISP_PP_SELECT_D);
515 DUMP_REG(DC_DISP_DISP_CLOCK_CONTROL);
516 DUMP_REG(DC_DISP_DISP_INTERFACE_CONTROL);
517 DUMP_REG(DC_DISP_DISP_COLOR_CONTROL);
518 DUMP_REG(DC_DISP_SHIFT_CLOCK_OPTIONS);
519 DUMP_REG(DC_DISP_DATA_ENABLE_OPTIONS);
520 DUMP_REG(DC_DISP_SERIAL_INTERFACE_OPTIONS);
521 DUMP_REG(DC_DISP_LCD_SPI_OPTIONS);
522 DUMP_REG(DC_DISP_BORDER_COLOR);
523 DUMP_REG(DC_DISP_COLOR_KEY0_LOWER);
524 DUMP_REG(DC_DISP_COLOR_KEY0_UPPER);
525 DUMP_REG(DC_DISP_COLOR_KEY1_LOWER);
526 DUMP_REG(DC_DISP_COLOR_KEY1_UPPER);
527 DUMP_REG(DC_DISP_CURSOR_FOREGROUND);
528 DUMP_REG(DC_DISP_CURSOR_BACKGROUND);
529 DUMP_REG(DC_DISP_CURSOR_START_ADDR);
530 DUMP_REG(DC_DISP_CURSOR_START_ADDR_NS);
531 DUMP_REG(DC_DISP_CURSOR_POSITION);
532 DUMP_REG(DC_DISP_CURSOR_POSITION_NS);
533 DUMP_REG(DC_DISP_INIT_SEQ_CONTROL);
534 DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_A);
535 DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_B);
536 DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_C);
537 DUMP_REG(DC_DISP_SPI_INIT_SEQ_DATA_D);
538 DUMP_REG(DC_DISP_DC_MCCIF_FIFOCTRL);
539 DUMP_REG(DC_DISP_MCCIF_DISPLAY0A_HYST);
540 DUMP_REG(DC_DISP_MCCIF_DISPLAY0B_HYST);
541 DUMP_REG(DC_DISP_MCCIF_DISPLAY1A_HYST);
542 DUMP_REG(DC_DISP_MCCIF_DISPLAY1B_HYST);
543 DUMP_REG(DC_DISP_DAC_CRT_CTRL);
544 DUMP_REG(DC_DISP_DISP_MISC_CONTROL);
545 DUMP_REG(DC_DISP_SD_CONTROL);
546 DUMP_REG(DC_DISP_SD_CSC_COEFF);
547 DUMP_REG(DC_DISP_SD_LUT(0));
548 DUMP_REG(DC_DISP_SD_LUT(1));
549 DUMP_REG(DC_DISP_SD_LUT(2));
550 DUMP_REG(DC_DISP_SD_LUT(3));
551 DUMP_REG(DC_DISP_SD_LUT(4));
552 DUMP_REG(DC_DISP_SD_LUT(5));
553 DUMP_REG(DC_DISP_SD_LUT(6));
554 DUMP_REG(DC_DISP_SD_LUT(7));
555 DUMP_REG(DC_DISP_SD_LUT(8));
556 DUMP_REG(DC_DISP_SD_FLICKER_CONTROL);
557 DUMP_REG(DC_DISP_DC_PIXEL_COUNT);
558 DUMP_REG(DC_DISP_SD_HISTOGRAM(0));
559 DUMP_REG(DC_DISP_SD_HISTOGRAM(1));
560 DUMP_REG(DC_DISP_SD_HISTOGRAM(2));
561 DUMP_REG(DC_DISP_SD_HISTOGRAM(3));
562 DUMP_REG(DC_DISP_SD_HISTOGRAM(4));
563 DUMP_REG(DC_DISP_SD_HISTOGRAM(5));
564 DUMP_REG(DC_DISP_SD_HISTOGRAM(6));
565 DUMP_REG(DC_DISP_SD_HISTOGRAM(7));
566 DUMP_REG(DC_DISP_SD_BL_TF(0));
567 DUMP_REG(DC_DISP_SD_BL_TF(1));
568 DUMP_REG(DC_DISP_SD_BL_TF(2));
569 DUMP_REG(DC_DISP_SD_BL_TF(3));
570 DUMP_REG(DC_DISP_SD_BL_CONTROL);
571 DUMP_REG(DC_DISP_SD_HW_K_VALUES);
572 DUMP_REG(DC_DISP_SD_MAN_K_VALUES);
573 DUMP_REG(DC_WIN_WIN_OPTIONS);
574 DUMP_REG(DC_WIN_BYTE_SWAP);
575 DUMP_REG(DC_WIN_BUFFER_CONTROL);
576 DUMP_REG(DC_WIN_COLOR_DEPTH);
577 DUMP_REG(DC_WIN_POSITION);
578 DUMP_REG(DC_WIN_SIZE);
579 DUMP_REG(DC_WIN_PRESCALED_SIZE);
580 DUMP_REG(DC_WIN_H_INITIAL_DDA);
581 DUMP_REG(DC_WIN_V_INITIAL_DDA);
582 DUMP_REG(DC_WIN_DDA_INC);
583 DUMP_REG(DC_WIN_LINE_STRIDE);
584 DUMP_REG(DC_WIN_BUF_STRIDE);
585 DUMP_REG(DC_WIN_UV_BUF_STRIDE);
586 DUMP_REG(DC_WIN_BUFFER_ADDR_MODE);
587 DUMP_REG(DC_WIN_DV_CONTROL);
588 DUMP_REG(DC_WIN_BLEND_NOKEY);
589 DUMP_REG(DC_WIN_BLEND_1WIN);
590 DUMP_REG(DC_WIN_BLEND_2WIN_X);
591 DUMP_REG(DC_WIN_BLEND_2WIN_Y);
592 DUMP_REG(DC_WIN_BLEND_32WIN_XY);
593 DUMP_REG(DC_WIN_HP_FETCH_CONTROL);
594 DUMP_REG(DC_WINBUF_START_ADDR);
595 DUMP_REG(DC_WINBUF_START_ADDR_NS);
596 DUMP_REG(DC_WINBUF_START_ADDR_U);
597 DUMP_REG(DC_WINBUF_START_ADDR_U_NS);
598 DUMP_REG(DC_WINBUF_START_ADDR_V);
599 DUMP_REG(DC_WINBUF_START_ADDR_V_NS);
600 DUMP_REG(DC_WINBUF_ADDR_H_OFFSET);
601 DUMP_REG(DC_WINBUF_ADDR_H_OFFSET_NS);
602 DUMP_REG(DC_WINBUF_ADDR_V_OFFSET);
603 DUMP_REG(DC_WINBUF_ADDR_V_OFFSET_NS);
604 DUMP_REG(DC_WINBUF_UFLOW_STATUS);
605 DUMP_REG(DC_WINBUF_AD_UFLOW_STATUS);
606 DUMP_REG(DC_WINBUF_BD_UFLOW_STATUS);
607 DUMP_REG(DC_WINBUF_CD_UFLOW_STATUS);
608
609#undef DUMP_REG
610
611 return 0;
612}
613
614static struct drm_info_list debugfs_files[] = {
615 { "regs", tegra_dc_show_regs, 0, NULL },
616};
617
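/*
 * debugfs_files above is a template shared by all display controllers:
 * it is duplicated with kmemdup() so that each instance can point the
 * .data field of its private copy at its own struct tegra_dc.
 */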
618static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
619{
620 unsigned int i;
621 char *name;
622 int err;
623
624 name = kasprintf(GFP_KERNEL, "dc.%d", dc->pipe);
625 dc->debugfs = debugfs_create_dir(name, minor->debugfs_root);
626 kfree(name);
627
628 if (!dc->debugfs)
629 return -ENOMEM;
630
631 dc->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
632 GFP_KERNEL);
633 if (!dc->debugfs_files) {
634 err = -ENOMEM;
635 goto remove;
636 }
637
638 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
639 dc->debugfs_files[i].data = dc;
640
641 err = drm_debugfs_create_files(dc->debugfs_files,
642 ARRAY_SIZE(debugfs_files),
643 dc->debugfs, minor);
644 if (err < 0)
645 goto free;
646
647 dc->minor = minor;
648
649 return 0;
650
651free:
652 kfree(dc->debugfs_files);
653 dc->debugfs_files = NULL;
654remove:
655 debugfs_remove(dc->debugfs);
656 dc->debugfs = NULL;
657
658 return err;
659}
660
661static int tegra_dc_debugfs_exit(struct tegra_dc *dc)
662{
663 drm_debugfs_remove_files(dc->debugfs_files, ARRAY_SIZE(debugfs_files),
664 dc->minor);
665 dc->minor = NULL;
666
667 kfree(dc->debugfs_files);
668 dc->debugfs_files = NULL;
669
670 debugfs_remove(dc->debugfs);
671 dc->debugfs = NULL;
672
673 return 0;
674}
675
676static int tegra_dc_drm_init(struct host1x_client *client,
677 struct drm_device *drm)
678{
679 struct tegra_dc *dc = host1x_client_to_dc(client);
680 int err;
681
682 dc->pipe = drm->mode_config.num_crtc;
683
684 drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs);
685 drm_mode_crtc_set_gamma_size(&dc->base, 256);
686 drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
687
688 err = tegra_dc_rgb_init(drm, dc);
689 if (err < 0 && err != -ENODEV) {
690 dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
691 return err;
692 }
693
694 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
695 err = tegra_dc_debugfs_init(dc, drm->primary);
696 if (err < 0)
697 dev_err(dc->dev, "debugfs setup failed: %d\n", err);
698 }
699
700 err = devm_request_irq(dc->dev, dc->irq, tegra_drm_irq, 0,
701 dev_name(dc->dev), dc);
702 if (err < 0) {
703 dev_err(dc->dev, "failed to request IRQ#%u: %d\n", dc->irq,
704 err);
705 return err;
706 }
707
708 return 0;
709}
710
711static int tegra_dc_drm_exit(struct host1x_client *client)
712{
713 struct tegra_dc *dc = host1x_client_to_dc(client);
714 int err;
715
716 devm_free_irq(dc->dev, dc->irq, dc);
717
718 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
719 err = tegra_dc_debugfs_exit(dc);
720 if (err < 0)
721 dev_err(dc->dev, "debugfs cleanup failed: %d\n", err);
722 }
723
724 err = tegra_dc_rgb_exit(dc);
725 if (err) {
726 dev_err(dc->dev, "failed to shutdown RGB output: %d\n", err);
727 return err;
728 }
729
730 return 0;
731}
732
733static const struct host1x_client_ops dc_client_ops = {
734 .drm_init = tegra_dc_drm_init,
735 .drm_exit = tegra_dc_drm_exit,
736};
737
738static int tegra_dc_probe(struct platform_device *pdev)
739{
740 struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
741 struct resource *regs;
742 struct tegra_dc *dc;
743 int err;
744
745 dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
746 if (!dc)
747 return -ENOMEM;
748
749 INIT_LIST_HEAD(&dc->list);
750 dc->dev = &pdev->dev;
751
752 dc->clk = devm_clk_get(&pdev->dev, NULL);
753 if (IS_ERR(dc->clk)) {
754 dev_err(&pdev->dev, "failed to get clock\n");
755 return PTR_ERR(dc->clk);
756 }
757
758 err = clk_prepare_enable(dc->clk);
759 if (err < 0)
760 return err;
761
762 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
763 if (!regs) {
764 dev_err(&pdev->dev, "failed to get registers\n");
765 return -ENXIO;
766 }
767
768 dc->regs = devm_request_and_ioremap(&pdev->dev, regs);
769 if (!dc->regs) {
770 dev_err(&pdev->dev, "failed to remap registers\n");
771 return -ENXIO;
772 }
773
774 dc->irq = platform_get_irq(pdev, 0);
775 if (dc->irq < 0) {
776 dev_err(&pdev->dev, "failed to get IRQ\n");
777 return -ENXIO;
778 }
779
780 INIT_LIST_HEAD(&dc->client.list);
781 dc->client.ops = &dc_client_ops;
782 dc->client.dev = &pdev->dev;
783
784 err = tegra_dc_rgb_probe(dc);
785 if (err < 0 && err != -ENODEV) {
786 dev_err(&pdev->dev, "failed to probe RGB output: %d\n", err);
787 return err;
788 }
789
790 err = host1x_register_client(host1x, &dc->client);
791 if (err < 0) {
792 dev_err(&pdev->dev, "failed to register host1x client: %d\n",
793 err);
794 return err;
795 }
796
797 platform_set_drvdata(pdev, dc);
798
799 return 0;
800}
801
802static int tegra_dc_remove(struct platform_device *pdev)
803{
804 struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
805 struct tegra_dc *dc = platform_get_drvdata(pdev);
806 int err;
807
808 err = host1x_unregister_client(host1x, &dc->client);
809 if (err < 0) {
810 dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
811 err);
812 return err;
813 }
814
815 clk_disable_unprepare(dc->clk);
816
817 return 0;
818}
819
820static struct of_device_id tegra_dc_of_match[] = {
821 { .compatible = "nvidia,tegra30-dc", },
822 { .compatible = "nvidia,tegra20-dc", },
823 { },
824};
825
826struct platform_driver tegra_dc_driver = {
827 .driver = {
828 .name = "tegra-dc",
829 .owner = THIS_MODULE,
830 .of_match_table = tegra_dc_of_match,
831 },
832 .probe = tegra_dc_probe,
833 .remove = tegra_dc_remove,
834};
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
new file mode 100644
index 000000000000..99977b5d5c36
--- /dev/null
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -0,0 +1,388 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#ifndef TEGRA_DC_H
11#define TEGRA_DC_H 1
12
13#define DC_CMD_GENERAL_INCR_SYNCPT 0x000
14#define DC_CMD_GENERAL_INCR_SYNCPT_CNTRL 0x001
15#define DC_CMD_GENERAL_INCR_SYNCPT_ERROR 0x002
16#define DC_CMD_WIN_A_INCR_SYNCPT 0x008
17#define DC_CMD_WIN_A_INCR_SYNCPT_CNTRL 0x009
18#define DC_CMD_WIN_A_INCR_SYNCPT_ERROR 0x00a
19#define DC_CMD_WIN_B_INCR_SYNCPT 0x010
20#define DC_CMD_WIN_B_INCR_SYNCPT_CNTRL 0x011
21#define DC_CMD_WIN_B_INCR_SYNCPT_ERROR 0x012
22#define DC_CMD_WIN_C_INCR_SYNCPT 0x018
23#define DC_CMD_WIN_C_INCR_SYNCPT_CNTRL 0x019
24#define DC_CMD_WIN_C_INCR_SYNCPT_ERROR 0x01a
25#define DC_CMD_CONT_SYNCPT_VSYNC 0x028
26#define DC_CMD_DISPLAY_COMMAND_OPTION0 0x031
27#define DC_CMD_DISPLAY_COMMAND 0x032
28#define DISP_CTRL_MODE_STOP (0 << 5)
29#define DISP_CTRL_MODE_C_DISPLAY (1 << 5)
30#define DISP_CTRL_MODE_NC_DISPLAY (2 << 5)
31#define DC_CMD_SIGNAL_RAISE 0x033
32#define DC_CMD_DISPLAY_POWER_CONTROL 0x036
33#define PW0_ENABLE (1 << 0)
34#define PW1_ENABLE (1 << 2)
35#define PW2_ENABLE (1 << 4)
36#define PW3_ENABLE (1 << 6)
37#define PW4_ENABLE (1 << 8)
38#define PM0_ENABLE (1 << 16)
39#define PM1_ENABLE (1 << 18)
40
41#define DC_CMD_INT_STATUS 0x037
42#define DC_CMD_INT_MASK 0x038
43#define DC_CMD_INT_ENABLE 0x039
44#define DC_CMD_INT_TYPE 0x03a
45#define DC_CMD_INT_POLARITY 0x03b
46#define CTXSW_INT (1 << 0)
47#define FRAME_END_INT (1 << 1)
48#define VBLANK_INT (1 << 2)
49#define WIN_A_UF_INT (1 << 8)
50#define WIN_B_UF_INT (1 << 9)
51#define WIN_C_UF_INT (1 << 10)
52#define WIN_A_OF_INT (1 << 14)
53#define WIN_B_OF_INT (1 << 15)
54#define WIN_C_OF_INT (1 << 16)
55
56#define DC_CMD_SIGNAL_RAISE1 0x03c
57#define DC_CMD_SIGNAL_RAISE2 0x03d
58#define DC_CMD_SIGNAL_RAISE3 0x03e
59
60#define DC_CMD_STATE_ACCESS 0x040
61
62#define DC_CMD_STATE_CONTROL 0x041
63#define GENERAL_ACT_REQ (1 << 0)
64#define WIN_A_ACT_REQ (1 << 1)
65#define WIN_B_ACT_REQ (1 << 2)
66#define WIN_C_ACT_REQ (1 << 3)
67#define GENERAL_UPDATE (1 << 8)
68#define WIN_A_UPDATE (1 << 9)
69#define WIN_B_UPDATE (1 << 10)
70#define WIN_C_UPDATE (1 << 11)
71#define NC_HOST_TRIG (1 << 24)
72
73#define DC_CMD_DISPLAY_WINDOW_HEADER 0x042
74#define WINDOW_A_SELECT (1 << 4)
75#define WINDOW_B_SELECT (1 << 5)
76#define WINDOW_C_SELECT (1 << 6)
77
78#define DC_CMD_REG_ACT_CONTROL 0x043
79
80#define DC_COM_CRC_CONTROL 0x300
81#define DC_COM_CRC_CHECKSUM 0x301
82#define DC_COM_PIN_OUTPUT_ENABLE(x) (0x302 + (x))
83#define DC_COM_PIN_OUTPUT_POLARITY(x) (0x306 + (x))
84#define LVS_OUTPUT_POLARITY_LOW (1 << 28)
85#define LHS_OUTPUT_POLARITY_LOW (1 << 30)
86#define DC_COM_PIN_OUTPUT_DATA(x) (0x30a + (x))
87#define DC_COM_PIN_INPUT_ENABLE(x) (0x30e + (x))
88#define DC_COM_PIN_INPUT_DATA(x) (0x312 + (x))
89#define DC_COM_PIN_OUTPUT_SELECT(x) (0x314 + (x))
90
91#define DC_COM_PIN_MISC_CONTROL 0x31b
92#define DC_COM_PIN_PM0_CONTROL 0x31c
93#define DC_COM_PIN_PM0_DUTY_CYCLE 0x31d
94#define DC_COM_PIN_PM1_CONTROL 0x31e
95#define DC_COM_PIN_PM1_DUTY_CYCLE 0x31f
96
97#define DC_COM_SPI_CONTROL 0x320
98#define DC_COM_SPI_START_BYTE 0x321
99#define DC_COM_HSPI_WRITE_DATA_AB 0x322
100#define DC_COM_HSPI_WRITE_DATA_CD 0x323
101#define DC_COM_HSPI_CS_DC 0x324
102#define DC_COM_SCRATCH_REGISTER_A 0x325
103#define DC_COM_SCRATCH_REGISTER_B 0x326
104#define DC_COM_GPIO_CTRL 0x327
105#define DC_COM_GPIO_DEBOUNCE_COUNTER 0x328
106#define DC_COM_CRC_CHECKSUM_LATCHED 0x329
107
108#define DC_DISP_DISP_SIGNAL_OPTIONS0 0x400
109#define H_PULSE_0_ENABLE (1 << 8)
110#define H_PULSE_1_ENABLE (1 << 10)
111#define H_PULSE_2_ENABLE (1 << 12)
112
113#define DC_DISP_DISP_SIGNAL_OPTIONS1 0x401
114
115#define DC_DISP_DISP_WIN_OPTIONS 0x402
116#define HDMI_ENABLE (1 << 30)
117
118#define DC_DISP_DISP_MEM_HIGH_PRIORITY 0x403
119#define CURSOR_THRESHOLD(x) (((x) & 0x03) << 24)
120#define WINDOW_A_THRESHOLD(x) (((x) & 0x7f) << 16)
121#define WINDOW_B_THRESHOLD(x) (((x) & 0x7f) << 8)
122#define WINDOW_C_THRESHOLD(x) (((x) & 0xff) << 0)
123
124#define DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER 0x404
125#define CURSOR_DELAY(x) (((x) & 0x3f) << 24)
126#define WINDOW_A_DELAY(x) (((x) & 0x3f) << 16)
127#define WINDOW_B_DELAY(x) (((x) & 0x3f) << 8)
128#define WINDOW_C_DELAY(x) (((x) & 0x3f) << 0)
129
130#define DC_DISP_DISP_TIMING_OPTIONS 0x405
131#define VSYNC_H_POSITION(x) ((x) & 0xfff)
132
133#define DC_DISP_REF_TO_SYNC 0x406
134#define DC_DISP_SYNC_WIDTH 0x407
135#define DC_DISP_BACK_PORCH 0x408
136#define DC_DISP_ACTIVE 0x409
137#define DC_DISP_FRONT_PORCH 0x40a
138#define DC_DISP_H_PULSE0_CONTROL 0x40b
139#define DC_DISP_H_PULSE0_POSITION_A 0x40c
140#define DC_DISP_H_PULSE0_POSITION_B 0x40d
141#define DC_DISP_H_PULSE0_POSITION_C 0x40e
142#define DC_DISP_H_PULSE0_POSITION_D 0x40f
143#define DC_DISP_H_PULSE1_CONTROL 0x410
144#define DC_DISP_H_PULSE1_POSITION_A 0x411
145#define DC_DISP_H_PULSE1_POSITION_B 0x412
146#define DC_DISP_H_PULSE1_POSITION_C 0x413
147#define DC_DISP_H_PULSE1_POSITION_D 0x414
148#define DC_DISP_H_PULSE2_CONTROL 0x415
149#define DC_DISP_H_PULSE2_POSITION_A 0x416
150#define DC_DISP_H_PULSE2_POSITION_B 0x417
151#define DC_DISP_H_PULSE2_POSITION_C 0x418
152#define DC_DISP_H_PULSE2_POSITION_D 0x419
153#define DC_DISP_V_PULSE0_CONTROL 0x41a
154#define DC_DISP_V_PULSE0_POSITION_A 0x41b
155#define DC_DISP_V_PULSE0_POSITION_B 0x41c
156#define DC_DISP_V_PULSE0_POSITION_C 0x41d
157#define DC_DISP_V_PULSE1_CONTROL 0x41e
158#define DC_DISP_V_PULSE1_POSITION_A 0x41f
159#define DC_DISP_V_PULSE1_POSITION_B 0x420
160#define DC_DISP_V_PULSE1_POSITION_C 0x421
161#define DC_DISP_V_PULSE2_CONTROL 0x422
162#define DC_DISP_V_PULSE2_POSITION_A 0x423
163#define DC_DISP_V_PULSE3_CONTROL 0x424
164#define DC_DISP_V_PULSE3_POSITION_A 0x425
165#define DC_DISP_M0_CONTROL 0x426
166#define DC_DISP_M1_CONTROL 0x427
167#define DC_DISP_DI_CONTROL 0x428
168#define DC_DISP_PP_CONTROL 0x429
169#define DC_DISP_PP_SELECT_A 0x42a
170#define DC_DISP_PP_SELECT_B 0x42b
171#define DC_DISP_PP_SELECT_C 0x42c
172#define DC_DISP_PP_SELECT_D 0x42d
173
174#define PULSE_MODE_NORMAL (0 << 3)
175#define PULSE_MODE_ONE_CLOCK (1 << 3)
176#define PULSE_POLARITY_HIGH (0 << 4)
177#define PULSE_POLARITY_LOW (1 << 4)
178#define PULSE_QUAL_ALWAYS (0 << 6)
179#define PULSE_QUAL_VACTIVE (2 << 6)
180#define PULSE_QUAL_VACTIVE1 (3 << 6)
181#define PULSE_LAST_START_A (0 << 8)
182#define PULSE_LAST_END_A (1 << 8)
183#define PULSE_LAST_START_B (2 << 8)
184#define PULSE_LAST_END_B (3 << 8)
185#define PULSE_LAST_START_C (4 << 8)
186#define PULSE_LAST_END_C (5 << 8)
187#define PULSE_LAST_START_D (6 << 8)
188#define PULSE_LAST_END_D (7 << 8)
189
190#define PULSE_START(x) (((x) & 0xfff) << 0)
191#define PULSE_END(x) (((x) & 0xfff) << 16)
192
193#define DC_DISP_DISP_CLOCK_CONTROL 0x42e
194#define PIXEL_CLK_DIVIDER_PCD1 (0 << 8)
195#define PIXEL_CLK_DIVIDER_PCD1H (1 << 8)
196#define PIXEL_CLK_DIVIDER_PCD2 (2 << 8)
197#define PIXEL_CLK_DIVIDER_PCD3 (3 << 8)
198#define PIXEL_CLK_DIVIDER_PCD4 (4 << 8)
199#define PIXEL_CLK_DIVIDER_PCD6 (5 << 8)
200#define PIXEL_CLK_DIVIDER_PCD8 (6 << 8)
201#define PIXEL_CLK_DIVIDER_PCD9 (7 << 8)
202#define PIXEL_CLK_DIVIDER_PCD12 (8 << 8)
203#define PIXEL_CLK_DIVIDER_PCD16 (9 << 8)
204#define PIXEL_CLK_DIVIDER_PCD18 (10 << 8)
205#define PIXEL_CLK_DIVIDER_PCD24 (11 << 8)
206#define PIXEL_CLK_DIVIDER_PCD13 (12 << 8)
207#define SHIFT_CLK_DIVIDER(x) ((x) & 0xff)
208
209#define DC_DISP_DISP_INTERFACE_CONTROL 0x42f
210#define DISP_DATA_FORMAT_DF1P1C (0 << 0)
211#define DISP_DATA_FORMAT_DF1P2C24B (1 << 0)
212#define DISP_DATA_FORMAT_DF1P2C18B (2 << 0)
213#define DISP_DATA_FORMAT_DF1P2C16B (3 << 0)
214#define DISP_DATA_FORMAT_DF2S (4 << 0)
215#define DISP_DATA_FORMAT_DF3S (5 << 0)
216#define DISP_DATA_FORMAT_DFSPI (6 << 0)
217#define DISP_DATA_FORMAT_DF1P3C24B (7 << 0)
218#define DISP_DATA_FORMAT_DF1P3C18B (8 << 0)
219#define DISP_ALIGNMENT_MSB (0 << 8)
220#define DISP_ALIGNMENT_LSB (1 << 8)
221#define DISP_ORDER_RED_BLUE (0 << 9)
222#define DISP_ORDER_BLUE_RED (1 << 9)
223
224#define DC_DISP_DISP_COLOR_CONTROL 0x430
225#define BASE_COLOR_SIZE666 (0 << 0)
226#define BASE_COLOR_SIZE111 (1 << 0)
227#define BASE_COLOR_SIZE222 (2 << 0)
228#define BASE_COLOR_SIZE333 (3 << 0)
229#define BASE_COLOR_SIZE444 (4 << 0)
230#define BASE_COLOR_SIZE555 (5 << 0)
231#define BASE_COLOR_SIZE565 (6 << 0)
232#define BASE_COLOR_SIZE332 (7 << 0)
233#define BASE_COLOR_SIZE888 (8 << 0)
234#define DITHER_CONTROL_DISABLE (0 << 8)
235#define DITHER_CONTROL_ORDERED (2 << 8)
236#define DITHER_CONTROL_ERRDIFF (3 << 8)
237
238#define DC_DISP_SHIFT_CLOCK_OPTIONS 0x431
239
240#define DC_DISP_DATA_ENABLE_OPTIONS 0x432
241#define DE_SELECT_ACTIVE_BLANK (0 << 0)
242#define DE_SELECT_ACTIVE (1 << 0)
243#define DE_SELECT_ACTIVE_IS (2 << 0)
244#define DE_CONTROL_ONECLK (0 << 2)
245#define DE_CONTROL_NORMAL (1 << 2)
246#define DE_CONTROL_EARLY_EXT (2 << 2)
247#define DE_CONTROL_EARLY (3 << 2)
248#define DE_CONTROL_ACTIVE_BLANK (4 << 2)
249
250#define DC_DISP_SERIAL_INTERFACE_OPTIONS 0x433
251#define DC_DISP_LCD_SPI_OPTIONS 0x434
252#define DC_DISP_BORDER_COLOR 0x435
253#define DC_DISP_COLOR_KEY0_LOWER 0x436
254#define DC_DISP_COLOR_KEY0_UPPER 0x437
255#define DC_DISP_COLOR_KEY1_LOWER 0x438
256#define DC_DISP_COLOR_KEY1_UPPER 0x439
257
258#define DC_DISP_CURSOR_FOREGROUND 0x43c
259#define DC_DISP_CURSOR_BACKGROUND 0x43d
260
261#define DC_DISP_CURSOR_START_ADDR 0x43e
262#define DC_DISP_CURSOR_START_ADDR_NS 0x43f
263
264#define DC_DISP_CURSOR_POSITION 0x440
265#define DC_DISP_CURSOR_POSITION_NS 0x441
266
267#define DC_DISP_INIT_SEQ_CONTROL 0x442
268#define DC_DISP_SPI_INIT_SEQ_DATA_A 0x443
269#define DC_DISP_SPI_INIT_SEQ_DATA_B 0x444
270#define DC_DISP_SPI_INIT_SEQ_DATA_C 0x445
271#define DC_DISP_SPI_INIT_SEQ_DATA_D 0x446
272
273#define DC_DISP_DC_MCCIF_FIFOCTRL 0x480
274#define DC_DISP_MCCIF_DISPLAY0A_HYST 0x481
275#define DC_DISP_MCCIF_DISPLAY0B_HYST 0x482
276#define DC_DISP_MCCIF_DISPLAY1A_HYST 0x483
277#define DC_DISP_MCCIF_DISPLAY1B_HYST 0x484
278
279#define DC_DISP_DAC_CRT_CTRL 0x4c0
280#define DC_DISP_DISP_MISC_CONTROL 0x4c1
281#define DC_DISP_SD_CONTROL 0x4c2
282#define DC_DISP_SD_CSC_COEFF 0x4c3
283#define DC_DISP_SD_LUT(x) (0x4c4 + (x))
284#define DC_DISP_SD_FLICKER_CONTROL 0x4cd
285#define DC_DISP_DC_PIXEL_COUNT 0x4ce
286#define DC_DISP_SD_HISTOGRAM(x) (0x4cf + (x))
287#define DC_DISP_SD_BL_PARAMETERS 0x4d7
288#define DC_DISP_SD_BL_TF(x) (0x4d8 + (x))
289#define DC_DISP_SD_BL_CONTROL 0x4dc
290#define DC_DISP_SD_HW_K_VALUES 0x4dd
291#define DC_DISP_SD_MAN_K_VALUES 0x4de
292
293#define DC_WIN_WIN_OPTIONS 0x700
294#define COLOR_EXPAND (1 << 6)
295#define WIN_ENABLE (1 << 30)
296
297#define DC_WIN_BYTE_SWAP 0x701
298#define BYTE_SWAP_NOSWAP (0 << 0)
299#define BYTE_SWAP_SWAP2 (1 << 0)
300#define BYTE_SWAP_SWAP4 (2 << 0)
301#define BYTE_SWAP_SWAP4HW (3 << 0)
302
303#define DC_WIN_BUFFER_CONTROL 0x702
304#define BUFFER_CONTROL_HOST (0 << 0)
305#define BUFFER_CONTROL_VI (1 << 0)
306#define BUFFER_CONTROL_EPP (2 << 0)
307#define BUFFER_CONTROL_MPEGE (3 << 0)
308#define BUFFER_CONTROL_SB2D (4 << 0)
309
310#define DC_WIN_COLOR_DEPTH 0x703
311#define WIN_COLOR_DEPTH_P1 0
312#define WIN_COLOR_DEPTH_P2 1
313#define WIN_COLOR_DEPTH_P4 2
314#define WIN_COLOR_DEPTH_P8 3
315#define WIN_COLOR_DEPTH_B4G4R4A4 4
316#define WIN_COLOR_DEPTH_B5G5R5A 5
317#define WIN_COLOR_DEPTH_B5G6R5 6
318#define WIN_COLOR_DEPTH_AB5G5R5 7
319#define WIN_COLOR_DEPTH_B8G8R8A8 12
320#define WIN_COLOR_DEPTH_R8G8B8A8 13
321#define WIN_COLOR_DEPTH_B6x2G6x2R6x2A8 14
322#define WIN_COLOR_DEPTH_R6x2G6x2B6x2A8 15
323#define WIN_COLOR_DEPTH_YCbCr422 16
324#define WIN_COLOR_DEPTH_YUV422 17
325#define WIN_COLOR_DEPTH_YCbCr420P 18
326#define WIN_COLOR_DEPTH_YUV420P 19
327#define WIN_COLOR_DEPTH_YCbCr422P 20
328#define WIN_COLOR_DEPTH_YUV422P 21
329#define WIN_COLOR_DEPTH_YCbCr422R 22
330#define WIN_COLOR_DEPTH_YUV422R 23
331#define WIN_COLOR_DEPTH_YCbCr422RA 24
332#define WIN_COLOR_DEPTH_YUV422RA 25
333
334#define DC_WIN_POSITION 0x704
335#define H_POSITION(x) (((x) & 0x1fff) << 0)
336#define V_POSITION(x) (((x) & 0x1fff) << 16)
337
338#define DC_WIN_SIZE 0x705
339#define H_SIZE(x) (((x) & 0x1fff) << 0)
340#define V_SIZE(x) (((x) & 0x1fff) << 16)
341
342#define DC_WIN_PRESCALED_SIZE 0x706
343#define H_PRESCALED_SIZE(x) (((x) & 0x7fff) << 0)
344#define V_PRESCALED_SIZE(x) (((x) & 0x1fff) << 16)
345
346#define DC_WIN_H_INITIAL_DDA 0x707
347#define DC_WIN_V_INITIAL_DDA 0x708
348#define DC_WIN_DDA_INC 0x709
349#define H_DDA_INC(x) (((x) & 0xffff) << 0)
350#define V_DDA_INC(x) (((x) & 0xffff) << 16)
351
352#define DC_WIN_LINE_STRIDE 0x70a
353#define DC_WIN_BUF_STRIDE 0x70b
354#define DC_WIN_UV_BUF_STRIDE 0x70c
355#define DC_WIN_BUFFER_ADDR_MODE 0x70d
356#define DC_WIN_DV_CONTROL 0x70e
357
358#define DC_WIN_BLEND_NOKEY 0x70f
359#define DC_WIN_BLEND_1WIN 0x710
360#define DC_WIN_BLEND_2WIN_X 0x711
361#define DC_WIN_BLEND_2WIN_Y 0x712
362#define DC_WIN_BLEND_32WIN_XY 0x713
363
364#define DC_WIN_HP_FETCH_CONTROL 0x714
365
366#define DC_WINBUF_START_ADDR 0x800
367#define DC_WINBUF_START_ADDR_NS 0x801
368#define DC_WINBUF_START_ADDR_U 0x802
369#define DC_WINBUF_START_ADDR_U_NS 0x803
370#define DC_WINBUF_START_ADDR_V 0x804
371#define DC_WINBUF_START_ADDR_V_NS 0x805
372
373#define DC_WINBUF_ADDR_H_OFFSET 0x806
374#define DC_WINBUF_ADDR_H_OFFSET_NS 0x807
375#define DC_WINBUF_ADDR_V_OFFSET 0x808
376#define DC_WINBUF_ADDR_V_OFFSET_NS 0x809
377
378#define DC_WINBUF_UFLOW_STATUS 0x80a
379
380#define DC_WINBUF_AD_UFLOW_STATUS 0xbca
381#define DC_WINBUF_BD_UFLOW_STATUS 0xdca
382#define DC_WINBUF_CD_UFLOW_STATUS 0xfca
383
384/* synchronization points */
385#define SYNCPT_VBLANK0 26
386#define SYNCPT_VBLANK1 27
387
388#endif /* TEGRA_DC_H */
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
new file mode 100644
index 000000000000..3a503c9e4686
--- /dev/null
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -0,0 +1,115 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/module.h>
11#include <linux/of_address.h>
12#include <linux/of_platform.h>
13
14#include <mach/clk.h>
15#include <linux/dma-mapping.h>
16#include <asm/dma-iommu.h>
17
18#include "drm.h"
19
20#define DRIVER_NAME "tegra"
21#define DRIVER_DESC "NVIDIA Tegra graphics"
22#define DRIVER_DATE "20120330"
23#define DRIVER_MAJOR 0
24#define DRIVER_MINOR 0
25#define DRIVER_PATCHLEVEL 0
26
27static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
28{
29 struct device *dev = drm->dev;
30 struct host1x *host1x;
31 int err;
32
33 host1x = dev_get_drvdata(dev);
34 drm->dev_private = host1x;
35 host1x->drm = drm;
36
37 drm_mode_config_init(drm);
38
39 err = host1x_drm_init(host1x, drm);
40 if (err < 0)
41 return err;
42
43 err = tegra_drm_fb_init(drm);
44 if (err < 0)
45 return err;
46
47 drm_kms_helper_poll_init(drm);
48
49 return 0;
50}
51
52static int tegra_drm_unload(struct drm_device *drm)
53{
54 drm_kms_helper_poll_fini(drm);
55 tegra_drm_fb_exit(drm);
56
57 drm_mode_config_cleanup(drm);
58
59 return 0;
60}
61
62static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
63{
64 return 0;
65}
66
67static void tegra_drm_lastclose(struct drm_device *drm)
68{
69 struct host1x *host1x = drm->dev_private;
70
71 drm_fbdev_cma_restore_mode(host1x->fbdev);
72}
73
74static struct drm_ioctl_desc tegra_drm_ioctls[] = {
75};
76
77static const struct file_operations tegra_drm_fops = {
78 .owner = THIS_MODULE,
79 .open = drm_open,
80 .release = drm_release,
81 .unlocked_ioctl = drm_ioctl,
82 .mmap = drm_gem_cma_mmap,
83 .poll = drm_poll,
84 .fasync = drm_fasync,
85 .read = drm_read,
86#ifdef CONFIG_COMPAT
87 .compat_ioctl = drm_compat_ioctl,
88#endif
89 .llseek = noop_llseek,
90};
91
92struct drm_driver tegra_drm_driver = {
93 .driver_features = DRIVER_BUS_PLATFORM | DRIVER_MODESET | DRIVER_GEM,
94 .load = tegra_drm_load,
95 .unload = tegra_drm_unload,
96 .open = tegra_drm_open,
97 .lastclose = tegra_drm_lastclose,
98
99 .gem_free_object = drm_gem_cma_free_object,
100 .gem_vm_ops = &drm_gem_cma_vm_ops,
101 .dumb_create = drm_gem_cma_dumb_create,
102 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
103 .dumb_destroy = drm_gem_cma_dumb_destroy,
104
105 .ioctls = tegra_drm_ioctls,
106 .num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
107 .fops = &tegra_drm_fops,
108
109 .name = DRIVER_NAME,
110 .desc = DRIVER_DESC,
111 .date = DRIVER_DATE,
112 .major = DRIVER_MAJOR,
113 .minor = DRIVER_MINOR,
114 .patchlevel = DRIVER_PATCHLEVEL,
115};
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
new file mode 100644
index 000000000000..3a843a77ddc7
--- /dev/null
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -0,0 +1,234 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#ifndef TEGRA_DRM_H
11#define TEGRA_DRM_H 1
12
13#include <drm/drmP.h>
14#include <drm/drm_crtc_helper.h>
15#include <drm/drm_edid.h>
16#include <drm/drm_fb_helper.h>
17#include <drm/drm_gem_cma_helper.h>
18#include <drm/drm_fb_cma_helper.h>
19#include <drm/drm_fixed.h>
20
21struct tegra_framebuffer {
22 struct drm_framebuffer base;
23 struct drm_gem_cma_object *obj;
24};
25
26static inline struct tegra_framebuffer *to_tegra_fb(struct drm_framebuffer *fb)
27{
28 return container_of(fb, struct tegra_framebuffer, base);
29}
30
31struct host1x {
32 struct drm_device *drm;
33 struct device *dev;
34 void __iomem *regs;
35 struct clk *clk;
36 int syncpt;
37 int irq;
38
39 struct mutex drm_clients_lock;
40 struct list_head drm_clients;
41 struct list_head drm_active;
42
43 struct mutex clients_lock;
44 struct list_head clients;
45
46 struct drm_fbdev_cma *fbdev;
47 struct tegra_framebuffer fb;
48};
49
50struct host1x_client;
51
52struct host1x_client_ops {
53 int (*drm_init)(struct host1x_client *client, struct drm_device *drm);
54 int (*drm_exit)(struct host1x_client *client);
55};
56
57struct host1x_client {
58 struct host1x *host1x;
59 struct device *dev;
60
61 const struct host1x_client_ops *ops;
62
63 struct list_head list;
64};
65
66extern int host1x_drm_init(struct host1x *host1x, struct drm_device *drm);
67extern int host1x_drm_exit(struct host1x *host1x);
68
69extern int host1x_register_client(struct host1x *host1x,
70 struct host1x_client *client);
71extern int host1x_unregister_client(struct host1x *host1x,
72 struct host1x_client *client);
73
74struct tegra_output;
75
76struct tegra_dc {
77 struct host1x_client client;
78
79 struct host1x *host1x;
80 struct device *dev;
81
82 struct drm_crtc base;
83 int pipe;
84
85 struct clk *clk;
86
87 void __iomem *regs;
88 int irq;
89
90 struct tegra_output *rgb;
91
92 struct list_head list;
93
94 struct drm_info_list *debugfs_files;
95 struct drm_minor *minor;
96 struct dentry *debugfs;
97};
98
99static inline struct tegra_dc *host1x_client_to_dc(struct host1x_client *client)
100{
101 return container_of(client, struct tegra_dc, client);
102}
103
104static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc)
105{
106 return container_of(crtc, struct tegra_dc, base);
107}
108
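/*
 * Display controller registers are addressed as 32-bit word offsets
 * (matching the DC_* definitions in dc.h), so the accessors below
 * shift the offset left by 2 to form the byte offset.
 */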
109static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value,
110 unsigned long reg)
111{
112 writel(value, dc->regs + (reg << 2));
113}
114
115static inline unsigned long tegra_dc_readl(struct tegra_dc *dc,
116 unsigned long reg)
117{
118 return readl(dc->regs + (reg << 2));
119}
120
121struct tegra_output_ops {
122 int (*enable)(struct tegra_output *output);
123 int (*disable)(struct tegra_output *output);
124 int (*setup_clock)(struct tegra_output *output, struct clk *clk,
125 unsigned long pclk);
126 int (*check_mode)(struct tegra_output *output,
127 struct drm_display_mode *mode,
128 enum drm_mode_status *status);
129};
130
131enum tegra_output_type {
132 TEGRA_OUTPUT_RGB,
133 TEGRA_OUTPUT_HDMI,
134};
135
136struct tegra_output {
137 struct device_node *of_node;
138 struct device *dev;
139
140 const struct tegra_output_ops *ops;
141 enum tegra_output_type type;
142
143 struct i2c_adapter *ddc;
144 const struct edid *edid;
145 unsigned int hpd_irq;
146 int hpd_gpio;
147
148 struct drm_encoder encoder;
149 struct drm_connector connector;
150};
151
152static inline struct tegra_output *encoder_to_output(struct drm_encoder *e)
153{
154 return container_of(e, struct tegra_output, encoder);
155}
156
157static inline struct tegra_output *connector_to_output(struct drm_connector *c)
158{
159 return container_of(c, struct tegra_output, connector);
160}
161
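/*
 * The wrappers below share one error convention: -EINVAL if no output
 * is given at all, -ENOSYS if the output exists but does not implement
 * the requested operation.
 */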
162static inline int tegra_output_enable(struct tegra_output *output)
163{
164 if (output && output->ops && output->ops->enable)
165 return output->ops->enable(output);
166
167 return output ? -ENOSYS : -EINVAL;
168}
169
170static inline int tegra_output_disable(struct tegra_output *output)
171{
172 if (output && output->ops && output->ops->disable)
173 return output->ops->disable(output);
174
175 return output ? -ENOSYS : -EINVAL;
176}
177
178static inline int tegra_output_setup_clock(struct tegra_output *output,
179 struct clk *clk, unsigned long pclk)
180{
181 if (output && output->ops && output->ops->setup_clock)
182 return output->ops->setup_clock(output, clk, pclk);
183
184 return output ? -ENOSYS : -EINVAL;
185}
186
187static inline int tegra_output_check_mode(struct tegra_output *output,
188 struct drm_display_mode *mode,
189 enum drm_mode_status *status)
190{
191 if (output && output->ops && output->ops->check_mode)
192 return output->ops->check_mode(output, mode, status);
193
194 return output ? -ENOSYS : -EINVAL;
195}
196
197/* from rgb.c */
198extern int tegra_dc_rgb_probe(struct tegra_dc *dc);
199extern int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc);
200extern int tegra_dc_rgb_exit(struct tegra_dc *dc);
201
202/* from output.c */
203extern int tegra_output_parse_dt(struct tegra_output *output);
204extern int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
205extern int tegra_output_exit(struct tegra_output *output);
206
207/* from gem.c */
208extern struct tegra_gem_object *tegra_gem_alloc(struct drm_device *drm,
209 size_t size);
210extern int tegra_gem_handle_create(struct drm_device *drm,
211 struct drm_file *file, size_t size,
212 unsigned long flags, uint32_t *handle);
213extern int tegra_gem_dumb_create(struct drm_file *file, struct drm_device *drm,
214 struct drm_mode_create_dumb *args);
215extern int tegra_gem_dumb_map_offset(struct drm_file *file,
216 struct drm_device *drm, uint32_t handle,
217 uint64_t *offset);
218extern int tegra_gem_dumb_destroy(struct drm_file *file,
219 struct drm_device *drm, uint32_t handle);
220extern int tegra_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
221extern int tegra_gem_init_object(struct drm_gem_object *obj);
222extern void tegra_gem_free_object(struct drm_gem_object *obj);
223extern struct vm_operations_struct tegra_gem_vm_ops;
224
225/* from fb.c */
226extern int tegra_drm_fb_init(struct drm_device *drm);
227extern void tegra_drm_fb_exit(struct drm_device *drm);
228
229extern struct platform_driver tegra_host1x_driver;
230extern struct platform_driver tegra_hdmi_driver;
231extern struct platform_driver tegra_dc_driver;
232extern struct drm_driver tegra_drm_driver;
233
234#endif /* TEGRA_DRM_H */
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
new file mode 100644
index 000000000000..97993c6835fd
--- /dev/null
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -0,0 +1,56 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include "drm.h"
11
12static void tegra_drm_fb_output_poll_changed(struct drm_device *drm)
13{
14 struct host1x *host1x = drm->dev_private;
15
16 drm_fbdev_cma_hotplug_event(host1x->fbdev);
17}
18
19static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
20 .fb_create = drm_fb_cma_create,
21 .output_poll_changed = tegra_drm_fb_output_poll_changed,
22};
23
24int tegra_drm_fb_init(struct drm_device *drm)
25{
26 struct host1x *host1x = drm->dev_private;
27 struct drm_fbdev_cma *fbdev;
28
29 drm->mode_config.min_width = 0;
30 drm->mode_config.min_height = 0;
31
32 drm->mode_config.max_width = 4096;
33 drm->mode_config.max_height = 4096;
34
35 drm->mode_config.funcs = &tegra_drm_mode_funcs;
36
37 fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
38 drm->mode_config.num_connector);
39 if (IS_ERR(fbdev))
40 return PTR_ERR(fbdev);
41
42#ifndef CONFIG_FRAMEBUFFER_CONSOLE
43 drm_fbdev_cma_restore_mode(fbdev);
44#endif
45
46 host1x->fbdev = fbdev;
47
48 return 0;
49}
50
51void tegra_drm_fb_exit(struct drm_device *drm)
52{
53 struct host1x *host1x = drm->dev_private;
54
55 drm_fbdev_cma_fini(host1x->fbdev);
56}
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
new file mode 100644
index 000000000000..ab4016412bbf
--- /dev/null
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -0,0 +1,1334 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/clk.h>
11#include <linux/debugfs.h>
12#include <linux/gpio.h>
13#include <linux/module.h>
14#include <linux/of.h>
15#include <linux/platform_device.h>
16#include <linux/regulator/consumer.h>
17
18#include <mach/clk.h>
19
20#include "hdmi.h"
21#include "drm.h"
22#include "dc.h"
23
24struct tegra_hdmi {
25 struct host1x_client client;
26 struct tegra_output output;
27 struct device *dev;
28
29 struct regulator *vdd;
30 struct regulator *pll;
31
32 void __iomem *regs;
33 unsigned int irq;
34
35 struct clk *clk_parent;
36 struct clk *clk;
37
38 unsigned int audio_source;
39 unsigned int audio_freq;
40 bool stereo;
41 bool dvi;
42
43 struct drm_info_list *debugfs_files;
44 struct drm_minor *minor;
45 struct dentry *debugfs;
46};
47
48static inline struct tegra_hdmi *
49host1x_client_to_hdmi(struct host1x_client *client)
50{
51 return container_of(client, struct tegra_hdmi, client);
52}
53
54static inline struct tegra_hdmi *to_hdmi(struct tegra_output *output)
55{
56 return container_of(output, struct tegra_hdmi, output);
57}
58
59#define HDMI_AUDIOCLK_FREQ 216000000
60#define HDMI_REKEY_DEFAULT 56
61
62enum {
63 AUTO = 0,
64 SPDIF,
65 HDA,
66};
67
68static inline unsigned long tegra_hdmi_readl(struct tegra_hdmi *hdmi,
69 unsigned long reg)
70{
71 return readl(hdmi->regs + (reg << 2));
72}
73
74static inline void tegra_hdmi_writel(struct tegra_hdmi *hdmi, unsigned long val,
75 unsigned long reg)
76{
77 writel(val, hdmi->regs + (reg << 2));
78}
79
80struct tegra_hdmi_audio_config {
81 unsigned int pclk;
82 unsigned int n;
83 unsigned int cts;
84 unsigned int aval;
85};
86
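/*
 * Audio Clock Regeneration parameters per pixel clock. N and CTS
 * satisfy the HDMI ACR relation 128 * fs = pclk * N / CTS; for
 * example, 48 kHz at a 25.2 MHz pixel clock gives
 * 25200000 * 6144 / 25200 = 6144000 = 128 * 48000. The aval field is
 * what later gets written to the SOR_AUDIO_AVAL registers on Tegra30.
 */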
87static const struct tegra_hdmi_audio_config tegra_hdmi_audio_32k[] = {
88 { 25200000, 4096, 25200, 24000 },
89 { 27000000, 4096, 27000, 24000 },
90 { 74250000, 4096, 74250, 24000 },
91 { 148500000, 4096, 148500, 24000 },
92 { 0, 0, 0, 0 },
93};
94
95static const struct tegra_hdmi_audio_config tegra_hdmi_audio_44_1k[] = {
96 { 25200000, 5880, 26250, 25000 },
97 { 27000000, 5880, 28125, 25000 },
98 { 74250000, 4704, 61875, 20000 },
99 { 148500000, 4704, 123750, 20000 },
100 { 0, 0, 0, 0 },
101};
102
103static const struct tegra_hdmi_audio_config tegra_hdmi_audio_48k[] = {
104 { 25200000, 6144, 25200, 24000 },
105 { 27000000, 6144, 27000, 24000 },
106 { 74250000, 6144, 74250, 24000 },
107 { 148500000, 6144, 148500, 24000 },
108 { 0, 0, 0, 0 },
109};
110
111static const struct tegra_hdmi_audio_config tegra_hdmi_audio_88_2k[] = {
112 { 25200000, 11760, 26250, 25000 },
113 { 27000000, 11760, 28125, 25000 },
114 { 74250000, 9408, 61875, 20000 },
115 { 148500000, 9408, 123750, 20000 },
116 { 0, 0, 0, 0 },
117};
118
119static const struct tegra_hdmi_audio_config tegra_hdmi_audio_96k[] = {
120 { 25200000, 12288, 25200, 24000 },
121 { 27000000, 12288, 27000, 24000 },
122 { 74250000, 12288, 74250, 24000 },
123 { 148500000, 12288, 148500, 24000 },
124 { 0, 0, 0, 0 },
125};
126
127static const struct tegra_hdmi_audio_config tegra_hdmi_audio_176_4k[] = {
128 { 25200000, 23520, 26250, 25000 },
129 { 27000000, 23520, 28125, 25000 },
130 { 74250000, 18816, 61875, 20000 },
131 { 148500000, 18816, 123750, 20000 },
132 { 0, 0, 0, 0 },
133};
134
135static const struct tegra_hdmi_audio_config tegra_hdmi_audio_192k[] = {
136 { 25200000, 24576, 25200, 24000 },
137 { 27000000, 24576, 27000, 24000 },
138 { 74250000, 24576, 74250, 24000 },
139 { 148500000, 24576, 148500, 24000 },
140 { 0, 0, 0, 0 },
141};
142
143struct tmds_config {
144 unsigned int pclk;
145 u32 pll0;
146 u32 pll1;
147 u32 pe_current;
148 u32 drive_current;
149};
150
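/*
 * Per-SoC TMDS configurations, ordered by ascending pixel clock; the
 * final UINT_MAX entry is a catch-all for 1080p-class modes. Each
 * entry tunes the SOR PLL settings, pre-emphasis and lane drive
 * currents for its pixel clock range.
 */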
151static const struct tmds_config tegra2_tmds_config[] = {
152 { /* 480p modes */
153 .pclk = 27000000,
154 .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
155 SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) |
156 SOR_PLL_TX_REG_LOAD(3),
157 .pll1 = SOR_PLL_TMDS_TERM_ENABLE,
158 .pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) |
159 PE_CURRENT1(PE_CURRENT_0_0_mA) |
160 PE_CURRENT2(PE_CURRENT_0_0_mA) |
161 PE_CURRENT3(PE_CURRENT_0_0_mA),
162 .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
163 DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
164 DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
165 DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
166 }, { /* 720p modes */
167 .pclk = 74250000,
168 .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
169 SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) |
170 SOR_PLL_TX_REG_LOAD(3),
171 .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
172 .pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) |
173 PE_CURRENT1(PE_CURRENT_6_0_mA) |
174 PE_CURRENT2(PE_CURRENT_6_0_mA) |
175 PE_CURRENT3(PE_CURRENT_6_0_mA),
176 .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
177 DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
178 DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
179 DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
180 }, { /* 1080p modes */
181 .pclk = UINT_MAX,
182 .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
183 SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) |
184 SOR_PLL_TX_REG_LOAD(3),
185 .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
186 .pe_current = PE_CURRENT0(PE_CURRENT_6_0_mA) |
187 PE_CURRENT1(PE_CURRENT_6_0_mA) |
188 PE_CURRENT2(PE_CURRENT_6_0_mA) |
189 PE_CURRENT3(PE_CURRENT_6_0_mA),
190 .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_7_125_mA) |
191 DRIVE_CURRENT_LANE1(DRIVE_CURRENT_7_125_mA) |
192 DRIVE_CURRENT_LANE2(DRIVE_CURRENT_7_125_mA) |
193 DRIVE_CURRENT_LANE3(DRIVE_CURRENT_7_125_mA),
194 },
195};
196
197static const struct tmds_config tegra3_tmds_config[] = {
198 { /* 480p modes */
199 .pclk = 27000000,
200 .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
201 SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(0) |
202 SOR_PLL_TX_REG_LOAD(0),
203 .pll1 = SOR_PLL_TMDS_TERM_ENABLE,
204 .pe_current = PE_CURRENT0(PE_CURRENT_0_0_mA) |
205 PE_CURRENT1(PE_CURRENT_0_0_mA) |
206 PE_CURRENT2(PE_CURRENT_0_0_mA) |
207 PE_CURRENT3(PE_CURRENT_0_0_mA),
208 .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
209 DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
210 DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
211 DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
212 }, { /* 720p modes */
213 .pclk = 74250000,
214 .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
215 SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(1) |
216 SOR_PLL_TX_REG_LOAD(0),
217 .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
218 .pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) |
219 PE_CURRENT1(PE_CURRENT_5_0_mA) |
220 PE_CURRENT2(PE_CURRENT_5_0_mA) |
221 PE_CURRENT3(PE_CURRENT_5_0_mA),
222 .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
223 DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
224 DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
225 DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
226 }, { /* 1080p modes */
227 .pclk = UINT_MAX,
228 .pll0 = SOR_PLL_BG_V17_S(3) | SOR_PLL_ICHPMP(1) |
229 SOR_PLL_RESISTORSEL | SOR_PLL_VCOCAP(3) |
230 SOR_PLL_TX_REG_LOAD(0),
231 .pll1 = SOR_PLL_TMDS_TERM_ENABLE | SOR_PLL_PE_EN,
232 .pe_current = PE_CURRENT0(PE_CURRENT_5_0_mA) |
233 PE_CURRENT1(PE_CURRENT_5_0_mA) |
234 PE_CURRENT2(PE_CURRENT_5_0_mA) |
235 PE_CURRENT3(PE_CURRENT_5_0_mA),
236 .drive_current = DRIVE_CURRENT_LANE0(DRIVE_CURRENT_5_250_mA) |
237 DRIVE_CURRENT_LANE1(DRIVE_CURRENT_5_250_mA) |
238 DRIVE_CURRENT_LANE2(DRIVE_CURRENT_5_250_mA) |
239 DRIVE_CURRENT_LANE3(DRIVE_CURRENT_5_250_mA),
240 },
241};
242
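/*
 * Look up the ACR parameters for a given audio sample rate and pixel
 * clock; each table ends in an all-zero sentinel entry, so the search
 * below terminates when table->pclk reaches 0.
 */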
243static const struct tegra_hdmi_audio_config *
244tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk)
245{
246 const struct tegra_hdmi_audio_config *table;
247
248 switch (audio_freq) {
249 case 32000:
250 table = tegra_hdmi_audio_32k;
251 break;
252
253 case 44100:
254 table = tegra_hdmi_audio_44_1k;
255 break;
256
257 case 48000:
258 table = tegra_hdmi_audio_48k;
259 break;
260
261 case 88200:
262 table = tegra_hdmi_audio_88_2k;
263 break;
264
265 case 96000:
266 table = tegra_hdmi_audio_96k;
267 break;
268
269 case 176400:
270 table = tegra_hdmi_audio_176_4k;
271 break;
272
273 case 192000:
274 table = tegra_hdmi_audio_192k;
275 break;
276
277 default:
278 return NULL;
279 }
280
281 while (table->pclk) {
282 if (table->pclk == pclk)
283 return table;
284
285 table++;
286 }
287
288 return NULL;
289}
290
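/*
 * Program the AUDIO_FS windows: eight_half is the number of 216 MHz
 * audio clock cycles expected over eight periods of the 128 * fs
 * sample clock, and AUDIO_FS_LOW/HIGH bracket that count with a
 * frequency-dependent tolerance (delta), presumably forming a lock
 * window for the audio clock recovery logic.
 */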
291static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi)
292{
293 const unsigned int freqs[] = {
294 32000, 44100, 48000, 88200, 96000, 176400, 192000
295 };
296 unsigned int i;
297
298 for (i = 0; i < ARRAY_SIZE(freqs); i++) {
299 unsigned int f = freqs[i];
300 unsigned int eight_half;
301 unsigned long value;
302 unsigned int delta;
303
304 if (f > 96000)
305 delta = 2;
306 else if (f > 48000)
307 delta = 6;
308 else
309 delta = 9;
310
311 eight_half = (8 * HDMI_AUDIOCLK_FREQ) / (f * 128);
312 value = AUDIO_FS_LOW(eight_half - delta) |
313 AUDIO_FS_HIGH(eight_half + delta);
314 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_FS(i));
315 }
316}
317
318static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk)
319{
320 struct device_node *node = hdmi->dev->of_node;
321 const struct tegra_hdmi_audio_config *config;
322 unsigned int offset = 0;
323 unsigned long value;
324
325 switch (hdmi->audio_source) {
326 case HDA:
327 value = AUDIO_CNTRL0_SOURCE_SELECT_HDAL;
328 break;
329
330 case SPDIF:
331 value = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
332 break;
333
334 default:
335 value = AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
336 break;
337 }
338
339 if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
340 value |= AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
341 AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0);
342 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
343 } else {
344 value |= AUDIO_CNTRL0_INJECT_NULLSMPL;
345 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
346
347 value = AUDIO_CNTRL0_ERROR_TOLERANCE(6) |
348 AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0);
349 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
350 }
351
352 config = tegra_hdmi_get_audio_config(hdmi->audio_freq, pclk);
353 if (!config) {
354 dev_err(hdmi->dev, "cannot set audio to %u at %u pclk\n",
355 hdmi->audio_freq, pclk);
356 return -EINVAL;
357 }
358
359 tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_HDMI_ACR_CTRL);
360
361 value = AUDIO_N_RESETF | AUDIO_N_GENERATE_ALTERNATE |
362 AUDIO_N_VALUE(config->n - 1);
363 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N);
364
365 tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config->n) | ACR_ENABLE,
366 HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
367
368 value = ACR_SUBPACK_CTS(config->cts);
369 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
370
371 value = SPARE_HW_CTS | SPARE_FORCE_SW_CTS | SPARE_CTS_RESET_VAL(1);
372 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_SPARE);
373
374 value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_AUDIO_N);
375 value &= ~AUDIO_N_RESETF;
376 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N);
377
378 if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
379 switch (hdmi->audio_freq) {
380 case 32000:
381 offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320;
382 break;
383
384 case 44100:
385 offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441;
386 break;
387
388 case 48000:
389 offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480;
390 break;
391
392 case 88200:
393 offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882;
394 break;
395
396 case 96000:
397 offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960;
398 break;
399
400 case 176400:
401 offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764;
402 break;
403
404 case 192000:
405 offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920;
406 break;
407 }
408
409 tegra_hdmi_writel(hdmi, config->aval, offset);
410 }
411
412 tegra_hdmi_setup_audio_fs_tables(hdmi);
413
414 return 0;
415}
416
417static void tegra_hdmi_write_infopack(struct tegra_hdmi *hdmi,
418 unsigned int offset, u8 type,
419 u8 version, void *data, size_t size)
420{
421 unsigned long value;
422 u8 *ptr = data;
423 u32 subpack[2];
424 size_t i;
425 u8 csum;
426
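	/*
	 * HDMI infoframe checksum rule: the three header bytes (type,
	 * version, length = size - 1) plus all payload bytes, including
	 * the checksum byte itself, must sum to zero modulo 256.
	 */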
427 /* first byte of data is the checksum */
428 csum = type + version + size - 1;
429
430 for (i = 1; i < size; i++)
431 csum += ptr[i];
432
433 ptr[0] = 0x100 - csum;
434
435 value = INFOFRAME_HEADER_TYPE(type) |
436 INFOFRAME_HEADER_VERSION(version) |
437 INFOFRAME_HEADER_LEN(size - 1);
438 tegra_hdmi_writel(hdmi, value, offset);
439
440 /* The audio infoframe only has one set of subpack registers. The HDMI
441 * block pads the rest of the data as per the spec, so fix up the
442 * length before filling in the subpacks.
443 */
444 if (offset == HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER)
445 size = 6;
446
447 /* each subpack is 7 bytes, divided into:
448 * subpack_low - bytes 0 - 3
449 * subpack_high - bytes 4 - 6 (with byte 7 padded to 0x00)
450 */
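	/*
	 * For example, a 14-byte payload lands in four registers: bytes
	 * 0-6 in the subpack pair at offset + 1 and offset + 2, bytes
	 * 7-13 in the pair at offset + 3 and offset + 4.
	 */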
451 for (i = 0; i < size; i++) {
452 size_t index = i % 7;
453
454 if (index == 0)
455 memset(subpack, 0x0, sizeof(subpack));
456
457 ((u8 *)subpack)[index] = ptr[i];
458
459 if (index == 6 || (i + 1 == size)) {
460 unsigned int reg = offset + 1 + (i / 7) * 2;
461
462 tegra_hdmi_writel(hdmi, subpack[0], reg);
463 tegra_hdmi_writel(hdmi, subpack[1], reg + 1);
464 }
465 }
466}
467
468static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
469 struct drm_display_mode *mode)
470{
471 struct hdmi_avi_infoframe frame;
472 unsigned int h_front_porch;
473 unsigned int hsize = 16;
474 unsigned int vsize = 9;
475
476 if (hdmi->dvi) {
477 tegra_hdmi_writel(hdmi, 0,
478 HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
479 return;
480 }
481
482 h_front_porch = mode->hsync_start - mode->hdisplay;
483 memset(&frame, 0, sizeof(frame));
484 frame.r = HDMI_AVI_R_SAME;
485
486 switch (mode->vdisplay) {
487 case 480:
488 if (mode->hdisplay == 640) {
489 frame.m = HDMI_AVI_M_4_3;
490 frame.vic = 1;
491 } else {
492 frame.m = HDMI_AVI_M_16_9;
493 frame.vic = 3;
494 }
495 break;
496
497 case 576:
498 if (((hsize * 10) / vsize) > 14) {
499 frame.m = HDMI_AVI_M_16_9;
500 frame.vic = 18;
501 } else {
502 frame.m = HDMI_AVI_M_4_3;
503 frame.vic = 17;
504 }
505 break;
506
507 case 720:
508 case 1470: /* stereo mode */
509 frame.m = HDMI_AVI_M_16_9;
510
511 if (h_front_porch == 110)
512 frame.vic = 4;
513 else
514 frame.vic = 19;
515 break;
516
517 case 1080:
518 case 2205: /* stereo mode */
519 frame.m = HDMI_AVI_M_16_9;
520
521 switch (h_front_porch) {
522 case 88:
523 frame.vic = 16;
524 break;
525
526 case 528:
527 frame.vic = 31;
528 break;
529
530 default:
531 frame.vic = 32;
532 break;
533 }
534 break;
535
536 default:
537 frame.m = HDMI_AVI_M_16_9;
538 frame.vic = 0;
539 break;
540 }
541
542 tegra_hdmi_write_infopack(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER,
543 HDMI_INFOFRAME_TYPE_AVI, HDMI_AVI_VERSION,
544 &frame, sizeof(frame));
545
546 tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
547 HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
548}
549
550static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
551{
552 struct hdmi_audio_infoframe frame;
553
554 if (hdmi->dvi) {
555 tegra_hdmi_writel(hdmi, 0,
556 HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
557 return;
558 }
559
560 memset(&frame, 0, sizeof(frame));
561 frame.cc = HDMI_AUDIO_CC_2;
562
563 tegra_hdmi_write_infopack(hdmi,
564 HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER,
565 HDMI_INFOFRAME_TYPE_AUDIO,
566 HDMI_AUDIO_VERSION,
567 &frame, sizeof(frame));
568
569 tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE,
570 HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
571}
572
573static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
574{
575 struct hdmi_stereo_infoframe frame;
576 unsigned long value;
577
578 if (!hdmi->stereo) {
579 value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
580 value &= ~GENERIC_CTRL_ENABLE;
581 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
582 return;
583 }
584
585 memset(&frame, 0, sizeof(frame));
586 frame.regid0 = 0x03;
587 frame.regid1 = 0x0c;
588 frame.regid2 = 0x00;
589 frame.hdmi_video_format = 2;
590
591 /* TODO: 74 MHz limit? */
592 if (1) {
593 frame._3d_structure = 0;
594 } else {
595 frame._3d_structure = 8;
596 frame._3d_ext_data = 0;
597 }
598
599 tegra_hdmi_write_infopack(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_HEADER,
600 HDMI_INFOFRAME_TYPE_VENDOR,
601 HDMI_VENDOR_VERSION, &frame, 6);
602
603 value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
604 value |= GENERIC_CTRL_ENABLE;
605 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
606}
607
608static void tegra_hdmi_setup_tmds(struct tegra_hdmi *hdmi,
609 const struct tmds_config *tmds)
610{
611 unsigned long value;
612
613 tegra_hdmi_writel(hdmi, tmds->pll0, HDMI_NV_PDISP_SOR_PLL0);
614 tegra_hdmi_writel(hdmi, tmds->pll1, HDMI_NV_PDISP_SOR_PLL1);
615 tegra_hdmi_writel(hdmi, tmds->pe_current, HDMI_NV_PDISP_PE_CURRENT);
616
617 value = tmds->drive_current | DRIVE_CURRENT_FUSE_OVERRIDE;
618 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
619}
620
621static int tegra_output_hdmi_enable(struct tegra_output *output)
622{
623 unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey;
624 struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
625 struct drm_display_mode *mode = &dc->base.mode;
626 struct tegra_hdmi *hdmi = to_hdmi(output);
627 struct device_node *node = hdmi->dev->of_node;
628 unsigned int pulse_start, div82, pclk;
629 const struct tmds_config *tmds;
630 unsigned int num_tmds;
631 unsigned long value;
632 int retries = 1000;
633 int err;
634
635 pclk = mode->clock * 1000;
636 h_sync_width = mode->hsync_end - mode->hsync_start;
637 h_front_porch = mode->hsync_start - mode->hdisplay;
638 h_back_porch = mode->htotal - mode->hsync_end;
639
640 err = regulator_enable(hdmi->vdd);
641 if (err < 0) {
642 dev_err(hdmi->dev, "failed to enable VDD regulator: %d\n", err);
643 return err;
644 }
645
646 err = regulator_enable(hdmi->pll);
647 if (err < 0) {
648 dev_err(hdmi->dev, "failed to enable PLL regulator: %d\n", err);
649 return err;
650 }
651
652 /*
653 * This assumes that the display controller will divide its parent
654 * clock by 2 to generate the pixel clock.
655 */
656 err = tegra_output_setup_clock(output, hdmi->clk, pclk * 2);
657 if (err < 0) {
658 dev_err(hdmi->dev, "failed to setup clock: %d\n", err);
659 return err;
660 }
661
662 err = clk_set_rate(hdmi->clk, pclk);
663 if (err < 0)
664 return err;
665
666 err = clk_enable(hdmi->clk);
667 if (err < 0) {
668 dev_err(hdmi->dev, "failed to enable clock: %d\n", err);
669 return err;
670 }
671
672 tegra_periph_reset_assert(hdmi->clk);
673 usleep_range(1000, 2000);
674 tegra_periph_reset_deassert(hdmi->clk);
675
676 tegra_dc_writel(dc, VSYNC_H_POSITION(1),
677 DC_DISP_DISP_TIMING_OPTIONS);
678 tegra_dc_writel(dc, DITHER_CONTROL_DISABLE | BASE_COLOR_SIZE888,
679 DC_DISP_DISP_COLOR_CONTROL);
680
681 /* video_preamble uses h_pulse2 */
682 pulse_start = 1 + h_sync_width + h_back_porch - 10;
683
684 tegra_dc_writel(dc, H_PULSE_2_ENABLE, DC_DISP_DISP_SIGNAL_OPTIONS0);
685
686 value = PULSE_MODE_NORMAL | PULSE_POLARITY_HIGH | PULSE_QUAL_VACTIVE |
687 PULSE_LAST_END_A;
688 tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_CONTROL);
689
690 value = PULSE_START(pulse_start) | PULSE_END(pulse_start + 8);
691 tegra_dc_writel(dc, value, DC_DISP_H_PULSE2_POSITION_A);
692
693 value = VSYNC_WINDOW_END(0x210) | VSYNC_WINDOW_START(0x200) |
694 VSYNC_WINDOW_ENABLE;
695 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);
696
697 if (dc->pipe)
698 value = HDMI_SRC_DISPLAYB;
699 else
700 value = HDMI_SRC_DISPLAYA;
701
702 if ((mode->hdisplay == 720) && ((mode->vdisplay == 480) ||
703 (mode->vdisplay == 576)))
704 tegra_hdmi_writel(hdmi,
705 value | ARM_VIDEO_RANGE_FULL,
706 HDMI_NV_PDISP_INPUT_CONTROL);
707 else
708 tegra_hdmi_writel(hdmi,
709 value | ARM_VIDEO_RANGE_LIMITED,
710 HDMI_NV_PDISP_INPUT_CONTROL);
711
712 div82 = clk_get_rate(hdmi->clk) / 1000000 * 4;
713 value = SOR_REFCLK_DIV_INT(div82 >> 2) | SOR_REFCLK_DIV_FRAC(div82);
714 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_REFCLK);
715
716 if (!hdmi->dvi) {
717 err = tegra_hdmi_setup_audio(hdmi, pclk);
718 if (err < 0)
719 hdmi->dvi = true;
720 }
721
722 if (of_device_is_compatible(node, "nvidia,tegra20-hdmi")) {
723 /*
724 * TODO: add ELD support
725 */
726 }
727
728 rekey = HDMI_REKEY_DEFAULT;
729 value = HDMI_CTRL_REKEY(rekey);
730 value |= HDMI_CTRL_MAX_AC_PACKET((h_sync_width + h_back_porch +
731 h_front_porch - rekey - 18) / 32);
732
733 if (!hdmi->dvi)
734 value |= HDMI_CTRL_ENABLE;
735
736 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_CTRL);
737
738 if (hdmi->dvi)
739 tegra_hdmi_writel(hdmi, 0x0,
740 HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
741 else
742 tegra_hdmi_writel(hdmi, GENERIC_CTRL_AUDIO,
743 HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
744
745 tegra_hdmi_setup_avi_infoframe(hdmi, mode);
746 tegra_hdmi_setup_audio_infoframe(hdmi);
747 tegra_hdmi_setup_stereo_infoframe(hdmi);
748
749 /* TMDS CONFIG */
750 if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) {
751 num_tmds = ARRAY_SIZE(tegra3_tmds_config);
752 tmds = tegra3_tmds_config;
753 } else {
754 num_tmds = ARRAY_SIZE(tegra2_tmds_config);
755 tmds = tegra2_tmds_config;
756 }
757
758 for (i = 0; i < num_tmds; i++) {
759 if (pclk <= tmds[i].pclk) {
760 tegra_hdmi_setup_tmds(hdmi, &tmds[i]);
761 break;
762 }
763 }
764
765 tegra_hdmi_writel(hdmi,
766 SOR_SEQ_CTL_PU_PC(0) |
767 SOR_SEQ_PU_PC_ALT(0) |
768 SOR_SEQ_PD_PC(8) |
769 SOR_SEQ_PD_PC_ALT(8),
770 HDMI_NV_PDISP_SOR_SEQ_CTL);
771
772 value = SOR_SEQ_INST_WAIT_TIME(1) |
773 SOR_SEQ_INST_WAIT_UNITS_VSYNC |
774 SOR_SEQ_INST_HALT |
775 SOR_SEQ_INST_PIN_A_LOW |
776 SOR_SEQ_INST_PIN_B_LOW |
777 SOR_SEQ_INST_DRIVE_PWM_OUT_LO;
778
779 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(0));
780 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_SEQ_INST(8));
781
782 value = 0x1c800;
783 value &= ~SOR_CSTM_ROTCLK(~0);
784 value |= SOR_CSTM_ROTCLK(2);
785 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_CSTM);
786
787 tegra_dc_writel(dc, DISP_CTRL_MODE_STOP, DC_CMD_DISPLAY_COMMAND);
788 tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
789 tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
790
791 /* start SOR */
792 tegra_hdmi_writel(hdmi,
793 SOR_PWR_NORMAL_STATE_PU |
794 SOR_PWR_NORMAL_START_NORMAL |
795 SOR_PWR_SAFE_STATE_PD |
796 SOR_PWR_SETTING_NEW_TRIGGER,
797 HDMI_NV_PDISP_SOR_PWR);
798 tegra_hdmi_writel(hdmi,
799 SOR_PWR_NORMAL_STATE_PU |
800 SOR_PWR_NORMAL_START_NORMAL |
801 SOR_PWR_SAFE_STATE_PD |
802 SOR_PWR_SETTING_NEW_DONE,
803 HDMI_NV_PDISP_SOR_PWR);
804
805 do {
806 BUG_ON(--retries < 0);
807 value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PWR);
808 } while (value & SOR_PWR_SETTING_NEW_PENDING);
809
810 value = SOR_STATE_ASY_CRCMODE_COMPLETE |
811 SOR_STATE_ASY_OWNER_HEAD0 |
812 SOR_STATE_ASY_SUBOWNER_BOTH |
813 SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A |
814 SOR_STATE_ASY_DEPOL_POS;
815
816 /* setup sync polarities */
817 if (mode->flags & DRM_MODE_FLAG_PHSYNC)
818 value |= SOR_STATE_ASY_HSYNCPOL_POS;
819
820 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
821 value |= SOR_STATE_ASY_HSYNCPOL_NEG;
822
823 if (mode->flags & DRM_MODE_FLAG_PVSYNC)
824 value |= SOR_STATE_ASY_VSYNCPOL_POS;
825
826 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
827 value |= SOR_STATE_ASY_VSYNCPOL_NEG;
828
829 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_STATE2);
830
831 value = SOR_STATE_ASY_HEAD_OPMODE_AWAKE | SOR_STATE_ASY_ORMODE_NORMAL;
832 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_STATE1);
833
834 tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
835 tegra_hdmi_writel(hdmi, SOR_STATE_UPDATE, HDMI_NV_PDISP_SOR_STATE0);
836 tegra_hdmi_writel(hdmi, value | SOR_STATE_ATTACHED,
837 HDMI_NV_PDISP_SOR_STATE1);
838 tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_STATE0);
839
840 tegra_dc_writel(dc, HDMI_ENABLE, DC_DISP_DISP_WIN_OPTIONS);
841
842 value = PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
843 PW4_ENABLE | PM0_ENABLE | PM1_ENABLE;
844 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_POWER_CONTROL);
845
846 value = DISP_CTRL_MODE_C_DISPLAY;
847 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_COMMAND);
848
849 tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
850 tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
851
852 /* TODO: add HDCP support */
853
854 return 0;
855}
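One easily misread step in the enable path above is the SOR reference clock divider: div82 carries the divider in 8.2 fixed point, i.e. four times its value, split into integer and fractional fields. A worked example (sketch, numbers only):

	/*
	 * With a 148.5 MHz pixel clock, clk_get_rate() / 1000000
	 * truncates to 148, so div82 = 148 * 4 = 592:
	 *   SOR_REFCLK_DIV_INT(592 >> 2) -> integer divider 148
	 *   SOR_REFCLK_DIV_FRAC(592)     -> low two bits, here 0
	 * Because of the MHz truncation, the quarter-step fractional
	 * field can never be non-zero with this formula.
	 */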
856
857static int tegra_output_hdmi_disable(struct tegra_output *output)
858{
859 struct tegra_hdmi *hdmi = to_hdmi(output);
860
861 tegra_periph_reset_assert(hdmi->clk);
862 clk_disable(hdmi->clk);
863 regulator_disable(hdmi->pll);
864 regulator_disable(hdmi->vdd);
865
866 return 0;
867}
868
869static int tegra_output_hdmi_setup_clock(struct tegra_output *output,
870 struct clk *clk, unsigned long pclk)
871{
872 struct tegra_hdmi *hdmi = to_hdmi(output);
873 struct clk *base;
874 int err;
875
876 err = clk_set_parent(clk, hdmi->clk_parent);
877 if (err < 0) {
878 dev_err(output->dev, "failed to set parent: %d\n", err);
879 return err;
880 }
881
882 base = clk_get_parent(hdmi->clk_parent);
883
884 /*
885 * This assumes that the parent clock is pll_d_out0 or pll_d2_out
886 * respectively, each of which divides the base pll_d by 2.
887 */
888 err = clk_set_rate(base, pclk * 2);
889 if (err < 0)
890 dev_err(output->dev,
891 "failed to set base clock rate to %lu Hz\n",
892 pclk * 2);
893
894 return 0;
895}
896
897static int tegra_output_hdmi_check_mode(struct tegra_output *output,
898 struct drm_display_mode *mode,
899 enum drm_mode_status *status)
900{
901 struct tegra_hdmi *hdmi = to_hdmi(output);
902 unsigned long pclk = mode->clock * 1000;
903 struct clk *parent;
904 long err;
905
906 parent = clk_get_parent(hdmi->clk_parent);
907
908 err = clk_round_rate(parent, pclk * 4);
909 if (err < 0)
910 *status = MODE_NOCLOCK;
911 else
912 *status = MODE_OK;
913
914 return 0;
915}
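Taken together, setup_clock and check_mode encode the same two divide-by-two assumptions: the display controller halves its parent clock, and pll_d_out0 (or pll_d2_out) halves pll_d. That is why check_mode probes clk_round_rate() at four times the pixel clock. For a 74.25 MHz mode the intended rates would be (illustrative numbers, assuming this clock tree):

	/*
	 * pll_d       = 4 * pclk = 297.0  MHz (rounded in check_mode)
	 * pll_d_out0  = 2 * pclk = 148.5  MHz (fixed /2 output of pll_d)
	 * pixel clock = 1 * pclk =  74.25 MHz (DC divides its parent by 2)
	 */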
916
917static const struct tegra_output_ops hdmi_ops = {
918 .enable = tegra_output_hdmi_enable,
919 .disable = tegra_output_hdmi_disable,
920 .setup_clock = tegra_output_hdmi_setup_clock,
921 .check_mode = tegra_output_hdmi_check_mode,
922};
923
924static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
925{
926 struct drm_info_node *node = s->private;
927 struct tegra_hdmi *hdmi = node->info_ent->data;
928
929#define DUMP_REG(name) \
930 seq_printf(s, "%-56s %#05x %08lx\n", #name, name, \
931 tegra_hdmi_readl(hdmi, name))
932
933 DUMP_REG(HDMI_CTXSW);
934 DUMP_REG(HDMI_NV_PDISP_SOR_STATE0);
935 DUMP_REG(HDMI_NV_PDISP_SOR_STATE1);
936 DUMP_REG(HDMI_NV_PDISP_SOR_STATE2);
937 DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_MSB);
938 DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AN_LSB);
939 DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_MSB);
940 DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CN_LSB);
941 DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_MSB);
942 DUMP_REG(HDMI_NV_PDISP_RG_HDCP_AKSV_LSB);
943 DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_MSB);
944 DUMP_REG(HDMI_NV_PDISP_RG_HDCP_BKSV_LSB);
945 DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_MSB);
946 DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CKSV_LSB);
947 DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_MSB);
948 DUMP_REG(HDMI_NV_PDISP_RG_HDCP_DKSV_LSB);
949 DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CTRL);
950 DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CMODE);
951 DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB);
952 DUMP_REG(HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB);
953 DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB);
954 DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2);
955 DUMP_REG(HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1);
956 DUMP_REG(HDMI_NV_PDISP_RG_HDCP_RI);
957 DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_MSB);
958 DUMP_REG(HDMI_NV_PDISP_RG_HDCP_CS_LSB);
959 DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU0);
960 DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0);
961 DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU1);
962 DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_EMU2);
963 DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
964 DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS);
965 DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER);
966 DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW);
967 DUMP_REG(HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH);
968 DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
969 DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS);
970 DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER);
971 DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW);
972 DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH);
973 DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW);
974 DUMP_REG(HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH);
975 DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
976 DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_STATUS);
977 DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_HEADER);
978 DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW);
979 DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH);
980 DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW);
981 DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH);
982 DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW);
983 DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH);
984 DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW);
985 DUMP_REG(HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH);
986 DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_CTRL);
987 DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW);
988 DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH);
989 DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
990 DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
991 DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW);
992 DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH);
993 DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW);
994 DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH);
995 DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW);
996 DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH);
997 DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW);
998 DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH);
999 DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW);
1000 DUMP_REG(HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH);
1001 DUMP_REG(HDMI_NV_PDISP_HDMI_CTRL);
1002 DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT);
1003 DUMP_REG(HDMI_NV_PDISP_HDMI_VSYNC_WINDOW);
1004 DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_CTRL);
1005 DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_STATUS);
1006 DUMP_REG(HDMI_NV_PDISP_HDMI_GCP_SUBPACK);
1007 DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1);
1008 DUMP_REG(HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2);
1009 DUMP_REG(HDMI_NV_PDISP_HDMI_EMU0);
1010 DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1);
1011 DUMP_REG(HDMI_NV_PDISP_HDMI_EMU1_RDATA);
1012 DUMP_REG(HDMI_NV_PDISP_HDMI_SPARE);
1013 DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1);
1014 DUMP_REG(HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2);
1015 DUMP_REG(HDMI_NV_PDISP_HDMI_HDCPRIF_ROM_CTRL);
1016 DUMP_REG(HDMI_NV_PDISP_SOR_CAP);
1017 DUMP_REG(HDMI_NV_PDISP_SOR_PWR);
1018 DUMP_REG(HDMI_NV_PDISP_SOR_TEST);
1019 DUMP_REG(HDMI_NV_PDISP_SOR_PLL0);
1020 DUMP_REG(HDMI_NV_PDISP_SOR_PLL1);
1021 DUMP_REG(HDMI_NV_PDISP_SOR_PLL2);
1022 DUMP_REG(HDMI_NV_PDISP_SOR_CSTM);
1023 DUMP_REG(HDMI_NV_PDISP_SOR_LVDS);
1024 DUMP_REG(HDMI_NV_PDISP_SOR_CRCA);
1025 DUMP_REG(HDMI_NV_PDISP_SOR_CRCB);
1026 DUMP_REG(HDMI_NV_PDISP_SOR_BLANK);
1027 DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_CTL);
1028 DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(0));
1029 DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(1));
1030 DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(2));
1031 DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(3));
1032 DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(4));
1033 DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(5));
1034 DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(6));
1035 DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(7));
1036 DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(8));
1037 DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(9));
1038 DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(10));
1039 DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(11));
1040 DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(12));
1041 DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(13));
1042 DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(14));
1043 DUMP_REG(HDMI_NV_PDISP_SOR_SEQ_INST(15));
1044 DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA0);
1045 DUMP_REG(HDMI_NV_PDISP_SOR_VCRCA1);
1046 DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA0);
1047 DUMP_REG(HDMI_NV_PDISP_SOR_CCRCA1);
1048 DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA0);
1049 DUMP_REG(HDMI_NV_PDISP_SOR_EDATAA1);
1050 DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA0);
1051 DUMP_REG(HDMI_NV_PDISP_SOR_COUNTA1);
1052 DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA0);
1053 DUMP_REG(HDMI_NV_PDISP_SOR_DEBUGA1);
1054 DUMP_REG(HDMI_NV_PDISP_SOR_TRIG);
1055 DUMP_REG(HDMI_NV_PDISP_SOR_MSCHECK);
1056 DUMP_REG(HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT);
1057 DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG0);
1058 DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG1);
1059 DUMP_REG(HDMI_NV_PDISP_AUDIO_DEBUG2);
1060 DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(0));
1061 DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(1));
1062 DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(2));
1063 DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(3));
1064 DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(4));
1065 DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(5));
1066 DUMP_REG(HDMI_NV_PDISP_AUDIO_FS(6));
1067 DUMP_REG(HDMI_NV_PDISP_AUDIO_PULSE_WIDTH);
1068 DUMP_REG(HDMI_NV_PDISP_AUDIO_THRESHOLD);
1069 DUMP_REG(HDMI_NV_PDISP_AUDIO_CNTRL0);
1070 DUMP_REG(HDMI_NV_PDISP_AUDIO_N);
1071 DUMP_REG(HDMI_NV_PDISP_HDCPRIF_ROM_TIMING);
1072 DUMP_REG(HDMI_NV_PDISP_SOR_REFCLK);
1073 DUMP_REG(HDMI_NV_PDISP_CRC_CONTROL);
1074 DUMP_REG(HDMI_NV_PDISP_INPUT_CONTROL);
1075 DUMP_REG(HDMI_NV_PDISP_SCRATCH);
1076 DUMP_REG(HDMI_NV_PDISP_PE_CURRENT);
1077 DUMP_REG(HDMI_NV_PDISP_KEY_CTRL);
1078 DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG0);
1079 DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG1);
1080 DUMP_REG(HDMI_NV_PDISP_KEY_DEBUG2);
1081 DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_0);
1082 DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_1);
1083 DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_2);
1084 DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_3);
1085 DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG);
1086 DUMP_REG(HDMI_NV_PDISP_KEY_SKEY_INDEX);
1087 DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
1088 DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
1089 DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
1090
1091#undef DUMP_REG
1092
1093 return 0;
1094}
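DUMP_REG relies on preprocessor stringification to print each register's name, offset and current value on one line; the first entry, for instance, expands roughly to (sketch):

	seq_printf(s, "%-56s %#05x %08lx\n", "HDMI_CTXSW", 0x00,
		   tegra_hdmi_readl(hdmi, 0x00));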
1095
1096static struct drm_info_list debugfs_files[] = {
1097 { "regs", tegra_hdmi_show_regs, 0, NULL },
1098};
1099
1100static int tegra_hdmi_debugfs_init(struct tegra_hdmi *hdmi,
1101 struct drm_minor *minor)
1102{
1103 unsigned int i;
1104 int err;
1105
1106 hdmi->debugfs = debugfs_create_dir("hdmi", minor->debugfs_root);
1107 if (!hdmi->debugfs)
1108 return -ENOMEM;
1109
1110 hdmi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
1111 GFP_KERNEL);
1112 if (!hdmi->debugfs_files) {
1113 err = -ENOMEM;
1114 goto remove;
1115 }
1116
1117 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
1118 hdmi->debugfs_files[i].data = hdmi;
1119
1120 err = drm_debugfs_create_files(hdmi->debugfs_files,
1121 ARRAY_SIZE(debugfs_files),
1122 hdmi->debugfs, minor);
1123 if (err < 0)
1124 goto free;
1125
1126 hdmi->minor = minor;
1127
1128 return 0;
1129
1130free:
1131 kfree(hdmi->debugfs_files);
1132 hdmi->debugfs_files = NULL;
1133remove:
1134 debugfs_remove(hdmi->debugfs);
1135 hdmi->debugfs = NULL;
1136
1137 return err;
1138}
1139
1140static int tegra_hdmi_debugfs_exit(struct tegra_hdmi *hdmi)
1141{
1142 drm_debugfs_remove_files(hdmi->debugfs_files, ARRAY_SIZE(debugfs_files),
1143 hdmi->minor);
1144 hdmi->minor = NULL;
1145
1146 kfree(hdmi->debugfs_files);
1147 hdmi->debugfs_files = NULL;
1148
1149 debugfs_remove(hdmi->debugfs);
1150 hdmi->debugfs = NULL;
1151
1152 return 0;
1153}
1154
1155static int tegra_hdmi_drm_init(struct host1x_client *client,
1156 struct drm_device *drm)
1157{
1158 struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
1159 int err;
1160
1161 hdmi->output.type = TEGRA_OUTPUT_HDMI;
1162 hdmi->output.dev = client->dev;
1163 hdmi->output.ops = &hdmi_ops;
1164
1165 err = tegra_output_init(drm, &hdmi->output);
1166 if (err < 0) {
1167 dev_err(client->dev, "output setup failed: %d\n", err);
1168 return err;
1169 }
1170
1171 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
1172 err = tegra_hdmi_debugfs_init(hdmi, drm->primary);
1173 if (err < 0)
1174 dev_err(client->dev, "debugfs setup failed: %d\n", err);
1175 }
1176
1177 return 0;
1178}
1179
1180static int tegra_hdmi_drm_exit(struct host1x_client *client)
1181{
1182 struct tegra_hdmi *hdmi = host1x_client_to_hdmi(client);
1183 int err;
1184
1185 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
1186 err = tegra_hdmi_debugfs_exit(hdmi);
1187 if (err < 0)
1188 dev_err(client->dev, "debugfs cleanup failed: %d\n",
1189 err);
1190 }
1191
1192 err = tegra_output_disable(&hdmi->output);
1193 if (err < 0) {
1194 dev_err(client->dev, "output failed to disable: %d\n", err);
1195 return err;
1196 }
1197
1198 err = tegra_output_exit(&hdmi->output);
1199 if (err < 0) {
1200 dev_err(client->dev, "output cleanup failed: %d\n", err);
1201 return err;
1202 }
1203
1204 return 0;
1205}
1206
1207static const struct host1x_client_ops hdmi_client_ops = {
1208 .drm_init = tegra_hdmi_drm_init,
1209 .drm_exit = tegra_hdmi_drm_exit,
1210};
1211
1212static int tegra_hdmi_probe(struct platform_device *pdev)
1213{
1214 struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
1215 struct tegra_hdmi *hdmi;
1216 struct resource *regs;
1217 int err;
1218
1219 hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
1220 if (!hdmi)
1221 return -ENOMEM;
1222
1223 hdmi->dev = &pdev->dev;
1224 hdmi->audio_source = AUTO;
1225 hdmi->audio_freq = 44100;
1226 hdmi->stereo = false;
1227 hdmi->dvi = false;
1228
1229 hdmi->clk = devm_clk_get(&pdev->dev, NULL);
1230 if (IS_ERR(hdmi->clk)) {
1231 dev_err(&pdev->dev, "failed to get clock\n");
1232 return PTR_ERR(hdmi->clk);
1233 }
1234
1235 err = clk_prepare(hdmi->clk);
1236 if (err < 0)
1237 return err;
1238
1239 hdmi->clk_parent = devm_clk_get(&pdev->dev, "parent");
1240 if (IS_ERR(hdmi->clk_parent))
1241 return PTR_ERR(hdmi->clk_parent);
1242
1243 err = clk_prepare(hdmi->clk_parent);
1244 if (err < 0)
1245 return err;
1246
1247 err = clk_set_parent(hdmi->clk, hdmi->clk_parent);
1248 if (err < 0) {
1249 dev_err(&pdev->dev, "failed to setup clocks: %d\n", err);
1250 return err;
1251 }
1252
1253 hdmi->vdd = devm_regulator_get(&pdev->dev, "vdd");
1254 if (IS_ERR(hdmi->vdd)) {
1255 dev_err(&pdev->dev, "failed to get VDD regulator\n");
1256 return PTR_ERR(hdmi->vdd);
1257 }
1258
1259 hdmi->pll = devm_regulator_get(&pdev->dev, "pll");
1260 if (IS_ERR(hdmi->pll)) {
1261 dev_err(&pdev->dev, "failed to get PLL regulator\n");
1262 return PTR_ERR(hdmi->pll);
1263 }
1264
1265 hdmi->output.dev = &pdev->dev;
1266
1267 err = tegra_output_parse_dt(&hdmi->output);
1268 if (err < 0)
1269 return err;
1270
1271 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1272 if (!regs)
1273 return -ENXIO;
1274
1275 hdmi->regs = devm_request_and_ioremap(&pdev->dev, regs);
1276 if (!hdmi->regs)
1277 return -EADDRNOTAVAIL;
1278
1279 err = platform_get_irq(pdev, 0);
1280 if (err < 0)
1281 return err;
1282
1283 hdmi->irq = err;
1284
1285 hdmi->client.ops = &hdmi_client_ops;
1286 INIT_LIST_HEAD(&hdmi->client.list);
1287 hdmi->client.dev = &pdev->dev;
1288
1289 err = host1x_register_client(host1x, &hdmi->client);
1290 if (err < 0) {
1291 dev_err(&pdev->dev, "failed to register host1x client: %d\n",
1292 err);
1293 return err;
1294 }
1295
1296 platform_set_drvdata(pdev, hdmi);
1297
1298 return 0;
1299}
1300
1301static int tegra_hdmi_remove(struct platform_device *pdev)
1302{
1303 struct host1x *host1x = dev_get_drvdata(pdev->dev.parent);
1304 struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
1305 int err;
1306
1307 err = host1x_unregister_client(host1x, &hdmi->client);
1308 if (err < 0) {
1309 dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
1310 err);
1311 return err;
1312 }
1313
1314 clk_unprepare(hdmi->clk_parent);
1315 clk_unprepare(hdmi->clk);
1316
1317 return 0;
1318}
1319
1320static struct of_device_id tegra_hdmi_of_match[] = {
1321 { .compatible = "nvidia,tegra30-hdmi", },
1322 { .compatible = "nvidia,tegra20-hdmi", },
1323 { },
1324};
1325
1326struct platform_driver tegra_hdmi_driver = {
1327 .driver = {
1328 .name = "tegra-hdmi",
1329 .owner = THIS_MODULE,
1330 .of_match_table = tegra_hdmi_of_match,
1331 },
1332 .probe = tegra_hdmi_probe,
1333 .remove = tegra_hdmi_remove,
1334};
diff --git a/drivers/gpu/drm/tegra/hdmi.h b/drivers/gpu/drm/tegra/hdmi.h
new file mode 100644
index 000000000000..1477f36eb45a
--- /dev/null
+++ b/drivers/gpu/drm/tegra/hdmi.h
@@ -0,0 +1,575 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#ifndef TEGRA_HDMI_H
11#define TEGRA_HDMI_H 1
12
13#define HDMI_INFOFRAME_TYPE_VENDOR 0x81
14#define HDMI_INFOFRAME_TYPE_AVI 0x82
15#define HDMI_INFOFRAME_TYPE_SPD 0x83
16#define HDMI_INFOFRAME_TYPE_AUDIO 0x84
17#define HDMI_INFOFRAME_TYPE_MPEG_SRC 0x85
18#define HDMI_INFOFRAME_TYPE_NTSC_VBI 0x86
19
20/* all fields little endian */
21struct hdmi_avi_infoframe {
22 /* PB0 */
23 u8 csum;
24
25 /* PB1 */
26 unsigned s:2; /* scan information */
27 unsigned b:2; /* bar info data valid */
28 unsigned a:1; /* active info present */
29 unsigned y:2; /* RGB or YCbCr */
30 unsigned res1:1;
31
32 /* PB2 */
33 unsigned r:4; /* active format aspect ratio */
34 unsigned m:2; /* picture aspect ratio */
35 unsigned c:2; /* colorimetry */
36
37 /* PB3 */
38 unsigned sc:2; /* non-uniform picture scaling */
39 unsigned q:2; /* quantization range */
40 unsigned ec:3; /* extended colorimetry */
41 unsigned itc:1; /* IT content */
42
43 /* PB4 */
44 unsigned vic:7; /* video format id code */
45 unsigned res4:1;
46
47 /* PB5 */
48 unsigned pr:4; /* pixel repetition factor */
49 unsigned cn:2; /* IT content type */
50 unsigned yq:2; /* ycc quantization range */
51
52 /* PB6-7 */
53 u16 top_bar_end_line;
54
55 /* PB8-9 */
56 u16 bot_bar_start_line;
57
58 /* PB10-11 */
59 u16 left_bar_end_pixel;
60
61 /* PB12-13 */
62 u16 right_bar_start_pixel;
63} __packed;
64
65#define HDMI_AVI_VERSION 0x02
66
67#define HDMI_AVI_Y_RGB 0x0
68#define HDMI_AVI_Y_YCBCR_422 0x1
69#define HDMI_AVI_Y_YCBCR_444 0x2
70
71#define HDMI_AVI_B_VERT 0x1
72#define HDMI_AVI_B_HORIZ 0x2
73
74#define HDMI_AVI_S_NONE 0x0
75#define HDMI_AVI_S_OVERSCAN 0x1
76#define HDMI_AVI_S_UNDERSCAN 0x2
77
78#define HDMI_AVI_C_NONE 0x0
79#define HDMI_AVI_C_SMPTE 0x1
80#define HDMI_AVI_C_ITU_R 0x2
81#define HDMI_AVI_C_EXTENDED 0x4
82
83#define HDMI_AVI_M_4_3 0x1
84#define HDMI_AVI_M_16_9 0x2
85
86#define HDMI_AVI_R_SAME 0x8
87#define HDMI_AVI_R_4_3_CENTER 0x9
88#define HDMI_AVI_R_16_9_CENTER 0xa
89#define HDMI_AVI_R_14_9_CENTER 0xb
90
91/* all fields little endian */
92struct hdmi_audio_infoframe {
93 /* PB0 */
94 u8 csum;
95
96 /* PB1 */
97 unsigned cc:3; /* channel count */
98 unsigned res1:1;
99 unsigned ct:4; /* coding type */
100
101 /* PB2 */
102 unsigned ss:2; /* sample size */
103 unsigned sf:3; /* sample frequency */
104 unsigned res2:3;
105
106 /* PB3 */
107 unsigned cxt:5; /* coding extension type */
108 unsigned res3:3;
109
110 /* PB4 */
111 u8 ca; /* channel/speaker allocation */
112
113 /* PB5 */
114 unsigned res5:3;
115 unsigned lsv:4; /* level shift value */
116 unsigned dm_inh:1; /* downmix inhibit */
117
118 /* PB6-10 reserved */
119 u8 res6;
120 u8 res7;
121 u8 res8;
122 u8 res9;
123 u8 res10;
124} __packed;
125
126#define HDMI_AUDIO_VERSION 0x01
127
128#define HDMI_AUDIO_CC_STREAM 0x0 /* specified by audio stream */
129#define HDMI_AUDIO_CC_2 0x1
130#define HDMI_AUDIO_CC_3 0x2
131#define HDMI_AUDIO_CC_4 0x3
132#define HDMI_AUDIO_CC_5 0x4
133#define HDMI_AUDIO_CC_6 0x5
134#define HDMI_AUDIO_CC_7 0x6
135#define HDMI_AUDIO_CC_8 0x7
136
137#define HDMI_AUDIO_CT_STREAM 0x0 /* specified by audio stream */
138#define HDMI_AUDIO_CT_PCM 0x1
139#define HDMI_AUDIO_CT_AC3 0x2
140#define HDMI_AUDIO_CT_MPEG1 0x3
141#define HDMI_AUDIO_CT_MP3 0x4
142#define HDMI_AUDIO_CT_MPEG2 0x5
143#define HDMI_AUDIO_CT_AAC_LC 0x6
144#define HDMI_AUDIO_CT_DTS 0x7
145#define HDMI_AUDIO_CT_ATRAC 0x8
146#define HDMI_AUDIO_CT_DSD 0x9
147#define HDMI_AUDIO_CT_E_AC3 0xa
148#define HDMI_AUDIO_CT_DTS_HD 0xb
149#define HDMI_AUDIO_CT_MLP 0xc
150#define HDMI_AUDIO_CT_DST 0xd
151#define HDMI_AUDIO_CT_WMA_PRO 0xe
152#define HDMI_AUDIO_CT_CXT 0xf
153
154#define HDMI_AUDIO_SF_STREAM 0x0 /* specified by audio stream */
155#define HDMI_AUDIO_SF_32K 0x1
156#define HDMI_AUDIO_SF_44_1K 0x2
157#define HDMI_AUDIO_SF_48K 0x3
158#define HDMI_AUDIO_SF_88_2K 0x4
159#define HDMI_AUDIO_SF_96K 0x5
160#define HDMI_AUDIO_SF_176_4K 0x6
161#define HDMI_AUDIO_SF_192K 0x7
162
163#define HDMI_AUDIO_SS_STREAM 0x0 /* specified by audio stream */
164#define HDMI_AUDIO_SS_16BIT 0x1
165#define HDMI_AUDIO_SS_20BIT 0x2
166#define HDMI_AUDIO_SS_24BIT 0x3
167
168#define HDMI_AUDIO_CXT_CT 0x0 /* refer to coding in CT */
169#define HDMI_AUDIO_CXT_HE_AAC 0x1
170#define HDMI_AUDIO_CXT_HE_AAC_V2 0x2
171#define HDMI_AUDIO_CXT_MPEG_SURROUND 0x3
172
173/* all fields little endian */
174struct hdmi_stereo_infoframe {
175 /* PB0 */
176 u8 csum;
177
178 /* PB1 */
179 u8 regid0;
180
181 /* PB2 */
182 u8 regid1;
183
184 /* PB3 */
185 u8 regid2;
186
187 /* PB4 */
188 unsigned res1:5;
189 unsigned hdmi_video_format:3;
190
191 /* PB5 */
192 unsigned res2:4;
193 unsigned _3d_structure:4;
194
195 /* PB6*/
196 unsigned res3:4;
197 unsigned _3d_ext_data:4;
198} __packed;
199
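All three infoframe structs mirror the raw CEA-861 payload bytes, so they depend on the compiler packing bitfields LSB-first (as GCC does on little-endian ARM) and leave PB0 for the checksum that tegra_hdmi_write_infopack() presumably fills in. For reference, a sketch of the standard infoframe checksum rule, under which all header and payload bytes must sum to zero mod 256 (the helper name is made up):

	static u8 hdmi_infoframe_csum(u8 type, u8 version, u8 length,
				      const u8 *payload)
	{
		unsigned int sum = type + version + length;
		unsigned int i;

		/* PB0 itself is excluded; payload starts at PB1. */
		for (i = 0; i < length; i++)
			sum += payload[i];

		return 0x100 - (sum & 0xff);
	}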
200#define HDMI_VENDOR_VERSION 0x01
201
202/* register definitions */
203#define HDMI_CTXSW 0x00
204
205#define HDMI_NV_PDISP_SOR_STATE0 0x01
206#define SOR_STATE_UPDATE (1 << 0)
207
208#define HDMI_NV_PDISP_SOR_STATE1 0x02
209#define SOR_STATE_ASY_HEAD_OPMODE_AWAKE (2 << 0)
210#define SOR_STATE_ASY_ORMODE_NORMAL (1 << 2)
211#define SOR_STATE_ATTACHED (1 << 3)
212
213#define HDMI_NV_PDISP_SOR_STATE2 0x03
214#define SOR_STATE_ASY_OWNER_NONE (0 << 0)
215#define SOR_STATE_ASY_OWNER_HEAD0 (1 << 0)
216#define SOR_STATE_ASY_SUBOWNER_NONE (0 << 4)
217#define SOR_STATE_ASY_SUBOWNER_SUBHEAD0 (1 << 4)
218#define SOR_STATE_ASY_SUBOWNER_SUBHEAD1 (2 << 4)
219#define SOR_STATE_ASY_SUBOWNER_BOTH (3 << 4)
220#define SOR_STATE_ASY_CRCMODE_ACTIVE (0 << 6)
221#define SOR_STATE_ASY_CRCMODE_COMPLETE (1 << 6)
222#define SOR_STATE_ASY_CRCMODE_NON_ACTIVE (2 << 6)
223#define SOR_STATE_ASY_PROTOCOL_SINGLE_TMDS_A (1 << 8)
224#define SOR_STATE_ASY_PROTOCOL_CUSTOM (15 << 8)
225#define SOR_STATE_ASY_HSYNCPOL_POS (0 << 12)
226#define SOR_STATE_ASY_HSYNCPOL_NEG (1 << 12)
227#define SOR_STATE_ASY_VSYNCPOL_POS (0 << 13)
228#define SOR_STATE_ASY_VSYNCPOL_NEG (1 << 13)
229#define SOR_STATE_ASY_DEPOL_POS (0 << 14)
230#define SOR_STATE_ASY_DEPOL_NEG (1 << 14)
231
232#define HDMI_NV_PDISP_RG_HDCP_AN_MSB 0x04
233#define HDMI_NV_PDISP_RG_HDCP_AN_LSB 0x05
234#define HDMI_NV_PDISP_RG_HDCP_CN_MSB 0x06
235#define HDMI_NV_PDISP_RG_HDCP_CN_LSB 0x07
236#define HDMI_NV_PDISP_RG_HDCP_AKSV_MSB 0x08
237#define HDMI_NV_PDISP_RG_HDCP_AKSV_LSB 0x09
238#define HDMI_NV_PDISP_RG_HDCP_BKSV_MSB 0x0a
239#define HDMI_NV_PDISP_RG_HDCP_BKSV_LSB 0x0b
240#define HDMI_NV_PDISP_RG_HDCP_CKSV_MSB 0x0c
241#define HDMI_NV_PDISP_RG_HDCP_CKSV_LSB 0x0d
242#define HDMI_NV_PDISP_RG_HDCP_DKSV_MSB 0x0e
243#define HDMI_NV_PDISP_RG_HDCP_DKSV_LSB 0x0f
244#define HDMI_NV_PDISP_RG_HDCP_CTRL 0x10
245#define HDMI_NV_PDISP_RG_HDCP_CMODE 0x11
246#define HDMI_NV_PDISP_RG_HDCP_MPRIME_MSB 0x12
247#define HDMI_NV_PDISP_RG_HDCP_MPRIME_LSB 0x13
248#define HDMI_NV_PDISP_RG_HDCP_SPRIME_MSB 0x14
249#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB2 0x15
250#define HDMI_NV_PDISP_RG_HDCP_SPRIME_LSB1 0x16
251#define HDMI_NV_PDISP_RG_HDCP_RI 0x17
252#define HDMI_NV_PDISP_RG_HDCP_CS_MSB 0x18
253#define HDMI_NV_PDISP_RG_HDCP_CS_LSB 0x19
254#define HDMI_NV_PDISP_HDMI_AUDIO_EMU0 0x1a
255#define HDMI_NV_PDISP_HDMI_AUDIO_EMU_RDATA0 0x1b
256#define HDMI_NV_PDISP_HDMI_AUDIO_EMU1 0x1c
257#define HDMI_NV_PDISP_HDMI_AUDIO_EMU2 0x1d
258
259#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL 0x1e
260#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_STATUS 0x1f
261#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_HEADER 0x20
262#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_LOW 0x21
263#define HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_SUBPACK0_HIGH 0x22
264#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL 0x23
265#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_STATUS 0x24
266#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_HEADER 0x25
267#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_LOW 0x26
268#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK0_HIGH 0x27
269#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_LOW 0x28
270#define HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_SUBPACK1_HIGH 0x29
271
272#define INFOFRAME_CTRL_ENABLE (1 << 0)
273
274#define INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0)
275#define INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8)
276#define INFOFRAME_HEADER_LEN(x) (((x) & 0x0f) << 16)
277
278#define HDMI_NV_PDISP_HDMI_GENERIC_CTRL 0x2a
279#define GENERIC_CTRL_ENABLE (1 << 0)
280#define GENERIC_CTRL_OTHER (1 << 4)
281#define GENERIC_CTRL_SINGLE (1 << 8)
282#define GENERIC_CTRL_HBLANK (1 << 12)
283#define GENERIC_CTRL_AUDIO (1 << 16)
284
285#define HDMI_NV_PDISP_HDMI_GENERIC_STATUS 0x2b
286#define HDMI_NV_PDISP_HDMI_GENERIC_HEADER 0x2c
287#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_LOW 0x2d
288#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK0_HIGH 0x2e
289#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_LOW 0x2f
290#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK1_HIGH 0x30
291#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_LOW 0x31
292#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK2_HIGH 0x32
293#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_LOW 0x33
294#define HDMI_NV_PDISP_HDMI_GENERIC_SUBPACK3_HIGH 0x34
295
296#define HDMI_NV_PDISP_HDMI_ACR_CTRL 0x35
297#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_LOW 0x36
298#define HDMI_NV_PDISP_HDMI_ACR_0320_SUBPACK_HIGH 0x37
299#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW 0x38
300#define HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH 0x39
301#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_LOW 0x3a
302#define HDMI_NV_PDISP_HDMI_ACR_0882_SUBPACK_HIGH 0x3b
303#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_LOW 0x3c
304#define HDMI_NV_PDISP_HDMI_ACR_1764_SUBPACK_HIGH 0x3d
305#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_LOW 0x3e
306#define HDMI_NV_PDISP_HDMI_ACR_0480_SUBPACK_HIGH 0x3f
307#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_LOW 0x40
308#define HDMI_NV_PDISP_HDMI_ACR_0960_SUBPACK_HIGH 0x41
309#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_LOW 0x42
310#define HDMI_NV_PDISP_HDMI_ACR_1920_SUBPACK_HIGH 0x43
311
312#define ACR_SUBPACK_CTS(x) (((x) & 0xffffff) << 8)
313#define ACR_SUBPACK_N(x) (((x) & 0xffffff) << 0)
314#define ACR_ENABLE (1 << 31)
315
316#define HDMI_NV_PDISP_HDMI_CTRL 0x44
317#define HDMI_CTRL_REKEY(x) (((x) & 0x7f) << 0)
318#define HDMI_CTRL_MAX_AC_PACKET(x) (((x) & 0x1f) << 16)
319#define HDMI_CTRL_ENABLE (1 << 30)
320
321#define HDMI_NV_PDISP_HDMI_VSYNC_KEEPOUT 0x45
322#define HDMI_NV_PDISP_HDMI_VSYNC_WINDOW 0x46
323#define VSYNC_WINDOW_END(x) (((x) & 0x3ff) << 0)
324#define VSYNC_WINDOW_START(x) (((x) & 0x3ff) << 16)
325#define VSYNC_WINDOW_ENABLE (1 << 31)
326
327#define HDMI_NV_PDISP_HDMI_GCP_CTRL 0x47
328#define HDMI_NV_PDISP_HDMI_GCP_STATUS 0x48
329#define HDMI_NV_PDISP_HDMI_GCP_SUBPACK 0x49
330#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS1 0x4a
331#define HDMI_NV_PDISP_HDMI_CHANNEL_STATUS2 0x4b
332#define HDMI_NV_PDISP_HDMI_EMU0 0x4c
333#define HDMI_NV_PDISP_HDMI_EMU1 0x4d
334#define HDMI_NV_PDISP_HDMI_EMU1_RDATA 0x4e
335
336#define HDMI_NV_PDISP_HDMI_SPARE 0x4f
337#define SPARE_HW_CTS (1 << 0)
338#define SPARE_FORCE_SW_CTS (1 << 1)
339#define SPARE_CTS_RESET_VAL(x) (((x) & 0x7) << 16)
340
341#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS1 0x50
342#define HDMI_NV_PDISP_HDMI_SPDIF_CHN_STATUS2 0x51
343#define HDMI_NV_PDISP_HDMI_HDCPRIF_ROM_CTRL 0x53
344#define HDMI_NV_PDISP_SOR_CAP 0x54
345#define HDMI_NV_PDISP_SOR_PWR 0x55
346#define SOR_PWR_NORMAL_STATE_PD (0 << 0)
347#define SOR_PWR_NORMAL_STATE_PU (1 << 0)
348#define SOR_PWR_NORMAL_START_NORMAL (0 << 1)
349#define SOR_PWR_NORMAL_START_ALT (1 << 1)
350#define SOR_PWR_SAFE_STATE_PD (0 << 16)
351#define SOR_PWR_SAFE_STATE_PU (1 << 16)
352#define SOR_PWR_SETTING_NEW_DONE (0 << 31)
353#define SOR_PWR_SETTING_NEW_PENDING (1 << 31)
354#define SOR_PWR_SETTING_NEW_TRIGGER (1 << 31)
355
356#define HDMI_NV_PDISP_SOR_TEST 0x56
357#define HDMI_NV_PDISP_SOR_PLL0 0x57
358#define SOR_PLL_PWR (1 << 0)
359#define SOR_PLL_PDBG (1 << 1)
360#define SOR_PLL_VCAPD (1 << 2)
361#define SOR_PLL_PDPORT (1 << 3)
362#define SOR_PLL_RESISTORSEL (1 << 4)
363#define SOR_PLL_PULLDOWN (1 << 5)
364#define SOR_PLL_VCOCAP(x) (((x) & 0xf) << 8)
365#define SOR_PLL_BG_V17_S(x) (((x) & 0xf) << 12)
366#define SOR_PLL_FILTER(x) (((x) & 0xf) << 16)
367#define SOR_PLL_ICHPMP(x) (((x) & 0xf) << 24)
368#define SOR_PLL_TX_REG_LOAD(x) (((x) & 0xf) << 28)
369
370#define HDMI_NV_PDISP_SOR_PLL1 0x58
371#define SOR_PLL_TMDS_TERM_ENABLE (1 << 8)
372#define SOR_PLL_TMDS_TERMADJ(x) (((x) & 0xf) << 9)
373#define SOR_PLL_LOADADJ(x) (((x) & 0xf) << 20)
374#define SOR_PLL_PE_EN (1 << 28)
375#define SOR_PLL_HALF_FULL_PE (1 << 29)
376#define SOR_PLL_S_D_PIN_PE (1 << 30)
377
378#define HDMI_NV_PDISP_SOR_PLL2 0x59
379
380#define HDMI_NV_PDISP_SOR_CSTM 0x5a
381#define SOR_CSTM_ROTCLK(x) (((x) & 0xf) << 24)
382
383#define HDMI_NV_PDISP_SOR_LVDS 0x5b
384#define HDMI_NV_PDISP_SOR_CRCA 0x5c
385#define HDMI_NV_PDISP_SOR_CRCB 0x5d
386#define HDMI_NV_PDISP_SOR_BLANK 0x5e
387#define HDMI_NV_PDISP_SOR_SEQ_CTL 0x5f
388#define SOR_SEQ_CTL_PU_PC(x) (((x) & 0xf) << 0)
389#define SOR_SEQ_PU_PC_ALT(x) (((x) & 0xf) << 4)
390#define SOR_SEQ_PD_PC(x) (((x) & 0xf) << 8)
391#define SOR_SEQ_PD_PC_ALT(x) (((x) & 0xf) << 12)
392#define SOR_SEQ_PC(x) (((x) & 0xf) << 16)
393#define SOR_SEQ_STATUS (1 << 28)
394#define SOR_SEQ_SWITCH (1 << 30)
395
396#define HDMI_NV_PDISP_SOR_SEQ_INST(x) (0x60 + (x))
397
398#define SOR_SEQ_INST_WAIT_TIME(x) (((x) & 0x3ff) << 0)
399#define SOR_SEQ_INST_WAIT_UNITS_VSYNC (2 << 12)
400#define SOR_SEQ_INST_HALT (1 << 15)
401#define SOR_SEQ_INST_PIN_A_LOW (0 << 21)
402#define SOR_SEQ_INST_PIN_A_HIGH (1 << 21)
403#define SOR_SEQ_INST_PIN_B_LOW (0 << 22)
404#define SOR_SEQ_INST_PIN_B_HIGH (1 << 22)
405#define SOR_SEQ_INST_DRIVE_PWM_OUT_LO (1 << 23)
406
407#define HDMI_NV_PDISP_SOR_VCRCA0 0x72
408#define HDMI_NV_PDISP_SOR_VCRCA1 0x73
409#define HDMI_NV_PDISP_SOR_CCRCA0 0x74
410#define HDMI_NV_PDISP_SOR_CCRCA1 0x75
411#define HDMI_NV_PDISP_SOR_EDATAA0 0x76
412#define HDMI_NV_PDISP_SOR_EDATAA1 0x77
413#define HDMI_NV_PDISP_SOR_COUNTA0 0x78
414#define HDMI_NV_PDISP_SOR_COUNTA1 0x79
415#define HDMI_NV_PDISP_SOR_DEBUGA0 0x7a
416#define HDMI_NV_PDISP_SOR_DEBUGA1 0x7b
417#define HDMI_NV_PDISP_SOR_TRIG 0x7c
418#define HDMI_NV_PDISP_SOR_MSCHECK 0x7d
419
420#define HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT 0x7e
421#define DRIVE_CURRENT_LANE0(x) (((x) & 0x3f) << 0)
422#define DRIVE_CURRENT_LANE1(x) (((x) & 0x3f) << 8)
423#define DRIVE_CURRENT_LANE2(x) (((x) & 0x3f) << 16)
424#define DRIVE_CURRENT_LANE3(x) (((x) & 0x3f) << 24)
425#define DRIVE_CURRENT_FUSE_OVERRIDE (1 << 31)
426
427#define DRIVE_CURRENT_1_500_mA 0x00
428#define DRIVE_CURRENT_1_875_mA 0x01
429#define DRIVE_CURRENT_2_250_mA 0x02
430#define DRIVE_CURRENT_2_625_mA 0x03
431#define DRIVE_CURRENT_3_000_mA 0x04
432#define DRIVE_CURRENT_3_375_mA 0x05
433#define DRIVE_CURRENT_3_750_mA 0x06
434#define DRIVE_CURRENT_4_125_mA 0x07
435#define DRIVE_CURRENT_4_500_mA 0x08
436#define DRIVE_CURRENT_4_875_mA 0x09
437#define DRIVE_CURRENT_5_250_mA 0x0a
438#define DRIVE_CURRENT_5_625_mA 0x0b
439#define DRIVE_CURRENT_6_000_mA 0x0c
440#define DRIVE_CURRENT_6_375_mA 0x0d
441#define DRIVE_CURRENT_6_750_mA 0x0e
442#define DRIVE_CURRENT_7_125_mA 0x0f
443#define DRIVE_CURRENT_7_500_mA 0x10
444#define DRIVE_CURRENT_7_875_mA 0x11
445#define DRIVE_CURRENT_8_250_mA 0x12
446#define DRIVE_CURRENT_8_625_mA 0x13
447#define DRIVE_CURRENT_9_000_mA 0x14
448#define DRIVE_CURRENT_9_375_mA 0x15
449#define DRIVE_CURRENT_9_750_mA 0x16
450#define DRIVE_CURRENT_10_125_mA 0x17
451#define DRIVE_CURRENT_10_500_mA 0x18
452#define DRIVE_CURRENT_10_875_mA 0x19
453#define DRIVE_CURRENT_11_250_mA 0x1a
454#define DRIVE_CURRENT_11_625_mA 0x1b
455#define DRIVE_CURRENT_12_000_mA 0x1c
456#define DRIVE_CURRENT_12_375_mA 0x1d
457#define DRIVE_CURRENT_12_750_mA 0x1e
458#define DRIVE_CURRENT_13_125_mA 0x1f
459#define DRIVE_CURRENT_13_500_mA 0x20
460#define DRIVE_CURRENT_13_875_mA 0x21
461#define DRIVE_CURRENT_14_250_mA 0x22
462#define DRIVE_CURRENT_14_625_mA 0x23
463#define DRIVE_CURRENT_15_000_mA 0x24
464#define DRIVE_CURRENT_15_375_mA 0x25
465#define DRIVE_CURRENT_15_750_mA 0x26
466#define DRIVE_CURRENT_16_125_mA 0x27
467#define DRIVE_CURRENT_16_500_mA 0x28
468#define DRIVE_CURRENT_16_875_mA 0x29
469#define DRIVE_CURRENT_17_250_mA 0x2a
470#define DRIVE_CURRENT_17_625_mA 0x2b
471#define DRIVE_CURRENT_18_000_mA 0x2c
472#define DRIVE_CURRENT_18_375_mA 0x2d
473#define DRIVE_CURRENT_18_750_mA 0x2e
474#define DRIVE_CURRENT_19_125_mA 0x2f
475#define DRIVE_CURRENT_19_500_mA 0x30
476#define DRIVE_CURRENT_19_875_mA 0x31
477#define DRIVE_CURRENT_20_250_mA 0x32
478#define DRIVE_CURRENT_20_625_mA 0x33
479#define DRIVE_CURRENT_21_000_mA 0x34
480#define DRIVE_CURRENT_21_375_mA 0x35
481#define DRIVE_CURRENT_21_750_mA 0x36
482#define DRIVE_CURRENT_22_125_mA 0x37
483#define DRIVE_CURRENT_22_500_mA 0x38
484#define DRIVE_CURRENT_22_875_mA 0x39
485#define DRIVE_CURRENT_23_250_mA 0x3a
486#define DRIVE_CURRENT_23_625_mA 0x3b
487#define DRIVE_CURRENT_24_000_mA 0x3c
488#define DRIVE_CURRENT_24_375_mA 0x3d
489#define DRIVE_CURRENT_24_750_mA 0x3e
490
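The drive current codes follow a simple linear scale: 1.5 mA at code 0, rising in 0.375 mA steps to 24.75 mA at 0x3e (the PE_CURRENT codes further down do the same in 0.5 mA steps starting from 0 mA). A sketch of the mapping, should a value outside the named constants ever be needed (macro name is illustrative):

	/* Sketch: drive current code -> current in microamps. */
	#define DRIVE_CURRENT_uA(code)	(1500 + (code) * 375)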
491#define HDMI_NV_PDISP_AUDIO_DEBUG0 0x7f
492#define HDMI_NV_PDISP_AUDIO_DEBUG1 0x80
493#define HDMI_NV_PDISP_AUDIO_DEBUG2 0x81
494
495#define HDMI_NV_PDISP_AUDIO_FS(x) (0x82 + (x))
496#define AUDIO_FS_LOW(x) (((x) & 0xfff) << 0)
497#define AUDIO_FS_HIGH(x) (((x) & 0xfff) << 16)
498
499#define HDMI_NV_PDISP_AUDIO_PULSE_WIDTH 0x89
500#define HDMI_NV_PDISP_AUDIO_THRESHOLD 0x8a
501#define HDMI_NV_PDISP_AUDIO_CNTRL0 0x8b
502#define AUDIO_CNTRL0_ERROR_TOLERANCE(x) (((x) & 0xff) << 0)
503#define AUDIO_CNTRL0_SOURCE_SELECT_AUTO (0 << 20)
504#define AUDIO_CNTRL0_SOURCE_SELECT_SPDIF (1 << 20)
505#define AUDIO_CNTRL0_SOURCE_SELECT_HDAL (2 << 20)
506#define AUDIO_CNTRL0_FRAMES_PER_BLOCK(x) (((x) & 0xff) << 24)
507
508#define HDMI_NV_PDISP_AUDIO_N 0x8c
509#define AUDIO_N_VALUE(x) (((x) & 0xfffff) << 0)
510#define AUDIO_N_RESETF (1 << 20)
511#define AUDIO_N_GENERATE_NORMAL (0 << 24)
512#define AUDIO_N_GENERATE_ALTERNATE (1 << 24)
513
514#define HDMI_NV_PDISP_HDCPRIF_ROM_TIMING 0x94
515#define HDMI_NV_PDISP_SOR_REFCLK 0x95
516#define SOR_REFCLK_DIV_INT(x) (((x) & 0xff) << 8)
517#define SOR_REFCLK_DIV_FRAC(x) (((x) & 0x03) << 6)
518
519#define HDMI_NV_PDISP_CRC_CONTROL 0x96
520#define HDMI_NV_PDISP_INPUT_CONTROL 0x97
521#define HDMI_SRC_DISPLAYA (0 << 0)
522#define HDMI_SRC_DISPLAYB (1 << 0)
523#define ARM_VIDEO_RANGE_FULL (0 << 1)
524#define ARM_VIDEO_RANGE_LIMITED (1 << 1)
525
526#define HDMI_NV_PDISP_SCRATCH 0x98
527#define HDMI_NV_PDISP_PE_CURRENT 0x99
528#define PE_CURRENT0(x) (((x) & 0xf) << 0)
529#define PE_CURRENT1(x) (((x) & 0xf) << 8)
530#define PE_CURRENT2(x) (((x) & 0xf) << 16)
531#define PE_CURRENT3(x) (((x) & 0xf) << 24)
532
533#define PE_CURRENT_0_0_mA 0x0
534#define PE_CURRENT_0_5_mA 0x1
535#define PE_CURRENT_1_0_mA 0x2
536#define PE_CURRENT_1_5_mA 0x3
537#define PE_CURRENT_2_0_mA 0x4
538#define PE_CURRENT_2_5_mA 0x5
539#define PE_CURRENT_3_0_mA 0x6
540#define PE_CURRENT_3_5_mA 0x7
541#define PE_CURRENT_4_0_mA 0x8
542#define PE_CURRENT_4_5_mA 0x9
543#define PE_CURRENT_5_0_mA 0xa
544#define PE_CURRENT_5_5_mA 0xb
545#define PE_CURRENT_6_0_mA 0xc
546#define PE_CURRENT_6_5_mA 0xd
547#define PE_CURRENT_7_0_mA 0xe
548#define PE_CURRENT_7_5_mA 0xf
549
550#define HDMI_NV_PDISP_KEY_CTRL 0x9a
551#define HDMI_NV_PDISP_KEY_DEBUG0 0x9b
552#define HDMI_NV_PDISP_KEY_DEBUG1 0x9c
553#define HDMI_NV_PDISP_KEY_DEBUG2 0x9d
554#define HDMI_NV_PDISP_KEY_HDCP_KEY_0 0x9e
555#define HDMI_NV_PDISP_KEY_HDCP_KEY_1 0x9f
556#define HDMI_NV_PDISP_KEY_HDCP_KEY_2 0xa0
557#define HDMI_NV_PDISP_KEY_HDCP_KEY_3 0xa1
558#define HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG 0xa2
559#define HDMI_NV_PDISP_KEY_SKEY_INDEX 0xa3
560
561#define HDMI_NV_PDISP_SOR_AUDIO_CNTRL0 0xac
562#define AUDIO_CNTRL0_INJECT_NULLSMPL (1 << 29)
563#define HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR 0xbc
564#define HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE 0xbd
565
566#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320 0xbf
567#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441 0xc0
568#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882 0xc1
569#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764 0xc2
570#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480 0xc3
571#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960 0xc4
572#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 0xc5
573#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT 0xc5
574
575#endif /* TEGRA_HDMI_H */
diff --git a/drivers/gpu/drm/tegra/host1x.c b/drivers/gpu/drm/tegra/host1x.c
new file mode 100644
index 000000000000..bdb97a564d82
--- /dev/null
+++ b/drivers/gpu/drm/tegra/host1x.c
@@ -0,0 +1,325 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/clk.h>
11#include <linux/err.h>
12#include <linux/module.h>
13#include <linux/of.h>
14#include <linux/platform_device.h>
15
16#include "drm.h"
17
18struct host1x_drm_client {
19 struct host1x_client *client;
20 struct device_node *np;
21 struct list_head list;
22};
23
24static int host1x_add_drm_client(struct host1x *host1x, struct device_node *np)
25{
26 struct host1x_drm_client *client;
27
28 client = kzalloc(sizeof(*client), GFP_KERNEL);
29 if (!client)
30 return -ENOMEM;
31
32 INIT_LIST_HEAD(&client->list);
33 client->np = of_node_get(np);
34
35 list_add_tail(&client->list, &host1x->drm_clients);
36
37 return 0;
38}
39
40static int host1x_activate_drm_client(struct host1x *host1x,
41 struct host1x_drm_client *drm,
42 struct host1x_client *client)
43{
44 mutex_lock(&host1x->drm_clients_lock);
45 list_del_init(&drm->list);
46 list_add_tail(&drm->list, &host1x->drm_active);
47 drm->client = client;
48 mutex_unlock(&host1x->drm_clients_lock);
49
50 return 0;
51}
52
53static int host1x_remove_drm_client(struct host1x *host1x,
54 struct host1x_drm_client *client)
55{
56 mutex_lock(&host1x->drm_clients_lock);
57 list_del_init(&client->list);
58 mutex_unlock(&host1x->drm_clients_lock);
59
60 of_node_put(client->np);
61 kfree(client);
62
63 return 0;
64}
65
66static int host1x_parse_dt(struct host1x *host1x)
67{
68 static const char * const compat[] = {
69 "nvidia,tegra20-dc",
70 "nvidia,tegra20-hdmi",
71 "nvidia,tegra30-dc",
72 "nvidia,tegra30-hdmi",
73 };
74 unsigned int i;
75 int err;
76
77 for (i = 0; i < ARRAY_SIZE(compat); i++) {
78 struct device_node *np;
79
80 for_each_child_of_node(host1x->dev->of_node, np) {
81 if (of_device_is_compatible(np, compat[i]) &&
82 of_device_is_available(np)) {
83 err = host1x_add_drm_client(host1x, np);
84 if (err < 0)
85 return err;
86 }
87 }
88 }
89
90 return 0;
91}
92
93static int tegra_host1x_probe(struct platform_device *pdev)
94{
95 struct host1x *host1x;
96 struct resource *regs;
97 int err;
98
99 host1x = devm_kzalloc(&pdev->dev, sizeof(*host1x), GFP_KERNEL);
100 if (!host1x)
101 return -ENOMEM;
102
103 mutex_init(&host1x->drm_clients_lock);
104 INIT_LIST_HEAD(&host1x->drm_clients);
105 INIT_LIST_HEAD(&host1x->drm_active);
106 mutex_init(&host1x->clients_lock);
107 INIT_LIST_HEAD(&host1x->clients);
108 host1x->dev = &pdev->dev;
109
110 err = host1x_parse_dt(host1x);
111 if (err < 0) {
112 dev_err(&pdev->dev, "failed to parse DT: %d\n", err);
113 return err;
114 }
115
116 host1x->clk = devm_clk_get(&pdev->dev, NULL);
117 if (IS_ERR(host1x->clk))
118 return PTR_ERR(host1x->clk);
119
120 err = clk_prepare_enable(host1x->clk);
121 if (err < 0)
122 return err;
123
124 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
125 if (!regs) {
126 err = -ENXIO;
127 goto err;
128 }
129
130 err = platform_get_irq(pdev, 0);
131 if (err < 0)
132 goto err;
133
134 host1x->syncpt = err;
135
136 err = platform_get_irq(pdev, 1);
137 if (err < 0)
138 goto err;
139
140 host1x->irq = err;
141
142 host1x->regs = devm_request_and_ioremap(&pdev->dev, regs);
143 if (!host1x->regs) {
144 err = -EADDRNOTAVAIL;
145 goto err;
146 }
147
148 platform_set_drvdata(pdev, host1x);
149
150 return 0;
151
152err:
153 clk_disable_unprepare(host1x->clk);
154 return err;
155}
156
157static int tegra_host1x_remove(struct platform_device *pdev)
158{
159 struct host1x *host1x = platform_get_drvdata(pdev);
160
161 clk_disable_unprepare(host1x->clk);
162
163 return 0;
164}
165
166int host1x_drm_init(struct host1x *host1x, struct drm_device *drm)
167{
168 struct host1x_client *client;
169
170 mutex_lock(&host1x->clients_lock);
171
172 list_for_each_entry(client, &host1x->clients, list) {
173 if (client->ops && client->ops->drm_init) {
174 int err = client->ops->drm_init(client, drm);
175 if (err < 0) {
176 dev_err(host1x->dev,
177 "DRM setup failed for %s: %d\n",
178 dev_name(client->dev), err);
179 return err;
180 }
181 }
182 }
183
184 mutex_unlock(&host1x->clients_lock);
185
186 return 0;
187}
188
189int host1x_drm_exit(struct host1x *host1x)
190{
191 struct platform_device *pdev = to_platform_device(host1x->dev);
192 struct host1x_client *client;
193
194 if (!host1x->drm)
195 return 0;
196
197 mutex_lock(&host1x->clients_lock);
198
199 list_for_each_entry_reverse(client, &host1x->clients, list) {
200 if (client->ops && client->ops->drm_exit) {
201 int err = client->ops->drm_exit(client);
202 if (err < 0) {
203 dev_err(host1x->dev,
204 "DRM cleanup failed for %s: %d\n",
205 dev_name(client->dev), err);
206 return err;
207 }
208 }
209 }
210
211 mutex_unlock(&host1x->clients_lock);
212
213 drm_platform_exit(&tegra_drm_driver, pdev);
214 host1x->drm = NULL;
215
216 return 0;
217}
218
219int host1x_register_client(struct host1x *host1x, struct host1x_client *client)
220{
221 struct host1x_drm_client *drm, *tmp;
222 int err;
223
224 mutex_lock(&host1x->clients_lock);
225 list_add_tail(&client->list, &host1x->clients);
226 mutex_unlock(&host1x->clients_lock);
227
228 list_for_each_entry_safe(drm, tmp, &host1x->drm_clients, list)
229 if (drm->np == client->dev->of_node)
230 host1x_activate_drm_client(host1x, drm, client);
231
232 if (list_empty(&host1x->drm_clients)) {
233 struct platform_device *pdev = to_platform_device(host1x->dev);
234
235 err = drm_platform_init(&tegra_drm_driver, pdev);
236 if (err < 0) {
237 dev_err(host1x->dev, "drm_platform_init(): %d\n", err);
238 return err;
239 }
240 }
241
242 return 0;
243}
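host1x_register_client() implements a simple deferred bring-up: every DT-declared client sits on drm_clients until its driver probes, and only once the last one registers (the list empties) does drm_platform_init() create the DRM device. A minimal sketch of the client side, mirroring what the HDMI driver's probe above does (field names from struct host1x_client):

	/* In a client driver's probe (sketch): */
	client->ops = &hdmi_client_ops;
	INIT_LIST_HEAD(&client->list);
	client->dev = &pdev->dev;

	err = host1x_register_client(host1x, client);
	if (err < 0)
		return err;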
244
245int host1x_unregister_client(struct host1x *host1x,
246 struct host1x_client *client)
247{
248 struct host1x_drm_client *drm, *tmp;
249 int err;
250
251 list_for_each_entry_safe(drm, tmp, &host1x->drm_active, list) {
252 if (drm->client == client) {
253 err = host1x_drm_exit(host1x);
254 if (err < 0) {
255 dev_err(host1x->dev, "host1x_drm_exit(): %d\n",
256 err);
257 return err;
258 }
259
260 host1x_remove_drm_client(host1x, drm);
261 break;
262 }
263 }
264
265 mutex_lock(&host1x->clients_lock);
266 list_del_init(&client->list);
267 mutex_unlock(&host1x->clients_lock);
268
269 return 0;
270}
271
272static struct of_device_id tegra_host1x_of_match[] = {
273 { .compatible = "nvidia,tegra30-host1x", },
274 { .compatible = "nvidia,tegra20-host1x", },
275 { },
276};
277MODULE_DEVICE_TABLE(of, tegra_host1x_of_match);
278
279struct platform_driver tegra_host1x_driver = {
280 .driver = {
281 .name = "tegra-host1x",
282 .owner = THIS_MODULE,
283 .of_match_table = tegra_host1x_of_match,
284 },
285 .probe = tegra_host1x_probe,
286 .remove = tegra_host1x_remove,
287};
288
289static int __init tegra_host1x_init(void)
290{
291 int err;
292
293 err = platform_driver_register(&tegra_host1x_driver);
294 if (err < 0)
295 return err;
296
297 err = platform_driver_register(&tegra_dc_driver);
298 if (err < 0)
299 goto unregister_host1x;
300
301 err = platform_driver_register(&tegra_hdmi_driver);
302 if (err < 0)
303 goto unregister_dc;
304
305 return 0;
306
307unregister_dc:
308 platform_driver_unregister(&tegra_dc_driver);
309unregister_host1x:
310 platform_driver_unregister(&tegra_host1x_driver);
311 return err;
312}
313module_init(tegra_host1x_init);
314
315static void __exit tegra_host1x_exit(void)
316{
317 platform_driver_unregister(&tegra_hdmi_driver);
318 platform_driver_unregister(&tegra_dc_driver);
319 platform_driver_unregister(&tegra_host1x_driver);
320}
321module_exit(tegra_host1x_exit);
322
323MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
324MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
325MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
new file mode 100644
index 000000000000..8140fc6c34d8
--- /dev/null
+++ b/drivers/gpu/drm/tegra/output.c
@@ -0,0 +1,272 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/module.h>
11#include <linux/of_gpio.h>
12#include <linux/of_i2c.h>
13
14#include "drm.h"
15
16static int tegra_connector_get_modes(struct drm_connector *connector)
17{
18 struct tegra_output *output = connector_to_output(connector);
19 struct edid *edid = NULL;
20 int err = 0;
21
22 if (output->edid)
23 edid = kmemdup(output->edid, sizeof(*edid), GFP_KERNEL);
24 else if (output->ddc)
25 edid = drm_get_edid(connector, output->ddc);
26
27 drm_mode_connector_update_edid_property(connector, edid);
28
29 if (edid) {
30 err = drm_add_edid_modes(connector, edid);
31 kfree(edid);
32 }
33
34 return err;
35}
36
37static int tegra_connector_mode_valid(struct drm_connector *connector,
38 struct drm_display_mode *mode)
39{
40 struct tegra_output *output = connector_to_output(connector);
41 enum drm_mode_status status = MODE_OK;
42 int err;
43
44 err = tegra_output_check_mode(output, mode, &status);
45 if (err < 0)
46 return MODE_ERROR;
47
48 return status;
49}
50
51static struct drm_encoder *
52tegra_connector_best_encoder(struct drm_connector *connector)
53{
54 struct tegra_output *output = connector_to_output(connector);
55
56 return &output->encoder;
57}
58
59static const struct drm_connector_helper_funcs connector_helper_funcs = {
60 .get_modes = tegra_connector_get_modes,
61 .mode_valid = tegra_connector_mode_valid,
62 .best_encoder = tegra_connector_best_encoder,
63};
64
65static enum drm_connector_status
66tegra_connector_detect(struct drm_connector *connector, bool force)
67{
68 struct tegra_output *output = connector_to_output(connector);
69 enum drm_connector_status status = connector_status_unknown;
70
71 if (gpio_is_valid(output->hpd_gpio)) {
72 if (gpio_get_value(output->hpd_gpio) == 0)
73 status = connector_status_disconnected;
74 else
75 status = connector_status_connected;
76 } else {
77 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
78 status = connector_status_connected;
79 }
80
81 return status;
82}
83
84static void tegra_connector_destroy(struct drm_connector *connector)
85{
86 drm_sysfs_connector_remove(connector);
87 drm_connector_cleanup(connector);
88}
89
90static const struct drm_connector_funcs connector_funcs = {
91 .dpms = drm_helper_connector_dpms,
92 .detect = tegra_connector_detect,
93 .fill_modes = drm_helper_probe_single_connector_modes,
94 .destroy = tegra_connector_destroy,
95};
96
97static void tegra_encoder_destroy(struct drm_encoder *encoder)
98{
99 drm_encoder_cleanup(encoder);
100}
101
102static const struct drm_encoder_funcs encoder_funcs = {
103 .destroy = tegra_encoder_destroy,
104};
105
106static void tegra_encoder_dpms(struct drm_encoder *encoder, int mode)
107{
108}
109
110static bool tegra_encoder_mode_fixup(struct drm_encoder *encoder,
111 const struct drm_display_mode *mode,
112 struct drm_display_mode *adjusted)
113{
114 return true;
115}
116
117static void tegra_encoder_prepare(struct drm_encoder *encoder)
118{
119}
120
121static void tegra_encoder_commit(struct drm_encoder *encoder)
122{
123}
124
125static void tegra_encoder_mode_set(struct drm_encoder *encoder,
126 struct drm_display_mode *mode,
127 struct drm_display_mode *adjusted)
128{
129 struct tegra_output *output = encoder_to_output(encoder);
130 int err;
131
132 err = tegra_output_enable(output);
133 if (err < 0)
134 dev_err(encoder->dev->dev, "tegra_output_enable(): %d\n", err);
135}
136
137static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
138 .dpms = tegra_encoder_dpms,
139 .mode_fixup = tegra_encoder_mode_fixup,
140 .prepare = tegra_encoder_prepare,
141 .commit = tegra_encoder_commit,
142 .mode_set = tegra_encoder_mode_set,
143};
144
145static irqreturn_t hpd_irq(int irq, void *data)
146{
147 struct tegra_output *output = data;
148
149 drm_helper_hpd_irq_event(output->connector.dev);
150
151 return IRQ_HANDLED;
152}
153
154int tegra_output_parse_dt(struct tegra_output *output)
155{
156 enum of_gpio_flags flags;
157 struct device_node *ddc;
158 size_t size;
159 int err;
160
161 if (!output->of_node)
162 output->of_node = output->dev->of_node;
163
164 output->edid = of_get_property(output->of_node, "nvidia,edid", &size);
165
166 ddc = of_parse_phandle(output->of_node, "nvidia,ddc-i2c-bus", 0);
167 if (ddc) {
168 output->ddc = of_find_i2c_adapter_by_node(ddc);
169 if (!output->ddc) {
170 err = -EPROBE_DEFER;
171 of_node_put(ddc);
172 return err;
173 }
174
175 of_node_put(ddc);
176 }
177
178 if (!output->edid && !output->ddc)
179 return -ENODEV;
180
181 output->hpd_gpio = of_get_named_gpio_flags(output->of_node,
182 "nvidia,hpd-gpio", 0,
183 &flags);
184
185 return 0;
186}
187
188int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
189{
190 int connector, encoder, err;
191
192 if (gpio_is_valid(output->hpd_gpio)) {
193 unsigned long flags;
194
195 err = gpio_request_one(output->hpd_gpio, GPIOF_DIR_IN,
196 "HDMI hotplug detect");
197 if (err < 0) {
198 dev_err(output->dev, "gpio_request_one(): %d\n", err);
199 return err;
200 }
201
202 err = gpio_to_irq(output->hpd_gpio);
203 if (err < 0) {
204 dev_err(output->dev, "gpio_to_irq(): %d\n", err);
205 goto free_hpd;
206 }
207
208 output->hpd_irq = err;
209
210 flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
211 IRQF_ONESHOT;
212
213 err = request_threaded_irq(output->hpd_irq, NULL, hpd_irq,
214 flags, "hpd", output);
215 if (err < 0) {
216 dev_err(output->dev, "failed to request IRQ#%u: %d\n",
217 output->hpd_irq, err);
218 goto free_hpd;
219 }
220
221 output->connector.polled = DRM_CONNECTOR_POLL_HPD;
222 }
223
224 switch (output->type) {
225 case TEGRA_OUTPUT_RGB:
226 connector = DRM_MODE_CONNECTOR_LVDS;
227 encoder = DRM_MODE_ENCODER_LVDS;
228 break;
229
230 case TEGRA_OUTPUT_HDMI:
231 connector = DRM_MODE_CONNECTOR_HDMIA;
232 encoder = DRM_MODE_ENCODER_TMDS;
233 break;
234
235 default:
236 connector = DRM_MODE_CONNECTOR_Unknown;
237 encoder = DRM_MODE_ENCODER_NONE;
238 break;
239 }
240
241 drm_connector_init(drm, &output->connector, &connector_funcs,
242 connector);
243 drm_connector_helper_add(&output->connector, &connector_helper_funcs);
244
245 drm_encoder_init(drm, &output->encoder, &encoder_funcs, encoder);
246 drm_encoder_helper_add(&output->encoder, &encoder_helper_funcs);
247
248 drm_mode_connector_attach_encoder(&output->connector, &output->encoder);
249 drm_sysfs_connector_add(&output->connector);
250
251 output->encoder.possible_crtcs = 0x3;
252
253 return 0;
254
255free_hpd:
256 gpio_free(output->hpd_gpio);
257
258 return err;
259}
260
261int tegra_output_exit(struct tegra_output *output)
262{
263 if (gpio_is_valid(output->hpd_gpio)) {
264 free_irq(output->hpd_irq, output);
265 gpio_free(output->hpd_gpio);
266 }
267
268 if (output->ddc)
269 put_device(&output->ddc->dev);
270
271 return 0;
272}
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
new file mode 100644
index 000000000000..ed4416f20260
--- /dev/null
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -0,0 +1,228 @@
1/*
2 * Copyright (C) 2012 Avionic Design GmbH
3 * Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/clk.h>
11#include <linux/module.h>
12#include <linux/of.h>
13#include <linux/platform_device.h>
14
15#include "drm.h"
16#include "dc.h"
17
18struct tegra_rgb {
19 struct tegra_output output;
20 struct clk *clk_parent;
21 struct clk *clk;
22};
23
24static inline struct tegra_rgb *to_rgb(struct tegra_output *output)
25{
26 return container_of(output, struct tegra_rgb, output);
27}
28
29struct reg_entry {
30 unsigned long offset;
31 unsigned long value;
32};
33
34static const struct reg_entry rgb_enable[] = {
35 { DC_COM_PIN_OUTPUT_ENABLE(0), 0x00000000 },
36 { DC_COM_PIN_OUTPUT_ENABLE(1), 0x00000000 },
37 { DC_COM_PIN_OUTPUT_ENABLE(2), 0x00000000 },
38 { DC_COM_PIN_OUTPUT_ENABLE(3), 0x00000000 },
39 { DC_COM_PIN_OUTPUT_POLARITY(0), 0x00000000 },
40 { DC_COM_PIN_OUTPUT_POLARITY(1), 0x01000000 },
41 { DC_COM_PIN_OUTPUT_POLARITY(2), 0x00000000 },
42 { DC_COM_PIN_OUTPUT_POLARITY(3), 0x00000000 },
43 { DC_COM_PIN_OUTPUT_DATA(0), 0x00000000 },
44 { DC_COM_PIN_OUTPUT_DATA(1), 0x00000000 },
45 { DC_COM_PIN_OUTPUT_DATA(2), 0x00000000 },
46 { DC_COM_PIN_OUTPUT_DATA(3), 0x00000000 },
47 { DC_COM_PIN_OUTPUT_SELECT(0), 0x00000000 },
48 { DC_COM_PIN_OUTPUT_SELECT(1), 0x00000000 },
49 { DC_COM_PIN_OUTPUT_SELECT(2), 0x00000000 },
50 { DC_COM_PIN_OUTPUT_SELECT(3), 0x00000000 },
51 { DC_COM_PIN_OUTPUT_SELECT(4), 0x00210222 },
52 { DC_COM_PIN_OUTPUT_SELECT(5), 0x00002200 },
53 { DC_COM_PIN_OUTPUT_SELECT(6), 0x00020000 },
54};
55
56static const struct reg_entry rgb_disable[] = {
57 { DC_COM_PIN_OUTPUT_SELECT(6), 0x00000000 },
58 { DC_COM_PIN_OUTPUT_SELECT(5), 0x00000000 },
59 { DC_COM_PIN_OUTPUT_SELECT(4), 0x00000000 },
60 { DC_COM_PIN_OUTPUT_SELECT(3), 0x00000000 },
61 { DC_COM_PIN_OUTPUT_SELECT(2), 0x00000000 },
62 { DC_COM_PIN_OUTPUT_SELECT(1), 0x00000000 },
63 { DC_COM_PIN_OUTPUT_SELECT(0), 0x00000000 },
64 { DC_COM_PIN_OUTPUT_DATA(3), 0xaaaaaaaa },
65 { DC_COM_PIN_OUTPUT_DATA(2), 0xaaaaaaaa },
66 { DC_COM_PIN_OUTPUT_DATA(1), 0xaaaaaaaa },
67 { DC_COM_PIN_OUTPUT_DATA(0), 0xaaaaaaaa },
68 { DC_COM_PIN_OUTPUT_POLARITY(3), 0x00000000 },
69 { DC_COM_PIN_OUTPUT_POLARITY(2), 0x00000000 },
70 { DC_COM_PIN_OUTPUT_POLARITY(1), 0x00000000 },
71 { DC_COM_PIN_OUTPUT_POLARITY(0), 0x00000000 },
72 { DC_COM_PIN_OUTPUT_ENABLE(3), 0x55555555 },
73 { DC_COM_PIN_OUTPUT_ENABLE(2), 0x55555555 },
74 { DC_COM_PIN_OUTPUT_ENABLE(1), 0x55150005 },
75 { DC_COM_PIN_OUTPUT_ENABLE(0), 0x55555555 },
76};
77
78static void tegra_dc_write_regs(struct tegra_dc *dc,
79 const struct reg_entry *table,
80 unsigned int num)
81{
82 unsigned int i;
83
84 for (i = 0; i < num; i++)
85 tegra_dc_writel(dc, table[i].value, table[i].offset);
86}
87
88static int tegra_output_rgb_enable(struct tegra_output *output)
89{
90 struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
91
92 tegra_dc_write_regs(dc, rgb_enable, ARRAY_SIZE(rgb_enable));
93
94 return 0;
95}
96
97static int tegra_output_rgb_disable(struct tegra_output *output)
98{
99 struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
100
101 tegra_dc_write_regs(dc, rgb_disable, ARRAY_SIZE(rgb_disable));
102
103 return 0;
104}
105
106static int tegra_output_rgb_setup_clock(struct tegra_output *output,
107 struct clk *clk, unsigned long pclk)
108{
109 struct tegra_rgb *rgb = to_rgb(output);
110
111 return clk_set_parent(clk, rgb->clk_parent);
112}
113
114static int tegra_output_rgb_check_mode(struct tegra_output *output,
115 struct drm_display_mode *mode,
116 enum drm_mode_status *status)
117{
118 /*
119 * FIXME: For now, always assume that the mode is okay. There are
120 * unresolved issues with clk_round_rate(), which doesn't always
121 * reliably report whether a frequency can be set.
122 */
123
124 *status = MODE_OK;
125
126 return 0;
127}
128
129static const struct tegra_output_ops rgb_ops = {
130 .enable = tegra_output_rgb_enable,
131 .disable = tegra_output_rgb_disable,
132 .setup_clock = tegra_output_rgb_setup_clock,
133 .check_mode = tegra_output_rgb_check_mode,
134};
135
136int tegra_dc_rgb_probe(struct tegra_dc *dc)
137{
138 struct device_node *np;
139 struct tegra_rgb *rgb;
140 int err;
141
142 np = of_get_child_by_name(dc->dev->of_node, "rgb");
143 if (!np || !of_device_is_available(np))
144 return -ENODEV;
145
146 rgb = devm_kzalloc(dc->dev, sizeof(*rgb), GFP_KERNEL);
147 if (!rgb)
148 return -ENOMEM;
149
150 rgb->clk = devm_clk_get(dc->dev, NULL);
151 if (IS_ERR(rgb->clk)) {
152 dev_err(dc->dev, "failed to get clock\n");
153 return PTR_ERR(rgb->clk);
154 }
155
156 rgb->clk_parent = devm_clk_get(dc->dev, "parent");
157 if (IS_ERR(rgb->clk_parent)) {
158 dev_err(dc->dev, "failed to get parent clock\n");
159 return PTR_ERR(rgb->clk_parent);
160 }
161
162 err = clk_set_parent(rgb->clk, rgb->clk_parent);
163 if (err < 0) {
164 dev_err(dc->dev, "failed to set parent clock: %d\n", err);
165 return err;
166 }
167
168 rgb->output.dev = dc->dev;
169 rgb->output.of_node = np;
170
171 err = tegra_output_parse_dt(&rgb->output);
172 if (err < 0)
173 return err;
174
175 dc->rgb = &rgb->output;
176
177 return 0;
178}
179
180int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
181{
182 struct tegra_rgb *rgb = to_rgb(dc->rgb);
183 int err;
184
185 if (!dc->rgb)
186 return -ENODEV;
187
188 rgb->output.type = TEGRA_OUTPUT_RGB;
189 rgb->output.ops = &rgb_ops;
190
191 err = tegra_output_init(dc->base.dev, &rgb->output);
192 if (err < 0) {
193 dev_err(dc->dev, "output setup failed: %d\n", err);
194 return err;
195 }
196
197 /*
198 * By default, an output can be driven by any display controller. RGB
199 * outputs are an exception: they are tied to a single controller, so
200 * make sure they can be attached only to their parent display controller.
201 */
202 rgb->output.encoder.possible_crtcs = 1 << dc->pipe;
203
204 return 0;
205}
206
207int tegra_dc_rgb_exit(struct tegra_dc *dc)
208{
209 if (dc->rgb) {
210 int err;
211
212 err = tegra_output_disable(dc->rgb);
213 if (err < 0) {
214 dev_err(dc->dev, "output failed to disable: %d\n", err);
215 return err;
216 }
217
218 err = tegra_output_exit(dc->rgb);
219 if (err < 0) {
220 dev_err(dc->dev, "output cleanup failed: %d\n", err);
221 return err;
222 }
223
224 dc->rgb = NULL;
225 }
226
227 return 0;
228}
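
possible_crtcs is a bitmask with one bit per CRTC index: tegra_output_init() above defaults it to 0x3 (either Tegra display controller), and tegra_dc_rgb_init() narrows it to 1 << dc->pipe as the comment explains. A quick sketch of how such a mask is built and tested; crtc_allowed() is a hypothetical helper, not part of DRM.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helper: may this encoder use the CRTC at this index? */
static bool crtc_allowed(unsigned int mask, unsigned int crtc_index)
{
    return mask & (1u << crtc_index);
}

int main(void)
{
    unsigned int any_crtc = 0x3;           /* CRTCs 0 and 1, as in output.c */
    unsigned int pipe = 1;                 /* dc->pipe of the parent DC */
    unsigned int parent_only = 1u << pipe; /* as in tegra_dc_rgb_init() */

    printf("any:    crtc0=%d crtc1=%d\n",
           crtc_allowed(any_crtc, 0), crtc_allowed(any_crtc, 1));
    printf("parent: crtc0=%d crtc1=%d\n",
           crtc_allowed(parent_only, 0), crtc_allowed(parent_only, 1));
    return 0;
}
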
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index bf6e4b5a73b5..a9151337d5b9 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -162,9 +162,9 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
162{ 162{
163 if (interruptible) { 163 if (interruptible) {
164 return wait_event_interruptible(bo->event_queue, 164 return wait_event_interruptible(bo->event_queue,
165 atomic_read(&bo->reserved) == 0); 165 !ttm_bo_is_reserved(bo));
166 } else { 166 } else {
167 wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0); 167 wait_event(bo->event_queue, !ttm_bo_is_reserved(bo));
168 return 0; 168 return 0;
169 } 169 }
170} 170}
@@ -175,7 +175,7 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
175 struct ttm_bo_device *bdev = bo->bdev; 175 struct ttm_bo_device *bdev = bo->bdev;
176 struct ttm_mem_type_manager *man; 176 struct ttm_mem_type_manager *man;
177 177
178 BUG_ON(!atomic_read(&bo->reserved)); 178 BUG_ON(!ttm_bo_is_reserved(bo));
179 179
180 if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { 180 if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
181 181
@@ -220,7 +220,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
220 struct ttm_bo_global *glob = bo->glob; 220 struct ttm_bo_global *glob = bo->glob;
221 int ret; 221 int ret;
222 222
223 while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) { 223 while (unlikely(atomic_read(&bo->reserved) != 0)) {
224 /** 224 /**
225 * Deadlock avoidance for multi-bo reserving. 225 * Deadlock avoidance for multi-bo reserving.
226 */ 226 */
@@ -249,6 +249,7 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
249 return ret; 249 return ret;
250 } 250 }
251 251
252 atomic_set(&bo->reserved, 1);
252 if (use_sequence) { 253 if (use_sequence) {
253 /** 254 /**
254 * Wake up waiters that may need to recheck for deadlock, 255 * Wake up waiters that may need to recheck for deadlock,
@@ -365,7 +366,7 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
365static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, 366static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
366 struct ttm_mem_reg *mem, 367 struct ttm_mem_reg *mem,
367 bool evict, bool interruptible, 368 bool evict, bool interruptible,
368 bool no_wait_reserve, bool no_wait_gpu) 369 bool no_wait_gpu)
369{ 370{
370 struct ttm_bo_device *bdev = bo->bdev; 371 struct ttm_bo_device *bdev = bo->bdev;
371 bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem); 372 bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -419,12 +420,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
419 420
420 if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) && 421 if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
421 !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) 422 !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
422 ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem); 423 ret = ttm_bo_move_ttm(bo, evict, no_wait_gpu, mem);
423 else if (bdev->driver->move) 424 else if (bdev->driver->move)
424 ret = bdev->driver->move(bo, evict, interruptible, 425 ret = bdev->driver->move(bo, evict, interruptible,
425 no_wait_reserve, no_wait_gpu, mem); 426 no_wait_gpu, mem);
426 else 427 else
427 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem); 428 ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
428 429
429 if (ret) { 430 if (ret) {
430 if (bdev->driver->move_notify) { 431 if (bdev->driver->move_notify) {
@@ -487,40 +488,33 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
487 ttm_bo_mem_put(bo, &bo->mem); 488 ttm_bo_mem_put(bo, &bo->mem);
488 489
489 atomic_set(&bo->reserved, 0); 490 atomic_set(&bo->reserved, 0);
491 wake_up_all(&bo->event_queue);
490 492
491 /* 493 /*
492 * Make processes trying to reserve really pick it up. 494 * Since the final reference to this bo may not be dropped by
495 * the current task, we have to put a memory barrier here to make
496 * sure the changes done in this function are always visible.
497 *
498 * This function only needs protection against the final kref_put.
493 */ 499 */
494 smp_mb__after_atomic_dec(); 500 smp_mb__before_atomic_dec();
495 wake_up_all(&bo->event_queue);
496} 501}
497 502
498static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) 503static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
499{ 504{
500 struct ttm_bo_device *bdev = bo->bdev; 505 struct ttm_bo_device *bdev = bo->bdev;
501 struct ttm_bo_global *glob = bo->glob; 506 struct ttm_bo_global *glob = bo->glob;
502 struct ttm_bo_driver *driver; 507 struct ttm_bo_driver *driver = bdev->driver;
503 void *sync_obj = NULL; 508 void *sync_obj = NULL;
504 void *sync_obj_arg;
505 int put_count; 509 int put_count;
506 int ret; 510 int ret;
507 511
512 spin_lock(&glob->lru_lock);
513 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
514
508 spin_lock(&bdev->fence_lock); 515 spin_lock(&bdev->fence_lock);
509 (void) ttm_bo_wait(bo, false, false, true); 516 (void) ttm_bo_wait(bo, false, false, true);
510 if (!bo->sync_obj) { 517 if (!ret && !bo->sync_obj) {
511
512 spin_lock(&glob->lru_lock);
513
514 /**
515 * Lock inversion between bo:reserve and bdev::fence_lock here,
516 * but that's OK, since we're only trylocking.
517 */
518
519 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
520
521 if (unlikely(ret == -EBUSY))
522 goto queue;
523
524 spin_unlock(&bdev->fence_lock); 518 spin_unlock(&bdev->fence_lock);
525 put_count = ttm_bo_del_from_lru(bo); 519 put_count = ttm_bo_del_from_lru(bo);
526 520
@@ -530,22 +524,22 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
530 ttm_bo_list_ref_sub(bo, put_count, true); 524 ttm_bo_list_ref_sub(bo, put_count, true);
531 525
532 return; 526 return;
533 } else {
534 spin_lock(&glob->lru_lock);
535 } 527 }
536queue:
537 driver = bdev->driver;
538 if (bo->sync_obj) 528 if (bo->sync_obj)
539 sync_obj = driver->sync_obj_ref(bo->sync_obj); 529 sync_obj = driver->sync_obj_ref(bo->sync_obj);
540 sync_obj_arg = bo->sync_obj_arg; 530 spin_unlock(&bdev->fence_lock);
531
532 if (!ret) {
533 atomic_set(&bo->reserved, 0);
534 wake_up_all(&bo->event_queue);
535 }
541 536
542 kref_get(&bo->list_kref); 537 kref_get(&bo->list_kref);
543 list_add_tail(&bo->ddestroy, &bdev->ddestroy); 538 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
544 spin_unlock(&glob->lru_lock); 539 spin_unlock(&glob->lru_lock);
545 spin_unlock(&bdev->fence_lock);
546 540
547 if (sync_obj) { 541 if (sync_obj) {
548 driver->sync_obj_flush(sync_obj, sync_obj_arg); 542 driver->sync_obj_flush(sync_obj);
549 driver->sync_obj_unref(&sync_obj); 543 driver->sync_obj_unref(&sync_obj);
550 } 544 }
551 schedule_delayed_work(&bdev->wq, 545 schedule_delayed_work(&bdev->wq,
@@ -553,68 +547,84 @@ queue:
553} 547}
554 548
555/** 549/**
556 * function ttm_bo_cleanup_refs 550 * function ttm_bo_cleanup_refs_and_unlock
557 * If bo idle, remove from delayed- and lru lists, and unref. 551 * If bo idle, remove from delayed- and lru lists, and unref.
558 * If not idle, do nothing. 552 * If not idle, do nothing.
559 * 553 *
554 * Must be called with lru_lock and reservation held, this function
555 * will drop both before returning.
556 *
560 * @interruptible Any sleeps should occur interruptibly. 557 * @interruptible Any sleeps should occur interruptibly.
561 * @no_wait_reserve Never wait for reserve. Return -EBUSY instead.
562 * @no_wait_gpu Never wait for gpu. Return -EBUSY instead. 558 * @no_wait_gpu Never wait for gpu. Return -EBUSY instead.
563 */ 559 */
564 560
565static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, 561static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
566 bool interruptible, 562 bool interruptible,
567 bool no_wait_reserve, 563 bool no_wait_gpu)
568 bool no_wait_gpu)
569{ 564{
570 struct ttm_bo_device *bdev = bo->bdev; 565 struct ttm_bo_device *bdev = bo->bdev;
566 struct ttm_bo_driver *driver = bdev->driver;
571 struct ttm_bo_global *glob = bo->glob; 567 struct ttm_bo_global *glob = bo->glob;
572 int put_count; 568 int put_count;
573 int ret = 0; 569 int ret;
574 570
575retry:
576 spin_lock(&bdev->fence_lock); 571 spin_lock(&bdev->fence_lock);
577 ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu); 572 ret = ttm_bo_wait(bo, false, false, true);
578 spin_unlock(&bdev->fence_lock);
579 573
580 if (unlikely(ret != 0)) 574 if (ret && !no_wait_gpu) {
581 return ret; 575 void *sync_obj;
582 576
583retry_reserve: 577 /*
584 spin_lock(&glob->lru_lock); 578 * Take a reference to the fence and unreserve;
579 * at this point the buffer should be dead, so
580 * no new sync objects can be attached.
581 */
582 sync_obj = driver->sync_obj_ref(&bo->sync_obj);
583 spin_unlock(&bdev->fence_lock);
585 584
586 if (unlikely(list_empty(&bo->ddestroy))) { 585 atomic_set(&bo->reserved, 0);
586 wake_up_all(&bo->event_queue);
587 spin_unlock(&glob->lru_lock); 587 spin_unlock(&glob->lru_lock);
588 return 0;
589 }
590
591 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
592 588
593 if (unlikely(ret == -EBUSY)) { 589 ret = driver->sync_obj_wait(sync_obj, false, interruptible);
594 spin_unlock(&glob->lru_lock); 590 driver->sync_obj_unref(&sync_obj);
595 if (likely(!no_wait_reserve)) 591 if (ret)
596 ret = ttm_bo_wait_unreserved(bo, interruptible);
597 if (unlikely(ret != 0))
598 return ret; 592 return ret;
599 593
600 goto retry_reserve; 594 /*
601 } 595 * remove sync_obj with ttm_bo_wait, the wait should be
596 * finished, and no new wait object should have been added.
597 */
598 spin_lock(&bdev->fence_lock);
599 ret = ttm_bo_wait(bo, false, false, true);
600 WARN_ON(ret);
601 spin_unlock(&bdev->fence_lock);
602 if (ret)
603 return ret;
602 604
603 BUG_ON(ret != 0); 605 spin_lock(&glob->lru_lock);
606 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
604 607
605 /** 608 /*
606 * We can re-check for sync object without taking 609 * We raced and lost: someone else holds the reservation now,
607 * the bo::lock since setting the sync object requires 610 * and is probably busy in ttm_bo_cleanup_memtype_use.
608 * also bo::reserved. A busy object at this point may 611 *
609 * be caused by another thread recently starting an accelerated 612 * Even if that's not the case, any delayed destruction
610 * eviction. 613 * would now succeed because we finished waiting, so just
611 */ 614 * return success here.
615 */
616 if (ret) {
617 spin_unlock(&glob->lru_lock);
618 return 0;
619 }
620 } else
621 spin_unlock(&bdev->fence_lock);
612 622
613 if (unlikely(bo->sync_obj)) { 623 if (ret || unlikely(list_empty(&bo->ddestroy))) {
614 atomic_set(&bo->reserved, 0); 624 atomic_set(&bo->reserved, 0);
615 wake_up_all(&bo->event_queue); 625 wake_up_all(&bo->event_queue);
616 spin_unlock(&glob->lru_lock); 626 spin_unlock(&glob->lru_lock);
617 goto retry; 627 return ret;
618 } 628 }
619 629
620 put_count = ttm_bo_del_from_lru(bo); 630 put_count = ttm_bo_del_from_lru(bo);
@@ -657,9 +667,13 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
657 kref_get(&nentry->list_kref); 667 kref_get(&nentry->list_kref);
658 } 668 }
659 669
660 spin_unlock(&glob->lru_lock); 670 ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0);
661 ret = ttm_bo_cleanup_refs(entry, false, !remove_all, 671 if (!ret)
662 !remove_all); 672 ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
673 !remove_all);
674 else
675 spin_unlock(&glob->lru_lock);
676
663 kref_put(&entry->list_kref, ttm_bo_release_list); 677 kref_put(&entry->list_kref, ttm_bo_release_list);
664 entry = nentry; 678 entry = nentry;
665 679
@@ -697,6 +711,7 @@ static void ttm_bo_release(struct kref *kref)
697 struct ttm_bo_device *bdev = bo->bdev; 711 struct ttm_bo_device *bdev = bo->bdev;
698 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type]; 712 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
699 713
714 write_lock(&bdev->vm_lock);
700 if (likely(bo->vm_node != NULL)) { 715 if (likely(bo->vm_node != NULL)) {
701 rb_erase(&bo->vm_rb, &bdev->addr_space_rb); 716 rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
702 drm_mm_put_block(bo->vm_node); 717 drm_mm_put_block(bo->vm_node);
@@ -708,18 +723,14 @@ static void ttm_bo_release(struct kref *kref)
708 ttm_mem_io_unlock(man); 723 ttm_mem_io_unlock(man);
709 ttm_bo_cleanup_refs_or_queue(bo); 724 ttm_bo_cleanup_refs_or_queue(bo);
710 kref_put(&bo->list_kref, ttm_bo_release_list); 725 kref_put(&bo->list_kref, ttm_bo_release_list);
711 write_lock(&bdev->vm_lock);
712} 726}
713 727
714void ttm_bo_unref(struct ttm_buffer_object **p_bo) 728void ttm_bo_unref(struct ttm_buffer_object **p_bo)
715{ 729{
716 struct ttm_buffer_object *bo = *p_bo; 730 struct ttm_buffer_object *bo = *p_bo;
717 struct ttm_bo_device *bdev = bo->bdev;
718 731
719 *p_bo = NULL; 732 *p_bo = NULL;
720 write_lock(&bdev->vm_lock);
721 kref_put(&bo->kref, ttm_bo_release); 733 kref_put(&bo->kref, ttm_bo_release);
722 write_unlock(&bdev->vm_lock);
723} 734}
724EXPORT_SYMBOL(ttm_bo_unref); 735EXPORT_SYMBOL(ttm_bo_unref);
725 736
@@ -738,7 +749,7 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
738EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue); 749EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
739 750
740static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, 751static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
741 bool no_wait_reserve, bool no_wait_gpu) 752 bool no_wait_gpu)
742{ 753{
743 struct ttm_bo_device *bdev = bo->bdev; 754 struct ttm_bo_device *bdev = bo->bdev;
744 struct ttm_mem_reg evict_mem; 755 struct ttm_mem_reg evict_mem;
@@ -756,7 +767,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
756 goto out; 767 goto out;
757 } 768 }
758 769
759 BUG_ON(!atomic_read(&bo->reserved)); 770 BUG_ON(!ttm_bo_is_reserved(bo));
760 771
761 evict_mem = bo->mem; 772 evict_mem = bo->mem;
762 evict_mem.mm_node = NULL; 773 evict_mem.mm_node = NULL;
@@ -769,7 +780,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
769 placement.num_busy_placement = 0; 780 placement.num_busy_placement = 0;
770 bdev->driver->evict_flags(bo, &placement); 781 bdev->driver->evict_flags(bo, &placement);
771 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible, 782 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
772 no_wait_reserve, no_wait_gpu); 783 no_wait_gpu);
773 if (ret) { 784 if (ret) {
774 if (ret != -ERESTARTSYS) { 785 if (ret != -ERESTARTSYS) {
775 pr_err("Failed to find memory space for buffer 0x%p eviction\n", 786 pr_err("Failed to find memory space for buffer 0x%p eviction\n",
@@ -780,7 +791,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
780 } 791 }
781 792
782 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, 793 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
783 no_wait_reserve, no_wait_gpu); 794 no_wait_gpu);
784 if (ret) { 795 if (ret) {
785 if (ret != -ERESTARTSYS) 796 if (ret != -ERESTARTSYS)
786 pr_err("Buffer eviction failed\n"); 797 pr_err("Buffer eviction failed\n");
@@ -794,49 +805,33 @@ out:
794 805
795static int ttm_mem_evict_first(struct ttm_bo_device *bdev, 806static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
796 uint32_t mem_type, 807 uint32_t mem_type,
797 bool interruptible, bool no_wait_reserve, 808 bool interruptible,
798 bool no_wait_gpu) 809 bool no_wait_gpu)
799{ 810{
800 struct ttm_bo_global *glob = bdev->glob; 811 struct ttm_bo_global *glob = bdev->glob;
801 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; 812 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
802 struct ttm_buffer_object *bo; 813 struct ttm_buffer_object *bo;
803 int ret, put_count = 0; 814 int ret = -EBUSY, put_count;
804 815
805retry:
806 spin_lock(&glob->lru_lock); 816 spin_lock(&glob->lru_lock);
807 if (list_empty(&man->lru)) { 817 list_for_each_entry(bo, &man->lru, lru) {
808 spin_unlock(&glob->lru_lock); 818 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
809 return -EBUSY; 819 if (!ret)
820 break;
810 } 821 }
811 822
812 bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru); 823 if (ret) {
813 kref_get(&bo->list_kref);
814
815 if (!list_empty(&bo->ddestroy)) {
816 spin_unlock(&glob->lru_lock); 824 spin_unlock(&glob->lru_lock);
817 ret = ttm_bo_cleanup_refs(bo, interruptible,
818 no_wait_reserve, no_wait_gpu);
819 kref_put(&bo->list_kref, ttm_bo_release_list);
820
821 return ret; 825 return ret;
822 } 826 }
823 827
824 ret = ttm_bo_reserve_locked(bo, false, true, false, 0); 828 kref_get(&bo->list_kref);
825
826 if (unlikely(ret == -EBUSY)) {
827 spin_unlock(&glob->lru_lock);
828 if (likely(!no_wait_reserve))
829 ret = ttm_bo_wait_unreserved(bo, interruptible);
830 829
830 if (!list_empty(&bo->ddestroy)) {
831 ret = ttm_bo_cleanup_refs_and_unlock(bo, interruptible,
832 no_wait_gpu);
831 kref_put(&bo->list_kref, ttm_bo_release_list); 833 kref_put(&bo->list_kref, ttm_bo_release_list);
832 834 return ret;
833 /**
834 * We *need* to retry after releasing the lru lock.
835 */
836
837 if (unlikely(ret != 0))
838 return ret;
839 goto retry;
840 } 835 }
841 836
842 put_count = ttm_bo_del_from_lru(bo); 837 put_count = ttm_bo_del_from_lru(bo);
@@ -846,7 +841,7 @@ retry:
846 841
847 ttm_bo_list_ref_sub(bo, put_count, true); 842 ttm_bo_list_ref_sub(bo, put_count, true);
848 843
849 ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu); 844 ret = ttm_bo_evict(bo, interruptible, no_wait_gpu);
850 ttm_bo_unreserve(bo); 845 ttm_bo_unreserve(bo);
851 846
852 kref_put(&bo->list_kref, ttm_bo_release_list); 847 kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -871,7 +866,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
871 struct ttm_placement *placement, 866 struct ttm_placement *placement,
872 struct ttm_mem_reg *mem, 867 struct ttm_mem_reg *mem,
873 bool interruptible, 868 bool interruptible,
874 bool no_wait_reserve,
875 bool no_wait_gpu) 869 bool no_wait_gpu)
876{ 870{
877 struct ttm_bo_device *bdev = bo->bdev; 871 struct ttm_bo_device *bdev = bo->bdev;
@@ -884,8 +878,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
884 return ret; 878 return ret;
885 if (mem->mm_node) 879 if (mem->mm_node)
886 break; 880 break;
887 ret = ttm_mem_evict_first(bdev, mem_type, interruptible, 881 ret = ttm_mem_evict_first(bdev, mem_type,
888 no_wait_reserve, no_wait_gpu); 882 interruptible, no_wait_gpu);
889 if (unlikely(ret != 0)) 883 if (unlikely(ret != 0))
890 return ret; 884 return ret;
891 } while (1); 885 } while (1);
@@ -950,7 +944,7 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
950int ttm_bo_mem_space(struct ttm_buffer_object *bo, 944int ttm_bo_mem_space(struct ttm_buffer_object *bo,
951 struct ttm_placement *placement, 945 struct ttm_placement *placement,
952 struct ttm_mem_reg *mem, 946 struct ttm_mem_reg *mem,
953 bool interruptible, bool no_wait_reserve, 947 bool interruptible,
954 bool no_wait_gpu) 948 bool no_wait_gpu)
955{ 949{
956 struct ttm_bo_device *bdev = bo->bdev; 950 struct ttm_bo_device *bdev = bo->bdev;
@@ -1041,7 +1035,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
1041 } 1035 }
1042 1036
1043 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem, 1037 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
1044 interruptible, no_wait_reserve, no_wait_gpu); 1038 interruptible, no_wait_gpu);
1045 if (ret == 0 && mem->mm_node) { 1039 if (ret == 0 && mem->mm_node) {
1046 mem->placement = cur_flags; 1040 mem->placement = cur_flags;
1047 return 0; 1041 return 0;
@@ -1054,26 +1048,16 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
1054} 1048}
1055EXPORT_SYMBOL(ttm_bo_mem_space); 1049EXPORT_SYMBOL(ttm_bo_mem_space);
1056 1050
1057int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
1058{
1059 if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
1060 return -EBUSY;
1061
1062 return wait_event_interruptible(bo->event_queue,
1063 atomic_read(&bo->cpu_writers) == 0);
1064}
1065EXPORT_SYMBOL(ttm_bo_wait_cpu);
1066
1067int ttm_bo_move_buffer(struct ttm_buffer_object *bo, 1051int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1068 struct ttm_placement *placement, 1052 struct ttm_placement *placement,
1069 bool interruptible, bool no_wait_reserve, 1053 bool interruptible,
1070 bool no_wait_gpu) 1054 bool no_wait_gpu)
1071{ 1055{
1072 int ret = 0; 1056 int ret = 0;
1073 struct ttm_mem_reg mem; 1057 struct ttm_mem_reg mem;
1074 struct ttm_bo_device *bdev = bo->bdev; 1058 struct ttm_bo_device *bdev = bo->bdev;
1075 1059
1076 BUG_ON(!atomic_read(&bo->reserved)); 1060 BUG_ON(!ttm_bo_is_reserved(bo));
1077 1061
1078 /* 1062 /*
1079 * FIXME: It's possible to pipeline buffer moves. 1063 * FIXME: It's possible to pipeline buffer moves.
@@ -1093,10 +1077,12 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1093 /* 1077 /*
1094 * Determine where to move the buffer. 1078 * Determine where to move the buffer.
1095 */ 1079 */
1096 ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu); 1080 ret = ttm_bo_mem_space(bo, placement, &mem,
1081 interruptible, no_wait_gpu);
1097 if (ret) 1082 if (ret)
1098 goto out_unlock; 1083 goto out_unlock;
1099 ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu); 1084 ret = ttm_bo_handle_move_mem(bo, &mem, false,
1085 interruptible, no_wait_gpu);
1100out_unlock: 1086out_unlock:
1101 if (ret && mem.mm_node) 1087 if (ret && mem.mm_node)
1102 ttm_bo_mem_put(bo, &mem); 1088 ttm_bo_mem_put(bo, &mem);
@@ -1125,12 +1111,12 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
1125 1111
1126int ttm_bo_validate(struct ttm_buffer_object *bo, 1112int ttm_bo_validate(struct ttm_buffer_object *bo,
1127 struct ttm_placement *placement, 1113 struct ttm_placement *placement,
1128 bool interruptible, bool no_wait_reserve, 1114 bool interruptible,
1129 bool no_wait_gpu) 1115 bool no_wait_gpu)
1130{ 1116{
1131 int ret; 1117 int ret;
1132 1118
1133 BUG_ON(!atomic_read(&bo->reserved)); 1119 BUG_ON(!ttm_bo_is_reserved(bo));
1134 /* Check that range is valid */ 1120 /* Check that range is valid */
1135 if (placement->lpfn || placement->fpfn) 1121 if (placement->lpfn || placement->fpfn)
1136 if (placement->fpfn > placement->lpfn || 1122 if (placement->fpfn > placement->lpfn ||
@@ -1141,7 +1127,8 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
1141 */ 1127 */
1142 ret = ttm_bo_mem_compat(placement, &bo->mem); 1128 ret = ttm_bo_mem_compat(placement, &bo->mem);
1143 if (ret < 0) { 1129 if (ret < 0) {
1144 ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu); 1130 ret = ttm_bo_move_buffer(bo, placement, interruptible,
1131 no_wait_gpu);
1145 if (ret) 1132 if (ret)
1146 return ret; 1133 return ret;
1147 } else { 1134 } else {
@@ -1179,7 +1166,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1179 enum ttm_bo_type type, 1166 enum ttm_bo_type type,
1180 struct ttm_placement *placement, 1167 struct ttm_placement *placement,
1181 uint32_t page_alignment, 1168 uint32_t page_alignment,
1182 unsigned long buffer_start,
1183 bool interruptible, 1169 bool interruptible,
1184 struct file *persistent_swap_storage, 1170 struct file *persistent_swap_storage,
1185 size_t acc_size, 1171 size_t acc_size,
@@ -1200,7 +1186,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1200 return -ENOMEM; 1186 return -ENOMEM;
1201 } 1187 }
1202 1188
1203 size += buffer_start & ~PAGE_MASK;
1204 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 1189 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1205 if (num_pages == 0) { 1190 if (num_pages == 0) {
1206 pr_err("Illegal buffer object size\n"); 1191 pr_err("Illegal buffer object size\n");
@@ -1233,7 +1218,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1233 bo->mem.page_alignment = page_alignment; 1218 bo->mem.page_alignment = page_alignment;
1234 bo->mem.bus.io_reserved_vm = false; 1219 bo->mem.bus.io_reserved_vm = false;
1235 bo->mem.bus.io_reserved_count = 0; 1220 bo->mem.bus.io_reserved_count = 0;
1236 bo->buffer_start = buffer_start & PAGE_MASK;
1237 bo->priv_flags = 0; 1221 bo->priv_flags = 0;
1238 bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); 1222 bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
1239 bo->seq_valid = false; 1223 bo->seq_valid = false;
@@ -1257,7 +1241,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1257 goto out_err; 1241 goto out_err;
1258 } 1242 }
1259 1243
1260 ret = ttm_bo_validate(bo, placement, interruptible, false, false); 1244 ret = ttm_bo_validate(bo, placement, interruptible, false);
1261 if (ret) 1245 if (ret)
1262 goto out_err; 1246 goto out_err;
1263 1247
@@ -1306,7 +1290,6 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
1306 enum ttm_bo_type type, 1290 enum ttm_bo_type type,
1307 struct ttm_placement *placement, 1291 struct ttm_placement *placement,
1308 uint32_t page_alignment, 1292 uint32_t page_alignment,
1309 unsigned long buffer_start,
1310 bool interruptible, 1293 bool interruptible,
1311 struct file *persistent_swap_storage, 1294 struct file *persistent_swap_storage,
1312 struct ttm_buffer_object **p_bo) 1295 struct ttm_buffer_object **p_bo)
@@ -1321,8 +1304,8 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
1321 1304
1322 acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object)); 1305 acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
1323 ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, 1306 ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1324 buffer_start, interruptible, 1307 interruptible, persistent_swap_storage, acc_size,
1325 persistent_swap_storage, acc_size, NULL, NULL); 1308 NULL, NULL);
1326 if (likely(ret == 0)) 1309 if (likely(ret == 0))
1327 *p_bo = bo; 1310 *p_bo = bo;
1328 1311
@@ -1344,7 +1327,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1344 spin_lock(&glob->lru_lock); 1327 spin_lock(&glob->lru_lock);
1345 while (!list_empty(&man->lru)) { 1328 while (!list_empty(&man->lru)) {
1346 spin_unlock(&glob->lru_lock); 1329 spin_unlock(&glob->lru_lock);
1347 ret = ttm_mem_evict_first(bdev, mem_type, false, false, false); 1330 ret = ttm_mem_evict_first(bdev, mem_type, false, false);
1348 if (ret) { 1331 if (ret) {
1349 if (allow_errors) { 1332 if (allow_errors) {
1350 return ret; 1333 return ret;
@@ -1577,7 +1560,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
1577 goto out_no_addr_mm; 1560 goto out_no_addr_mm;
1578 1561
1579 INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); 1562 INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1580 bdev->nice_mode = true;
1581 INIT_LIST_HEAD(&bdev->ddestroy); 1563 INIT_LIST_HEAD(&bdev->ddestroy);
1582 bdev->dev_mapping = NULL; 1564 bdev->dev_mapping = NULL;
1583 bdev->glob = glob; 1565 bdev->glob = glob;
@@ -1721,7 +1703,6 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
1721 struct ttm_bo_driver *driver = bo->bdev->driver; 1703 struct ttm_bo_driver *driver = bo->bdev->driver;
1722 struct ttm_bo_device *bdev = bo->bdev; 1704 struct ttm_bo_device *bdev = bo->bdev;
1723 void *sync_obj; 1705 void *sync_obj;
1724 void *sync_obj_arg;
1725 int ret = 0; 1706 int ret = 0;
1726 1707
1727 if (likely(bo->sync_obj == NULL)) 1708 if (likely(bo->sync_obj == NULL))
@@ -1729,7 +1710,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
1729 1710
1730 while (bo->sync_obj) { 1711 while (bo->sync_obj) {
1731 1712
1732 if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) { 1713 if (driver->sync_obj_signaled(bo->sync_obj)) {
1733 void *tmp_obj = bo->sync_obj; 1714 void *tmp_obj = bo->sync_obj;
1734 bo->sync_obj = NULL; 1715 bo->sync_obj = NULL;
1735 clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); 1716 clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
@@ -1743,9 +1724,8 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
1743 return -EBUSY; 1724 return -EBUSY;
1744 1725
1745 sync_obj = driver->sync_obj_ref(bo->sync_obj); 1726 sync_obj = driver->sync_obj_ref(bo->sync_obj);
1746 sync_obj_arg = bo->sync_obj_arg;
1747 spin_unlock(&bdev->fence_lock); 1727 spin_unlock(&bdev->fence_lock);
1748 ret = driver->sync_obj_wait(sync_obj, sync_obj_arg, 1728 ret = driver->sync_obj_wait(sync_obj,
1749 lazy, interruptible); 1729 lazy, interruptible);
1750 if (unlikely(ret != 0)) { 1730 if (unlikely(ret != 0)) {
1751 driver->sync_obj_unref(&sync_obj); 1731 driver->sync_obj_unref(&sync_obj);
@@ -1753,8 +1733,7 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
1753 return ret; 1733 return ret;
1754 } 1734 }
1755 spin_lock(&bdev->fence_lock); 1735 spin_lock(&bdev->fence_lock);
1756 if (likely(bo->sync_obj == sync_obj && 1736 if (likely(bo->sync_obj == sync_obj)) {
1757 bo->sync_obj_arg == sync_obj_arg)) {
1758 void *tmp_obj = bo->sync_obj; 1737 void *tmp_obj = bo->sync_obj;
1759 bo->sync_obj = NULL; 1738 bo->sync_obj = NULL;
1760 clear_bit(TTM_BO_PRIV_FLAG_MOVING, 1739 clear_bit(TTM_BO_PRIV_FLAG_MOVING,
@@ -1797,8 +1776,7 @@ EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
1797 1776
1798void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo) 1777void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1799{ 1778{
1800 if (atomic_dec_and_test(&bo->cpu_writers)) 1779 atomic_dec(&bo->cpu_writers);
1801 wake_up_all(&bo->event_queue);
1802} 1780}
1803EXPORT_SYMBOL(ttm_bo_synccpu_write_release); 1781EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
1804 1782
@@ -1817,40 +1795,25 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1817 uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM); 1795 uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
1818 1796
1819 spin_lock(&glob->lru_lock); 1797 spin_lock(&glob->lru_lock);
1820 while (ret == -EBUSY) { 1798 list_for_each_entry(bo, &glob->swap_lru, swap) {
1821 if (unlikely(list_empty(&glob->swap_lru))) { 1799 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
1822 spin_unlock(&glob->lru_lock); 1800 if (!ret)
1823 return -EBUSY; 1801 break;
1824 } 1802 }
1825
1826 bo = list_first_entry(&glob->swap_lru,
1827 struct ttm_buffer_object, swap);
1828 kref_get(&bo->list_kref);
1829 1803
1830 if (!list_empty(&bo->ddestroy)) { 1804 if (ret) {
1831 spin_unlock(&glob->lru_lock); 1805 spin_unlock(&glob->lru_lock);
1832 (void) ttm_bo_cleanup_refs(bo, false, false, false); 1806 return ret;
1833 kref_put(&bo->list_kref, ttm_bo_release_list); 1807 }
1834 spin_lock(&glob->lru_lock);
1835 continue;
1836 }
1837 1808
1838 /** 1809 kref_get(&bo->list_kref);
1839 * Reserve buffer. Since we unlock while sleeping, we need
1840 * to re-check that nobody removed us from the swap-list while
1841 * we slept.
1842 */
1843 1810
1844 ret = ttm_bo_reserve_locked(bo, false, true, false, 0); 1811 if (!list_empty(&bo->ddestroy)) {
1845 if (unlikely(ret == -EBUSY)) { 1812 ret = ttm_bo_cleanup_refs_and_unlock(bo, false, false);
1846 spin_unlock(&glob->lru_lock); 1813 kref_put(&bo->list_kref, ttm_bo_release_list);
1847 ttm_bo_wait_unreserved(bo, false); 1814 return ret;
1848 kref_put(&bo->list_kref, ttm_bo_release_list);
1849 spin_lock(&glob->lru_lock);
1850 }
1851 } 1815 }
1852 1816
1853 BUG_ON(ret != 0);
1854 put_count = ttm_bo_del_from_lru(bo); 1817 put_count = ttm_bo_del_from_lru(bo);
1855 spin_unlock(&glob->lru_lock); 1818 spin_unlock(&glob->lru_lock);
1856 1819
@@ -1876,7 +1839,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1876 evict_mem.mem_type = TTM_PL_SYSTEM; 1839 evict_mem.mem_type = TTM_PL_SYSTEM;
1877 1840
1878 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, 1841 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
1879 false, false, false); 1842 false, false);
1880 if (unlikely(ret != 0)) 1843 if (unlikely(ret != 0))
1881 goto out; 1844 goto out;
1882 } 1845 }
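
The heart of the ttm_bo.c rework is visible in the first hunks: ttm_bo_reserve_locked() is always entered with glob->lru_lock held, so the lock-free atomic_cmpxchg() claim of bo->reserved can be split into a plain atomic_read() test and a later atomic_set(). A rough userspace model of the before/after claim, using C11 atomics with a pthread mutex standing in for the LRU spinlock (all names illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
    atomic_int reserved;            /* models bo->reserved */
};

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

/* Old scheme: lock-free claim, callers race for the flag directly. */
static bool try_reserve_cmpxchg(struct obj *o)
{
    int expected = 0;
    return atomic_compare_exchange_strong(&o->reserved, &expected, 1);
}

/* New scheme: the flag is only ever set with lru_lock held, so a plain
 * test-then-set cannot race with another reserver. */
static bool try_reserve_under_lock(struct obj *o)
{
    bool ok = false;

    pthread_mutex_lock(&lru_lock);
    if (atomic_load(&o->reserved) == 0) {
        atomic_store(&o->reserved, 1);
        ok = true;
    }
    pthread_mutex_unlock(&lru_lock);
    return ok;
}

int main(void)
{
    struct obj o = { .reserved = 0 };

    printf("cmpxchg claim: %d\n", try_reserve_cmpxchg(&o));    /* 1 */
    printf("locked claim:  %d\n", try_reserve_under_lock(&o)); /* 0: busy */
    return 0;
}
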
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2026060f03e0..9e9c5d2a5c74 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -43,7 +43,7 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
43} 43}
44 44
45int ttm_bo_move_ttm(struct ttm_buffer_object *bo, 45int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
46 bool evict, bool no_wait_reserve, 46 bool evict,
47 bool no_wait_gpu, struct ttm_mem_reg *new_mem) 47 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
48{ 48{
49 struct ttm_tt *ttm = bo->ttm; 49 struct ttm_tt *ttm = bo->ttm;
@@ -314,7 +314,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
314} 314}
315 315
316int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, 316int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
317 bool evict, bool no_wait_reserve, bool no_wait_gpu, 317 bool evict, bool no_wait_gpu,
318 struct ttm_mem_reg *new_mem) 318 struct ttm_mem_reg *new_mem)
319{ 319{
320 struct ttm_bo_device *bdev = bo->bdev; 320 struct ttm_bo_device *bdev = bo->bdev;
@@ -611,8 +611,7 @@ EXPORT_SYMBOL(ttm_bo_kunmap);
611 611
612int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, 612int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
613 void *sync_obj, 613 void *sync_obj,
614 void *sync_obj_arg, 614 bool evict,
615 bool evict, bool no_wait_reserve,
616 bool no_wait_gpu, 615 bool no_wait_gpu,
617 struct ttm_mem_reg *new_mem) 616 struct ttm_mem_reg *new_mem)
618{ 617{
@@ -630,7 +629,6 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
630 bo->sync_obj = NULL; 629 bo->sync_obj = NULL;
631 } 630 }
632 bo->sync_obj = driver->sync_obj_ref(sync_obj); 631 bo->sync_obj = driver->sync_obj_ref(sync_obj);
633 bo->sync_obj_arg = sync_obj_arg;
634 if (evict) { 632 if (evict) {
635 ret = ttm_bo_wait(bo, false, false, false); 633 ret = ttm_bo_wait(bo, false, false, false);
636 spin_unlock(&bdev->fence_lock); 634 spin_unlock(&bdev->fence_lock);
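
These hunks carry through the interface simplification seen in ttm_bo.c: the driver sync-object callbacks lose their separate sync_obj_arg, so a fence is identified by one handle rather than a (pointer, opaque argument) pair. A toy model of the before/after callback shape, with made-up names:

#include <stdbool.h>
#include <stdio.h>

struct fence { unsigned int seqno; };

/* Old shape: every call site had to thread an opaque arg through. */
static bool signaled_old(void *sync_obj, void *sync_obj_arg)
{
    (void)sync_obj_arg;             /* typically unused by drivers */
    return ((struct fence *)sync_obj)->seqno != 0;
}

/* New shape: the sync object alone identifies the fence. */
static bool signaled_new(void *sync_obj)
{
    return ((struct fence *)sync_obj)->seqno != 0;
}

int main(void)
{
    struct fence f = { .seqno = 42 };

    printf("%d %d\n", signaled_old(&f, NULL), signaled_new(&f));
    return 0;
}
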
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 3ba72dbdc4bd..74705f329d99 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -259,8 +259,8 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
259 read_lock(&bdev->vm_lock); 259 read_lock(&bdev->vm_lock);
260 bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff, 260 bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
261 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT); 261 (vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
262 if (likely(bo != NULL)) 262 if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
263 ttm_bo_reference(bo); 263 bo = NULL;
264 read_unlock(&bdev->vm_lock); 264 read_unlock(&bdev->vm_lock);
265 265
266 if (unlikely(bo == NULL)) { 266 if (unlikely(bo == NULL)) {
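
The mmap lookup now uses kref_get_unless_zero(): a buffer found in the address-space tree may already be on its way out, and taking a plain reference would resurrect an object that ttm_bo_release() is tearing down. A small userspace sketch of the get-unless-zero idea in C11 atomics; get_unless_zero() here is a model of the kernel helper, not the helper itself:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ref {
    atomic_int count;
};

/* Take a reference only if at least one is still held elsewhere. */
static bool get_unless_zero(struct ref *r)
{
    int old = atomic_load(&r->count);

    while (old != 0) {
        /* On failure the CAS reloads 'old', so the loop re-checks it. */
        if (atomic_compare_exchange_weak(&r->count, &old, old + 1))
            return true;
    }
    return false;   /* already zero: the object is being destroyed */
}

int main(void)
{
    struct ref live = { .count = 1 };
    struct ref dying = { .count = 0 };

    printf("live:  %d\n", get_unless_zero(&live));   /* 1 */
    printf("dying: %d\n", get_unless_zero(&dying));  /* 0 */
    return 0;
}
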
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 1937069432c5..cd9e4523dc56 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -185,10 +185,7 @@ retry_this_bo:
185 ttm_eu_backoff_reservation_locked(list); 185 ttm_eu_backoff_reservation_locked(list);
186 spin_unlock(&glob->lru_lock); 186 spin_unlock(&glob->lru_lock);
187 ttm_eu_list_ref_sub(list); 187 ttm_eu_list_ref_sub(list);
188 ret = ttm_bo_wait_cpu(bo, false); 188 return -EBUSY;
189 if (ret)
190 return ret;
191 goto retry;
192 } 189 }
193 } 190 }
194 191
@@ -216,19 +213,18 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
216 driver = bdev->driver; 213 driver = bdev->driver;
217 glob = bo->glob; 214 glob = bo->glob;
218 215
219 spin_lock(&bdev->fence_lock);
220 spin_lock(&glob->lru_lock); 216 spin_lock(&glob->lru_lock);
217 spin_lock(&bdev->fence_lock);
221 218
222 list_for_each_entry(entry, list, head) { 219 list_for_each_entry(entry, list, head) {
223 bo = entry->bo; 220 bo = entry->bo;
224 entry->old_sync_obj = bo->sync_obj; 221 entry->old_sync_obj = bo->sync_obj;
225 bo->sync_obj = driver->sync_obj_ref(sync_obj); 222 bo->sync_obj = driver->sync_obj_ref(sync_obj);
226 bo->sync_obj_arg = entry->new_sync_obj_arg;
227 ttm_bo_unreserve_locked(bo); 223 ttm_bo_unreserve_locked(bo);
228 entry->reserved = false; 224 entry->reserved = false;
229 } 225 }
230 spin_unlock(&glob->lru_lock);
231 spin_unlock(&bdev->fence_lock); 226 spin_unlock(&bdev->fence_lock);
227 spin_unlock(&glob->lru_lock);
232 228
233 list_for_each_entry(entry, list, head) { 229 list_for_each_entry(entry, list, head) {
234 if (entry->old_sync_obj) 230 if (entry->old_sync_obj)
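
Note the lock-ordering change in ttm_eu_fence_buffer_objects(): lru_lock is now taken before fence_lock and released after it, which matches the reworked cleanup paths in ttm_bo.c above, where fence_lock also nests inside lru_lock. Keeping a single global acquisition order is what rules out AB-BA deadlocks between the two paths; a trivial sketch of the rule, with pthread mutexes standing in for the spinlocks:

#include <pthread.h>

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t fence_lock = PTHREAD_MUTEX_INITIALIZER;

static void fence_buffers(void)
{
    pthread_mutex_lock(&lru_lock);     /* outer lock, always first */
    pthread_mutex_lock(&fence_lock);   /* inner lock, always second */

    /* ... attach fences and unreserve buffers here ... */

    pthread_mutex_unlock(&fence_lock); /* release in reverse order */
    pthread_mutex_unlock(&lru_lock);
}

int main(void)
{
    fence_buffers();
    return 0;
}
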
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 479c6b0467ca..dbc2def887cd 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -367,7 +367,6 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
367 spin_lock_init(&glob->lock); 367 spin_lock_init(&glob->lock);
368 glob->swap_queue = create_singlethread_workqueue("ttm_swap"); 368 glob->swap_queue = create_singlethread_workqueue("ttm_swap");
369 INIT_WORK(&glob->work, ttm_shrink_work); 369 INIT_WORK(&glob->work, ttm_shrink_work);
370 init_waitqueue_head(&glob->queue);
371 ret = kobject_init_and_add( 370 ret = kobject_init_and_add(
372 &glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting"); 371 &glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
373 if (unlikely(ret != 0)) { 372 if (unlikely(ret != 0)) {
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index c7857874956a..58a5f3261c0b 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -80,7 +80,7 @@ struct ttm_object_file {
80 */ 80 */
81 81
82struct ttm_object_device { 82struct ttm_object_device {
83 rwlock_t object_lock; 83 spinlock_t object_lock;
84 struct drm_open_hash object_hash; 84 struct drm_open_hash object_hash;
85 atomic_t object_count; 85 atomic_t object_count;
86 struct ttm_mem_global *mem_glob; 86 struct ttm_mem_global *mem_glob;
@@ -157,12 +157,12 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
157 base->refcount_release = refcount_release; 157 base->refcount_release = refcount_release;
158 base->ref_obj_release = ref_obj_release; 158 base->ref_obj_release = ref_obj_release;
159 base->object_type = object_type; 159 base->object_type = object_type;
160 write_lock(&tdev->object_lock);
161 kref_init(&base->refcount); 160 kref_init(&base->refcount);
162 ret = drm_ht_just_insert_please(&tdev->object_hash, 161 spin_lock(&tdev->object_lock);
163 &base->hash, 162 ret = drm_ht_just_insert_please_rcu(&tdev->object_hash,
164 (unsigned long)base, 31, 0, 0); 163 &base->hash,
165 write_unlock(&tdev->object_lock); 164 (unsigned long)base, 31, 0, 0);
165 spin_unlock(&tdev->object_lock);
166 if (unlikely(ret != 0)) 166 if (unlikely(ret != 0))
167 goto out_err0; 167 goto out_err0;
168 168
@@ -174,7 +174,9 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
174 174
175 return 0; 175 return 0;
176out_err1: 176out_err1:
177 (void)drm_ht_remove_item(&tdev->object_hash, &base->hash); 177 spin_lock(&tdev->object_lock);
178 (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
179 spin_unlock(&tdev->object_lock);
178out_err0: 180out_err0:
179 return ret; 181 return ret;
180} 182}
@@ -186,30 +188,29 @@ static void ttm_release_base(struct kref *kref)
186 container_of(kref, struct ttm_base_object, refcount); 188 container_of(kref, struct ttm_base_object, refcount);
187 struct ttm_object_device *tdev = base->tfile->tdev; 189 struct ttm_object_device *tdev = base->tfile->tdev;
188 190
189 (void)drm_ht_remove_item(&tdev->object_hash, &base->hash); 191 spin_lock(&tdev->object_lock);
190 write_unlock(&tdev->object_lock); 192 (void)drm_ht_remove_item_rcu(&tdev->object_hash, &base->hash);
193 spin_unlock(&tdev->object_lock);
194
195 /*
196 * Note: We don't use synchronize_rcu() here because it's far
197 * too slow. It's up to the user to free the object using
198 * call_rcu() or ttm_base_object_kfree().
199 */
200
191 if (base->refcount_release) { 201 if (base->refcount_release) {
192 ttm_object_file_unref(&base->tfile); 202 ttm_object_file_unref(&base->tfile);
193 base->refcount_release(&base); 203 base->refcount_release(&base);
194 } 204 }
195 write_lock(&tdev->object_lock);
196} 205}
197 206
198void ttm_base_object_unref(struct ttm_base_object **p_base) 207void ttm_base_object_unref(struct ttm_base_object **p_base)
199{ 208{
200 struct ttm_base_object *base = *p_base; 209 struct ttm_base_object *base = *p_base;
201 struct ttm_object_device *tdev = base->tfile->tdev;
202 210
203 *p_base = NULL; 211 *p_base = NULL;
204 212
205 /*
206 * Need to take the lock here to avoid racing with
207 * users trying to look up the object.
208 */
209
210 write_lock(&tdev->object_lock);
211 kref_put(&base->refcount, ttm_release_base); 213 kref_put(&base->refcount, ttm_release_base);
212 write_unlock(&tdev->object_lock);
213} 214}
214EXPORT_SYMBOL(ttm_base_object_unref); 215EXPORT_SYMBOL(ttm_base_object_unref);
215 216
@@ -221,14 +222,14 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
221 struct drm_hash_item *hash; 222 struct drm_hash_item *hash;
222 int ret; 223 int ret;
223 224
224 read_lock(&tdev->object_lock); 225 rcu_read_lock();
225 ret = drm_ht_find_item(&tdev->object_hash, key, &hash); 226 ret = drm_ht_find_item_rcu(&tdev->object_hash, key, &hash);
226 227
227 if (likely(ret == 0)) { 228 if (likely(ret == 0)) {
228 base = drm_hash_entry(hash, struct ttm_base_object, hash); 229 base = drm_hash_entry(hash, struct ttm_base_object, hash);
229 kref_get(&base->refcount); 230 ret = kref_get_unless_zero(&base->refcount) ? 0 : -EINVAL;
230 } 231 }
231 read_unlock(&tdev->object_lock); 232 rcu_read_unlock();
232 233
233 if (unlikely(ret != 0)) 234 if (unlikely(ret != 0))
234 return NULL; 235 return NULL;
@@ -426,7 +427,7 @@ struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
426 return NULL; 427 return NULL;
427 428
428 tdev->mem_glob = mem_glob; 429 tdev->mem_glob = mem_glob;
429 rwlock_init(&tdev->object_lock); 430 spin_lock_init(&tdev->object_lock);
430 atomic_set(&tdev->object_count, 0); 431 atomic_set(&tdev->object_count, 0);
431 ret = drm_ht_create(&tdev->object_hash, hash_order); 432 ret = drm_ht_create(&tdev->object_hash, hash_order);
432 433
@@ -444,9 +445,9 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
444 445
445 *p_tdev = NULL; 446 *p_tdev = NULL;
446 447
447 write_lock(&tdev->object_lock); 448 spin_lock(&tdev->object_lock);
448 drm_ht_remove(&tdev->object_hash); 449 drm_ht_remove(&tdev->object_hash);
449 write_unlock(&tdev->object_lock); 450 spin_unlock(&tdev->object_lock);
450 451
451 kfree(tdev); 452 kfree(tdev);
452} 453}
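
The ttm_object.c rework moves the hash read side to RCU: lookups run under rcu_read_lock() and must take their reference with kref_get_unless_zero() (a failed get means the object is already dying), and the new comment notes that freeing is deferred via call_rcu() rather than a blocking synchronize_rcu(). A userspace model of that contract; a pthread rwlock stands in for the grace period (the blocking variant, purely to keep the sketch short) and a single slot stands in for the hash table:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct base_object {
    atomic_int refcount;
};

/* Single-slot stand-in for tdev->object_hash. */
static struct base_object *_Atomic slot;

/* Stand-in for the grace period: readers hold it shared during a
 * lookup; taking it exclusive waits out every current reader. */
static pthread_rwlock_t grace = PTHREAD_RWLOCK_INITIALIZER;

static struct base_object *lookup(void)
{
    struct base_object *obj;

    pthread_rwlock_rdlock(&grace);       /* models rcu_read_lock() */
    obj = atomic_load(&slot);
    /* A real lookup must also take a reference with get-unless-zero
     * here (see the sketch after the ttm_bo_vm.c hunk above). */
    pthread_rwlock_unlock(&grace);       /* models rcu_read_unlock() */
    return obj;
}

static void release(struct base_object *obj)
{
    if (atomic_fetch_sub(&obj->refcount, 1) == 1) {
        atomic_store(&slot, NULL);       /* unhash: no new lookups */
        pthread_rwlock_wrlock(&grace);   /* drain current readers,  */
        pthread_rwlock_unlock(&grace);   /* like synchronize_rcu()  */
        free(obj);                       /* now safe to free        */
    }
}

int main(void)
{
    struct base_object *obj = calloc(1, sizeof(*obj));

    if (!obj)
        return 1;
    atomic_init(&obj->refcount, 1);
    atomic_store(&slot, obj);

    printf("before: %p\n", (void *)lookup());
    release(obj);                        /* final unref frees it */
    printf("after:  %p\n", (void *)lookup());  /* NULL now */
    return 0;
}
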
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index b3b2cedf6745..512f44add89f 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -84,7 +84,8 @@ udl_detect(struct drm_connector *connector, bool force)
84 return connector_status_connected; 84 return connector_status_connected;
85} 85}
86 86
87struct drm_encoder *udl_best_single_encoder(struct drm_connector *connector) 87static struct drm_encoder*
88udl_best_single_encoder(struct drm_connector *connector)
88{ 89{
89 int enc_id = connector->encoder_ids[0]; 90 int enc_id = connector->encoder_ids[0];
90 struct drm_mode_object *obj; 91 struct drm_mode_object *obj;
@@ -97,8 +98,9 @@ struct drm_encoder *udl_best_single_encoder(struct drm_connector *connector)
97 return encoder; 98 return encoder;
98} 99}
99 100
100int udl_connector_set_property(struct drm_connector *connector, struct drm_property *property, 101static int udl_connector_set_property(struct drm_connector *connector,
101 uint64_t val) 102 struct drm_property *property,
103 uint64_t val)
102{ 104{
103 return 0; 105 return 0;
104} 106}
@@ -110,13 +112,13 @@ static void udl_connector_destroy(struct drm_connector *connector)
110 kfree(connector); 112 kfree(connector);
111} 113}
112 114
113struct drm_connector_helper_funcs udl_connector_helper_funcs = { 115static struct drm_connector_helper_funcs udl_connector_helper_funcs = {
114 .get_modes = udl_get_modes, 116 .get_modes = udl_get_modes,
115 .mode_valid = udl_mode_valid, 117 .mode_valid = udl_mode_valid,
116 .best_encoder = udl_best_single_encoder, 118 .best_encoder = udl_best_single_encoder,
117}; 119};
118 120
119struct drm_connector_funcs udl_connector_funcs = { 121static struct drm_connector_funcs udl_connector_funcs = {
120 .dpms = drm_helper_connector_dpms, 122 .dpms = drm_helper_connector_dpms,
121 .detect = udl_detect, 123 .detect = udl_detect,
122 .fill_modes = drm_helper_probe_single_connector_modes, 124 .fill_modes = drm_helper_probe_single_connector_modes,
@@ -138,7 +140,7 @@ int udl_connector_init(struct drm_device *dev, struct drm_encoder *encoder)
138 drm_sysfs_connector_add(connector); 140 drm_sysfs_connector_add(connector);
139 drm_mode_connector_attach_encoder(connector, encoder); 141 drm_mode_connector_attach_encoder(connector, encoder);
140 142
141 drm_connector_attach_property(connector, 143 drm_object_attach_property(&connector->base,
142 dev->mode_config.dirty_info_property, 144 dev->mode_config.dirty_info_property,
143 1); 145 1);
144 return 0; 146 return 0;
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 586869c8c11f..2cc6cd91ac11 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -5,6 +5,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
5 vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \ 5 vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
6 vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ 6 vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
7 vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \ 7 vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
8 vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o 8 vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
9 vmwgfx_surface.o
9 10
10obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o 11obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
new file mode 100644
index 000000000000..8369c3ba10fe
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
@@ -0,0 +1,909 @@
1/**************************************************************************
2 *
3 * Copyright © 2008-2012 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#ifdef __KERNEL__
29
30#include <drm/vmwgfx_drm.h>
31#define surf_size_struct struct drm_vmw_size
32
33#else /* __KERNEL__ */
34
35#ifndef ARRAY_SIZE
36#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
37#endif /* ARRAY_SIZE */
38
39#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
40#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
41#define surf_size_struct SVGA3dSize
42#define u32 uint32
43
44#endif /* __KERNEL__ */
45
46#include "svga3d_reg.h"
47
48/*
49 * enum svga3d_block_desc describes the active data channels in a block.
50 *
51 * There can be at most four active channels in a block:
52 * 1. Red, bump W, luminance and depth are stored in the first channel.
53 * 2. Green, bump V and stencil are stored in the second channel.
54 * 3. Blue and bump U are stored in the third channel.
55 * 4. Alpha and bump Q are stored in the fourth channel.
56 *
57 * Block channels can be used to store compressed and buffer data:
58 * 1. For compressed formats, only the data channel is used and its size
59 * is equal to that of a single block in the compression scheme.
60 * 2. For buffer formats, only the data channel is used and its size is
61 * exactly one byte.
62 * 3. In each case the bit depth represents the size of a single block.
63 *
64 * Note: Compressed and IEEE formats do not use the bitMask structure.
65 */
66
67enum svga3d_block_desc {
68 SVGA3DBLOCKDESC_NONE = 0, /* No channels are active */
69 SVGA3DBLOCKDESC_BLUE = 1 << 0, /* Block with blue channel
70 data */
71 SVGA3DBLOCKDESC_U = 1 << 0, /* Block with bump U channel
72 data */
73 SVGA3DBLOCKDESC_UV_VIDEO = 1 << 7, /* Block with alternating video
74 U and V */
75 SVGA3DBLOCKDESC_GREEN = 1 << 1, /* Block with green channel
76 data */
77 SVGA3DBLOCKDESC_V = 1 << 1, /* Block with bump V channel
78 data */
79 SVGA3DBLOCKDESC_STENCIL = 1 << 1, /* Block with a stencil
80 channel */
81 SVGA3DBLOCKDESC_RED = 1 << 2, /* Block with red channel
82 data */
83 SVGA3DBLOCKDESC_W = 1 << 2, /* Block with bump W channel
84 data */
85 SVGA3DBLOCKDESC_LUMINANCE = 1 << 2, /* Block with luminance channel
86 data */
87 SVGA3DBLOCKDESC_Y = 1 << 2, /* Block with video luminance
88 data */
89 SVGA3DBLOCKDESC_DEPTH = 1 << 2, /* Block with depth channel */
90 SVGA3DBLOCKDESC_ALPHA = 1 << 3, /* Block with an alpha
91 channel */
92 SVGA3DBLOCKDESC_Q = 1 << 3, /* Block with bump Q channel
93 data */
94 SVGA3DBLOCKDESC_BUFFER = 1 << 4, /* Block stores 1 byte of
95 data */
96 SVGA3DBLOCKDESC_COMPRESSED = 1 << 5, /* Block stores n bytes of
97 data depending on the
98 compression method used */
99 SVGA3DBLOCKDESC_IEEE_FP = 1 << 6, /* Block stores data in an IEEE
100 floating point
101 representation in
102 all channels */
103 SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 8, /* Three separate blocks store
104 data. */
105 SVGA3DBLOCKDESC_U_VIDEO = 1 << 9, /* Block with U video data */
106 SVGA3DBLOCKDESC_V_VIDEO = 1 << 10, /* Block with V video data */
107 SVGA3DBLOCKDESC_EXP = 1 << 11, /* Shared exponent */
108 SVGA3DBLOCKDESC_SRGB = 1 << 12, /* Data is in sRGB format */
109 SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13, /* 2 planes of Y, UV,
110 e.g., NV12. */
111 SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14, /* 3 planes of separate
112 Y, U, V, e.g., YV12. */
113
114 SVGA3DBLOCKDESC_RG = SVGA3DBLOCKDESC_RED |
115 SVGA3DBLOCKDESC_GREEN,
116 SVGA3DBLOCKDESC_RGB = SVGA3DBLOCKDESC_RG |
117 SVGA3DBLOCKDESC_BLUE,
118 SVGA3DBLOCKDESC_RGB_SRGB = SVGA3DBLOCKDESC_RGB |
119 SVGA3DBLOCKDESC_SRGB,
120 SVGA3DBLOCKDESC_RGBA = SVGA3DBLOCKDESC_RGB |
121 SVGA3DBLOCKDESC_ALPHA,
122 SVGA3DBLOCKDESC_RGBA_SRGB = SVGA3DBLOCKDESC_RGBA |
123 SVGA3DBLOCKDESC_SRGB,
124 SVGA3DBLOCKDESC_UV = SVGA3DBLOCKDESC_U |
125 SVGA3DBLOCKDESC_V,
126 SVGA3DBLOCKDESC_UVL = SVGA3DBLOCKDESC_UV |
127 SVGA3DBLOCKDESC_LUMINANCE,
128 SVGA3DBLOCKDESC_UVW = SVGA3DBLOCKDESC_UV |
129 SVGA3DBLOCKDESC_W,
130 SVGA3DBLOCKDESC_UVWA = SVGA3DBLOCKDESC_UVW |
131 SVGA3DBLOCKDESC_ALPHA,
132 SVGA3DBLOCKDESC_UVWQ = SVGA3DBLOCKDESC_U |
133 SVGA3DBLOCKDESC_V |
134 SVGA3DBLOCKDESC_W |
135 SVGA3DBLOCKDESC_Q,
136 SVGA3DBLOCKDESC_LA = SVGA3DBLOCKDESC_LUMINANCE |
137 SVGA3DBLOCKDESC_ALPHA,
138 SVGA3DBLOCKDESC_R_FP = SVGA3DBLOCKDESC_RED |
139 SVGA3DBLOCKDESC_IEEE_FP,
140 SVGA3DBLOCKDESC_RG_FP = SVGA3DBLOCKDESC_R_FP |
141 SVGA3DBLOCKDESC_GREEN,
142 SVGA3DBLOCKDESC_RGB_FP = SVGA3DBLOCKDESC_RG_FP |
143 SVGA3DBLOCKDESC_BLUE,
144 SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RGB_FP |
145 SVGA3DBLOCKDESC_ALPHA,
146 SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH |
147 SVGA3DBLOCKDESC_STENCIL,
148 SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_UV_VIDEO |
149 SVGA3DBLOCKDESC_Y,
150 SVGA3DBLOCKDESC_AYUV = SVGA3DBLOCKDESC_ALPHA |
151 SVGA3DBLOCKDESC_Y |
152 SVGA3DBLOCKDESC_U_VIDEO |
153 SVGA3DBLOCKDESC_V_VIDEO,
154 SVGA3DBLOCKDESC_RGBE = SVGA3DBLOCKDESC_RGB |
155 SVGA3DBLOCKDESC_EXP,
156 SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
157 SVGA3DBLOCKDESC_SRGB,
158 SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
159 SVGA3DBLOCKDESC_2PLANAR_YUV,
160 SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
161 SVGA3DBLOCKDESC_3PLANAR_YUV,
162};
163
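As a quick sanity check on the composite masks above, the sketch below re-declares a few of the channel bits under shortened names (illustrative only, not the driver's identifiers; values copied from the enum above) and verifies that an RGBA sRGB mask is just the RGBA mask with the sRGB bit added:

#include <assert.h>
#include <stdio.h>

enum {
	BLUE  = 1 << 0,
	GREEN = 1 << 1,
	RED   = 1 << 2,
	ALPHA = 1 << 3,
	SRGB  = 1 << 12,	/* values taken from the enum above */
};

int main(void)
{
	unsigned rgba      = RED | GREEN | BLUE | ALPHA;
	unsigned rgba_srgb = rgba | SRGB;

	/* Stripping the sRGB bit recovers the base channel mask. */
	assert((rgba_srgb & ~SRGB) == rgba);
	printf("RGBA_SRGB mask: 0x%x\n", rgba_srgb);
	return 0;
}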
164/*
165 * SVGA3dSurfaceDesc describes the actual pixel data.
166 *
167 * This structure provides the following information:
168 * 1. Block description.
169 * 2. Dimensions of a block in the surface.
170 * 3. Size of block in bytes.
171 * 4. Bit depth of the pixel data.
172 * 5. Channel bit depths and masks (if applicable).
173 */
174#define SVGA3D_CHANNEL_DEF(type) \
175 struct { \
176 union { \
177 type blue; \
178 type u; \
179 type uv_video; \
180 type u_video; \
181 }; \
182 union { \
183 type green; \
184 type v; \
185 type stencil; \
186 type v_video; \
187 }; \
188 union { \
189 type red; \
190 type w; \
191 type luminance; \
192 type y; \
193 type depth; \
194 type data; \
195 }; \
196 union { \
197 type alpha; \
198 type q; \
199 type exp; \
200 }; \
201 }
202
203struct svga3d_surface_desc {
204 enum svga3d_block_desc block_desc;
205 surf_size_struct block_size;
206 u32 bytes_per_block;
207 u32 pitch_bytes_per_block;
208
209 struct {
210 u32 total;
211 SVGA3D_CHANNEL_DEF(uint8);
212 } bit_depth;
213
214 struct {
215 SVGA3D_CHANNEL_DEF(uint8);
216 } bit_offset;
217};
218
219static const struct svga3d_surface_desc svga3d_surface_descs[] = {
220 {SVGA3DBLOCKDESC_NONE,
221 {1, 1, 1}, 0, 0, {0, {{0}, {0}, {0}, {0} } },
222 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_FORMAT_INVALID */
223
224 {SVGA3DBLOCKDESC_RGB,
225 {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
226 {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_X8R8G8B8 */
227
228 {SVGA3DBLOCKDESC_RGBA,
229 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
230 {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_A8R8G8B8 */
231
232 {SVGA3DBLOCKDESC_RGB,
233 {1, 1, 1}, 2, 2, {16, {{5}, {6}, {5}, {0} } },
234 {{{0}, {5}, {11}, {0} } } }, /* SVGA3D_R5G6B5 */
235
236 {SVGA3DBLOCKDESC_RGB,
237 {1, 1, 1}, 2, 2, {15, {{5}, {5}, {5}, {0} } },
238 {{{0}, {5}, {10}, {0} } } }, /* SVGA3D_X1R5G5B5 */
239
240 {SVGA3DBLOCKDESC_RGBA,
241 {1, 1, 1}, 2, 2, {16, {{5}, {5}, {5}, {1} } },
242 {{{0}, {5}, {10}, {15} } } }, /* SVGA3D_A1R5G5B5 */
243
244 {SVGA3DBLOCKDESC_RGBA,
245 {1, 1, 1}, 2, 2, {16, {{4}, {4}, {4}, {4} } },
246 {{{0}, {4}, {8}, {12} } } }, /* SVGA3D_A4R4G4B4 */
247
248 {SVGA3DBLOCKDESC_DEPTH,
249 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
250 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D32 */
251
252 {SVGA3DBLOCKDESC_DEPTH,
253 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
254 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D16 */
255
256 {SVGA3DBLOCKDESC_DS,
257 {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
258 {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8 */
259
260 {SVGA3DBLOCKDESC_DS,
261 {1, 1, 1}, 2, 2, {16, {{0}, {1}, {15}, {0} } },
262 {{{0}, {15}, {0}, {0} } } }, /* SVGA3D_Z_D15S1 */
263
264 {SVGA3DBLOCKDESC_LUMINANCE,
265 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
266 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE8 */
267
268 {SVGA3DBLOCKDESC_LA,
269 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {4}, {4} } },
270 {{{0}, {0}, {0}, {4} } } }, /* SVGA3D_LUMINANCE4_ALPHA4 */
271
272 {SVGA3DBLOCKDESC_LUMINANCE,
273 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
274 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE16 */
275
276 {SVGA3DBLOCKDESC_LA,
277 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } },
278 {{{0}, {0}, {0}, {8} } } }, /* SVGA3D_LUMINANCE8_ALPHA8 */
279
280 {SVGA3DBLOCKDESC_COMPRESSED,
281 {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
282 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT1 */
283
284 {SVGA3DBLOCKDESC_COMPRESSED,
285 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
286 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT2 */
287
288 {SVGA3DBLOCKDESC_COMPRESSED,
289 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
290 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT3 */
291
292 {SVGA3DBLOCKDESC_COMPRESSED,
293 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
294 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT4 */
295
296 {SVGA3DBLOCKDESC_COMPRESSED,
297 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
298 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT5 */
299
300 {SVGA3DBLOCKDESC_UV,
301 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } },
302 {{{0}, {0}, {0}, {8} } } }, /* SVGA3D_BUMPU8V8 */
303
304 {SVGA3DBLOCKDESC_UVL,
305 {1, 1, 1}, 2, 2, {16, {{5}, {5}, {6}, {0} } },
306 {{{11}, {6}, {0}, {0} } } }, /* SVGA3D_BUMPL6V5U5 */
307
308 {SVGA3DBLOCKDESC_UVL,
309 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {0} } },
310 {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPX8L8V8U8 */
311
312 {SVGA3DBLOCKDESC_UVL,
313 {1, 1, 1}, 3, 3, {24, {{8}, {8}, {8}, {0} } },
314 {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPL8V8U8 */
315
316 {SVGA3DBLOCKDESC_RGBA_FP,
317 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
318 {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_ARGB_S10E5 */
319
320 {SVGA3DBLOCKDESC_RGBA_FP,
321 {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
322 {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_ARGB_S23E8 */
323
324 {SVGA3DBLOCKDESC_RGBA,
325 {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
326 {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2R10G10B10 */
327
328 {SVGA3DBLOCKDESC_UV,
329 {1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } },
330 {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_V8U8 */
331
332 {SVGA3DBLOCKDESC_UVWQ,
333 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
334 {{{24}, {16}, {8}, {0} } } }, /* SVGA3D_Q8W8V8U8 */
335
336 {SVGA3DBLOCKDESC_UV,
337 {1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } },
338 {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_CxV8U8 */
339
340 {SVGA3DBLOCKDESC_UVL,
341 {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
342 {{{16}, {8}, {0}, {0} } } }, /* SVGA3D_X8L8V8U8 */
343
344 {SVGA3DBLOCKDESC_UVWA,
345 {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
346 {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2W10V10U10 */
347
348 {SVGA3DBLOCKDESC_ALPHA,
349 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {0}, {8} } },
350 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_ALPHA8 */
351
352 {SVGA3DBLOCKDESC_R_FP,
353 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
354 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S10E5 */
355
356 {SVGA3DBLOCKDESC_R_FP,
357 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
358 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S23E8 */
359
360 {SVGA3DBLOCKDESC_RG_FP,
361 {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
362 {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_RG_S10E5 */
363
364 {SVGA3DBLOCKDESC_RG_FP,
365 {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
366 {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_RG_S23E8 */
367
368 {SVGA3DBLOCKDESC_BUFFER,
369 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
370 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BUFFER */
371
372 {SVGA3DBLOCKDESC_DEPTH,
373 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } },
374 {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24X8 */
375
376 {SVGA3DBLOCKDESC_UV,
377 {1, 1, 1}, 4, 4, {32, {{16}, {16}, {0}, {0} } },
378 {{{16}, {0}, {0}, {0} } } }, /* SVGA3D_V16U16 */
379
380 {SVGA3DBLOCKDESC_RG,
381 {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
382 {{{0}, {0}, {16}, {0} } } }, /* SVGA3D_G16R16 */
383
384 {SVGA3DBLOCKDESC_RGBA,
385 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
386 {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_A16B16G16R16 */
387
388 {SVGA3DBLOCKDESC_YUV,
389 {1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } },
390 {{{0}, {0}, {8}, {0} } } }, /* SVGA3D_UYVY */
391
392 {SVGA3DBLOCKDESC_YUV,
393 {1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } },
394 {{{8}, {0}, {0}, {0} } } }, /* SVGA3D_YUY2 */
395
396 {SVGA3DBLOCKDESC_NV12,
397 {2, 2, 1}, 6, 2, {48, {{0}, {0}, {48}, {0} } },
398 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_NV12 */
399
400 {SVGA3DBLOCKDESC_AYUV,
401 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
402 {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_AYUV */
403
404 {SVGA3DBLOCKDESC_RGBA,
405 {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
406 {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_TYPELESS */
407
408 {SVGA3DBLOCKDESC_RGBA,
409 {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
410 {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_UINT */
411
412 {SVGA3DBLOCKDESC_UVWQ,
413 {1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
414 {{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_SINT */
415
416 {SVGA3DBLOCKDESC_RGB,
417 {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
418 {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_TYPELESS */
419
420 {SVGA3DBLOCKDESC_RGB_FP,
421 {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
422 {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_FLOAT */
423
424 {SVGA3DBLOCKDESC_RGB,
425 {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
426 {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_UINT */
427
428 {SVGA3DBLOCKDESC_UVW,
429 {1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
430 {{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_SINT */
431
432 {SVGA3DBLOCKDESC_RGBA,
433 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
434 {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_TYPELESS */
435
436 {SVGA3DBLOCKDESC_RGBA,
437 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
438 {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_UINT */
439
440 {SVGA3DBLOCKDESC_UVWQ,
441 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
442 {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SNORM */
443
444 {SVGA3DBLOCKDESC_UVWQ,
445 {1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
446 {{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SINT */
447
448 {SVGA3DBLOCKDESC_RG,
449 {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
450 {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_TYPELESS */
451
452 {SVGA3DBLOCKDESC_RG,
453 {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
454 {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_UINT */
455
456 {SVGA3DBLOCKDESC_UV,
457 {1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
458 {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_SINT */
459
460 {SVGA3DBLOCKDESC_RG,
461 {1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } },
462 {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G8X24_TYPELESS */
463
464 {SVGA3DBLOCKDESC_DS,
465 {1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } },
466 {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT_S8X24_UINT */
467
468 {SVGA3DBLOCKDESC_R_FP,
469 {1, 1, 1}, 8, 8, {64, {{0}, {0}, {32}, {0} } },
470 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_FLOAT_X8_X24_TYPELESS */
471
472 {SVGA3DBLOCKDESC_GREEN,
473 {1, 1, 1}, 8, 8, {64, {{0}, {8}, {0}, {0} } },
474 {{{0}, {32}, {0}, {0} } } }, /* SVGA3D_X32_TYPELESS_G8X24_UINT */
475
476 {SVGA3DBLOCKDESC_RGBA,
477 {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
478 {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_TYPELESS */
479
480 {SVGA3DBLOCKDESC_RGBA,
481 {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
482 {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_UINT */
483
484 {SVGA3DBLOCKDESC_RGB_FP,
485 {1, 1, 1}, 4, 4, {32, {{10}, {11}, {11}, {0} } },
486 {{{0}, {10}, {21}, {0} } } }, /* SVGA3D_R11G11B10_FLOAT */
487
488 {SVGA3DBLOCKDESC_RGBA,
489 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
490 {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_TYPELESS */
491
492 {SVGA3DBLOCKDESC_RGBA,
493 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
494 {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM */
495
496 {SVGA3DBLOCKDESC_RGBA_SRGB,
497 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
498 {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM_SRGB */
499
500 {SVGA3DBLOCKDESC_RGBA,
501 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
502 {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UINT */
503
504 {SVGA3DBLOCKDESC_RGBA,
505 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
506 {{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_SINT */
507
508 {SVGA3DBLOCKDESC_RG,
509 {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
510 {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_TYPELESS */
511
512 {SVGA3DBLOCKDESC_RG_FP,
513 {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
514 {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_UINT */
515
516 {SVGA3DBLOCKDESC_UV,
517 {1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
518 {{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_SINT */
519
520 {SVGA3DBLOCKDESC_RED,
521 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
522 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_TYPELESS */
523
524 {SVGA3DBLOCKDESC_DEPTH,
525 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
526 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT */
527
528 {SVGA3DBLOCKDESC_RED,
529 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
530 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_UINT */
531
532 {SVGA3DBLOCKDESC_RED,
533 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
534 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_SINT */
535
536 {SVGA3DBLOCKDESC_RG,
537 {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
538 {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_R24G8_TYPELESS */
539
540 {SVGA3DBLOCKDESC_DS,
541 {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
542 {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_D24_UNORM_S8_UINT */
543
544 {SVGA3DBLOCKDESC_RED,
545 {1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } },
546 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R24_UNORM_X8_TYPELESS */
547
548 {SVGA3DBLOCKDESC_GREEN,
549 {1, 1, 1}, 4, 4, {32, {{0}, {8}, {0}, {0} } },
550 {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_X24_TYPELESS_G8_UINT */
551
552 {SVGA3DBLOCKDESC_RG,
553 {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
554 {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_TYPELESS */
555
556 {SVGA3DBLOCKDESC_RG,
557 {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
558 {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UNORM */
559
560 {SVGA3DBLOCKDESC_RG,
561 {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
562 {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UINT */
563
564 {SVGA3DBLOCKDESC_UV,
565 {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
566 {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_SINT */
567
568 {SVGA3DBLOCKDESC_RED,
569 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
570 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_TYPELESS */
571
572 {SVGA3DBLOCKDESC_RED,
573 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
574 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UNORM */
575
576 {SVGA3DBLOCKDESC_RED,
577 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
578 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UINT */
579
580 {SVGA3DBLOCKDESC_U,
581 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
582 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SNORM */
583
584 {SVGA3DBLOCKDESC_U,
585 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
586 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SINT */
587
588 {SVGA3DBLOCKDESC_RED,
589 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
590 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_TYPELESS */
591
592 {SVGA3DBLOCKDESC_RED,
593 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
594 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UNORM */
595
596 {SVGA3DBLOCKDESC_RED,
597 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
598 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UINT */
599
600 {SVGA3DBLOCKDESC_U,
601 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
602 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SNORM */
603
604 {SVGA3DBLOCKDESC_U,
605 {1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
606 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SINT */
607
608 {SVGA3DBLOCKDESC_RED,
609 {8, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
610 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R1_UNORM */
611
612 {SVGA3DBLOCKDESC_RGBE,
613 {1, 1, 1}, 4, 4, {32, {{9}, {9}, {9}, {5} } },
614 {{{18}, {9}, {0}, {27} } } }, /* SVGA3D_R9G9B9E5_SHAREDEXP */
615
616 {SVGA3DBLOCKDESC_RG,
617 {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
618 {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_B8G8_UNORM */
619
620 {SVGA3DBLOCKDESC_RG,
621 {1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
622 {{{0}, {8}, {0}, {0} } } }, /* SVGA3D_G8R8_G8B8_UNORM */
623
624 {SVGA3DBLOCKDESC_COMPRESSED,
625 {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
626 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_TYPELESS */
627
628 {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
629 {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
630 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_UNORM_SRGB */
631
632 {SVGA3DBLOCKDESC_COMPRESSED,
633 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
634 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_TYPELESS */
635
636 {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
637 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
638 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_UNORM_SRGB */
639
640 {SVGA3DBLOCKDESC_COMPRESSED,
641 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
642 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_TYPELESS */
643
644 {SVGA3DBLOCKDESC_COMPRESSED_SRGB,
645 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
646 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_UNORM_SRGB */
647
648 {SVGA3DBLOCKDESC_COMPRESSED,
649 {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
650 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_TYPELESS */
651
652 {SVGA3DBLOCKDESC_COMPRESSED,
653 {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
654 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_UNORM */
655
656 {SVGA3DBLOCKDESC_COMPRESSED,
657 {4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
658 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_SNORM */
659
660 {SVGA3DBLOCKDESC_COMPRESSED,
661 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
662 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_TYPELESS */
663
664 {SVGA3DBLOCKDESC_COMPRESSED,
665 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
666 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_UNORM */
667
668 {SVGA3DBLOCKDESC_COMPRESSED,
669 {4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
670 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_SNORM */
671
672 {SVGA3DBLOCKDESC_RGBA,
673 {1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
674 {{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10_XR_BIAS_A2_UNORM */
675
676 {SVGA3DBLOCKDESC_RGBA,
677 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
678 {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_TYPELESS */
679
680 {SVGA3DBLOCKDESC_RGBA_SRGB,
681 {1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
682 {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_UNORM_SRGB */
683
684 {SVGA3DBLOCKDESC_RGB,
685 {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
686 {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_TYPELESS */
687
688 {SVGA3DBLOCKDESC_RGB_SRGB,
689 {1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
690 {{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_UNORM_SRGB */
691
692 {SVGA3DBLOCKDESC_DEPTH,
693 {1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
694 {{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_DF16 */
695
696 {SVGA3DBLOCKDESC_DS,
697 {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
698 {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_DF24 */
699
700 {SVGA3DBLOCKDESC_DS,
701 {1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
702 {{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8_INT */
703};
704
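As a worked example of reading the table, the SVGA3D_A8R8G8B8 entry above gives a 1x1x1 block, 4 bytes per block, 32 bits total, 8 bits per channel, and bit offsets {0, 8, 16, 24} for blue, green, red and alpha. A minimal standalone sketch packing one such pixel:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t px = ((uint32_t)0x12 << 0)  |	/* blue  at bit offset 0  */
		      ((uint32_t)0x34 << 8)  |	/* green at bit offset 8  */
		      ((uint32_t)0x56 << 16) |	/* red   at bit offset 16 */
		      ((uint32_t)0x78 << 24);	/* alpha at bit offset 24 */

	printf("A8R8G8B8 pixel: 0x%08x\n", px);	/* prints 0x78563412 */
	return 0;
}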
705static inline u32 clamped_umul32(u32 a, u32 b)
706{
707 uint64_t tmp = (uint64_t) a*b;
708 return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
709}
710
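clamped_umul32() saturates instead of wrapping: the product is computed in 64 bits, so the overflow test is exact for any pair of 32-bit operands. A standalone copy of the helper showing both the normal and the clamped case:

#include <stdint.h>
#include <stdio.h>

static uint32_t clamped_umul32(uint32_t a, uint32_t b)
{
	uint64_t tmp = (uint64_t)a * b;
	return (tmp > UINT32_MAX) ? UINT32_MAX : (uint32_t)tmp;
}

int main(void)
{
	printf("%u\n", clamped_umul32(1000, 1000));	  /* 1000000 */
	printf("%u\n", clamped_umul32(0x10000, 0x10000)); /* clamps to 4294967295 */
	return 0;
}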
711static inline const struct svga3d_surface_desc *
712svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
713{
714 if (format < ARRAY_SIZE(svga3d_surface_descs))
715 return &svga3d_surface_descs[format];
716
717 return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
718}
719
720/*
721 *----------------------------------------------------------------------
722 *
723 * svga3dsurface_get_mip_size --
724 *
725 * Given a base level size and the mip level, compute the size of
726 * the mip level.
727 *
728 * Results:
729 * See above.
730 *
731 * Side effects:
732 * None.
733 *
734 *----------------------------------------------------------------------
735 */
736
737static inline surf_size_struct
738svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
739{
740 surf_size_struct size;
741
742 size.width = max_t(u32, base_level.width >> mip_level, 1);
743 size.height = max_t(u32, base_level.height >> mip_level, 1);
744 size.depth = max_t(u32, base_level.depth >> mip_level, 1);
745 return size;
746}
747
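Each mip level simply halves every dimension of the level below it, clamping at 1, which is what the right shift plus max_t above computes. A standalone check for a 256x64x1 base level:

#include <stdio.h>

int main(void)
{
	unsigned w = 256, h = 64, d = 1, mip;

	for (mip = 0; mip < 9; mip++) {
		unsigned mw = (w >> mip) ? (w >> mip) : 1;
		unsigned mh = (h >> mip) ? (h >> mip) : 1;
		unsigned md = (d >> mip) ? (d >> mip) : 1;
		printf("mip %u: %ux%ux%u\n", mip, mw, mh, md);
	}
	return 0;	/* mip 8 bottoms out at 1x1x1 */
}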
748static inline void
749svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
750 const surf_size_struct *pixel_size,
751 surf_size_struct *block_size)
752{
753 block_size->width = DIV_ROUND_UP(pixel_size->width,
754 desc->block_size.width);
755 block_size->height = DIV_ROUND_UP(pixel_size->height,
756 desc->block_size.height);
757 block_size->depth = DIV_ROUND_UP(pixel_size->depth,
758 desc->block_size.depth);
759}
760
761static inline bool
762svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
763{
764 return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
765}
766
767static inline u32
768svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
769 const surf_size_struct *size)
770{
771 u32 pitch;
772 surf_size_struct blocks;
773
774 svga3dsurface_get_size_in_blocks(desc, size, &blocks);
775
776 pitch = blocks.width * desc->pitch_bytes_per_block;
777
778 return pitch;
779}
780
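A worked pitch example using the SVGA3D_DXT1 entry from the table above (4x4x1 block, 8 pitch bytes per block): a 100-pixel-wide image rounds up to 25 blocks per row, i.e. a 200-byte pitch.

#include <stdio.h>

#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))

int main(void)
{
	unsigned width_px = 100;
	unsigned block_w = 4, pitch_bytes_per_block = 8;	/* DXT1 entry */
	unsigned pitch = DIV_ROUND_UP(width_px, block_w) * pitch_bytes_per_block;

	printf("DXT1 pitch: %u bytes\n", pitch);	/* 25 * 8 = 200 */
	return 0;
}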
781/*
782 *-----------------------------------------------------------------------------
783 *
784 * svga3dsurface_get_image_buffer_size --
785 *
786 * Return the number of bytes of buffer space required to store
787 * one image of a surface, optionally using the specified pitch.
788 *
789 * If pitch is zero, it is assumed that rows are tightly packed.
790 *
791 * This function is overflow-safe. If the result would have
792 * overflowed, instead we return MAX_UINT32.
793 *
794 * Results:
795 * Byte count.
796 *
797 * Side effects:
798 * None.
799 *
800 *-----------------------------------------------------------------------------
801 */
802
803static inline u32
804svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
805 const surf_size_struct *size,
806 u32 pitch)
807{
808 surf_size_struct image_blocks;
809 u32 slice_size, total_size;
810
811 svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);
812
813 if (svga3dsurface_is_planar_surface(desc)) {
814 total_size = clamped_umul32(image_blocks.width,
815 image_blocks.height);
816 total_size = clamped_umul32(total_size, image_blocks.depth);
817 total_size = clamped_umul32(total_size, desc->bytes_per_block);
818 return total_size;
819 }
820
821 if (pitch == 0)
822 pitch = svga3dsurface_calculate_pitch(desc, size);
823
824 slice_size = clamped_umul32(image_blocks.height, pitch);
825 total_size = clamped_umul32(slice_size, image_blocks.depth);
826
827 return total_size;
828}
829
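For the planar branch, the SVGA3D_NV12 entry above (2x2x1 block, 6 bytes per block) reproduces the familiar 1.5 bytes per pixel of NV12; a standalone check for a 64x64 image:

#include <stdio.h>

#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))

int main(void)
{
	unsigned w = 64, h = 64, d = 1;
	unsigned bw = DIV_ROUND_UP(w, 2);	/* blocks across */
	unsigned bh = DIV_ROUND_UP(h, 2);	/* blocks down */
	unsigned bd = DIV_ROUND_UP(d, 1);
	unsigned total = bw * bh * bd * 6;	/* bytes_per_block = 6 */

	printf("NV12 64x64: %u bytes\n", total);	/* 6144 = 64*64*1.5 */
	return 0;
}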
830static inline u32
831svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
832 surf_size_struct base_level_size,
833 u32 num_mip_levels,
834 bool cubemap)
835{
836 const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
837 u32 total_size = 0;
838 u32 mip;
839
840 for (mip = 0; mip < num_mip_levels; mip++) {
841 surf_size_struct size =
842 svga3dsurface_get_mip_size(base_level_size, mip);
843 total_size += svga3dsurface_get_image_buffer_size(desc,
844 &size, 0);
845 }
846
847 if (cubemap)
848 total_size *= SVGA3D_MAX_SURFACE_FACES;
849
850 return total_size;
851}
852
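A standalone sketch of the accumulation above for a 64x64 surface with a full 7-level mip chain at 4 bytes per pixel; a cubemap multiplies the result by SVGA3D_MAX_SURFACE_FACES (6):

#include <stdio.h>

int main(void)
{
	unsigned total = 0, mip;

	for (mip = 0; mip < 7; mip++) {
		unsigned w = (64 >> mip) ? (64 >> mip) : 1;
		unsigned h = (64 >> mip) ? (64 >> mip) : 1;
		total += w * h * 4;	/* tightly packed, pitch == row bytes */
	}
	printf("2D: %u bytes, cubemap: %u bytes\n", total, total * 6);
	/* prints 21844 and 131064 */
	return 0;
}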
853
854/**
855 * svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
856 * in an image (or volume).
857 *
858 * @width: The image width in pixels.
859 * @height: The image height in pixels.
860 */
861static inline u32
862svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
863 u32 width, u32 height,
864 u32 x, u32 y, u32 z)
865{
866 const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
867 const u32 bw = desc->block_size.width, bh = desc->block_size.height;
868 const u32 bd = desc->block_size.depth;
869 const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
870 const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
871 const u32 offset = (z / bd * imgstride +
872 y / bh * rowstride +
873 x / bw * desc->bytes_per_block);
874 return offset;
875}
876
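The offset arithmetic above is the usual slice/row/column stride sum, done in block units. A worked example for a 1x1-block, 4-byte format (e.g. SVGA3D_A8R8G8B8) in a 640x480 image:

#include <stdio.h>

#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))

int main(void)
{
	unsigned width = 640, height = 480;
	unsigned bw = 1, bh = 1, bd = 1, bpb = 4;	/* block dims, bytes/block */
	unsigned x = 3, y = 2, z = 0;

	unsigned rowstride = DIV_ROUND_UP(width, bw) * bpb;	/* 2560 */
	unsigned imgstride = DIV_ROUND_UP(height, bh) * rowstride;
	unsigned offset = z / bd * imgstride +
			  y / bh * rowstride +
			  x / bw * bpb;

	printf("offset: %u\n", offset);	/* 2*2560 + 3*4 = 5132 */
	return 0;
}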
877
878static inline u32
879svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
880 surf_size_struct baseLevelSize,
881 u32 numMipLevels,
882 u32 face,
883 u32 mip)
884
885{
886 u32 offset;
887 u32 mipChainBytes;
888 u32 mipChainBytesToLevel;
889 u32 i;
890 const struct svga3d_surface_desc *desc;
891 surf_size_struct mipSize;
892 u32 bytes;
893
894 desc = svga3dsurface_get_desc(format);
895
896 mipChainBytes = 0;
897 mipChainBytesToLevel = 0;
898 for (i = 0; i < numMipLevels; i++) {
899 mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
900 bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
901 mipChainBytes += bytes;
902 if (i < mip)
903 mipChainBytesToLevel += bytes;
904 }
905
906 offset = mipChainBytes * face + mipChainBytesToLevel;
907
908 return offset;
909}
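The loop above implies a face-major layout: each face stores its complete mip chain contiguously, and within a face the mip levels are laid out from largest to smallest. A tiny standalone sketch of the resulting offsets for a 2-level, 4-byte, 2x2 base surface:

#include <stdio.h>

int main(void)
{
	unsigned mip_bytes[2] = { 2 * 2 * 4, 1 * 1 * 4 };	/* 16, 4 */
	unsigned chain = mip_bytes[0] + mip_bytes[1];		/* 20 per face */
	unsigned face, mip;

	for (face = 0; face < 2; face++)
		for (mip = 0; mip < 2; mip++)
			printf("face %u mip %u -> offset %u\n", face, mip,
			       chain * face + (mip ? mip_bytes[0] : 0));
	return 0;
}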
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 9826fbc88154..96dc84dc34d0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -248,13 +248,12 @@ void vmw_evict_flags(struct ttm_buffer_object *bo,
248 *placement = vmw_sys_placement; 248 *placement = vmw_sys_placement;
249} 249}
250 250
251/**
252 * FIXME: Proper access checks on buffers.
253 */
254
255static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp) 251static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
256{ 252{
257 return 0; 253 struct ttm_object_file *tfile =
254 vmw_fpriv((struct drm_file *)filp->private_data)->tfile;
255
256 return vmw_user_dmabuf_verify_access(bo, tfile);
258} 257}
259 258
260static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) 259static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
@@ -310,27 +309,23 @@ static void vmw_sync_obj_unref(void **sync_obj)
310 vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj); 309 vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
311} 310}
312 311
313static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg) 312static int vmw_sync_obj_flush(void *sync_obj)
314{ 313{
315 vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj); 314 vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
316 return 0; 315 return 0;
317} 316}
318 317
319static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg) 318static bool vmw_sync_obj_signaled(void *sync_obj)
320{ 319{
321 unsigned long flags = (unsigned long) sync_arg;
322 return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj, 320 return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
323 (uint32_t) flags); 321 DRM_VMW_FENCE_FLAG_EXEC);
324 322
325} 323}
326 324
327static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg, 325static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
328 bool lazy, bool interruptible)
329{ 326{
330 unsigned long flags = (unsigned long) sync_arg;
331
332 return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj, 327 return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
333 (uint32_t) flags, 328 DRM_VMW_FENCE_FLAG_EXEC,
334 lazy, interruptible, 329 lazy, interruptible,
335 VMW_FENCE_WAIT_TIMEOUT); 330 VMW_FENCE_WAIT_TIMEOUT);
336} 331}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
new file mode 100644
index 000000000000..00ae0925aca8
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -0,0 +1,274 @@
1/**************************************************************************
2 *
3 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "vmwgfx_resource_priv.h"
30#include "ttm/ttm_placement.h"
31
32struct vmw_user_context {
33 struct ttm_base_object base;
34 struct vmw_resource res;
35};
36
37static void vmw_user_context_free(struct vmw_resource *res);
38static struct vmw_resource *
39vmw_user_context_base_to_res(struct ttm_base_object *base);
40
41static uint64_t vmw_user_context_size;
42
43static const struct vmw_user_resource_conv user_context_conv = {
44 .object_type = VMW_RES_CONTEXT,
45 .base_obj_to_res = vmw_user_context_base_to_res,
46 .res_free = vmw_user_context_free
47};
48
49const struct vmw_user_resource_conv *user_context_converter =
50 &user_context_conv;
51
52
53static const struct vmw_res_func vmw_legacy_context_func = {
54 .res_type = vmw_res_context,
55 .needs_backup = false,
56 .may_evict = false,
57 .type_name = "legacy contexts",
58 .backup_placement = NULL,
59 .create = NULL,
60 .destroy = NULL,
61 .bind = NULL,
62 .unbind = NULL
63};
64
65/**
66 * Context management:
67 */
68
69static void vmw_hw_context_destroy(struct vmw_resource *res)
70{
71
72 struct vmw_private *dev_priv = res->dev_priv;
73 struct {
74 SVGA3dCmdHeader header;
75 SVGA3dCmdDestroyContext body;
76 } *cmd;
77
78
79 vmw_execbuf_release_pinned_bo(dev_priv);
80 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
81 if (unlikely(cmd == NULL)) {
82 DRM_ERROR("Failed reserving FIFO space for context "
83 "destruction.\n");
84 return;
85 }
86
87 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
88 cmd->header.size = cpu_to_le32(sizeof(cmd->body));
89 cmd->body.cid = cpu_to_le32(res->id);
90
91 vmw_fifo_commit(dev_priv, sizeof(*cmd));
92 vmw_3d_resource_dec(dev_priv, false);
93}
94
95static int vmw_context_init(struct vmw_private *dev_priv,
96 struct vmw_resource *res,
97 void (*res_free) (struct vmw_resource *res))
98{
99 int ret;
100
101 struct {
102 SVGA3dCmdHeader header;
103 SVGA3dCmdDefineContext body;
104 } *cmd;
105
106 ret = vmw_resource_init(dev_priv, res, false,
107 res_free, &vmw_legacy_context_func);
108
109 if (unlikely(ret != 0)) {
110 DRM_ERROR("Failed to allocate a resource id.\n");
111 goto out_early;
112 }
113
114 if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
115 DRM_ERROR("Out of hw context ids.\n");
116 vmw_resource_unreference(&res);
117 return -ENOMEM;
118 }
119
120 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
121 if (unlikely(cmd == NULL)) {
122 DRM_ERROR("Fifo reserve failed.\n");
123 vmw_resource_unreference(&res);
124 return -ENOMEM;
125 }
126
127 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
128 cmd->header.size = cpu_to_le32(sizeof(cmd->body));
129 cmd->body.cid = cpu_to_le32(res->id);
130
131 vmw_fifo_commit(dev_priv, sizeof(*cmd));
132 (void) vmw_3d_resource_inc(dev_priv, false);
133 vmw_resource_activate(res, vmw_hw_context_destroy);
134 return 0;
135
136out_early:
137 if (res_free == NULL)
138 kfree(res);
139 else
140 res_free(res);
141 return ret;
142}
143
144struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
145{
146 struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
147 int ret;
148
149 if (unlikely(res == NULL))
150 return NULL;
151
152 ret = vmw_context_init(dev_priv, res, NULL);
153
154 return (ret == 0) ? res : NULL;
155}
156
157/**
158 * User-space context management:
159 */
160
161static struct vmw_resource *
162vmw_user_context_base_to_res(struct ttm_base_object *base)
163{
164 return &(container_of(base, struct vmw_user_context, base)->res);
165}
166
167static void vmw_user_context_free(struct vmw_resource *res)
168{
169 struct vmw_user_context *ctx =
170 container_of(res, struct vmw_user_context, res);
171 struct vmw_private *dev_priv = res->dev_priv;
172
173 ttm_base_object_kfree(ctx, base);
174 ttm_mem_global_free(vmw_mem_glob(dev_priv),
175 vmw_user_context_size);
176}
177
178/**
179 * This function is called when user space has no more references on the
180 * base object. It releases the base-object's reference on the resource object.
181 */
182
183static void vmw_user_context_base_release(struct ttm_base_object **p_base)
184{
185 struct ttm_base_object *base = *p_base;
186 struct vmw_user_context *ctx =
187 container_of(base, struct vmw_user_context, base);
188 struct vmw_resource *res = &ctx->res;
189
190 *p_base = NULL;
191 vmw_resource_unreference(&res);
192}
193
194int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
195 struct drm_file *file_priv)
196{
197 struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
198 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
199
200 return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
201}
202
203int vmw_context_define_ioctl(struct drm_device *dev, void *data,
204 struct drm_file *file_priv)
205{
206 struct vmw_private *dev_priv = vmw_priv(dev);
207 struct vmw_user_context *ctx;
208 struct vmw_resource *res;
209 struct vmw_resource *tmp;
210 struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
211 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
212 struct vmw_master *vmaster = vmw_master(file_priv->master);
213 int ret;
214
215
216 /*
217 * Approximate idr memory usage with 128 bytes. It will be limited
218 * by the maximum number of contexts anyway.
219 */
220
221 if (unlikely(vmw_user_context_size == 0))
222 vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
223
224 ret = ttm_read_lock(&vmaster->lock, true);
225 if (unlikely(ret != 0))
226 return ret;
227
228 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
229 vmw_user_context_size,
230 false, true);
231 if (unlikely(ret != 0)) {
232 if (ret != -ERESTARTSYS)
233 DRM_ERROR("Out of graphics memory for context"
234 " creation.\n");
235 goto out_unlock;
236 }
237
238 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
239 if (unlikely(ctx == NULL)) {
240 ttm_mem_global_free(vmw_mem_glob(dev_priv),
241 vmw_user_context_size);
242 ret = -ENOMEM;
243 goto out_unlock;
244 }
245
246 res = &ctx->res;
247 ctx->base.shareable = false;
248 ctx->base.tfile = NULL;
249
250 /*
251 * From here on, the destructor takes over resource freeing.
252 */
253
254 ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
255 if (unlikely(ret != 0))
256 goto out_unlock;
257
258 tmp = vmw_resource_reference(&ctx->res);
259 ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
260 &vmw_user_context_base_release, NULL);
261
262 if (unlikely(ret != 0)) {
263 vmw_resource_unreference(&tmp);
264 goto out_err;
265 }
266
267 arg->cid = ctx->base.hash.key;
268out_err:
269 vmw_resource_unreference(&res);
270out_unlock:
271 ttm_read_unlock(&vmaster->lock);
272 return ret;
273
274}
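The define ioctl above juggles two references: vmw_context_init() leaves the resource holding one, an extra one is taken for the ttm base object, and the function drops its own on the way out, so the resource lives exactly as long as the user-space handle. A simplified refcount model of that handoff (not the driver's code; names are illustrative):

#include <stdio.h>

struct res { int refs; };

static void res_get(struct res *r) { r->refs++; }
static void res_put(struct res *r) { if (--r->refs == 0) printf("freed\n"); }

int main(void)
{
	struct res ctx = { .refs = 1 };	/* after vmw_context_init() */

	res_get(&ctx);			/* reference handed to the base object */
	res_put(&ctx);			/* ioctl drops its own reference */
	printf("held by handle: %d\n", ctx.refs);	/* 1 */
	res_put(&ctx);			/* user space closes the handle: freed */
	return 0;
}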
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index d1498bfd7873..5fae06ad7e25 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -60,13 +60,13 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
60 if (unlikely(ret != 0)) 60 if (unlikely(ret != 0))
61 return ret; 61 return ret;
62 62
63 vmw_execbuf_release_pinned_bo(dev_priv, false, 0); 63 vmw_execbuf_release_pinned_bo(dev_priv);
64 64
65 ret = ttm_bo_reserve(bo, interruptible, false, false, 0); 65 ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
66 if (unlikely(ret != 0)) 66 if (unlikely(ret != 0))
67 goto err; 67 goto err;
68 68
69 ret = ttm_bo_validate(bo, placement, interruptible, false, false); 69 ret = ttm_bo_validate(bo, placement, interruptible, false);
70 70
71 ttm_bo_unreserve(bo); 71 ttm_bo_unreserve(bo);
72 72
@@ -105,7 +105,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
105 return ret; 105 return ret;
106 106
107 if (pin) 107 if (pin)
108 vmw_execbuf_release_pinned_bo(dev_priv, false, 0); 108 vmw_execbuf_release_pinned_bo(dev_priv);
109 109
110 ret = ttm_bo_reserve(bo, interruptible, false, false, 0); 110 ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
111 if (unlikely(ret != 0)) 111 if (unlikely(ret != 0))
@@ -123,7 +123,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
123 else 123 else
124 placement = &vmw_vram_gmr_placement; 124 placement = &vmw_vram_gmr_placement;
125 125
126 ret = ttm_bo_validate(bo, placement, interruptible, false, false); 126 ret = ttm_bo_validate(bo, placement, interruptible, false);
127 if (likely(ret == 0) || ret == -ERESTARTSYS) 127 if (likely(ret == 0) || ret == -ERESTARTSYS)
128 goto err_unreserve; 128 goto err_unreserve;
129 129
@@ -138,7 +138,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
138 else 138 else
139 placement = &vmw_vram_placement; 139 placement = &vmw_vram_placement;
140 140
141 ret = ttm_bo_validate(bo, placement, interruptible, false, false); 141 ret = ttm_bo_validate(bo, placement, interruptible, false);
142 142
143err_unreserve: 143err_unreserve:
144 ttm_bo_unreserve(bo); 144 ttm_bo_unreserve(bo);
@@ -214,8 +214,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
214 return ret; 214 return ret;
215 215
216 if (pin) 216 if (pin)
217 vmw_execbuf_release_pinned_bo(dev_priv, false, 0); 217 vmw_execbuf_release_pinned_bo(dev_priv);
218
219 ret = ttm_bo_reserve(bo, interruptible, false, false, 0); 218 ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
220 if (unlikely(ret != 0)) 219 if (unlikely(ret != 0))
221 goto err_unlock; 220 goto err_unlock;
@@ -224,10 +223,9 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
224 if (bo->mem.mem_type == TTM_PL_VRAM && 223 if (bo->mem.mem_type == TTM_PL_VRAM &&
225 bo->mem.start < bo->num_pages && 224 bo->mem.start < bo->num_pages &&
226 bo->mem.start > 0) 225 bo->mem.start > 0)
227 (void) ttm_bo_validate(bo, &vmw_sys_placement, false, 226 (void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);
228 false, false);
229 227
230 ret = ttm_bo_validate(bo, &placement, interruptible, false, false); 228 ret = ttm_bo_validate(bo, &placement, interruptible, false);
231 229
232 /* For some reason we didn't end up at the start of vram */ 230 /* For some reason we didn't end up at the start of vram */
233 WARN_ON(ret == 0 && bo->offset != 0); 231 WARN_ON(ret == 0 && bo->offset != 0);
@@ -304,7 +302,7 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
304 uint32_t old_mem_type = bo->mem.mem_type; 302 uint32_t old_mem_type = bo->mem.mem_type;
305 int ret; 303 int ret;
306 304
307 BUG_ON(!atomic_read(&bo->reserved)); 305 BUG_ON(!ttm_bo_is_reserved(bo));
308 BUG_ON(old_mem_type != TTM_PL_VRAM && 306 BUG_ON(old_mem_type != TTM_PL_VRAM &&
309 old_mem_type != VMW_PL_GMR); 307 old_mem_type != VMW_PL_GMR);
310 308
@@ -316,7 +314,7 @@ void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
316 placement.num_placement = 1; 314 placement.num_placement = 1;
317 placement.placement = &pl_flags; 315 placement.placement = &pl_flags;
318 316
319 ret = ttm_bo_validate(bo, &placement, false, true, true); 317 ret = ttm_bo_validate(bo, &placement, false, true);
320 318
321 BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type); 319 BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
322} 320}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 2dd185e42f21..161f8b2549aa 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -292,7 +292,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
292 PAGE_SIZE, 292 PAGE_SIZE,
293 ttm_bo_type_device, 293 ttm_bo_type_device,
294 &vmw_vram_sys_placement, 294 &vmw_vram_sys_placement,
295 0, 0, false, NULL, 295 0, false, NULL,
296 &dev_priv->dummy_query_bo); 296 &dev_priv->dummy_query_bo);
297} 297}
298 298
@@ -432,6 +432,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
432 struct vmw_private *dev_priv; 432 struct vmw_private *dev_priv;
433 int ret; 433 int ret;
434 uint32_t svga_id; 434 uint32_t svga_id;
435 enum vmw_res_type i;
435 436
436 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); 437 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
437 if (unlikely(dev_priv == NULL)) { 438 if (unlikely(dev_priv == NULL)) {
@@ -448,15 +449,18 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
448 mutex_init(&dev_priv->cmdbuf_mutex); 449 mutex_init(&dev_priv->cmdbuf_mutex);
449 mutex_init(&dev_priv->release_mutex); 450 mutex_init(&dev_priv->release_mutex);
450 rwlock_init(&dev_priv->resource_lock); 451 rwlock_init(&dev_priv->resource_lock);
451 idr_init(&dev_priv->context_idr); 452
452 idr_init(&dev_priv->surface_idr); 453 for (i = vmw_res_context; i < vmw_res_max; ++i) {
453 idr_init(&dev_priv->stream_idr); 454 idr_init(&dev_priv->res_idr[i]);
455 INIT_LIST_HEAD(&dev_priv->res_lru[i]);
456 }
457
454 mutex_init(&dev_priv->init_mutex); 458 mutex_init(&dev_priv->init_mutex);
455 init_waitqueue_head(&dev_priv->fence_queue); 459 init_waitqueue_head(&dev_priv->fence_queue);
456 init_waitqueue_head(&dev_priv->fifo_queue); 460 init_waitqueue_head(&dev_priv->fifo_queue);
457 dev_priv->fence_queue_waiters = 0; 461 dev_priv->fence_queue_waiters = 0;
458 atomic_set(&dev_priv->fifo_queue_waiters, 0); 462 atomic_set(&dev_priv->fifo_queue_waiters, 0);
459 INIT_LIST_HEAD(&dev_priv->surface_lru); 463
460 dev_priv->used_memory_size = 0; 464 dev_priv->used_memory_size = 0;
461 465
462 dev_priv->io_start = pci_resource_start(dev->pdev, 0); 466 dev_priv->io_start = pci_resource_start(dev->pdev, 0);
@@ -609,14 +613,18 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
609 } 613 }
610 } 614 }
611 615
616 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
617 ret = drm_irq_install(dev);
618 if (ret != 0) {
619 DRM_ERROR("Failed installing irq: %d\n", ret);
620 goto out_no_irq;
621 }
622 }
623
612 dev_priv->fman = vmw_fence_manager_init(dev_priv); 624 dev_priv->fman = vmw_fence_manager_init(dev_priv);
613 if (unlikely(dev_priv->fman == NULL)) 625 if (unlikely(dev_priv->fman == NULL))
614 goto out_no_fman; 626 goto out_no_fman;
615 627
616 /* Need to start the fifo to check if we can do screen objects */
617 ret = vmw_3d_resource_inc(dev_priv, true);
618 if (unlikely(ret != 0))
619 goto out_no_fifo;
620 vmw_kms_save_vga(dev_priv); 628 vmw_kms_save_vga(dev_priv);
621 629
622 /* Start kms and overlay systems, needs fifo. */ 630 /* Start kms and overlay systems, needs fifo. */
@@ -625,25 +633,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
625 goto out_no_kms; 633 goto out_no_kms;
626 vmw_overlay_init(dev_priv); 634 vmw_overlay_init(dev_priv);
627 635
628 /* 3D Depends on Screen Objects being used. */
629 DRM_INFO("Detected %sdevice 3D availability.\n",
630 vmw_fifo_have_3d(dev_priv) ?
631 "" : "no ");
632
633 /* We might be done with the fifo now */
634 if (dev_priv->enable_fb) { 636 if (dev_priv->enable_fb) {
637 ret = vmw_3d_resource_inc(dev_priv, true);
638 if (unlikely(ret != 0))
639 goto out_no_fifo;
635 vmw_fb_init(dev_priv); 640 vmw_fb_init(dev_priv);
636 } else {
637 vmw_kms_restore_vga(dev_priv);
638 vmw_3d_resource_dec(dev_priv, true);
639 }
640
641 if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
642 ret = drm_irq_install(dev);
643 if (unlikely(ret != 0)) {
644 DRM_ERROR("Failed installing irq: %d\n", ret);
645 goto out_no_irq;
646 }
647 } 641 }
648 642
649 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier; 643 dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
@@ -651,20 +645,16 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
651 645
652 return 0; 646 return 0;
653 647
654out_no_irq: 648out_no_fifo:
655 if (dev_priv->enable_fb)
656 vmw_fb_close(dev_priv);
657 vmw_overlay_close(dev_priv); 649 vmw_overlay_close(dev_priv);
658 vmw_kms_close(dev_priv); 650 vmw_kms_close(dev_priv);
659out_no_kms: 651out_no_kms:
660 /* We still have a 3D resource reference held */ 652 vmw_kms_restore_vga(dev_priv);
661 if (dev_priv->enable_fb) {
662 vmw_kms_restore_vga(dev_priv);
663 vmw_3d_resource_dec(dev_priv, false);
664 }
665out_no_fifo:
666 vmw_fence_manager_takedown(dev_priv->fman); 653 vmw_fence_manager_takedown(dev_priv->fman);
667out_no_fman: 654out_no_fman:
655 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
656 drm_irq_uninstall(dev_priv->dev);
657out_no_irq:
668 if (dev_priv->stealth) 658 if (dev_priv->stealth)
669 pci_release_region(dev->pdev, 2); 659 pci_release_region(dev->pdev, 2);
670 else 660 else
@@ -684,9 +674,9 @@ out_err2:
684out_err1: 674out_err1:
685 vmw_ttm_global_release(dev_priv); 675 vmw_ttm_global_release(dev_priv);
686out_err0: 676out_err0:
687 idr_destroy(&dev_priv->surface_idr); 677 for (i = vmw_res_context; i < vmw_res_max; ++i)
688 idr_destroy(&dev_priv->context_idr); 678 idr_destroy(&dev_priv->res_idr[i]);
689 idr_destroy(&dev_priv->stream_idr); 679
690 kfree(dev_priv); 680 kfree(dev_priv);
691 return ret; 681 return ret;
692} 682}
@@ -694,13 +684,14 @@ out_err0:
694static int vmw_driver_unload(struct drm_device *dev) 684static int vmw_driver_unload(struct drm_device *dev)
695{ 685{
696 struct vmw_private *dev_priv = vmw_priv(dev); 686 struct vmw_private *dev_priv = vmw_priv(dev);
687 enum vmw_res_type i;
697 688
698 unregister_pm_notifier(&dev_priv->pm_nb); 689 unregister_pm_notifier(&dev_priv->pm_nb);
699 690
691 if (dev_priv->ctx.res_ht_initialized)
692 drm_ht_remove(&dev_priv->ctx.res_ht);
700 if (dev_priv->ctx.cmd_bounce) 693 if (dev_priv->ctx.cmd_bounce)
701 vfree(dev_priv->ctx.cmd_bounce); 694 vfree(dev_priv->ctx.cmd_bounce);
702 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
703 drm_irq_uninstall(dev_priv->dev);
704 if (dev_priv->enable_fb) { 695 if (dev_priv->enable_fb) {
705 vmw_fb_close(dev_priv); 696 vmw_fb_close(dev_priv);
706 vmw_kms_restore_vga(dev_priv); 697 vmw_kms_restore_vga(dev_priv);
@@ -709,6 +700,8 @@ static int vmw_driver_unload(struct drm_device *dev)
709 vmw_kms_close(dev_priv); 700 vmw_kms_close(dev_priv);
710 vmw_overlay_close(dev_priv); 701 vmw_overlay_close(dev_priv);
711 vmw_fence_manager_takedown(dev_priv->fman); 702 vmw_fence_manager_takedown(dev_priv->fman);
703 if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
704 drm_irq_uninstall(dev_priv->dev);
712 if (dev_priv->stealth) 705 if (dev_priv->stealth)
713 pci_release_region(dev->pdev, 2); 706 pci_release_region(dev->pdev, 2);
714 else 707 else
@@ -723,9 +716,9 @@ static int vmw_driver_unload(struct drm_device *dev)
723 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); 716 (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
724 (void)ttm_bo_device_release(&dev_priv->bdev); 717 (void)ttm_bo_device_release(&dev_priv->bdev);
725 vmw_ttm_global_release(dev_priv); 718 vmw_ttm_global_release(dev_priv);
726 idr_destroy(&dev_priv->surface_idr); 719
727 idr_destroy(&dev_priv->context_idr); 720 for (i = vmw_res_context; i < vmw_res_max; ++i)
728 idr_destroy(&dev_priv->stream_idr); 721 idr_destroy(&dev_priv->res_idr[i]);
729 722
730 kfree(dev_priv); 723 kfree(dev_priv);
731 724
@@ -924,11 +917,11 @@ static int vmw_master_set(struct drm_device *dev,
924 917
925out_no_active_lock: 918out_no_active_lock:
926 if (!dev_priv->enable_fb) { 919 if (!dev_priv->enable_fb) {
920 vmw_kms_restore_vga(dev_priv);
921 vmw_3d_resource_dec(dev_priv, true);
927 mutex_lock(&dev_priv->hw_mutex); 922 mutex_lock(&dev_priv->hw_mutex);
928 vmw_write(dev_priv, SVGA_REG_TRACES, 1); 923 vmw_write(dev_priv, SVGA_REG_TRACES, 1);
929 mutex_unlock(&dev_priv->hw_mutex); 924 mutex_unlock(&dev_priv->hw_mutex);
930 vmw_kms_restore_vga(dev_priv);
931 vmw_3d_resource_dec(dev_priv, true);
932 } 925 }
933 return ret; 926 return ret;
934} 927}
@@ -949,7 +942,7 @@ static void vmw_master_drop(struct drm_device *dev,
949 942
950 vmw_fp->locked_master = drm_master_get(file_priv->master); 943 vmw_fp->locked_master = drm_master_get(file_priv->master);
951 ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile); 944 ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
952 vmw_execbuf_release_pinned_bo(dev_priv, false, 0); 945 vmw_execbuf_release_pinned_bo(dev_priv);
953 946
954 if (unlikely((ret != 0))) { 947 if (unlikely((ret != 0))) {
955 DRM_ERROR("Unable to lock TTM at VT switch.\n"); 948 DRM_ERROR("Unable to lock TTM at VT switch.\n");
@@ -962,11 +955,11 @@ static void vmw_master_drop(struct drm_device *dev,
962 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM); 955 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
963 if (unlikely(ret != 0)) 956 if (unlikely(ret != 0))
964 DRM_ERROR("Unable to clean VRAM on master drop.\n"); 957 DRM_ERROR("Unable to clean VRAM on master drop.\n");
958 vmw_kms_restore_vga(dev_priv);
959 vmw_3d_resource_dec(dev_priv, true);
965 mutex_lock(&dev_priv->hw_mutex); 960 mutex_lock(&dev_priv->hw_mutex);
966 vmw_write(dev_priv, SVGA_REG_TRACES, 1); 961 vmw_write(dev_priv, SVGA_REG_TRACES, 1);
967 mutex_unlock(&dev_priv->hw_mutex); 962 mutex_unlock(&dev_priv->hw_mutex);
968 vmw_kms_restore_vga(dev_priv);
969 vmw_3d_resource_dec(dev_priv, true);
970 } 963 }
971 964
972 dev_priv->active_master = &dev_priv->fbdev_master; 965 dev_priv->active_master = &dev_priv->fbdev_master;
@@ -1001,7 +994,8 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
1001 * This empties VRAM and unbinds all GMR bindings. 994 * This empties VRAM and unbinds all GMR bindings.
1002 * Buffer contents are moved to swappable memory. 995 * Buffer contents are moved to swappable memory.
1003 */ 996 */
1004 vmw_execbuf_release_pinned_bo(dev_priv, false, 0); 997 vmw_execbuf_release_pinned_bo(dev_priv);
998 vmw_resource_evict_all(dev_priv);
1005 ttm_bo_swapout_all(&dev_priv->bdev); 999 ttm_bo_swapout_all(&dev_priv->bdev);
1006 1000
1007 break; 1001 break;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 88a179e26de9..13aeda71280e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -67,31 +67,46 @@ struct vmw_fpriv {
67 67
68struct vmw_dma_buffer { 68struct vmw_dma_buffer {
69 struct ttm_buffer_object base; 69 struct ttm_buffer_object base;
70 struct list_head validate_list; 70 struct list_head res_list;
71 bool gmr_bound;
72 uint32_t cur_validate_node;
73 bool on_validate_list;
74}; 71};
75 72
73/**
74 * struct vmw_validate_buffer - Carries validation info about buffers.
75 *
76 * @base: Validation info for TTM.
77 * @hash: Hash entry for quick lookup of the TTM buffer object.
78 *
79 * This structure also contains driver-private validation info
80 * on top of the info needed by TTM.
81 */
82struct vmw_validate_buffer {
83 struct ttm_validate_buffer base;
84 struct drm_hash_item hash;
85};
86
87struct vmw_res_func;
76struct vmw_resource { 88struct vmw_resource {
77 struct kref kref; 89 struct kref kref;
78 struct vmw_private *dev_priv; 90 struct vmw_private *dev_priv;
79 struct idr *idr;
80 int id; 91 int id;
81 enum ttm_object_type res_type;
82 bool avail; 92 bool avail;
83 void (*remove_from_lists) (struct vmw_resource *res); 93 unsigned long backup_size;
84 void (*hw_destroy) (struct vmw_resource *res); 94 bool res_dirty; /* Protected by backup buffer reserved */
95 bool backup_dirty; /* Protected by backup buffer reserved */
96 struct vmw_dma_buffer *backup;
97 unsigned long backup_offset;
98 const struct vmw_res_func *func;
99 struct list_head lru_head; /* Protected by the resource lock */
100 struct list_head mob_head; /* Protected by @backup reserved */
85 void (*res_free) (struct vmw_resource *res); 101 void (*res_free) (struct vmw_resource *res);
86 struct list_head validate_head; 102 void (*hw_destroy) (struct vmw_resource *res);
87 struct list_head query_head; /* Protected by the cmdbuf mutex */ 103};
88 /* TODO is a generic snooper needed? */ 104
89#if 0 105enum vmw_res_type {
90 void (*snoop)(struct vmw_resource *res, 106 vmw_res_context,
91 struct ttm_object_file *tfile, 107 vmw_res_surface,
92 SVGA3dCmdHeader *header); 108 vmw_res_stream,
93 void *snoop_priv; 109 vmw_res_max
94#endif
95}; 110};
96 111
97struct vmw_cursor_snooper { 112struct vmw_cursor_snooper {
@@ -105,20 +120,18 @@ struct vmw_surface_offset;
105 120
106struct vmw_surface { 121struct vmw_surface {
107 struct vmw_resource res; 122 struct vmw_resource res;
108 struct list_head lru_head; /* Protected by the resource lock */
109 uint32_t flags; 123 uint32_t flags;
110 uint32_t format; 124 uint32_t format;
111 uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES]; 125 uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
126 struct drm_vmw_size base_size;
112 struct drm_vmw_size *sizes; 127 struct drm_vmw_size *sizes;
113 uint32_t num_sizes; 128 uint32_t num_sizes;
114
115 bool scanout; 129 bool scanout;
116
 117 /* TODO so far just an extra pointer */ 130 /* TODO so far just an extra pointer */
118 struct vmw_cursor_snooper snooper; 131 struct vmw_cursor_snooper snooper;
119 struct ttm_buffer_object *backup;
120 struct vmw_surface_offset *offsets; 132 struct vmw_surface_offset *offsets;
121 uint32_t backup_size; 133 SVGA3dTextureFilter autogen_filter;
134 uint32_t multisample_count;
122}; 135};
123 136
124struct vmw_marker_queue { 137struct vmw_marker_queue {
@@ -145,29 +158,46 @@ struct vmw_relocation {
145 uint32_t index; 158 uint32_t index;
146}; 159};
147 160
161/**
162 * struct vmw_res_cache_entry - resource information cache entry
163 *
164 * @valid: Whether the entry is valid, which also implies that the execbuf
165 * code holds a reference to the resource, and it's placed on the
166 * validation list.
167 * @handle: User-space handle of a resource.
168 * @res: Non-ref-counted pointer to the resource.
169 *
170 * Used to avoid frequent repeated user-space handle lookups of the
171 * same resource.
172 */
173struct vmw_res_cache_entry {
174 bool valid;
175 uint32_t handle;
176 struct vmw_resource *res;
177 struct vmw_resource_val_node *node;
178};
179
148struct vmw_sw_context{ 180struct vmw_sw_context{
149 struct ida bo_list; 181 struct drm_open_hash res_ht;
150 uint32_t last_cid; 182 bool res_ht_initialized;
151 bool cid_valid;
 152 bool kernel; /**< is the call made from the kernel */ 183 bool kernel; /**< is the call made from the kernel */
153 struct vmw_resource *cur_ctx;
154 uint32_t last_sid;
155 uint32_t sid_translation;
156 bool sid_valid;
157 struct ttm_object_file *tfile; 184 struct ttm_object_file *tfile;
158 struct list_head validate_nodes; 185 struct list_head validate_nodes;
159 struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS]; 186 struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
160 uint32_t cur_reloc; 187 uint32_t cur_reloc;
161 struct ttm_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS]; 188 struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
162 uint32_t cur_val_buf; 189 uint32_t cur_val_buf;
163 uint32_t *cmd_bounce; 190 uint32_t *cmd_bounce;
164 uint32_t cmd_bounce_size; 191 uint32_t cmd_bounce_size;
165 struct list_head resource_list; 192 struct list_head resource_list;
166 uint32_t fence_flags; 193 uint32_t fence_flags;
167 struct list_head query_list;
168 struct ttm_buffer_object *cur_query_bo; 194 struct ttm_buffer_object *cur_query_bo;
169 uint32_t cur_query_cid; 195 struct list_head res_relocations;
170 bool query_cid_valid; 196 uint32_t *buf_start;
197 struct vmw_res_cache_entry res_cache[vmw_res_max];
198 struct vmw_resource *last_query_ctx;
199 bool needs_post_query_barrier;
200 struct vmw_resource *error_resource;
171}; 201};
172 202
173struct vmw_legacy_display; 203struct vmw_legacy_display;
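
Editor's aside: the new struct vmw_res_cache_entry above implements a one-entry-per-type lookup cache so that repeated commands referencing the same user-space handle skip the full handle lookup. The following is a minimal standalone sketch of that pattern, not driver code; all names (lookup_cached, slow_lookup, res_cache_entry) are hypothetical.

#include <stdint.h>
#include <stdio.h>

struct resource { int id; };

struct res_cache_entry {
	int valid;              /* entry holds a usable translation */
	uint32_t handle;        /* user-space handle last looked up */
	struct resource *res;   /* non-ref-counted cached result */
};

/* Stand-in for the expensive handle -> resource lookup. */
static struct resource *slow_lookup(uint32_t handle)
{
	static struct resource r = { .id = 42 };
	printf("slow lookup of handle %u\n", (unsigned)handle);
	return &r;
}

static struct resource *lookup_cached(struct res_cache_entry *rc,
				      uint32_t handle)
{
	if (rc->valid && rc->handle == handle)
		return rc->res;             /* fastpath: cache hit */

	rc->res = slow_lookup(handle);      /* slowpath: miss */
	rc->handle = handle;
	rc->valid = 1;
	return rc->res;
}

int main(void)
{
	struct res_cache_entry rc = { 0 };

	lookup_cached(&rc, 7);  /* miss: performs the slow lookup */
	lookup_cached(&rc, 7);  /* hit: nothing printed */
	lookup_cached(&rc, 9);  /* different handle: miss again */
	return 0;
}

The cache is per resource type, which is why vmw_sw_context grows a res_cache[vmw_res_max] array rather than a single entry.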
@@ -242,10 +272,7 @@ struct vmw_private {
242 */ 272 */
243 273
244 rwlock_t resource_lock; 274 rwlock_t resource_lock;
245 struct idr context_idr; 275 struct idr res_idr[vmw_res_max];
246 struct idr surface_idr;
247 struct idr stream_idr;
248
249 /* 276 /*
250 * Block lastclose from racing with firstopen. 277 * Block lastclose from racing with firstopen.
251 */ 278 */
@@ -320,6 +347,7 @@ struct vmw_private {
320 struct ttm_buffer_object *dummy_query_bo; 347 struct ttm_buffer_object *dummy_query_bo;
321 struct ttm_buffer_object *pinned_bo; 348 struct ttm_buffer_object *pinned_bo;
322 uint32_t query_cid; 349 uint32_t query_cid;
350 uint32_t query_cid_valid;
323 bool dummy_query_bo_pinned; 351 bool dummy_query_bo_pinned;
324 352
325 /* 353 /*
@@ -329,10 +357,15 @@ struct vmw_private {
329 * protected by the cmdbuf mutex for simplicity. 357 * protected by the cmdbuf mutex for simplicity.
330 */ 358 */
331 359
332 struct list_head surface_lru; 360 struct list_head res_lru[vmw_res_max];
333 uint32_t used_memory_size; 361 uint32_t used_memory_size;
334}; 362};
335 363
364static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
365{
366 return container_of(res, struct vmw_surface, res);
367}
368
336static inline struct vmw_private *vmw_priv(struct drm_device *dev) 369static inline struct vmw_private *vmw_priv(struct drm_device *dev)
337{ 370{
338 return (struct vmw_private *)dev->dev_private; 371 return (struct vmw_private *)dev->dev_private;
@@ -381,10 +414,16 @@ extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
381/** 414/**
382 * Resource utilities - vmwgfx_resource.c 415 * Resource utilities - vmwgfx_resource.c
383 */ 416 */
417struct vmw_user_resource_conv;
418extern const struct vmw_user_resource_conv *user_surface_converter;
419extern const struct vmw_user_resource_conv *user_context_converter;
384 420
385extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv); 421extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
386extern void vmw_resource_unreference(struct vmw_resource **p_res); 422extern void vmw_resource_unreference(struct vmw_resource **p_res);
387extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res); 423extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
424extern int vmw_resource_validate(struct vmw_resource *res);
425extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
426extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
388extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, 427extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
389 struct drm_file *file_priv); 428 struct drm_file *file_priv);
390extern int vmw_context_define_ioctl(struct drm_device *dev, void *data, 429extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
@@ -398,14 +437,13 @@ extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
398 uint32_t handle, 437 uint32_t handle,
399 struct vmw_surface **out_surf, 438 struct vmw_surface **out_surf,
400 struct vmw_dma_buffer **out_buf); 439 struct vmw_dma_buffer **out_buf);
440extern int vmw_user_resource_lookup_handle(
441 struct vmw_private *dev_priv,
442 struct ttm_object_file *tfile,
443 uint32_t handle,
444 const struct vmw_user_resource_conv *converter,
445 struct vmw_resource **p_res);
401extern void vmw_surface_res_free(struct vmw_resource *res); 446extern void vmw_surface_res_free(struct vmw_resource *res);
402extern int vmw_surface_init(struct vmw_private *dev_priv,
403 struct vmw_surface *srf,
404 void (*res_free) (struct vmw_resource *res));
405extern int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
406 struct ttm_object_file *tfile,
407 uint32_t handle,
408 struct vmw_surface **out);
409extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, 447extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
410 struct drm_file *file_priv); 448 struct drm_file *file_priv);
411extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data, 449extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
@@ -423,6 +461,8 @@ extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
423 size_t size, struct ttm_placement *placement, 461 size_t size, struct ttm_placement *placement,
424 bool interuptable, 462 bool interuptable,
425 void (*bo_free) (struct ttm_buffer_object *bo)); 463 void (*bo_free) (struct ttm_buffer_object *bo));
464extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
465 struct ttm_object_file *tfile);
426extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, 466extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
427 struct drm_file *file_priv); 467 struct drm_file *file_priv);
428extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, 468extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
@@ -440,7 +480,14 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
440 struct ttm_object_file *tfile, 480 struct ttm_object_file *tfile,
441 uint32_t *inout_id, 481 uint32_t *inout_id,
442 struct vmw_resource **out); 482 struct vmw_resource **out);
443extern void vmw_resource_unreserve(struct list_head *list); 483extern void vmw_resource_unreserve(struct vmw_resource *res,
484 struct vmw_dma_buffer *new_backup,
485 unsigned long new_backup_offset);
486extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
487 struct ttm_mem_reg *mem);
488extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
489 struct vmw_fence_obj *fence);
490extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
444 491
445/** 492/**
446 * DMA buffer helper routines - vmwgfx_dmabuf.c 493 * DMA buffer helper routines - vmwgfx_dmabuf.c
@@ -538,10 +585,9 @@ extern int vmw_execbuf_process(struct drm_file *file_priv,
538 struct drm_vmw_fence_rep __user 585 struct drm_vmw_fence_rep __user
539 *user_fence_rep, 586 *user_fence_rep,
540 struct vmw_fence_obj **out_fence); 587 struct vmw_fence_obj **out_fence);
541 588extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
542extern void 589 struct vmw_fence_obj *fence);
543vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, 590extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);
544 bool only_on_cid_match, uint32_t cid);
545 591
546extern int vmw_execbuf_fence_commands(struct drm_file *file_priv, 592extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
547 struct vmw_private *dev_priv, 593 struct vmw_private *dev_priv,
@@ -699,10 +745,13 @@ static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
699static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf) 745static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
700{ 746{
701 struct vmw_dma_buffer *tmp_buf = *buf; 747 struct vmw_dma_buffer *tmp_buf = *buf;
702 struct ttm_buffer_object *bo = &tmp_buf->base; 748
703 *buf = NULL; 749 *buf = NULL;
750 if (tmp_buf != NULL) {
751 struct ttm_buffer_object *bo = &tmp_buf->base;
704 752
705 ttm_bo_unref(&bo); 753 ttm_bo_unref(&bo);
754 }
706} 755}
707 756
708static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf) 757static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
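
Editor's aside: the vmw_dmabuf_unreference() change above makes the helper tolerate a NULL pointer by clearing the caller's pointer first and only then conditionally dropping the reference. A minimal standalone sketch of the idiom follows (hypothetical names, not driver code):

#include <stddef.h>
#include <stdio.h>

struct object { int refcount; };

static void object_unref(struct object *obj)
{
	if (--obj->refcount == 0)
		printf("freeing object\n");
}

static void object_unreference(struct object **p_obj)
{
	struct object *tmp = *p_obj;

	*p_obj = NULL;          /* caller's pointer is always cleared */
	if (tmp != NULL)        /* safe to call with *p_obj == NULL */
		object_unref(tmp);
}

int main(void)
{
	struct object o = { .refcount = 1 };
	struct object *ref = &o;

	object_unreference(&ref);   /* drops the reference, ref == NULL */
	object_unreference(&ref);   /* no-op instead of a NULL dereference */
	return 0;
}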
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 30654b4cc972..394e6476105b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -30,6 +30,181 @@
30#include <drm/ttm/ttm_bo_api.h> 30#include <drm/ttm/ttm_bo_api.h>
31#include <drm/ttm/ttm_placement.h> 31#include <drm/ttm/ttm_placement.h>
32 32
33#define VMW_RES_HT_ORDER 12
34
35/**
36 * struct vmw_resource_relocation - Relocation info for resources
37 *
38 * @head: List head for the software context's relocation list.
39 * @res: Non-ref-counted pointer to the resource.
 40 * @offset: Offset into the command buffer, in 4-byte units, where the
41 * id that needs fixup is located.
42 */
43struct vmw_resource_relocation {
44 struct list_head head;
45 const struct vmw_resource *res;
46 unsigned long offset;
47};
48
49/**
50 * struct vmw_resource_val_node - Validation info for resources
51 *
52 * @head: List head for the software context's resource list.
 53 * @hash: Hash entry for quick resource to val_node lookup.
54 * @res: Ref-counted pointer to the resource.
55 * @switch_backup: Boolean whether to switch backup buffer on unreserve.
56 * @new_backup: Refcounted pointer to the new backup buffer.
 57 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
58 * @first_usage: Set to true the first time the resource is referenced in
59 * the command stream.
 60 * @no_buffer_needed: The resource does not need a backup buffer allocated
 61 * on reservation; the command stream will provide one.
62 */
63struct vmw_resource_val_node {
64 struct list_head head;
65 struct drm_hash_item hash;
66 struct vmw_resource *res;
67 struct vmw_dma_buffer *new_backup;
68 unsigned long new_backup_offset;
69 bool first_usage;
70 bool no_buffer_needed;
71};
72
73/**
 74 * vmw_resource_list_unreserve - Unreserve resources previously reserved for
 75 * command submission.
 76 *
 77 * @list: List of resources to unreserve.
78 * @backoff: Whether command submission failed.
79 */
80static void vmw_resource_list_unreserve(struct list_head *list,
81 bool backoff)
82{
83 struct vmw_resource_val_node *val;
84
85 list_for_each_entry(val, list, head) {
86 struct vmw_resource *res = val->res;
87 struct vmw_dma_buffer *new_backup =
88 backoff ? NULL : val->new_backup;
89
90 vmw_resource_unreserve(res, new_backup,
91 val->new_backup_offset);
92 vmw_dmabuf_unreference(&val->new_backup);
93 }
94}
95
96
97/**
98 * vmw_resource_val_add - Add a resource to the software context's
99 * resource list if it's not already on it.
100 *
101 * @sw_context: Pointer to the software context.
102 * @res: Pointer to the resource.
 103 * @p_node: On successful return points to a valid pointer to a
104 * struct vmw_resource_val_node, if non-NULL on entry.
105 */
106static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
107 struct vmw_resource *res,
108 struct vmw_resource_val_node **p_node)
109{
110 struct vmw_resource_val_node *node;
111 struct drm_hash_item *hash;
112 int ret;
113
114 if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
115 &hash) == 0)) {
116 node = container_of(hash, struct vmw_resource_val_node, hash);
117 node->first_usage = false;
118 if (unlikely(p_node != NULL))
119 *p_node = node;
120 return 0;
121 }
122
123 node = kzalloc(sizeof(*node), GFP_KERNEL);
124 if (unlikely(node == NULL)) {
125 DRM_ERROR("Failed to allocate a resource validation "
126 "entry.\n");
127 return -ENOMEM;
128 }
129
130 node->hash.key = (unsigned long) res;
131 ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
132 if (unlikely(ret != 0)) {
133 DRM_ERROR("Failed to initialize a resource validation "
134 "entry.\n");
135 kfree(node);
136 return ret;
137 }
138 list_add_tail(&node->head, &sw_context->resource_list);
139 node->res = vmw_resource_reference(res);
140 node->first_usage = true;
141
142 if (unlikely(p_node != NULL))
143 *p_node = node;
144
145 return 0;
146}
147
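
Editor's aside: vmw_resource_val_add() above is a find-or-insert keyed on the resource pointer; the first add takes a reference and marks first_usage, later adds of the same resource are deduplicated. A standalone sketch of the semantics follows, with a plain array standing in for the drm_open_hash table (names hypothetical, not driver code):

#include <stdio.h>

#define MAX_NODES 16

struct val_node {
	void *res;          /* key: resource pointer */
	int first_usage;
};

static struct val_node nodes[MAX_NODES];
static int num_nodes;

static struct val_node *val_add(void *res)
{
	int i;

	for (i = 0; i < num_nodes; ++i) {
		if (nodes[i].res == res) {
			nodes[i].first_usage = 0;   /* seen before */
			return &nodes[i];
		}
	}
	if (num_nodes == MAX_NODES)
		return NULL;                        /* table full */

	nodes[num_nodes].res = res;
	nodes[num_nodes].first_usage = 1;           /* reference taken here */
	return &nodes[num_nodes++];
}

int main(void)
{
	int a, b;

	printf("%d\n", val_add(&a)->first_usage);   /* 1: first add */
	printf("%d\n", val_add(&a)->first_usage);   /* 0: deduplicated */
	printf("%d\n", val_add(&b)->first_usage);   /* 1: new resource */
	return 0;
}

The driver uses a hash table (VMW_RES_HT_ORDER above gives 2^12 buckets) rather than the linear scan shown here, so lookups stay cheap even with many resources per submission.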
148/**
149 * vmw_resource_relocation_add - Add a relocation to the relocation list
150 *
151 * @list: Pointer to head of relocation list.
152 * @res: The resource.
153 * @offset: Offset into the command buffer currently being parsed where the
154 * id that needs fixup is located. Granularity is 4 bytes.
155 */
156static int vmw_resource_relocation_add(struct list_head *list,
157 const struct vmw_resource *res,
158 unsigned long offset)
159{
160 struct vmw_resource_relocation *rel;
161
162 rel = kmalloc(sizeof(*rel), GFP_KERNEL);
163 if (unlikely(rel == NULL)) {
164 DRM_ERROR("Failed to allocate a resource relocation.\n");
165 return -ENOMEM;
166 }
167
168 rel->res = res;
169 rel->offset = offset;
170 list_add_tail(&rel->head, list);
171
172 return 0;
173}
174
175/**
176 * vmw_resource_relocations_free - Free all relocations on a list
177 *
178 * @list: Pointer to the head of the relocation list.
179 */
180static void vmw_resource_relocations_free(struct list_head *list)
181{
182 struct vmw_resource_relocation *rel, *n;
183
184 list_for_each_entry_safe(rel, n, list, head) {
185 list_del(&rel->head);
186 kfree(rel);
187 }
188}
189
190/**
191 * vmw_resource_relocations_apply - Apply all relocations on a list
192 *
 193 * @cb: Pointer to the start of the command buffer being patched. This need
194 * not be the same buffer as the one being parsed when the relocation
195 * list was built, but the contents must be the same modulo the
196 * resource ids.
197 * @list: Pointer to the head of the relocation list.
198 */
199static void vmw_resource_relocations_apply(uint32_t *cb,
200 struct list_head *list)
201{
202 struct vmw_resource_relocation *rel;
203
204 list_for_each_entry(rel, list, head)
205 cb[rel->offset] = rel->res->id;
206}
207
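
Editor's aside: the relocation helpers above implement a record-then-patch scheme. While the stream is parsed, only the position of each id slot (in 32-bit words from the buffer start) is recorded; once every resource has its final device id, all slots are patched in one pass over the copy in the fifo. A standalone sketch (hypothetical names, not driver code):

#include <stdint.h>
#include <stdio.h>

#define MAX_RELOCS 8

struct reloc {
	unsigned long offset;   /* index of the id slot in the buffer */
	int32_t id;             /* device id to write there */
};

static struct reloc relocs[MAX_RELOCS];
static unsigned num_relocs;

static void reloc_add(uint32_t *buf_start, uint32_t *id_slot, int32_t id)
{
	/* Pointer difference on uint32_t* yields a 4-byte-unit offset. */
	relocs[num_relocs].offset = (unsigned long)(id_slot - buf_start);
	relocs[num_relocs].id = id;
	num_relocs++;
}

static void relocs_apply(uint32_t *cb)
{
	unsigned i;

	for (i = 0; i < num_relocs; ++i)
		cb[relocs[i].offset] = (uint32_t)relocs[i].id;
}

int main(void)
{
	uint32_t cmd_buf[4] = { 0xdead, 0xbeef, 0xdead, 0xbeef };

	reloc_add(cmd_buf, &cmd_buf[1], 17);    /* slot 1 -> id 17 */
	reloc_add(cmd_buf, &cmd_buf[3], 23);    /* slot 3 -> id 23 */
	relocs_apply(cmd_buf);                  /* patch after parsing */

	printf("%u %u\n", (unsigned)cmd_buf[1], (unsigned)cmd_buf[3]);
	return 0;
}

Recording offsets rather than pointers is what allows the relocations to be applied to a different buffer (the fifo copy) than the one that was parsed, as the @cb kernel-doc above notes.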
33static int vmw_cmd_invalid(struct vmw_private *dev_priv, 208static int vmw_cmd_invalid(struct vmw_private *dev_priv,
34 struct vmw_sw_context *sw_context, 209 struct vmw_sw_context *sw_context,
35 SVGA3dCmdHeader *header) 210 SVGA3dCmdHeader *header)
@@ -44,25 +219,11 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
44 return 0; 219 return 0;
45} 220}
46 221
47static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
48 struct vmw_resource **p_res)
49{
50 struct vmw_resource *res = *p_res;
51
52 if (list_empty(&res->validate_head)) {
53 list_add_tail(&res->validate_head, &sw_context->resource_list);
54 *p_res = NULL;
55 } else
56 vmw_resource_unreference(p_res);
57}
58
59/** 222/**
60 * vmw_bo_to_validate_list - add a bo to a validate list 223 * vmw_bo_to_validate_list - add a bo to a validate list
61 * 224 *
62 * @sw_context: The software context used for this command submission batch. 225 * @sw_context: The software context used for this command submission batch.
63 * @bo: The buffer object to add. 226 * @bo: The buffer object to add.
64 * @fence_flags: Fence flags to be or'ed with any other fence flags for
65 * this buffer on this submission batch.
 66 * @p_val_node: If non-NULL, will be updated with the validate node number 227 * @p_val_node: If non-NULL, will be updated with the validate node number
67 * on return. 228 * on return.
68 * 229 *
@@ -71,31 +232,43 @@ static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
71 */ 232 */
72static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context, 233static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
73 struct ttm_buffer_object *bo, 234 struct ttm_buffer_object *bo,
74 uint32_t fence_flags,
75 uint32_t *p_val_node) 235 uint32_t *p_val_node)
76{ 236{
77 uint32_t val_node; 237 uint32_t val_node;
238 struct vmw_validate_buffer *vval_buf;
78 struct ttm_validate_buffer *val_buf; 239 struct ttm_validate_buffer *val_buf;
240 struct drm_hash_item *hash;
241 int ret;
79 242
80 val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf); 243 if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
81 244 &hash) == 0)) {
82 if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) { 245 vval_buf = container_of(hash, struct vmw_validate_buffer,
83 DRM_ERROR("Max number of DMA buffers per submission" 246 hash);
84 " exceeded.\n"); 247 val_buf = &vval_buf->base;
85 return -EINVAL; 248 val_node = vval_buf - sw_context->val_bufs;
86 } 249 } else {
87 250 val_node = sw_context->cur_val_buf;
88 val_buf = &sw_context->val_bufs[val_node]; 251 if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
89 if (unlikely(val_node == sw_context->cur_val_buf)) { 252 DRM_ERROR("Max number of DMA buffers per submission "
90 val_buf->new_sync_obj_arg = NULL; 253 "exceeded.\n");
254 return -EINVAL;
255 }
256 vval_buf = &sw_context->val_bufs[val_node];
257 vval_buf->hash.key = (unsigned long) bo;
258 ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
259 if (unlikely(ret != 0)) {
260 DRM_ERROR("Failed to initialize a buffer validation "
261 "entry.\n");
262 return ret;
263 }
264 ++sw_context->cur_val_buf;
265 val_buf = &vval_buf->base;
91 val_buf->bo = ttm_bo_reference(bo); 266 val_buf->bo = ttm_bo_reference(bo);
267 val_buf->reserved = false;
92 list_add_tail(&val_buf->head, &sw_context->validate_nodes); 268 list_add_tail(&val_buf->head, &sw_context->validate_nodes);
93 ++sw_context->cur_val_buf;
94 } 269 }
95 270
96 val_buf->new_sync_obj_arg = (void *) 271 sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
97 ((unsigned long) val_buf->new_sync_obj_arg | fence_flags);
98 sw_context->fence_flags |= fence_flags;
99 272
100 if (p_val_node) 273 if (p_val_node)
101 *p_val_node = val_node; 274 *p_val_node = val_node;
@@ -103,85 +276,174 @@ static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
103 return 0; 276 return 0;
104} 277}
105 278
106static int vmw_cmd_cid_check(struct vmw_private *dev_priv, 279/**
107 struct vmw_sw_context *sw_context, 280 * vmw_resources_reserve - Reserve all resources on the sw_context's
108 SVGA3dCmdHeader *header) 281 * resource list.
282 *
283 * @sw_context: Pointer to the software context.
284 *
 285 * Note that since VMware's command submission is currently protected by
286 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
287 * since only a single thread at once will attempt this.
288 */
289static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
109{ 290{
110 struct vmw_resource *ctx; 291 struct vmw_resource_val_node *val;
111
112 struct vmw_cid_cmd {
113 SVGA3dCmdHeader header;
114 __le32 cid;
115 } *cmd;
116 int ret; 292 int ret;
117 293
118 cmd = container_of(header, struct vmw_cid_cmd, header); 294 list_for_each_entry(val, &sw_context->resource_list, head) {
119 if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid)) 295 struct vmw_resource *res = val->res;
120 return 0;
121 296
122 ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid, 297 ret = vmw_resource_reserve(res, val->no_buffer_needed);
123 &ctx); 298 if (unlikely(ret != 0))
124 if (unlikely(ret != 0)) { 299 return ret;
125 DRM_ERROR("Could not find or use context %u\n", 300
126 (unsigned) cmd->cid); 301 if (res->backup) {
127 return ret; 302 struct ttm_buffer_object *bo = &res->backup->base;
303
304 ret = vmw_bo_to_validate_list
305 (sw_context, bo, NULL);
306
307 if (unlikely(ret != 0))
308 return ret;
309 }
128 } 310 }
311 return 0;
312}
129 313
130 sw_context->last_cid = cmd->cid; 314/**
131 sw_context->cid_valid = true; 315 * vmw_resources_validate - Validate all resources on the sw_context's
132 sw_context->cur_ctx = ctx; 316 * resource list.
133 vmw_resource_to_validate_list(sw_context, &ctx); 317 *
318 * @sw_context: Pointer to the software context.
319 *
320 * Before this function is called, all resource backup buffers must have
321 * been validated.
322 */
323static int vmw_resources_validate(struct vmw_sw_context *sw_context)
324{
325 struct vmw_resource_val_node *val;
326 int ret;
327
328 list_for_each_entry(val, &sw_context->resource_list, head) {
329 struct vmw_resource *res = val->res;
134 330
331 ret = vmw_resource_validate(res);
332 if (unlikely(ret != 0)) {
333 if (ret != -ERESTARTSYS)
334 DRM_ERROR("Failed to validate resource.\n");
335 return ret;
336 }
337 }
135 return 0; 338 return 0;
136} 339}
137 340
138static int vmw_cmd_sid_check(struct vmw_private *dev_priv, 341/**
342 * vmw_cmd_res_check - Check that a resource is present and if so, put it
343 * on the resource validate list unless it's already there.
344 *
345 * @dev_priv: Pointer to a device private structure.
346 * @sw_context: Pointer to the software context.
347 * @res_type: Resource type.
 348 * @converter: User-space visible type-specific information.
349 * @id: Pointer to the location in the command buffer currently being
350 * parsed from where the user-space resource id handle is located.
351 */
352static int vmw_cmd_res_check(struct vmw_private *dev_priv,
139 struct vmw_sw_context *sw_context, 353 struct vmw_sw_context *sw_context,
140 uint32_t *sid) 354 enum vmw_res_type res_type,
355 const struct vmw_user_resource_conv *converter,
356 uint32_t *id,
357 struct vmw_resource_val_node **p_val)
141{ 358{
142 struct vmw_surface *srf; 359 struct vmw_res_cache_entry *rcache =
143 int ret; 360 &sw_context->res_cache[res_type];
144 struct vmw_resource *res; 361 struct vmw_resource *res;
362 struct vmw_resource_val_node *node;
363 int ret;
145 364
146 if (*sid == SVGA3D_INVALID_ID) 365 if (*id == SVGA3D_INVALID_ID)
147 return 0; 366 return 0;
148 367
149 if (likely((sw_context->sid_valid && 368 /*
150 *sid == sw_context->last_sid))) { 369 * Fastpath in case of repeated commands referencing the same
151 *sid = sw_context->sid_translation; 370 * resource
152 return 0; 371 */
153 }
154 372
155 ret = vmw_user_surface_lookup_handle(dev_priv, 373 if (likely(rcache->valid && *id == rcache->handle)) {
156 sw_context->tfile, 374 const struct vmw_resource *res = rcache->res;
157 *sid, &srf); 375
158 if (unlikely(ret != 0)) { 376 rcache->node->first_usage = false;
159 DRM_ERROR("Could ot find or use surface 0x%08x " 377 if (p_val)
160 "address 0x%08lx\n", 378 *p_val = rcache->node;
161 (unsigned int) *sid, 379
162 (unsigned long) sid); 380 return vmw_resource_relocation_add
163 return ret; 381 (&sw_context->res_relocations, res,
382 id - sw_context->buf_start);
164 } 383 }
165 384
166 ret = vmw_surface_validate(dev_priv, srf); 385 ret = vmw_user_resource_lookup_handle(dev_priv,
386 sw_context->tfile,
387 *id,
388 converter,
389 &res);
167 if (unlikely(ret != 0)) { 390 if (unlikely(ret != 0)) {
168 if (ret != -ERESTARTSYS) 391 DRM_ERROR("Could not find or use resource 0x%08x.\n",
169 DRM_ERROR("Could not validate surface.\n"); 392 (unsigned) *id);
170 vmw_surface_unreference(&srf); 393 dump_stack();
171 return ret; 394 return ret;
172 } 395 }
173 396
174 sw_context->last_sid = *sid; 397 rcache->valid = true;
175 sw_context->sid_valid = true; 398 rcache->res = res;
176 sw_context->sid_translation = srf->res.id; 399 rcache->handle = *id;
177 *sid = sw_context->sid_translation;
178 400
179 res = &srf->res; 401 ret = vmw_resource_relocation_add(&sw_context->res_relocations,
180 vmw_resource_to_validate_list(sw_context, &res); 402 res,
403 id - sw_context->buf_start);
404 if (unlikely(ret != 0))
405 goto out_no_reloc;
406
407 ret = vmw_resource_val_add(sw_context, res, &node);
408 if (unlikely(ret != 0))
409 goto out_no_reloc;
181 410
411 rcache->node = node;
412 if (p_val)
413 *p_val = node;
414 vmw_resource_unreference(&res);
182 return 0; 415 return 0;
416
417out_no_reloc:
418 BUG_ON(sw_context->error_resource != NULL);
419 sw_context->error_resource = res;
420
421 return ret;
183} 422}
184 423
424/**
425 * vmw_cmd_cid_check - Check a command header for valid context information.
426 *
427 * @dev_priv: Pointer to a device private structure.
428 * @sw_context: Pointer to the software context.
429 * @header: A command header with an embedded user-space context handle.
430 *
431 * Convenience function: Call vmw_cmd_res_check with the user-space context
432 * handle embedded in @header.
433 */
434static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
435 struct vmw_sw_context *sw_context,
436 SVGA3dCmdHeader *header)
437{
438 struct vmw_cid_cmd {
439 SVGA3dCmdHeader header;
440 __le32 cid;
441 } *cmd;
442
443 cmd = container_of(header, struct vmw_cid_cmd, header);
444 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
445 user_context_converter, &cmd->cid, NULL);
446}
185 447
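
Editor's aside: vmw_cmd_cid_check() above (like vmw_res_to_srf() in the header) relies on container_of() to recover an enclosing structure from a pointer to an embedded member. A portable user-space sketch of the idiom (hypothetical command layout, not driver code):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct header { int size; };

struct cid_cmd {
	struct header header;   /* embedded member */
	unsigned int cid;
};

int main(void)
{
	struct cid_cmd cmd = { .header = { .size = 8 }, .cid = 3 };
	struct header *hdr = &cmd.header;

	/* Recover the full command from the embedded header pointer. */
	struct cid_cmd *back = container_of(hdr, struct cid_cmd, header);

	printf("cid = %u\n", back->cid);    /* prints: cid = 3 */
	return 0;
}

This is why the command validators can be handed only the SVGA3dCmdHeader yet still reach the command body that follows it.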
186static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, 448static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
187 struct vmw_sw_context *sw_context, 449 struct vmw_sw_context *sw_context,
@@ -198,7 +460,9 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
198 return ret; 460 return ret;
199 461
200 cmd = container_of(header, struct vmw_sid_cmd, header); 462 cmd = container_of(header, struct vmw_sid_cmd, header);
201 ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid); 463 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
464 user_surface_converter,
465 &cmd->body.target.sid, NULL);
202 return ret; 466 return ret;
203} 467}
204 468
@@ -213,10 +477,14 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
213 int ret; 477 int ret;
214 478
215 cmd = container_of(header, struct vmw_sid_cmd, header); 479 cmd = container_of(header, struct vmw_sid_cmd, header);
216 ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid); 480 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
481 user_surface_converter,
482 &cmd->body.src.sid, NULL);
217 if (unlikely(ret != 0)) 483 if (unlikely(ret != 0))
218 return ret; 484 return ret;
219 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid); 485 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
486 user_surface_converter,
487 &cmd->body.dest.sid, NULL);
220} 488}
221 489
222static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv, 490static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
@@ -230,10 +498,14 @@ static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
230 int ret; 498 int ret;
231 499
232 cmd = container_of(header, struct vmw_sid_cmd, header); 500 cmd = container_of(header, struct vmw_sid_cmd, header);
233 ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid); 501 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
502 user_surface_converter,
503 &cmd->body.src.sid, NULL);
234 if (unlikely(ret != 0)) 504 if (unlikely(ret != 0))
235 return ret; 505 return ret;
236 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid); 506 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
507 user_surface_converter,
508 &cmd->body.dest.sid, NULL);
237} 509}
238 510
239static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, 511static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
@@ -252,7 +524,9 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
252 return -EPERM; 524 return -EPERM;
253 } 525 }
254 526
255 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid); 527 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
528 user_surface_converter,
529 &cmd->body.srcImage.sid, NULL);
256} 530}
257 531
258static int vmw_cmd_present_check(struct vmw_private *dev_priv, 532static int vmw_cmd_present_check(struct vmw_private *dev_priv,
@@ -272,14 +546,15 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
272 return -EPERM; 546 return -EPERM;
273 } 547 }
274 548
275 return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid); 549 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
550 user_surface_converter, &cmd->body.sid,
551 NULL);
276} 552}
277 553
278/** 554/**
279 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries. 555 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
280 * 556 *
281 * @dev_priv: The device private structure. 557 * @dev_priv: The device private structure.
282 * @cid: The hardware context for the next query.
283 * @new_query_bo: The new buffer holding query results. 558 * @new_query_bo: The new buffer holding query results.
284 * @sw_context: The software context used for this command submission. 559 * @sw_context: The software context used for this command submission.
285 * 560 *
@@ -287,18 +562,18 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
287 * query results, and if another buffer currently is pinned for query 562 * query results, and if another buffer currently is pinned for query
288 * results. If so, the function prepares the state of @sw_context for 563 * results. If so, the function prepares the state of @sw_context for
289 * switching pinned buffers after successful submission of the current 564 * switching pinned buffers after successful submission of the current
290 * command batch. It also checks whether we're using a new query context. 565 * command batch.
291 * In that case, it makes sure we emit a query barrier for the old
292 * context before the current query buffer is fenced.
293 */ 566 */
294static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, 567static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
295 uint32_t cid,
296 struct ttm_buffer_object *new_query_bo, 568 struct ttm_buffer_object *new_query_bo,
297 struct vmw_sw_context *sw_context) 569 struct vmw_sw_context *sw_context)
298{ 570{
571 struct vmw_res_cache_entry *ctx_entry =
572 &sw_context->res_cache[vmw_res_context];
299 int ret; 573 int ret;
300 bool add_cid = false; 574
301 uint32_t cid_to_add; 575 BUG_ON(!ctx_entry->valid);
576 sw_context->last_query_ctx = ctx_entry->res;
302 577
303 if (unlikely(new_query_bo != sw_context->cur_query_bo)) { 578 if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
304 579
@@ -308,12 +583,9 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
308 } 583 }
309 584
310 if (unlikely(sw_context->cur_query_bo != NULL)) { 585 if (unlikely(sw_context->cur_query_bo != NULL)) {
311 BUG_ON(!sw_context->query_cid_valid); 586 sw_context->needs_post_query_barrier = true;
312 add_cid = true;
313 cid_to_add = sw_context->cur_query_cid;
314 ret = vmw_bo_to_validate_list(sw_context, 587 ret = vmw_bo_to_validate_list(sw_context,
315 sw_context->cur_query_bo, 588 sw_context->cur_query_bo,
316 DRM_VMW_FENCE_FLAG_EXEC,
317 NULL); 589 NULL);
318 if (unlikely(ret != 0)) 590 if (unlikely(ret != 0))
319 return ret; 591 return ret;
@@ -322,35 +594,12 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
322 594
323 ret = vmw_bo_to_validate_list(sw_context, 595 ret = vmw_bo_to_validate_list(sw_context,
324 dev_priv->dummy_query_bo, 596 dev_priv->dummy_query_bo,
325 DRM_VMW_FENCE_FLAG_EXEC,
326 NULL); 597 NULL);
327 if (unlikely(ret != 0)) 598 if (unlikely(ret != 0))
328 return ret; 599 return ret;
329 600
330 } 601 }
331 602
332 if (unlikely(cid != sw_context->cur_query_cid &&
333 sw_context->query_cid_valid)) {
334 add_cid = true;
335 cid_to_add = sw_context->cur_query_cid;
336 }
337
338 sw_context->cur_query_cid = cid;
339 sw_context->query_cid_valid = true;
340
341 if (add_cid) {
342 struct vmw_resource *ctx = sw_context->cur_ctx;
343
344 if (list_empty(&ctx->query_head))
345 list_add_tail(&ctx->query_head,
346 &sw_context->query_list);
347 ret = vmw_bo_to_validate_list(sw_context,
348 dev_priv->dummy_query_bo,
349 DRM_VMW_FENCE_FLAG_EXEC,
350 NULL);
351 if (unlikely(ret != 0))
352 return ret;
353 }
354 return 0; 603 return 0;
355} 604}
356 605
@@ -362,10 +611,9 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
362 * @sw_context: The software context used for this command submission batch. 611 * @sw_context: The software context used for this command submission batch.
363 * 612 *
364 * This function will check if we're switching query buffers, and will then, 613 * This function will check if we're switching query buffers, and will then,
 365 * if no other query waits are issued in this command submission batch,
366 * issue a dummy occlusion query wait used as a query barrier. When the fence 614 * issue a dummy occlusion query wait used as a query barrier. When the fence
367 * object following that query wait has signaled, we are sure that all 615 * object following that query wait has signaled, we are sure that all
368 * preseding queries have finished, and the old query buffer can be unpinned. 616 * preceding queries have finished, and the old query buffer can be unpinned.
369 * However, since both the new query buffer and the old one are fenced with 617 * However, since both the new query buffer and the old one are fenced with
 370 * that fence, we can do an asynchronous unpin now, and be sure that the 618 * that fence, we can do an asynchronous unpin now, and be sure that the
371 * old query buffer won't be moved until the fence has signaled. 619 * old query buffer won't be moved until the fence has signaled.
@@ -376,20 +624,19 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
376static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, 624static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
377 struct vmw_sw_context *sw_context) 625 struct vmw_sw_context *sw_context)
378{ 626{
379
380 struct vmw_resource *ctx, *next_ctx;
381 int ret;
382
383 /* 627 /*
384 * The validate list should still hold references to all 628 * The validate list should still hold references to all
385 * contexts here. 629 * contexts here.
386 */ 630 */
387 631
388 list_for_each_entry_safe(ctx, next_ctx, &sw_context->query_list, 632 if (sw_context->needs_post_query_barrier) {
389 query_head) { 633 struct vmw_res_cache_entry *ctx_entry =
390 list_del_init(&ctx->query_head); 634 &sw_context->res_cache[vmw_res_context];
635 struct vmw_resource *ctx;
636 int ret;
391 637
392 BUG_ON(list_empty(&ctx->validate_head)); 638 BUG_ON(!ctx_entry->valid);
639 ctx = ctx_entry->res;
393 640
394 ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id); 641 ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
395 642
@@ -403,40 +650,46 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
403 ttm_bo_unref(&dev_priv->pinned_bo); 650 ttm_bo_unref(&dev_priv->pinned_bo);
404 } 651 }
405 652
406 vmw_bo_pin(sw_context->cur_query_bo, true); 653 if (!sw_context->needs_post_query_barrier) {
654 vmw_bo_pin(sw_context->cur_query_bo, true);
407 655
408 /* 656 /*
409 * We pin also the dummy_query_bo buffer so that we 657 * We pin also the dummy_query_bo buffer so that we
410 * don't need to validate it when emitting 658 * don't need to validate it when emitting
411 * dummy queries in context destroy paths. 659 * dummy queries in context destroy paths.
412 */ 660 */
413 661
414 vmw_bo_pin(dev_priv->dummy_query_bo, true); 662 vmw_bo_pin(dev_priv->dummy_query_bo, true);
415 dev_priv->dummy_query_bo_pinned = true; 663 dev_priv->dummy_query_bo_pinned = true;
416 664
417 dev_priv->query_cid = sw_context->cur_query_cid; 665 BUG_ON(sw_context->last_query_ctx == NULL);
418 dev_priv->pinned_bo = 666 dev_priv->query_cid = sw_context->last_query_ctx->id;
419 ttm_bo_reference(sw_context->cur_query_bo); 667 dev_priv->query_cid_valid = true;
668 dev_priv->pinned_bo =
669 ttm_bo_reference(sw_context->cur_query_bo);
670 }
420 } 671 }
421} 672}
422 673
423/** 674/**
424 * vmw_query_switch_backoff - clear query barrier list 675 * vmw_translate_guest_pointer - Prepare to translate a user-space buffer
425 * @sw_context: The sw context used for this submission batch. 676 * handle to a valid SVGAGuestPtr
426 * 677 *
427 * This function is used as part of an error path, where a previously 678 * @dev_priv: Pointer to a device private structure.
428 * set up list of query barriers needs to be cleared. 679 * @sw_context: The software context used for this command batch validation.
680 * @ptr: Pointer to the user-space handle to be translated.
 681 * @vmw_bo_p: Points to a location that, on successful return, will carry
 682 * a reference-counted pointer to the DMA buffer identified by the
 683 * user-space handle in @ptr.
429 * 684 *
685 * This function saves information needed to translate a user-space buffer
686 * handle to a valid SVGAGuestPtr. The translation does not take place
687 * immediately, but during a call to vmw_apply_relocations().
688 * This function builds a relocation list and a list of buffers to validate.
689 * The former needs to be freed using either vmw_apply_relocations() or
690 * vmw_free_relocations(). The latter needs to be freed using
691 * vmw_clear_validations.
430 */ 692 */
431static void vmw_query_switch_backoff(struct vmw_sw_context *sw_context)
432{
433 struct list_head *list, *next;
434
435 list_for_each_safe(list, next, &sw_context->query_list) {
436 list_del_init(list);
437 }
438}
439
440static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, 693static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
441 struct vmw_sw_context *sw_context, 694 struct vmw_sw_context *sw_context,
442 SVGAGuestPtr *ptr, 695 SVGAGuestPtr *ptr,
@@ -465,8 +718,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
465 reloc = &sw_context->relocs[sw_context->cur_reloc++]; 718 reloc = &sw_context->relocs[sw_context->cur_reloc++];
466 reloc->location = ptr; 719 reloc->location = ptr;
467 720
468 ret = vmw_bo_to_validate_list(sw_context, bo, DRM_VMW_FENCE_FLAG_EXEC, 721 ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index);
469 &reloc->index);
470 if (unlikely(ret != 0)) 722 if (unlikely(ret != 0))
471 goto out_no_reloc; 723 goto out_no_reloc;
472 724
@@ -479,6 +731,37 @@ out_no_reloc:
479 return ret; 731 return ret;
480} 732}
481 733
734/**
735 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
736 *
737 * @dev_priv: Pointer to a device private struct.
738 * @sw_context: The software context used for this command submission.
739 * @header: Pointer to the command header in the command stream.
740 */
741static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
742 struct vmw_sw_context *sw_context,
743 SVGA3dCmdHeader *header)
744{
745 struct vmw_begin_query_cmd {
746 SVGA3dCmdHeader header;
747 SVGA3dCmdBeginQuery q;
748 } *cmd;
749
750 cmd = container_of(header, struct vmw_begin_query_cmd,
751 header);
752
753 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
754 user_context_converter, &cmd->q.cid,
755 NULL);
756}
757
758/**
759 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
760 *
761 * @dev_priv: Pointer to a device private struct.
762 * @sw_context: The software context used for this command submission.
763 * @header: Pointer to the command header in the command stream.
764 */
482static int vmw_cmd_end_query(struct vmw_private *dev_priv, 765static int vmw_cmd_end_query(struct vmw_private *dev_priv,
483 struct vmw_sw_context *sw_context, 766 struct vmw_sw_context *sw_context,
484 SVGA3dCmdHeader *header) 767 SVGA3dCmdHeader *header)
@@ -501,13 +784,19 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
501 if (unlikely(ret != 0)) 784 if (unlikely(ret != 0))
502 return ret; 785 return ret;
503 786
504 ret = vmw_query_bo_switch_prepare(dev_priv, cmd->q.cid, 787 ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
505 &vmw_bo->base, sw_context);
506 788
507 vmw_dmabuf_unreference(&vmw_bo); 789 vmw_dmabuf_unreference(&vmw_bo);
508 return ret; 790 return ret;
509} 791}
510 792
 793/**
794 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
795 *
796 * @dev_priv: Pointer to a device private struct.
797 * @sw_context: The software context used for this command submission.
798 * @header: Pointer to the command header in the command stream.
799 */
511static int vmw_cmd_wait_query(struct vmw_private *dev_priv, 800static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
512 struct vmw_sw_context *sw_context, 801 struct vmw_sw_context *sw_context,
513 SVGA3dCmdHeader *header) 802 SVGA3dCmdHeader *header)
@@ -518,7 +807,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
518 SVGA3dCmdWaitForQuery q; 807 SVGA3dCmdWaitForQuery q;
519 } *cmd; 808 } *cmd;
520 int ret; 809 int ret;
521 struct vmw_resource *ctx;
522 810
523 cmd = container_of(header, struct vmw_query_cmd, header); 811 cmd = container_of(header, struct vmw_query_cmd, header);
524 ret = vmw_cmd_cid_check(dev_priv, sw_context, header); 812 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
@@ -532,16 +820,6 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
532 return ret; 820 return ret;
533 821
534 vmw_dmabuf_unreference(&vmw_bo); 822 vmw_dmabuf_unreference(&vmw_bo);
535
536 /*
537 * This wait will act as a barrier for previous waits for this
538 * context.
539 */
540
541 ctx = sw_context->cur_ctx;
542 if (!list_empty(&ctx->query_head))
543 list_del_init(&ctx->query_head);
544
545 return 0; 823 return 0;
546} 824}
547 825
@@ -550,14 +828,12 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
550 SVGA3dCmdHeader *header) 828 SVGA3dCmdHeader *header)
551{ 829{
552 struct vmw_dma_buffer *vmw_bo = NULL; 830 struct vmw_dma_buffer *vmw_bo = NULL;
553 struct ttm_buffer_object *bo;
554 struct vmw_surface *srf = NULL; 831 struct vmw_surface *srf = NULL;
555 struct vmw_dma_cmd { 832 struct vmw_dma_cmd {
556 SVGA3dCmdHeader header; 833 SVGA3dCmdHeader header;
557 SVGA3dCmdSurfaceDMA dma; 834 SVGA3dCmdSurfaceDMA dma;
558 } *cmd; 835 } *cmd;
559 int ret; 836 int ret;
560 struct vmw_resource *res;
561 837
562 cmd = container_of(header, struct vmw_dma_cmd, header); 838 cmd = container_of(header, struct vmw_dma_cmd, header);
563 ret = vmw_translate_guest_ptr(dev_priv, sw_context, 839 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
@@ -566,37 +842,20 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
566 if (unlikely(ret != 0)) 842 if (unlikely(ret != 0))
567 return ret; 843 return ret;
568 844
569 bo = &vmw_bo->base; 845 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
570 ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile, 846 user_surface_converter, &cmd->dma.host.sid,
571 cmd->dma.host.sid, &srf); 847 NULL);
572 if (ret) {
573 DRM_ERROR("could not find surface\n");
574 goto out_no_reloc;
575 }
576
577 ret = vmw_surface_validate(dev_priv, srf);
578 if (unlikely(ret != 0)) { 848 if (unlikely(ret != 0)) {
579 if (ret != -ERESTARTSYS) 849 if (unlikely(ret != -ERESTARTSYS))
580 DRM_ERROR("Culd not validate surface.\n"); 850 DRM_ERROR("could not find surface for DMA.\n");
581 goto out_no_validate; 851 goto out_no_surface;
582 } 852 }
583 853
584 /* 854 srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
585 * Patch command stream with device SID.
586 */
587 cmd->dma.host.sid = srf->res.id;
588 vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
589
590 vmw_dmabuf_unreference(&vmw_bo);
591
592 res = &srf->res;
593 vmw_resource_to_validate_list(sw_context, &res);
594 855
595 return 0; 856 vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);
596 857
597out_no_validate: 858out_no_surface:
598 vmw_surface_unreference(&srf);
599out_no_reloc:
600 vmw_dmabuf_unreference(&vmw_bo); 859 vmw_dmabuf_unreference(&vmw_bo);
601 return ret; 860 return ret;
602} 861}
@@ -629,8 +888,9 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
629 } 888 }
630 889
631 for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) { 890 for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
632 ret = vmw_cmd_sid_check(dev_priv, sw_context, 891 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
633 &decl->array.surfaceId); 892 user_surface_converter,
893 &decl->array.surfaceId, NULL);
634 if (unlikely(ret != 0)) 894 if (unlikely(ret != 0))
635 return ret; 895 return ret;
636 } 896 }
@@ -644,8 +904,9 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
644 904
645 range = (SVGA3dPrimitiveRange *) decl; 905 range = (SVGA3dPrimitiveRange *) decl;
646 for (i = 0; i < cmd->body.numRanges; ++i, ++range) { 906 for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
647 ret = vmw_cmd_sid_check(dev_priv, sw_context, 907 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
648 &range->indexArray.surfaceId); 908 user_surface_converter,
909 &range->indexArray.surfaceId, NULL);
649 if (unlikely(ret != 0)) 910 if (unlikely(ret != 0))
650 return ret; 911 return ret;
651 } 912 }
@@ -676,8 +937,9 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
676 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE)) 937 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
677 continue; 938 continue;
678 939
679 ret = vmw_cmd_sid_check(dev_priv, sw_context, 940 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
680 &cur_state->value); 941 user_surface_converter,
942 &cur_state->value, NULL);
681 if (unlikely(ret != 0)) 943 if (unlikely(ret != 0))
682 return ret; 944 return ret;
683 } 945 }
@@ -708,6 +970,34 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
708 return ret; 970 return ret;
709} 971}
710 972
973/**
974 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
975 * command
976 *
977 * @dev_priv: Pointer to a device private struct.
978 * @sw_context: The software context being used for this batch.
979 * @header: Pointer to the command header in the command stream.
980 */
981static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
982 struct vmw_sw_context *sw_context,
983 SVGA3dCmdHeader *header)
984{
985 struct vmw_set_shader_cmd {
986 SVGA3dCmdHeader header;
987 SVGA3dCmdSetShader body;
988 } *cmd;
989 int ret;
990
991 cmd = container_of(header, struct vmw_set_shader_cmd,
992 header);
993
994 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
995 if (unlikely(ret != 0))
996 return ret;
997
998 return 0;
999}
1000
711static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, 1001static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
712 struct vmw_sw_context *sw_context, 1002 struct vmw_sw_context *sw_context,
713 void *buf, uint32_t *size) 1003 void *buf, uint32_t *size)
@@ -781,16 +1071,20 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
781 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check), 1071 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
782 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check), 1072 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
783 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check), 1073 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
784 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check), 1074 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader),
785 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check), 1075 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
786 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw), 1076 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
787 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), 1077 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
788 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check), 1078 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query),
789 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query), 1079 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
790 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query), 1080 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
791 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok), 1081 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
792 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN, 1082 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
793 &vmw_cmd_blt_surf_screen_check) 1083 &vmw_cmd_blt_surf_screen_check),
1084 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid),
1085 VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid),
1086 VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid),
1087 VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid),
794}; 1088};
795 1089
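
Editor's aside: the vmw_cmd_funcs[] table above is the classic dispatch-table pattern: an array of validator function pointers indexed by opcode, with unsupported commands routed to a shared "invalid" handler (as the four new SVGA_3D_CMD_* entries do). A standalone sketch with hypothetical opcodes and handlers:

#include <stdio.h>

enum { CMD_NOP, CMD_DRAW, CMD_MAX };

typedef int (*cmd_func)(const void *body);

static int cmd_ok(const void *body)      { (void)body; return 0; }
static int cmd_invalid(const void *body) { (void)body; return -22; /* -EINVAL */ }

/* One validator per opcode; unsupported commands are rejected. */
static const cmd_func cmd_funcs[CMD_MAX] = {
	[CMD_NOP]  = cmd_ok,
	[CMD_DRAW] = cmd_invalid,
};

static int cmd_check(unsigned opcode, const void *body)
{
	if (opcode >= CMD_MAX || cmd_funcs[opcode] == NULL)
		return -22;
	return cmd_funcs[opcode](body);
}

int main(void)
{
	printf("%d %d %d\n",
	       cmd_check(CMD_NOP, NULL),    /*  0: accepted  */
	       cmd_check(CMD_DRAW, NULL),   /* -22: rejected */
	       cmd_check(99, NULL));        /* -22: unknown  */
	return 0;
}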
796static int vmw_cmd_check(struct vmw_private *dev_priv, 1090static int vmw_cmd_check(struct vmw_private *dev_priv,
@@ -837,6 +1131,8 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv,
837 int32_t cur_size = size; 1131 int32_t cur_size = size;
838 int ret; 1132 int ret;
839 1133
1134 sw_context->buf_start = buf;
1135
840 while (cur_size > 0) { 1136 while (cur_size > 0) {
841 size = cur_size; 1137 size = cur_size;
842 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size); 1138 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
@@ -868,43 +1164,63 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
868 1164
869 for (i = 0; i < sw_context->cur_reloc; ++i) { 1165 for (i = 0; i < sw_context->cur_reloc; ++i) {
870 reloc = &sw_context->relocs[i]; 1166 reloc = &sw_context->relocs[i];
871 validate = &sw_context->val_bufs[reloc->index]; 1167 validate = &sw_context->val_bufs[reloc->index].base;
872 bo = validate->bo; 1168 bo = validate->bo;
873 if (bo->mem.mem_type == TTM_PL_VRAM) { 1169 switch (bo->mem.mem_type) {
1170 case TTM_PL_VRAM:
874 reloc->location->offset += bo->offset; 1171 reloc->location->offset += bo->offset;
875 reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER; 1172 reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
876 } else 1173 break;
1174 case VMW_PL_GMR:
877 reloc->location->gmrId = bo->mem.start; 1175 reloc->location->gmrId = bo->mem.start;
1176 break;
1177 default:
1178 BUG();
1179 }
878 } 1180 }
879 vmw_free_relocations(sw_context); 1181 vmw_free_relocations(sw_context);
880} 1182}
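
Editor's aside: the switch added to vmw_apply_relocations() above translates a validated buffer's final placement into an SVGA guest pointer: a buffer placed in VRAM is addressed through the framebuffer GMR plus its byte offset, while a buffer in a real GMR is addressed by the GMR id with a buffer-relative offset. A rough standalone sketch under those assumptions (constants and names hypothetical, not driver code):

#include <stdio.h>

enum placement { PL_VRAM, PL_GMR };

#define GMR_FRAMEBUFFER ((unsigned)-2)  /* stand-in for SVGA_GMR_FRAMEBUFFER */

struct guest_ptr {
	unsigned gmr_id;
	unsigned long offset;
};

static void translate(struct guest_ptr *loc, enum placement pl,
		      unsigned long bo_offset, unsigned gmr_id)
{
	switch (pl) {
	case PL_VRAM:
		loc->offset += bo_offset;   /* now relative to VRAM start */
		loc->gmr_id = GMR_FRAMEBUFFER;
		break;
	case PL_GMR:
		loc->gmr_id = gmr_id;       /* offset stays buffer-relative */
		break;
	}
}

int main(void)
{
	struct guest_ptr p = { .gmr_id = 0, .offset = 64 };

	translate(&p, PL_VRAM, 4096, 0);
	printf("gmr %u offset %lu\n", p.gmr_id, p.offset);  /* 4160 into VRAM */
	return 0;
}

The BUG() default in the real function asserts that validation can only have left the buffer in one of those two placements.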
881 1183
1184/**
 1185 * vmw_resource_list_unreference - Free up a resource list and unreference
1186 * all resources referenced by it.
1187 *
1188 * @list: The resource list.
1189 */
1190static void vmw_resource_list_unreference(struct list_head *list)
1191{
1192 struct vmw_resource_val_node *val, *val_next;
1193
1194 /*
1195 * Drop references to resources held during command submission.
1196 */
1197
1198 list_for_each_entry_safe(val, val_next, list, head) {
1199 list_del_init(&val->head);
1200 vmw_resource_unreference(&val->res);
1201 kfree(val);
1202 }
1203}
1204
882static void vmw_clear_validations(struct vmw_sw_context *sw_context) 1205static void vmw_clear_validations(struct vmw_sw_context *sw_context)
883{ 1206{
884 struct ttm_validate_buffer *entry, *next; 1207 struct vmw_validate_buffer *entry, *next;
885 struct vmw_resource *res, *res_next; 1208 struct vmw_resource_val_node *val;
886 1209
887 /* 1210 /*
888 * Drop references to DMA buffers held during command submission. 1211 * Drop references to DMA buffers held during command submission.
889 */ 1212 */
890 list_for_each_entry_safe(entry, next, &sw_context->validate_nodes, 1213 list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
891 head) { 1214 base.head) {
892 list_del(&entry->head); 1215 list_del(&entry->base.head);
893 vmw_dmabuf_validate_clear(entry->bo); 1216 ttm_bo_unref(&entry->base.bo);
894 ttm_bo_unref(&entry->bo); 1217 (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
895 sw_context->cur_val_buf--; 1218 sw_context->cur_val_buf--;
896 } 1219 }
897 BUG_ON(sw_context->cur_val_buf != 0); 1220 BUG_ON(sw_context->cur_val_buf != 0);
898 1221
899 /* 1222 list_for_each_entry(val, &sw_context->resource_list, head)
900 * Drop references to resources held during command submission. 1223 (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
901 */
902 vmw_resource_unreserve(&sw_context->resource_list);
903 list_for_each_entry_safe(res, res_next, &sw_context->resource_list,
904 validate_head) {
905 list_del_init(&res->validate_head);
906 vmw_resource_unreference(&res);
907 }
908} 1224}
909 1225
910static int vmw_validate_single_buffer(struct vmw_private *dev_priv, 1226static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
@@ -929,7 +1245,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
929 * used as a GMR, this will return -ENOMEM. 1245 * used as a GMR, this will return -ENOMEM.
930 */ 1246 */
931 1247
932 ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false); 1248 ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
933 if (likely(ret == 0 || ret == -ERESTARTSYS)) 1249 if (likely(ret == 0 || ret == -ERESTARTSYS))
934 return ret; 1250 return ret;
935 1251
@@ -939,7 +1255,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
939 */ 1255 */
940 1256
941 DRM_INFO("Falling through to VRAM.\n"); 1257 DRM_INFO("Falling through to VRAM.\n");
942 ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false); 1258 ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
943 return ret; 1259 return ret;
944} 1260}
945 1261
@@ -947,11 +1263,11 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
947static int vmw_validate_buffers(struct vmw_private *dev_priv, 1263static int vmw_validate_buffers(struct vmw_private *dev_priv,
948 struct vmw_sw_context *sw_context) 1264 struct vmw_sw_context *sw_context)
949{ 1265{
950 struct ttm_validate_buffer *entry; 1266 struct vmw_validate_buffer *entry;
951 int ret; 1267 int ret;
952 1268
953 list_for_each_entry(entry, &sw_context->validate_nodes, head) { 1269 list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
954 ret = vmw_validate_single_buffer(dev_priv, entry->bo); 1270 ret = vmw_validate_single_buffer(dev_priv, entry->base.bo);
955 if (unlikely(ret != 0)) 1271 if (unlikely(ret != 0))
956 return ret; 1272 return ret;
957 } 1273 }
@@ -1114,6 +1430,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
1114{ 1430{
1115 struct vmw_sw_context *sw_context = &dev_priv->ctx; 1431 struct vmw_sw_context *sw_context = &dev_priv->ctx;
1116 struct vmw_fence_obj *fence = NULL; 1432 struct vmw_fence_obj *fence = NULL;
1433 struct vmw_resource *error_resource;
1434 struct list_head resource_list;
1117 uint32_t handle; 1435 uint32_t handle;
1118 void *cmd; 1436 void *cmd;
1119 int ret; 1437 int ret;
@@ -1143,24 +1461,33 @@ int vmw_execbuf_process(struct drm_file *file_priv,
1143 sw_context->kernel = true; 1461 sw_context->kernel = true;
1144 1462
1145 sw_context->tfile = vmw_fpriv(file_priv)->tfile; 1463 sw_context->tfile = vmw_fpriv(file_priv)->tfile;
1146 sw_context->cid_valid = false;
1147 sw_context->sid_valid = false;
1148 sw_context->cur_reloc = 0; 1464 sw_context->cur_reloc = 0;
1149 sw_context->cur_val_buf = 0; 1465 sw_context->cur_val_buf = 0;
1150 sw_context->fence_flags = 0; 1466 sw_context->fence_flags = 0;
1151 INIT_LIST_HEAD(&sw_context->query_list);
1152 INIT_LIST_HEAD(&sw_context->resource_list); 1467 INIT_LIST_HEAD(&sw_context->resource_list);
1153 sw_context->cur_query_bo = dev_priv->pinned_bo; 1468 sw_context->cur_query_bo = dev_priv->pinned_bo;
1154 sw_context->cur_query_cid = dev_priv->query_cid; 1469 sw_context->last_query_ctx = NULL;
1155 sw_context->query_cid_valid = (dev_priv->pinned_bo != NULL); 1470 sw_context->needs_post_query_barrier = false;
1156 1471 memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
1157 INIT_LIST_HEAD(&sw_context->validate_nodes); 1472 INIT_LIST_HEAD(&sw_context->validate_nodes);
1473 INIT_LIST_HEAD(&sw_context->res_relocations);
1474 if (!sw_context->res_ht_initialized) {
1475 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
1476 if (unlikely(ret != 0))
1477 goto out_unlock;
1478 sw_context->res_ht_initialized = true;
1479 }
1158 1480
1481 INIT_LIST_HEAD(&resource_list);
1159 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, 1482 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
1160 command_size); 1483 command_size);
1161 if (unlikely(ret != 0)) 1484 if (unlikely(ret != 0))
1162 goto out_err; 1485 goto out_err;
1163 1486
1487 ret = vmw_resources_reserve(sw_context);
1488 if (unlikely(ret != 0))
1489 goto out_err;
1490
1164 ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes); 1491 ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
1165 if (unlikely(ret != 0)) 1492 if (unlikely(ret != 0))
1166 goto out_err; 1493 goto out_err;
@@ -1169,24 +1496,31 @@ int vmw_execbuf_process(struct drm_file *file_priv,
1169 if (unlikely(ret != 0)) 1496 if (unlikely(ret != 0))
1170 goto out_err; 1497 goto out_err;
1171 1498
1172 vmw_apply_relocations(sw_context); 1499 ret = vmw_resources_validate(sw_context);
1500 if (unlikely(ret != 0))
1501 goto out_err;
1173 1502
1174 if (throttle_us) { 1503 if (throttle_us) {
1175 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue, 1504 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
1176 throttle_us); 1505 throttle_us);
1177 1506
1178 if (unlikely(ret != 0)) 1507 if (unlikely(ret != 0))
1179 goto out_throttle; 1508 goto out_err;
1180 } 1509 }
1181 1510
1182 cmd = vmw_fifo_reserve(dev_priv, command_size); 1511 cmd = vmw_fifo_reserve(dev_priv, command_size);
1183 if (unlikely(cmd == NULL)) { 1512 if (unlikely(cmd == NULL)) {
1184 DRM_ERROR("Failed reserving fifo space for commands.\n"); 1513 DRM_ERROR("Failed reserving fifo space for commands.\n");
1185 ret = -ENOMEM; 1514 ret = -ENOMEM;
1186 goto out_throttle; 1515 goto out_err;
1187 } 1516 }
1188 1517
1518 vmw_apply_relocations(sw_context);
1189 memcpy(cmd, kernel_commands, command_size); 1519 memcpy(cmd, kernel_commands, command_size);
1520
1521 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
1522 vmw_resource_relocations_free(&sw_context->res_relocations);
1523
1190 vmw_fifo_commit(dev_priv, command_size); 1524 vmw_fifo_commit(dev_priv, command_size);
1191 1525
1192 vmw_query_bo_switch_commit(dev_priv, sw_context); 1526 vmw_query_bo_switch_commit(dev_priv, sw_context);
@@ -1202,9 +1536,14 @@ int vmw_execbuf_process(struct drm_file *file_priv,
1202 if (ret != 0) 1536 if (ret != 0)
1203 DRM_ERROR("Fence submission error. Syncing.\n"); 1537 DRM_ERROR("Fence submission error. Syncing.\n");
1204 1538
1539 vmw_resource_list_unreserve(&sw_context->resource_list, false);
1205 ttm_eu_fence_buffer_objects(&sw_context->validate_nodes, 1540 ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
1206 (void *) fence); 1541 (void *) fence);
1207 1542
1543 if (unlikely(dev_priv->pinned_bo != NULL &&
1544 !dev_priv->query_cid_valid))
1545 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
1546
1208 vmw_clear_validations(sw_context); 1547 vmw_clear_validations(sw_context);
1209 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret, 1548 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
1210 user_fence_rep, fence, handle); 1549 user_fence_rep, fence, handle);
@@ -1217,17 +1556,40 @@ int vmw_execbuf_process(struct drm_file *file_priv,
1217 vmw_fence_obj_unreference(&fence); 1556 vmw_fence_obj_unreference(&fence);
1218 } 1557 }
1219 1558
1559 list_splice_init(&sw_context->resource_list, &resource_list);
1220 mutex_unlock(&dev_priv->cmdbuf_mutex); 1560 mutex_unlock(&dev_priv->cmdbuf_mutex);
1561
1562 /*
1563 * Unreference resources outside of the cmdbuf_mutex to
1564 * avoid deadlocks in resource destruction paths.
1565 */
1566 vmw_resource_list_unreference(&resource_list);
1567
1221 return 0; 1568 return 0;
1222 1569
1223out_err: 1570out_err:
1571 vmw_resource_relocations_free(&sw_context->res_relocations);
1224 vmw_free_relocations(sw_context); 1572 vmw_free_relocations(sw_context);
1225out_throttle:
1226 vmw_query_switch_backoff(sw_context);
1227 ttm_eu_backoff_reservation(&sw_context->validate_nodes); 1573 ttm_eu_backoff_reservation(&sw_context->validate_nodes);
1574 vmw_resource_list_unreserve(&sw_context->resource_list, true);
1228 vmw_clear_validations(sw_context); 1575 vmw_clear_validations(sw_context);
1576 if (unlikely(dev_priv->pinned_bo != NULL &&
1577 !dev_priv->query_cid_valid))
1578 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
1229out_unlock: 1579out_unlock:
1580 list_splice_init(&sw_context->resource_list, &resource_list);
1581 error_resource = sw_context->error_resource;
1582 sw_context->error_resource = NULL;
1230 mutex_unlock(&dev_priv->cmdbuf_mutex); 1583 mutex_unlock(&dev_priv->cmdbuf_mutex);
1584
1585 /*
1586 * Unreference resources outside of the cmdbuf_mutex to
1587 * avoid deadlocks in resource destruction paths.
1588 */
1589 vmw_resource_list_unreference(&resource_list);
1590 if (unlikely(error_resource != NULL))
1591 vmw_resource_unreference(&error_resource);
1592
1231 return ret; 1593 return ret;
1232} 1594}
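
The tail of vmw_execbuf_process now splices the per-context resource list onto a local list while cmdbuf_mutex is held, and only drops the references after unlocking. A minimal sketch of that pattern, using the names from the hunk above:

	struct list_head resource_list;

	INIT_LIST_HEAD(&resource_list);
	mutex_lock(&dev_priv->cmdbuf_mutex);
	/* Detach under the lock; sw_context->resource_list becomes empty. */
	list_splice_init(&sw_context->resource_list, &resource_list);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	/* Unreference unlocked: resource destructors may take cmdbuf_mutex. */
	vmw_resource_list_unreference(&resource_list);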
1233 1595
@@ -1252,13 +1614,13 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
1252 1614
1253 1615
1254/** 1616/**
1255 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned 1617 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
1256 * query bo. 1618 * query bo.
1257 * 1619 *
1258 * @dev_priv: The device private structure. 1620 * @dev_priv: The device private structure.
1259 * @only_on_cid_match: Only flush and unpin if the current active query cid 1621 * @fence: If non-NULL, should point to a struct vmw_fence_obj issued
1260 * matches @cid. 1622 * _after_ a query barrier that flushes all queries touching the current
1261 * @cid: Optional context id to match. 1623 * buffer pointed to by @dev_priv->pinned_bo.
1262 * 1624 *
1263 * This function should be used to unpin the pinned query bo, or 1625 * This function should be used to unpin the pinned query bo, or
1264 * as a query barrier when we need to make sure that all queries have 1626 * as a query barrier when we need to make sure that all queries have
@@ -1271,31 +1633,26 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
1271 * 1633 *
1272 * The function will synchronize on the previous query barrier, and will 1634 * The function will synchronize on the previous query barrier, and will
1273 * thus not finish until that barrier has executed. 1635 * thus not finish until that barrier has executed.
1636 *
1637 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
1638 * before calling this function.
1274 */ 1639 */
1275void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, 1640void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
1276 bool only_on_cid_match, uint32_t cid) 1641 struct vmw_fence_obj *fence)
1277{ 1642{
1278 int ret = 0; 1643 int ret = 0;
1279 struct list_head validate_list; 1644 struct list_head validate_list;
1280 struct ttm_validate_buffer pinned_val, query_val; 1645 struct ttm_validate_buffer pinned_val, query_val;
1281 struct vmw_fence_obj *fence; 1646 struct vmw_fence_obj *lfence = NULL;
1282
1283 mutex_lock(&dev_priv->cmdbuf_mutex);
1284 1647
1285 if (dev_priv->pinned_bo == NULL) 1648 if (dev_priv->pinned_bo == NULL)
1286 goto out_unlock; 1649 goto out_unlock;
1287 1650
1288 if (only_on_cid_match && cid != dev_priv->query_cid)
1289 goto out_unlock;
1290
1291 INIT_LIST_HEAD(&validate_list); 1651 INIT_LIST_HEAD(&validate_list);
1292 1652
1293 pinned_val.new_sync_obj_arg = (void *)(unsigned long)
1294 DRM_VMW_FENCE_FLAG_EXEC;
1295 pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo); 1653 pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
1296 list_add_tail(&pinned_val.head, &validate_list); 1654 list_add_tail(&pinned_val.head, &validate_list);
1297 1655
1298 query_val.new_sync_obj_arg = pinned_val.new_sync_obj_arg;
1299 query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo); 1656 query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
1300 list_add_tail(&query_val.head, &validate_list); 1657 list_add_tail(&query_val.head, &validate_list);
1301 1658
@@ -1308,25 +1665,34 @@ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
1308 goto out_no_reserve; 1665 goto out_no_reserve;
1309 } 1666 }
1310 1667
1311 ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid); 1668 if (dev_priv->query_cid_valid) {
1312 if (unlikely(ret != 0)) { 1669 BUG_ON(fence != NULL);
1313 vmw_execbuf_unpin_panic(dev_priv); 1670 ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
1314 goto out_no_emit; 1671 if (unlikely(ret != 0)) {
1672 vmw_execbuf_unpin_panic(dev_priv);
1673 goto out_no_emit;
1674 }
1675 dev_priv->query_cid_valid = false;
1315 } 1676 }
1316 1677
1317 vmw_bo_pin(dev_priv->pinned_bo, false); 1678 vmw_bo_pin(dev_priv->pinned_bo, false);
1318 vmw_bo_pin(dev_priv->dummy_query_bo, false); 1679 vmw_bo_pin(dev_priv->dummy_query_bo, false);
1319 dev_priv->dummy_query_bo_pinned = false; 1680 dev_priv->dummy_query_bo_pinned = false;
1320 1681
1321 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); 1682 if (fence == NULL) {
1683 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
1684 NULL);
1685 fence = lfence;
1686 }
1322 ttm_eu_fence_buffer_objects(&validate_list, (void *) fence); 1687 ttm_eu_fence_buffer_objects(&validate_list, (void *) fence);
1688 if (lfence != NULL)
1689 vmw_fence_obj_unreference(&lfence);
1323 1690
1324 ttm_bo_unref(&query_val.bo); 1691 ttm_bo_unref(&query_val.bo);
1325 ttm_bo_unref(&pinned_val.bo); 1692 ttm_bo_unref(&pinned_val.bo);
1326 ttm_bo_unref(&dev_priv->pinned_bo); 1693 ttm_bo_unref(&dev_priv->pinned_bo);
1327 1694
1328out_unlock: 1695out_unlock:
1329 mutex_unlock(&dev_priv->cmdbuf_mutex);
1330 return; 1696 return;
1331 1697
1332out_no_emit: 1698out_no_emit:
@@ -1335,6 +1701,31 @@ out_no_reserve:
1335 ttm_bo_unref(&query_val.bo); 1701 ttm_bo_unref(&query_val.bo);
1336 ttm_bo_unref(&pinned_val.bo); 1702 ttm_bo_unref(&pinned_val.bo);
1337 ttm_bo_unref(&dev_priv->pinned_bo); 1703 ttm_bo_unref(&dev_priv->pinned_bo);
1704}
1705
1706/**
1707 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
1708 * query bo.
1709 *
1710 * @dev_priv: The device private structure.
1711 *
1712 * This function should be used to unpin the pinned query bo, or
1713 * as a query barrier when we need to make sure that all queries have
1714 * finished before the next fifo command. (For example on hardware
1715 * context destructions where the hardware may otherwise leak unfinished
1716 * queries).
1717 *
1718 * This function does not return any failure codes, but makes attempts
1719 * to do safe unpinning in case of errors.
1720 *
1721 * The function will synchronize on the previous query barrier, and will
1722 * thus not finish until that barrier has executed.
1723 */
1724void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
1725{
1726 mutex_lock(&dev_priv->cmdbuf_mutex);
1727 if (dev_priv->query_cid_valid)
1728 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
1338 mutex_unlock(&dev_priv->cmdbuf_mutex); 1729 mutex_unlock(&dev_priv->cmdbuf_mutex);
1339} 1730}
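
The split into a double-underscore worker and a locking wrapper follows the usual kernel convention: the worker assumes cmdbuf_mutex is held, the wrapper supplies the locking. A hedged usage sketch based on the call sites in this patch:

	/* On a path that already holds dev_priv->cmdbuf_mutex, e.g. the
	 * execbuf error path above, call the worker directly, optionally
	 * passing a fence issued after a query barrier: */
	__vmw_execbuf_release_pinned_bo(dev_priv, NULL);

	/* From any other context, use the wrapper, which takes the mutex
	 * and checks query_cid_valid itself: */
	vmw_execbuf_release_pinned_bo(dev_priv);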
1340 1731
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index bc187fafd58c..c62d20e8a6f1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -537,7 +537,7 @@ static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
537 container_of(fence, struct vmw_user_fence, fence); 537 container_of(fence, struct vmw_user_fence, fence);
538 struct vmw_fence_manager *fman = fence->fman; 538 struct vmw_fence_manager *fman = fence->fman;
539 539
540 kfree(ufence); 540 ttm_base_object_kfree(ufence, base);
541 /* 541 /*
542 * Free kernel space accounting. 542 * Free kernel space accounting.
543 */ 543 */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 7290811f89be..d9fbbe191071 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -133,6 +133,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
133 struct drm_vmw_rect *clips = NULL; 133 struct drm_vmw_rect *clips = NULL;
134 struct drm_mode_object *obj; 134 struct drm_mode_object *obj;
135 struct vmw_framebuffer *vfb; 135 struct vmw_framebuffer *vfb;
136 struct vmw_resource *res;
136 uint32_t num_clips; 137 uint32_t num_clips;
137 int ret; 138 int ret;
138 139
@@ -180,11 +181,13 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
180 if (unlikely(ret != 0)) 181 if (unlikely(ret != 0))
181 goto out_no_ttm_lock; 182 goto out_no_ttm_lock;
182 183
183 ret = vmw_user_surface_lookup_handle(dev_priv, tfile, arg->sid, 184 ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
184 &surface); 185 user_surface_converter,
186 &res);
185 if (ret) 187 if (ret)
186 goto out_no_surface; 188 goto out_no_surface;
187 189
190 surface = vmw_res_to_srf(res);
188 ret = vmw_kms_present(dev_priv, file_priv, 191 ret = vmw_kms_present(dev_priv, file_priv,
189 vfb, surface, arg->sid, 192 vfb, surface, arg->sid,
190 arg->dest_x, arg->dest_y, 193 arg->dest_x, arg->dest_y,
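
The present ioctl now goes through the generic resource lookup plus a type converter instead of a surface-specific helper. A sketch of the calling pattern, assuming the usual unreference once the caller is done (only the names shown in this hunk are taken from the source):

	struct vmw_resource *res;
	struct vmw_surface *surface;
	int ret;

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
					      user_surface_converter, &res);
	if (ret)
		return ret;

	surface = vmw_res_to_srf(res);   /* container_of-style downcast */
	/* ... use the surface ... */
	vmw_resource_unreference(&res);  /* drop the lookup reference */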
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 070fb239c5af..79f7e8e60529 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -373,7 +373,7 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
373 373
374 drm_mode_crtc_set_gamma_size(crtc, 256); 374 drm_mode_crtc_set_gamma_size(crtc, 256);
375 375
376 drm_connector_attach_property(connector, 376 drm_object_attach_property(&connector->base,
377 dev->mode_config.dirty_info_property, 377 dev->mode_config.dirty_info_property,
378 1); 378 1);
379 379
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index cb55b7b66377..87e39f68e9d0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -35,6 +35,7 @@
35#include "svga_escape.h" 35#include "svga_escape.h"
36 36
37#define VMW_MAX_NUM_STREAMS 1 37#define VMW_MAX_NUM_STREAMS 1
38#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
38 39
39struct vmw_stream { 40struct vmw_stream {
40 struct vmw_dma_buffer *buf; 41 struct vmw_dma_buffer *buf;
@@ -449,6 +450,14 @@ int vmw_overlay_pause_all(struct vmw_private *dev_priv)
449 return 0; 450 return 0;
450} 451}
451 452
453
454static bool vmw_overlay_available(const struct vmw_private *dev_priv)
455{
456 return (dev_priv->overlay_priv != NULL &&
457 ((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) ==
458 VMW_OVERLAY_CAP_MASK));
459}
460
452int vmw_overlay_ioctl(struct drm_device *dev, void *data, 461int vmw_overlay_ioctl(struct drm_device *dev, void *data,
453 struct drm_file *file_priv) 462 struct drm_file *file_priv)
454{ 463{
@@ -461,7 +470,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
461 struct vmw_resource *res; 470 struct vmw_resource *res;
462 int ret; 471 int ret;
463 472
464 if (!overlay) 473 if (!vmw_overlay_available(dev_priv))
465 return -ENOSYS; 474 return -ENOSYS;
466 475
467 ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res); 476 ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
@@ -492,7 +501,7 @@ out_unlock:
492 501
493int vmw_overlay_num_overlays(struct vmw_private *dev_priv) 502int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
494{ 503{
495 if (!dev_priv->overlay_priv) 504 if (!vmw_overlay_available(dev_priv))
496 return 0; 505 return 0;
497 506
498 return VMW_MAX_NUM_STREAMS; 507 return VMW_MAX_NUM_STREAMS;
@@ -503,7 +512,7 @@ int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
503 struct vmw_overlay *overlay = dev_priv->overlay_priv; 512 struct vmw_overlay *overlay = dev_priv->overlay_priv;
504 int i, k; 513 int i, k;
505 514
506 if (!overlay) 515 if (!vmw_overlay_available(dev_priv))
507 return 0; 516 return 0;
508 517
509 mutex_lock(&overlay->mutex); 518 mutex_lock(&overlay->mutex);
@@ -569,12 +578,6 @@ int vmw_overlay_init(struct vmw_private *dev_priv)
569 if (dev_priv->overlay_priv) 578 if (dev_priv->overlay_priv)
570 return -EINVAL; 579 return -EINVAL;
571 580
572 if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_VIDEO) &&
573 (dev_priv->fifo.capabilities & SVGA_FIFO_CAP_ESCAPE)) {
574 DRM_INFO("hardware doesn't support overlays\n");
575 return -ENOSYS;
576 }
577
578 overlay = kzalloc(sizeof(*overlay), GFP_KERNEL); 581 overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
579 if (!overlay) 582 if (!overlay)
580 return -ENOMEM; 583 return -ENOMEM;
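
Note what the removed init-time check actually tested: it refused to load only when SVGA_FIFO_CAP_VIDEO was absent *and* SVGA_FIFO_CAP_ESCAPE was present, so hardware missing both capabilities slipped through. The new vmw_overlay_available() predicate requires both bits. A minimal sketch of the corrected test (the helper name here is illustrative):

	#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)

	/* True only when every bit in the mask is set. */
	static bool overlay_caps_ok(uint32_t fifo_caps)
	{
		return (fifo_caps & VMW_OVERLAY_CAP_MASK) == VMW_OVERLAY_CAP_MASK;
	}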
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index da3c6b5b98a1..e01a17b407b2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -30,17 +30,7 @@
30#include <drm/ttm/ttm_object.h> 30#include <drm/ttm/ttm_object.h>
31#include <drm/ttm/ttm_placement.h> 31#include <drm/ttm/ttm_placement.h>
32#include <drm/drmP.h> 32#include <drm/drmP.h>
33 33#include "vmwgfx_resource_priv.h"
34struct vmw_user_context {
35 struct ttm_base_object base;
36 struct vmw_resource res;
37};
38
39struct vmw_user_surface {
40 struct ttm_base_object base;
41 struct vmw_surface srf;
42 uint32_t size;
43};
44 34
45struct vmw_user_dma_buffer { 35struct vmw_user_dma_buffer {
46 struct ttm_base_object base; 36 struct ttm_base_object base;
@@ -62,17 +52,21 @@ struct vmw_user_stream {
62 struct vmw_stream stream; 52 struct vmw_stream stream;
63}; 53};
64 54
65struct vmw_surface_offset {
66 uint32_t face;
67 uint32_t mip;
68 uint32_t bo_offset;
69};
70 55
71
72static uint64_t vmw_user_context_size;
73static uint64_t vmw_user_surface_size;
74static uint64_t vmw_user_stream_size; 56static uint64_t vmw_user_stream_size;
75 57
58static const struct vmw_res_func vmw_stream_func = {
59 .res_type = vmw_res_stream,
60 .needs_backup = false,
61 .may_evict = false,
62 .type_name = "video streams",
63 .backup_placement = NULL,
64 .create = NULL,
65 .destroy = NULL,
66 .bind = NULL,
67 .unbind = NULL
68};
69
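
vmw_stream_func is the first instance of the new per-type function table; streams need no backup storage, so every hook is NULL. For contrast, a hypothetical table for a type that does need a guest backup buffer might look like the sketch below. Everything except the field names is illustrative, and the enum value is an assumption:

	static const struct vmw_res_func example_backed_func = {
		.res_type = vmw_res_surface,          /* assumed enum value */
		.needs_backup = true,                 /* contents live in a backup bo */
		.may_evict = true,                    /* eligible for LRU eviction */
		.type_name = "example surfaces",
		.backup_placement = &vmw_srf_placement,
		.create = example_create,             /* hypothetical hooks */
		.destroy = example_destroy,
		.bind = example_bind,
		.unbind = example_unbind,
	};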
76static inline struct vmw_dma_buffer * 70static inline struct vmw_dma_buffer *
77vmw_dma_buffer(struct ttm_buffer_object *bo) 71vmw_dma_buffer(struct ttm_buffer_object *bo)
78{ 72{
@@ -100,13 +94,14 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
100 * 94 *
101 * Release the resource id to the resource id manager and set it to -1 95 * Release the resource id to the resource id manager and set it to -1
102 */ 96 */
103static void vmw_resource_release_id(struct vmw_resource *res) 97void vmw_resource_release_id(struct vmw_resource *res)
104{ 98{
105 struct vmw_private *dev_priv = res->dev_priv; 99 struct vmw_private *dev_priv = res->dev_priv;
100 struct idr *idr = &dev_priv->res_idr[res->func->res_type];
106 101
107 write_lock(&dev_priv->resource_lock); 102 write_lock(&dev_priv->resource_lock);
108 if (res->id != -1) 103 if (res->id != -1)
109 idr_remove(res->idr, res->id); 104 idr_remove(idr, res->id);
110 res->id = -1; 105 res->id = -1;
111 write_unlock(&dev_priv->resource_lock); 106 write_unlock(&dev_priv->resource_lock);
112} 107}
@@ -116,17 +111,33 @@ static void vmw_resource_release(struct kref *kref)
116 struct vmw_resource *res = 111 struct vmw_resource *res =
117 container_of(kref, struct vmw_resource, kref); 112 container_of(kref, struct vmw_resource, kref);
118 struct vmw_private *dev_priv = res->dev_priv; 113 struct vmw_private *dev_priv = res->dev_priv;
119 int id = res->id; 114 int id;
120 struct idr *idr = res->idr; 115 struct idr *idr = &dev_priv->res_idr[res->func->res_type];
121 116
122 res->avail = false; 117 res->avail = false;
123 if (res->remove_from_lists != NULL) 118 list_del_init(&res->lru_head);
124 res->remove_from_lists(res);
125 write_unlock(&dev_priv->resource_lock); 119 write_unlock(&dev_priv->resource_lock);
120 if (res->backup) {
121 struct ttm_buffer_object *bo = &res->backup->base;
122
123 ttm_bo_reserve(bo, false, false, false, 0);
124 if (!list_empty(&res->mob_head) &&
125 res->func->unbind != NULL) {
126 struct ttm_validate_buffer val_buf;
127
128 val_buf.bo = bo;
129 res->func->unbind(res, false, &val_buf);
130 }
131 res->backup_dirty = false;
132 list_del_init(&res->mob_head);
133 ttm_bo_unreserve(bo);
134 vmw_dmabuf_unreference(&res->backup);
135 }
126 136
127 if (likely(res->hw_destroy != NULL)) 137 if (likely(res->hw_destroy != NULL))
128 res->hw_destroy(res); 138 res->hw_destroy(res);
129 139
140 id = res->id;
130 if (res->res_free != NULL) 141 if (res->res_free != NULL)
131 res->res_free(res); 142 res->res_free(res);
132 else 143 else
@@ -153,25 +164,25 @@ void vmw_resource_unreference(struct vmw_resource **p_res)
153/** 164/**
154 * vmw_resource_alloc_id - allocate a resource id from the id manager. 165 * vmw_resource_alloc_id - allocate a resource id from the id manager.
155 * 166 *
156 * @dev_priv: Pointer to the device private structure.
157 * @res: Pointer to the resource. 167 * @res: Pointer to the resource.
158 * 168 *
159 * Allocate the lowest free resource id from the id manager, and set 169 * Allocate the lowest free resource id from the id manager, and set
160 * @res->id to that id. Returns 0 on success and -ENOMEM on failure. 170 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
161 */ 171 */
162static int vmw_resource_alloc_id(struct vmw_private *dev_priv, 172int vmw_resource_alloc_id(struct vmw_resource *res)
163 struct vmw_resource *res)
164{ 173{
174 struct vmw_private *dev_priv = res->dev_priv;
165 int ret; 175 int ret;
176 struct idr *idr = &dev_priv->res_idr[res->func->res_type];
166 177
167 BUG_ON(res->id != -1); 178 BUG_ON(res->id != -1);
168 179
169 do { 180 do {
170 if (unlikely(idr_pre_get(res->idr, GFP_KERNEL) == 0)) 181 if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
171 return -ENOMEM; 182 return -ENOMEM;
172 183
173 write_lock(&dev_priv->resource_lock); 184 write_lock(&dev_priv->resource_lock);
174 ret = idr_get_new_above(res->idr, res, 1, &res->id); 185 ret = idr_get_new_above(idr, res, 1, &res->id);
175 write_unlock(&dev_priv->resource_lock); 186 write_unlock(&dev_priv->resource_lock);
176 187
177 } while (ret == -EAGAIN); 188 } while (ret == -EAGAIN);
@@ -179,31 +190,39 @@ static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
179 return ret; 190 return ret;
180} 191}
181 192
182 193/**
183static int vmw_resource_init(struct vmw_private *dev_priv, 194 * vmw_resource_init - initialize a struct vmw_resource
184 struct vmw_resource *res, 195 *
185 struct idr *idr, 196 * @dev_priv: Pointer to a device private struct.
186 enum ttm_object_type obj_type, 197 * @res: The struct vmw_resource to initialize.
187 bool delay_id, 198 * @obj_type: Resource object type.
188 void (*res_free) (struct vmw_resource *res), 199 * @delay_id: Boolean whether to defer device id allocation until
189 void (*remove_from_lists) 200 * the first validation.
190 (struct vmw_resource *res)) 201 * @res_free: Resource destructor.
202 * @func: Resource function table.
203 */
204int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
205 bool delay_id,
206 void (*res_free) (struct vmw_resource *res),
207 const struct vmw_res_func *func)
191{ 208{
192 kref_init(&res->kref); 209 kref_init(&res->kref);
193 res->hw_destroy = NULL; 210 res->hw_destroy = NULL;
194 res->res_free = res_free; 211 res->res_free = res_free;
195 res->remove_from_lists = remove_from_lists;
196 res->res_type = obj_type;
197 res->idr = idr;
198 res->avail = false; 212 res->avail = false;
199 res->dev_priv = dev_priv; 213 res->dev_priv = dev_priv;
200 INIT_LIST_HEAD(&res->query_head); 214 res->func = func;
201 INIT_LIST_HEAD(&res->validate_head); 215 INIT_LIST_HEAD(&res->lru_head);
216 INIT_LIST_HEAD(&res->mob_head);
202 res->id = -1; 217 res->id = -1;
218 res->backup = NULL;
219 res->backup_offset = 0;
220 res->backup_dirty = false;
221 res->res_dirty = false;
203 if (delay_id) 222 if (delay_id)
204 return 0; 223 return 0;
205 else 224 else
206 return vmw_resource_alloc_id(dev_priv, res); 225 return vmw_resource_alloc_id(res);
207} 226}
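
With the rewritten initializer, a resource no longer carries its own idr pointer or type enum; both come from the function table. A sketch of how a stream resource might be set up against vmw_stream_func (the destructor and hw_destroy names are illustrative, not quoted from the source):

	ret = vmw_resource_init(dev_priv, &stream->res,
				false,              /* allocate the device id now */
				stream_res_free,    /* hypothetical destructor */
				&vmw_stream_func);
	if (unlikely(ret != 0))
		return ret;
	vmw_resource_activate(&stream->res, stream_hw_destroy); /* name assumed */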
208 227
209/** 228/**
@@ -218,9 +237,8 @@ static int vmw_resource_init(struct vmw_private *dev_priv,
218 * Activate basically means that the function vmw_resource_lookup will 237 * Activate basically means that the function vmw_resource_lookup will
219 * find it. 238 * find it.
220 */ 239 */
221 240void vmw_resource_activate(struct vmw_resource *res,
222static void vmw_resource_activate(struct vmw_resource *res, 241 void (*hw_destroy) (struct vmw_resource *))
223 void (*hw_destroy) (struct vmw_resource *))
224{ 242{
225 struct vmw_private *dev_priv = res->dev_priv; 243 struct vmw_private *dev_priv = res->dev_priv;
226 244
@@ -250,994 +268,41 @@ struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
250} 268}
251 269
252/** 270/**
253 * Context management: 271 * vmw_user_resource_lookup_handle - lookup a struct resource from a
254 */ 272 * TTM user-space handle and perform basic type checks
255
256static void vmw_hw_context_destroy(struct vmw_resource *res)
257{
258
259 struct vmw_private *dev_priv = res->dev_priv;
260 struct {
261 SVGA3dCmdHeader header;
262 SVGA3dCmdDestroyContext body;
263 } *cmd;
264
265
266 vmw_execbuf_release_pinned_bo(dev_priv, true, res->id);
267
268 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
269 if (unlikely(cmd == NULL)) {
270 DRM_ERROR("Failed reserving FIFO space for surface "
271 "destruction.\n");
272 return;
273 }
274
275 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
276 cmd->header.size = cpu_to_le32(sizeof(cmd->body));
277 cmd->body.cid = cpu_to_le32(res->id);
278
279 vmw_fifo_commit(dev_priv, sizeof(*cmd));
280 vmw_3d_resource_dec(dev_priv, false);
281}
282
283static int vmw_context_init(struct vmw_private *dev_priv,
284 struct vmw_resource *res,
285 void (*res_free) (struct vmw_resource *res))
286{
287 int ret;
288
289 struct {
290 SVGA3dCmdHeader header;
291 SVGA3dCmdDefineContext body;
292 } *cmd;
293
294 ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
295 VMW_RES_CONTEXT, false, res_free, NULL);
296
297 if (unlikely(ret != 0)) {
298 DRM_ERROR("Failed to allocate a resource id.\n");
299 goto out_early;
300 }
301
302 if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
303 DRM_ERROR("Out of hw context ids.\n");
304 vmw_resource_unreference(&res);
305 return -ENOMEM;
306 }
307
308 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
309 if (unlikely(cmd == NULL)) {
310 DRM_ERROR("Fifo reserve failed.\n");
311 vmw_resource_unreference(&res);
312 return -ENOMEM;
313 }
314
315 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
316 cmd->header.size = cpu_to_le32(sizeof(cmd->body));
317 cmd->body.cid = cpu_to_le32(res->id);
318
319 vmw_fifo_commit(dev_priv, sizeof(*cmd));
320 (void) vmw_3d_resource_inc(dev_priv, false);
321 vmw_resource_activate(res, vmw_hw_context_destroy);
322 return 0;
323
324out_early:
325 if (res_free == NULL)
326 kfree(res);
327 else
328 res_free(res);
329 return ret;
330}
331
332struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
333{
334 struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
335 int ret;
336
337 if (unlikely(res == NULL))
338 return NULL;
339
340 ret = vmw_context_init(dev_priv, res, NULL);
341 return (ret == 0) ? res : NULL;
342}
343
344/**
345 * User-space context management:
346 */
347
348static void vmw_user_context_free(struct vmw_resource *res)
349{
350 struct vmw_user_context *ctx =
351 container_of(res, struct vmw_user_context, res);
352 struct vmw_private *dev_priv = res->dev_priv;
353
354 kfree(ctx);
355 ttm_mem_global_free(vmw_mem_glob(dev_priv),
356 vmw_user_context_size);
357}
358
359/**
360 * This function is called when user space has no more references on the
361 * base object. It releases the base-object's reference on the resource object.
362 */
363
364static void vmw_user_context_base_release(struct ttm_base_object **p_base)
365{
366 struct ttm_base_object *base = *p_base;
367 struct vmw_user_context *ctx =
368 container_of(base, struct vmw_user_context, base);
369 struct vmw_resource *res = &ctx->res;
370
371 *p_base = NULL;
372 vmw_resource_unreference(&res);
373}
374
375int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
376 struct drm_file *file_priv)
377{
378 struct vmw_private *dev_priv = vmw_priv(dev);
379 struct vmw_resource *res;
380 struct vmw_user_context *ctx;
381 struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
382 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
383 int ret = 0;
384
385 res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
386 if (unlikely(res == NULL))
387 return -EINVAL;
388
389 if (res->res_free != &vmw_user_context_free) {
390 ret = -EINVAL;
391 goto out;
392 }
393
394 ctx = container_of(res, struct vmw_user_context, res);
395 if (ctx->base.tfile != tfile && !ctx->base.shareable) {
396 ret = -EPERM;
397 goto out;
398 }
399
400 ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
401out:
402 vmw_resource_unreference(&res);
403 return ret;
404}
405
406int vmw_context_define_ioctl(struct drm_device *dev, void *data,
407 struct drm_file *file_priv)
408{
409 struct vmw_private *dev_priv = vmw_priv(dev);
410 struct vmw_user_context *ctx;
411 struct vmw_resource *res;
412 struct vmw_resource *tmp;
413 struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
414 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
415 struct vmw_master *vmaster = vmw_master(file_priv->master);
416 int ret;
417
418
419 /*
420 * Approximate idr memory usage with 128 bytes. It will be limited
421 * by maximum number_of contexts anyway.
422 */
423
424 if (unlikely(vmw_user_context_size == 0))
425 vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
426
427 ret = ttm_read_lock(&vmaster->lock, true);
428 if (unlikely(ret != 0))
429 return ret;
430
431 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
432 vmw_user_context_size,
433 false, true);
434 if (unlikely(ret != 0)) {
435 if (ret != -ERESTARTSYS)
436 DRM_ERROR("Out of graphics memory for context"
437 " creation.\n");
438 goto out_unlock;
439 }
440
441 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
442 if (unlikely(ctx == NULL)) {
443 ttm_mem_global_free(vmw_mem_glob(dev_priv),
444 vmw_user_context_size);
445 ret = -ENOMEM;
446 goto out_unlock;
447 }
448
449 res = &ctx->res;
450 ctx->base.shareable = false;
451 ctx->base.tfile = NULL;
452
453 /*
454 * From here on, the destructor takes over resource freeing.
455 */
456
457 ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
458 if (unlikely(ret != 0))
459 goto out_unlock;
460
461 tmp = vmw_resource_reference(&ctx->res);
462 ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
463 &vmw_user_context_base_release, NULL);
464
465 if (unlikely(ret != 0)) {
466 vmw_resource_unreference(&tmp);
467 goto out_err;
468 }
469
470 arg->cid = res->id;
471out_err:
472 vmw_resource_unreference(&res);
473out_unlock:
474 ttm_read_unlock(&vmaster->lock);
475 return ret;
476
477}
478
479int vmw_context_check(struct vmw_private *dev_priv,
480 struct ttm_object_file *tfile,
481 int id,
482 struct vmw_resource **p_res)
483{
484 struct vmw_resource *res;
485 int ret = 0;
486
487 read_lock(&dev_priv->resource_lock);
488 res = idr_find(&dev_priv->context_idr, id);
489 if (res && res->avail) {
490 struct vmw_user_context *ctx =
491 container_of(res, struct vmw_user_context, res);
492 if (ctx->base.tfile != tfile && !ctx->base.shareable)
493 ret = -EPERM;
494 if (p_res)
495 *p_res = vmw_resource_reference(res);
496 } else
497 ret = -EINVAL;
498 read_unlock(&dev_priv->resource_lock);
499
500 return ret;
501}
502
503struct vmw_bpp {
504 uint8_t bpp;
505 uint8_t s_bpp;
506};
507
508/*
509 * Size table for the supported SVGA3D surface formats. Each entry
510 * consists of two values: the bpp value and the s_bpp value, which is short
511 * for "stride bits per pixel". The values are given in such a way that the
512 * minimum stride for the image is calculated using
513 *
514 * min_stride = w*s_bpp
515 *
516 * and the total memory requirement for the image is
517 *
518 * h*min_stride*bpp/s_bpp
519 *
520 */
521static const struct vmw_bpp vmw_sf_bpp[] = {
522 [SVGA3D_FORMAT_INVALID] = {0, 0},
523 [SVGA3D_X8R8G8B8] = {32, 32},
524 [SVGA3D_A8R8G8B8] = {32, 32},
525 [SVGA3D_R5G6B5] = {16, 16},
526 [SVGA3D_X1R5G5B5] = {16, 16},
527 [SVGA3D_A1R5G5B5] = {16, 16},
528 [SVGA3D_A4R4G4B4] = {16, 16},
529 [SVGA3D_Z_D32] = {32, 32},
530 [SVGA3D_Z_D16] = {16, 16},
531 [SVGA3D_Z_D24S8] = {32, 32},
532 [SVGA3D_Z_D15S1] = {16, 16},
533 [SVGA3D_LUMINANCE8] = {8, 8},
534 [SVGA3D_LUMINANCE4_ALPHA4] = {8, 8},
535 [SVGA3D_LUMINANCE16] = {16, 16},
536 [SVGA3D_LUMINANCE8_ALPHA8] = {16, 16},
537 [SVGA3D_DXT1] = {4, 16},
538 [SVGA3D_DXT2] = {8, 32},
539 [SVGA3D_DXT3] = {8, 32},
540 [SVGA3D_DXT4] = {8, 32},
541 [SVGA3D_DXT5] = {8, 32},
542 [SVGA3D_BUMPU8V8] = {16, 16},
543 [SVGA3D_BUMPL6V5U5] = {16, 16},
544 [SVGA3D_BUMPX8L8V8U8] = {32, 32},
545 [SVGA3D_ARGB_S10E5] = {16, 16},
546 [SVGA3D_ARGB_S23E8] = {32, 32},
547 [SVGA3D_A2R10G10B10] = {32, 32},
548 [SVGA3D_V8U8] = {16, 16},
549 [SVGA3D_Q8W8V8U8] = {32, 32},
550 [SVGA3D_CxV8U8] = {16, 16},
551 [SVGA3D_X8L8V8U8] = {32, 32},
552 [SVGA3D_A2W10V10U10] = {32, 32},
553 [SVGA3D_ALPHA8] = {8, 8},
554 [SVGA3D_R_S10E5] = {16, 16},
555 [SVGA3D_R_S23E8] = {32, 32},
556 [SVGA3D_RG_S10E5] = {16, 16},
557 [SVGA3D_RG_S23E8] = {32, 32},
558 [SVGA3D_BUFFER] = {8, 8},
559 [SVGA3D_Z_D24X8] = {32, 32},
560 [SVGA3D_V16U16] = {32, 32},
561 [SVGA3D_G16R16] = {32, 32},
562 [SVGA3D_A16B16G16R16] = {64, 64},
563 [SVGA3D_UYVY] = {12, 12},
564 [SVGA3D_YUY2] = {12, 12},
565 [SVGA3D_NV12] = {12, 8},
566 [SVGA3D_AYUV] = {32, 32},
567 [SVGA3D_BC4_UNORM] = {4, 16},
568 [SVGA3D_BC5_UNORM] = {8, 32},
569 [SVGA3D_Z_DF16] = {16, 16},
570 [SVGA3D_Z_DF24] = {24, 24},
571 [SVGA3D_Z_D24S8_INT] = {32, 32}
572};
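
A worked example of the two formulas in the comment above, using the SVGA3D_NV12 entry {bpp = 12, s_bpp = 8} and a 64x64 image:

	min_stride = 64 * 8 bits            = 512 bits = 64 bytes
	total      = 64 * 64 bytes * 12 / 8 = 6144 bytes

which matches NV12's layout: 4096 bytes of Y samples plus 2048 bytes of interleaved, vertically subsampled UV.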
573
574
575/**
576 * Surface management.
577 */
578
579struct vmw_surface_dma {
580 SVGA3dCmdHeader header;
581 SVGA3dCmdSurfaceDMA body;
582 SVGA3dCopyBox cb;
583 SVGA3dCmdSurfaceDMASuffix suffix;
584};
585
586struct vmw_surface_define {
587 SVGA3dCmdHeader header;
588 SVGA3dCmdDefineSurface body;
589};
590
591struct vmw_surface_destroy {
592 SVGA3dCmdHeader header;
593 SVGA3dCmdDestroySurface body;
594};
595
596
597/**
598 * vmw_surface_dma_size - Compute fifo size for a dma command.
599 *
600 * @srf: Pointer to a struct vmw_surface
601 *
602 * Computes the required size for a surface dma command for backup or
603 * restoration of the surface represented by @srf.
604 */
605static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
606{
607 return srf->num_sizes * sizeof(struct vmw_surface_dma);
608}
609
610
611/**
612 * vmw_surface_define_size - Compute fifo size for a surface define command.
613 *
614 * @srf: Pointer to a struct vmw_surface
615 *
616 * Computes the required size for a surface define command for the definition
617 * of the surface represented by @srf.
618 */
619static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
620{
621 return sizeof(struct vmw_surface_define) + srf->num_sizes *
622 sizeof(SVGA3dSize);
623}
624
625
626/**
627 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
628 * 273 *
629 * Computes the required size for a surface destroy command for the destruction 274 * @dev_priv: Pointer to a device private struct
630 * of a hw surface. 275 * @tfile: Pointer to a struct ttm_object_file identifying the caller
631 */ 276 * @handle: The TTM user-space handle
632static inline uint32_t vmw_surface_destroy_size(void) 277 * @converter: Pointer to an object describing the resource type
633{ 278 * @p_res: On successful return the location pointed to will contain
634 return sizeof(struct vmw_surface_destroy); 279 * a pointer to a refcounted struct vmw_resource.
635}
636
637/**
638 * vmw_surface_destroy_encode - Encode a surface_destroy command.
639 *
640 * @id: The surface id
641 * @cmd_space: Pointer to memory area in which the commands should be encoded.
642 */
643static void vmw_surface_destroy_encode(uint32_t id,
644 void *cmd_space)
645{
646 struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
647 cmd_space;
648
649 cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
650 cmd->header.size = sizeof(cmd->body);
651 cmd->body.sid = id;
652}
653
654/**
655 * vmw_surface_define_encode - Encode a surface_define command.
656 * 280 *
657 * @srf: Pointer to a struct vmw_surface object. 281 * If the handle can't be found or is associated with an incorrect resource
658 * @cmd_space: Pointer to memory area in which the commands should be encoded. 282 * type, -EINVAL will be returned.
659 */ 283 */
660static void vmw_surface_define_encode(const struct vmw_surface *srf, 284int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
661 void *cmd_space) 285 struct ttm_object_file *tfile,
286 uint32_t handle,
287 const struct vmw_user_resource_conv
288 *converter,
289 struct vmw_resource **p_res)
662{ 290{
663 struct vmw_surface_define *cmd = (struct vmw_surface_define *)
664 cmd_space;
665 struct drm_vmw_size *src_size;
666 SVGA3dSize *cmd_size;
667 uint32_t cmd_len;
668 int i;
669
670 cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
671
672 cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
673 cmd->header.size = cmd_len;
674 cmd->body.sid = srf->res.id;
675 cmd->body.surfaceFlags = srf->flags;
676 cmd->body.format = cpu_to_le32(srf->format);
677 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
678 cmd->body.face[i].numMipLevels = srf->mip_levels[i];
679
680 cmd += 1;
681 cmd_size = (SVGA3dSize *) cmd;
682 src_size = srf->sizes;
683
684 for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
685 cmd_size->width = src_size->width;
686 cmd_size->height = src_size->height;
687 cmd_size->depth = src_size->depth;
688 }
689}
690
691
692/**
693 * vmw_surface_dma_encode - Encode a surface_dma command.
694 *
695 * @srf: Pointer to a struct vmw_surface object.
696 * @cmd_space: Pointer to memory area in which the commands should be encoded.
697 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
698 * should be placed or read from.
699 * @to_surface: Boolean whether to DMA to the surface or from the surface.
700 */
701static void vmw_surface_dma_encode(struct vmw_surface *srf,
702 void *cmd_space,
703 const SVGAGuestPtr *ptr,
704 bool to_surface)
705{
706 uint32_t i;
707 uint32_t bpp = vmw_sf_bpp[srf->format].bpp;
708 uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
709 struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
710
711 for (i = 0; i < srf->num_sizes; ++i) {
712 SVGA3dCmdHeader *header = &cmd->header;
713 SVGA3dCmdSurfaceDMA *body = &cmd->body;
714 SVGA3dCopyBox *cb = &cmd->cb;
715 SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
716 const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
717 const struct drm_vmw_size *cur_size = &srf->sizes[i];
718
719 header->id = SVGA_3D_CMD_SURFACE_DMA;
720 header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
721
722 body->guest.ptr = *ptr;
723 body->guest.ptr.offset += cur_offset->bo_offset;
724 body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3;
725 body->host.sid = srf->res.id;
726 body->host.face = cur_offset->face;
727 body->host.mipmap = cur_offset->mip;
728 body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
729 SVGA3D_READ_HOST_VRAM);
730 cb->x = 0;
731 cb->y = 0;
732 cb->z = 0;
733 cb->srcx = 0;
734 cb->srcy = 0;
735 cb->srcz = 0;
736 cb->w = cur_size->width;
737 cb->h = cur_size->height;
738 cb->d = cur_size->depth;
739
740 suffix->suffixSize = sizeof(*suffix);
741 suffix->maximumOffset = body->guest.pitch*cur_size->height*
742 cur_size->depth*bpp / stride_bpp;
743 suffix->flags.discard = 0;
744 suffix->flags.unsynchronized = 0;
745 suffix->flags.reserved = 0;
746 ++cmd;
747 }
748};
749
750
751static void vmw_hw_surface_destroy(struct vmw_resource *res)
752{
753
754 struct vmw_private *dev_priv = res->dev_priv;
755 struct vmw_surface *srf;
756 void *cmd;
757
758 if (res->id != -1) {
759
760 cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
761 if (unlikely(cmd == NULL)) {
762 DRM_ERROR("Failed reserving FIFO space for surface "
763 "destruction.\n");
764 return;
765 }
766
767 vmw_surface_destroy_encode(res->id, cmd);
768 vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
769
770 /*
771 * used_memory_size_atomic, or separate lock
772 * to avoid taking dev_priv::cmdbuf_mutex in
773 * the destroy path.
774 */
775
776 mutex_lock(&dev_priv->cmdbuf_mutex);
777 srf = container_of(res, struct vmw_surface, res);
778 dev_priv->used_memory_size -= srf->backup_size;
779 mutex_unlock(&dev_priv->cmdbuf_mutex);
780
781 }
782 vmw_3d_resource_dec(dev_priv, false);
783}
784
785void vmw_surface_res_free(struct vmw_resource *res)
786{
787 struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
788
789 if (srf->backup)
790 ttm_bo_unref(&srf->backup);
791 kfree(srf->offsets);
792 kfree(srf->sizes);
793 kfree(srf->snooper.image);
794 kfree(srf);
795}
796
797
798/**
799 * vmw_surface_do_validate - make a surface available to the device.
800 *
801 * @dev_priv: Pointer to a device private struct.
802 * @srf: Pointer to a struct vmw_surface.
803 *
804 * If the surface doesn't have a hw id, allocate one, and optionally
805 * DMA the backed up surface contents to the device.
806 *
807 * Returns -EBUSY if there weren't sufficient device resources to
808 * complete the validation. Retry after freeing up resources.
809 *
810 * May return other errors if the kernel is out of guest resources.
811 */
812int vmw_surface_do_validate(struct vmw_private *dev_priv,
813 struct vmw_surface *srf)
814{
815 struct vmw_resource *res = &srf->res;
816 struct list_head val_list;
817 struct ttm_validate_buffer val_buf;
818 uint32_t submit_size;
819 uint8_t *cmd;
820 int ret;
821
822 if (likely(res->id != -1))
823 return 0;
824
825 if (unlikely(dev_priv->used_memory_size + srf->backup_size >=
826 dev_priv->memory_size))
827 return -EBUSY;
828
829 /*
830 * Reserve and validate the backup DMA bo.
831 */
832
833 if (srf->backup) {
834 INIT_LIST_HEAD(&val_list);
835 val_buf.bo = ttm_bo_reference(srf->backup);
836 val_buf.new_sync_obj_arg = (void *)((unsigned long)
837 DRM_VMW_FENCE_FLAG_EXEC);
838 list_add_tail(&val_buf.head, &val_list);
839 ret = ttm_eu_reserve_buffers(&val_list);
840 if (unlikely(ret != 0))
841 goto out_no_reserve;
842
843 ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
844 true, false, false);
845 if (unlikely(ret != 0))
846 goto out_no_validate;
847 }
848
849 /*
850 * Alloc id for the resource.
851 */
852
853 ret = vmw_resource_alloc_id(dev_priv, res);
854 if (unlikely(ret != 0)) {
855 DRM_ERROR("Failed to allocate a surface id.\n");
856 goto out_no_id;
857 }
858 if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
859 ret = -EBUSY;
860 goto out_no_fifo;
861 }
862
863
864 /*
865 * Encode the surface define and DMA commands.
866 */
867
868 submit_size = vmw_surface_define_size(srf);
869 if (srf->backup)
870 submit_size += vmw_surface_dma_size(srf);
871
872 cmd = vmw_fifo_reserve(dev_priv, submit_size);
873 if (unlikely(cmd == NULL)) {
874 DRM_ERROR("Failed reserving FIFO space for surface "
875 "validation.\n");
876 ret = -ENOMEM;
877 goto out_no_fifo;
878 }
879
880 vmw_surface_define_encode(srf, cmd);
881 if (srf->backup) {
882 SVGAGuestPtr ptr;
883
884 cmd += vmw_surface_define_size(srf);
885 vmw_bo_get_guest_ptr(srf->backup, &ptr);
886 vmw_surface_dma_encode(srf, cmd, &ptr, true);
887 }
888
889 vmw_fifo_commit(dev_priv, submit_size);
890
891 /*
892 * Create a fence object and fence the backup buffer.
893 */
894
895 if (srf->backup) {
896 struct vmw_fence_obj *fence;
897
898 (void) vmw_execbuf_fence_commands(NULL, dev_priv,
899 &fence, NULL);
900 ttm_eu_fence_buffer_objects(&val_list, fence);
901 if (likely(fence != NULL))
902 vmw_fence_obj_unreference(&fence);
903 ttm_bo_unref(&val_buf.bo);
904 ttm_bo_unref(&srf->backup);
905 }
906
907 /*
908 * Surface memory usage accounting.
909 */
910
911 dev_priv->used_memory_size += srf->backup_size;
912
913 return 0;
914
915out_no_fifo:
916 vmw_resource_release_id(res);
917out_no_id:
918out_no_validate:
919 if (srf->backup)
920 ttm_eu_backoff_reservation(&val_list);
921out_no_reserve:
922 if (srf->backup)
923 ttm_bo_unref(&val_buf.bo);
924 return ret;
925}
926
927/**
928 * vmw_surface_evict - Evict a hw surface.
929 *
930 * @dev_priv: Pointer to a device private struct.
931 * @srf: Pointer to a struct vmw_surface
932 *
933 * DMA the contents of a hw surface to a backup guest buffer object,
934 * and destroy the hw surface, releasing its id.
935 */
936int vmw_surface_evict(struct vmw_private *dev_priv,
937 struct vmw_surface *srf)
938{
939 struct vmw_resource *res = &srf->res;
940 struct list_head val_list;
941 struct ttm_validate_buffer val_buf;
942 uint32_t submit_size;
943 uint8_t *cmd;
944 int ret;
945 struct vmw_fence_obj *fence;
946 SVGAGuestPtr ptr;
947
948 BUG_ON(res->id == -1);
949
950 /*
951 * Create a surface backup buffer object.
952 */
953
954 if (!srf->backup) {
955 ret = ttm_bo_create(&dev_priv->bdev, srf->backup_size,
956 ttm_bo_type_device,
957 &vmw_srf_placement, 0, 0, true,
958 NULL, &srf->backup);
959 if (unlikely(ret != 0))
960 return ret;
961 }
962
963 /*
964 * Reserve and validate the backup DMA bo.
965 */
966
967 INIT_LIST_HEAD(&val_list);
968 val_buf.bo = ttm_bo_reference(srf->backup);
969 val_buf.new_sync_obj_arg = (void *)(unsigned long)
970 DRM_VMW_FENCE_FLAG_EXEC;
971 list_add_tail(&val_buf.head, &val_list);
972 ret = ttm_eu_reserve_buffers(&val_list);
973 if (unlikely(ret != 0))
974 goto out_no_reserve;
975
976 ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
977 true, false, false);
978 if (unlikely(ret != 0))
979 goto out_no_validate;
980
981
982 /*
983 * Encode the DMA and surface destroy commands.
984 */
985
986 submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size();
987 cmd = vmw_fifo_reserve(dev_priv, submit_size);
988 if (unlikely(cmd == NULL)) {
989 DRM_ERROR("Failed reserving FIFO space for surface "
990 "eviction.\n");
991 ret = -ENOMEM;
992 goto out_no_fifo;
993 }
994
995 vmw_bo_get_guest_ptr(srf->backup, &ptr);
996 vmw_surface_dma_encode(srf, cmd, &ptr, false);
997 cmd += vmw_surface_dma_size(srf);
998 vmw_surface_destroy_encode(res->id, cmd);
999 vmw_fifo_commit(dev_priv, submit_size);
1000
1001 /*
1002 * Surface memory usage accounting.
1003 */
1004
1005 dev_priv->used_memory_size -= srf->backup_size;
1006
1007 /*
1008 * Create a fence object and fence the DMA buffer.
1009 */
1010
1011 (void) vmw_execbuf_fence_commands(NULL, dev_priv,
1012 &fence, NULL);
1013 ttm_eu_fence_buffer_objects(&val_list, fence);
1014 if (likely(fence != NULL))
1015 vmw_fence_obj_unreference(&fence);
1016 ttm_bo_unref(&val_buf.bo);
1017
1018 /*
1019 * Release the surface ID.
1020 */
1021
1022 vmw_resource_release_id(res);
1023
1024 return 0;
1025
1026out_no_fifo:
1027out_no_validate:
1028 if (srf->backup)
1029 ttm_eu_backoff_reservation(&val_list);
1030out_no_reserve:
1031 ttm_bo_unref(&val_buf.bo);
1032 ttm_bo_unref(&srf->backup);
1033 return ret;
1034}
1035
1036
1037/**
1038 * vmw_surface_validate - make a surface available to the device, evicting
1039 * other surfaces if needed.
1040 *
1041 * @dev_priv: Pointer to a device private struct.
1042 * @srf: Pointer to a struct vmw_surface.
1043 *
1044 * Try to validate a surface, and if it fails due to limited device resources,
1045 * repeatedly try to evict other surfaces until the request can be
1046 * accommodated.
1047 *
1048 * May return errors if out of resources.
1049 */
1050int vmw_surface_validate(struct vmw_private *dev_priv,
1051 struct vmw_surface *srf)
1052{
1053 int ret;
1054 struct vmw_surface *evict_srf;
1055
1056 do {
1057 write_lock(&dev_priv->resource_lock);
1058 list_del_init(&srf->lru_head);
1059 write_unlock(&dev_priv->resource_lock);
1060
1061 ret = vmw_surface_do_validate(dev_priv, srf);
1062 if (likely(ret != -EBUSY))
1063 break;
1064
1065 write_lock(&dev_priv->resource_lock);
1066 if (list_empty(&dev_priv->surface_lru)) {
1067 DRM_ERROR("Out of device memory for surfaces.\n");
1068 ret = -EBUSY;
1069 write_unlock(&dev_priv->resource_lock);
1070 break;
1071 }
1072
1073 evict_srf = vmw_surface_reference
1074 (list_first_entry(&dev_priv->surface_lru,
1075 struct vmw_surface,
1076 lru_head));
1077 list_del_init(&evict_srf->lru_head);
1078
1079 write_unlock(&dev_priv->resource_lock);
1080 (void) vmw_surface_evict(dev_priv, evict_srf);
1081
1082 vmw_surface_unreference(&evict_srf);
1083
1084 } while (1);
1085
1086 if (unlikely(ret != 0 && srf->res.id != -1)) {
1087 write_lock(&dev_priv->resource_lock);
1088 list_add_tail(&srf->lru_head, &dev_priv->surface_lru);
1089 write_unlock(&dev_priv->resource_lock);
1090 }
1091
1092 return ret;
1093}
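
vmw_surface_validate implements a classic evict-until-it-fits loop. Its generic shape, reduced to a pseudocode-style C sketch (the helper names are hypothetical):

	for (;;) {
		ret = try_validate(res);             /* returns -EBUSY when full */
		if (ret != -EBUSY)
			break;
		victim = pick_lru_victim(dev_priv);  /* oldest unreserved surface */
		if (victim == NULL) {
			ret = -EBUSY;                /* nothing left to evict */
			break;
		}
		evict(victim);
	}

Note how the real function takes the surface off the LRU before each attempt and puts it back only on failure, so a surface being validated can never be picked as its own eviction victim.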
1094
1095
1096/**
1097 * vmw_surface_remove_from_lists - Remove surface resources from lookup lists
1098 *
1099 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface
1100 *
1101 * As part of the resource destruction, remove the surface from any
1102 * lookup lists.
1103 */
1104static void vmw_surface_remove_from_lists(struct vmw_resource *res)
1105{
1106 struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
1107
1108 list_del_init(&srf->lru_head);
1109}
1110
1111int vmw_surface_init(struct vmw_private *dev_priv,
1112 struct vmw_surface *srf,
1113 void (*res_free) (struct vmw_resource *res))
1114{
1115 int ret;
1116 struct vmw_resource *res = &srf->res;
1117
1118 BUG_ON(res_free == NULL);
1119 INIT_LIST_HEAD(&srf->lru_head);
1120 ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
1121 VMW_RES_SURFACE, true, res_free,
1122 vmw_surface_remove_from_lists);
1123
1124 if (unlikely(ret != 0))
1125 res_free(res);
1126
1127 /*
1128 * The surface won't be visible to hardware until a
1129 * surface validate.
1130 */
1131
1132 (void) vmw_3d_resource_inc(dev_priv, false);
1133 vmw_resource_activate(res, vmw_hw_surface_destroy);
1134 return ret;
1135}
1136
1137static void vmw_user_surface_free(struct vmw_resource *res)
1138{
1139 struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
1140 struct vmw_user_surface *user_srf =
1141 container_of(srf, struct vmw_user_surface, srf);
1142 struct vmw_private *dev_priv = srf->res.dev_priv;
1143 uint32_t size = user_srf->size;
1144
1145 if (srf->backup)
1146 ttm_bo_unref(&srf->backup);
1147 kfree(srf->offsets);
1148 kfree(srf->sizes);
1149 kfree(srf->snooper.image);
1150 kfree(user_srf);
1151 ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
1152}
1153
1154/**
1155 * vmw_resource_unreserve - unreserve resources previously reserved for
1156 * command submission.
1157 *
1158 * @list_head: list of resources to unreserve.
1159 *
1160 * Currently only surfaces are considered, and unreserving a surface
1161 * means putting it back on the device's surface LRU list,
1162 * so that it can be evicted if necessary.
1163 * This function traverses the resource list,
1164 * checks whether each resource is a surface, and if so puts it back
1165 * on the device's surface LRU list.
1166 */
1167void vmw_resource_unreserve(struct list_head *list)
1168{
1169 struct vmw_resource *res;
1170 struct vmw_surface *srf;
1171 rwlock_t *lock = NULL;
1172
1173 list_for_each_entry(res, list, validate_head) {
1174
1175 if (res->res_free != &vmw_surface_res_free &&
1176 res->res_free != &vmw_user_surface_free)
1177 continue;
1178
1179 if (unlikely(lock == NULL)) {
1180 lock = &res->dev_priv->resource_lock;
1181 write_lock(lock);
1182 }
1183
1184 srf = container_of(res, struct vmw_surface, res);
1185 list_del_init(&srf->lru_head);
1186 list_add_tail(&srf->lru_head, &res->dev_priv->surface_lru);
1187 }
1188
1189 if (lock != NULL)
1190 write_unlock(lock);
1191}
1192
1193/**
1194 * Helper function that looks up either a surface or a dmabuf.
1195 *
1196 * The pointers pointed at by out_surf and out_buf need to be NULL.
1197 */
1198int vmw_user_lookup_handle(struct vmw_private *dev_priv,
1199 struct ttm_object_file *tfile,
1200 uint32_t handle,
1201 struct vmw_surface **out_surf,
1202 struct vmw_dma_buffer **out_buf)
1203{
1204 int ret;
1205
1206 BUG_ON(*out_surf || *out_buf);
1207
1208 ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf);
1209 if (!ret)
1210 return 0;
1211
1212 ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
1213 return ret;
1214}
1215
1216
1217int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
1218 struct ttm_object_file *tfile,
1219 uint32_t handle, struct vmw_surface **out)
1220{
1221 struct vmw_resource *res;
1222 struct vmw_surface *srf;
1223 struct vmw_user_surface *user_srf;
1224 struct ttm_base_object *base; 291 struct ttm_base_object *base;
292 struct vmw_resource *res;
1225 int ret = -EINVAL; 293 int ret = -EINVAL;
1226 294
1227 base = ttm_base_object_lookup(tfile, handle); 295 base = ttm_base_object_lookup(tfile, handle);
1228 if (unlikely(base == NULL)) 296 if (unlikely(base == NULL))
1229 return -EINVAL; 297 return -EINVAL;
1230 298
1231 if (unlikely(base->object_type != VMW_RES_SURFACE)) 299 if (unlikely(base->object_type != converter->object_type))
1232 goto out_bad_resource; 300 goto out_bad_resource;
1233 301
1234 user_srf = container_of(base, struct vmw_user_surface, base); 302 res = converter->base_obj_to_res(base);
1235 srf = &user_srf->srf;
1236 res = &srf->res;
1237 303
1238 read_lock(&dev_priv->resource_lock); 304 read_lock(&dev_priv->resource_lock);
1239 305 if (!res->avail || res->res_free != converter->res_free) {
1240 if (!res->avail || res->res_free != &vmw_user_surface_free) {
1241 read_unlock(&dev_priv->resource_lock); 306 read_unlock(&dev_priv->resource_lock);
1242 goto out_bad_resource; 307 goto out_bad_resource;
1243 } 308 }
@@ -1245,7 +310,7 @@ int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
1245 kref_get(&res->kref); 310 kref_get(&res->kref);
1246 read_unlock(&dev_priv->resource_lock); 311 read_unlock(&dev_priv->resource_lock);
1247 312
1248 *out = srf; 313 *p_res = res;
1249 ret = 0; 314 ret = 0;
1250 315
1251out_bad_resource: 316out_bad_resource:
@@ -1254,286 +319,32 @@ out_bad_resource:
1254 return ret; 319 return ret;
1255} 320}
1256 321
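
The converter object referenced above exposes an expected base-object type, a downcast hook, and a destructor used as an identity check. The shape below is inferred from the three call sites in this hunk (converter->object_type, converter->base_obj_to_res(), converter->res_free), not quoted from the header:

	struct vmw_user_resource_conv {
		enum ttm_object_type object_type;   /* expected base object type */
		struct vmw_resource *
		(*base_obj_to_res)(struct ttm_base_object *base);
		void (*res_free)(struct vmw_resource *res); /* type check + dtor */
	};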
1257static void vmw_user_surface_base_release(struct ttm_base_object **p_base) 322/**
1258{ 323 * Helper function that looks up either a surface or a dmabuf.
1259 struct ttm_base_object *base = *p_base; 324 *
1260 struct vmw_user_surface *user_srf = 325 * The pointers pointed at by out_surf and out_buf need to be NULL.
1261 container_of(base, struct vmw_user_surface, base); 326 */
1262 struct vmw_resource *res = &user_srf->srf.res; 327int vmw_user_lookup_handle(struct vmw_private *dev_priv,
1263 328 struct ttm_object_file *tfile,
1264 *p_base = NULL; 329 uint32_t handle,
1265 vmw_resource_unreference(&res); 330 struct vmw_surface **out_surf,
1266} 331 struct vmw_dma_buffer **out_buf)
1267
1268int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
1269 struct drm_file *file_priv)
1270{
1271 struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
1272 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1273
1274 return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
1275}
1276
1277int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
1278 struct drm_file *file_priv)
1279{ 332{
1280 struct vmw_private *dev_priv = vmw_priv(dev);
1281 struct vmw_user_surface *user_srf;
1282 struct vmw_surface *srf;
1283 struct vmw_resource *res; 333 struct vmw_resource *res;
1284 struct vmw_resource *tmp;
1285 union drm_vmw_surface_create_arg *arg =
1286 (union drm_vmw_surface_create_arg *)data;
1287 struct drm_vmw_surface_create_req *req = &arg->req;
1288 struct drm_vmw_surface_arg *rep = &arg->rep;
1289 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1290 struct drm_vmw_size __user *user_sizes;
1291 int ret; 334 int ret;
1292 int i, j;
1293 uint32_t cur_bo_offset;
1294 struct drm_vmw_size *cur_size;
1295 struct vmw_surface_offset *cur_offset;
1296 uint32_t stride_bpp;
1297 uint32_t bpp;
1298 uint32_t num_sizes;
1299 uint32_t size;
1300 struct vmw_master *vmaster = vmw_master(file_priv->master);
1301 335
1302 if (unlikely(vmw_user_surface_size == 0)) 336 BUG_ON(*out_surf || *out_buf);
1303 vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
1304 128;
1305
1306 num_sizes = 0;
1307 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
1308 num_sizes += req->mip_levels[i];
1309
1310 if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
1311 DRM_VMW_MAX_MIP_LEVELS)
1312 return -EINVAL;
1313
1314 size = vmw_user_surface_size + 128 +
1315 ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
1316 ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
1317
1318
1319 ret = ttm_read_lock(&vmaster->lock, true);
1320 if (unlikely(ret != 0))
1321 return ret;
1322
1323 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
1324 size, false, true);
1325 if (unlikely(ret != 0)) {
1326 if (ret != -ERESTARTSYS)
1327 DRM_ERROR("Out of graphics memory for surface"
1328 " creation.\n");
1329 goto out_unlock;
1330 }
1331
1332 user_srf = kmalloc(sizeof(*user_srf), GFP_KERNEL);
1333 if (unlikely(user_srf == NULL)) {
1334 ret = -ENOMEM;
1335 goto out_no_user_srf;
1336 }
1337
1338 srf = &user_srf->srf;
1339 res = &srf->res;
1340
1341 srf->flags = req->flags;
1342 srf->format = req->format;
1343 srf->scanout = req->scanout;
1344 srf->backup = NULL;
1345
1346 memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
1347 srf->num_sizes = num_sizes;
1348 user_srf->size = size;
1349
1350 srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
1351 if (unlikely(srf->sizes == NULL)) {
1352 ret = -ENOMEM;
1353 goto out_no_sizes;
1354 }
1355 srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
1356 GFP_KERNEL);
1357 if (unlikely(srf->sizes == NULL)) {
1358 ret = -ENOMEM;
1359 goto out_no_offsets;
1360 }
1361
1362 user_sizes = (struct drm_vmw_size __user *)(unsigned long)
1363 req->size_addr;
1364
1365 ret = copy_from_user(srf->sizes, user_sizes,
1366 srf->num_sizes * sizeof(*srf->sizes));
1367 if (unlikely(ret != 0)) {
1368 ret = -EFAULT;
1369 goto out_no_copy;
1370 }
1371
1372 cur_bo_offset = 0;
1373 cur_offset = srf->offsets;
1374 cur_size = srf->sizes;
1375
1376 bpp = vmw_sf_bpp[srf->format].bpp;
1377 stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
1378
1379 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
1380 for (j = 0; j < srf->mip_levels[i]; ++j) {
1381 uint32_t stride =
1382 (cur_size->width * stride_bpp + 7) >> 3;
1383
1384 cur_offset->face = i;
1385 cur_offset->mip = j;
1386 cur_offset->bo_offset = cur_bo_offset;
1387 cur_bo_offset += stride * cur_size->height *
1388 cur_size->depth * bpp / stride_bpp;
1389 ++cur_offset;
1390 ++cur_size;
1391 }
1392 }
1393 srf->backup_size = cur_bo_offset;
1394
1395 if (srf->scanout &&
1396 srf->num_sizes == 1 &&
1397 srf->sizes[0].width == 64 &&
1398 srf->sizes[0].height == 64 &&
1399 srf->format == SVGA3D_A8R8G8B8) {
1400
1401 /* allocate image area and clear it */
1402 srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
1403 if (!srf->snooper.image) {
1404 DRM_ERROR("Failed to allocate cursor_image\n");
1405 ret = -ENOMEM;
1406 goto out_no_copy;
1407 }
1408 } else {
1409 srf->snooper.image = NULL;
1410 }
1411 srf->snooper.crtc = NULL;
1412
1413 user_srf->base.shareable = false;
1414 user_srf->base.tfile = NULL;
1415
1416 /**
1417 * From this point, the generic resource management functions
1418 * destroy the object on failure.
1419 */
1420
1421 ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
1422 if (unlikely(ret != 0))
1423 goto out_unlock;
1424
1425 tmp = vmw_resource_reference(&srf->res);
1426 ret = ttm_base_object_init(tfile, &user_srf->base,
1427 req->shareable, VMW_RES_SURFACE,
1428 &vmw_user_surface_base_release, NULL);
1429
1430 if (unlikely(ret != 0)) {
1431 vmw_resource_unreference(&tmp);
1432 vmw_resource_unreference(&res);
1433 goto out_unlock;
1434 }
1435
1436 rep->sid = user_srf->base.hash.key;
1437 if (rep->sid == SVGA3D_INVALID_ID)
1438 DRM_ERROR("Created bad Surface ID.\n");
1439
1440 vmw_resource_unreference(&res);
1441
1442 ttm_read_unlock(&vmaster->lock);
1443 return 0;
1444out_no_copy:
1445 kfree(srf->offsets);
1446out_no_offsets:
1447 kfree(srf->sizes);
1448out_no_sizes:
1449 kfree(user_srf);
1450out_no_user_srf:
1451 ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
1452out_unlock:
1453 ttm_read_unlock(&vmaster->lock);
1454 return ret;
1455}
1456
1457int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
1458 struct drm_file *file_priv)
1459{
1460 union drm_vmw_surface_reference_arg *arg =
1461 (union drm_vmw_surface_reference_arg *)data;
1462 struct drm_vmw_surface_arg *req = &arg->req;
1463 struct drm_vmw_surface_create_req *rep = &arg->rep;
1464 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1465 struct vmw_surface *srf;
1466 struct vmw_user_surface *user_srf;
1467 struct drm_vmw_size __user *user_sizes;
1468 struct ttm_base_object *base;
1469 int ret = -EINVAL;
1470
1471 base = ttm_base_object_lookup(tfile, req->sid);
1472 if (unlikely(base == NULL)) {
1473 DRM_ERROR("Could not find surface to reference.\n");
1474 return -EINVAL;
1475 }
1476
1477 if (unlikely(base->object_type != VMW_RES_SURFACE))
1478 goto out_bad_resource;
1479
1480 user_srf = container_of(base, struct vmw_user_surface, base);
1481 srf = &user_srf->srf;
1482
1483 ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
1484 if (unlikely(ret != 0)) {
1485 DRM_ERROR("Could not add a reference to a surface.\n");
1486 goto out_no_reference;
1487 }
1488
1489 rep->flags = srf->flags;
1490 rep->format = srf->format;
1491 memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
1492 user_sizes = (struct drm_vmw_size __user *)(unsigned long)
1493 rep->size_addr;
1494
1495 if (user_sizes)
1496 ret = copy_to_user(user_sizes, srf->sizes,
1497 srf->num_sizes * sizeof(*srf->sizes));
1498 if (unlikely(ret != 0)) {
1499 DRM_ERROR("copy_to_user failed %p %u\n",
1500 user_sizes, srf->num_sizes);
1501 ret = -EFAULT;
1502 }
1503out_bad_resource:
1504out_no_reference:
1505 ttm_base_object_unref(&base);
1506
1507 return ret;
1508}
337
338 ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
339 user_surface_converter,
340 &res);
341 if (!ret) {
342 *out_surf = vmw_res_to_srf(res);
343 return 0;
344 }
1509
1510int vmw_surface_check(struct vmw_private *dev_priv,
1511 struct ttm_object_file *tfile,
1512 uint32_t handle, int *id)
1513{
1514 struct ttm_base_object *base;
1515 struct vmw_user_surface *user_srf;
1516
1517 int ret = -EPERM;
1518
1519 base = ttm_base_object_lookup(tfile, handle);
1520 if (unlikely(base == NULL))
1521 return -EINVAL;
1522
1523 if (unlikely(base->object_type != VMW_RES_SURFACE))
1524 goto out_bad_surface;
1525
1526 user_srf = container_of(base, struct vmw_user_surface, base);
1527 *id = user_srf->srf.res.id;
1528 ret = 0;
1529
1530out_bad_surface:
1531 /**
1532 * FIXME: May deadlock here when called from the
1533 * command parsing code.
1534 */
1535
1536 ttm_base_object_unref(&base);
1537 return ret;
1538}
1539
345
346 *out_surf = NULL;
347 ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
348 return ret;
349}
350
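A usage sketch for the new helper (illustrative only; the example_ function and its error handling are assumptions, not part of the patch). Whichever object comes back carries a reference that the caller must drop:

static int example_use_handle(struct vmw_private *dev_priv,
			      struct ttm_object_file *tfile,
			      uint32_t handle)
{
	struct vmw_surface *surf = NULL;   /* must be NULL on entry */
	struct vmw_dma_buffer *buf = NULL; /* must be NULL on entry */
	struct vmw_resource *res;
	int ret;

	ret = vmw_user_lookup_handle(dev_priv, tfile, handle, &surf, &buf);
	if (ret)
		return ret;

	if (surf) {
		/* ... use the surface, then drop its resource reference ... */
		res = &surf->res;
		vmw_resource_unreference(&res);
	} else {
		/* ... use the dma buffer, then drop its reference ... */
		vmw_dmabuf_unreference(&buf);
	}
	return 0;
}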
@@ -1562,11 +373,11 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
1562 acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
1563 memset(vmw_bo, 0, sizeof(*vmw_bo));
1564
1565 INIT_LIST_HEAD(&vmw_bo->validate_list);
376 INIT_LIST_HEAD(&vmw_bo->res_list);
1566
1567 ret = ttm_bo_init(bdev, &vmw_bo->base, size,
1568 ttm_bo_type_device, placement,
1569 0, 0, interruptible,
380 0, interruptible,
1570 NULL, acc_size, NULL, bo_free);
1571 return ret;
1572}
@@ -1575,7 +386,7 @@ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
1575{
1576 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
1577
1578 kfree(vmw_user_bo);
389 ttm_base_object_kfree(vmw_user_bo, base);
1579}
1580
1581static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
@@ -1594,6 +405,79 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
1594 ttm_bo_unref(&bo);
1595}
1596
408/**
409 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
410 *
411 * @dev_priv: Pointer to a struct vmw_private.
412 * @tfile: Pointer to a struct ttm_object_file on which to register the user
413 * object.
414 * @size: Size of the dma buffer.
415 * @shareable: Boolean whether the buffer is shareable with other open files.
416 * @handle: Pointer to where the handle value should be assigned.
417 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
418 * should be assigned.
419 */
420int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
421 struct ttm_object_file *tfile,
422 uint32_t size,
423 bool shareable,
424 uint32_t *handle,
425 struct vmw_dma_buffer **p_dma_buf)
426{
427 struct vmw_user_dma_buffer *user_bo;
428 struct ttm_buffer_object *tmp;
429 int ret;
430
431 user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
432 if (unlikely(user_bo == NULL)) {
433 DRM_ERROR("Failed to allocate a buffer.\n");
434 return -ENOMEM;
435 }
436
437 ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
438 &vmw_vram_sys_placement, true,
439 &vmw_user_dmabuf_destroy);
440 if (unlikely(ret != 0))
441 return ret;
442
443 tmp = ttm_bo_reference(&user_bo->dma.base);
444 ret = ttm_base_object_init(tfile,
445 &user_bo->base,
446 shareable,
447 ttm_buffer_type,
448 &vmw_user_dmabuf_release, NULL);
449 if (unlikely(ret != 0)) {
450 ttm_bo_unref(&tmp);
451 goto out_no_base_object;
452 }
453
454 *p_dma_buf = &user_bo->dma;
455 *handle = user_bo->base.hash.key;
456
457out_no_base_object:
458 return ret;
459}
460
461/**
462 * vmw_user_dmabuf_verify_access - verify access permissions on this
463 * buffer object.
464 *
465 * @bo: Pointer to the buffer object being accessed
466 * @tfile: Identifying the caller.
467 */
468int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
469 struct ttm_object_file *tfile)
470{
471 struct vmw_user_dma_buffer *vmw_user_bo;
472
473 if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
474 return -EPERM;
475
476 vmw_user_bo = vmw_user_dma_buffer(bo);
477 return (vmw_user_bo->base.tfile == tfile ||
478 vmw_user_bo->base.shareable) ? 0 : -EPERM;
479}
480
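A sketch of where this check is meant to plug in, assuming a TTM verify_access hook of the usual shape (the example_ name and the drm_file recovery from filp are assumptions):

static int example_verify_access(struct ttm_buffer_object *bo,
				 struct file *filp)
{
	/* filp->private_data is the struct drm_file in a DRM driver */
	struct drm_file *file_priv = filp->private_data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	/* delegate the permission check to the helper above */
	return vmw_user_dmabuf_verify_access(bo, tfile);
}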
1597int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
1598 struct drm_file *file_priv)
1599{
@@ -1602,44 +486,27 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
1602 (union drm_vmw_alloc_dmabuf_arg *)data;
1603 struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
1604 struct drm_vmw_dmabuf_rep *rep = &arg->rep;
1605 struct vmw_user_dma_buffer *vmw_user_bo;
1606 struct ttm_buffer_object *tmp;
489 struct vmw_dma_buffer *dma_buf;
490 uint32_t handle;
1607 struct vmw_master *vmaster = vmw_master(file_priv->master);
1608 int ret;
1609
1610 vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
1611 if (unlikely(vmw_user_bo == NULL))
1612 return -ENOMEM;
1613
1614 ret = ttm_read_lock(&vmaster->lock, true);
1615 if (unlikely(ret != 0)) {
1616 kfree(vmw_user_bo);
1617 return ret;
1618 }
495 if (unlikely(ret != 0))
496 return ret;
1619
1620 ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
1621 &vmw_vram_sys_placement, true,
1622 &vmw_user_dmabuf_destroy);
498 ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
499 req->size, false, &handle, &dma_buf);
1623 if (unlikely(ret != 0))
1624 goto out_no_dmabuf;
1625
1626 tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
1627 ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
1628 &vmw_user_bo->base,
1629 false,
1630 ttm_buffer_type,
1631 &vmw_user_dmabuf_release, NULL);
1632 if (unlikely(ret != 0))
1633 goto out_no_base_object;
1634 else {
1635 rep->handle = vmw_user_bo->base.hash.key;
1636 rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
1637 rep->cur_gmr_id = vmw_user_bo->base.hash.key;
1638 rep->cur_gmr_offset = 0;
1639 }
503 rep->handle = handle;
504 rep->map_handle = dma_buf->base.addr_space_offset;
505 rep->cur_gmr_id = handle;
506 rep->cur_gmr_offset = 0;
507
508 vmw_dmabuf_unreference(&dma_buf);
1640
1641out_no_base_object:
1642 ttm_bo_unref(&tmp);
1643out_no_dmabuf:
1644 ttm_read_unlock(&vmaster->lock);
1645
@@ -1657,27 +524,6 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
1657 TTM_REF_USAGE);
1658}
1659
1660uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
1661 uint32_t cur_validate_node)
1662{
1663 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
1664
1665 if (likely(vmw_bo->on_validate_list))
1666 return vmw_bo->cur_validate_node;
1667
1668 vmw_bo->cur_validate_node = cur_validate_node;
1669 vmw_bo->on_validate_list = true;
1670
1671 return cur_validate_node;
1672}
1673
1674void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
1675{
1676 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
1677
1678 vmw_bo->on_validate_list = false;
1679}
1680
1681int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
1682 uint32_t handle, struct vmw_dma_buffer **out)
1683{
@@ -1706,6 +552,18 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
1706 return 0;
1707}
1708
555int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
556 struct vmw_dma_buffer *dma_buf)
557{
558 struct vmw_user_dma_buffer *user_bo;
559
560 if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
561 return -EINVAL;
562
563 user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
564 return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
565}
566
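The reference helper pairs naturally with the lookup above. A minimal sketch (the example_ name is assumed): take a persistent TTM usage reference for the file, then drop the transient lookup reference:

static int example_lookup_and_ref(struct ttm_object_file *tfile,
				  uint32_t handle)
{
	struct vmw_dma_buffer *dma_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &dma_buf);
	if (unlikely(ret != 0))
		return ret;

	/* keeps the object alive until the handle is unreferenced */
	ret = vmw_user_dmabuf_reference(tfile, dma_buf);
	vmw_dmabuf_unreference(&dma_buf);
	return ret;
}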
1709/*
1710 * Stream management
1711 */
@@ -1730,8 +588,8 @@ static int vmw_stream_init(struct vmw_private *dev_priv,
1730 struct vmw_resource *res = &stream->res;
1731 int ret;
1732
1733 ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
1734 VMW_RES_STREAM, false, res_free, NULL);
591 ret = vmw_resource_init(dev_priv, res, false, res_free,
592 &vmw_stream_func);
1735
1736 if (unlikely(ret != 0)) {
1737 if (res_free == NULL)
@@ -1753,17 +611,13 @@ static int vmw_stream_init(struct vmw_private *dev_priv,
1753 return 0;
1754}
1755
1756/**
1757 * User-space context management:
1758 */
1759
1760static void vmw_user_stream_free(struct vmw_resource *res)
1761{
1762 struct vmw_user_stream *stream =
1763 container_of(res, struct vmw_user_stream, stream.res);
1764 struct vmw_private *dev_priv = res->dev_priv;
1765
1766 kfree(stream);
620 ttm_base_object_kfree(stream, base);
1767 ttm_mem_global_free(vmw_mem_glob(dev_priv),
1768 vmw_user_stream_size);
1769}
@@ -1792,9 +646,11 @@ int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
1792 struct vmw_user_stream *stream;
1793 struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
1794 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
649 struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
1795 int ret = 0;
1796
1797 res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
652
653 res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
1798 if (unlikely(res == NULL))
1799 return -EINVAL;
1800
@@ -1895,7 +751,8 @@ int vmw_user_stream_lookup(struct vmw_private *dev_priv,
1895 struct vmw_resource *res;
1896 int ret;
1897
1898 res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
754 res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
755 *inout_id);
1899 if (unlikely(res == NULL))
1900 return -EINVAL;
1901
@@ -1990,3 +847,453 @@ int vmw_dumb_destroy(struct drm_file *file_priv,
1990 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
1991 handle, TTM_REF_USAGE);
1992}
850
851/**
852 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
853 *
854 * @res: The resource for which to allocate a backup buffer.
855 * @interruptible: Whether any sleeps during allocation should be
856 * performed while interruptible.
857 */
858static int vmw_resource_buf_alloc(struct vmw_resource *res,
859 bool interruptible)
860{
861 unsigned long size =
862 (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
863 struct vmw_dma_buffer *backup;
864 int ret;
865
866 if (likely(res->backup)) {
867 BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
868 return 0;
869 }
870
871 backup = kzalloc(sizeof(*backup), GFP_KERNEL);
872 if (unlikely(backup == NULL))
873 return -ENOMEM;
874
875 ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
876 res->func->backup_placement,
877 interruptible,
878 &vmw_dmabuf_bo_free);
879 if (unlikely(ret != 0))
880 goto out_no_dmabuf;
881
882 res->backup = backup;
883
884out_no_dmabuf:
885 return ret;
886}
887
888/**
889 * vmw_resource_do_validate - Make a resource up-to-date and visible
890 * to the device.
891 *
892 * @res: The resource to make visible to the device.
893 * @val_buf: Information about a buffer possibly
894 * containing backup data if a bind operation is needed.
895 *
896 * On hardware resource shortage, this function returns -EBUSY and
897 * should be retried once resources have been freed up.
898 */
899static int vmw_resource_do_validate(struct vmw_resource *res,
900 struct ttm_validate_buffer *val_buf)
901{
902 int ret = 0;
903 const struct vmw_res_func *func = res->func;
904
905 if (unlikely(res->id == -1)) {
906 ret = func->create(res);
907 if (unlikely(ret != 0))
908 return ret;
909 }
910
911 if (func->bind &&
912 ((func->needs_backup && list_empty(&res->mob_head) &&
913 val_buf->bo != NULL) ||
914 (!func->needs_backup && val_buf->bo != NULL))) {
915 ret = func->bind(res, val_buf);
916 if (unlikely(ret != 0))
917 goto out_bind_failed;
918 if (func->needs_backup)
919 list_add_tail(&res->mob_head, &res->backup->res_list);
920 }
921
922 /*
923 * Only do this on write operations, and move to
924 * vmw_resource_unreserve if it can be called after
925 * backup buffers have been unreserved. Otherwise
926 * sort out locking.
927 */
928 res->res_dirty = true;
929
930 return 0;
931
932out_bind_failed:
933 func->destroy(res);
934
935 return ret;
936}
937
938/**
939 * vmw_resource_unreserve - Unreserve a resource previously reserved for
940 * command submission.
941 *
942 * @res: Pointer to the struct vmw_resource to unreserve.
943 * @new_backup: Pointer to new backup buffer if command submission
944 * switched.
945 * @new_backup_offset: New backup offset if @new_backup is !NULL.
946 *
947 * Currently unreserving a resource means putting it back on the device's
948 * resource lru list, so that it can be evicted if necessary.
949 */
950void vmw_resource_unreserve(struct vmw_resource *res,
951 struct vmw_dma_buffer *new_backup,
952 unsigned long new_backup_offset)
953{
954 struct vmw_private *dev_priv = res->dev_priv;
955
956 if (!list_empty(&res->lru_head))
957 return;
958
959 if (new_backup && new_backup != res->backup) {
960
961 if (res->backup) {
962 BUG_ON(atomic_read(&res->backup->base.reserved) == 0);
963 list_del_init(&res->mob_head);
964 vmw_dmabuf_unreference(&res->backup);
965 }
966
967 res->backup = vmw_dmabuf_reference(new_backup);
968 BUG_ON(atomic_read(&new_backup->base.reserved) == 0);
969 list_add_tail(&res->mob_head, &new_backup->res_list);
970 }
971 if (new_backup)
972 res->backup_offset = new_backup_offset;
973
974 if (!res->func->may_evict)
975 return;
976
977 write_lock(&dev_priv->resource_lock);
978 list_add_tail(&res->lru_head,
979 &res->dev_priv->res_lru[res->func->res_type]);
980 write_unlock(&dev_priv->resource_lock);
981}
982
983/**
984 * vmw_resource_check_buffer - Check whether a backup buffer is needed
985 * for a resource and in that case, allocate
986 * one, reserve and validate it.
987 *
988 * @res: The resource for which to allocate a backup buffer.
989 * @interruptible: Whether any sleeps during allocation should be
990 * performed while interruptible.
991 * @val_buf: On successful return contains data about the
992 * reserved and validated backup buffer.
993 */
994int vmw_resource_check_buffer(struct vmw_resource *res,
995 bool interruptible,
996 struct ttm_validate_buffer *val_buf)
997{
998 struct list_head val_list;
999 bool backup_dirty = false;
1000 int ret;
1001
1002 if (unlikely(res->backup == NULL)) {
1003 ret = vmw_resource_buf_alloc(res, interruptible);
1004 if (unlikely(ret != 0))
1005 return ret;
1006 }
1007
1008 INIT_LIST_HEAD(&val_list);
1009 val_buf->bo = ttm_bo_reference(&res->backup->base);
1010 list_add_tail(&val_buf->head, &val_list);
1011 ret = ttm_eu_reserve_buffers(&val_list);
1012 if (unlikely(ret != 0))
1013 goto out_no_reserve;
1014
1015 if (res->func->needs_backup && list_empty(&res->mob_head))
1016 return 0;
1017
1018 backup_dirty = res->backup_dirty;
1019 ret = ttm_bo_validate(&res->backup->base,
1020 res->func->backup_placement,
1021 true, false);
1022
1023 if (unlikely(ret != 0))
1024 goto out_no_validate;
1025
1026 return 0;
1027
1028out_no_validate:
1029 ttm_eu_backoff_reservation(&val_list);
1030out_no_reserve:
1031 ttm_bo_unref(&val_buf->bo);
1032 if (backup_dirty)
1033 vmw_dmabuf_unreference(&res->backup);
1034
1035 return ret;
1036}
1037
1038/**
1039 * vmw_resource_reserve - Reserve a resource for command submission
1040 *
1041 * @res: The resource to reserve.
1042 *
1043 * This function takes the resource off the LRU list and makes sure
1044 * a backup buffer is present for guest-backed resources. However,
1045 * the buffer may not be bound to the resource at this point.
1046 *
1047 */
1048int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
1049{
1050 struct vmw_private *dev_priv = res->dev_priv;
1051 int ret;
1052
1053 write_lock(&dev_priv->resource_lock);
1054 list_del_init(&res->lru_head);
1055 write_unlock(&dev_priv->resource_lock);
1056
1057 if (res->func->needs_backup && res->backup == NULL &&
1058 !no_backup) {
1059 ret = vmw_resource_buf_alloc(res, true);
1060 if (unlikely(ret != 0))
1061 return ret;
1062 }
1063
1064 return 0;
1065}
1066
1067/**
1068 * vmw_resource_backoff_reservation - Unreserve and unreference a
1069 * backup buffer.
1070 *
1071 * @val_buf: Backup buffer information.
1072 */
1073void vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
1074{
1075 struct list_head val_list;
1076
1077 if (likely(val_buf->bo == NULL))
1078 return;
1079
1080 INIT_LIST_HEAD(&val_list);
1081 list_add_tail(&val_buf->head, &val_list);
1082 ttm_eu_backoff_reservation(&val_list);
1083 ttm_bo_unref(&val_buf->bo);
1084}
1085
1086/**
1087 * vmw_resource_do_evict - Evict a resource, and transfer its data
1088 * to a backup buffer.
1089 *
1090 * @res: The resource to evict.
1091 */
1092int vmw_resource_do_evict(struct vmw_resource *res)
1093{
1094 struct ttm_validate_buffer val_buf;
1095 const struct vmw_res_func *func = res->func;
1096 int ret;
1097
1098 BUG_ON(!func->may_evict);
1099
1100 val_buf.bo = NULL;
1101 ret = vmw_resource_check_buffer(res, true, &val_buf);
1102 if (unlikely(ret != 0))
1103 return ret;
1104
1105 if (unlikely(func->unbind != NULL &&
1106 (!func->needs_backup || !list_empty(&res->mob_head)))) {
1107 ret = func->unbind(res, res->res_dirty, &val_buf);
1108 if (unlikely(ret != 0))
1109 goto out_no_unbind;
1110 list_del_init(&res->mob_head);
1111 }
1112 ret = func->destroy(res);
1113 res->backup_dirty = true;
1114 res->res_dirty = false;
1115out_no_unbind:
1116 vmw_resource_backoff_reservation(&val_buf);
1117
1118 return ret;
1119}
1120
1121
1122/**
1123 * vmw_resource_validate - Make a resource up-to-date and visible
1124 * to the device.
1125 *
1126 * @res: The resource to make visible to the device.
1127 *
1128 * On successful return, any backup DMA buffer pointed to by @res->backup will
1129 * be reserved and validated.
1130 * On hardware resource shortage, this function will repeatedly evict
1131 * resources of the same type until the validation succeeds.
1132 */
1133int vmw_resource_validate(struct vmw_resource *res)
1134{
1135 int ret;
1136 struct vmw_resource *evict_res;
1137 struct vmw_private *dev_priv = res->dev_priv;
1138 struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
1139 struct ttm_validate_buffer val_buf;
1140
1141 if (likely(!res->func->may_evict))
1142 return 0;
1143
1144 val_buf.bo = NULL;
1145 if (res->backup)
1146 val_buf.bo = &res->backup->base;
1147 do {
1148 ret = vmw_resource_do_validate(res, &val_buf);
1149 if (likely(ret != -EBUSY))
1150 break;
1151
1152 write_lock(&dev_priv->resource_lock);
1153 if (list_empty(lru_list) || !res->func->may_evict) {
1154 DRM_ERROR("Out of device id entries "
1155 "for %s.\n", res->func->type_name);
1156 ret = -EBUSY;
1157 write_unlock(&dev_priv->resource_lock);
1158 break;
1159 }
1160
1161 evict_res = vmw_resource_reference
1162 (list_first_entry(lru_list, struct vmw_resource,
1163 lru_head));
1164 list_del_init(&evict_res->lru_head);
1165
1166 write_unlock(&dev_priv->resource_lock);
1167 vmw_resource_do_evict(evict_res);
1168 vmw_resource_unreference(&evict_res);
1169 } while (1);
1170
1171 if (unlikely(ret != 0))
1172 goto out_no_validate;
1173 else if (!res->func->needs_backup && res->backup) {
1174 list_del_init(&res->mob_head);
1175 vmw_dmabuf_unreference(&res->backup);
1176 }
1177
1178 return 0;
1179
1180out_no_validate:
1181 return ret;
1182}
1183
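Taken together with vmw_resource_reserve() and vmw_resource_unreserve() above, the intended lifecycle around command submission is roughly the following sketch (simplified and illustrative; the real execbuf path also reserves and fences the backup buffers through the ttm_eu helpers):

static int example_submit_with_resource(struct vmw_resource *res)
{
	int ret;

	/* Take the resource off the LRU and make sure a backup exists. */
	ret = vmw_resource_reserve(res, false);
	if (ret)
		return ret;

	/* Create/bind the hardware resource, evicting others on -EBUSY. */
	ret = vmw_resource_validate(res);
	if (ret)
		goto out_unreserve;

	/* ... emit FIFO commands referencing res->id here ... */

out_unreserve:
	/* Put the resource back on its LRU list; no backup switch. */
	vmw_resource_unreserve(res, NULL, 0);
	return ret;
}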
1184/**
1185 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
1186 * object without unreserving it.
1187 *
1188 * @bo: Pointer to the struct ttm_buffer_object to fence.
1189 * @fence: Pointer to the fence. If NULL, this function will
1190 * insert a fence into the command stream.
1191 *
1192 * Contrary to the ttm_eu version of this function, it takes only
1193 * a single buffer object instead of a list, and it also doesn't
1194 * unreserve the buffer object, which needs to be done separately.
1195 */
1196void vmw_fence_single_bo(struct ttm_buffer_object *bo,
1197 struct vmw_fence_obj *fence)
1198{
1199 struct ttm_bo_device *bdev = bo->bdev;
1200 struct ttm_bo_driver *driver = bdev->driver;
1201 struct vmw_fence_obj *old_fence_obj;
1202 struct vmw_private *dev_priv =
1203 container_of(bdev, struct vmw_private, bdev);
1204
1205 if (fence == NULL)
1206 vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1207 else
1208 driver->sync_obj_ref(fence);
1209
1210 spin_lock(&bdev->fence_lock);
1211
1212 old_fence_obj = bo->sync_obj;
1213 bo->sync_obj = fence;
1214
1215 spin_unlock(&bdev->fence_lock);
1216
1217 if (old_fence_obj)
1218 vmw_fence_obj_unreference(&old_fence_obj);
1219}
1220
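A sketch of the pattern this comment describes, assuming the buffer was reserved beforehand (the example_ wrapper is illustrative; it mirrors the fencing done in vmw_legacy_srf_dma below):

static void example_fence_and_unreserve(struct vmw_private *dev_priv,
					struct ttm_buffer_object *bo)
{
	struct vmw_fence_obj *fence;

	/* Create a fence object covering the submitted commands. */
	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	vmw_fence_single_bo(bo, fence);
	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	/* vmw_fence_single_bo() does not unreserve; do it separately. */
	ttm_bo_unreserve(bo);
}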
1221/**
1222 * vmw_resource_move_notify - TTM move_notify_callback
1223 *
1224 * @bo: The TTM buffer object about to move.
1225 * @mem: The struct ttm_mem_reg indicating to what memory
1226 * region the move is taking place.
1227 *
1228 * For now does nothing.
1229 */
1230void vmw_resource_move_notify(struct ttm_buffer_object *bo,
1231 struct ttm_mem_reg *mem)
1232{
1233}
1234
1235/**
1236 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
1237 *
1238 * @res: The resource being queried.
1239 */
1240bool vmw_resource_needs_backup(const struct vmw_resource *res)
1241{
1242 return res->func->needs_backup;
1243}
1244
1245/**
1246 * vmw_resource_evict_type - Evict all resources of a specific type
1247 *
1248 * @dev_priv: Pointer to a device private struct
1249 * @type: The resource type to evict
1250 *
1251 * To avoid thrashing and starvation, or as part of the hibernation sequence,
1252 * evict all evictable resources of a specific type.
1253 */
1254static void vmw_resource_evict_type(struct vmw_private *dev_priv,
1255 enum vmw_res_type type)
1256{
1257 struct list_head *lru_list = &dev_priv->res_lru[type];
1258 struct vmw_resource *evict_res;
1259
1260 do {
1261 write_lock(&dev_priv->resource_lock);
1262
1263 if (list_empty(lru_list))
1264 goto out_unlock;
1265
1266 evict_res = vmw_resource_reference(
1267 list_first_entry(lru_list, struct vmw_resource,
1268 lru_head));
1269 list_del_init(&evict_res->lru_head);
1270 write_unlock(&dev_priv->resource_lock);
1271 vmw_resource_do_evict(evict_res);
1272 vmw_resource_unreference(&evict_res);
1273 } while (1);
1274
1275out_unlock:
1276 write_unlock(&dev_priv->resource_lock);
1277}
1278
1279/**
1280 * vmw_resource_evict_all - Evict all evictable resources
1281 *
1282 * @dev_priv: Pointer to a device private struct
1283 *
1284 * To avoid thrashing and starvation, or as part of the hibernation sequence,
1285 * evict all evictable resources. In particular this means that all
1286 * guest-backed resources that are registered with the device are
1287 * evicted and the OTable becomes clean.
1288 */
1289void vmw_resource_evict_all(struct vmw_private *dev_priv)
1290{
1291 enum vmw_res_type type;
1292
1293 mutex_lock(&dev_priv->cmdbuf_mutex);
1294
1295 for (type = 0; type < vmw_res_max; ++type)
1296 vmw_resource_evict_type(dev_priv, type);
1297
1298 mutex_unlock(&dev_priv->cmdbuf_mutex);
1299}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
new file mode 100644
index 000000000000..f3adeed2854c
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
@@ -0,0 +1,84 @@
1/**************************************************************************
2 *
3 * Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#ifndef _VMWGFX_RESOURCE_PRIV_H_
29#define _VMWGFX_RESOURCE_PRIV_H_
30
31#include "vmwgfx_drv.h"
32
33/**
34 * struct vmw_user_resource_conv - Identify a derived user-exported resource
35 * type and provide a function to convert its ttm_base_object pointer to
36 * a struct vmw_resource
37 */
38struct vmw_user_resource_conv {
39 enum ttm_object_type object_type;
40 struct vmw_resource *(*base_obj_to_res)(struct ttm_base_object *base);
41 void (*res_free) (struct vmw_resource *res);
42};
43
44/**
45 * struct vmw_res_func - members and functions common for a resource type
46 *
47 * @res_type: Enum that identifies the lru list to use for eviction.
48 * @needs_backup: Whether the resource is guest-backed and needs
49 * persistent buffer storage.
50 * @type_name: String that identifies the resource type.
51 * @backup_placement: TTM placement for backup buffers.
52 * @may_evict: Whether the resource may be evicted.
53 * @create: Create a hardware resource.
54 * @destroy: Destroy a hardware resource.
55 * @bind: Bind a hardware resource to persistent buffer storage.
56 * @unbind: Unbind a hardware resource from persistent
57 * buffer storage.
58 */
59
60struct vmw_res_func {
61 enum vmw_res_type res_type;
62 bool needs_backup;
63 const char *type_name;
64 struct ttm_placement *backup_placement;
65 bool may_evict;
66
67 int (*create) (struct vmw_resource *res);
68 int (*destroy) (struct vmw_resource *res);
69 int (*bind) (struct vmw_resource *res,
70 struct ttm_validate_buffer *val_buf);
71 int (*unbind) (struct vmw_resource *res,
72 bool readback,
73 struct ttm_validate_buffer *val_buf);
74};
75
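For illustration, a hypothetical resource type could fill in this table as below (every example_ name is assumed and not part of the patch; the real instance is vmw_legacy_surface_func in vmwgfx_surface.c):

static int example_res_create(struct vmw_resource *res);
static int example_res_destroy(struct vmw_resource *res);

static const struct vmw_res_func example_res_func = {
	.res_type = vmw_res_stream,	/* selects the LRU list for eviction */
	.needs_backup = false,		/* no persistent backing store */
	.may_evict = true,
	.type_name = "example resources",
	.backup_placement = &vmw_vram_sys_placement,
	.create = example_res_create,
	.destroy = example_res_destroy,
	.bind = NULL,			/* only needed when needs_backup */
	.unbind = NULL,
};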
76int vmw_resource_alloc_id(struct vmw_resource *res);
77void vmw_resource_release_id(struct vmw_resource *res);
78int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
79 bool delay_id,
80 void (*res_free) (struct vmw_resource *res),
81 const struct vmw_res_func *func);
82void vmw_resource_activate(struct vmw_resource *res,
83 void (*hw_destroy) (struct vmw_resource *));
84#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 6deaf2f8bab1..26387c3d5a21 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -468,7 +468,7 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
468 468
469 drm_mode_crtc_set_gamma_size(crtc, 256); 469 drm_mode_crtc_set_gamma_size(crtc, 256);
470 470
471 drm_connector_attach_property(connector, 471 drm_object_attach_property(&connector->base,
472 dev->mode_config.dirty_info_property, 472 dev->mode_config.dirty_info_property,
473 1); 473 1);
474 474
@@ -485,7 +485,7 @@ int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
485 return -EINVAL; 485 return -EINVAL;
486 } 486 }
487 487
488 if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_SCREEN_OBJECT_2)) { 488 if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
489 DRM_INFO("Not using screen objects," 489 DRM_INFO("Not using screen objects,"
490 " missing cap SCREEN_OBJECT_2\n"); 490 " missing cap SCREEN_OBJECT_2\n");
491 return -ENOSYS; 491 return -ENOSYS;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
new file mode 100644
index 000000000000..582814339748
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -0,0 +1,893 @@
1/**************************************************************************
2 *
3 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "vmwgfx_resource_priv.h"
30#include <ttm/ttm_placement.h>
31#include "svga3d_surfacedefs.h"
32
33/**
34 * struct vmw_user_surface - User-space visible surface resource
35 *
36 * @base: The TTM base object handling user-space visibility.
37 * @srf: The surface metadata.
38 * @size: TTM accounting size for the surface.
39 */
40struct vmw_user_surface {
41 struct ttm_base_object base;
42 struct vmw_surface srf;
43 uint32_t size;
44 uint32_t backup_handle;
45};
46
47/**
48 * struct vmw_surface_offset - Backing store mip level offset info
49 *
50 * @face: Surface face.
51 * @mip: Mip level.
52 * @bo_offset: Offset into backing store of this mip level.
53 *
54 */
55struct vmw_surface_offset {
56 uint32_t face;
57 uint32_t mip;
58 uint32_t bo_offset;
59};
60
61static void vmw_user_surface_free(struct vmw_resource *res);
62static struct vmw_resource *
63vmw_user_surface_base_to_res(struct ttm_base_object *base);
64static int vmw_legacy_srf_bind(struct vmw_resource *res,
65 struct ttm_validate_buffer *val_buf);
66static int vmw_legacy_srf_unbind(struct vmw_resource *res,
67 bool readback,
68 struct ttm_validate_buffer *val_buf);
69static int vmw_legacy_srf_create(struct vmw_resource *res);
70static int vmw_legacy_srf_destroy(struct vmw_resource *res);
71
72static const struct vmw_user_resource_conv user_surface_conv = {
73 .object_type = VMW_RES_SURFACE,
74 .base_obj_to_res = vmw_user_surface_base_to_res,
75 .res_free = vmw_user_surface_free
76};
77
78const struct vmw_user_resource_conv *user_surface_converter =
79 &user_surface_conv;
80
81
82static uint64_t vmw_user_surface_size;
83
84static const struct vmw_res_func vmw_legacy_surface_func = {
85 .res_type = vmw_res_surface,
86 .needs_backup = false,
87 .may_evict = true,
88 .type_name = "legacy surfaces",
89 .backup_placement = &vmw_srf_placement,
90 .create = &vmw_legacy_srf_create,
91 .destroy = &vmw_legacy_srf_destroy,
92 .bind = &vmw_legacy_srf_bind,
93 .unbind = &vmw_legacy_srf_unbind
94};
95
96/**
97 * struct vmw_surface_dma - SVGA3D DMA command
98 */
99struct vmw_surface_dma {
100 SVGA3dCmdHeader header;
101 SVGA3dCmdSurfaceDMA body;
102 SVGA3dCopyBox cb;
103 SVGA3dCmdSurfaceDMASuffix suffix;
104};
105
106/**
107 * struct vmw_surface_define - SVGA3D Surface Define command
108 */
109struct vmw_surface_define {
110 SVGA3dCmdHeader header;
111 SVGA3dCmdDefineSurface body;
112};
113
114/**
115 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
116 */
117struct vmw_surface_destroy {
118 SVGA3dCmdHeader header;
119 SVGA3dCmdDestroySurface body;
120};
121
122
123/**
124 * vmw_surface_dma_size - Compute fifo size for a dma command.
125 *
126 * @srf: Pointer to a struct vmw_surface
127 *
128 * Computes the required size for a surface dma command for backup or
129 * restoration of the surface represented by @srf.
130 */
131static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
132{
133 return srf->num_sizes * sizeof(struct vmw_surface_dma);
134}
135
136
137/**
138 * vmw_surface_define_size - Compute fifo size for a surface define command.
139 *
140 * @srf: Pointer to a struct vmw_surface
141 *
142 * Computes the required size for a surface define command for the definition
143 * of the surface represented by @srf.
144 */
145static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
146{
147 return sizeof(struct vmw_surface_define) + srf->num_sizes *
148 sizeof(SVGA3dSize);
149}
150
151
152/**
153 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
154 *
155 * Computes the required size for a surface destroy command for the destruction
156 * of a hw surface.
157 */
158static inline uint32_t vmw_surface_destroy_size(void)
159{
160 return sizeof(struct vmw_surface_destroy);
161}
162
163/**
164 * vmw_surface_destroy_encode - Encode a surface_destroy command.
165 *
166 * @id: The surface id
167 * @cmd_space: Pointer to memory area in which the commands should be encoded.
168 */
169static void vmw_surface_destroy_encode(uint32_t id,
170 void *cmd_space)
171{
172 struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
173 cmd_space;
174
175 cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
176 cmd->header.size = sizeof(cmd->body);
177 cmd->body.sid = id;
178}
179
180/**
181 * vmw_surface_define_encode - Encode a surface_define command.
182 *
183 * @srf: Pointer to a struct vmw_surface object.
184 * @cmd_space: Pointer to memory area in which the commands should be encoded.
185 */
186static void vmw_surface_define_encode(const struct vmw_surface *srf,
187 void *cmd_space)
188{
189 struct vmw_surface_define *cmd = (struct vmw_surface_define *)
190 cmd_space;
191 struct drm_vmw_size *src_size;
192 SVGA3dSize *cmd_size;
193 uint32_t cmd_len;
194 int i;
195
196 cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
197
198 cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
199 cmd->header.size = cmd_len;
200 cmd->body.sid = srf->res.id;
201 cmd->body.surfaceFlags = srf->flags;
202 cmd->body.format = cpu_to_le32(srf->format);
203 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
204 cmd->body.face[i].numMipLevels = srf->mip_levels[i];
205
206 cmd += 1;
207 cmd_size = (SVGA3dSize *) cmd;
208 src_size = srf->sizes;
209
210 for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
211 cmd_size->width = src_size->width;
212 cmd_size->height = src_size->height;
213 cmd_size->depth = src_size->depth;
214 }
215}
216
217/**
218 * vmw_surface_dma_encode - Encode a surface_dma command.
219 *
220 * @srf: Pointer to a struct vmw_surface object.
221 * @cmd_space: Pointer to memory area in which the commands should be encoded.
222 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
223 * should be placed or read from.
224 * @to_surface: Boolean whether to DMA to the surface or from the surface.
225 */
226static void vmw_surface_dma_encode(struct vmw_surface *srf,
227 void *cmd_space,
228 const SVGAGuestPtr *ptr,
229 bool to_surface)
230{
231 uint32_t i;
232 struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
233 const struct svga3d_surface_desc *desc =
234 svga3dsurface_get_desc(srf->format);
235
236 for (i = 0; i < srf->num_sizes; ++i) {
237 SVGA3dCmdHeader *header = &cmd->header;
238 SVGA3dCmdSurfaceDMA *body = &cmd->body;
239 SVGA3dCopyBox *cb = &cmd->cb;
240 SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
241 const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
242 const struct drm_vmw_size *cur_size = &srf->sizes[i];
243
244 header->id = SVGA_3D_CMD_SURFACE_DMA;
245 header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
246
247 body->guest.ptr = *ptr;
248 body->guest.ptr.offset += cur_offset->bo_offset;
249 body->guest.pitch = svga3dsurface_calculate_pitch(desc,
250 cur_size);
251 body->host.sid = srf->res.id;
252 body->host.face = cur_offset->face;
253 body->host.mipmap = cur_offset->mip;
254 body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
255 SVGA3D_READ_HOST_VRAM);
256 cb->x = 0;
257 cb->y = 0;
258 cb->z = 0;
259 cb->srcx = 0;
260 cb->srcy = 0;
261 cb->srcz = 0;
262 cb->w = cur_size->width;
263 cb->h = cur_size->height;
264 cb->d = cur_size->depth;
265
266 suffix->suffixSize = sizeof(*suffix);
267 suffix->maximumOffset =
268 svga3dsurface_get_image_buffer_size(desc, cur_size,
269 body->guest.pitch);
270 suffix->flags.discard = 0;
271 suffix->flags.unsynchronized = 0;
272 suffix->flags.reserved = 0;
273 ++cmd;
274 }
275}
276
277
278/**
279 * vmw_hw_surface_destroy - destroy a Device surface
280 *
281 * @res: Pointer to a struct vmw_resource embedded in a struct
282 * vmw_surface.
283 *
284 * Destroys the device surface associated with a struct vmw_surface, if
285 * any, and adjusts accounting and resource count accordingly.
286 */
287static void vmw_hw_surface_destroy(struct vmw_resource *res)
288{
289
290 struct vmw_private *dev_priv = res->dev_priv;
291 struct vmw_surface *srf;
292 void *cmd;
293
294 if (res->id != -1) {
295
296 cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
297 if (unlikely(cmd == NULL)) {
298 DRM_ERROR("Failed reserving FIFO space for surface "
299 "destruction.\n");
300 return;
301 }
302
303 vmw_surface_destroy_encode(res->id, cmd);
304 vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
305
306 /*
307 * used_memory_size_atomic, or separate lock
308 * to avoid taking dev_priv::cmdbuf_mutex in
309 * the destroy path.
310 */
311
312 mutex_lock(&dev_priv->cmdbuf_mutex);
313 srf = vmw_res_to_srf(res);
314 dev_priv->used_memory_size -= res->backup_size;
315 mutex_unlock(&dev_priv->cmdbuf_mutex);
316 }
317 vmw_3d_resource_dec(dev_priv, false);
318}
319
320/**
321 * vmw_legacy_srf_create - Create a device surface as part of the
322 * resource validation process.
323 *
324 * @res: Pointer to a struct vmw_surface.
325 *
326 * If the surface doesn't have a hw id, allocate one and define the surface.
327 *
328 * Returns -EBUSY if there weren't sufficient device resources to
329 * complete the validation. Retry after freeing up resources.
330 *
331 * May return other errors if the kernel is out of guest resources.
332 */
333static int vmw_legacy_srf_create(struct vmw_resource *res)
334{
335 struct vmw_private *dev_priv = res->dev_priv;
336 struct vmw_surface *srf;
337 uint32_t submit_size;
338 uint8_t *cmd;
339 int ret;
340
341 if (likely(res->id != -1))
342 return 0;
343
344 srf = vmw_res_to_srf(res);
345 if (unlikely(dev_priv->used_memory_size + res->backup_size >=
346 dev_priv->memory_size))
347 return -EBUSY;
348
349 /*
350 * Alloc id for the resource.
351 */
352
353 ret = vmw_resource_alloc_id(res);
354 if (unlikely(ret != 0)) {
355 DRM_ERROR("Failed to allocate a surface id.\n");
356 goto out_no_id;
357 }
358
359 if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
360 ret = -EBUSY;
361 goto out_no_fifo;
362 }
363
364 /*
365 * Encode the surface define command.
366 */
367
368 submit_size = vmw_surface_define_size(srf);
369 cmd = vmw_fifo_reserve(dev_priv, submit_size);
370 if (unlikely(cmd == NULL)) {
371 DRM_ERROR("Failed reserving FIFO space for surface "
372 "creation.\n");
373 ret = -ENOMEM;
374 goto out_no_fifo;
375 }
376
377 vmw_surface_define_encode(srf, cmd);
378 vmw_fifo_commit(dev_priv, submit_size);
379 /*
380 * Surface memory usage accounting.
381 */
382
383 dev_priv->used_memory_size += res->backup_size;
384 return 0;
385
386out_no_fifo:
387 vmw_resource_release_id(res);
388out_no_id:
389 return ret;
390}
391
392/**
393 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
394 *
395 * @res: Pointer to a struct vmw_resource embedded in a struct
396 * vmw_surface.
397 * @val_buf: Pointer to a struct ttm_validate_buffer containing
398 * information about the backup buffer.
399 * @bind: Boolean whether to DMA to the surface.
400 *
401 * Transfer backup data to or from a legacy surface as part of the
402 * validation process.
403 * May return other errors if the kernel is out of guest resources.
404 * The backup buffer will be fenced or idle upon successful completion,
405 * and if the surface needs persistent backup storage, the backup buffer
406 * will also be returned reserved iff @bind is true.
407 */
408static int vmw_legacy_srf_dma(struct vmw_resource *res,
409 struct ttm_validate_buffer *val_buf,
410 bool bind)
411{
412 SVGAGuestPtr ptr;
413 struct vmw_fence_obj *fence;
414 uint32_t submit_size;
415 struct vmw_surface *srf = vmw_res_to_srf(res);
416 uint8_t *cmd;
417 struct vmw_private *dev_priv = res->dev_priv;
418
419 BUG_ON(val_buf->bo == NULL);
420
421 submit_size = vmw_surface_dma_size(srf);
422 cmd = vmw_fifo_reserve(dev_priv, submit_size);
423 if (unlikely(cmd == NULL)) {
424 DRM_ERROR("Failed reserving FIFO space for surface "
425 "DMA.\n");
426 return -ENOMEM;
427 }
428 vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
429 vmw_surface_dma_encode(srf, cmd, &ptr, bind);
430
431 vmw_fifo_commit(dev_priv, submit_size);
432
433 /*
434 * Create a fence object and fence the backup buffer.
435 */
436
437 (void) vmw_execbuf_fence_commands(NULL, dev_priv,
438 &fence, NULL);
439
440 vmw_fence_single_bo(val_buf->bo, fence);
441
442 if (likely(fence != NULL))
443 vmw_fence_obj_unreference(&fence);
444
445 return 0;
446}
447
448/**
449 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
450 * surface validation process.
451 *
452 * @res: Pointer to a struct vmw_resource embedded in a struct
453 * vmw_surface.
454 * @val_buf: Pointer to a struct ttm_validate_buffer containing
455 * information about the backup buffer.
456 *
457 * This function will copy backup data to the surface if the
458 * backup buffer is dirty.
459 */
460static int vmw_legacy_srf_bind(struct vmw_resource *res,
461 struct ttm_validate_buffer *val_buf)
462{
463 if (!res->backup_dirty)
464 return 0;
465
466 return vmw_legacy_srf_dma(res, val_buf, true);
467}
468
469
470/**
471 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
472 * surface eviction process.
473 *
474 * @res: Pointer to a struct vmw_resource embedded in a struct
475 * vmw_surface.
476 * @val_buf: Pointer to a struct ttm_validate_buffer containing
477 * information about the backup buffer.
478 *
479 * This function will copy backup data from the surface.
480 */
481static int vmw_legacy_srf_unbind(struct vmw_resource *res,
482 bool readback,
483 struct ttm_validate_buffer *val_buf)
484{
485 if (unlikely(readback))
486 return vmw_legacy_srf_dma(res, val_buf, false);
487 return 0;
488}
489
490/**
491 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
492 * resource eviction process.
493 *
494 * @res: Pointer to a struct vmw_resource embedded in a struct
495 * vmw_surface.
496 */
497static int vmw_legacy_srf_destroy(struct vmw_resource *res)
498{
499 struct vmw_private *dev_priv = res->dev_priv;
500 uint32_t submit_size;
501 uint8_t *cmd;
502
503 BUG_ON(res->id == -1);
504
505 /*
506 * Encode the surface destroy command.
507 */
508
509 submit_size = vmw_surface_destroy_size();
510 cmd = vmw_fifo_reserve(dev_priv, submit_size);
511 if (unlikely(cmd == NULL)) {
512 DRM_ERROR("Failed reserving FIFO space for surface "
513 "eviction.\n");
514 return -ENOMEM;
515 }
516
517 vmw_surface_destroy_encode(res->id, cmd);
518 vmw_fifo_commit(dev_priv, submit_size);
519
520 /*
521 * Surface memory usage accounting.
522 */
523
524 dev_priv->used_memory_size -= res->backup_size;
525
526 /*
527 * Release the surface ID.
528 */
529
530 vmw_resource_release_id(res);
531
532 return 0;
533}
534
535
536/**
537 * vmw_surface_init - initialize a struct vmw_surface
538 *
539 * @dev_priv: Pointer to a device private struct.
540 * @srf: Pointer to the struct vmw_surface to initialize.
541 * @res_free: Pointer to a resource destructor used to free
542 * the object.
543 */
544static int vmw_surface_init(struct vmw_private *dev_priv,
545 struct vmw_surface *srf,
546 void (*res_free) (struct vmw_resource *res))
547{
548 int ret;
549 struct vmw_resource *res = &srf->res;
550
551 BUG_ON(res_free == NULL);
552 (void) vmw_3d_resource_inc(dev_priv, false);
553 ret = vmw_resource_init(dev_priv, res, true, res_free,
554 &vmw_legacy_surface_func);
555
556 if (unlikely(ret != 0)) {
557 vmw_3d_resource_dec(dev_priv, false);
558 res_free(res);
559 return ret;
560 }
561
562 /*
563 * The surface won't be visible to hardware until a
564 * surface validate.
565 */
566
567 vmw_resource_activate(res, vmw_hw_surface_destroy);
568 return ret;
569}
570
571/**
572 * vmw_user_surface_base_to_res - TTM base object to resource converter for
573 * user visible surfaces
574 *
575 * @base: Pointer to a TTM base object
576 *
577 * Returns the struct vmw_resource embedded in a struct vmw_surface
578 * for the user-visible object identified by the TTM base object @base.
579 */
580static struct vmw_resource *
581vmw_user_surface_base_to_res(struct ttm_base_object *base)
582{
583 return &(container_of(base, struct vmw_user_surface, base)->srf.res);
584}
585
586/**
587 * vmw_user_surface_free - User visible surface resource destructor
588 *
589 * @res: A struct vmw_resource embedded in a struct vmw_surface.
590 */
591static void vmw_user_surface_free(struct vmw_resource *res)
592{
593 struct vmw_surface *srf = vmw_res_to_srf(res);
594 struct vmw_user_surface *user_srf =
595 container_of(srf, struct vmw_user_surface, srf);
596 struct vmw_private *dev_priv = srf->res.dev_priv;
597 uint32_t size = user_srf->size;
598
599 kfree(srf->offsets);
600 kfree(srf->sizes);
601 kfree(srf->snooper.image);
602 ttm_base_object_kfree(user_srf, base);
603 ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
604}
605
606/**
607 * vmw_user_surface_base_release - User visible surface TTM base object destructor
608 *
609 * @p_base: Pointer to a pointer to a TTM base object
610 * embedded in a struct vmw_user_surface.
611 *
612 * Drops the base object's reference on its resource, and the
613 * pointer pointed to by *p_base is set to NULL.
614 */
615static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
616{
617 struct ttm_base_object *base = *p_base;
618 struct vmw_user_surface *user_srf =
619 container_of(base, struct vmw_user_surface, base);
620 struct vmw_resource *res = &user_srf->srf.res;
621
622 *p_base = NULL;
623 vmw_resource_unreference(&res);
624}
625
626/**
627 * vmw_user_surface_destroy_ioctl - Ioctl function implementing
628 * the user surface destroy functionality.
629 *
630 * @dev: Pointer to a struct drm_device.
631 * @data: Pointer to data copied from / to user-space.
632 * @file_priv: Pointer to a drm file private structure.
633 */
634int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
635 struct drm_file *file_priv)
636{
637 struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
638 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
639
640 return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
641}
642
643/**
644 * vmw_surface_define_ioctl - Ioctl function implementing
645 * the user surface define functionality.
646 *
647 * @dev: Pointer to a struct drm_device.
648 * @data: Pointer to data copied from / to user-space.
649 * @file_priv: Pointer to a drm file private structure.
650 */
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i, j;
	uint32_t cur_bo_offset;
	struct drm_vmw_size *cur_size;
	struct vmw_surface_offset *cur_offset;
	uint32_t num_sizes;
	uint32_t size;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	const struct svga3d_surface_desc *desc;

	if (unlikely(vmw_user_surface_size == 0))
		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
			128;

	num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		num_sizes += req->mip_levels[i];

	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS)
		return -EINVAL;

	size = vmw_user_surface_size + 128 +
		ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
		ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));

	desc = svga3dsurface_get_desc(req->format);
	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
		DRM_ERROR("Invalid surface format for surface creation.\n");
		return -EINVAL;
	}

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   size, false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for surface"
				  " creation.\n");
		goto out_unlock;
	}

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(user_srf == NULL)) {
		ret = -ENOMEM;
		goto out_no_user_srf;
	}

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;

	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = num_sizes;
	user_srf->size = size;

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_no_sizes;
	}
	srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
			       GFP_KERNEL);
	if (unlikely(srf->offsets == NULL)) {
		ret = -ENOMEM;
		goto out_no_offsets;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		goto out_no_copy;
	}

	srf->base_size = *srf->sizes;
	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
	srf->multisample_count = 1;

	cur_bo_offset = 0;
	cur_offset = srf->offsets;
	cur_size = srf->sizes;

	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		for (j = 0; j < srf->mip_levels[i]; ++j) {
			uint32_t stride = svga3dsurface_calculate_pitch
				(desc, cur_size);

			cur_offset->face = i;
			cur_offset->mip = j;
			cur_offset->bo_offset = cur_bo_offset;
			cur_bo_offset += svga3dsurface_get_image_buffer_size
				(desc, cur_size, stride);
			++cur_offset;
			++cur_size;
		}
	}
	res->backup_size = cur_bo_offset;
	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
		/* clear the image */
		if (srf->snooper.image) {
			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
		} else {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_no_copy;
		}
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	user_srf->base.shareable = false;
	user_srf->base.tfile = NULL;

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_base_object_init(tfile, &user_srf->base,
				   req->shareable, VMW_RES_SURFACE,
				   &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->sid = user_srf->base.hash.key;
	vmw_resource_unreference(&res);

	ttm_read_unlock(&vmaster->lock);
	return 0;
out_no_copy:
	kfree(srf->offsets);
out_no_offsets:
	kfree(srf->sizes);
out_no_sizes:
	ttm_base_object_kfree(user_srf, base);
out_no_user_srf:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}

/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;

	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		ret = -EFAULT;
	}
out_bad_resource:
out_no_reference:
	ttm_base_object_unref(&base);

	return ret;
}
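
The three surface ioctls above share one handle-lifetime pattern: define creates the resource and wraps it in a TTM base object, reference adds a TTM_REF_USAGE reference to an existing handle, and destroy drops that reference again. A minimal sketch of the lookup-and-reference half, using only helpers already visible above (error unwinding is trimmed, and the function name itself is hypothetical):

	/*
	 * Sketch only: the lookup/ref flow shared by the reference path.
	 * ttm_base_object_lookup() takes a reference on the base object;
	 * ttm_base_object_unref() drops it again.
	 */
	static int vmw_surface_handle_reference(struct ttm_object_file *tfile,
						uint32_t sid)
	{
		struct ttm_base_object *base;
		int ret = -EINVAL;

		base = ttm_base_object_lookup(tfile, sid);
		if (base == NULL)
			return -EINVAL;

		if (base->object_type == VMW_RES_SURFACE)
			ret = ttm_ref_object_add(tfile, base,
						 TTM_REF_USAGE, NULL);

		ttm_base_object_unref(&base);
		/* the ref is undone later by ttm_ref_object_base_unref() */
		return ret;
	}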
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index e25cf31faab2..fa60add0ff63 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -18,7 +18,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/dmi.h>
 #include <linux/seq_file.h>
 #include <linux/uaccess.h>
 #include <linux/fs.h>
@@ -376,7 +375,6 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
 			     size_t cnt, loff_t *ppos)
 {
 	char usercmd[64];
-	const char *pdev_name;
 	int ret;
 	bool delay = false, can_switch;
 	bool just_mux = false;
@@ -468,7 +466,6 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
 		goto out;
 
 	if (can_switch) {
-		pdev_name = pci_name(client->pdev);
 		ret = vga_switchto_stage1(client);
 		if (ret)
 			printk(KERN_ERR "vga_switcheroo: switching failed stage 1 %d\n", ret);
@@ -540,7 +537,6 @@ fail:
 int vga_switcheroo_process_delayed_switch(void)
 {
 	struct vga_switcheroo_client *client;
-	const char *pdev_name;
 	int ret;
 	int err = -EINVAL;
 
@@ -555,7 +551,6 @@ int vga_switcheroo_process_delayed_switch(void)
 	if (!client || !check_can_switch())
 		goto err;
 
-	pdev_name = pci_name(client->pdev);
 	ret = vga_switchto_stage2(client);
 	if (ret)
 		printk(KERN_ERR "vga_switcheroo: delayed switching failed stage 2 %d\n", ret);
@@ -567,4 +562,3 @@ err:
 	return err;
 }
 EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch);
-
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 3fd82809b2d4..fad21c927a38 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1431,6 +1431,8 @@ extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
 extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
 extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
 				     struct timeval *vblanktime);
+extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
+				  struct drm_pending_vblank_event *e);
 extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
 extern int drm_vblank_get(struct drm_device *dev, int crtc);
 extern void drm_vblank_put(struct drm_device *dev, int crtc);
@@ -1503,6 +1505,7 @@ extern unsigned int drm_debug;
 
 extern unsigned int drm_vblank_offdelay;
 extern unsigned int drm_timestamp_precision;
+extern unsigned int drm_timestamp_monotonic;
 
 extern struct class *drm_class;
 extern struct proc_dir_entry *drm_proc_root;
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 3fa18b7e9497..00d78b5161c0 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -792,6 +792,7 @@ struct drm_mode_config {
 
 	/* output poll support */
 	bool poll_enabled;
+	bool poll_running;
 	struct delayed_work output_poll_work;
 
 	/* pointers to standard properties */
@@ -887,14 +888,14 @@ extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_
 extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src);
 extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
 						   const struct drm_display_mode *mode);
-extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode);
+extern void drm_mode_debug_printmodeline(const struct drm_display_mode *mode);
 extern void drm_mode_config_init(struct drm_device *dev);
 extern void drm_mode_config_reset(struct drm_device *dev);
 extern void drm_mode_config_cleanup(struct drm_device *dev);
 extern void drm_mode_set_name(struct drm_display_mode *mode);
-extern bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2);
-extern int drm_mode_width(struct drm_display_mode *mode);
-extern int drm_mode_height(struct drm_display_mode *mode);
+extern bool drm_mode_equal(const struct drm_display_mode *mode1, const struct drm_display_mode *mode2);
+extern int drm_mode_width(const struct drm_display_mode *mode);
+extern int drm_mode_height(const struct drm_display_mode *mode);
 
 /* for us by fb module */
 extern int drm_mode_attachmode_crtc(struct drm_device *dev,
@@ -919,12 +920,6 @@ extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
 extern void drm_mode_connector_list_update(struct drm_connector *connector);
 extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
 						   struct edid *edid);
-extern int drm_connector_property_set_value(struct drm_connector *connector,
-					    struct drm_property *property,
-					    uint64_t value);
-extern int drm_connector_property_get_value(struct drm_connector *connector,
-					    struct drm_property *property,
-					    uint64_t *value);
 extern int drm_object_property_set_value(struct drm_mode_object *obj,
 					 struct drm_property *property,
 					 uint64_t val);
@@ -946,8 +941,6 @@ extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
 extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
 extern bool drm_crtc_in_use(struct drm_crtc *crtc);
 
-extern void drm_connector_attach_property(struct drm_connector *connector,
-					  struct drm_property *property, uint64_t init_val);
 extern void drm_object_attach_property(struct drm_mode_object *obj,
 				       struct drm_property *property,
 				       uint64_t init_val);
@@ -1037,6 +1030,7 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
 extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
 				    void *data, struct drm_file *file_priv);
 extern u8 *drm_find_cea_extension(struct edid *edid);
+extern u8 drm_match_cea_mode(struct drm_display_mode *to_match);
 extern bool drm_detect_hdmi_monitor(struct edid *edid);
 extern bool drm_detect_monitor_audio(struct edid *edid);
 extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
@@ -1053,6 +1047,7 @@ extern struct drm_display_mode *drm_gtf_mode_complex(struct drm_device *dev,
 				  int GTF_2C, int GTF_K, int GTF_2J);
 extern int drm_add_modes_noedid(struct drm_connector *connector,
 				int hdisplay, int vdisplay);
+extern uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode);
 
 extern int drm_edid_header_is_valid(const u8 *raw_edid);
 extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index e01cc80c9c30..f43d556bf40b 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -137,6 +137,8 @@ extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
 
 extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
 
+extern void drm_helper_move_panel_connectors_to_head(struct drm_device *);
+
 extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
 					  struct drm_mode_fb_cmd2 *mode_cmd);
 
@@ -162,6 +164,7 @@ extern int drm_helper_resume_force_mode(struct drm_device *dev);
 extern void drm_kms_helper_poll_init(struct drm_device *dev);
 extern void drm_kms_helper_poll_fini(struct drm_device *dev);
 extern void drm_helper_hpd_irq_event(struct drm_device *dev);
+extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
 
 extern void drm_kms_helper_poll_disable(struct drm_device *dev);
 extern void drm_kms_helper_poll_enable(struct drm_device *dev);
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index fe061489f91f..e8e1417af3d9 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -25,6 +25,7 @@
 
 #include <linux/types.h>
 #include <linux/i2c.h>
+#include <linux/delay.h>
 
 /*
  * Unless otherwise noted, all values are from the DP 1.1a spec.  Note that
@@ -311,6 +312,14 @@
 #define MODE_I2C_READ	4
 #define MODE_I2C_STOP	8
 
+/**
+ * struct i2c_algo_dp_aux_data - driver interface structure for i2c over dp
+ * 				 aux algorithm
+ * @running: set by the algo indicating whether an i2c is ongoing or whether
+ * 	     the i2c bus is quiescent
+ * @address: i2c target address for the currently ongoing transfer
+ * @aux_ch: driver callback to transfer a single byte of the i2c payload
+ */
 struct i2c_algo_dp_aux_data {
 	bool running;
 	u16 address;
@@ -322,4 +331,34 @@ struct i2c_algo_dp_aux_data {
 int
 i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
 
+
+#define DP_LINK_STATUS_SIZE	6
+bool drm_dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+			  int lane_count);
+bool drm_dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+			      int lane_count);
+u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
+				     int lane);
+u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
+					  int lane);
+
+#define DP_RECEIVER_CAP_SIZE	0xf
+void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+
+u8 drm_dp_link_rate_to_bw_code(int link_rate);
+int drm_dp_bw_code_to_link_rate(u8 link_bw);
+
+static inline int
+drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+	return drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
+}
+
+static inline u8
+drm_dp_max_lane_count(u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+	return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+}
+
 #endif /* _DRM_DP_HELPER_H_ */
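
The block of helpers added here captures the DP 1.1a link-training handshake that drivers previously open-coded. A hedged sketch of the clock-recovery phase built only from these declarations (struct my_dp and read_link_status() are hypothetical stand-ins for the driver's state and its DPCD AUX read, and the per-lane drive-level update is elided):

	/* Sketch: clock-recovery loop over the helpers declared above. */
	static bool train_clock_recovery(struct my_dp *dp)
	{
		u8 link_status[DP_LINK_STATUS_SIZE];
		int tries;

		for (tries = 0; tries < 5; tries++) {
			/* wait the interval the sink advertises in its DPCD */
			drm_dp_link_train_clock_recovery_delay(dp->dpcd);
			if (!read_link_status(dp, link_status))	/* driver AUX read */
				return false;
			if (drm_dp_clock_recovery_ok(link_status, dp->lane_count))
				return true;
			/* else: raise swing/pre-emphasis from
			 * drm_dp_get_adjust_request_voltage() and retry */
		}
		return false;
	}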
diff --git a/include/drm/drm_hashtab.h b/include/drm/drm_hashtab.h
index 3650d5d011ee..fce2ef3fdfff 100644
--- a/include/drm/drm_hashtab.h
+++ b/include/drm/drm_hashtab.h
@@ -61,5 +61,19 @@ extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
 extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
 extern void drm_ht_remove(struct drm_open_hash *ht);
 
+/*
+ * RCU-safe interface
+ *
+ * The user of this API needs to make sure that two or more instances of the
+ * hash table manipulation functions are never run simultaneously.
+ * The lookup function drm_ht_find_item_rcu may, however, run simultaneously
+ * with any of the manipulation functions as long as it's called from within
+ * an RCU read-locked section.
+ */
+#define drm_ht_insert_item_rcu drm_ht_insert_item
+#define drm_ht_just_insert_please_rcu drm_ht_just_insert_please
+#define drm_ht_remove_key_rcu drm_ht_remove_key
+#define drm_ht_remove_item_rcu drm_ht_remove_item
+#define drm_ht_find_item_rcu drm_ht_find_item
 
 #endif
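
These aliases work provided entries are freed only after an RCU grace period (as the kfree_rcu-based ttm_base_object_kfree further down does); the lookup side then reduces to the usual read-lock bracket. A sketch of a reader under that assumption:

	/* Sketch: RCU-side lookup as the comment above prescribes. */
	struct drm_hash_item *item;
	int ret;

	rcu_read_lock();
	ret = drm_ht_find_item_rcu(ht, key, &item);
	if (ret == 0) {
		/* 'item' is only stable inside this read-side section;
		 * take a reference (e.g. kref_get_unless_zero()) before
		 * rcu_read_unlock() if it must outlive the bracket. */
	}
	rcu_read_unlock();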
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
index 3c13a3a4b158..808dad29607a 100644
--- a/include/drm/exynos_drm.h
+++ b/include/drm/exynos_drm.h
@@ -85,4 +85,30 @@ struct exynos_drm_hdmi_pdata {
 	int (*get_hpd)(void);
 };
 
+/**
+ * Platform Specific Structure for DRM based IPP.
+ *
+ * @inv_pclk: if set 1. invert pixel clock
+ * @inv_vsync: if set 1. invert vsync signal for wb
+ * @inv_href: if set 1. invert href signal
+ * @inv_hsync: if set 1. invert hsync signal for wb
+ */
+struct exynos_drm_ipp_pol {
+	unsigned int inv_pclk;
+	unsigned int inv_vsync;
+	unsigned int inv_href;
+	unsigned int inv_hsync;
+};
+
+/**
+ * Platform Specific Structure for DRM based FIMC.
+ *
+ * @pol: current hardware block polarity settings.
+ * @clk_rate: current hardware clock rate.
+ */
+struct exynos_drm_fimc_pdata {
+	struct exynos_drm_ipp_pol pol;
+	int clk_rate;
+};
+
 #endif /* _EXYNOS_DRM_H_ */
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index 2e37e9f02e71..6eb76a1f11ab 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -3,7 +3,7 @@
 #ifndef _DRM_INTEL_GTT_H
 #define _DRM_INTEL_GTT_H
 
-const struct intel_gtt {
+struct intel_gtt {
 	/* Size of memory reserved for graphics by the BIOS */
 	unsigned int stolen_size;
 	/* Total number of gtt entries. */
@@ -17,6 +17,7 @@ const struct intel_gtt {
 	unsigned int do_idle_maps : 1;
 	/* Share the scratch page dma with ppgtts. */
 	dma_addr_t scratch_page_dma;
+	struct page *scratch_page;
 	/* for ppgtt PDE access */
 	u32 __iomem *gtt;
 	/* needed for ioremap in drm/i915 */
@@ -39,10 +40,6 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
 #define AGP_DCACHE_MEMORY	1
 #define AGP_PHYS_MEMORY		2
 
-/* New caching attributes for gen6/sandybridge */
-#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
-#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
-
 /* flag for GFDT type */
 #define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
 
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index e8028ade567f..3cb5d848fb66 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -141,8 +141,6 @@ struct ttm_tt;
  * struct ttm_buffer_object
  *
  * @bdev: Pointer to the buffer object device structure.
- * @buffer_start: The virtual user-space start address of ttm_bo_type_user
- * buffers.
  * @type: The bo type.
  * @destroy: Destruction function. If NULL, kfree is used.
  * @num_pages: Actual number of pages.
@@ -172,7 +170,6 @@ struct ttm_tt;
  * @seq_valid: The value of @val_seq is valid. This value is protected by
  * the bo_device::lru_lock.
  * @reserved: Deadlock-free lock used for synchronization state transitions.
- * @sync_obj_arg: Opaque argument to synchronization object function.
  * @sync_obj: Pointer to a synchronization object.
  * @priv_flags: Flags describing buffer object internal state.
  * @vm_rb: Rb node for the vm rb tree.
@@ -200,7 +197,6 @@ struct ttm_buffer_object {
 
 	struct ttm_bo_global *glob;
 	struct ttm_bo_device *bdev;
-	unsigned long buffer_start;
 	enum ttm_bo_type type;
 	void (*destroy) (struct ttm_buffer_object *);
 	unsigned long num_pages;
@@ -255,7 +251,6 @@ struct ttm_buffer_object {
 	 * checking NULL while reserved but not holding the mentioned lock.
 	 */
 
-	void *sync_obj_arg;
 	void *sync_obj;
 	unsigned long priv_flags;
 
@@ -342,7 +337,6 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
  * @bo: The buffer object.
  * @placement: Proposed placement for the buffer object.
  * @interruptible: Sleep interruptible if sleeping.
- * @no_wait_reserve: Return immediately if other buffers are busy.
 * @no_wait_gpu: Return immediately if the GPU is busy.
  *
  * Changes placement and caching policy of the buffer object
@@ -355,7 +349,7 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
  */
 extern int ttm_bo_validate(struct ttm_buffer_object *bo,
 				struct ttm_placement *placement,
-				bool interruptible, bool no_wait_reserve,
+				bool interruptible,
 				bool no_wait_gpu);
 
 /**
@@ -429,8 +423,9 @@ extern void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev,
  * @no_wait: Return immediately if buffer is busy.
  *
  * Synchronizes a buffer object for CPU RW access. This means
- * blocking command submission that affects the buffer and
- * waiting for buffer idle. This lock is recursive.
+ * command submission that affects the buffer will return -EBUSY
+ * until ttm_bo_synccpu_write_release is called.
+ *
  * Returns
 * -EBUSY if the buffer is busy and no_wait is true.
  * -ERESTARTSYS if interrupted by a signal.
@@ -472,8 +467,6 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
  * @type: Requested type of buffer object.
  * @flags: Initial placement flags.
  * @page_alignment: Data alignment in pages.
- * @buffer_start: Virtual address of user space data backing a
- * user buffer object.
  * @interruptible: If needing to sleep to wait for GPU resources,
 * sleep interruptible.
  * @persistent_swap_storage: Usually the swap storage is deleted for buffers
@@ -505,7 +498,6 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
 			enum ttm_bo_type type,
 			struct ttm_placement *placement,
 			uint32_t page_alignment,
-			unsigned long buffer_start,
 			bool interrubtible,
 			struct file *persistent_swap_storage,
 			size_t acc_size,
@@ -521,8 +513,6 @@ extern int ttm_bo_init(struct ttm_bo_device *bdev,
  * @type: Requested type of buffer object.
  * @flags: Initial placement flags.
  * @page_alignment: Data alignment in pages.
- * @buffer_start: Virtual address of user space data backing a
- * user buffer object.
  * @interruptible: If needing to sleep while waiting for GPU resources,
 * sleep interruptible.
  * @persistent_swap_storage: Usually the swap storage is deleted for buffers
@@ -545,7 +535,6 @@ extern int ttm_bo_create(struct ttm_bo_device *bdev,
 			enum ttm_bo_type type,
 			struct ttm_placement *placement,
 			uint32_t page_alignment,
-			unsigned long buffer_start,
 			bool interruptible,
 			struct file *persistent_swap_storage,
 			struct ttm_buffer_object **p_bo);
@@ -736,4 +725,18 @@ extern ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
 
 extern void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
 
+/**
+ * ttm_bo_is_reserved - return an indication if a ttm buffer object is reserved
+ *
+ * @bo: The buffer object to check.
+ *
+ * This function returns an indication if a bo is reserved or not, and should
+ * only be used to print an error when it is not from incorrect api usage, since
+ * there's no guarantee that it is the caller that is holding the reservation.
+ */
+static inline bool ttm_bo_is_reserved(struct ttm_buffer_object *bo)
+{
+	return atomic_read(&bo->reserved);
+}
+
 #endif
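
With the no_wait_reserve argument gone, every validate call site shrinks by one parameter. A hedged sketch of the updated call shape (placement setup is driver-specific and elided):

	/* Sketch: ttm_bo_validate() after the no_wait_reserve removal. */
	ret = ttm_bo_validate(bo, &placement,
			      true,	/* interruptible */
			      false);	/* no_wait_gpu */
	if (unlikely(ret != 0))
		return ret;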
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index d803b92b0324..e3a43a47d78c 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -394,7 +394,7 @@ struct ttm_bo_driver {
 	 */
 	int (*move) (struct ttm_buffer_object *bo,
 		     bool evict, bool interruptible,
-		     bool no_wait_reserve, bool no_wait_gpu,
+		     bool no_wait_gpu,
 		     struct ttm_mem_reg *new_mem);
 
 	/**
@@ -422,10 +422,10 @@ struct ttm_bo_driver {
 	 * documentation.
 	 */
 
-	bool (*sync_obj_signaled) (void *sync_obj, void *sync_arg);
-	int (*sync_obj_wait) (void *sync_obj, void *sync_arg,
+	bool (*sync_obj_signaled) (void *sync_obj);
+	int (*sync_obj_wait) (void *sync_obj,
 			      bool lazy, bool interruptible);
-	int (*sync_obj_flush) (void *sync_obj, void *sync_arg);
+	int (*sync_obj_flush) (void *sync_obj);
 	void (*sync_obj_unref) (void **sync_obj);
 	void *(*sync_obj_ref) (void *sync_obj);
 
@@ -521,8 +521,6 @@ struct ttm_bo_global {
  * lru_lock: Spinlock that protects the buffer+device lru lists and
  * ddestroy lists.
  * @val_seq: Current validation sequence.
- * @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
- * If a GPU lockup has been detected, this is forced to 0.
  * @dev_mapping: A pointer to the struct address_space representing the
  * device address space.
  * @wq: Work queue structure for the delayed delete workqueue.
@@ -556,7 +554,6 @@ struct ttm_bo_device {
 	 * Protected by load / firstopen / lastclose /unload sync.
 	 */
 
-	bool nice_mode;
 	struct address_space *dev_mapping;
 
 	/*
@@ -706,7 +703,6 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
  * @proposed_placement: Proposed new placement for the buffer object.
  * @mem: A struct ttm_mem_reg.
  * @interruptible: Sleep interruptible when sliping.
- * @no_wait_reserve: Return immediately if other buffers are busy.
 * @no_wait_gpu: Return immediately if the GPU is busy.
  *
  * Allocate memory space for the buffer object pointed to by @bo, using
@@ -722,27 +718,13 @@ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 				struct ttm_placement *placement,
 				struct ttm_mem_reg *mem,
 				bool interruptible,
-				bool no_wait_reserve, bool no_wait_gpu);
+				bool no_wait_gpu);
 
 extern void ttm_bo_mem_put(struct ttm_buffer_object *bo,
 			   struct ttm_mem_reg *mem);
 extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
 				  struct ttm_mem_reg *mem);
 
-/**
- * ttm_bo_wait_for_cpu
- *
- * @bo: Pointer to a struct ttm_buffer_object.
- * @no_wait: Don't sleep while waiting.
- *
- * Wait until a buffer object is no longer sync'ed for CPU access.
- * Returns:
- * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
- * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
- */
-
-extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
-
 extern void ttm_bo_global_release(struct drm_global_reference *ref);
 extern int ttm_bo_global_init(struct drm_global_reference *ref);
 
@@ -918,7 +900,6 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
 *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @evict: 1: This is an eviction. Don't try to pipeline.
- * @no_wait_reserve: Return immediately if other buffers are busy.
 * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
@@ -933,15 +914,14 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
  */
 
 extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-			   bool evict, bool no_wait_reserve,
-			   bool no_wait_gpu, struct ttm_mem_reg *new_mem);
+			   bool evict, bool no_wait_gpu,
+			   struct ttm_mem_reg *new_mem);
 
 /**
  * ttm_bo_move_memcpy
 *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @evict: 1: This is an eviction. Don't try to pipeline.
- * @no_wait_reserve: Return immediately if other buffers are busy.
 * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
@@ -956,8 +936,8 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
  */
 
 extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-			      bool evict, bool no_wait_reserve,
-			      bool no_wait_gpu, struct ttm_mem_reg *new_mem);
+			      bool evict, bool no_wait_gpu,
+			      struct ttm_mem_reg *new_mem);
 
 /**
  * ttm_bo_free_old_node
@@ -973,10 +953,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
 *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @sync_obj: A sync object that signals when moving is complete.
- * @sync_obj_arg: An argument to pass to the sync object idle / wait
- * functions.
 * @evict: This is an evict move. Don't return until the buffer is idle.
- * @no_wait_reserve: Return immediately if other buffers are busy.
 * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
@@ -990,9 +967,7 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
 
 extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 				     void *sync_obj,
-				     void *sync_obj_arg,
-				     bool evict, bool no_wait_reserve,
-				     bool no_wait_gpu,
+				     bool evict, bool no_wait_gpu,
 				     struct ttm_mem_reg *new_mem);
 /**
  * ttm_io_prot
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index 1926cae373ba..547e19f06e57 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -39,8 +39,6 @@
  *
  * @head: list head for thread-private list.
  * @bo: refcounted buffer object pointer.
- * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once
- * adding a new sync object.
  * @reserved: Indicates whether @bo has been reserved for validation.
  * @removed: Indicates whether @bo has been removed from lru lists.
  * @put_count: Number of outstanding references on bo::list_kref.
@@ -50,7 +48,6 @@
 struct ttm_validate_buffer {
 	struct list_head head;
 	struct ttm_buffer_object *bo;
-	void *new_sync_obj_arg;
 	bool reserved;
 	bool removed;
 	int put_count;
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index d6d1da468c97..72dcbe81dd07 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -60,7 +60,6 @@ struct ttm_mem_shrink {
  * for the GPU, and this will otherwise block other workqueue tasks(?)
  * At this point we use only a single-threaded workqueue.
  * @work: The workqueue callback for the shrink queue.
- * @queue: Wait queue for processes suspended waiting for memory.
  * @lock: Lock to protect the @shrink - and the memory accounting members,
  * that is, essentially the whole structure with some exceptions.
  * @zones: Array of pointers to accounting zones.
@@ -80,7 +79,6 @@ struct ttm_mem_global {
 	struct ttm_mem_shrink *shrink;
 	struct workqueue_struct *swap_queue;
 	struct work_struct work;
-	wait_queue_head_t queue;
 	spinlock_t lock;
 	struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
 	unsigned int num_zones;
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
index b01c563b2751..fc0cf0649901 100644
--- a/include/drm/ttm/ttm_object.h
+++ b/include/drm/ttm/ttm_object.h
@@ -40,6 +40,7 @@
 #include <linux/list.h>
 #include <drm/drm_hashtab.h>
 #include <linux/kref.h>
+#include <linux/rcupdate.h>
 #include <ttm/ttm_memory.h>
 
 /**
@@ -120,6 +121,7 @@ struct ttm_object_device;
  */
 
 struct ttm_base_object {
+	struct rcu_head rhead;
 	struct drm_hash_item hash;
 	enum ttm_object_type object_type;
 	bool shareable;
@@ -268,4 +270,6 @@ extern struct ttm_object_device *ttm_object_device_init
 
 extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
 
+#define ttm_base_object_kfree(__object, __base)\
+	kfree_rcu(__object, __base.rhead)
 #endif
diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h
index f83f793223ff..c8e1831d7572 100644
--- a/include/linux/dma-attrs.h
+++ b/include/linux/dma-attrs.h
@@ -17,6 +17,7 @@ enum dma_attr {
 	DMA_ATTR_NON_CONSISTENT,
 	DMA_ATTR_NO_KERNEL_MAPPING,
 	DMA_ATTR_SKIP_CPU_SYNC,
+	DMA_ATTR_FORCE_CONTIGUOUS,
 	DMA_ATTR_MAX,
 };
 
diff --git a/include/linux/kref.h b/include/linux/kref.h
index 65af6887872f..4972e6e9ca93 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -111,4 +111,25 @@ static inline int kref_put_mutex(struct kref *kref,
 	}
 	return 0;
 }
+
+/**
+ * kref_get_unless_zero - Increment refcount for object unless it is zero.
+ * @kref: object.
+ *
+ * Return non-zero if the increment succeeded. Otherwise return 0.
+ *
+ * This function is intended to simplify locking around refcounting for
+ * objects that can be looked up from a lookup structure, and which are
+ * removed from that lookup structure in the object destructor.
+ * Operations on such objects require at least a read lock around
+ * lookup + kref_get, and a write lock around kref_put + remove from lookup
+ * structure. Furthermore, RCU implementations become extremely tricky.
+ * With a lookup followed by a kref_get_unless_zero *with return value check*
+ * locking in the kref_put path can be deferred to the actual removal from
+ * the lookup structure and RCU lookups become trivial.
+ */
+static inline int __must_check kref_get_unless_zero(struct kref *kref)
+{
+	return atomic_add_unless(&kref->refcount, 1, 0);
+}
 #endif /* _KREF_H_ */
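
In practice the pattern the new kernel-doc describes looks like this; a sketch under stated assumptions (my_obj, obj_table and table_lookup() are hypothetical, and the object destructor is expected to remove the entry from the table before freeing):

	/* Sketch: RCU lookup + kref_get_unless_zero(), per the comment above. */
	struct my_obj *obj_get(unsigned long key)
	{
		struct my_obj *obj;

		rcu_read_lock();
		obj = table_lookup(obj_table, key);	/* hypothetical lookup */
		if (obj && !kref_get_unless_zero(&obj->kref))
			obj = NULL;	/* refcount already hit zero: object is dying */
		rcu_read_unlock();

		return obj;
	}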
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 1e3481edf062..8d1e2bbee83a 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -778,6 +778,7 @@ struct drm_event_vblank {
 #define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
 #define DRM_CAP_DUMB_PREFER_SHADOW 0x4
 #define DRM_CAP_PRIME 0x5
+#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
 
 #define DRM_PRIME_CAP_IMPORT 0x1
 #define DRM_PRIME_CAP_EXPORT 0x2
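
User space can probe the new capability before interpreting vblank timestamps; a sketch using libdrm's drmGetCap() (assuming a libdrm recent enough to pass the capability through):

	/* Sketch: decide how to interpret vblank event timestamps. */
	uint64_t mono = 0;

	if (drmGetCap(fd, DRM_CAP_TIMESTAMP_MONOTONIC, &mono) == 0 && mono)
		/* timestamps are CLOCK_MONOTONIC */ ;
	else
		/* treat timestamps as CLOCK_REALTIME (legacy behaviour) */ ;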
diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h
index c0494d586e23..e7f52c334005 100644
--- a/include/uapi/drm/exynos_drm.h
+++ b/include/uapi/drm/exynos_drm.h
@@ -133,17 +133,26 @@ struct drm_exynos_g2d_cmd {
 	__u32	data;
 };
 
+enum drm_exynos_g2d_buf_type {
+	G2D_BUF_USERPTR = 1 << 31,
+};
+
 enum drm_exynos_g2d_event_type {
 	G2D_EVENT_NOT,
 	G2D_EVENT_NONSTOP,
 	G2D_EVENT_STOP,		/* not yet */
 };
 
+struct drm_exynos_g2d_userptr {
+	unsigned long userptr;
+	unsigned long size;
+};
+
 struct drm_exynos_g2d_set_cmdlist {
 	__u64	cmd;
-	__u64	cmd_gem;
+	__u64	cmd_buf;
 	__u32	cmd_nr;
-	__u32	cmd_gem_nr;
+	__u32	cmd_buf_nr;
 
 	/* for g2d event */
 	__u64	event_type;
@@ -154,6 +163,170 @@ struct drm_exynos_g2d_exec {
 	__u64	async;
 };
 
+enum drm_exynos_ops_id {
+	EXYNOS_DRM_OPS_SRC,
+	EXYNOS_DRM_OPS_DST,
+	EXYNOS_DRM_OPS_MAX,
+};
+
+struct drm_exynos_sz {
+	__u32	hsize;
+	__u32	vsize;
+};
+
+struct drm_exynos_pos {
+	__u32	x;
+	__u32	y;
+	__u32	w;
+	__u32	h;
+};
+
+enum drm_exynos_flip {
+	EXYNOS_DRM_FLIP_NONE = (0 << 0),
+	EXYNOS_DRM_FLIP_VERTICAL = (1 << 0),
+	EXYNOS_DRM_FLIP_HORIZONTAL = (1 << 1),
+};
+
+enum drm_exynos_degree {
+	EXYNOS_DRM_DEGREE_0,
+	EXYNOS_DRM_DEGREE_90,
+	EXYNOS_DRM_DEGREE_180,
+	EXYNOS_DRM_DEGREE_270,
+};
+
+enum drm_exynos_planer {
+	EXYNOS_DRM_PLANAR_Y,
+	EXYNOS_DRM_PLANAR_CB,
+	EXYNOS_DRM_PLANAR_CR,
+	EXYNOS_DRM_PLANAR_MAX,
+};
+
+/**
+ * A structure for ipp supported property list.
+ *
+ * @version: version of this structure.
+ * @ipp_id: id of ipp driver.
+ * @count: count of ipp driver.
+ * @writeback: flag of writeback supporting.
+ * @flip: flag of flip supporting.
+ * @degree: flag of degree information.
+ * @csc: flag of csc supporting.
+ * @crop: flag of crop supporting.
+ * @scale: flag of scale supporting.
+ * @refresh_min: min hz of refresh.
+ * @refresh_max: max hz of refresh.
+ * @crop_min: crop min resolution.
+ * @crop_max: crop max resolution.
+ * @scale_min: scale min resolution.
+ * @scale_max: scale max resolution.
+ */
+struct drm_exynos_ipp_prop_list {
+	__u32	version;
+	__u32	ipp_id;
+	__u32	count;
+	__u32	writeback;
+	__u32	flip;
+	__u32	degree;
+	__u32	csc;
+	__u32	crop;
+	__u32	scale;
+	__u32	refresh_min;
+	__u32	refresh_max;
+	__u32	reserved;
+	struct drm_exynos_sz	crop_min;
+	struct drm_exynos_sz	crop_max;
+	struct drm_exynos_sz	scale_min;
+	struct drm_exynos_sz	scale_max;
+};
+
+/**
+ * A structure for ipp config.
+ *
+ * @ops_id: property of operation directions.
+ * @flip: property of mirror, flip.
+ * @degree: property of rotation degree.
+ * @fmt: property of image format.
+ * @sz: property of image size.
+ * @pos: property of image position(src-cropped,dst-scaler).
+ */
+struct drm_exynos_ipp_config {
+	enum drm_exynos_ops_id	ops_id;
+	enum drm_exynos_flip	flip;
+	enum drm_exynos_degree	degree;
+	__u32	fmt;
+	struct drm_exynos_sz	sz;
+	struct drm_exynos_pos	pos;
+};
+
+enum drm_exynos_ipp_cmd {
+	IPP_CMD_NONE,
+	IPP_CMD_M2M,
+	IPP_CMD_WB,
+	IPP_CMD_OUTPUT,
+	IPP_CMD_MAX,
+};
+
+/**
+ * A structure for ipp property.
+ *
+ * @config: source, destination config.
+ * @cmd: definition of command.
+ * @ipp_id: id of ipp driver.
+ * @prop_id: id of property.
+ * @refresh_rate: refresh rate.
+ */
+struct drm_exynos_ipp_property {
+	struct drm_exynos_ipp_config config[EXYNOS_DRM_OPS_MAX];
+	enum drm_exynos_ipp_cmd	cmd;
+	__u32	ipp_id;
+	__u32	prop_id;
+	__u32	refresh_rate;
+};
+
+enum drm_exynos_ipp_buf_type {
+	IPP_BUF_ENQUEUE,
+	IPP_BUF_DEQUEUE,
+};
+
+/**
+ * A structure for ipp buffer operations.
+ *
+ * @ops_id: operation directions.
+ * @buf_type: definition of buffer.
+ * @prop_id: id of property.
+ * @buf_id: id of buffer.
+ * @handle: Y, Cb, Cr each planar handle.
+ * @user_data: user data.
+ */
+struct drm_exynos_ipp_queue_buf {
+	enum drm_exynos_ops_id	ops_id;
+	enum drm_exynos_ipp_buf_type	buf_type;
+	__u32	prop_id;
+	__u32	buf_id;
+	__u32	handle[EXYNOS_DRM_PLANAR_MAX];
+	__u32	reserved;
+	__u64	user_data;
+};
+
+enum drm_exynos_ipp_ctrl {
+	IPP_CTRL_PLAY,
+	IPP_CTRL_STOP,
+	IPP_CTRL_PAUSE,
+	IPP_CTRL_RESUME,
+	IPP_CTRL_MAX,
+};
+
+/**
+ * A structure for ipp start/stop operations.
+ *
+ * @prop_id: id of property.
+ * @ctrl: definition of control.
+ */
+struct drm_exynos_ipp_cmd_ctrl {
+	__u32	prop_id;
+	enum drm_exynos_ipp_ctrl	ctrl;
+};
+
 #define DRM_EXYNOS_GEM_CREATE		0x00
 #define DRM_EXYNOS_GEM_MAP_OFFSET	0x01
 #define DRM_EXYNOS_GEM_MMAP		0x02
@@ -166,6 +339,12 @@ struct drm_exynos_g2d_exec {
 #define DRM_EXYNOS_G2D_SET_CMDLIST	0x21
 #define DRM_EXYNOS_G2D_EXEC		0x22
 
+/* IPP - Image Post Processing */
+#define DRM_EXYNOS_IPP_GET_PROPERTY	0x30
+#define DRM_EXYNOS_IPP_SET_PROPERTY	0x31
+#define DRM_EXYNOS_IPP_QUEUE_BUF	0x32
+#define DRM_EXYNOS_IPP_CMD_CTRL		0x33
+
 #define DRM_IOCTL_EXYNOS_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + \
 		DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
 
@@ -188,8 +367,18 @@ struct drm_exynos_g2d_exec {
 #define DRM_IOCTL_EXYNOS_G2D_EXEC	DRM_IOWR(DRM_COMMAND_BASE + \
 		DRM_EXYNOS_G2D_EXEC, struct drm_exynos_g2d_exec)
 
+#define DRM_IOCTL_EXYNOS_IPP_GET_PROPERTY	DRM_IOWR(DRM_COMMAND_BASE + \
+		DRM_EXYNOS_IPP_GET_PROPERTY, struct drm_exynos_ipp_prop_list)
+#define DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY	DRM_IOWR(DRM_COMMAND_BASE + \
+		DRM_EXYNOS_IPP_SET_PROPERTY, struct drm_exynos_ipp_property)
+#define DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF	DRM_IOWR(DRM_COMMAND_BASE + \
+		DRM_EXYNOS_IPP_QUEUE_BUF, struct drm_exynos_ipp_queue_buf)
+#define DRM_IOCTL_EXYNOS_IPP_CMD_CTRL	DRM_IOWR(DRM_COMMAND_BASE + \
+		DRM_EXYNOS_IPP_CMD_CTRL, struct drm_exynos_ipp_cmd_ctrl)
+
 /* EXYNOS specific events */
 #define DRM_EXYNOS_G2D_EVENT		0x80000000
+#define DRM_EXYNOS_IPP_EVENT		0x80000001
 
 struct drm_exynos_g2d_event {
 	struct drm_event	base;
@@ -200,4 +389,14 @@ struct drm_exynos_g2d_event {
 	__u32	reserved;
 };
 
+struct drm_exynos_ipp_event {
+	struct drm_event	base;
+	__u64			user_data;
+	__u32			tv_sec;
+	__u32			tv_usec;
+	__u32			prop_id;
+	__u32			reserved;
+	__u32			buf_id[EXYNOS_DRM_OPS_MAX];
+};
+
 #endif /* _UAPI_EXYNOS_DRM_H_ */
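
End to end, the new UAPI drives an IPP block in three ioctl steps: set a property describing the src/dst operation, queue buffers against the returned prop_id, then start it with a command control. A hedged sketch from user space (GEM handle creation, the dst-side queue and event handling are omitted; the kernel filling prop_id on return is assumed from the DRM_IOWR definition):

	/* Sketch: minimal M2M start sequence over the IPP ioctls above. */
	struct drm_exynos_ipp_property prop = {
		.cmd = IPP_CMD_M2M,
		/* .config[EXYNOS_DRM_OPS_SRC / _DST]: fmt, sz, pos, flip... */
	};
	struct drm_exynos_ipp_queue_buf qbuf = {
		.ops_id   = EXYNOS_DRM_OPS_SRC,
		.buf_type = IPP_BUF_ENQUEUE,
		/* .handle[EXYNOS_DRM_PLANAR_Y..CR]: GEM handles */
	};
	struct drm_exynos_ipp_cmd_ctrl ctrl = { .ctrl = IPP_CTRL_PLAY };

	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_SET_PROPERTY, &prop);
	qbuf.prop_id = prop.prop_id;	/* assumed: filled in by the driver */
	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_QUEUE_BUF, &qbuf);
	ctrl.prop_id = prop.prop_id;
	ioctl(fd, DRM_IOCTL_EXYNOS_IPP_CMD_CTRL, &ctrl);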
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 4322b1e7d2ed..b746a3cf5fa9 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -306,6 +306,7 @@ typedef struct drm_i915_irq_wait {
 #define I915_PARAM_HAS_SEMAPHORES	 20
 #define I915_PARAM_HAS_PRIME_VMAP_FLUSH	 21
 #define I915_PARAM_RSVD_FOR_FUTURE_USE	 22
+#define I915_PARAM_HAS_SECURE_BATCHES	 23
 
 typedef struct drm_i915_getparam {
 	int param;
@@ -671,6 +672,11 @@ struct drm_i915_gem_execbuffer2 {
 /** Resets the SO write offset registers for transform feedback on gen7. */
 #define I915_EXEC_GEN7_SOL_RESET	(1<<8)
 
+/** Request a privileged ("secure") batch buffer. Note only available for
+ * DRM_ROOT_ONLY | DRM_MASTER processes.
+ */
+#define I915_EXEC_SECURE		(1<<9)
+
 #define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
 #define i915_execbuffer2_set_context_id(eb2, context) \
 	(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 4766c0f6a838..eeda91774c8a 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -913,9 +913,11 @@ struct drm_radeon_gem_va {
 /* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
 #define RADEON_CS_KEEP_TILING_FLAGS 0x01
 #define RADEON_CS_USE_VM            0x02
+#define RADEON_CS_END_OF_FRAME      0x04 /* a hint from userspace which CS is the last one */
 /* The second dword of RADEON_CHUNK_ID_FLAGS is a uint32 that sets the ring type */
 #define RADEON_CS_RING_GFX          0
 #define RADEON_CS_RING_COMPUTE      1
+#define RADEON_CS_RING_DMA          2
 /* The third dword of RADEON_CHUNK_ID_FLAGS is a sint32 that sets the priority */
 /* 0 = normal, + = higher priority, - = lower priority */
 
@@ -966,6 +968,10 @@ struct drm_radeon_cs {
 #define RADEON_INFO_MAX_PIPES		0x10
 /* timestamp for GL_ARB_timer_query (OpenGL), returns the current GPU clock */
 #define RADEON_INFO_TIMESTAMP		0x11
+/* max shader engines (SE) - needed for geometry shaders, etc. */
+#define RADEON_INFO_MAX_SE		0x12
+/* max SH per SE */
+#define RADEON_INFO_MAX_SH_PER_SE	0x13
 
 struct drm_radeon_info {
 	uint32_t	request;